From e1cbb28f3f59e4456d533b727d4fbffead76caa7 Mon Sep 17 00:00:00 2001 From: Jon Masamitsu Date: Fri, 1 Mar 2013 10:19:29 -0800 Subject: [PATCH 001/162] 8011268: NPG: Free unused VirtualSpaceNodes Reviewed-by: mgerdin, coleenp, johnc --- .../share/vm/classfile/classLoaderData.cpp | 1 + hotspot/src/share/vm/memory/metachunk.cpp | 38 +-- hotspot/src/share/vm/memory/metachunk.hpp | 18 +- hotspot/src/share/vm/memory/metaspace.cpp | 225 ++++++++++++++++-- hotspot/src/share/vm/memory/metaspace.hpp | 3 + 5 files changed, 233 insertions(+), 52 deletions(-) diff --git a/hotspot/src/share/vm/classfile/classLoaderData.cpp b/hotspot/src/share/vm/classfile/classLoaderData.cpp index e20de3c252a..75b9f34f2b2 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.cpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp @@ -686,6 +686,7 @@ void ClassLoaderDataGraph::purge() { next = purge_me->next(); delete purge_me; } + Metaspace::purge(); } // CDS support diff --git a/hotspot/src/share/vm/memory/metachunk.cpp b/hotspot/src/share/vm/memory/metachunk.cpp index 4cb955862a8..0ac4ced70f4 100644 --- a/hotspot/src/share/vm/memory/metachunk.cpp +++ b/hotspot/src/share/vm/memory/metachunk.cpp @@ -28,6 +28,7 @@ #include "utilities/copy.hpp" #include "utilities/debug.hpp" +class VirtualSpaceNode; // // Future modification // @@ -45,27 +46,30 @@ size_t Metachunk::_overhead = // Metachunk methods -Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) { - // Set bottom, top, and end. Allow space for the Metachunk itself - Metachunk* chunk = (Metachunk*) ptr; - - MetaWord* chunk_bottom = ptr + _overhead; - chunk->set_bottom(ptr); - chunk->set_top(chunk_bottom); - MetaWord* chunk_end = ptr + word_size; - assert(chunk_end > chunk_bottom, "Chunk must be too small"); - chunk->set_end(chunk_end); - chunk->set_next(NULL); - chunk->set_prev(NULL); - chunk->set_word_size(word_size); +Metachunk::Metachunk(size_t word_size, + VirtualSpaceNode* container) : + _word_size(word_size), + _bottom(NULL), + _end(NULL), + _top(NULL), + _next(NULL), + _prev(NULL), + _container(container) +{ + _bottom = (MetaWord*)this; + _top = (MetaWord*)this + _overhead; + _end = (MetaWord*)this + word_size; #ifdef ASSERT - size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord)); - Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize); + set_is_free(false); + size_t data_word_size = pointer_delta(end(), + top(), + sizeof(MetaWord)); + Copy::fill_to_words((HeapWord*) top(), + data_word_size, + metadata_chunk_initialize); #endif - return chunk; } - MetaWord* Metachunk::allocate(size_t word_size) { MetaWord* result = NULL; // If available, bump the pointer to allocate. diff --git a/hotspot/src/share/vm/memory/metachunk.hpp b/hotspot/src/share/vm/memory/metachunk.hpp index a10cba8dbbe..ff237ab5d3f 100644 --- a/hotspot/src/share/vm/memory/metachunk.hpp +++ b/hotspot/src/share/vm/memory/metachunk.hpp @@ -41,10 +41,13 @@ // | | | | // +--------------+ <- bottom ---+ ---+ +class VirtualSpaceNode; + class Metachunk VALUE_OBJ_CLASS_SPEC { // link to support lists of chunks Metachunk* _next; Metachunk* _prev; + VirtualSpaceNode* _container; MetaWord* _bottom; MetaWord* _end; @@ -61,29 +64,20 @@ class Metachunk VALUE_OBJ_CLASS_SPEC { // the space. 
static size_t _overhead; - void set_bottom(MetaWord* v) { _bottom = v; } - void set_end(MetaWord* v) { _end = v; } - void set_top(MetaWord* v) { _top = v; } - void set_word_size(size_t v) { _word_size = v; } public: -#ifdef ASSERT - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false), - _next(NULL), _prev(NULL) {} -#else - Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), - _next(NULL), _prev(NULL) {} -#endif + Metachunk(size_t word_size , VirtualSpaceNode* container); // Used to add a Metachunk to a list of Metachunks void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");} void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");} + void set_container(VirtualSpaceNode* v) { _container = v; } MetaWord* allocate(size_t word_size); - static Metachunk* initialize(MetaWord* ptr, size_t word_size); // Accessors Metachunk* next() const { return _next; } Metachunk* prev() const { return _prev; } + VirtualSpaceNode* container() const { return _container; } MetaWord* bottom() const { return _bottom; } MetaWord* end() const { return _end; } MetaWord* top() const { return _top; } diff --git a/hotspot/src/share/vm/memory/metaspace.cpp b/hotspot/src/share/vm/memory/metaspace.cpp index 1f623bf6032..3cc6d8d4959 100644 --- a/hotspot/src/share/vm/memory/metaspace.cpp +++ b/hotspot/src/share/vm/memory/metaspace.cpp @@ -112,6 +112,7 @@ typedef class FreeList ChunkList; class ChunkManager VALUE_OBJ_CLASS_SPEC { // Free list of chunks of different sizes. + // SpecializedChunk // SmallChunk // MediumChunk // HumongousChunk @@ -165,6 +166,10 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { // for special, small, medium, and humongous chunks. static ChunkIndex list_index(size_t size); + // Remove the chunk from its freelist. It is + // expected to be on one of the _free_chunks[] lists. + void remove_chunk(Metachunk* chunk); + // Add the simple linked list of chunks to the freelist of chunks // of type index. 
void return_chunks(ChunkIndex index, Metachunk* chunks); @@ -255,6 +260,8 @@ class VirtualSpaceNode : public CHeapObj { ReservedSpace _rs; VirtualSpace _virtual_space; MetaWord* _top; + // count of chunks contained in this VirtualSpace + uintx _container_count; // Convenience functions for logical bottom and end MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); } @@ -264,10 +271,19 @@ class VirtualSpaceNode : public CHeapObj { char* low() const { return virtual_space()->low(); } char* high() const { return virtual_space()->high(); } + // The first Metachunk will be allocated at the bottom of the + // VirtualSpace + Metachunk* first_chunk() { return (Metachunk*) bottom(); } + + void inc_container_count(); +#ifdef ASSERT + uint container_count_slow(); +#endif + public: VirtualSpaceNode(size_t byte_size); - VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {} + VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {} ~VirtualSpaceNode(); // address of next available space in _virtual_space; @@ -288,6 +304,12 @@ class VirtualSpaceNode : public CHeapObj { MetaWord* top() const { return _top; } void inc_top(size_t word_size) { _top += word_size; } + uintx container_count() { return _container_count; } + void dec_container_count(); +#ifdef ASSERT + void verify_container_count(); +#endif + // used and capacity in this single entry in the list size_t used_words_in_vs() const; size_t capacity_words_in_vs() const; @@ -306,6 +328,10 @@ class VirtualSpaceNode : public CHeapObj { bool expand_by(size_t words, bool pre_touch = false); bool shrink_by(size_t words); + // In preparation for deleting this node, remove all the chunks + // in the node from any freelist. + void purge(ChunkManager* chunk_manager); + #ifdef ASSERT // Debug support static void verify_virtual_space_total(); @@ -317,7 +343,7 @@ class VirtualSpaceNode : public CHeapObj { }; // byte_size is the size of the associated virtualspace. -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) { +VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) { // align up to vm allocation granularity byte_size = align_size_up(byte_size, os::vm_allocation_granularity()); @@ -341,6 +367,39 @@ VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); } +void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + assert(chunk->is_free(), "Should be marked free"); + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + chunk_manager->remove_chunk(chunk); + assert(chunk->next() == NULL && + chunk->prev() == NULL, + "Was not removed from its list"); + chunk = (Metachunk*) next; + } +} + +#ifdef ASSERT +uint VirtualSpaceNode::container_count_slow() { + uint count = 0; + Metachunk* chunk = first_chunk(); + Metachunk* invalid_chunk = (Metachunk*) top(); + while (chunk < invalid_chunk ) { + MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); + // Don't count the chunks on the free lists. Those are + // still part of the VirtualSpaceNode but not currently + // counted. + if (!chunk->is_free()) { + count++; + } + chunk = (Metachunk*) next; + } + return count; +} +#endif + // List of VirtualSpaces for metadata allocation. 
// It has a _next link for singly linked list and a MemRegion // for total space in the VirtualSpace. @@ -410,14 +469,14 @@ class VirtualSpaceList : public CHeapObj { void initialize(size_t word_size); size_t virtual_space_total() { return _virtual_space_total; } - void inc_virtual_space_total(size_t v) { - Atomic::add_ptr(v, &_virtual_space_total); - } - size_t virtual_space_count() { return _virtual_space_count; } - void inc_virtual_space_count() { - Atomic::inc_ptr(&_virtual_space_count); - } + void inc_virtual_space_total(size_t v); + void dec_virtual_space_total(size_t v); + void inc_virtual_space_count(); + void dec_virtual_space_count(); + + // Unlink empty VirtualSpaceNodes and free it. + void purge(); // Used and capacity in the entire list of virtual spaces. // These are global values shared by all Metaspaces @@ -641,6 +700,28 @@ Mutex* const SpaceManager::_expand_lock = SpaceManager::_expand_lock_name, Mutex::_allow_vm_block_flag); +void VirtualSpaceNode::inc_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count++; + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT + "container_count_slow() " SIZE_FORMAT, + _container_count, container_count_slow())); +} + +void VirtualSpaceNode::dec_container_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _container_count--; +} + +#ifdef ASSERT +void VirtualSpaceNode::verify_container_count() { + assert(_container_count == container_count_slow(), + err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT + "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow())); +} +#endif + // BlockFreelist methods BlockFreelist::BlockFreelist() : _dictionary(NULL) {} @@ -701,6 +782,10 @@ void BlockFreelist::print_on(outputStream* st) const { VirtualSpaceNode::~VirtualSpaceNode() { _rs.release(); +#ifdef ASSERT + size_t word_size = sizeof(*this) / BytesPerWord; + Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); +#endif } size_t VirtualSpaceNode::used_words_in_vs() const { @@ -733,8 +818,8 @@ Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { // Take the space (bump top on the current virtual space). 
inc_top(chunk_word_size); - // Point the chunk at the space - Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size); + // Initialize the chunk + Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this); return result; } @@ -762,9 +847,11 @@ bool VirtualSpaceNode::shrink_by(size_t words) { Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { assert_lock_strong(SpaceManager::expand_lock()); - Metachunk* result = NULL; - - return take_from_committed(chunk_word_size); + Metachunk* result = take_from_committed(chunk_word_size); + if (result != NULL) { + inc_container_count(); + } + return result; } Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) { @@ -843,6 +930,83 @@ VirtualSpaceList::~VirtualSpaceList() { } } +void VirtualSpaceList::inc_virtual_space_total(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_total = _virtual_space_total + v; +} +void VirtualSpaceList::dec_virtual_space_total(size_t v) { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_total = _virtual_space_total - v; +} + +void VirtualSpaceList::inc_virtual_space_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_count++; +} +void VirtualSpaceList::dec_virtual_space_count() { + assert_lock_strong(SpaceManager::expand_lock()); + _virtual_space_count--; +} + +void ChunkManager::remove_chunk(Metachunk* chunk) { + size_t word_size = chunk->word_size(); + ChunkIndex index = list_index(word_size); + if (index != HumongousIndex) { + free_chunks(index)->remove_chunk(chunk); + } else { + humongous_dictionary()->remove_chunk(chunk); + } + + // Chunk is being removed from the chunks free list. + dec_free_chunks_total(chunk->capacity_word_size()); +} + +// Walk the list of VirtualSpaceNodes and delete +// nodes with a 0 container_count. Remove Metachunks in +// the node from their respective freelists. +void VirtualSpaceList::purge() { + assert_lock_strong(SpaceManager::expand_lock()); + // Don't use a VirtualSpaceListIterator because this + // list is being changed and a straightforward use of an iterator is not safe. + VirtualSpaceNode* purged_vsl = NULL; + VirtualSpaceNode* prev_vsl = virtual_space_list(); + VirtualSpaceNode* next_vsl = prev_vsl; + while (next_vsl != NULL) { + VirtualSpaceNode* vsl = next_vsl; + next_vsl = vsl->next(); + // Don't free the current virtual space since it will likely + // be needed soon. + if (vsl->container_count() == 0 && vsl != current_virtual_space()) { + // Unlink it from the list + if (prev_vsl == vsl) { + // This is the case of the current note being the first note. + assert(vsl == virtual_space_list(), "Expected to be the first note"); + set_virtual_space_list(vsl->next()); + } else { + prev_vsl->set_next(vsl->next()); + } + + vsl->purge(chunk_manager()); + dec_virtual_space_total(vsl->reserved()->word_size()); + dec_virtual_space_count(); + purged_vsl = vsl; + delete vsl; + } else { + prev_vsl = vsl; + } + } +#ifdef ASSERT + if (purged_vsl != NULL) { + // List should be stable enough to use an iterator here. 
+ VirtualSpaceListIterator iter(virtual_space_list()); + while (iter.repeat()) { + VirtualSpaceNode* vsl = iter.get_next(); + assert(vsl != purged_vsl, "Purge of vsl failed"); + } + } +#endif +} + size_t VirtualSpaceList::used_words_sum() { size_t allocated_by_vs = 0; VirtualSpaceListIterator iter(virtual_space_list()); @@ -955,8 +1119,10 @@ Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size, // Get a chunk from the chunk freelist Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words); - // Allocate a chunk out of the current virtual space. - if (next == NULL) { + if (next != NULL) { + next->container()->inc_container_count(); + } else { + // Allocate a chunk out of the current virtual space. next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words); } @@ -1567,9 +1733,6 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { } // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); -#ifdef ASSERT - chunk->set_is_free(false); -#endif } else { return NULL; } @@ -1578,6 +1741,11 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { // Remove it from the links to this freelist chunk->set_next(NULL); chunk->set_prev(NULL); +#ifdef ASSERT + // Chunk is no longer on any freelist. Setting to false make container_count_slow() + // work. + chunk->set_is_free(false); +#endif slow_locked_verify(); return chunk; } @@ -1887,11 +2055,13 @@ void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { assert_lock_strong(SpaceManager::expand_lock()); Metachunk* cur = chunks; - // This return chunks one at a time. If a new + // This returns chunks one at a time. If a new // class List can be created that is a base class // of FreeList then something like FreeList::prepend() // can be used in place of this loop while (cur != NULL) { + assert(cur->container() != NULL, "Container should have been set"); + cur->container()->dec_container_count(); // Capture the next link before it is changed // by the call to return_chunk_at_head(); Metachunk* next = cur->next(); @@ -1917,8 +2087,8 @@ SpaceManager::~SpaceManager() { locked_print_chunks_in_use_on(gclog_or_tty); } - // Mangle freed memory. - NOT_PRODUCT(mangle_freed_chunks();) + // Do not mangle freed Metachunks. The chunk size inside Metachunks + // is during the freeing of a VirtualSpaceNodes. // Have to update before the chunks_in_use lists are emptied // below. @@ -1978,6 +2148,7 @@ SpaceManager::~SpaceManager() { " granularity %d", humongous_chunks->word_size(), HumongousChunkGranularity)); Metachunk* next_humongous_chunks = humongous_chunks->next(); + humongous_chunks->container()->dec_container_count(); chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks); humongous_chunks = next_humongous_chunks; } @@ -2716,6 +2887,13 @@ Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, return Metablock::initialize(result, word_size); } +void Metaspace::purge() { + MutexLockerEx cl(SpaceManager::expand_lock(), + Mutex::_no_safepoint_check_flag); + space_list()->purge(); + class_space_list()->purge(); +} + void Metaspace::print_on(outputStream* out) const { // Print both class virtual space counts and metaspace. if (Verbose) { @@ -2733,7 +2911,8 @@ bool Metaspace::contains(const void * ptr) { // aren't deleted presently. When they are, some sort of locking might // be needed. Note, locking this can cause inversion problems with the // caller in MetaspaceObj::is_metadata() function. 
- return space_list()->contains(ptr) || class_space_list()->contains(ptr); + return space_list()->contains(ptr) || + class_space_list()->contains(ptr); } void Metaspace::verify() { diff --git a/hotspot/src/share/vm/memory/metaspace.hpp b/hotspot/src/share/vm/memory/metaspace.hpp index f704804795f..8d221914572 100644 --- a/hotspot/src/share/vm/memory/metaspace.hpp +++ b/hotspot/src/share/vm/memory/metaspace.hpp @@ -150,6 +150,9 @@ class Metaspace : public CHeapObj { static bool contains(const void *ptr); void dump(outputStream* const out) const; + // Free empty virtualspaces + static void purge(); + void print_on(outputStream* st) const; // Debugging support void verify(); From 4e6c27cef0b04cc571efa62cc9c3eefa4a3051e6 Mon Sep 17 00:00:00 2001 From: Kevin Walls Date: Thu, 18 Apr 2013 17:02:20 +0100 Subject: [PATCH 002/162] 7109087: gc/7072527/TestFullGCCount.java fails when GC is set in command-line Reviewed-by: mgerdin --- hotspot/test/gc/7072527/TestFullGCCount.java | 92 ++++++++++---------- 1 file changed, 44 insertions(+), 48 deletions(-) diff --git a/hotspot/test/gc/7072527/TestFullGCCount.java b/hotspot/test/gc/7072527/TestFullGCCount.java index 14a049a89b6..96b66c1e4d1 100644 --- a/hotspot/test/gc/7072527/TestFullGCCount.java +++ b/hotspot/test/gc/7072527/TestFullGCCount.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,71 +25,67 @@ * @test TestFullGCount.java * @bug 7072527 * @summary CMS: JMM GC counters overcount in some cases - * @run main/othervm -XX:+UseConcMarkSweepGC TestFullGCCount - * + * @run main/othervm -XX:+PrintGC TestFullGCCount */ import java.util.*; import java.lang.management.*; +/* + * Originally for a specific failure in CMS, this test now monitors all + * collectors for double-counting of collections. + */ public class TestFullGCCount { - public String collectorName = "ConcurrentMarkSweep"; + static List collectors = ManagementFactory.getGarbageCollectorMXBeans(); - public static void main(String [] args) { - - TestFullGCCount t = null; - if (args.length==2) { - t = new TestFullGCCount(args[0], args[1]); - } else { - t = new TestFullGCCount(); - } - System.out.println("Monitoring collector: " + t.collectorName); - t.run(); - } - - public TestFullGCCount(String pool, String collector) { - collectorName = collector; - } - - public TestFullGCCount() { - } - - public void run() { - int count = 0; + public static void main(String[] args) { int iterations = 20; - long counts[] = new long[iterations]; - boolean diffAlways2 = true; // assume we will fail + boolean failed = false; + String errorMessage = ""; + HashMap counts = new HashMap(); - for (int i=0; i(iterations)); + } + + // Perform some gc, record collector counts. + for (int i = 0; i < iterations; i++) { System.gc(); - counts[i] = getCollectionCount(); - if (i>0) { - if (counts[i] - counts[i-1] != 2) { - diffAlways2 = false; + addCollectionCount(counts, i); + } + + // Check the increments: + // Old gen collectors should increase by one, + // New collectors may or may not increase. + // Any increase >=2 is unexpected. 
+ for (String collector : counts.keySet()) { + System.out.println("Checking: " + collector); + + for (int i = 0; i < iterations - 1; i++) { + List theseCounts = counts.get(collector); + long a = theseCounts.get(i); + long b = theseCounts.get(i + 1); + if (b - a >= 2) { + failed = true; + errorMessage += "Collector '" + collector + "' has increment " + (b - a) + + " at iteration " + i + "\n"; } } } - if (diffAlways2) { - throw new RuntimeException("FAILED: System.gc must be incrementing count twice."); + if (failed) { + System.err.println(errorMessage); + throw new RuntimeException("FAILED: System.gc collections miscounted."); } System.out.println("Passed."); } - private long getCollectionCount() { - long count = 0; - List pools = ManagementFactory.getMemoryPoolMXBeans(); - List collectors = ManagementFactory.getGarbageCollectorMXBeans(); - for (int i=0; i counts, int iteration) { + for (int i = 0; i < collectors.size(); i++) { GarbageCollectorMXBean collector = collectors.get(i); - String name = collector.getName(); - if (name.contains(collectorName)) { - System.out.println(name + ": collection count = " - + collector.getCollectionCount()); - count = collector.getCollectionCount(); - } + List thisList = counts.get(collector.getName()); + thisList.add(collector.getCollectionCount()); } - return count; } - } - From bdf829cf3edbb691de18ad231501622dc8c749a5 Mon Sep 17 00:00:00 2001 From: Stefan Karlsson Date: Mon, 22 Apr 2013 20:27:36 +0200 Subject: [PATCH 003/162] 8012687: Remove unused is_root checks and closures Reviewed-by: tschatzl, jmasa --- .../gc_implementation/g1/g1CollectedHeap.cpp | 5 +-- .../gc_implementation/g1/g1CollectedHeap.hpp | 3 +- .../vm/gc_implementation/g1/g1MarkSweep.cpp | 7 ++-- .../parallelScavenge/psMarkSweep.cpp | 29 +++++++------- .../parallelScavenge/psMarkSweep.hpp | 1 - .../parallelScavenge/psParallelCompact.cpp | 40 +++++++++---------- .../parallelScavenge/psParallelCompact.hpp | 25 +----------- .../vm/gc_implementation/shared/markSweep.cpp | 9 ++--- .../vm/gc_implementation/shared/markSweep.hpp | 11 +---- .../shared/markSweep.inline.hpp | 2 +- .../src/share/vm/memory/genCollectedHeap.cpp | 5 +-- .../src/share/vm/memory/genCollectedHeap.hpp | 3 +- hotspot/src/share/vm/memory/genMarkSweep.cpp | 12 +++--- hotspot/src/share/vm/memory/sharedHeap.cpp | 7 ++-- hotspot/src/share/vm/memory/sharedHeap.hpp | 3 +- 15 files changed, 59 insertions(+), 103 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 85bc31b8a53..4c1b75133b8 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -5079,10 +5079,9 @@ g1_process_strong_roots(bool is_scavenging, } void -G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure) { +G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) { CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false); - SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure); + SharedHeap::process_weak_roots(root_closure, &roots_in_blobs); } // Weak Reference Processing support diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 4fbf0ff367a..9080bc353cb 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 
@@ -827,8 +827,7 @@ protected: // Apply "blk" to all the weak roots of the system. These include // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. - void g1_process_weak_roots(OopClosure* root_closure, - OopClosure* non_root_closure); + void g1_process_weak_roots(OopClosure* root_closure); // Frees a non-humongous region by initializing its contents and // adding it to the free list that's passed as a parameter (this is diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp index b987f7df4e7..9fa3dfb273c 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp @@ -308,17 +308,16 @@ void G1MarkSweep::mark_sweep_phase3() { sh->process_strong_roots(true, // activate StrongRootsScope false, // not scavenging. SharedHeap::SO_AllClasses, - &GenMarkSweep::adjust_root_pointer_closure, + &GenMarkSweep::adjust_pointer_closure, NULL, // do not touch code cache here &GenMarkSweep::adjust_klass_closure); assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity"); - g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure); + g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) - g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure, - &GenMarkSweep::adjust_pointer_closure); + g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure); GenMarkSweep::adjust_marks(); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp index 63cd3760282..cf07854cd9c 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp @@ -583,28 +583,27 @@ void PSMarkSweep::mark_sweep_phase3() { ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. 
- Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); - //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure()); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); - PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); adjust_marks(); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp index fcbc103dc3a..7d96afbb4df 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp @@ -44,7 +44,6 @@ class PSMarkSweep : public MarkSweep { static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; } static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; } static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; } diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp index 487a4e56553..d0d50a7f699 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @@ -787,12 +787,11 @@ bool 
PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap( void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); } -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true); -PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false); +PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure; PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure; -void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); } @@ -805,7 +804,7 @@ void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) { klass->oops_do(_mark_and_push_closure); } void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) { - klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure); + klass->oops_do(&PSParallelCompact::_adjust_pointer_closure); } void PSParallelCompact::post_initialize() { @@ -2398,7 +2397,7 @@ void PSParallelCompact::follow_class_loader(ParCompactionManager* cm, void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm, ClassLoaderData* cld) { - cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(), + cld->oops_do(PSParallelCompact::adjust_pointer_closure(), PSParallelCompact::adjust_klass_closure(), true); } @@ -2419,32 +2418,31 @@ void PSParallelCompact::adjust_roots() { ClassLoaderDataGraph::clear_claimed_marks(); // General strong roots. - Universe::oops_do(adjust_root_pointer_closure()); - JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles - CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure()); - Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL); - ObjectSynchronizer::oops_do(adjust_root_pointer_closure()); - FlatProfiler::oops_do(adjust_root_pointer_closure()); - Management::oops_do(adjust_root_pointer_closure()); - JvmtiExport::oops_do(adjust_root_pointer_closure()); + Universe::oops_do(adjust_pointer_closure()); + JNIHandles::oops_do(adjust_pointer_closure()); // Global (strong) JNI handles + CLDToOopClosure adjust_from_cld(adjust_pointer_closure()); + Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL); + ObjectSynchronizer::oops_do(adjust_pointer_closure()); + FlatProfiler::oops_do(adjust_pointer_closure()); + Management::oops_do(adjust_pointer_closure()); + JvmtiExport::oops_do(adjust_pointer_closure()); // SO_AllClasses - SystemDictionary::oops_do(adjust_root_pointer_closure()); - ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true); + SystemDictionary::oops_do(adjust_pointer_closure()); + ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) 
// Global (weak) JNI handles - JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure()); + JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure()); CodeCache::oops_do(adjust_pointer_closure()); - StringTable::oops_do(adjust_root_pointer_closure()); - ref_processor()->weak_oops_do(adjust_root_pointer_closure()); + StringTable::oops_do(adjust_pointer_closure()); + ref_processor()->weak_oops_do(adjust_pointer_closure()); // Roots were visited so references into the young gen in roots // may have been scanned. Process them also. // Should the reference processor have a span that excludes // young gen objects? - PSScavenge::reference_processor()->weak_oops_do( - adjust_root_pointer_closure()); + PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); } void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp index 68b54fb9b13..6ced655c21a 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp @@ -799,16 +799,6 @@ class PSParallelCompact : AllStatic { virtual void do_oop(narrowOop* p); }; - // Current unused - class FollowRootClosure: public OopsInGenClosure { - private: - ParCompactionManager* _compaction_manager; - public: - FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { } - virtual void do_oop(oop* p); - virtual void do_oop(narrowOop* p); - }; - class FollowStackClosure: public VoidClosure { private: ParCompactionManager* _compaction_manager; @@ -818,10 +808,7 @@ class PSParallelCompact : AllStatic { }; class AdjustPointerClosure: public OopClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) { } virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); // do not walk from thread stacks to the code cache on this phase @@ -838,7 +825,6 @@ class PSParallelCompact : AllStatic { friend class AdjustPointerClosure; friend class AdjustKlassClosure; friend class FollowKlassClosure; - friend class FollowRootClosure; friend class InstanceClassLoaderKlass; friend class RefProcTaskProxy; @@ -853,7 +839,6 @@ class PSParallelCompact : AllStatic { static IsAliveClosure _is_alive_closure; static SpaceInfo _space_info[last_space_id]; static bool _print_phases; - static AdjustPointerClosure _adjust_root_pointer_closure; static AdjustPointerClosure _adjust_pointer_closure; static AdjustKlassClosure _adjust_klass_closure; @@ -889,9 +874,6 @@ class PSParallelCompact : AllStatic { static void marking_phase(ParCompactionManager* cm, bool maximum_heap_compaction); - template static inline void adjust_pointer(T* p, bool is_root); - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - template static inline void follow_root(ParCompactionManager* cm, T* p); @@ -1046,7 +1028,6 @@ class PSParallelCompact : AllStatic { // Closure accessors static OopClosure* adjust_pointer_closure() { return (OopClosure*)&_adjust_pointer_closure; } - static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; } static KlassClosure* adjust_klass_closure() { return (KlassClosure*)&_adjust_klass_closure; } static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&_is_alive_closure; } @@ -1067,6 +1048,7 @@ class PSParallelCompact : AllStatic { // 
Check mark and maybe push on marking stack template static inline void mark_and_push(ParCompactionManager* cm, T* p); + template static inline void adjust_pointer(T* p); static void follow_klass(ParCompactionManager* cm, Klass* klass); static void adjust_klass(ParCompactionManager* cm, Klass* klass); @@ -1151,9 +1133,6 @@ class PSParallelCompact : AllStatic { static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; } static ParallelCompactData& summary_data() { return _summary_data; } - static inline void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - // Reference Processing static ReferenceProcessor* const ref_processor() { return _ref_processor; } @@ -1230,7 +1209,7 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) { } template -inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) { +inline void PSParallelCompact::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp index 6ea4097daa9..5e52aa1eb84 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp @@ -81,7 +81,7 @@ void MarkSweep::follow_class_loader(ClassLoaderData* cld) { } void MarkSweep::adjust_class_loader(ClassLoaderData* cld) { - cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true); + cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true); } @@ -121,11 +121,10 @@ void MarkSweep::preserve_mark(oop obj, markOop mark) { } } -MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true); -MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false); +MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure; -void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p, _is_root); } -void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); } +void MarkSweep::AdjustPointerClosure::do_oop(oop* p) { adjust_pointer(p); } +void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); } void MarkSweep::adjust_marks() { assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(), diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp index 1de7561ce55..ec724afa5ec 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp @@ -80,10 +80,7 @@ class MarkSweep : AllStatic { }; class AdjustPointerClosure: public OopsInGenClosure { - private: - bool _is_root; public: - AdjustPointerClosure(bool is_root) : _is_root(is_root) {} virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p); }; @@ -146,7 +143,6 @@ class MarkSweep : AllStatic { static MarkAndPushClosure mark_and_push_closure; static FollowKlassClosure follow_klass_closure; static FollowStackClosure follow_stack_closure; - static AdjustPointerClosure adjust_root_pointer_closure; static AdjustPointerClosure adjust_pointer_closure; static AdjustKlassClosure adjust_klass_closure; @@ -179,12 +175,7 @@ class MarkSweep : AllStatic { static void adjust_marks(); // Adjust the pointers in the preserved marks table 
static void restore_marks(); // Restore the marks that we saved in preserve_mark - template static inline void adjust_pointer(T* p, bool isroot); - - static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); } - static void adjust_pointer(oop* p) { adjust_pointer(p, false); } - static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); } - + template static inline void adjust_pointer(T* p); }; class PreservedMark VALUE_OBJ_CLASS_SPEC { diff --git a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp index 9752291959a..8ffe0f78236 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp +++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp @@ -76,7 +76,7 @@ void MarkSweep::push_objarray(oop obj, size_t index) { _objarray_stack.push(task); } -template inline void MarkSweep::adjust_pointer(T* p, bool isroot) { +template inline void MarkSweep::adjust_pointer(T* p) { T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp index f39631ae00b..a04eb3cb721 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp @@ -633,9 +633,8 @@ gen_process_strong_roots(int level, } void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { - SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure); + CodeBlobClosure* code_roots) { + SharedHeap::process_weak_roots(root_closure, code_roots); // "Local" "weak" refs for (int i = 0; i < _n_gens; i++) { _gens[i]->ref_processor()->weak_oops_do(root_closure); diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.hpp b/hotspot/src/share/vm/memory/genCollectedHeap.hpp index 034511b9b55..783cd372d7c 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp @@ -432,8 +432,7 @@ public: // JNI weak roots, the code cache, system dictionary, symbol table, // string table, and referents of reachable weak refs. void gen_process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // Set the saved marks of generations, if that makes sense. // In particular, if any generation might iterate over the oops diff --git a/hotspot/src/share/vm/memory/genMarkSweep.cpp b/hotspot/src/share/vm/memory/genMarkSweep.cpp index 2180f63886f..3fe04303263 100644 --- a/hotspot/src/share/vm/memory/genMarkSweep.cpp +++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp @@ -282,11 +282,10 @@ void GenMarkSweep::mark_sweep_phase3(int level) { // Need new claim bits for the pointer adjustment tracing. ClassLoaderDataGraph::clear_claimed_marks(); - // Because the two closures below are created statically, cannot + // Because the closure below is created statically, we cannot // use OopsInGenClosure constructor which takes a generation, // as the Universe has not been created when the static constructors // are run. 
- adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level)); adjust_pointer_closure.set_orig_generation(gch->get_gen(level)); gch->gen_process_strong_roots(level, @@ -294,18 +293,17 @@ void GenMarkSweep::mark_sweep_phase3(int level) { true, // activate StrongRootsScope false, // not scavenging SharedHeap::SO_AllClasses, - &adjust_root_pointer_closure, + &adjust_pointer_closure, false, // do not walk code - &adjust_root_pointer_closure, + &adjust_pointer_closure, &adjust_klass_closure); // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure, /*do_marking=*/ false); - gch->gen_process_weak_roots(&adjust_root_pointer_closure, - &adjust_code_pointer_closure, - &adjust_pointer_closure); + gch->gen_process_weak_roots(&adjust_pointer_closure, + &adjust_code_pointer_closure); adjust_marks(); GenAdjustPointersClosure blk; diff --git a/hotspot/src/share/vm/memory/sharedHeap.cpp b/hotspot/src/share/vm/memory/sharedHeap.cpp index caef7ac7ad0..cd577d4b57e 100644 --- a/hotspot/src/share/vm/memory/sharedHeap.cpp +++ b/hotspot/src/share/vm/memory/sharedHeap.cpp @@ -218,14 +218,13 @@ public: static AlwaysTrueClosure always_true; void SharedHeap::process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure) { + CodeBlobClosure* code_roots) { // Global (weak) JNI handles JNIHandles::weak_oops_do(&always_true, root_closure); CodeCache::blobs_do(code_roots); - StringTable::oops_do(root_closure); - } + StringTable::oops_do(root_closure); +} void SharedHeap::set_barrier_set(BarrierSet* bs) { _barrier_set = bs; diff --git a/hotspot/src/share/vm/memory/sharedHeap.hpp b/hotspot/src/share/vm/memory/sharedHeap.hpp index 2f8e2d910c2..b13bf15b846 100644 --- a/hotspot/src/share/vm/memory/sharedHeap.hpp +++ b/hotspot/src/share/vm/memory/sharedHeap.hpp @@ -249,8 +249,7 @@ public: // JNI weak roots, the code cache, system dictionary, symbol table, // string table. void process_weak_roots(OopClosure* root_closure, - CodeBlobClosure* code_roots, - OopClosure* non_root_closure); + CodeBlobClosure* code_roots); // The functions below are helper functions that a subclass of // "SharedHeap" can use in the implementation of its virtual From ddbf6ad621d46180942e88f44cca8e59a9a432da Mon Sep 17 00:00:00 2001 From: Jon Masamitsu Date: Mon, 22 Apr 2013 22:00:03 -0700 Subject: [PATCH 004/162] 8012111: Remove warning about CMS generation shrinking Reviewed-by: johnc, brutisso, stefank --- .../concurrentMarkSweepGeneration.cpp | 5 +- .../GuardShrinkWarning.java | 60 +++++++++++++++++++ 2 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 hotspot/test/gc/concurrentMarkSweep/GuardShrinkWarning.java diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp index 70a26089437..ff001185846 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -3426,8 +3426,9 @@ bool ConcurrentMarkSweepGeneration::grow_to_reserved() { void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); - // XXX Fix when compaction is implemented. 
- warning("Shrinking of CMS not yet implemented"); + if (PrintGCDetails && Verbose) { + warning("Shrinking of CMS not yet implemented"); + } return; } diff --git a/hotspot/test/gc/concurrentMarkSweep/GuardShrinkWarning.java b/hotspot/test/gc/concurrentMarkSweep/GuardShrinkWarning.java new file mode 100644 index 00000000000..a2d47625713 --- /dev/null +++ b/hotspot/test/gc/concurrentMarkSweep/GuardShrinkWarning.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * @test GuardShrinkWarning + * @summary Remove warning about CMS generation shrinking. + * @bug 8012111 + * @key gc + * @key regression + * @library /testlibrary + * @run main/othervm GuardShrinkWarning + * @author jon.masamitsu@oracle.com + */ + +import com.oracle.java.testlibrary.*; + +public class GuardShrinkWarning { + public static void main(String args[]) throws Exception { + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-showversion", + "-XX:+UseConcMarkSweepGC", + "-XX:+ExplicitGCInvokesConcurrent", + "GuardShrinkWarning$SystemGCCaller" + ); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldNotContain("Shrinking of CMS not yet implemented"); + + output.shouldNotContain("error"); + + output.shouldHaveExitValue(0); + } + static class SystemGCCaller { + public static void main(String [] args) { + System.gc(); + } + } +} From c47ec9b4ca00fe6b448c832682b73e8343503c1c Mon Sep 17 00:00:00 2001 From: Mikael Gerdin Date: Tue, 23 Apr 2013 08:39:55 +0200 Subject: [PATCH 005/162] 8011802: NPG: init_dependencies in class loader data graph can cause invalid CLD Restructure initialization of ClassLoaderData to not add a new instance if init_dependencies fail Reviewed-by: stefank, coleenp --- .../share/vm/classfile/classLoaderData.cpp | 43 +++++++++++-------- .../share/vm/classfile/classLoaderData.hpp | 2 +- .../vm/classfile/classLoaderData.inline.hpp | 9 ++-- 3 files changed, 30 insertions(+), 24 deletions(-) diff --git a/hotspot/src/share/vm/classfile/classLoaderData.cpp b/hotspot/src/share/vm/classfile/classLoaderData.cpp index 75b9f34f2b2..083b896f9dc 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.cpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp @@ -53,6 +53,7 @@ #include "classfile/metadataOnStackMark.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" +#include "memory/gcLocker.hpp" #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" @@ -423,7 +424,7 @@ 
void ClassLoaderData::free_deallocate_list() { // These anonymous class loaders are to contain classes used for JSR292 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) { // Add a new class loader data to the graph. - return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL); + return ClassLoaderDataGraph::add(loader, true, CHECK_NULL); } const char* ClassLoaderData::loader_name() { @@ -495,30 +496,40 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL; ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL; ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL; - // Add a new class loader data node to the list. Assign the newly created // ClassLoaderData into the java/lang/ClassLoader object as a hidden field -ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) { +ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) { // Not assigned a class loader data yet. // Create one. - ClassLoaderData* *list_head = &_head; - ClassLoaderData* next = _head; - - bool is_anonymous = (cld_addr == NULL); ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous); + cld->init_dependencies(THREAD); + if (HAS_PENDING_EXCEPTION) { + delete cld; + return NULL; + } - if (cld_addr != NULL) { - // First, Atomically set it - ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL); - if (old != NULL) { - delete cld; - // Returns the data. - return old; + No_Safepoint_Verifier no_safepoints; // nothing is keeping the dependencies array in cld alive + // make sure we don't encounter a GC until we've inserted + // cld into the CLDG + + if (!is_anonymous) { + ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader()); + if (cld_addr != NULL) { + // First, Atomically set it + ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL); + if (old != NULL) { + delete cld; + // Returns the data. + return old; + } } } // We won the race, and therefore the task of adding the data to the list of // class loader data + ClassLoaderData** list_head = &_head; + ClassLoaderData* next = _head; + do { cld->set_next(next); ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next); @@ -531,10 +542,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle lo cld->loader_name()); tty->print_cr("]"); } - // Create dependencies after the CLD is added to the list. Otherwise, - // the GC GC will not find the CLD and the _class_loader field will - // not be updated. - cld->init_dependencies(CHECK_NULL); return cld; } next = exchanged; diff --git a/hotspot/src/share/vm/classfile/classLoaderData.hpp b/hotspot/src/share/vm/classfile/classLoaderData.hpp index e6315182e18..e4e342280c3 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.hpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp @@ -62,7 +62,7 @@ class ClassLoaderDataGraph : public AllStatic { // CMS support. 
static ClassLoaderData* _saved_head; - static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS); + static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); public: static ClassLoaderData* find_or_create(Handle class_loader, TRAPS); static void purge(); diff --git a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp index b3a5ccf86d1..018b6761c50 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp @@ -43,10 +43,9 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader, TRAP assert(loader() != NULL,"Must be a class loader"); // Gets the class loader data out of the java/lang/ClassLoader object, if non-null // it's already in the loader_data, so no need to add - ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader()); - ClassLoaderData* loader_data_id = *loader_data_addr; - if (loader_data_id) { - return loader_data_id; + ClassLoaderData* loader_data= java_lang_ClassLoader::loader_data(loader()); + if (loader_data) { + return loader_data; } - return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD); + return ClassLoaderDataGraph::add(loader, false, THREAD); } From eafc00bc250c1230969c73d436e564ddc102d97b Mon Sep 17 00:00:00 2001 From: John Cuthbertson Date: Thu, 18 Apr 2013 10:09:23 -0700 Subject: [PATCH 006/162] 8011724: G1: Stack allocate instances of HeapRegionRemSetIterator Stack allocate instances of HeapRegionRemSetIterator during RSet scanning. Reviewed-by: brutisso, jwilhelm --- .../gc_implementation/g1/g1CollectedHeap.cpp | 7 --- .../gc_implementation/g1/g1CollectedHeap.hpp | 12 ----- .../vm/gc_implementation/g1/g1RemSet.cpp | 5 +- .../vm/gc_implementation/g1/g1RemSet.hpp | 12 ++--- .../gc_implementation/g1/heapRegionRemSet.cpp | 52 ++++++------------- .../gc_implementation/g1/heapRegionRemSet.hpp | 17 +++--- .../vm/gc_implementation/g1/sparsePRT.cpp | 4 -- .../vm/gc_implementation/g1/sparsePRT.hpp | 21 +++----- 8 files changed, 38 insertions(+), 92 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 4c1b75133b8..ab0434e3c55 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1955,13 +1955,6 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) : int n_rem_sets = HeapRegionRemSet::num_par_rem_sets(); assert(n_rem_sets > 0, "Invariant."); - HeapRegionRemSetIterator** iter_arr = - NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC); - for (int i = 0; i < n_queues; i++) { - iter_arr[i] = new HeapRegionRemSetIterator(); - } - _rem_set_iterator = iter_arr; - _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 9080bc353cb..32f5a46b471 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -786,9 +786,6 @@ protected: // concurrently after the collection. DirtyCardQueueSet _dirty_card_queue_set; - // The Heap Region Rem Set Iterator. 
- HeapRegionRemSetIterator** _rem_set_iterator; - // The closure used to refine a single card. RefineCardTableEntryClosure* _refine_cte_cl; @@ -1113,15 +1110,6 @@ public: G1RemSet* g1_rem_set() const { return _g1_rem_set; } ModRefBarrierSet* mr_bs() const { return _mr_bs; } - // The rem set iterator. - HeapRegionRemSetIterator* rem_set_iterator(int i) { - return _rem_set_iterator[i]; - } - - HeapRegionRemSetIterator* rem_set_iterator() { - return _rem_set_iterator[0]; - } - unsigned get_gc_time_stamp() { return _gc_time_stamp; } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp index 04af52e9478..e7151071e9f 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp @@ -169,14 +169,13 @@ public: // _try_claimed || r->claim_iter() // is true: either we're supposed to work on claimed-but-not-complete // regions, or we successfully claimed the region. - HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i); - hrrs->init_iterator(iter); + HeapRegionRemSetIterator iter(hrrs); size_t card_index; // We claim cards in block so as to recude the contention. The block size is determined by // the G1RSetScanBlockSize parameter. size_t jump_to_card = hrrs->iter_claimed_next(_block_size); - for (size_t current_card = 0; iter->has_next(card_index); current_card++) { + for (size_t current_card = 0; iter.has_next(card_index); current_card++) { if (current_card >= jump_to_card + _block_size) { jump_to_card = hrrs->iter_claimed_next(_block_size); } diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp index 0468e9ac312..eee6b447087 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp @@ -53,14 +53,14 @@ protected: NumSeqTasks = 1 }; - CardTableModRefBS* _ct_bs; - SubTasksDone* _seq_task; - G1CollectorPolicy* _g1p; + CardTableModRefBS* _ct_bs; + SubTasksDone* _seq_task; + G1CollectorPolicy* _g1p; - ConcurrentG1Refine* _cg1r; + ConcurrentG1Refine* _cg1r; - size_t* _cards_scanned; - size_t _total_cards_scanned; + size_t* _cards_scanned; + size_t _total_cards_scanned; // Used for caching the closure that is responsible for scanning // references into the collection set. 
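The core of this change is visible in the g1RemSet.cpp hunk above: instead of handing each worker a pre-allocated, heap-resident iterator that must be re-initialized before every region, the scanning code now constructs the iterator directly on its own stack. A minimal sketch of that pattern follows, with illustrative names only (not the actual HotSpot classes), assuming the iterator is fully set up by its constructor:

    #include <cstddef>

    // Hypothetical stand-ins for HeapRegionRemSet / HeapRegionRemSetIterator.
    class RemSetLike {
    public:
      explicit RemSetLike(size_t n_cards) : _n_cards(n_cards) {}
      size_t n_cards() const { return _n_cards; }
    private:
      size_t _n_cards;
    };

    class RemSetIterLike {            // StackObj-style: no heap allocation needed
    public:
      explicit RemSetIterLike(const RemSetLike* rs) : _rs(rs), _cur(0) {}
      bool has_next(size_t& card_index) {
        if (_cur >= _rs->n_cards()) return false;
        card_index = _cur++;          // yield the next card and advance
        return true;
      }
    private:
      const RemSetLike* _rs;
      size_t _cur;
    };

    void scan_region(const RemSetLike* rs) {
      RemSetIterLike iter(rs);        // lives on the scanning thread's stack
      size_t card_index;
      while (iter.has_next(card_index)) {
        // ... process card_index ...
      }
    }

Because the iterator carries no state that outlives the scan, nothing needs to be shared between workers, which is what allows the _rem_set_iterator array and the separate init_iterator()/initialize() step to be deleted.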
diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp index 56e94051c3a..df215a5cd93 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @@ -877,14 +877,9 @@ bool HeapRegionRemSet::iter_is_complete() { return _iter_state == Complete; } -void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const { - iter->initialize(this); -} - #ifndef PRODUCT void HeapRegionRemSet::print() const { - HeapRegionRemSetIterator iter; - init_iterator(&iter); + HeapRegionRemSetIterator iter(this); size_t card_index; while (iter.has_next(card_index)) { HeapWord* card_start = @@ -928,35 +923,23 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, //-------------------- Iteration -------------------- -HeapRegionRemSetIterator:: -HeapRegionRemSetIterator() : - _hrrs(NULL), +HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) : + _hrrs(hrrs), _g1h(G1CollectedHeap::heap()), - _bosa(NULL), - _sparse_iter() { } - -void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) { - _hrrs = hrrs; - _coarse_map = &_hrrs->_other_regions._coarse_map; - _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions; - _bosa = _hrrs->bosa(); - - _is = Sparse; + _coarse_map(&hrrs->_other_regions._coarse_map), + _fine_grain_regions(hrrs->_other_regions._fine_grain_regions), + _bosa(hrrs->bosa()), + _is(Sparse), // Set these values so that we increment to the first region. - _coarse_cur_region_index = -1; - _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1); - - _cur_region_cur_card = 0; - - _fine_array_index = -1; - _fine_cur_prt = NULL; - - _n_yielded_coarse = 0; - _n_yielded_fine = 0; - _n_yielded_sparse = 0; - - _sparse_iter.init(&hrrs->_other_regions._sparse_table); -} + _coarse_cur_region_index(-1), + _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1), + _cur_region_cur_card(0), + _fine_array_index(-1), + _fine_cur_prt(NULL), + _n_yielded_coarse(0), + _n_yielded_fine(0), + _n_yielded_sparse(0), + _sparse_iter(&hrrs->_other_regions._sparse_table) {} bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) { if (_hrrs->_other_regions._n_coarse_entries == 0) return false; @@ -1209,8 +1192,7 @@ void HeapRegionRemSet::test() { hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom()); // Now, does iteration yield these three? - HeapRegionRemSetIterator iter; - hrrs->init_iterator(&iter); + HeapRegionRemSetIterator iter(hrrs); size_t sum = 0; size_t card_index; while (iter.has_next(card_index)) { diff --git a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp index 1b1d42d7a35..2e165074e10 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @@ -281,9 +281,6 @@ public: return (_iter_state == Unclaimed) && (_iter_claimed == 0); } - // Initialize the given iterator to iterate over this rem set. - void init_iterator(HeapRegionRemSetIterator* iter) const; - // The actual # of bytes this hr_remset takes up. size_t mem_size() { return _other_regions.mem_size() @@ -345,9 +342,9 @@ public: #endif }; -class HeapRegionRemSetIterator : public CHeapObj { +class HeapRegionRemSetIterator : public StackObj { - // The region over which we're iterating. + // The region RSet over which we're iterating. 
const HeapRegionRemSet* _hrrs; // Local caching of HRRS fields. @@ -362,8 +359,10 @@ class HeapRegionRemSetIterator : public CHeapObj { size_t _n_yielded_coarse; size_t _n_yielded_sparse; - // If true we're iterating over the coarse table; if false the fine - // table. + // Indicates what granularity of table that we're currently iterating over. + // We start iterating over the sparse table, progress to the fine grain + // table, and then finish with the coarse table. + // See HeapRegionRemSetIterator::has_next(). enum IterState { Sparse, Fine, @@ -403,9 +402,7 @@ class HeapRegionRemSetIterator : public CHeapObj { public: // We require an iterator to be initialized before use, so the // constructor does little. - HeapRegionRemSetIterator(); - - void initialize(const HeapRegionRemSet* hrrs); + HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs); // If there remains one or more cards to be yielded, returns true and // sets "card_index" to one of those cards (which is then considered diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp index 0daa63512a3..a08e8a9801e 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp @@ -35,10 +35,6 @@ #define UNROLL_CARD_LOOPS 1 -void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) { - sprt_iter->init(this); -} - void SparsePRTEntry::init(RegionIdx_t region_ind) { _region_ind = region_ind; _next_index = NullEntry; diff --git a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp index ab0ab1f0205..6e821f73bbc 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp @@ -192,18 +192,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC { size_t compute_card_ind(CardIdx_t ci); public: - RSHashTableIter() : - _tbl_ind(RSHashTable::NullEntry), + RSHashTableIter(RSHashTable* rsht) : + _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0. _bl_ind(RSHashTable::NullEntry), _card_ind((SparsePRTEntry::cards_num() - 1)), - _rsht(NULL) {} - - void init(RSHashTable* rsht) { - _rsht = rsht; - _tbl_ind = -1; // So that first increment gets to 0. 
- _bl_ind = RSHashTable::NullEntry; - _card_ind = (SparsePRTEntry::cards_num() - 1); - } + _rsht(rsht) {} bool has_next(size_t& card_index); }; @@ -284,8 +277,6 @@ public: static void cleanup_all(); RSHashTable* cur() const { return _cur; } - void init_iterator(SparsePRTIter* sprt_iter); - static void add_to_expanded_list(SparsePRT* sprt); static SparsePRT* get_from_expanded_list(); @@ -321,9 +312,9 @@ public: class SparsePRTIter: public RSHashTableIter { public: - void init(const SparsePRT* sprt) { - RSHashTableIter::init(sprt->cur()); - } + SparsePRTIter(const SparsePRT* sprt) : + RSHashTableIter(sprt->cur()) {} + bool has_next(size_t& card_index) { return RSHashTableIter::has_next(card_index); } From d72b5162011dee40ac4658942c0bbb9b01f9b15d Mon Sep 17 00:00:00 2001 From: Jon Masamitsu Date: Tue, 12 Feb 2013 14:15:45 -0800 Subject: [PATCH 007/162] 8008966: NPG: Inefficient Metaspace counter functions cause large young GC regressions Reviewed-by: mgerdin, coleenp --- .../share/vm/classfile/classLoaderData.cpp | 2 + .../gc_implementation/g1/g1CollectedHeap.cpp | 3 +- .../parallelScavenge/psMarkSweep.cpp | 3 +- .../parallelScavenge/psParallelCompact.cpp | 3 +- .../shared/vmGCOperations.cpp | 5 +- hotspot/src/share/vm/memory/filemap.cpp | 4 +- .../src/share/vm/memory/genCollectedHeap.cpp | 3 +- hotspot/src/share/vm/memory/metaspace.cpp | 422 ++++++++++++------ hotspot/src/share/vm/memory/metaspace.hpp | 96 +++- .../src/share/vm/memory/metaspaceCounters.cpp | 22 +- .../src/share/vm/memory/metaspaceCounters.hpp | 1 + .../src/share/vm/memory/metaspaceShared.cpp | 9 +- 12 files changed, 398 insertions(+), 175 deletions(-) diff --git a/hotspot/src/share/vm/classfile/classLoaderData.cpp b/hotspot/src/share/vm/classfile/classLoaderData.cpp index 083b896f9dc..c5486d458fa 100644 --- a/hotspot/src/share/vm/classfile/classLoaderData.cpp +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp @@ -672,6 +672,8 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) { dead->unload(); data = data->next(); // Remove from loader list. + // This class loader data will no longer be found + // in the ClassLoaderDataGraph. if (prev != NULL) { prev->set_next(data); } else { diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index ab0434e3c55..4fee47a0021 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1304,7 +1304,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, print_heap_before_gc(); - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); HRSPhaseSetter x(HRSPhaseFullGC); verify_region_sets_optional(); @@ -1425,6 +1425,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); // Note: since we've just done a full GC, concurrent // marking is no longer active. 
Therefore we need not diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp index cf07854cd9c..4df7be7f870 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp @@ -177,7 +177,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { size_t prev_used = heap->used(); // Capture metadata size before collection for sizing. - size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); // For PrintGCDetails size_t old_gen_prev_used = old_gen->used_in_bytes(); @@ -238,6 +238,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); BiasedLocking::restore_marks(); Threads::gc_epilogue(); diff --git a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp index d0d50a7f699..5bc000a4680 100644 --- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp @@ -891,7 +891,7 @@ public: _heap_used = heap->used(); _young_gen_used = heap->young_gen()->used_in_bytes(); _old_gen_used = heap->old_gen()->used_in_bytes(); - _metadata_used = MetaspaceAux::used_in_bytes(); + _metadata_used = MetaspaceAux::allocated_used_bytes(); }; size_t heap_used() const { return _heap_used; } @@ -1026,6 +1026,7 @@ void PSParallelCompact::post_compact() // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); Threads::gc_epilogue(); CodeCache::gc_epilogue(); diff --git a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp index 756ed28f010..211a084ab38 100644 --- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp +++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp @@ -225,7 +225,10 @@ void VM_CollectForMetadataAllocation::doit() { gclog_or_tty->print_cr("\nCMS full GC for Metaspace"); } heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold); - _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype); + // After a GC try to allocate without expanding. Could fail + // and expansion will be tried below. + _result = + _loader_data->metaspace_non_null()->allocate(_size, _mdtype); } if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) { // If still failing, allow the Metaspace to expand. 
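The vmGCOperations.cpp hunk above spells out the retry policy in VM_CollectForMetadataAllocation::doit(): after inducing a GC for the metadata threshold, the allocation is retried without growing the metaspace, and only if that still fails is expansion attempted. A compressed sketch of that control flow is below; Space, try_allocate(), run_gc() and expand_and_allocate() are stand-in names for illustration, not the real HotSpot interfaces:

    #include <cstddef>

    // Assumed stand-ins for the metaspace allocation interfaces.
    struct Space {
      void* try_allocate(size_t bytes);        // returns NULL when full
      void* expand_and_allocate(size_t bytes); // grows the space, then allocates
    };
    void run_gc();                              // stand-in for collect_as_vm_thread()

    void* allocate_with_gc_then_expand(Space* s, size_t bytes) {
      void* result = s->try_allocate(bytes);    // fast path
      if (result != NULL) return result;

      run_gc();                                 // a collection may free metadata
      result = s->try_allocate(bytes);          // retry without expanding
      if (result != NULL) return result;

      // Only if the freed space is still not enough, grow the high-water mark.
      return s->expand_and_allocate(bytes);
    }

Trying the post-GC allocation before expanding keeps the committed metaspace from growing in cases where the collection alone has already freed enough room.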
diff --git a/hotspot/src/share/vm/memory/filemap.cpp b/hotspot/src/share/vm/memory/filemap.cpp index 133685932fd..dbc0c87edce 100644 --- a/hotspot/src/share/vm/memory/filemap.cpp +++ b/hotspot/src/share/vm/memory/filemap.cpp @@ -238,8 +238,8 @@ void FileMapInfo::write_header() { void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) { align_file_position(); - size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord; - size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord; + size_t used = space->used_bytes_slow(Metaspace::NonClassType); + size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType); struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; write_region(i, (char*)space->bottom(), used, capacity, read_only, false); } diff --git a/hotspot/src/share/vm/memory/genCollectedHeap.cpp b/hotspot/src/share/vm/memory/genCollectedHeap.cpp index a04eb3cb721..370dd500d84 100644 --- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp @@ -377,7 +377,7 @@ void GenCollectedHeap::do_collection(bool full, ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy()); - const size_t metadata_prev_used = MetaspaceAux::used_in_bytes(); + const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes(); print_heap_before_gc(); @@ -556,6 +556,7 @@ void GenCollectedHeap::do_collection(bool full, if (complete) { // Delete metaspaces for unloaded class loaders and clean up loader_data graph ClassLoaderDataGraph::purge(); + MetaspaceAux::verify_metrics(); // Resize the metaspace capacity after full collections MetaspaceGC::compute_new_size(); update_full_collections_completed(); diff --git a/hotspot/src/share/vm/memory/metaspace.cpp b/hotspot/src/share/vm/memory/metaspace.cpp index 3cc6d8d4959..e94750eb642 100644 --- a/hotspot/src/share/vm/memory/metaspace.cpp +++ b/hotspot/src/share/vm/memory/metaspace.cpp @@ -47,7 +47,6 @@ typedef BinaryTreeDictionary ChunkTreeDictionary; // the free chunk lists const bool metaspace_slow_verify = false; - // Parameters for stress mode testing const uint metadata_deallocate_a_lot_block = 10; const uint metadata_deallocate_a_lock_chunk = 3; @@ -220,7 +219,6 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { void print_on(outputStream* st); }; - // Used to manage the free list of Metablocks (a block corresponds // to the allocation of a quantum of metadata). 
class BlockFreelist VALUE_OBJ_CLASS_SPEC { @@ -298,7 +296,7 @@ class VirtualSpaceNode : public CHeapObj { MemRegion* reserved() { return &_reserved; } VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; } - // Returns true if "word_size" is available in the virtual space + // Returns true if "word_size" is available in the VirtualSpace bool is_available(size_t word_size) { return _top + word_size <= end(); } MetaWord* top() const { return _top; } @@ -313,6 +311,7 @@ class VirtualSpaceNode : public CHeapObj { // used and capacity in this single entry in the list size_t used_words_in_vs() const; size_t capacity_words_in_vs() const; + size_t free_words_in_vs() const; bool initialize(); @@ -449,6 +448,8 @@ class VirtualSpaceList : public CHeapObj { VirtualSpaceList(size_t word_size); VirtualSpaceList(ReservedSpace rs); + size_t free_bytes(); + Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words, size_t medium_chunk_bunch); @@ -579,7 +580,11 @@ class SpaceManager : public CHeapObj { bool has_small_chunk_limit() { return !vs_list()->is_class(); } // Sum of all space in allocated chunks - size_t _allocation_total; + size_t _allocated_blocks_words; + + // Sum of all allocated chunks + size_t _allocated_chunks_words; + size_t _allocated_chunks_count; // Free lists of blocks are per SpaceManager since they // are assumed to be in chunks in use by the SpaceManager @@ -635,12 +640,27 @@ class SpaceManager : public CHeapObj { size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; } size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; } - size_t allocation_total() const { return _allocation_total; } - void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); } + size_t allocated_blocks_words() const { return _allocated_blocks_words; } + size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; } + size_t allocated_chunks_words() const { return _allocated_chunks_words; } + size_t allocated_chunks_count() const { return _allocated_chunks_count; } + bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); } static Mutex* expand_lock() { return _expand_lock; } + // Increment the per Metaspace and global running sums for Metachunks + // by the given size. This is used when a Metachunk to added to + // the in-use list. + void inc_size_metrics(size_t words); + // Increment the per Metaspace and global running sums Metablocks by the given + // size. This is used when a Metablock is allocated. + void inc_used_metrics(size_t words); + // Delete the portion of the running sums for this SpaceManager. That is, + // the globals running sums for the Metachunks and Metablocks are + // decremented for all the Metachunks in-use by this SpaceManager. + void dec_total_from_size_metrics(); + // Set the sizes for the initial chunks. 
void get_initial_chunk_sizes(Metaspace::MetaspaceType type, size_t* chunk_word_size, @@ -686,7 +706,7 @@ class SpaceManager : public CHeapObj { void verify_chunk_size(Metachunk* chunk); NOT_PRODUCT(void mangle_freed_chunks();) #ifdef ASSERT - void verify_allocation_total(); + void verify_allocated_blocks_words(); #endif }; @@ -797,6 +817,9 @@ size_t VirtualSpaceNode::capacity_words_in_vs() const { return pointer_delta(end(), bottom(), sizeof(MetaWord)); } +size_t VirtualSpaceNode::free_words_in_vs() const { + return pointer_delta(end(), top(), sizeof(MetaWord)); +} // Allocates the chunk from the virtual space only. // This interface is also used internally for debugging. Not all @@ -1071,6 +1094,10 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : link_vs(class_entry, rs.size()/BytesPerWord); } +size_t VirtualSpaceList::free_bytes() { + return virtual_space_list()->free_words_in_vs() * BytesPerWord; +} + // Allocate another meta virtual space and add it to the list. bool VirtualSpaceList::grow_vs(size_t vs_word_size) { assert_lock_strong(SpaceManager::expand_lock()); @@ -1211,9 +1238,9 @@ bool VirtualSpaceList::contains(const void *ptr) { // // After the GC the compute_new_size() for MetaspaceGC is called to // resize the capacity of the metaspaces. The current implementation -// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used +// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used // to resize the Java heap by some GC's. New flags can be implemented -// if really needed. MinHeapFreeRatio is used to calculate how much +// if really needed. MinMetaspaceFreeRatio is used to calculate how much // free space is desirable in the metaspace capacity to decide how much // to increase the HWM. MaxMetaspaceFreeRatio is used to decide how much // free space is desirable in the metaspace capacity before decreasing @@ -1248,7 +1275,11 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) { } bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { + + size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); // If the user wants a limit, impose one. + size_t max_metaspace_size_bytes = MaxMetaspaceSize; + size_t metaspace_size_bytes = MetaspaceSize; if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) && MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) { return false; @@ -1260,57 +1291,48 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { // If this is part of an allocation after a GC, expand // unconditionally. - if(MetaspaceGC::expand_after_GC()) { + if (MetaspaceGC::expand_after_GC()) { return true; } - size_t metaspace_size_words = MetaspaceSize / BytesPerWord; + // If the capacity is below the minimum capacity, allow the // expansion. Also set the high-water-mark (capacity_until_GC) // to that minimum capacity so that a GC will not be induced // until that minimum capacity is exceeded. 
- if (vsl->capacity_words_sum() < metaspace_size_words || + if (committed_capacity_bytes < metaspace_size_bytes || capacity_until_GC() == 0) { - set_capacity_until_GC(metaspace_size_words); + set_capacity_until_GC(metaspace_size_bytes); return true; } else { - if (vsl->capacity_words_sum() < capacity_until_GC()) { + if (committed_capacity_bytes < capacity_until_GC()) { return true; } else { if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr(" allocation request size " SIZE_FORMAT " capacity_until_GC " SIZE_FORMAT - " capacity_words_sum " SIZE_FORMAT - " used_words_sum " SIZE_FORMAT - " free chunks " SIZE_FORMAT - " free chunks count %d", + " allocated_capacity_bytes " SIZE_FORMAT, word_size, capacity_until_GC(), - vsl->capacity_words_sum(), - vsl->used_words_sum(), - vsl->chunk_manager()->free_chunks_total(), - vsl->chunk_manager()->free_chunks_count()); + MetaspaceAux::allocated_capacity_bytes()); } return false; } } } -// Variables are in bytes + void MetaspaceGC::compute_new_size() { assert(_shrink_factor <= 100, "invalid shrink factor"); uint current_shrink_factor = _shrink_factor; _shrink_factor = 0; - VirtualSpaceList *vsl = Metaspace::space_list(); - - size_t capacity_after_gc = vsl->capacity_bytes_sum(); - // Check to see if these two can be calculated without walking the CLDG - size_t used_after_gc = vsl->used_bytes_sum(); - size_t capacity_until_GC = vsl->capacity_bytes_sum(); - size_t free_after_gc = capacity_until_GC - used_after_gc; + // Until a faster way of calculating the "used" quantity is implemented, + // use "capacity". + const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes(); + const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC(); const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0; const double maximum_used_percentage = 1.0 - minimum_free_percentage; @@ -1323,45 +1345,34 @@ void MetaspaceGC::compute_new_size() { MetaspaceSize); if (PrintGCDetails && Verbose) { - const double free_percentage = ((double)free_after_gc) / capacity_until_GC; gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: "); gclog_or_tty->print_cr(" " " minimum_free_percentage: %6.2f" " maximum_used_percentage: %6.2f", minimum_free_percentage, maximum_used_percentage); - double d_free_after_gc = free_after_gc / (double) K; gclog_or_tty->print_cr(" " - " free_after_gc : %6.1fK" - " used_after_gc : %6.1fK" - " capacity_after_gc : %6.1fK" - " metaspace HWM : %6.1fK", - free_after_gc / (double) K, - used_after_gc / (double) K, - capacity_after_gc / (double) K, - capacity_until_GC / (double) K); - gclog_or_tty->print_cr(" " - " free_percentage: %6.2f", - free_percentage); + " used_after_gc : %6.1fKB", + used_after_gc / (double) K); } + size_t shrink_bytes = 0; if (capacity_until_GC < minimum_desired_capacity) { // If we have less capacity below the metaspace HWM, then // increment the HWM. 
size_t expand_bytes = minimum_desired_capacity - capacity_until_GC; // Don't expand unless it's significant if (expand_bytes >= MinMetaspaceExpansion) { - size_t expand_words = expand_bytes / BytesPerWord; - MetaspaceGC::inc_capacity_until_GC(expand_words); + MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes); } if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); + size_t new_capacity_until_GC = capacity_until_GC; gclog_or_tty->print_cr(" expanding:" - " minimum_desired_capacity: %6.1fK" - " expand_words: %6.1fK" - " MinMetaspaceExpansion: %6.1fK" - " new metaspace HWM: %6.1fK", + " minimum_desired_capacity: %6.1fKB" + " expand_bytes: %6.1fKB" + " MinMetaspaceExpansion: %6.1fKB" + " new metaspace HWM: %6.1fKB", minimum_desired_capacity / (double) K, expand_bytes / (double) K, MinMetaspaceExpansion / (double) K, @@ -1371,11 +1382,10 @@ void MetaspaceGC::compute_new_size() { } // No expansion, now see if we want to shrink - size_t shrink_words = 0; // We would never want to shrink more than this - size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity; - assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT, - max_shrink_words)); + size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity; + assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT, + max_shrink_bytes)); // Should shrinking be considered? if (MaxMetaspaceFreeRatio < 100) { @@ -1385,17 +1395,15 @@ void MetaspaceGC::compute_new_size() { size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx)); maximum_desired_capacity = MAX2(maximum_desired_capacity, MetaspaceSize); - if (PrintGC && Verbose) { + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr(" " " maximum_free_percentage: %6.2f" " minimum_used_percentage: %6.2f", maximum_free_percentage, minimum_used_percentage); gclog_or_tty->print_cr(" " - " capacity_until_GC: %6.1fK" - " minimum_desired_capacity: %6.1fK" - " maximum_desired_capacity: %6.1fK", - capacity_until_GC / (double) K, + " minimum_desired_capacity: %6.1fKB" + " maximum_desired_capacity: %6.1fKB", minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K); } @@ -1405,17 +1413,17 @@ void MetaspaceGC::compute_new_size() { if (capacity_until_GC > maximum_desired_capacity) { // Capacity too large, compute shrinking size - shrink_words = capacity_until_GC - maximum_desired_capacity; + shrink_bytes = capacity_until_GC - maximum_desired_capacity; // We don't want shrink all the way back to initSize if people call // System.gc(), because some programs do that between "phases" and then // we'd just have to grow the heap up again for the next phase. So we // damp the shrinking: 0% on the first call, 10% on the second call, 40% // on the third call, and 100% by the fourth call. But if we recompute // size without shrinking, it goes back to 0%. 
- shrink_words = shrink_words / 100 * current_shrink_factor; - assert(shrink_words <= max_shrink_words, + shrink_bytes = shrink_bytes / 100 * current_shrink_factor; + assert(shrink_bytes <= max_shrink_bytes, err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT, - shrink_words, max_shrink_words)); + shrink_bytes, max_shrink_bytes)); if (current_shrink_factor == 0) { _shrink_factor = 10; } else { @@ -1429,11 +1437,11 @@ void MetaspaceGC::compute_new_size() { MetaspaceSize / (double) K, maximum_desired_capacity / (double) K); gclog_or_tty->print_cr(" " - " shrink_words: %.1fK" + " shrink_bytes: %.1fK" " current_shrink_factor: %d" " new shrink factor: %d" " MinMetaspaceExpansion: %.1fK", - shrink_words / (double) K, + shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K); @@ -1441,23 +1449,11 @@ void MetaspaceGC::compute_new_size() { } } - // Don't shrink unless it's significant - if (shrink_words >= MinMetaspaceExpansion) { - VirtualSpaceNode* csp = vsl->current_virtual_space(); - size_t available_to_shrink = csp->capacity_words_in_vs() - - csp->used_words_in_vs(); - shrink_words = MIN2(shrink_words, available_to_shrink); - csp->shrink_by(shrink_words); - MetaspaceGC::dec_capacity_until_GC(shrink_words); - if (PrintGCDetails && Verbose) { - size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes(); - gclog_or_tty->print_cr(" metaspace HWM: %.1fK", new_capacity_until_GC / (double) K); - } + if (shrink_bytes >= MinMetaspaceExpansion && + ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) { + MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes); } - assert(used_after_gc <= vsl->capacity_bytes_sum(), - "sanity check"); - } // Metadebug methods @@ -1860,18 +1856,28 @@ size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const { } size_t SpaceManager::sum_capacity_in_chunks_in_use() const { - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - size_t sum = 0; - for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { - Metachunk* chunk = chunks_in_use(i); - while (chunk != NULL) { - // Just changed this sum += chunk->capacity_word_size(); - // sum += chunk->word_size() - Metachunk::overhead(); - sum += chunk->capacity_word_size(); - chunk = chunk->next(); + // For CMS use "allocated_chunks_words()" which does not need the + // Metaspace lock. For the other collectors sum over the + // lists. Use both methods as a check that "allocated_chunks_words()" + // is correct. That is, sum_capacity_in_chunks() is too expensive + // to use in the product and allocated_chunks_words() should be used + // but allow for checking that allocated_chunks_words() returns the same + // value as sum_capacity_in_chunks_in_use() which is the definitive + // answer. 
+ if (UseConcMarkSweepGC) { + return allocated_chunks_words(); + } else { + MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); + size_t sum = 0; + for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { + Metachunk* chunk = chunks_in_use(i); + while (chunk != NULL) { + sum += chunk->capacity_word_size(); + chunk = chunk->next(); + } } - } return sum; + } } size_t SpaceManager::sum_count_in_chunks_in_use() { @@ -2029,12 +2035,44 @@ void SpaceManager::print_on(outputStream* st) const { SpaceManager::SpaceManager(Mutex* lock, VirtualSpaceList* vs_list) : _vs_list(vs_list), - _allocation_total(0), + _allocated_blocks_words(0), + _allocated_chunks_words(0), + _allocated_chunks_count(0), _lock(lock) { initialize(); } +void SpaceManager::inc_size_metrics(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Total of allocated Metachunks and allocated Metachunks count + // for each SpaceManager + _allocated_chunks_words = _allocated_chunks_words + words; + _allocated_chunks_count++; + // Global total of capacity in allocated Metachunks + MetaspaceAux::inc_capacity(words); + // Global total of allocated Metablocks. + // used_words_slow() includes the overhead in each + // Metachunk so include it in the used when the + // Metachunk is first added (so only added once per + // Metachunk). + MetaspaceAux::inc_used(Metachunk::overhead()); +} + +void SpaceManager::inc_used_metrics(size_t words) { + // Add to the per SpaceManager total + Atomic::add_ptr(words, &_allocated_blocks_words); + // Add to the global total + MetaspaceAux::inc_used(words); +} + +void SpaceManager::dec_total_from_size_metrics() { + MetaspaceAux::dec_capacity(allocated_chunks_words()); + MetaspaceAux::dec_used(allocated_blocks_words()); + // Also deduct the overhead per Metachunk + MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead()); +} + void SpaceManager::initialize() { Metadebug::init_allocation_fail_alot_count(); for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { @@ -2073,7 +2111,10 @@ void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { SpaceManager::~SpaceManager() { // This call this->_lock which can't be done while holding expand_lock() - const size_t in_use_before = sum_capacity_in_chunks_in_use(); + assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(), + err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT + " allocated_chunks_words() " SIZE_FORMAT, + sum_capacity_in_chunks_in_use(), allocated_chunks_words())); MutexLockerEx fcl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); @@ -2082,6 +2123,8 @@ SpaceManager::~SpaceManager() { chunk_manager->slow_locked_verify(); + dec_total_from_size_metrics(); + if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this); locked_print_chunks_in_use_on(gclog_or_tty); @@ -2092,7 +2135,7 @@ SpaceManager::~SpaceManager() { // Have to update before the chunks_in_use lists are emptied // below. 
- chunk_manager->inc_free_chunks_total(in_use_before, + chunk_manager->inc_free_chunks_total(allocated_chunks_words(), sum_count_in_chunks_in_use()); // Add all the chunks in use by this space manager @@ -2158,7 +2201,6 @@ SpaceManager::~SpaceManager() { chunk_manager->humongous_dictionary()->total_count(), chunk_size_name(HumongousIndex)); } - set_chunks_in_use(HumongousIndex, NULL); chunk_manager->slow_locked_verify(); } @@ -2238,12 +2280,17 @@ void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency"); } + // Add to the running sum of capacity + inc_size_metrics(new_chunk->word_size()); + assert(new_chunk->is_empty(), "Not ready for reuse"); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print("SpaceManager::add_chunk: %d) ", sum_count_in_chunks_in_use()); new_chunk->print_on(gclog_or_tty); - vs_list()->chunk_manager()->locked_print_free_chunks(tty); + if (vs_list() != NULL) { + vs_list()->chunk_manager()->locked_print_free_chunks(tty); + } } } @@ -2314,7 +2361,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) { // of memory if this returns null. if (DumpSharedSpaces) { assert(current_chunk() != NULL, "should never happen"); - inc_allocation_total(word_size); + inc_used_metrics(word_size); return current_chunk()->allocate(word_size); // caller handles null result } if (current_chunk() != NULL) { @@ -2325,7 +2372,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) { result = grow_and_allocate(word_size); } if (result > 0) { - inc_allocation_total(word_size); + inc_used_metrics(word_size); assert(result != (MetaWord*) chunks_in_use(MediumIndex), "Head of the list is being allocated"); } @@ -2359,20 +2406,14 @@ void SpaceManager::verify_chunk_size(Metachunk* chunk) { } #ifdef ASSERT -void SpaceManager::verify_allocation_total() { +void SpaceManager::verify_allocated_blocks_words() { // Verification is only guaranteed at a safepoint. 
- if (SafepointSynchronize::is_at_safepoint()) { - gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT - " sum_used_in_chunks_in_use " SIZE_FORMAT, - this, - allocation_total(), - sum_used_in_chunks_in_use()); - } - MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); - assert(allocation_total() == sum_used_in_chunks_in_use(), + assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), + "Verification can fail if the applications is running"); + assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), err_msg("allocation total is not consistent " SIZE_FORMAT " vs " SIZE_FORMAT, - allocation_total(), sum_used_in_chunks_in_use())); + allocated_blocks_words(), sum_used_in_chunks_in_use())); } #endif @@ -2428,14 +2469,65 @@ void SpaceManager::mangle_freed_chunks() { // MetaspaceAux -size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) { + +size_t MetaspaceAux::_allocated_capacity_words = 0; +size_t MetaspaceAux::_allocated_used_words = 0; + +size_t MetaspaceAux::free_bytes() { + size_t result = 0; + if (Metaspace::class_space_list() != NULL) { + result = result + Metaspace::class_space_list()->free_bytes(); + } + if (Metaspace::space_list() != NULL) { + result = result + Metaspace::space_list()->free_bytes(); + } + return result; +} + +void MetaspaceAux::dec_capacity(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + assert(words <= _allocated_capacity_words, + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_capacity_words " SIZE_FORMAT, + words, _allocated_capacity_words)); + _allocated_capacity_words = _allocated_capacity_words - words; +} + +void MetaspaceAux::inc_capacity(size_t words) { + assert_lock_strong(SpaceManager::expand_lock()); + // Needs to be atomic + _allocated_capacity_words = _allocated_capacity_words + words; +} + +void MetaspaceAux::dec_used(size_t words) { + assert(words <= _allocated_used_words, + err_msg("About to decrement below 0: words " SIZE_FORMAT + " is greater than _allocated_used_words " SIZE_FORMAT, + words, _allocated_used_words)); + // For CMS deallocation of the Metaspaces occurs during the + // sweep which is a concurrent phase. Protection by the expand_lock() + // is not enough since allocation is on a per Metaspace basis + // and protected by the Metaspace lock. + jlong minus_words = (jlong) - (jlong) words; + Atomic::add_ptr(minus_words, &_allocated_used_words); +} + +void MetaspaceAux::inc_used(size_t words) { + // _allocated_used_words tracks allocations for + // each piece of metadata. Those allocations are + // generally done concurrently by different application + // threads so must be done atomically. 
+ Atomic::add_ptr(words, &_allocated_used_words); +} + +size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { size_t used = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); - // Sum allocation_total for each metaspace + // Sum allocated_blocks_words for each metaspace if (msp != NULL) { - used += msp->used_words(mdtype); + used += msp->used_words_slow(mdtype); } } return used * BytesPerWord; @@ -2453,13 +2545,15 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) { return free * BytesPerWord; } -size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) { - size_t capacity = free_chunks_total(mdtype); +size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { + // Don't count the space in the freelists. That space will be + // added to the capacity calculation as needed. + size_t capacity = 0; ClassLoaderDataGraphMetaspaceIterator iter; while (iter.repeat()) { Metaspace* msp = iter.get_next(); if (msp != NULL) { - capacity += msp->capacity_words(mdtype); + capacity += msp->capacity_words_slow(mdtype); } } return capacity * BytesPerWord; @@ -2486,23 +2580,30 @@ size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) return free_chunks_total(mdtype) * BytesPerWord; } +size_t MetaspaceAux::free_chunks_total() { + return free_chunks_total(Metaspace::ClassType) + + free_chunks_total(Metaspace::NonClassType); +} + +size_t MetaspaceAux::free_chunks_total_in_bytes() { + return free_chunks_total() * BytesPerWord; +} + void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { gclog_or_tty->print(", [Metaspace:"); if (PrintGCDetails && Verbose) { gclog_or_tty->print(" " SIZE_FORMAT "->" SIZE_FORMAT - "(" SIZE_FORMAT "/" SIZE_FORMAT ")", + "(" SIZE_FORMAT ")", prev_metadata_used, - used_in_bytes(), - capacity_in_bytes(), + allocated_capacity_bytes(), reserved_in_bytes()); } else { gclog_or_tty->print(" " SIZE_FORMAT "K" "->" SIZE_FORMAT "K" - "(" SIZE_FORMAT "K/" SIZE_FORMAT "K)", + "(" SIZE_FORMAT "K)", prev_metadata_used / K, - used_in_bytes()/ K, - capacity_in_bytes()/K, + allocated_capacity_bytes() / K, reserved_in_bytes()/ K); } @@ -2517,23 +2618,30 @@ void MetaspaceAux::print_on(outputStream* out) { out->print_cr(" Metaspace total " SIZE_FORMAT "K, used " SIZE_FORMAT "K," " reserved " SIZE_FORMAT "K", - capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K); - out->print_cr(" data space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K); - out->print_cr(" class space " - SIZE_FORMAT "K, used " SIZE_FORMAT "K," - " reserved " SIZE_FORMAT "K", - capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K); + allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K); +#if 0 +// The calls to capacity_bytes_slow() and used_bytes_slow() cause +// lock ordering assertion failures with some collectors. Do +// not include this code until the lock ordering is fixed. 
+ if (PrintGCDetails && Verbose) { + out->print_cr(" data space " + SIZE_FORMAT "K, used " SIZE_FORMAT "K," + " reserved " SIZE_FORMAT "K", + capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K); + out->print_cr(" class space " + SIZE_FORMAT "K, used " SIZE_FORMAT "K," + " reserved " SIZE_FORMAT "K", + capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K); + } +#endif } // Print information for class space and data space separately. // This is almost the same as above. void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype); - size_t capacity_bytes = capacity_in_bytes(mdtype); - size_t used_bytes = used_in_bytes(mdtype); + size_t capacity_bytes = capacity_bytes_slow(mdtype); + size_t used_bytes = used_bytes_slow(mdtype); size_t free_bytes = free_in_bytes(mdtype); size_t used_and_free = used_bytes + free_bytes + free_chunks_capacity_bytes; @@ -2606,6 +2714,36 @@ void MetaspaceAux::verify_free_chunks() { Metaspace::class_space_list()->chunk_manager()->verify(); } +void MetaspaceAux::verify_capacity() { +#ifdef ASSERT + size_t running_sum_capacity_bytes = allocated_capacity_bytes(); + // For purposes of the running sum of used, verify against capacity + size_t capacity_in_use_bytes = capacity_bytes_slow(); + assert(running_sum_capacity_bytes == capacity_in_use_bytes, + err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT + " capacity_bytes_slow()" SIZE_FORMAT, + running_sum_capacity_bytes, capacity_in_use_bytes)); +#endif +} + +void MetaspaceAux::verify_used() { +#ifdef ASSERT + size_t running_sum_used_bytes = allocated_used_bytes(); + // For purposes of the running sum of used, verify against capacity + size_t used_in_use_bytes = used_bytes_slow(); + assert(allocated_used_bytes() == used_in_use_bytes, + err_msg("allocated_used_bytes() " SIZE_FORMAT + " used_bytes_slow()()" SIZE_FORMAT, + allocated_used_bytes(), used_in_use_bytes)); +#endif +} + +void MetaspaceAux::verify_metrics() { + verify_capacity(); + verify_used(); +} + + // Metaspace methods size_t Metaspace::_first_chunk_word_size = 0; @@ -2755,8 +2893,8 @@ MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) MetaWord* result; MetaspaceGC::set_expand_after_GC(true); size_t before_inc = MetaspaceGC::capacity_until_GC(); - size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size); - MetaspaceGC::inc_capacity_until_GC(delta_words); + size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord; + MetaspaceGC::inc_capacity_until_GC(delta_bytes); if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC()); @@ -2774,8 +2912,8 @@ char* Metaspace::bottom() const { return (char*)vsm()->current_chunk()->bottom(); } -size_t Metaspace::used_words(MetadataType mdtype) const { - // return vsm()->allocation_total(); +size_t Metaspace::used_words_slow(MetadataType mdtype) const { + // return vsm()->allocated_used_words(); return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() : vsm()->sum_used_in_chunks_in_use(); // includes overhead! } @@ -2790,11 +2928,19 @@ size_t Metaspace::free_words(MetadataType mdtype) const { // have been made. Don't include space in the global freelist and // in the space available in the dictionary which // is already counted in some chunk. 
-size_t Metaspace::capacity_words(MetadataType mdtype) const { +size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() : vsm()->sum_capacity_in_chunks_in_use(); } +size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { + return used_words_slow(mdtype) * BytesPerWord; +} + +size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { + return capacity_words_slow(mdtype) * BytesPerWord; +} + void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { if (SafepointSynchronize::is_at_safepoint()) { assert(Thread::current()->is_VM_thread(), "should be the VM thread"); @@ -2921,10 +3067,6 @@ void Metaspace::verify() { } void Metaspace::dump(outputStream* const out) const { - if (UseMallocOnly) { - // Just print usage for now - out->print_cr("usage %d", used_words(Metaspace::NonClassType)); - } out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); vsm()->dump(out); out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); diff --git a/hotspot/src/share/vm/memory/metaspace.hpp b/hotspot/src/share/vm/memory/metaspace.hpp index 8d221914572..1108c79feab 100644 --- a/hotspot/src/share/vm/memory/metaspace.hpp +++ b/hotspot/src/share/vm/memory/metaspace.hpp @@ -111,6 +111,10 @@ class Metaspace : public CHeapObj { SpaceManager* _class_vsm; SpaceManager* class_vsm() const { return _class_vsm; } + // Allocate space for metadata of type mdtype. This is space + // within a Metachunk and is used by + // allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS) + // which returns a Metablock. MetaWord* allocate(size_t word_size, MetadataType mdtype); // Virtual Space lists for both classes and other metadata @@ -133,11 +137,14 @@ class Metaspace : public CHeapObj { static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; } char* bottom() const; - size_t used_words(MetadataType mdtype) const; + size_t used_words_slow(MetadataType mdtype) const; size_t free_words(MetadataType mdtype) const; - size_t capacity_words(MetadataType mdtype) const; + size_t capacity_words_slow(MetadataType mdtype) const; size_t waste_words(MetadataType mdtype) const; + size_t used_bytes_slow(MetadataType mdtype) const; + size_t capacity_bytes_slow(MetadataType mdtype) const; + static Metablock* allocate(ClassLoaderData* loader_data, size_t size, bool read_only, MetadataType mdtype, TRAPS); void deallocate(MetaWord* ptr, size_t byte_size, bool is_class); @@ -161,28 +168,81 @@ class Metaspace : public CHeapObj { class MetaspaceAux : AllStatic { // Statistics for class space and data space in metaspace. - static size_t used_in_bytes(Metaspace::MetadataType mdtype); + + // These methods iterate over the classloader data graph + // for the given Metaspace type. These are slow. + static size_t used_bytes_slow(Metaspace::MetadataType mdtype); static size_t free_in_bytes(Metaspace::MetadataType mdtype); - static size_t capacity_in_bytes(Metaspace::MetadataType mdtype); + static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype); + + // Iterates over the virtual space list. 
static size_t reserved_in_bytes(Metaspace::MetadataType mdtype); static size_t free_chunks_total(Metaspace::MetadataType mdtype); static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype); public: - // Total of space allocated to metadata in all Metaspaces - static size_t used_in_bytes() { - return used_in_bytes(Metaspace::ClassType) + - used_in_bytes(Metaspace::NonClassType); + // Running sum of space in all Metachunks that has been + // allocated to a Metaspace. This is used instead of + // iterating over all the classloaders + static size_t _allocated_capacity_words; + // Running sum of space in all Metachunks that have + // are being used for metadata. + static size_t _allocated_used_words; + + public: + // Decrement and increment _allocated_capacity_words + static void dec_capacity(size_t words); + static void inc_capacity(size_t words); + + // Decrement and increment _allocated_used_words + static void dec_used(size_t words); + static void inc_used(size_t words); + + // Total of space allocated to metadata in all Metaspaces. + // This sums the space used in each Metachunk by + // iterating over the classloader data graph + static size_t used_bytes_slow() { + return used_bytes_slow(Metaspace::ClassType) + + used_bytes_slow(Metaspace::NonClassType); } - // Total of available space in all Metaspaces - // Total of capacity allocated to all Metaspaces. This includes - // space in Metachunks not yet allocated and in the Metachunk - // freelist. - static size_t capacity_in_bytes() { - return capacity_in_bytes(Metaspace::ClassType) + - capacity_in_bytes(Metaspace::NonClassType); + // Used by MetaspaceCounters + static size_t free_chunks_total(); + static size_t free_chunks_total_in_bytes(); + + static size_t allocated_capacity_words() { + return _allocated_capacity_words; + } + static size_t allocated_capacity_bytes() { + return _allocated_capacity_words * BytesPerWord; + } + + static size_t allocated_used_words() { + return _allocated_used_words; + } + static size_t allocated_used_bytes() { + return _allocated_used_words * BytesPerWord; + } + + static size_t free_bytes(); + + // Total capacity in all Metaspaces + static size_t capacity_bytes_slow() { +#ifdef PRODUCT + // Use allocated_capacity_bytes() in PRODUCT instead of this function. + guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); +#endif + size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); + size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); + assert(allocated_capacity_bytes() == class_capacity + non_class_capacity, + err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT + " class_capacity + non_class_capacity " SIZE_FORMAT + " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, + allocated_capacity_bytes(), class_capacity + non_class_capacity, + class_capacity, non_class_capacity)); + + return class_capacity + non_class_capacity; } // Total space reserved in all Metaspaces @@ -201,6 +261,11 @@ class MetaspaceAux : AllStatic { static void print_waste(outputStream* out); static void dump(outputStream* out); static void verify_free_chunks(); + // Checks that the values returned by allocated_capacity_bytes() and + // capacity_bytes_slow() are the same. + static void verify_capacity(); + static void verify_used(); + static void verify_metrics(); }; // Metaspace are deallocated when their class loader are GC'ed. 
@@ -235,7 +300,6 @@ class MetaspaceGC : AllStatic { public: static size_t capacity_until_GC() { return _capacity_until_GC; } - static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; } static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; } static void dec_capacity_until_GC(size_t v) { _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0; diff --git a/hotspot/src/share/vm/memory/metaspaceCounters.cpp b/hotspot/src/share/vm/memory/metaspaceCounters.cpp index dc2f4f733aa..b2be29bca2f 100644 --- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp +++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp @@ -29,6 +29,16 @@ MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL; +size_t MetaspaceCounters::calc_total_capacity() { + // The total capacity is the sum of + // 1) capacity of Metachunks in use by all Metaspaces + // 2) unused space at the end of each Metachunk + // 3) space in the freelist + size_t total_capacity = MetaspaceAux::allocated_capacity_bytes() + + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes(); + return total_capacity; +} + MetaspaceCounters::MetaspaceCounters() : _capacity(NULL), _used(NULL), @@ -36,8 +46,8 @@ MetaspaceCounters::MetaspaceCounters() : if (UsePerfData) { size_t min_capacity = MetaspaceAux::min_chunk_size(); size_t max_capacity = MetaspaceAux::reserved_in_bytes(); - size_t curr_capacity = MetaspaceAux::capacity_in_bytes(); - size_t used = MetaspaceAux::used_in_bytes(); + size_t curr_capacity = calc_total_capacity(); + size_t used = MetaspaceAux::allocated_used_bytes(); initialize(min_capacity, max_capacity, curr_capacity, used); } @@ -82,15 +92,13 @@ void MetaspaceCounters::initialize(size_t min_capacity, void MetaspaceCounters::update_capacity() { assert(UsePerfData, "Should not be called unless being used"); - assert(_capacity != NULL, "Should be initialized"); - size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes(); - _capacity->set_value(capacity_in_bytes); + size_t total_capacity = calc_total_capacity(); + _capacity->set_value(total_capacity); } void MetaspaceCounters::update_used() { assert(UsePerfData, "Should not be called unless being used"); - assert(_used != NULL, "Should be initialized"); - size_t used_in_bytes = MetaspaceAux::used_in_bytes(); + size_t used_in_bytes = MetaspaceAux::allocated_used_bytes(); _used->set_value(used_in_bytes); } diff --git a/hotspot/src/share/vm/memory/metaspaceCounters.hpp b/hotspot/src/share/vm/memory/metaspaceCounters.hpp index 4b6de646b60..46a9308888a 100644 --- a/hotspot/src/share/vm/memory/metaspaceCounters.hpp +++ b/hotspot/src/share/vm/memory/metaspaceCounters.hpp @@ -37,6 +37,7 @@ class MetaspaceCounters: public CHeapObj { size_t max_capacity, size_t curr_capacity, size_t used); + size_t calc_total_capacity(); public: MetaspaceCounters(); ~MetaspaceCounters(); diff --git a/hotspot/src/share/vm/memory/metaspaceShared.cpp b/hotspot/src/share/vm/memory/metaspaceShared.cpp index 4f53114c6cd..5f0f152e975 100644 --- a/hotspot/src/share/vm/memory/metaspaceShared.cpp +++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp @@ -376,18 +376,17 @@ void VM_PopulateDumpSharedSpace::doit() { const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT; Metaspace* ro_space = _loader_data->ro_metaspace(); Metaspace* rw_space = _loader_data->rw_metaspace(); - const size_t BPW = BytesPerWord; // Allocated size of each space (may not be all occupied) - const size_t ro_alloced = 
ro_space->capacity_words(Metaspace::NonClassType) * BPW; - const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW; + const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType); + const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType); const size_t md_alloced = md_end-md_low; const size_t mc_alloced = mc_end-mc_low; const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced; // Occupied size of each space. - const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW; - const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW; + const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType); + const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType); const size_t md_bytes = size_t(md_top - md_low); const size_t mc_bytes = size_t(mc_top - mc_low); From fa3b99342326219f90a25f7c75f3c1559022a205 Mon Sep 17 00:00:00 2001 From: John Cuthbertson Date: Wed, 10 Apr 2013 10:57:34 -0700 Subject: [PATCH 008/162] 8010780: G1: Eden occupancy/capacity output wrong after a full GC Move the calculation and recording of eden capacity to the start of a GC and print a detailed heap transition for full GCs. Reviewed-by: tschatzl, jmasa --- .../gc_implementation/g1/g1CollectedHeap.cpp | 398 +++++++++--------- .../g1/g1CollectorPolicy.cpp | 33 +- .../g1/g1CollectorPolicy.hpp | 30 +- 3 files changed, 237 insertions(+), 224 deletions(-) diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 610942585e7..4d1dd34ca04 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1322,234 +1322,240 @@ bool G1CollectedHeap::do_collection(bool explicit_gc, gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); - TraceCollectorStats tcs(g1mm()->full_collection_counters()); - TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); - - double start = os::elapsedTime(); - g1_policy()->record_full_collection_start(); - - // Note: When we have a more flexible GC logging framework that - // allows us to add optional attributes to a GC log record we - // could consider timing and reporting how long we wait in the - // following two methods. - wait_while_free_regions_coming(); - // If we start the compaction before the CM threads finish - // scanning the root regions we might trip them over as we'll - // be moving objects / updating references. So let's wait until - // they are done. By telling them to abort, they should complete - // early. 
- _cm->root_regions()->abort(); - _cm->root_regions()->wait_until_scan_finished(); - append_secondary_free_list_if_not_empty_with_lock(); - - gc_prologue(true); - increment_total_collections(true /* full gc */); - increment_old_marking_cycles_started(); - - size_t g1h_prev_used = used(); - assert(used() == recalculate_used(), "Should be equal"); - - verify_before_gc(); - - pre_full_gc_dump(); - - COMPILER2_PRESENT(DerivedPointerTable::clear()); - - // Disable discovery and empty the discovered lists - // for the CM ref processor. - ref_processor_cm()->disable_discovery(); - ref_processor_cm()->abandon_partial_discovery(); - ref_processor_cm()->verify_no_references_recorded(); - - // Abandon current iterations of concurrent marking and concurrent - // refinement, if any are in progress. We have to do this before - // wait_until_scan_finished() below. - concurrent_mark()->abort(); - - // Make sure we'll choose a new allocation region afterwards. - release_mutator_alloc_region(); - abandon_gc_alloc_regions(); - g1_rem_set()->cleanupHRRS(); - - // We should call this after we retire any currently active alloc - // regions so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(true /* full */, (size_t) total_collections()); - - // We may have added regions to the current incremental collection - // set between the last GC or pause and now. We need to clear the - // incremental collection set and then start rebuilding it afresh - // after this full GC. - abandon_collection_set(g1_policy()->inc_cset_head()); - g1_policy()->clear_incremental_cset(); - g1_policy()->stop_incremental_cset_building(); - - tear_down_region_sets(false /* free_list_only */); - g1_policy()->set_gcs_are_young(true); - - // See the comments in g1CollectedHeap.hpp and - // G1CollectedHeap::ref_processing_init() about - // how reference processing currently works in G1. - - // Temporarily make discovery by the STW ref processor single threaded (non-MT). - ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); - - // Temporarily clear the STW ref processor's _is_alive_non_header field. - ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); - - ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); - ref_processor_stw()->setup_policy(do_clear_all_soft_refs); - - // Do collection work { - HandleMark hm; // Discard invalid handles created during gc - G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); - } + TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty); + TraceCollectorStats tcs(g1mm()->full_collection_counters()); + TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); - assert(free_regions() == 0, "we should not have added any free regions"); - rebuild_region_sets(false /* free_list_only */); + double start = os::elapsedTime(); + g1_policy()->record_full_collection_start(); - // Enqueue any discovered reference objects that have - // not been removed from the discovered lists. - ref_processor_stw()->enqueue_discovered_references(); + // Note: When we have a more flexible GC logging framework that + // allows us to add optional attributes to a GC log record we + // could consider timing and reporting how long we wait in the + // following two methods. 
+ wait_while_free_regions_coming(); + // If we start the compaction before the CM threads finish + // scanning the root regions we might trip them over as we'll + // be moving objects / updating references. So let's wait until + // they are done. By telling them to abort, they should complete + // early. + _cm->root_regions()->abort(); + _cm->root_regions()->wait_until_scan_finished(); + append_secondary_free_list_if_not_empty_with_lock(); - COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + gc_prologue(true); + increment_total_collections(true /* full gc */); + increment_old_marking_cycles_started(); - MemoryService::track_memory_usage(); + assert(used() == recalculate_used(), "Should be equal"); - verify_after_gc(); + verify_before_gc(); - assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); - ref_processor_stw()->verify_no_references_recorded(); + pre_full_gc_dump(); - // Delete metaspaces for unloaded class loaders and clean up loader_data graph - ClassLoaderDataGraph::purge(); + COMPILER2_PRESENT(DerivedPointerTable::clear()); - // Note: since we've just done a full GC, concurrent - // marking is no longer active. Therefore we need not - // re-enable reference discovery for the CM ref processor. - // That will be done at the start of the next marking cycle. - assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); - ref_processor_cm()->verify_no_references_recorded(); + // Disable discovery and empty the discovered lists + // for the CM ref processor. + ref_processor_cm()->disable_discovery(); + ref_processor_cm()->abandon_partial_discovery(); + ref_processor_cm()->verify_no_references_recorded(); - reset_gc_time_stamp(); - // Since everything potentially moved, we will clear all remembered - // sets, and clear all cards. Later we will rebuild remebered - // sets. We will also reset the GC time stamps of the regions. - clear_rsets_post_compaction(); - check_gc_time_stamps(); + // Abandon current iterations of concurrent marking and concurrent + // refinement, if any are in progress. We have to do this before + // wait_until_scan_finished() below. + concurrent_mark()->abort(); - // Resize the heap if necessary. - resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); + // Make sure we'll choose a new allocation region afterwards. + release_mutator_alloc_region(); + abandon_gc_alloc_regions(); + g1_rem_set()->cleanupHRRS(); - if (_hr_printer.is_active()) { - // We should do this after we potentially resize the heap so - // that all the COMMIT / UNCOMMIT events are generated before - // the end GC event. + // We should call this after we retire any currently active alloc + // regions so that all the ALLOC / RETIRE events are generated + // before the start GC event. + _hr_printer.start_gc(true /* full */, (size_t) total_collections()); - print_hrs_post_compaction(); - _hr_printer.end_gc(true /* full */, (size_t) total_collections()); - } + // We may have added regions to the current incremental collection + // set between the last GC or pause and now. We need to clear the + // incremental collection set and then start rebuilding it afresh + // after this full GC. 
+ abandon_collection_set(g1_policy()->inc_cset_head()); + g1_policy()->clear_incremental_cset(); + g1_policy()->stop_incremental_cset_building(); - if (_cg1r->use_cache()) { - _cg1r->clear_and_record_card_counts(); - _cg1r->clear_hot_cache(); - } + tear_down_region_sets(false /* free_list_only */); + g1_policy()->set_gcs_are_young(true); - // Rebuild remembered sets of all regions. - if (G1CollectedHeap::use_parallel_gc_threads()) { - uint n_workers = - AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), - workers()->active_workers(), - Threads::number_of_non_daemon_threads()); - assert(UseDynamicNumberOfGCThreads || - n_workers == workers()->total_workers(), - "If not dynamic should be using all the workers"); - workers()->set_active_workers(n_workers); - // Set parallel threads in the heap (_n_par_threads) only - // before a parallel phase and always reset it to 0 after - // the phase so that the number of parallel threads does - // no get carried forward to a serial phase where there - // may be code that is "possibly_parallel". - set_par_threads(n_workers); + // See the comments in g1CollectedHeap.hpp and + // G1CollectedHeap::ref_processing_init() about + // how reference processing currently works in G1. - ParRebuildRSTask rebuild_rs_task(this); - assert(check_heap_region_claim_values( - HeapRegion::InitialClaimValue), "sanity check"); - assert(UseDynamicNumberOfGCThreads || - workers()->active_workers() == workers()->total_workers(), - "Unless dynamic should use total workers"); - // Use the most recent number of active workers - assert(workers()->active_workers() > 0, - "Active workers not properly set"); - set_par_threads(workers()->active_workers()); - workers()->run_task(&rebuild_rs_task); - set_par_threads(0); - assert(check_heap_region_claim_values( - HeapRegion::RebuildRSClaimValue), "sanity check"); - reset_heap_region_claim_values(); - } else { - RebuildRSOutOfRegionClosure rebuild_rs(this); - heap_region_iterate(&rebuild_rs); - } + // Temporarily make discovery by the STW ref processor single threaded (non-MT). + ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false); - if (G1Log::fine()) { - print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity()); - } + // Temporarily clear the STW ref processor's _is_alive_non_header field. + ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL); - if (true) { // FIXME - MetaspaceGC::compute_new_size(); - } + ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/); + ref_processor_stw()->setup_policy(do_clear_all_soft_refs); - // Start a new incremental collection set for the next pause - assert(g1_policy()->collection_set() == NULL, "must be"); - g1_policy()->start_incremental_cset_building(); + // Do collection work + { + HandleMark hm; // Discard invalid handles created during gc + G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); + } - // Clear the _cset_fast_test bitmap in anticipation of adding - // regions to the incremental collection set for the next - // evacuation pause. - clear_cset_fast_test(); + assert(free_regions() == 0, "we should not have added any free regions"); + rebuild_region_sets(false /* free_list_only */); - init_mutator_alloc_region(); + // Enqueue any discovered reference objects that have + // not been removed from the discovered lists. 
+ ref_processor_stw()->enqueue_discovered_references(); - double end = os::elapsedTime(); - g1_policy()->record_full_collection_end(); + COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); + + MemoryService::track_memory_usage(); + + verify_after_gc(); + + assert(!ref_processor_stw()->discovery_enabled(), "Postcondition"); + ref_processor_stw()->verify_no_references_recorded(); + + // Delete metaspaces for unloaded class loaders and clean up loader_data graph + ClassLoaderDataGraph::purge(); + + // Note: since we've just done a full GC, concurrent + // marking is no longer active. Therefore we need not + // re-enable reference discovery for the CM ref processor. + // That will be done at the start of the next marking cycle. + assert(!ref_processor_cm()->discovery_enabled(), "Postcondition"); + ref_processor_cm()->verify_no_references_recorded(); + + reset_gc_time_stamp(); + // Since everything potentially moved, we will clear all remembered + // sets, and clear all cards. Later we will rebuild remebered + // sets. We will also reset the GC time stamps of the regions. + clear_rsets_post_compaction(); + check_gc_time_stamps(); + + // Resize the heap if necessary. + resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size); + + if (_hr_printer.is_active()) { + // We should do this after we potentially resize the heap so + // that all the COMMIT / UNCOMMIT events are generated before + // the end GC event. + + print_hrs_post_compaction(); + _hr_printer.end_gc(true /* full */, (size_t) total_collections()); + } + + if (_cg1r->use_cache()) { + _cg1r->clear_and_record_card_counts(); + _cg1r->clear_hot_cache(); + } + + // Rebuild remembered sets of all regions. + if (G1CollectedHeap::use_parallel_gc_threads()) { + uint n_workers = + AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), + workers()->active_workers(), + Threads::number_of_non_daemon_threads()); + assert(UseDynamicNumberOfGCThreads || + n_workers == workers()->total_workers(), + "If not dynamic should be using all the workers"); + workers()->set_active_workers(n_workers); + // Set parallel threads in the heap (_n_par_threads) only + // before a parallel phase and always reset it to 0 after + // the phase so that the number of parallel threads does + // no get carried forward to a serial phase where there + // may be code that is "possibly_parallel". 
+ set_par_threads(n_workers); + + ParRebuildRSTask rebuild_rs_task(this); + assert(check_heap_region_claim_values( + HeapRegion::InitialClaimValue), "sanity check"); + assert(UseDynamicNumberOfGCThreads || + workers()->active_workers() == workers()->total_workers(), + "Unless dynamic should use total workers"); + // Use the most recent number of active workers + assert(workers()->active_workers() > 0, + "Active workers not properly set"); + set_par_threads(workers()->active_workers()); + workers()->run_task(&rebuild_rs_task); + set_par_threads(0); + assert(check_heap_region_claim_values( + HeapRegion::RebuildRSClaimValue), "sanity check"); + reset_heap_region_claim_values(); + } else { + RebuildRSOutOfRegionClosure rebuild_rs(this); + heap_region_iterate(&rebuild_rs); + } + + if (true) { // FIXME + MetaspaceGC::compute_new_size(); + } #ifdef TRACESPINNING - ParallelTaskTerminator::print_termination_counts(); + ParallelTaskTerminator::print_termination_counts(); #endif - gc_epilogue(true); + // Discard all rset updates + JavaThread::dirty_card_queue_set().abandon_logs(); + assert(!G1DeferredRSUpdate + || (G1DeferredRSUpdate && + (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); - // Discard all rset updates - JavaThread::dirty_card_queue_set().abandon_logs(); - assert(!G1DeferredRSUpdate - || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any"); + _young_list->reset_sampled_info(); + // At this point there should be no regions in the + // entire heap tagged as young. + assert(check_young_list_empty(true /* check_heap */), + "young list should be empty at this point"); - _young_list->reset_sampled_info(); - // At this point there should be no regions in the - // entire heap tagged as young. - assert( check_young_list_empty(true /* check_heap */), - "young list should be empty at this point"); + // Update the number of full collections that have been completed. + increment_old_marking_cycles_completed(false /* concurrent */); - // Update the number of full collections that have been completed. - increment_old_marking_cycles_completed(false /* concurrent */); + _hrs.verify_optional(); + verify_region_sets_optional(); - _hrs.verify_optional(); - verify_region_sets_optional(); + // Start a new incremental collection set for the next pause + assert(g1_policy()->collection_set() == NULL, "must be"); + g1_policy()->start_incremental_cset_building(); + + // Clear the _cset_fast_test bitmap in anticipation of adding + // regions to the incremental collection set for the next + // evacuation pause. + clear_cset_fast_test(); + + init_mutator_alloc_region(); + + double end = os::elapsedTime(); + g1_policy()->record_full_collection_end(); + + if (G1Log::fine()) { + g1_policy()->print_heap_transition(); + } + + // We must call G1MonitoringSupport::update_sizes() in the same scoping level + // as an active TraceMemoryManagerStats object (i.e. before the destructor for the + // TraceMemoryManagerStats is called) so that the G1 memory pools are updated + // before any GC notifications are raised. + g1mm()->update_sizes(); + + gc_epilogue(true); + } + + if (G1Log::finer()) { + g1_policy()->print_detailed_heap_transition(); + } print_heap_after_gc(); - // We must call G1MonitoringSupport::update_sizes() in the same scoping level - // as an active TraceMemoryManagerStats object (i.e. before the destructor for the - // TraceMemoryManagerStats is called) so that the G1 memory pools are updated - // before any GC notifications are raised. 
- g1mm()->update_sizes(); + post_full_gc_dump(); } - post_full_gc_dump(); - return true; } @@ -3829,7 +3835,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { // The elapsed time induced by the start time below deliberately elides // the possible verification above. double sample_start_time_sec = os::elapsedTime(); - size_t start_used_bytes = used(); #if YOUNG_LIST_VERBOSE gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:"); @@ -3837,8 +3842,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) { g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty); #endif // YOUNG_LIST_VERBOSE - g1_policy()->record_collection_pause_start(sample_start_time_sec, - start_used_bytes); + g1_policy()->record_collection_pause_start(sample_start_time_sec); double scan_wait_start = os::elapsedTime(); // We have to wait until the CM threads finish scanning the diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp index 87acfec56f9..34d8a3688be 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -406,7 +406,6 @@ void G1CollectorPolicy::init() { } _free_regions_at_end_of_collection = _g1->free_regions(); update_young_list_target_length(); - _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes; // We may immediately start allocating regions and placing them on the // collection set list. Initialize the per-collection set info @@ -746,6 +745,7 @@ G1CollectorPolicy::verify_young_ages(HeapRegion* head, void G1CollectorPolicy::record_full_collection_start() { _full_collection_start_sec = os::elapsedTime(); + record_heap_size_info_at_start(); // Release the future to-space so that it is available for compaction into. _g1->set_full_collection(); } @@ -788,8 +788,7 @@ void G1CollectorPolicy::record_stop_world_start() { _stop_world_start = os::elapsedTime(); } -void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, - size_t start_used) { +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) { // We only need to do this here as the policy will only be applied // to the GC we're about to start. so, no point is calculating this // every time we calculate / recalculate the target young length. 
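Illustration (not part of the patch): the hunk below centralizes the "before
GC" bookkeeping in the new record_heap_size_info_at_start(), which is now
called from both record_full_collection_start() and
record_collection_pause_start(). The sketch below only mirrors the
eden-capacity arithmetic that function records; the region size, young-list
target length, and survivor usage are made-up example values, not G1 defaults.

    // Standalone sketch of the eden-capacity-before-GC calculation.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t region_size_bytes        = 1 * 1024 * 1024;  // assumed 1 MB regions
      const size_t young_list_target_length = 100;              // assumed young target
      const size_t survivor_bytes_before_gc = 10 * 1024 * 1024; // assumed survivor usage

      // Mirrors: (_young_list_target_length * HeapRegion::GrainBytes)
      //              - _survivor_bytes_before_gc
      const size_t eden_capacity_before_gc =
          young_list_target_length * region_size_bytes - survivor_bytes_before_gc;

      std::printf("eden capacity before GC: %zu bytes\n", eden_capacity_before_gc);
      return 0;
    }
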
@@ -803,19 +802,14 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec, _trace_gen0_time_data.record_start_collection(s_w_t_ms); _stop_world_start = 0.0; + record_heap_size_info_at_start(); + phase_times()->record_cur_collection_start_sec(start_time_sec); - _cur_collection_pause_used_at_start_bytes = start_used; - _cur_collection_pause_used_regions_at_start = _g1->used_regions(); _pending_cards = _g1->pending_card_num(); _collection_set_bytes_used_before = 0; _bytes_copied_during_gc = 0; - YoungList* young_list = _g1->young_list(); - _eden_bytes_before_gc = young_list->eden_used_bytes(); - _survivor_bytes_before_gc = young_list->survivor_used_bytes(); - _capacity_before_gc = _g1->capacity(); - _last_gc_was_young = false; // do that for any other surv rate groups @@ -1153,6 +1147,21 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) { byte_size_in_proper_unit((double)(bytes)), \ proper_unit_for_byte_size((bytes)) +void G1CollectorPolicy::record_heap_size_info_at_start() { + YoungList* young_list = _g1->young_list(); + _eden_bytes_before_gc = young_list->eden_used_bytes(); + _survivor_bytes_before_gc = young_list->survivor_used_bytes(); + _capacity_before_gc = _g1->capacity(); + + _cur_collection_pause_used_at_start_bytes = _g1->used(); + _cur_collection_pause_used_regions_at_start = _g1->used_regions(); + + size_t eden_capacity_before_gc = + (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc; + + _prev_eden_capacity = eden_capacity_before_gc; +} + void G1CollectorPolicy::print_heap_transition() { _g1->print_size_transition(gclog_or_tty, _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity()); @@ -1183,8 +1192,6 @@ void G1CollectorPolicy::print_detailed_heap_transition() { EXT_SIZE_PARAMS(_capacity_before_gc), EXT_SIZE_PARAMS(used), EXT_SIZE_PARAMS(capacity)); - - _prev_eden_capacity = eden_capacity; } void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time, diff --git a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp index 0c04167c8a4..08867850394 100644 --- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -671,34 +671,36 @@ public: bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); - // Update the heuristic info to record a collection pause of the given - // start time, where the given number of bytes were used at the start. - // This may involve changing the desired size of a collection set. + // Record the start and end of an evacuation pause. + void record_collection_pause_start(double start_time_sec); + void record_collection_pause_end(double pause_time_ms); - void record_stop_world_start(); - - void record_collection_pause_start(double start_time_sec, size_t start_used); + // Record the start and end of a full collection. + void record_full_collection_start(); + void record_full_collection_end(); // Must currently be called while the world is stopped. 
- void record_concurrent_mark_init_end(double - mark_init_elapsed_time_ms); + void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms); + // Record start and end of remark. void record_concurrent_mark_remark_start(); void record_concurrent_mark_remark_end(); + // Record start, end, and completion of cleanup. void record_concurrent_mark_cleanup_start(); void record_concurrent_mark_cleanup_end(int no_of_gc_threads); void record_concurrent_mark_cleanup_completed(); - void record_concurrent_pause(); + // Records the information about the heap size for reporting in + // print_detailed_heap_transition + void record_heap_size_info_at_start(); - void record_collection_pause_end(double pause_time); + // Print heap sizing transition (with less and more detail). void print_heap_transition(); void print_detailed_heap_transition(); - // Record the fact that a full collection occurred. - void record_full_collection_start(); - void record_full_collection_end(); + void record_stop_world_start(); + void record_concurrent_pause(); // Record how much space we copied during a GC. This is typically // called when a GC alloc region is being retired. From 7f78a7f47504d1c570b8865ba20149d88ab2684d Mon Sep 17 00:00:00 2001 From: Jon Masamitsu Date: Mon, 1 Apr 2013 10:50:30 -0700 Subject: [PATCH 009/162] 8011173: NPG: Replace the ChunkList implementation with class FreeList Reviewed-by: mgerdin, tschatzl, johnc, coleenp --- hotspot/src/share/vm/memory/metaspace.cpp | 137 +++++++--------------- 1 file changed, 40 insertions(+), 97 deletions(-) diff --git a/hotspot/src/share/vm/memory/metaspace.cpp b/hotspot/src/share/vm/memory/metaspace.cpp index 533d982a9d7..1f623bf6032 100644 --- a/hotspot/src/share/vm/memory/metaspace.cpp +++ b/hotspot/src/share/vm/memory/metaspace.cpp @@ -103,27 +103,7 @@ bool MetaspaceGC::_should_concurrent_collect = false; // a chunk is placed on the free list of blocks (BlockFreelist) and // reused from there. -// Pointer to list of Metachunks. -class ChunkList VALUE_OBJ_CLASS_SPEC { - // List of free chunks - Metachunk* _head; - - public: - // Constructor - ChunkList() : _head(NULL) {} - - // Accessors - Metachunk* head() { return _head; } - void set_head(Metachunk* v) { _head = v; } - - // Link at head of the list - void add_at_head(Metachunk* head, Metachunk* tail); - void add_at_head(Metachunk* head); - - size_t sum_list_size(); - size_t sum_list_count(); - size_t sum_list_capacity(); -}; +typedef class FreeList ChunkList; // Manages the global free lists of chunks. // Has three lists of free chunks, and a total size and @@ -185,6 +165,10 @@ class ChunkManager VALUE_OBJ_CLASS_SPEC { // for special, small, medium, and humongous chunks. static ChunkIndex list_index(size_t size); + // Add the simple linked list of chunks to the freelist of chunks + // of type index. 
+ void return_chunks(ChunkIndex index, Metachunk* chunks); + // Total of the space in the free chunks list size_t free_chunks_total(); size_t free_chunks_total_in_bytes(); @@ -899,6 +883,9 @@ VirtualSpaceList::VirtualSpaceList(size_t word_size ) : Mutex::_no_safepoint_check_flag); bool initialization_succeeded = grow_vs(word_size); + _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); + _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk); + _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk); assert(initialization_succeeded, " VirtualSpaceList initialization should not fail"); } @@ -913,6 +900,9 @@ VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : Mutex::_no_safepoint_check_flag); VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs); bool succeeded = class_entry->initialize(); + _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk); + _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk); + _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk); assert(succeeded, " VirtualSpaceList initialization should not fail"); link_vs(class_entry, rs.size()/BytesPerWord); } @@ -1380,76 +1370,6 @@ bool Metadebug::test_metadata_failure() { } #endif -// ChunkList methods - -size_t ChunkList::sum_list_size() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result += cur->word_size(); - cur = cur->next(); - } - return result; -} - -size_t ChunkList::sum_list_count() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result++; - cur = cur->next(); - } - return result; -} - -size_t ChunkList::sum_list_capacity() { - size_t result = 0; - Metachunk* cur = head(); - while (cur != NULL) { - result += cur->capacity_word_size(); - cur = cur->next(); - } - return result; -} - -void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) { - assert_lock_strong(SpaceManager::expand_lock()); - assert(head == tail || tail->next() == NULL, - "Not the tail or the head has already been added to a list"); - - if (TraceMetadataChunkAllocation && Verbose) { - gclog_or_tty->print("ChunkList::add_at_head(head, tail): "); - Metachunk* cur = head; - while (cur != NULL) { - gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size()); - cur = cur->next(); - } - gclog_or_tty->print_cr(""); - } - - if (tail != NULL) { - tail->set_next(_head); - } - set_head(head); -} - -void ChunkList::add_at_head(Metachunk* list) { - if (list == NULL) { - // Nothing to add - return; - } - assert_lock_strong(SpaceManager::expand_lock()); - Metachunk* head = list; - Metachunk* tail = list; - Metachunk* cur = head->next(); - // Search for the tail since it is not passed. - while (cur != NULL) { - tail = cur; - cur = cur->next(); - } - add_at_head(head, tail); -} - // ChunkManager methods // Verification of _free_chunks_total and _free_chunks_count does not @@ -1553,7 +1473,7 @@ size_t ChunkManager::sum_free_chunks() { continue; } - result = result + list->sum_list_capacity(); + result = result + list->count() * list->size(); } result = result + humongous_dictionary()->total_size(); return result; @@ -1567,7 +1487,7 @@ size_t ChunkManager::sum_free_chunks_count() { if (list == NULL) { continue; } - count = count + list->sum_list_count(); + count = count + list->count(); } count = count + humongous_dictionary()->total_free_blocks(); return count; @@ -1622,7 +1542,7 @@ Metachunk* ChunkManager::free_chunks_get(size_t word_size) { } // Remove the chunk as the head of the list. 
- free_list->set_head(chunk->next()); + free_list->remove_chunk(chunk); // Chunk is being removed from the chunks free list. dec_free_chunks_total(chunk->capacity_word_size()); @@ -1679,7 +1599,7 @@ Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) { size_t list_count; if (list_index(word_size) < HumongousIndex) { ChunkList* list = find_free_chunks_list(word_size); - list_count = list->sum_list_count(); + list_count = list->count(); } else { list_count = humongous_dictionary()->total_count(); } @@ -1958,6 +1878,29 @@ void SpaceManager::initialize() { } } +void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { + if (chunks == NULL) { + return; + } + ChunkList* list = free_chunks(index); + assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); + assert_lock_strong(SpaceManager::expand_lock()); + Metachunk* cur = chunks; + + // This return chunks one at a time. If a new + // class List can be created that is a base class + // of FreeList then something like FreeList::prepend() + // can be used in place of this loop + while (cur != NULL) { + // Capture the next link before it is changed + // by the call to return_chunk_at_head(); + Metachunk* next = cur->next(); + cur->set_is_free(true); + list->return_chunk_at_head(cur); + cur = next; + } +} + SpaceManager::~SpaceManager() { // This call this->_lock which can't be done while holding expand_lock() const size_t in_use_before = sum_capacity_in_chunks_in_use(); @@ -1995,11 +1938,11 @@ SpaceManager::~SpaceManager() { chunk_size_name(i)); } Metachunk* chunks = chunks_in_use(i); - chunk_manager->free_chunks(i)->add_at_head(chunks); + chunk_manager->return_chunks(i, chunks); set_chunks_in_use(i, NULL); if (TraceMetadataChunkAllocation && Verbose) { gclog_or_tty->print_cr("updated freelist count %d %s", - chunk_manager->free_chunks(i)->sum_list_count(), + chunk_manager->free_chunks(i)->count(), chunk_size_name(i)); } assert(i != HumongousIndex, "Humongous chunks are handled explicitly later"); From 56370657b97f9d8ec5805ce15f26048adeb38323 Mon Sep 17 00:00:00 2001 From: Jiangli Zhou Date: Tue, 9 Apr 2013 17:17:41 -0400 Subject: [PATCH 010/162] 8010862: The Method counter fields used for profiling can be allocated lazily Allocate the method's profiling related metadata until they are needed. 
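(Illustrative sketch, not HotSpot source: the pattern below mirrors what this
patch does -- Method keeps a pointer to a separately allocated MethodCounters
object that starts out NULL, and the interpreter only allocates it, via
InterpreterRuntime::build_method_counters, the first time it needs to bump an
invocation or backedge counter. The type names, fields, and nothrow handling
here are simplified stand-ins, not the real declarations.)

    #include <cstdio>
    #include <new>

    struct MethodCounters {            // stand-in for oops/methodCounters.hpp
      int invocation_counter = 0;
      int backedge_counter   = 0;
      int interpreter_invocation_count = 0;
      int interpreter_throwout_count   = 0;
    };

    struct Method {
      MethodCounters* _method_counters = nullptr;   // allocated lazily

      MethodCounters* get_method_counters() {
        if (_method_counters == nullptr) {
          // Plays the role of InterpreterRuntime::build_method_counters();
          // if allocation fails the caller simply skips the counter update,
          // like the "skip" label in the generated interpreter code.
          _method_counters = new (std::nothrow) MethodCounters();
        }
        return _method_counters;
      }
    };

    int main() {
      Method m;                                   // no counters yet
      if (MethodCounters* mcs = m.get_method_counters()) {
        mcs->invocation_counter++;                // first use allocates them
      }
      std::printf("invocations: %d\n",
                  m._method_counters ? m._method_counters->invocation_counter : 0);
      delete m._method_counters;                  // keep the sketch leak-free
      return 0;
    }
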
Reviewed-by: coleenp, roland --- .../classes/sun/jvm/hotspot/oops/Method.java | 34 ++--- .../sun/jvm/hotspot/oops/MethodCounters.java | 86 ++++++++++++ .../src/cpu/sparc/vm/cppInterpreter_sparc.cpp | 24 ++-- .../src/cpu/sparc/vm/interp_masm_sparc.cpp | 48 ++++--- .../src/cpu/sparc/vm/interp_masm_sparc.hpp | 7 +- .../sparc/vm/templateInterpreter_sparc.cpp | 36 +++-- .../src/cpu/sparc/vm/templateTable_sparc.cpp | 27 ++-- hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp | 29 ++-- hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp | 16 ++- hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp | 3 +- hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp | 16 ++- hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp | 3 +- .../cpu/x86/vm/templateInterpreter_x86_32.cpp | 49 ++++--- .../cpu/x86/vm/templateInterpreter_x86_64.cpp | 55 ++++---- .../src/cpu/x86/vm/templateTable_x86_32.cpp | 30 +++- .../src/cpu/x86/vm/templateTable_x86_64.cpp | 28 +++- hotspot/src/share/vm/c1/c1_LIRGenerator.cpp | 19 +-- hotspot/src/share/vm/ci/ciMethod.cpp | 21 ++- hotspot/src/share/vm/ci/ciMethod.hpp | 3 +- hotspot/src/share/vm/ci/ciReplay.cpp | 9 +- .../vm/interpreter/interpreterRuntime.cpp | 13 +- .../vm/interpreter/interpreterRuntime.hpp | 3 +- .../vm/interpreter/invocationCounter.cpp | 12 +- hotspot/src/share/vm/oops/method.cpp | 60 ++++---- hotspot/src/share/vm/oops/method.hpp | 132 +++++++++++++----- hotspot/src/share/vm/oops/methodCounters.cpp | 37 +++++ hotspot/src/share/vm/oops/methodCounters.hpp | 124 ++++++++++++++++ hotspot/src/share/vm/oops/methodData.cpp | 19 +-- hotspot/src/share/vm/opto/parseHelper.cpp | 18 +-- .../vm/runtime/advancedThresholdPolicy.cpp | 13 +- .../share/vm/runtime/compilationPolicy.cpp | 64 ++++++--- hotspot/src/share/vm/runtime/fprofiler.cpp | 5 +- .../vm/runtime/simpleThresholdPolicy.cpp | 8 +- hotspot/src/share/vm/runtime/vmStructs.cpp | 17 ++- 34 files changed, 773 insertions(+), 295 deletions(-) create mode 100644 hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java create mode 100644 hotspot/src/share/vm/oops/methodCounters.cpp create mode 100644 hotspot/src/share/vm/oops/methodCounters.hpp diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java index 31dc39c5431..c61d58dc429 100644 --- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,19 +49,13 @@ public class Method extends Metadata { Type type = db.lookupType("Method"); constMethod = type.getAddressField("_constMethod"); methodData = type.getAddressField("_method_data"); + methodCounters = type.getAddressField("_method_counters"); methodSize = new CIntField(type.getCIntegerField("_method_size"), 0); accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0); code = type.getAddressField("_code"); vtableIndex = new CIntField(type.getCIntegerField("_vtable_index"), 0); - if (!VM.getVM().isCore()) { - invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); - backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); - } bytecodeOffset = type.getSize(); - interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); - interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); - /* interpreterEntry = type.getAddressField("_interpreter_entry"); fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point"); @@ -80,18 +74,14 @@ public class Method extends Metadata { // Fields private static AddressField constMethod; private static AddressField methodData; + private static AddressField methodCounters; private static CIntField methodSize; private static CIntField accessFlags; private static CIntField vtableIndex; - private static CIntField invocationCounter; - private static CIntField backedgeCounter; private static long bytecodeOffset; private static AddressField code; - private static CIntField interpreterThrowoutCountField; - private static CIntField interpreterInvocationCountField; - // constant method names - , // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable private static Symbol objectInitializerName; @@ -127,6 +117,10 @@ public class Method extends Metadata { Address addr = methodData.getValue(getAddress()); return (MethodData) VMObjectFactory.newObject(MethodData.class, addr); } + public MethodCounters getMethodCounters() { + Address addr = methodCounters.getValue(getAddress()); + return (MethodCounters) VMObjectFactory.newObject(MethodCounters.class, addr); + } /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */ public long getMethodSize() { return methodSize.getValue(this); } public long getMaxStack() { return getConstMethod().getMaxStack(); } @@ -139,16 +133,10 @@ public class Method extends Metadata { public long getCodeSize() { return getConstMethod().getCodeSize(); } public long getVtableIndex() { return vtableIndex.getValue(this); } public long getInvocationCounter() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "must not be used in core build"); - } - return invocationCounter.getValue(this); + return getMethodCounters().getInvocationCounter(); } public long getBackedgeCounter() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(!VM.getVM().isCore(), "must not be used in core build"); - } - return backedgeCounter.getValue(this); + return getMethodCounters().getBackedgeCounter(); } // get associated compiled native method, if available, else return null. 
@@ -369,10 +357,10 @@ public class Method extends Metadata { } public int interpreterThrowoutCount() { - return (int) interpreterThrowoutCountField.getValue(this); + return getMethodCounters().interpreterThrowoutCount(); } public int interpreterInvocationCount() { - return (int) interpreterInvocationCountField.getValue(this); + return getMethodCounters().interpreterInvocationCount(); } } diff --git a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java new file mode 100644 index 00000000000..854aa818c89 --- /dev/null +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +package sun.jvm.hotspot.oops; + +import java.io.*; +import java.util.*; +import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.types.*; +import sun.jvm.hotspot.utilities.*; + +public class MethodCounters extends Metadata { + public MethodCounters(Address addr) { + super(addr); + } + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { + Type type = db.lookupType("MethodCounters"); + + interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0); + interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0); + if (!VM.getVM().isCore()) { + invocationCounter = new CIntField(type.getCIntegerField("_invocation_counter"), 0); + backedgeCounter = new CIntField(type.getCIntegerField("_backedge_counter"), 0); + } + } + + private static CIntField interpreterInvocationCountField; + private static CIntField interpreterThrowoutCountField; + private static CIntField invocationCounter; + private static CIntField backedgeCounter; + + public int interpreterInvocationCount() { + return (int) interpreterInvocationCountField.getValue(this); + } + + public int interpreterThrowoutCount() { + return (int) interpreterThrowoutCountField.getValue(this); + } + public long getInvocationCounter() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "must not be used in core build"); + } + return invocationCounter.getValue(this); + } + public long getBackedgeCounter() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!VM.getVM().isCore(), "must not be used in core build"); + } + return backedgeCounter.getValue(this); + } + + public void printValueOn(PrintStream tty) { + } +} + diff --git a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp index b1451fb0bae..8a71d0923ab 100644 --- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -404,14 +404,20 @@ address CppInterpreter::deopt_entry(TosState state, int length) { // ??: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { + Label done; + const Register Rcounters = G3_scratch; + + __ ld_ptr(STATE(_method), G5_method); + __ get_method_counters(G5_method, Rcounters, done); + // Update standard invocation counters - __ increment_invocation_counter(O0, G3_scratch); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ ld_ptr(STATE(_method), G3_scratch); - Address interpreter_invocation_counter(G3_scratch, 0, in_bytes(Method::interpreter_invocation_counter_offset())); - __ ld(interpreter_invocation_counter, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, interpreter_invocation_counter); + __ increment_invocation_counter(Rcounters, O0, G4_scratch); + if (ProfileInterpreter) { + Address interpreter_invocation_counter(Rcounters, 0, + in_bytes(MethodCounters::interpreter_invocation_counter_offset())); + __ ld(interpreter_invocation_counter, G4_scratch); + __ inc(G4_scratch); + __ st(G4_scratch, interpreter_invocation_counter); } Address invocation_limit(G3_scratch, (address)&InvocationCounter::InterpreterInvocationLimit); @@ -420,7 +426,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); __ delayed()->nop(); - + __ bind(done); } address InterpreterGenerator::generate_empty_entry(void) { diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp index 6060f003b68..c543e364198 100644 --- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "oops/markOop.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" +#include "oops/methodCounters.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/jvmtiThreadState.hpp" @@ -2086,19 +2087,28 @@ void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) { #endif /* CC_INTERP */ -void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { +void InterpreterMacroAssembler::get_method_counters(Register method, + Register Rcounters, + Label& skip) { + Label has_counters; + Address method_counters(method, in_bytes(Method::method_counters_offset())); + ld_ptr(method_counters, Rcounters); + br_notnull_short(Rcounters, Assembler::pt, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + ld_ptr(method_counters, Rcounters); + br_null_short(Rcounters, Assembler::pn, skip); // No MethodCounters, OutOfMemory + bind(has_counters); +} + +void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); -#ifdef CC_INTERP - Address inv_counter(G5_method, Method::invocation_counter_offset() + + assert_different_registers(Rcounters, Rtmp, Rtmp2); + + Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset()); - Address be_counter (G5_method, Method::backedge_counter_offset() + + Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset()); -#else - Address inv_counter(Lmethod, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); - Address be_counter (Lmethod, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); -#endif /* CC_INTERP */ int delta = InvocationCounter::count_increment; // Load each counter in a register @@ -2122,19 +2132,15 @@ void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Reg // Note that this macro must leave the backedge_count + invocation_count in Rtmp! 
} -void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { +void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) { assert(UseCompiler, "incrementing must be useful"); -#ifdef CC_INTERP - Address be_counter (G5_method, Method::backedge_counter_offset() + + assert_different_registers(Rcounters, Rtmp, Rtmp2); + + Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() + InvocationCounter::counter_offset()); - Address inv_counter(G5_method, Method::invocation_counter_offset() + + Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() + InvocationCounter::counter_offset()); -#else - Address be_counter (Lmethod, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); - Address inv_counter(Lmethod, Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); -#endif /* CC_INTERP */ + int delta = InvocationCounter::count_increment; // Load each counter in a register ld( be_counter, Rtmp ); diff --git a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp index bf84848aa61..1a7901526ad 100644 --- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp +++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -263,8 +263,9 @@ class InterpreterMacroAssembler: public MacroAssembler { void compute_stack_base( Register Rdest ); #endif /* CC_INTERP */ - void increment_invocation_counter( Register Rtmp, Register Rtmp2 ); - void increment_backedge_counter( Register Rtmp, Register Rtmp2 ); + void get_method_counters(Register method, Register Rcounters, Label& skip); + void increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ); + void increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ); #ifndef CC_INTERP void test_backedge_count_for_osr( Register backedge_count, Register branch_bcp, Register Rtmp ); diff --git a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp index 4a6372ae0de..d8281cadaec 100644 --- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -292,11 +292,15 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state) // ??: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. + // Note: In tiered we increment either counters in MethodCounters* or in + // MDO depending if we're profiling or not. 
+ const Register Rcounters = G3_scratch; + Label done; + if (TieredCompilation) { const int increment = InvocationCounter::count_increment; const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // If no method data exists, go to profile_continue. __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch); @@ -311,23 +315,26 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ ba_short(done); } - // Increment counter in Method* + // Increment counter in MethodCounters* __ bind(no_mdo); - Address invocation_counter(Lmethod, - in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); + Address invocation_counter(Rcounters, + in_bytes(MethodCounters::invocation_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + __ get_method_counters(Lmethod, Rcounters, done); __ increment_mask_and_jump(invocation_counter, increment, mask, - G3_scratch, Lscratch, + G4_scratch, Lscratch, Assembler::zero, overflow); __ bind(done); } else { // Update standard invocation counters - __ increment_invocation_counter(O0, G3_scratch); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - Address interpreter_invocation_counter(Lmethod,in_bytes(Method::interpreter_invocation_counter_offset())); - __ ld(interpreter_invocation_counter, G3_scratch); - __ inc(G3_scratch); - __ st(G3_scratch, interpreter_invocation_counter); + __ get_method_counters(Lmethod, Rcounters, done); + __ increment_invocation_counter(Rcounters, O0, G4_scratch); + if (ProfileInterpreter) { + Address interpreter_invocation_counter(Rcounters, + in_bytes(MethodCounters::interpreter_invocation_counter_offset())); + __ ld(interpreter_invocation_counter, G4_scratch); + __ inc(G4_scratch); + __ st(G4_scratch, interpreter_invocation_counter); } if (ProfileInterpreter && profile_method != NULL) { @@ -345,6 +352,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp(O0, G3_scratch); __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance __ delayed()->nop(); + __ bind(done); } } diff --git a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp index 01d593fb034..91fc1deee51 100644 --- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp @@ -1604,9 +1604,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // Normal (non-jsr) branch handling // Save the current Lbcp - const Register O0_cur_bcp = O0; - __ mov( Lbcp, O0_cur_bcp ); - + const Register l_cur_bcp = Lscratch; + __ mov( Lbcp, l_cur_bcp ); bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; if ( increment_invocation_counter_for_backward_branches ) { @@ -1616,6 +1615,9 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // Bump bytecode pointer by displacement (take the branch) __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr + const Register Rcounters = G3_scratch; + __ get_method_counters(Lmethod, Rcounters, Lforward); + if (TieredCompilation) { Label Lno_mdo, Loverflow; int increment = InvocationCounter::count_increment; @@ -1628,21 +1630,22 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { // Increment backedge counter in the MDO Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) + 
in_bytes(InvocationCounter::counter_offset())); - __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch, + __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0, Assembler::notZero, &Lforward); __ ba_short(Loverflow); } - // If there's no MDO, increment counter in Method* + // If there's no MDO, increment counter in MethodCounters* __ bind(Lno_mdo); - Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch, + Address backedge_counter(Rcounters, + in_bytes(MethodCounters::backedge_counter_offset()) + + in_bytes(InvocationCounter::counter_offset())); + __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0, Assembler::notZero, &Lforward); __ bind(Loverflow); // notify point for loop, pass branch bytecode - __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp); // Was an OSR adapter generated? // O0 = osr nmethod @@ -1679,15 +1682,15 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { } else { // Update Backedge branch separately from invocations const Register G4_invoke_ctr = G4; - __ increment_backedge_counter(G4_invoke_ctr, G1_scratch); + __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch); if (ProfileInterpreter) { __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward); if (UseOnStackReplacement) { - __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch); + __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch); } } else { if (UseOnStackReplacement) { - __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch); + __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch); } } } diff --git a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp index 24e6694082e..08db8e074d0 100644 --- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp +++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -570,20 +570,28 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register // rcx: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { + Label done; + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + const Address backedge_counter (rax, + MethodCounter::backedge_counter_offset() + + InvocationCounter::counter_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); - const Address backedge_counter (rbx, Method::backedge_counter_offset() + InvocationCounter::counter_offset()); + __ get_method_counters(rbx, rax, done); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter - + __ movl(rcx, invocation_counter); __ increment(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -593,7 +601,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); - + __ bind(done); } void InterpreterGenerator::generate_counter_overflow(Label* do_continue) { @@ -977,7 +985,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); @@ -1029,8 +1036,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { } #endif - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax); NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread // Since at this point in the method invocation the exception handler diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp index 78d97a975b6..0e7f483a649 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -266,6 +266,20 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R addptr(cache, tmp); // construct pointer to cache entry } +void InterpreterMacroAssembler::get_method_counters(Register method, + Register mcs, Label& skip) { + Label has_counters; + movptr(mcs, Address(method, Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::notZero, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + movptr(mcs, Address(method,Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory + bind(has_counters); +} + // Load object from cpool->resolved_references(index) void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index) { diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp index 6dada56de0d..49a41f61d54 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -89,6 +89,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); + void get_method_counters(Register method, Register mcs, Label& skip); // load cpool->resolved_references(index); void load_resolved_reference_at_index(Register result, Register index); diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp index 890826310a8..88d57d046a5 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -271,6 +271,20 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, addptr(cache, tmp); // construct pointer to cache entry } +void InterpreterMacroAssembler::get_method_counters(Register method, + Register mcs, Label& skip) { + Label has_counters; + movptr(mcs, Address(method, Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::notZero, has_counters); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::build_method_counters), method); + movptr(mcs, Address(method,Method::method_counters_offset())); + testptr(mcs, mcs); + jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory + bind(has_counters); +} + // Load object from cpool->resolved_references(index) void InterpreterMacroAssembler::load_resolved_reference_at_index( Register result, Register index) { diff --git a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp index 66a001366ff..8bfc44fe4bc 100644 --- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,6 +111,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset, size_t index_size = sizeof(u2)); void get_cache_index_at_bcp(Register index, int bcp_offset, size_t index_size = sizeof(u2)); + void get_method_counters(Register method, Register mcs, Label& skip); // load cpool->resolved_references(index); void load_resolved_reference_at_index(Register result, Register index); diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp index fb13a44045a..f8a55c1640b 100644 --- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -344,13 +344,13 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, // rcx: invocation counter // void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) { - const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); - // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. + Label done; + // Note: In tiered we increment either counters in MethodCounters* or in MDO + // depending if we're profiling or not. 
if (TieredCompilation) { int increment = InvocationCounter::count_increment; int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // Are we profiling? __ movptr(rax, Address(rbx, Method::method_data_offset())); @@ -363,23 +363,38 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ jmpb(done); } __ bind(no_mdo); - // Increment counter in Method* (we don't need to load it, it's in rcx). - __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); + // Increment counter in MethodCounters + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + + __ get_method_counters(rbx, rax, done); + __ increment_mask_and_jump(invocation_counter, increment, mask, + rcx, false, Assembler::zero, overflow); __ bind(done); } else { - const Address backedge_counter (rbx, Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); + const Address backedge_counter (rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx,Method::interpreter_invocation_counter_offset())); + __ get_method_counters(rbx, rax, done); + + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } - // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter + // Update standard invocation counters + __ movl(rcx, invocation_counter); __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -399,6 +414,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); + __ bind(done); } } @@ -868,7 +884,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset()); @@ -897,9 +912,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { // NULL oop temp (mirror or jni oop result) __ push((int32_t)NULL_WORD); - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame - generate_fixed_frame(true); // make sure method is native & not abstract @@ -1286,7 +1299,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, 
Method::invocation_counter_offset() + InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); const Address size_of_locals (rdx, ConstMethod::size_of_locals_offset()); @@ -1326,7 +1338,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { __ bind(exit); } - if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count // initialize fixed part of activation frame generate_fixed_frame(false); diff --git a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp index 6b3f7b6ba26..27e16264438 100644 --- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -300,13 +300,12 @@ void InterpreterGenerator::generate_counter_incr( Label* overflow, Label* profile_method, Label* profile_method_continue) { - const Address invocation_counter(rbx, in_bytes(Method::invocation_counter_offset()) + - in_bytes(InvocationCounter::counter_offset())); + Label done; // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not. if (TieredCompilation) { int increment = InvocationCounter::count_increment; int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift; - Label no_mdo, done; + Label no_mdo; if (ProfileInterpreter) { // Are we profiling? __ movptr(rax, Address(rbx, Method::method_data_offset())); @@ -319,25 +318,36 @@ void InterpreterGenerator::generate_counter_incr( __ jmpb(done); } __ bind(no_mdo); - // Increment counter in Method* (we don't need to load it, it's in ecx). 
- __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow); + // Increment counter in MethodCounters + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); + __ get_method_counters(rbx, rax, done); + __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, + false, Assembler::zero, overflow); __ bind(done); } else { - const Address backedge_counter(rbx, - Method::backedge_counter_offset() + - InvocationCounter::counter_offset()); + const Address backedge_counter(rax, + MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset()); + const Address invocation_counter(rax, + MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset()); - if (ProfileInterpreter) { // %%% Merge this into MethodData* - __ incrementl(Address(rbx, - Method::interpreter_invocation_counter_offset())); + __ get_method_counters(rbx, rax, done); + + if (ProfileInterpreter) { + __ incrementl(Address(rax, + MethodCounters::interpreter_invocation_counter_offset())); } // Update standard invocation counters - __ movl(rax, backedge_counter); // load backedge counter - + __ movl(rcx, invocation_counter); __ incrementl(rcx, InvocationCounter::count_increment); + __ movl(invocation_counter, rcx); // save invocation count + + __ movl(rax, backedge_counter); // load backedge counter __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits - __ movl(invocation_counter, rcx); // save invocation count __ addl(rcx, rax); // add both counters // profile_method is non-null only for interpreted method so @@ -354,6 +364,7 @@ void InterpreterGenerator::generate_counter_incr( __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit)); __ jcc(Assembler::aboveEqual, *overflow); + __ bind(done); } } @@ -843,9 +854,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod (rbx, Method::const_offset()); - const Address invocation_counter(rbx, Method:: - invocation_counter_offset() + - InvocationCounter::counter_offset()); const Address access_flags (rbx, Method::access_flags_offset()); const Address size_of_parameters(rcx, ConstMethod:: size_of_parameters_offset()); @@ -876,10 +884,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) { // (static native method holder mirror/jni oop result) __ push((int) NULL_WORD); - if (inc_counter) { - __ movl(rcx, invocation_counter); // (pre-)fetch invocation count - } - // initialize fixed part of activation frame generate_fixed_frame(true); @@ -1296,9 +1300,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { address entry_point = __ pc(); const Address constMethod(rbx, Method::const_offset()); - const Address invocation_counter(rbx, - Method::invocation_counter_offset() + - InvocationCounter::counter_offset()); const Address access_flags(rbx, Method::access_flags_offset()); const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset()); @@ -1343,10 +1344,6 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) { __ bind(exit); } - // (pre-)fetch invocation count - if (inc_counter) { - __ movl(rcx, invocation_counter); - } // initialize fixed part of activation frame generate_fixed_frame(false); diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp index 
371517c0537..6c6cc4f9474 100644 --- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1546,9 +1546,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ get_method(rcx); // ECX holds method __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count - const ByteSize be_offset = Method::backedge_counter_offset() + InvocationCounter::counter_offset(); - const ByteSize inv_offset = Method::invocation_counter_offset() + InvocationCounter::counter_offset(); - const int method_offset = frame::interpreter_frame_method_offset * wordSize; + const ByteSize be_offset = MethodCounters::backedge_counter_offset() + + InvocationCounter::counter_offset(); + const ByteSize inv_offset = MethodCounters::invocation_counter_offset() + + InvocationCounter::counter_offset(); // Load up EDX with the branch displacement __ movl(rdx, at_bcp(1)); @@ -1596,6 +1597,22 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ testl(rdx, rdx); // check if forward or backward branch __ jcc(Assembler::positive, dispatch); // count only if backward branch + // check if MethodCounters exists + Label has_counters; + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::notZero, has_counters); + __ push(rdx); + __ push(rcx); + __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters), + rcx); + __ pop(rcx); + __ pop(rdx); + __ movptr(rax, Address(rcx, Method::method_counters_offset())); + __ testptr(rax, rax); + __ jcc(Assembler::zero, dispatch); + __ bind(has_counters); + if (TieredCompilation) { Label no_mdo; int increment = InvocationCounter::count_increment; @@ -1613,16 +1630,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) { __ jmp(dispatch); } __ bind(no_mdo); - // Increment backedge counter in Method* + // Increment backedge counter in MethodCounters* + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask, rax, false, Assembler::zero, &backedge_counter_overflow); } else { // increment counter + __ movptr(rcx, Address(rcx, Method::method_counters_offset())); __ movl(rax, Address(rcx, be_offset)); // load backedge counter __ incrementl(rax, InvocationCounter::count_increment); // increment counter __ movl(Address(rcx, be_offset), rax); // store counter __ movl(rax, Address(rcx, inv_offset)); // load invocation counter + __ andl(rax, InvocationCounter::count_mask_value); // and the status bits __ addl(rax, Address(rcx, be_offset)); // add both counters diff --git a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp index d1e22d25990..4a9bc83163e 100644 --- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -1564,11 +1564,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                      // holds bumped taken count
-  const ByteSize be_offset = Method::backedge_counter_offset() +
+  const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                              InvocationCounter::counter_offset();
-  const ByteSize inv_offset = Method::invocation_counter_offset() +
+  const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
                               InvocationCounter::counter_offset();
-  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

   // Load up edx with the branch displacement
   __ movl(rdx, at_bcp(1));
@@ -1618,6 +1617,22 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
   // r14: locals pointer
   __ testl(rdx, rdx);             // check if forward or backward branch
   __ jcc(Assembler::positive, dispatch); // count only if backward branch
+
+    // check if MethodCounters exists
+    Label has_counters;
+    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
+    __ testptr(rax, rax);
+    __ jcc(Assembler::notZero, has_counters);
+    __ push(rdx);
+    __ push(rcx);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
+               rcx);
+    __ pop(rcx);
+    __ pop(rdx);
+    __ movptr(rax, Address(rcx, Method::method_counters_offset()));
+    __ testptr(rax, rax);
+    __ jcc(Assembler::zero, dispatch);
+    __ bind(has_counters);
+
   if (TieredCompilation) {
     Label no_mdo;
     int increment = InvocationCounter::count_increment;
@@ -1635,16 +1650,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       __ jmp(dispatch);
     }
     __ bind(no_mdo);
-    // Increment backedge counter in Method*
+    // Increment backedge counter in MethodCounters*
+    __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
     __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                rax, false, Assembler::zero, &backedge_counter_overflow);
   } else {
     // increment counter
+    __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
     __ movl(rax, Address(rcx, be_offset));        // load backedge counter
     __ incrementl(rax, InvocationCounter::count_increment); // increment counter
     __ movl(Address(rcx, be_offset), rax);        // store counter
     __ movl(rax, Address(rcx, inv_offset));       // load invocation counter
+
     __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
     __ addl(rax, Address(rcx, be_offset));        // add both counters
diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
index 84402a9ef0d..493b7230c80 100644
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -3063,21 +3063,20 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
   assert(level > CompLevel_simple, "Shouldn't be here");

   int offset = -1;
-  LIR_Opr counter_holder = new_register(T_METADATA);
-  LIR_Opr meth;
+  LIR_Opr counter_holder;
   if (level == CompLevel_limited_profile) {
-    offset = in_bytes(backedge ?
Method::backedge_counter_offset() : - Method::invocation_counter_offset()); - __ metadata2reg(method->constant_encoding(), counter_holder); - meth = counter_holder; + address counters_adr = method->ensure_method_counters(); + counter_holder = new_pointer_register(); + __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder); + offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() : + MethodCounters::invocation_counter_offset()); } else if (level == CompLevel_full_profile) { + counter_holder = new_register(T_METADATA); offset = in_bytes(backedge ? MethodData::backedge_counter_offset() : MethodData::invocation_counter_offset()); ciMethodData* md = method->method_data_or_null(); assert(md != NULL, "Sanity"); __ metadata2reg(md->constant_encoding(), counter_holder); - meth = new_register(T_METADATA); - __ metadata2reg(method->constant_encoding(), meth); } else { ShouldNotReachHere(); } @@ -3088,6 +3087,8 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, __ store(result, counter); if (notify) { LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT); + LIR_Opr meth = new_register(T_METADATA); + __ metadata2reg(method->constant_encoding(), meth); __ logical_and(result, mask, result); __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0)); // The bci for info can point to cmp for if's we want the if bci diff --git a/hotspot/src/share/vm/ci/ciMethod.cpp b/hotspot/src/share/vm/ci/ciMethod.cpp index 646de740f8f..780f4ad868f 100644 --- a/hotspot/src/share/vm/ci/ciMethod.cpp +++ b/hotspot/src/share/vm/ci/ciMethod.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -904,6 +904,20 @@ ciMethodData* ciMethod::method_data_or_null() { return md; } +// ------------------------------------------------------------------ +// ciMethod::ensure_method_counters +// +address ciMethod::ensure_method_counters() { + check_is_loaded(); + VM_ENTRY_MARK; + methodHandle mh(THREAD, get_Method()); + MethodCounters *counter = mh->method_counters(); + if (counter == NULL) { + counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL); + } + return (address)counter; +} + // ------------------------------------------------------------------ // ciMethod::should_exclude // @@ -1191,13 +1205,14 @@ void ciMethod::dump_replay_data(outputStream* st) { ASSERT_IN_VM; ResourceMark rm; Method* method = get_Method(); + MethodCounters* mcs = method->method_counters(); Klass* holder = method->method_holder(); st->print_cr("ciMethod %s %s %s %d %d %d %d %d", holder->name()->as_quoted_ascii(), method->name()->as_quoted_ascii(), method->signature()->as_quoted_ascii(), - method->invocation_counter()->raw_counter(), - method->backedge_counter()->raw_counter(), + mcs == NULL ? 0 : mcs->invocation_counter()->raw_counter(), + mcs == NULL ? 0 : mcs->backedge_counter()->raw_counter(), interpreter_invocation_count(), interpreter_throwout_count(), _instructions_size); diff --git a/hotspot/src/share/vm/ci/ciMethod.hpp b/hotspot/src/share/vm/ci/ciMethod.hpp index cbef39b3cdc..f206f1991a0 100644 --- a/hotspot/src/share/vm/ci/ciMethod.hpp +++ b/hotspot/src/share/vm/ci/ciMethod.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -262,6 +262,7 @@ class ciMethod : public ciMetadata { bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const; bool check_call(int refinfo_index, bool is_static) const; bool ensure_method_data(); // make sure it exists in the VM also + address ensure_method_counters(); int instructions_size(); int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC diff --git a/hotspot/src/share/vm/ci/ciReplay.cpp b/hotspot/src/share/vm/ci/ciReplay.cpp index 030d93db7c3..0d68b8cc891 100644 --- a/hotspot/src/share/vm/ci/ciReplay.cpp +++ b/hotspot/src/share/vm/ci/ciReplay.cpp @@ -920,12 +920,17 @@ void ciReplay::initialize(ciMethod* m) { method->print_name(tty); tty->cr(); } else { + EXCEPTION_CONTEXT; + MethodCounters* mcs = method->method_counters(); // m->_instructions_size = rec->instructions_size; m->_instructions_size = -1; m->_interpreter_invocation_count = rec->interpreter_invocation_count; m->_interpreter_throwout_count = rec->interpreter_throwout_count; - method->invocation_counter()->_counter = rec->invocation_counter; - method->backedge_counter()->_counter = rec->backedge_counter; + if (mcs == NULL) { + mcs = Method::build_method_counters(method, CHECK_AND_CLEAR); + } + mcs->invocation_counter()->_counter = rec->invocation_counter; + mcs->backedge_counter()->_counter = rec->backedge_counter; } } diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp index de6b3d12812..64bd057e22b 100644 --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -454,7 +454,7 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea continuation = Interpreter::remove_activation_entry(); #endif // Count this for compilation purposes - h_method->interpreter_throwout_increment(); + h_method->interpreter_throwout_increment(THREAD); } else { // handler in this method => change bci/bcp to handler bci/bcp and continue there handler_pc = h_method->code_base() + handler_bci; @@ -908,6 +908,15 @@ IRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int r fr.interpreter_frame_set_mdp(new_mdp); IRT_END +IRT_ENTRY(MethodCounters*, InterpreterRuntime::build_method_counters(JavaThread* thread, Method* m)) + MethodCounters* mcs = Method::build_method_counters(m, thread); + if (HAS_PENDING_EXCEPTION) { + assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); + CLEAR_PENDING_EXCEPTION; + } + return mcs; +IRT_END + IRT_ENTRY(void, InterpreterRuntime::at_safepoint(JavaThread* thread)) // We used to need an explict preserve_arguments here for invoke bytecodes. 
However, diff --git a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp index 3aa2c8348e2..d46c43e94e0 100644 --- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -169,6 +169,7 @@ class InterpreterRuntime: AllStatic { #ifdef ASSERT static void verify_mdp(Method* method, address bcp, address mdp); #endif // ASSERT + static MethodCounters* build_method_counters(JavaThread* thread, Method* m); }; diff --git a/hotspot/src/share/vm/interpreter/invocationCounter.cpp b/hotspot/src/share/vm/interpreter/invocationCounter.cpp index 747516369ce..6a113d0ffc6 100644 --- a/hotspot/src/share/vm/interpreter/invocationCounter.cpp +++ b/hotspot/src/share/vm/interpreter/invocationCounter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -104,15 +104,19 @@ const char* InvocationCounter::state_as_short_string(State state) { static address do_nothing(methodHandle method, TRAPS) { // dummy action for inactive invocation counters - method->invocation_counter()->set_carry(); - method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, ""); + mcs->invocation_counter()->set_carry(); + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); return NULL; } static address do_decay(methodHandle method, TRAPS) { // decay invocation counters so compilation gets delayed - method->invocation_counter()->decay(); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, ""); + mcs->invocation_counter()->decay(); return NULL; } diff --git a/hotspot/src/share/vm/oops/method.cpp b/hotspot/src/share/vm/oops/method.cpp index 1e74a214830..b06c081781a 100644 --- a/hotspot/src/share/vm/oops/method.cpp +++ b/hotspot/src/share/vm/oops/method.cpp @@ -91,7 +91,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { set_hidden(false); set_dont_inline(false); set_method_data(NULL); - set_interpreter_throwout_count(0); + set_method_counters(NULL); set_vtable_index(Method::garbage_vtable_index); // Fix and bury in Method* @@ -105,16 +105,6 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { } NOT_PRODUCT(set_compiled_invocation_count(0);) - set_interpreter_invocation_count(0); - invocation_counter()->init(); - backedge_counter()->init(); - clear_number_of_breakpoints(); - -#ifdef TIERED - set_rate(0); - set_prev_event_count(0); - set_prev_time(0); -#endif } // Release Method*. The nmethod will be gone when we get here because @@ -124,6 +114,8 @@ void Method::deallocate_contents(ClassLoaderData* loader_data) { set_constMethod(NULL); MetadataFactory::free_metadata(loader_data, method_data()); set_method_data(NULL); + MetadataFactory::free_metadata(loader_data, method_counters()); + set_method_counters(NULL); // The nmethod will be gone when we get here. 
if (code() != NULL) _code = NULL; } @@ -323,7 +315,10 @@ bool Method::was_executed_more_than(int n) { // compiler does not bump invocation counter of compiled methods return true; } - else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) { + else if ((method_counters() != NULL && + method_counters()->invocation_counter()->carry()) || + (method_data() != NULL && + method_data()->invocation_counter()->carry())) { // The carry bit is set when the counter overflows and causes // a compilation to occur. We don't know how many times // the counter has been reset, so we simply assume it has @@ -387,6 +382,18 @@ void Method::build_interpreter_method_data(methodHandle method, TRAPS) { } } +MethodCounters* Method::build_method_counters(Method* m, TRAPS) { + methodHandle mh(m); + ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); + MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL); + if (mh->method_counters() == NULL) { + mh->set_method_counters(counters); + } else { + MetadataFactory::free_metadata(loader_data, counters); + } + return mh->method_counters(); +} + void Method::cleanup_inline_caches() { // The current system doesn't use inline caches in the interpreter // => nothing to do (keep this method around for future use) @@ -794,8 +801,6 @@ void Method::unlink_method() { set_signature_handler(NULL); } NOT_PRODUCT(set_compiled_invocation_count(0);) - invocation_counter()->reset(); - backedge_counter()->reset(); _adapter = NULL; _from_compiled_entry = NULL; @@ -808,8 +813,7 @@ void Method::unlink_method() { assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?"); set_method_data(NULL); - set_interpreter_throwout_count(0); - set_interpreter_invocation_count(0); + set_method_counters(NULL); } // Called when the method_holder is getting linked. Setup entrypoints so the method @@ -1545,28 +1549,34 @@ void Method::clear_all_breakpoints() { int Method::invocation_count() { + MethodCounters *mcs = method_counters(); if (TieredCompilation) { MethodData* const mdo = method_data(); - if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { + if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) || + ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); + return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) + + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0); } } else { - return invocation_counter()->count(); + return (mcs == NULL) ? 0 : mcs->invocation_counter()->count(); } } int Method::backedge_count() { + MethodCounters *mcs = method_counters(); if (TieredCompilation) { MethodData* const mdo = method_data(); - if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { + if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) || + ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) { return InvocationCounter::count_limit; } else { - return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); + return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) + + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0); } } else { - return backedge_counter()->count(); + return (mcs == NULL) ? 
0 : mcs->backedge_counter()->count(); } } @@ -1621,12 +1631,12 @@ void BreakpointInfo::set(Method* method) { assert(orig_bytecode() == code, "original bytecode must be the same"); } #endif + Thread *thread = Thread::current(); *method->bcp_from(_bci) = Bytecodes::_breakpoint; - method->incr_number_of_breakpoints(); + method->incr_number_of_breakpoints(thread); SystemDictionary::notice_modification(); { // Deoptimize all dependents on this method - Thread *thread = Thread::current(); HandleMark hm(thread); methodHandle mh(thread, method); Universe::flush_dependents_on_method(mh); @@ -1636,7 +1646,7 @@ void BreakpointInfo::set(Method* method) { void BreakpointInfo::clear(Method* method) { *method->bcp_from(_bci) = orig_bytecode(); assert(method->number_of_breakpoints() > 0, "must not go negative"); - method->decr_number_of_breakpoints(); + method->decr_number_of_breakpoints(Thread::current()); } // jmethodID handling diff --git a/hotspot/src/share/vm/oops/method.hpp b/hotspot/src/share/vm/oops/method.hpp index ea92383d1c2..3ca007f304b 100644 --- a/hotspot/src/share/vm/oops/method.hpp +++ b/hotspot/src/share/vm/oops/method.hpp @@ -31,6 +31,7 @@ #include "interpreter/invocationCounter.hpp" #include "oops/annotations.hpp" #include "oops/constantPool.hpp" +#include "oops/methodCounters.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.hpp" #include "oops/typeArrayOop.hpp" @@ -100,6 +101,7 @@ class CheckedExceptionElement; class LocalVariableTableElement; class AdapterHandlerEntry; class MethodData; +class MethodCounters; class ConstMethod; class InlineTableSizes; class KlassSizeStats; @@ -109,7 +111,7 @@ class Method : public Metadata { private: ConstMethod* _constMethod; // Method read-only data. MethodData* _method_data; - int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) + MethodCounters* _method_counters; AccessFlags _access_flags; // Access flags int _vtable_index; // vtable index of this method (see VtableIndexFlag) // note: can have vtables with >2**16 elements (because of inheritance) @@ -124,15 +126,6 @@ class Method : public Metadata { _hidden : 1, _dont_inline : 1, : 3; - u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting - u2 _number_of_breakpoints; // fullspeed debugging support - InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations - InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations - -#ifdef TIERED - float _rate; // Events (invocation and backedge counter increments) per millisecond - jlong _prev_time; // Previous time the rate was acquired -#endif #ifndef PRODUCT int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) @@ -247,11 +240,31 @@ class Method : public Metadata { void clear_all_breakpoints(); // Tracking number of breakpoints, for fullspeed debugging. // Only mutated by VM thread. 
- u2 number_of_breakpoints() const { return _number_of_breakpoints; } - void incr_number_of_breakpoints() { ++_number_of_breakpoints; } - void decr_number_of_breakpoints() { --_number_of_breakpoints; } + u2 number_of_breakpoints() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->number_of_breakpoints(); + } + } + void incr_number_of_breakpoints(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->incr_number_of_breakpoints(); + } + } + void decr_number_of_breakpoints(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->decr_number_of_breakpoints(); + } + } // Initialization only - void clear_number_of_breakpoints() { _number_of_breakpoints = 0; } + void clear_number_of_breakpoints() { + if (method_counters() != NULL) { + method_counters()->clear_number_of_breakpoints(); + } + } // index into InstanceKlass methods() array // note: also used by jfr @@ -288,14 +301,20 @@ class Method : public Metadata { void set_highest_osr_comp_level(int level); // Count of times method was exited via exception while interpreting - void interpreter_throwout_increment() { - if (_interpreter_throwout_count < 65534) { - _interpreter_throwout_count++; + void interpreter_throwout_increment(TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->interpreter_throwout_increment(); } } - int interpreter_throwout_count() const { return _interpreter_throwout_count; } - void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; } + int interpreter_throwout_count() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->interpreter_throwout_count(); + } + } // size of parameters int size_of_parameters() const { return constMethod()->size_of_parameters(); } @@ -339,23 +358,54 @@ class Method : public Metadata { MethodData* method_data() const { return _method_data; } + void set_method_data(MethodData* data) { _method_data = data; } - // invocation counter - InvocationCounter* invocation_counter() { return &_invocation_counter; } - InvocationCounter* backedge_counter() { return &_backedge_counter; } + MethodCounters* method_counters() const { + return _method_counters; + } + + + void set_method_counters(MethodCounters* counters) { + _method_counters = counters; + } #ifdef TIERED // We are reusing interpreter_invocation_count as a holder for the previous event count! // We can do that since interpreter_invocation_count is not used in tiered. - int prev_event_count() const { return _interpreter_invocation_count; } - void set_prev_event_count(int count) { _interpreter_invocation_count = count; } - jlong prev_time() const { return _prev_time; } - void set_prev_time(jlong time) { _prev_time = time; } - float rate() const { return _rate; } - void set_rate(float rate) { _rate = rate; } + int prev_event_count() const { + if (method_counters() == NULL) { + return 0; + } else { + return method_counters()->interpreter_invocation_count(); + } + } + void set_prev_event_count(int count, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_interpreter_invocation_count(count); + } + } + jlong prev_time() const { + return method_counters() == NULL ? 
0 : method_counters()->prev_time(); + } + void set_prev_time(jlong time, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_prev_time(time); + } + } + float rate() const { + return method_counters() == NULL ? 0 : method_counters()->rate(); + } + void set_rate(float rate, TRAPS) { + MethodCounters* mcs = get_method_counters(CHECK); + if (mcs != NULL) { + mcs->set_rate(rate); + } + } #endif int invocation_count(); @@ -366,14 +416,17 @@ class Method : public Metadata { static void build_interpreter_method_data(methodHandle method, TRAPS); + static MethodCounters* build_method_counters(Method* m, TRAPS); + int interpreter_invocation_count() { if (TieredCompilation) return invocation_count(); - else return _interpreter_invocation_count; + else return (method_counters() == NULL) ? 0 : + method_counters()->interpreter_invocation_count(); } - void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; } - int increment_interpreter_invocation_count() { + int increment_interpreter_invocation_count(TRAPS) { if (TieredCompilation) ShouldNotReachHere(); - return ++_interpreter_invocation_count; + MethodCounters* mcs = get_method_counters(CHECK_0); + return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count(); } #ifndef PRODUCT @@ -582,12 +635,12 @@ class Method : public Metadata { #endif /* CC_INTERP */ static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); } static ByteSize code_offset() { return byte_offset_of(Method, _code); } - static ByteSize invocation_counter_offset() { return byte_offset_of(Method, _invocation_counter); } - static ByteSize backedge_counter_offset() { return byte_offset_of(Method, _backedge_counter); } static ByteSize method_data_offset() { return byte_offset_of(Method, _method_data); } - static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(Method, _interpreter_invocation_count); } + static ByteSize method_counters_offset() { + return byte_offset_of(Method, _method_counters); + } #ifndef PRODUCT static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); } #endif // not PRODUCT @@ -598,8 +651,6 @@ class Method : public Metadata { // for code generation static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); } - static int interpreter_invocation_counter_offset_in_bytes() - { return offset_of(Method, _interpreter_invocation_count); } static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); } static int intrinsic_id_size_in_bytes() { return sizeof(u1); } @@ -757,6 +808,13 @@ class Method : public Metadata { private: void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); + MethodCounters* get_method_counters(TRAPS) { + if (_method_counters == NULL) { + build_method_counters(this, CHECK_AND_CLEAR_NULL); + } + return _method_counters; + } + public: bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); } diff --git a/hotspot/src/share/vm/oops/methodCounters.cpp b/hotspot/src/share/vm/oops/methodCounters.cpp new file mode 100644 index 00000000000..53d3e682b77 --- /dev/null +++ b/hotspot/src/share/vm/oops/methodCounters.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ +#include "precompiled.hpp" +#include "oops/methodCounters.hpp" +#include "runtime/thread.inline.hpp" + +MethodCounters* MethodCounters::allocate(ClassLoaderData* loader_data, TRAPS) { + return new(loader_data, size(), false, THREAD) MethodCounters(); +} + +void MethodCounters::clear_counters() { + invocation_counter()->reset(); + backedge_counter()->reset(); + set_interpreter_throwout_count(0); + set_interpreter_invocation_count(0); +} diff --git a/hotspot/src/share/vm/oops/methodCounters.hpp b/hotspot/src/share/vm/oops/methodCounters.hpp new file mode 100644 index 00000000000..0a6c895b328 --- /dev/null +++ b/hotspot/src/share/vm/oops/methodCounters.hpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_OOPS_METHODCOUNTERS_HPP +#define SHARE_VM_OOPS_METHODCOUNTERS_HPP + +#include "oops/metadata.hpp" +#include "interpreter/invocationCounter.hpp" + +class MethodCounters: public MetaspaceObj { + friend class VMStructs; + private: + int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) + u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting + u2 _number_of_breakpoints; // fullspeed debugging support + InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations + InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequencey-based optimizations + +#ifdef TIERED + float _rate; // Events (invocation and backedge counter increments) per millisecond + jlong _prev_time; // Previous time the rate was acquired +#endif + + MethodCounters() : _interpreter_invocation_count(0), + _interpreter_throwout_count(0), + _number_of_breakpoints(0) +#ifdef TIERED + , _rate(0), + _prev_time(0) +#endif + { + invocation_counter()->init(); + backedge_counter()->init(); + } + + public: + static MethodCounters* allocate(ClassLoaderData* loader_data, TRAPS); + + void deallocate_contents(ClassLoaderData* loader_data) {} + DEBUG_ONLY(bool on_stack() { return false; }) // for template + + static int size() { return sizeof(MethodCounters) / wordSize; } + + bool is_klass() const { return false; } + + void clear_counters(); + + int interpreter_invocation_count() { + return _interpreter_invocation_count; + } + void set_interpreter_invocation_count(int count) { + _interpreter_invocation_count = count; + } + int increment_interpreter_invocation_count() { + return ++_interpreter_invocation_count; + } + + void interpreter_throwout_increment() { + if (_interpreter_throwout_count < 65534) { + _interpreter_throwout_count++; + } + } + int interpreter_throwout_count() const { + return _interpreter_throwout_count; + } + void set_interpreter_throwout_count(int count) { + _interpreter_throwout_count = count; + } + + u2 number_of_breakpoints() const { return _number_of_breakpoints; } + void incr_number_of_breakpoints() { ++_number_of_breakpoints; } + void decr_number_of_breakpoints() { --_number_of_breakpoints; } + void clear_number_of_breakpoints() { _number_of_breakpoints = 0; } + +#ifdef TIERED + jlong prev_time() const { return _prev_time; } + void set_prev_time(jlong time) { _prev_time = time; } + float rate() const { return _rate; } + void set_rate(float rate) { _rate = rate; } +#endif + + // invocation counter + InvocationCounter* invocation_counter() { return &_invocation_counter; } + InvocationCounter* backedge_counter() { return &_backedge_counter; } + + static ByteSize interpreter_invocation_counter_offset() { + return byte_offset_of(MethodCounters, _interpreter_invocation_count); + } + + static ByteSize invocation_counter_offset() { + return byte_offset_of(MethodCounters, _invocation_counter); + } + + static ByteSize backedge_counter_offset() { + return byte_offset_of(MethodCounters, _backedge_counter); + } + + static int interpreter_invocation_counter_offset_in_bytes() { + return offset_of(MethodCounters, _interpreter_invocation_count); + } + +}; +#endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP diff --git a/hotspot/src/share/vm/oops/methodData.cpp b/hotspot/src/share/vm/oops/methodData.cpp index e43b93bafc0..b1a024f3d7c 100644 --- a/hotspot/src/share/vm/oops/methodData.cpp +++ 
b/hotspot/src/share/vm/oops/methodData.cpp @@ -732,14 +732,17 @@ int MethodData::mileage_of(Method* method) { } else { int iic = method->interpreter_invocation_count(); if (mileage < iic) mileage = iic; - InvocationCounter* ic = method->invocation_counter(); - InvocationCounter* bc = method->backedge_counter(); - int icval = ic->count(); - if (ic->carry()) icval += CompileThreshold; - if (mileage < icval) mileage = icval; - int bcval = bc->count(); - if (bc->carry()) bcval += CompileThreshold; - if (mileage < bcval) mileage = bcval; + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); + int icval = ic->count(); + if (ic->carry()) icval += CompileThreshold; + if (mileage < icval) mileage = icval; + int bcval = bc->count(); + if (bc->carry()) bcval += CompileThreshold; + if (mileage < bcval) mileage = bcval; + } } return mileage; } diff --git a/hotspot/src/share/vm/opto/parseHelper.cpp b/hotspot/src/share/vm/opto/parseHelper.cpp index f2a1bd2bef4..e9486088dc9 100644 --- a/hotspot/src/share/vm/opto/parseHelper.cpp +++ b/hotspot/src/share/vm/opto/parseHelper.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -337,19 +337,21 @@ void Parse::increment_and_test_invocation_counter(int limit) { if (!count_invocations()) return; // Get the Method* node. - const TypePtr* adr_type = TypeMetadataPtr::make(method()); - Node *method_node = makecon(adr_type); + ciMethod* m = method(); + address counters_adr = m->ensure_method_counters(); - // Load the interpreter_invocation_counter from the Method*. - int offset = Method::interpreter_invocation_counter_offset_in_bytes(); - Node* adr_node = basic_plus_adr(method_node, method_node, offset); - Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type); + Node* ctrl = control(); + const TypePtr* adr_type = TypeRawPtr::make(counters_adr); + Node *counters_node = makecon(adr_type); + Node* adr_iic_node = basic_plus_adr(counters_node, counters_node, + MethodCounters::interpreter_invocation_counter_offset_in_bytes()); + Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type); test_counter_against_threshold(cnt, limit); // Add one to the counter and store Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1))); - store_to_memory( NULL, adr_node, incr, T_INT, adr_type ); + store_to_memory( ctrl, adr_iic_node, incr, T_INT, adr_type ); } //----------------------------method_data_addressing--------------------------- diff --git a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp index ce5804a6052..81c2b74739d 100644 --- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp +++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -74,10 +74,11 @@ void AdvancedThresholdPolicy::initialize() { // update_rate() is called from select_task() while holding a compile queue lock. void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) { + JavaThread* THREAD = JavaThread::current(); if (is_old(m)) { // We don't remove old methods from the queue, // so we can just zero the rate. - m->set_rate(0); + m->set_rate(0, THREAD); return; } @@ -93,13 +94,13 @@ void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) { if (delta_s >= TieredRateUpdateMinTime) { // And we must've taken the previous point at least 1ms before. if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { - m->set_prev_time(t); - m->set_prev_event_count(event_count); - m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond + m->set_prev_time(t, THREAD); + m->set_prev_event_count(event_count, THREAD); + m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond } else if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { // If nothing happened for 25ms, zero the rate. Don't modify prev values. - m->set_rate(0); + m->set_rate(0, THREAD); } } } diff --git a/hotspot/src/share/vm/runtime/compilationPolicy.cpp b/hotspot/src/share/vm/runtime/compilationPolicy.cpp index 713163a1001..cec42ae9195 100644 --- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp +++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -197,8 +197,10 @@ void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) { // BUT also make sure the method doesn't look like it was never executed. // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). - m->invocation_counter()->set_carry(); - m->backedge_counter()->set_carry(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + mcs->invocation_counter()->set_carry(); + mcs->backedge_counter()->set_carry(); assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed"); } @@ -206,8 +208,10 @@ void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) { void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { // Delay next back-branch event but pump up invocation counter to triger // whole method compilation. 
- InvocationCounter* i = m->invocation_counter(); - InvocationCounter* b = m->backedge_counter(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* i = mcs->invocation_counter(); + InvocationCounter* b = mcs->backedge_counter(); // Don't set invocation_counter's value too low otherwise the method will // look like immature (ic < ~5300) which prevents the inlining based on @@ -226,7 +230,10 @@ void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { class CounterDecay : public AllStatic { static jlong _last_timestamp; static void do_method(Method* m) { - m->invocation_counter()->decay(); + MethodCounters* mcs = m->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + } } public: static void decay(); @@ -264,30 +271,43 @@ void NonTieredCompPolicy::do_safepoint_work() { void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { ScopeDesc* sd = trap_scope; + MethodCounters* mcs; + InvocationCounter* c; for (; !sd->is_top(); sd = sd->sender()) { - // Reset ICs of inlined methods, since they can trigger compilations also. - sd->method()->invocation_counter()->reset(); + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + // Reset ICs of inlined methods, since they can trigger compilations also. + mcs->invocation_counter()->reset(); + } } - InvocationCounter* c = sd->method()->invocation_counter(); - if (is_osr) { - // It was an OSR method, so bump the count higher. - c->set(c->state(), CompileThreshold); - } else { - c->reset(); + mcs = sd->method()->method_counters(); + if (mcs != NULL) { + c = mcs->invocation_counter(); + if (is_osr) { + // It was an OSR method, so bump the count higher. + c->set(c->state(), CompileThreshold); + } else { + c->reset(); + } + mcs->backedge_counter()->reset(); } - sd->method()->backedge_counter()->reset(); } // This method can be called by any component of the runtime to notify the policy // that it's recommended to delay the complation of this method. 
void NonTieredCompPolicy::delay_compilation(Method* method) { - method->invocation_counter()->decay(); - method->backedge_counter()->decay(); + MethodCounters* mcs = method->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL"); + mcs->invocation_counter()->decay(); + mcs->backedge_counter()->decay(); } void NonTieredCompPolicy::disable_compilation(Method* method) { - method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); - method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + MethodCounters* mcs = method->method_counters(); + if (mcs != NULL) { + mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); + mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); + } } CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) { @@ -370,8 +390,10 @@ nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, i #ifndef PRODUCT void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) { if (TraceInvocationCounterOverflow) { - InvocationCounter* ic = m->invocation_counter(); - InvocationCounter* bc = m->backedge_counter(); + MethodCounters* mcs = m->method_counters(); + assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); + InvocationCounter* ic = mcs->invocation_counter(); + InvocationCounter* bc = mcs->backedge_counter(); ResourceMark rm; const char* msg = bci == InvocationEntryBci diff --git a/hotspot/src/share/vm/runtime/fprofiler.cpp b/hotspot/src/share/vm/runtime/fprofiler.cpp index ce910b45fca..111c4db5aa3 100644 --- a/hotspot/src/share/vm/runtime/fprofiler.cpp +++ b/hotspot/src/share/vm/runtime/fprofiler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -421,7 +421,8 @@ class interpretedNode : public ProfilerNode { void print_method_on(outputStream* st) { ProfilerNode::print_method_on(st); - if (Verbose) method()->invocation_counter()->print_short(); + MethodCounters* mcs = method()->method_counters(); + if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short(); } }; diff --git a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp index fbb0f89c380..0752fac9d79 100644 --- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp +++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -153,8 +153,10 @@ void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) { // Set carry flags on the counters if necessary void SimpleThresholdPolicy::handle_counter_overflow(Method* method) { - set_carry_if_necessary(method->invocation_counter()); - set_carry_if_necessary(method->backedge_counter()); + MethodCounters *mcs = method->method_counters(); + assert(mcs != NULL, ""); + set_carry_if_necessary(mcs->invocation_counter()); + set_carry_if_necessary(mcs->backedge_counter()); MethodData* mdo = method->method_data(); if (mdo != NULL) { set_carry_if_necessary(mdo->invocation_counter()); diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp index b3e445f3d9e..f26d86db5c2 100644 --- a/hotspot/src/share/vm/runtime/vmStructs.cpp +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp @@ -77,6 +77,7 @@ #include "oops/klass.hpp" #include "oops/markOop.hpp" #include "oops/methodData.hpp" +#include "oops/methodCounters.hpp" #include "oops/method.hpp" #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.hpp" @@ -348,16 +349,17 @@ typedef BinaryTreeDictionary MetablockTreeDictionary; nonstatic_field(MethodData, _arg_local, intx) \ nonstatic_field(MethodData, _arg_stack, intx) \ nonstatic_field(MethodData, _arg_returned, intx) \ - nonstatic_field(Method, _constMethod, ConstMethod*) \ - nonstatic_field(Method, _method_data, MethodData*) \ - nonstatic_field(Method, _interpreter_invocation_count, int) \ + nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \ + nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ + nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \ + nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \ + nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \ + nonstatic_field(Method, _constMethod, ConstMethod*) \ + nonstatic_field(Method, _method_data, MethodData*) \ + nonstatic_field(Method, _method_counters, MethodCounters*) \ nonstatic_field(Method, _access_flags, AccessFlags) \ nonstatic_field(Method, _vtable_index, int) \ nonstatic_field(Method, _method_size, u2) \ - nonstatic_field(Method, _interpreter_throwout_count, u2) \ - nonstatic_field(Method, _number_of_breakpoints, u2) \ - nonstatic_field(Method, _invocation_counter, InvocationCounter) \ - nonstatic_field(Method, _backedge_counter, InvocationCounter) \ nonproduct_nonstatic_field(Method, _compiled_invocation_count, int) \ volatile_nonstatic_field(Method, _code, nmethod*) \ nonstatic_field(Method, _i2i_entry, address) \ @@ -1381,6 +1383,7 @@ typedef BinaryTreeDictionary MetablockTreeDictionary; declare_type(ConstantPoolCache, MetaspaceObj) \ declare_type(MethodData, Metadata) \ declare_type(Method, Metadata) \ + declare_type(MethodCounters, MetaspaceObj) \ declare_type(ConstMethod, MetaspaceObj) \ \ declare_toplevel_type(Symbol) \ From 37fe1421a84ea88556ff0afbfb19ba00973ebbec Mon Sep 17 00:00:00 2001 From: Zhengyu Gu Date: Wed, 10 Apr 2013 08:55:50 -0400 Subject: [PATCH 011/162] 8010151: nsk/regression/b6653214 fails "assert(snapshot != NULL) failed: Worker should not be started" Fixed a racing condition when shutting down NMT while worker thread is being started, also fixed a few mis-declared volatile pointers. 
Reviewed-by: dholmes, dlong --- hotspot/src/share/vm/runtime/thread.hpp | 4 +-- .../src/share/vm/services/memTrackWorker.cpp | 11 ++++---- .../src/share/vm/services/memTrackWorker.hpp | 4 ++- hotspot/src/share/vm/services/memTracker.cpp | 26 +++++++++---------- hotspot/src/share/vm/services/memTracker.hpp | 8 +++--- 5 files changed, 27 insertions(+), 26 deletions(-) diff --git a/hotspot/src/share/vm/runtime/thread.hpp b/hotspot/src/share/vm/runtime/thread.hpp index 259a4765cf4..d64ca311c65 100644 --- a/hotspot/src/share/vm/runtime/thread.hpp +++ b/hotspot/src/share/vm/runtime/thread.hpp @@ -1056,11 +1056,11 @@ class JavaThread: public Thread { #if INCLUDE_NMT // native memory tracking inline MemRecorder* get_recorder() const { return (MemRecorder*)_recorder; } - inline void set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; } + inline void set_recorder(MemRecorder* rc) { _recorder = rc; } private: // per-thread memory recorder - volatile MemRecorder* _recorder; + MemRecorder* volatile _recorder; #endif // INCLUDE_NMT // Suspend/resume support for JavaThread diff --git a/hotspot/src/share/vm/services/memTrackWorker.cpp b/hotspot/src/share/vm/services/memTrackWorker.cpp index 8c38d1a3731..3e9bcd2f6c4 100644 --- a/hotspot/src/share/vm/services/memTrackWorker.cpp +++ b/hotspot/src/share/vm/services/memTrackWorker.cpp @@ -39,7 +39,7 @@ void GenerationData::reset() { } } -MemTrackWorker::MemTrackWorker() { +MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) { // create thread uses cgc thread type for now. We should revisit // the option, or create new thread type. _has_error = !os::create_thread(this, os::cgc_thread); @@ -88,8 +88,7 @@ void MemTrackWorker::run() { assert(MemTracker::is_on(), "native memory tracking is off"); this->initialize_thread_local_storage(); this->record_stack_base_and_size(); - MemSnapshot* snapshot = MemTracker::get_snapshot(); - assert(snapshot != NULL, "Worker should not be started"); + assert(_snapshot != NULL, "Worker should not be started"); MemRecorder* rec; unsigned long processing_generation = 0; bool worker_idle = false; @@ -109,7 +108,7 @@ void MemTrackWorker::run() { } // merge the recorder into staging area - if (!snapshot->merge(rec)) { + if (!_snapshot->merge(rec)) { MemTracker::shutdown(MemTracker::NMT_out_of_memory); } else { NOT_PRODUCT(_merge_count ++;) @@ -132,7 +131,7 @@ void MemTrackWorker::run() { _head = (_head + 1) % MAX_GENERATIONS; } // promote this generation data to snapshot - if (!snapshot->promote(number_of_classes)) { + if (!_snapshot->promote(number_of_classes)) { // failed to promote, means out of memory MemTracker::shutdown(MemTracker::NMT_out_of_memory); } @@ -140,7 +139,7 @@ void MemTrackWorker::run() { // worker thread is idle worker_idle = true; MemTracker::report_worker_idle(); - snapshot->wait(1000); + _snapshot->wait(1000); ThreadCritical tc; // check if more data arrived if (!_gen[_head].has_more_recorder()) { diff --git a/hotspot/src/share/vm/services/memTrackWorker.hpp b/hotspot/src/share/vm/services/memTrackWorker.hpp index be80e294d58..964aad31db8 100644 --- a/hotspot/src/share/vm/services/memTrackWorker.hpp +++ b/hotspot/src/share/vm/services/memTrackWorker.hpp @@ -85,8 +85,10 @@ class MemTrackWorker : public NamedThread { bool _has_error; + MemSnapshot* _snapshot; + public: - MemTrackWorker(); + MemTrackWorker(MemSnapshot* snapshot); ~MemTrackWorker(); _NOINLINE_ void* operator new(size_t size); _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant); 
diff --git a/hotspot/src/share/vm/services/memTracker.cpp b/hotspot/src/share/vm/services/memTracker.cpp index 13b6de80a21..3d4393073fc 100644 --- a/hotspot/src/share/vm/services/memTracker.cpp +++ b/hotspot/src/share/vm/services/memTracker.cpp @@ -53,12 +53,12 @@ void SyncThreadRecorderClosure::do_thread(Thread* thread) { } -MemRecorder* MemTracker::_global_recorder = NULL; +MemRecorder* volatile MemTracker::_global_recorder = NULL; MemSnapshot* MemTracker::_snapshot = NULL; MemBaseline MemTracker::_baseline; Mutex* MemTracker::_query_lock = NULL; -volatile MemRecorder* MemTracker::_merge_pending_queue = NULL; -volatile MemRecorder* MemTracker::_pooled_recorders = NULL; +MemRecorder* volatile MemTracker::_merge_pending_queue = NULL; +MemRecorder* volatile MemTracker::_pooled_recorders = NULL; MemTrackWorker* MemTracker::_worker_thread = NULL; int MemTracker::_sync_point_skip_count = 0; MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off; @@ -128,7 +128,7 @@ void MemTracker::start() { _snapshot = new (std::nothrow)MemSnapshot(); if (_snapshot != NULL) { - if (!_snapshot->out_of_memory() && start_worker()) { + if (!_snapshot->out_of_memory() && start_worker(_snapshot)) { _state = NMT_started; NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack()); return; @@ -209,7 +209,7 @@ void MemTracker::final_shutdown() { // delete all pooled recorders void MemTracker::delete_all_pooled_recorders() { // free all pooled recorders - volatile MemRecorder* cur_head = _pooled_recorders; + MemRecorder* volatile cur_head = _pooled_recorders; if (cur_head != NULL) { MemRecorder* null_ptr = NULL; while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, @@ -543,14 +543,14 @@ void MemTracker::sync() { /* * Start worker thread. */ -bool MemTracker::start_worker() { - assert(_worker_thread == NULL, "Just Check"); - _worker_thread = new (std::nothrow) MemTrackWorker(); - if (_worker_thread == NULL || _worker_thread->has_error()) { - if (_worker_thread != NULL) { - delete _worker_thread; - _worker_thread = NULL; - } +bool MemTracker::start_worker(MemSnapshot* snapshot) { + assert(_worker_thread == NULL && _snapshot != NULL, "Just Check"); + _worker_thread = new (std::nothrow) MemTrackWorker(snapshot); + if (_worker_thread == NULL) { + return false; + } else if (_worker_thread->has_error()) { + delete _worker_thread; + _worker_thread = NULL; return false; } _worker_thread->start(); diff --git a/hotspot/src/share/vm/services/memTracker.hpp b/hotspot/src/share/vm/services/memTracker.hpp index ebcc41500d3..a7d06755288 100644 --- a/hotspot/src/share/vm/services/memTracker.hpp +++ b/hotspot/src/share/vm/services/memTracker.hpp @@ -421,7 +421,7 @@ class MemTracker : AllStatic { private: // start native memory tracking worker thread - static bool start_worker(); + static bool start_worker(MemSnapshot* snapshot); // called by worker thread to complete shutdown process static void final_shutdown(); @@ -475,18 +475,18 @@ class MemTracker : AllStatic { // a thread can start to allocate memory before it is attached // to VM 'Thread', those memory activities are recorded here. // ThreadCritical is required to guard this global recorder. 
- static MemRecorder* _global_recorder; + static MemRecorder* volatile _global_recorder; // main thread id debug_only(static intx _main_thread_tid;) // pending recorders to be merged - static volatile MemRecorder* _merge_pending_queue; + static MemRecorder* volatile _merge_pending_queue; NOT_PRODUCT(static volatile jint _pending_recorder_count;) // pooled memory recorders - static volatile MemRecorder* _pooled_recorders; + static MemRecorder* volatile _pooled_recorders; // memory recorder pool management, uses following // counter to determine if a released memory recorder From 01e43be718963d47cdd27ccd5f91cfbfbdd351c7 Mon Sep 17 00:00:00 2001 From: Christian Thalinger Date: Fri, 12 Apr 2013 12:22:59 -0700 Subject: [PATCH 012/162] 8011933: add number of classes, methods and time spent to CompileTheWorld Reviewed-by: jrose, kvn --- .../src/share/vm/classfile/classLoader.cpp | 28 ++++++++++++------- .../src/share/vm/classfile/classLoader.hpp | 5 ++-- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/hotspot/src/share/vm/classfile/classLoader.cpp b/hotspot/src/share/vm/classfile/classLoader.cpp index 7e92f0661c8..5b9358b98e5 100644 --- a/hotspot/src/share/vm/classfile/classLoader.cpp +++ b/hotspot/src/share/vm/classfile/classLoader.cpp @@ -1274,13 +1274,16 @@ void ClassLoader::compile_the_world() { Handle system_class_loader (THREAD, SystemDictionary::java_system_loader()); // Iterate over all bootstrap class path entries ClassPathEntry* e = _first_entry; + jlong start = os::javaTimeMillis(); while (e != NULL) { // We stop at rt.jar, unless it is the first bootstrap path entry if (e->is_rt_jar() && e != _first_entry) break; e->compile_the_world(system_class_loader, CATCH); e = e->next(); } - tty->print_cr("CompileTheWorld : Done"); + jlong end = os::javaTimeMillis(); + tty->print_cr("CompileTheWorld : Done (%d classes, %d methods, %d ms)", + _compile_the_world_class_counter, _compile_the_world_method_counter, (end - start)); { // Print statistics as if before normal exit: extern void print_statistics(); @@ -1289,7 +1292,8 @@ void ClassLoader::compile_the_world() { vm_exit(0); } -int ClassLoader::_compile_the_world_counter = 0; +int ClassLoader::_compile_the_world_class_counter = 0; +int ClassLoader::_compile_the_world_method_counter = 0; static int _codecache_sweep_counter = 0; // Filter out all exceptions except OOMs @@ -1311,8 +1315,8 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { // If the file has a period after removing .class, it's not really a // valid class file. The class loader will check everything else. 
if (strchr(buffer, '.') == NULL) { - _compile_the_world_counter++; - if (_compile_the_world_counter > CompileTheWorldStopAt) return; + _compile_the_world_class_counter++; + if (_compile_the_world_class_counter > CompileTheWorldStopAt) return; // Construct name without extension TempNewSymbol sym = SymbolTable::new_symbol(buffer, CHECK); @@ -1329,16 +1333,16 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { if (HAS_PENDING_EXCEPTION) { // If something went wrong in preloading we just ignore it clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer); + tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_class_counter, buffer); } } - if (_compile_the_world_counter >= CompileTheWorldStartAt) { + if (_compile_the_world_class_counter >= CompileTheWorldStartAt) { if (k.is_null() || exception_occurred) { // If something went wrong (e.g. ExceptionInInitializerError) we skip this class - tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer); + tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_class_counter, buffer); } else { - tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer); + tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer); // Preload all classes to get around uncommon traps // Iterate over all methods in class for (int n = 0; n < k->methods()->length(); n++) { @@ -1356,7 +1360,9 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string()); + } else { + _compile_the_world_method_counter++; } if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) { // Clobber the first compile and force second tier compilation @@ -1370,7 +1376,9 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) { methodHandle(), 0, "CTW", THREAD); if (HAS_PENDING_EXCEPTION) { clear_pending_exception_if_not_oom(CHECK); - tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string()); + tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string()); + } else { + _compile_the_world_method_counter++; } } } diff --git a/hotspot/src/share/vm/classfile/classLoader.hpp b/hotspot/src/share/vm/classfile/classLoader.hpp index aa68ed2d0c1..786914cad22 100644 --- a/hotspot/src/share/vm/classfile/classLoader.hpp +++ b/hotspot/src/share/vm/classfile/classLoader.hpp @@ -340,11 +340,12 @@ class ClassLoader: AllStatic { // Force compilation of all methods in all classes in bootstrap class path (stress test) #ifndef PRODUCT private: - static int _compile_the_world_counter; + static int _compile_the_world_class_counter; + static int _compile_the_world_method_counter; public: static void compile_the_world(); static void compile_the_world_in(char* name, Handle loader, TRAPS); - static int compile_the_world_counter() { return _compile_the_world_counter; } + static int compile_the_world_counter() { return _compile_the_world_class_counter; } #endif //PRODUCT }; From 4ac4649aabfe803de91f98f74c2faaa19715ec5e Mon Sep 17 
00:00:00 2001 From: Christian Thalinger Date: Fri, 12 Apr 2013 15:43:07 -0700 Subject: [PATCH 013/162] 8011678: test/Makefile should pick up JT_HOME environment variable Reviewed-by: kvn --- hotspot/test/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hotspot/test/Makefile b/hotspot/test/Makefile index 7bd42a1e28a..f8bccc6693b 100644 --- a/hotspot/test/Makefile +++ b/hotspot/test/Makefile @@ -162,7 +162,9 @@ clean: # jtreg tests # Expect JT_HOME to be set for jtreg tests. (home for jtreg) -JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg +ifndef JT_HOME + JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg +endif ifdef JPRT_JTREG_HOME JT_HOME = $(JPRT_JTREG_HOME) endif From 55c4ce1ca1ed5cc8369b9e7524e6bf8afcb9c281 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 12 Apr 2013 15:53:30 -0700 Subject: [PATCH 014/162] 7104565: trim jprt build targets Remove JPRT debug builds, remove -DDEBUG -DFASTDEBUG and use ASSERT instead in sources Reviewed-by: dholmes, kvn, coleenp --- hotspot/make/Makefile | 63 ++++++++++++------- hotspot/make/bsd/Makefile | 44 ++++++------- hotspot/make/bsd/makefiles/buildtree.make | 16 +++-- hotspot/make/bsd/makefiles/debug.make | 19 +++--- hotspot/make/bsd/makefiles/defs.make | 11 ++-- hotspot/make/bsd/makefiles/fastdebug.make | 6 +- hotspot/make/bsd/makefiles/jvmg.make | 42 ------------- hotspot/make/bsd/makefiles/profiled.make | 30 --------- hotspot/make/jprt.properties | 49 ++++----------- hotspot/make/linux/Makefile | 43 ++++++------- hotspot/make/linux/makefiles/buildtree.make | 16 +++-- hotspot/make/linux/makefiles/debug.make | 19 +++--- hotspot/make/linux/makefiles/defs.make | 25 ++++---- hotspot/make/linux/makefiles/fastdebug.make | 6 +- hotspot/make/linux/makefiles/jvmg.make | 42 ------------- hotspot/make/linux/makefiles/profiled.make | 30 --------- hotspot/make/solaris/Makefile | 28 +++++---- hotspot/make/solaris/makefiles/buildtree.make | 20 +++--- hotspot/make/solaris/makefiles/debug.make | 20 +++--- hotspot/make/solaris/makefiles/defs.make | 13 ++-- hotspot/make/solaris/makefiles/fastdebug.make | 8 +-- hotspot/make/solaris/makefiles/jvmg.make | 56 ----------------- hotspot/make/solaris/makefiles/profiled.make | 44 ------------- hotspot/make/windows/build.make | 10 +-- hotspot/make/windows/makefiles/defs.make | 6 +- hotspot/make/windows/makefiles/vm.make | 6 +- .../projectfiles/compiler2/ADLCompiler.dsp | 2 +- .../projectfiles/tiered/ADLCompiler.dsp | 2 +- hotspot/src/cpu/sparc/vm/frame_sparc.cpp | 4 +- .../src/os/bsd/dtrace/generateJvmOffsets.cpp | 6 +- .../os/solaris/dtrace/generateJvmOffsets.cpp | 8 +-- hotspot/src/os/windows/vm/os_windows.cpp | 3 - hotspot/src/share/tools/hsdis/Makefile | 7 +-- .../src/share/vm/classfile/stackMapFrame.hpp | 6 +- .../concurrentMarkSweepGeneration.cpp | 14 ++--- hotspot/src/share/vm/memory/allocation.hpp | 4 +- hotspot/src/share/vm/runtime/vmThread.cpp | 2 +- 37 files changed, 225 insertions(+), 505 deletions(-) delete mode 100644 hotspot/make/bsd/makefiles/jvmg.make delete mode 100644 hotspot/make/bsd/makefiles/profiled.make delete mode 100644 hotspot/make/linux/makefiles/jvmg.make delete mode 100644 hotspot/make/linux/makefiles/profiled.make delete mode 100644 hotspot/make/solaris/makefiles/jvmg.make delete mode 100644 hotspot/make/solaris/makefiles/profiled.make diff --git a/hotspot/make/Makefile b/hotspot/make/Makefile index 8dad67f074e..e1fda57c4c6 100644 --- a/hotspot/make/Makefile +++ b/hotspot/make/Makefile @@ -19,7 +19,7 @@ # Please contact 
Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Top level gnumake file for hotspot builds @@ -85,15 +85,15 @@ else endif # Typical C1/C2 targets made available with this Makefile -C1_VM_TARGETS=product1 fastdebug1 optimized1 jvmg1 -C2_VM_TARGETS=product fastdebug optimized jvmg -ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero -SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark -MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 jvmgminimal1 +C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1 +C2_VM_TARGETS=product fastdebug optimized debug +ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero +SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark +MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1 COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug -COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 docs export_debug +COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug # JDK directory list JDK_DIRS=bin include jre lib demo @@ -103,13 +103,13 @@ all: all_product all_fastdebug ifeq ($(JVM_VARIANT_MINIMAL1),true) all_product: productminimal1 all_fastdebug: fastdebugminimal1 -all_debug: jvmgminimal1 +all_debug: debugminimal1 endif ifdef BUILD_CLIENT_ONLY all_product: product1 docs export_product all_fastdebug: fastdebug1 docs export_fastdebug -all_debug: jvmg1 docs export_debug +all_debug: debug1 docs export_debug else ifeq ($(MACOSX_UNIVERSAL),true) all_product: universal_product @@ -127,13 +127,13 @@ all_optimized: optimized optimized1 docs export_optimized allzero: all_productzero all_fastdebugzero all_productzero: productzero docs export_product all_fastdebugzero: fastdebugzero docs export_fastdebug -all_debugzero: jvmgzero docs export_debug +all_debugzero: debugzero docs export_debug all_optimizedzero: optimizedzero docs export_optimized allshark: all_productshark all_fastdebugshark all_productshark: productshark docs export_product all_fastdebugshark: fastdebugshark docs export_fastdebug -all_debugshark: jvmgshark docs export_debug +all_debugshark: debugshark docs export_debug all_optimizedshark: optimizedshark docs export_optimized # Do everything @@ -227,7 +227,7 @@ generic_buildshark: $(MKDIR) -p $(OUTPUTDIR) $(CD) $(OUTPUTDIR); \ $(MAKE) -f $(ABS_OS_MAKEFILE) \ - $(MAKE_ARGS) $(VM_TARGET) + $(MAKE_ARGS) $(VM_TARGET) generic_buildminimal1: ifeq ($(JVM_VARIANT_MINIMAL1),true) @@ -260,7 +260,7 @@ export_fastdebug: EXPORT_SUBDIR=/$(@:export_%=%) \ generic_export export_debug: - $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=${VM_DEBUG} \ + $(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \ EXPORT_SUBDIR=/$(@:export_%=%) \ generic_export export_optimized: @@ -281,7 +281,7 @@ export_fastdebug_jdk:: ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \ generic_export export_debug_jdk:: - $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=${VM_DEBUG} \ + $(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=$(@:export_%_jdk=%) \ ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \ generic_export @@ -323,7 +323,7 @@ endif # Bin files (windows) ifeq ($(OSNAME),windows) -# Get jvm.lib +# Get jvm.lib $(EXPORT_LIB_DIR)/%.lib: $(MISC_DIR)/%.lib $(install-file) @@ -541,11 +541,11 @@ generic_test: @$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -showversion -help # C2 test targets -test_product test_optimized test_fastdebug test_jvmg: 
+test_product test_optimized test_fastdebug test_debug: @$(MAKE) generic_test ALTJVM_DIR="$(C2_DIR)/$(@:test_%=%)" # C1 test targets -test_product1 test_optimized1 test_fastdebug1 test_jvmg1: +test_product1 test_optimized1 test_fastdebug1 test_debug1: ifeq ($(ARCH_DATA_MODEL), 32) @$(MAKE) generic_test ALTJVM_DIR="$(C1_DIR)/$(@:test_%1=%)" else @@ -553,15 +553,15 @@ test_product1 test_optimized1 test_fastdebug1 test_jvmg1: endif # Zero test targets -test_productzero test_optimizedzero test_fastdebugzero test_jvmgzero: +test_productzero test_optimizedzero test_fastdebugzero test_debugzero: @$(MAKE) generic_test ALTJVM_DIR="$(ZERO_DIR)/$(@:test_%zero=%)" # Shark test targets -test_productshark test_optimizedshark test_fastdebugshark test_jvmgshark: +test_productshark test_optimizedshark test_fastdebugshark test_debugshark: @$(MAKE) generic_test ALTJVM_DIR="$(SHARK_DIR)/$(@:test_%shark=%)" # Minimal1 test targets -test_productminimal1 test_optimizedminimal1 test_fastdebugminimal1 test_jvmgminimal1: +test_productminimal1 test_optimizedminimal1 test_fastdebugminimal1 test_debugminimal1: @$(MAKE) generic_test ALTJVM_DIR="$(MINIMAL1_DIR)/$(@:test_%minimal1=%)" @@ -626,7 +626,7 @@ help: intro_help target_help variable_help notes_help examples_help # Intro help message intro_help: @$(ECHO) \ -"Makefile for the Hotspot workspace." +"Makefile for the Hotspot workspace." @$(ECHO) \ "Default behavior is to build and create an export area for the j2se builds." @@ -637,7 +637,7 @@ target_help: @$(ECHO) "world: Same as: all create_jdk" @$(ECHO) "all_product: Same as: product product1 export_product" @$(ECHO) "all_fastdebug: Same as: fastdebug fastdebug1 export_fastdebug" - @$(ECHO) "all_debug: Same as: jvmg jvmg1 export_debug" + @$(ECHO) "all_debug: Same as: debug debug1 export_debug" @$(ECHO) "all_optimized: Same as: optimized optimized1 export_optimized" @$(ECHO) "clean: Clean all areas" @$(ECHO) "export_product: Export product files to EXPORT_PATH" @@ -730,7 +730,7 @@ examples_help: @$(ECHO) \ " $(MAKE) world" @$(ECHO) \ -" $(MAKE) ALT_BOOTDIR=/opt/java/jdk$(PREVIOUS_JDK_VERSION)" +" $(MAKE) ALT_BOOTDIR=/opt/java/jdk$(PREVIOUS_JDK_VERSION)" @$(ECHO) \ " $(MAKE) ALT_JDK_IMPORT_PATH=/opt/java/jdk$(JDK_VERSION)" @@ -741,6 +741,23 @@ include $(GAMMADIR)/make/$(OSNAME)/makefiles/universal.gmk endif endif +# Compatibility for transition to new naming +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgminimal1: warn_jvmg_deprecated debugminimal1 + +jvmgcore: warn_jvmg_deprecated debugcore + +jvmgzero: warn_jvmg_deprecated debugzero + +jvmgshark: warn_jvmg_deprecated debugshark + # JPRT rule to build this workspace include $(GAMMADIR)/make/jprt.gmk diff --git a/hotspot/make/bsd/Makefile b/hotspot/make/bsd/Makefile index 024aef9cba6..d9058c24e25 100644 --- a/hotspot/make/bsd/Makefile +++ b/hotspot/make/bsd/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
# # This code is free software; you can redistribute it and/or modify it @@ -142,55 +142,43 @@ VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH)) # # debug compiler2 __compiler2/debug # fastdebug compiler2 __compiler2/fastdebug -# jvmg compiler2 __compiler2/jvmg # optimized compiler2 __compiler2/optimized -# profiled compiler2 __compiler2/profiled # product compiler2 __compiler2/product # # debug1 compiler1 __compiler1/debug # fastdebug1 compiler1 __compiler1/fastdebug -# jvmg1 compiler1 __compiler1/jvmg # optimized1 compiler1 __compiler1/optimized -# profiled1 compiler1 __compiler1/profiled # product1 compiler1 __compiler1/product # # debugcore core __core/debug # fastdebugcore core __core/fastdebug -# jvmgcore core __core/jvmg # optimizedcore core __core/optimized -# profiledcore core __core/profiled # productcore core __core/product # # debugzero zero __zero/debug # fastdebugzero zero __zero/fastdebug -# jvmgzero zero __zero/jvmg # optimizedzero zero __zero/optimized -# profiledzero zero __zero/profiled # productzero zero __zero/product # # debugshark shark __shark/debug # fastdebugshark shark __shark/fastdebug -# jvmgshark shark __shark/jvmg # optimizedshark shark __shark/optimized -# profiledshark shark __shark/profiled # productshark shark __shark/product # # fastdebugminimal1 minimal1 __minimal1/fastdebug -# jvmgminimal1 minimal1 __minimal1/jvmg +# debugminimal1 minimal1 __minimal1/debug # productminimal1 minimal1 __minimal1/product # # What you get with each target: # -# debug* - "thin" libjvm - debug info linked into the gamma launcher +# debug* - debug compile with asserts enabled # fastdebug* - optimized compile, but with asserts enabled -# jvmg* - "fat" libjvm - debug info linked into libjvm.so # optimized* - optimized compile, no asserts -# profiled* - gprof # product* - the shippable thing: optimized compile, no asserts, -DPRODUCT # This target list needs to be coordinated with the usage message # in the build.sh script: -TARGETS = debug jvmg fastdebug optimized profiled product +TARGETS = debug fastdebug optimized product ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true) SUBDIR_DOCS = $(OSNAME)_$(VARIANTARCH)_docs @@ -354,15 +342,29 @@ docs: checks $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs # Synonyms for win32-like targets. -compiler2: jvmg product +compiler2: debug product -compiler1: jvmg1 product1 +compiler1: debug1 product1 -core: jvmgcore productcore +core: debugcore productcore -zero: jvmgzero productzero +zero: debugzero productzero -shark: jvmgshark productshark +shark: debugshark productshark + +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgcore: warn_jvmg_deprecated debugcore + +jvmgzero: warn_jvmg_deprecated debugzero + +jvmgshark: warn_jvmg_deprecated debugshark clean_docs: rm -rf $(SUBDIR_DOCS) diff --git a/hotspot/make/bsd/makefiles/buildtree.make b/hotspot/make/bsd/makefiles/buildtree.make index 752e0febb76..9d0a2174c54 100644 --- a/hotspot/make/bsd/makefiles/buildtree.make +++ b/hotspot/make/bsd/makefiles/buildtree.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Usage: @@ -46,11 +46,11 @@ # Makefile - for "make foo" # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles -# adlc.make - +# adlc.make - # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # env.[ck]sh - environment settings -# +# # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -122,7 +122,7 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/jvmtifiles \ $(PLATFORM_DIR)/generated/dtracefiles -TARGETS = debug fastdebug jvmg optimized product profiled +TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) # For dependencies and recursive makes. @@ -186,8 +186,8 @@ $(SIMPLE_DIRS): $(QUIETLY) mkdir -p $@ # Convenience macro which takes a source relative path, applies $(1) to the -# absolute path, and then replaces $(GAMMADIR) in the result with a -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. +# absolute path, and then replaces $(GAMMADIR) in the result with a +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) # This bit is needed to enable local rebuilds. @@ -279,8 +279,6 @@ flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - [ "$(TARGET)" = profiled ] && \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ ) > $@ @@ -381,7 +379,7 @@ jdkpath.sh: $(BUILDTREE_MAKE) $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo "JDK=${JAVA_HOME}"; \ - ) > $@ + ) > $@ FORCE: diff --git a/hotspot/make/bsd/makefiles/debug.make b/hotspot/make/bsd/makefiles/debug.make index 5849ed67eed..c14d974fbfb 100644 --- a/hotspot/make/bsd/makefiles/debug.make +++ b/hotspot/make/bsd/makefiles/debug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -27,17 +27,16 @@ # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) -CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug -_JUNK_ := $(shell echo -e >&2 ""\ - "----------------------------------------------------------------------\n" \ - "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \ - "Please use 'make jvmg' to build debug JVM. 
\n" \ - "----------------------------------------------------------------------\n") - VERSION = debug -SYSDEFS += -DASSERT -DDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/bsd/makefiles/defs.make b/hotspot/make/bsd/makefiles/defs.make index 4c56c973892..14d3d4aa2ca 100644 --- a/hotspot/make/bsd/makefiles/defs.make +++ b/hotspot/make/bsd/makefiles/defs.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot bsd builds. @@ -86,7 +86,7 @@ ifneq (,$(findstring $(ARCH), amd64 x86_64)) VM_PLATFORM = bsd_i486 HS_ARCH = x86 # We have to reset ARCH to i386 since SRCARCH relies on it - ARCH = i386 + ARCH = i386 endif endif @@ -146,9 +146,6 @@ else LIBRARY_SUFFIX=so endif -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=jvmg - EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html # client and server subdirectories have symbolic links to ../libjsig.so @@ -177,7 +174,7 @@ ifeq ($(JVM_VARIANT_MINIMAL1),true) else EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo endif - endif + endif endif # Serviceability Binaries diff --git a/hotspot/make/bsd/makefiles/fastdebug.make b/hotspot/make/bsd/makefiles/fastdebug.make index c59310634c3..951891ed813 100644 --- a/hotspot/make/bsd/makefiles/fastdebug.make +++ b/hotspot/make/bsd/makefiles/fastdebug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013 Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -59,5 +59,5 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug VERSION = optimized -SYSDEFS += -DASSERT -DFASTDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/bsd/makefiles/jvmg.make b/hotspot/make/bsd/makefiles/jvmg.make deleted file mode 100644 index 52dbdb94b98..00000000000 --- a/hotspot/make/bsd/makefiles/jvmg.make +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). 
-# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making debug version of VM - -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) - -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ - -# Set the environment variable HOTSPARC_GENERIC to "true" -# to inhibit the effect of the previous line on CFLAGS. - -# Linker mapfile -MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug - -VERSION = debug -SYSDEFS += -DASSERT -DDEBUG -PICFLAGS = DEFAULT diff --git a/hotspot/make/bsd/makefiles/profiled.make b/hotspot/make/bsd/makefiles/profiled.make deleted file mode 100644 index fa5c9153b23..00000000000 --- a/hotspot/make/bsd/makefiles/profiled.make +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making profiled version of Gamma VM -# (It is also optimized.) 
- -CFLAGS += -pg -AOUT_FLAGS += -pg -LDNOMAP = true diff --git a/hotspot/make/jprt.properties b/hotspot/make/jprt.properties index 8bf107758c9..af7f321b722 100644 --- a/hotspot/make/jprt.properties +++ b/hotspot/make/jprt.properties @@ -133,15 +133,15 @@ jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}} # Standard list of jprt build targets for this source tree jprt.build.targets.standard= \ - ${jprt.my.solaris.sparc}-{product|fastdebug|debug}, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug|debug}, \ - ${jprt.my.solaris.i586}-{product|fastdebug|debug}, \ - ${jprt.my.solaris.x64}-{product|fastdebug|debug}, \ - ${jprt.my.linux.i586}-{product|fastdebug|debug}, \ + ${jprt.my.solaris.sparc}-{product|fastdebug}, \ + ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \ + ${jprt.my.solaris.i586}-{product|fastdebug}, \ + ${jprt.my.solaris.x64}-{product|fastdebug}, \ + ${jprt.my.linux.i586}-{product|fastdebug}, \ ${jprt.my.linux.x64}-{product|fastdebug}, \ - ${jprt.my.macosx.x64}-{product|fastdebug|debug}, \ - ${jprt.my.windows.i586}-{product|fastdebug|debug}, \ - ${jprt.my.windows.x64}-{product|fastdebug|debug}, \ + ${jprt.my.macosx.x64}-{product|fastdebug}, \ + ${jprt.my.windows.i586}-{product|fastdebug}, \ + ${jprt.my.windows.x64}-{product|fastdebug}, \ ${jprt.my.linux.armvh}-{product|fastdebug} jprt.build.targets.open= \ @@ -150,7 +150,7 @@ jprt.build.targets.open= \ ${jprt.my.linux.x64}-{productOpen} jprt.build.targets.embedded= \ - ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \ + ${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \ ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \ ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \ ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \ @@ -174,21 +174,18 @@ jprt.my.solaris.sparc.test.targets= \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \ ${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \ ${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_default, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_default, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \ ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \ @@ -201,21 +198,18 @@ jprt.my.solaris.sparcv9.test.targets= \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \ ${jprt.my.solaris.sparcv9}-product-c2-runThese, \ - 
${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \ ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \ @@ -229,21 +223,18 @@ jprt.my.solaris.x64.test.targets= \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \ ${jprt.my.solaris.x64}-product-c2-runThese, \ ${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \ ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ @@ -258,28 +249,24 @@ jprt.my.solaris.i586.test.targets= \ ${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \ ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \ ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.solaris.i586}-product-c1-GCBasher_default, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \ ${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_default, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, 
\ ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \ - ${jprt.my.solaris.i586}-product-c1-GCOld_default, \ ${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \ ${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \ ${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \ ${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \ ${jprt.my.solaris.i586}-product-c1-GCOld_G1, \ ${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \ - ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \ ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \ ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \ ${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \ @@ -293,21 +280,19 @@ jprt.my.linux.i586.test.targets = \ ${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \ ${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \ ${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp, \ - ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_default, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \ ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \ - ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \ + ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \ ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \ @@ -318,21 +303,18 @@ jprt.my.linux.x64.test.targets = \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \ @@ -342,21 +324,18 @@ jprt.my.macosx.x64.test.targets = \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \ 
${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \ - ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \ ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \ @@ -369,14 +348,12 @@ jprt.my.windows.i586.test.targets = \ ${jprt.my.windows.i586}-product-{c1|c2}-runThese, \ ${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \ ${jprt.my.windows.i586}-fastdebug-c1-runThese_Xshare, \ - ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \ ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \ - ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_default, \ ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \ ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \ ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \ @@ -396,14 +373,12 @@ jprt.my.windows.x64.test.targets = \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \ ${jprt.my.windows.x64}-product-c2-runThese, \ ${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_default, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \ - ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_default, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \ ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \ @@ -419,7 +394,7 @@ jprt.my.windows.x64.test.targets = \ # Some basic "smoke" tests for OpenJDK builds jprt.test.targets.open = \ - ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98, \ + ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \ ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \ ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98 @@ 
-520,5 +495,5 @@ jprt.make.rule.test.targets.jdk7u8=${jprt.make.rule.test.targets.jdk7} jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}} # 7155453: Work-around to prevent popups on OSX from blocking test completion -# but the work-around is added to all platforms to be consistent +# but the work-around is added to all platforms to be consistent jprt.jbb.options=-Djava.awt.headless=true diff --git a/hotspot/make/linux/Makefile b/hotspot/make/linux/Makefile index 04d0b8e5c64..0820c254612 100644 --- a/hotspot/make/linux/Makefile +++ b/hotspot/make/linux/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -142,55 +142,42 @@ VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH)) # # debug compiler2 __compiler2/debug # fastdebug compiler2 __compiler2/fastdebug -# jvmg compiler2 __compiler2/jvmg # optimized compiler2 __compiler2/optimized -# profiled compiler2 __compiler2/profiled # product compiler2 __compiler2/product # # debug1 compiler1 __compiler1/debug # fastdebug1 compiler1 __compiler1/fastdebug -# jvmg1 compiler1 __compiler1/jvmg # optimized1 compiler1 __compiler1/optimized -# profiled1 compiler1 __compiler1/profiled # product1 compiler1 __compiler1/product # # debugcore core __core/debug # fastdebugcore core __core/fastdebug -# jvmgcore core __core/jvmg # optimizedcore core __core/optimized -# profiledcore core __core/profiled # productcore core __core/product # # debugzero zero __zero/debug # fastdebugzero zero __zero/fastdebug -# jvmgzero zero __zero/jvmg # optimizedzero zero __zero/optimized -# profiledzero zero __zero/profiled # productzero zero __zero/product # # debugshark shark __shark/debug # fastdebugshark shark __shark/fastdebug -# jvmgshark shark __shark/jvmg # optimizedshark shark __shark/optimized -# profiledshark shark __shark/profiled # productshark shark __shark/product # # fastdebugminimal1 minimal1 __minimal1/fastdebug -# jvmgminimal1 minimal1 __minimal1/jvmg # productminimal1 minimal1 __minimal1/product # # What you get with each target: # -# debug* - "thin" libjvm - debug info linked into the gamma launcher +# debug* - debug compile with asserts enabled # fastdebug* - optimized compile, but with asserts enabled -# jvmg* - "fat" libjvm - debug info linked into libjvm.so # optimized* - optimized compile, no asserts -# profiled* - gprof # product* - the shippable thing: optimized compile, no asserts, -DPRODUCT # This target list needs to be coordinated with the usage message # in the build.sh script: -TARGETS = debug jvmg fastdebug optimized profiled product +TARGETS = debug fastdebug optimized product ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true) SUBDIR_DOCS = $(OSNAME)_$(VARIANTARCH)_docs @@ -357,15 +344,29 @@ docs: checks $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs # Synonyms for win32-like targets. 
-compiler2: jvmg product +compiler2: debug product -compiler1: jvmg1 product1 +compiler1: debug1 product1 -core: jvmgcore productcore +core: debugcore productcore -zero: jvmgzero productzero +zero: debugzero productzero -shark: jvmgshark productshark +shark: debugshark productshark + +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgcore: warn_jvmg_deprecated debugcore + +jvmgzero: warn_jvmg_deprecated debugzero + +jvmgshark: warn_jvmg_deprecated debugshark clean_docs: rm -rf $(SUBDIR_DOCS) diff --git a/hotspot/make/linux/makefiles/buildtree.make b/hotspot/make/linux/makefiles/buildtree.make index f980dcdafe0..0b7c12001b3 100644 --- a/hotspot/make/linux/makefiles/buildtree.make +++ b/hotspot/make/linux/makefiles/buildtree.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Usage: @@ -46,11 +46,11 @@ # Makefile - for "make foo" # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles -# adlc.make - +# adlc.make - # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # env.[ck]sh - environment settings -# +# # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. @@ -117,7 +117,7 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles -TARGETS = debug fastdebug jvmg optimized product profiled +TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) # For dependencies and recursive makes. @@ -179,8 +179,8 @@ $(SIMPLE_DIRS): $(QUIETLY) mkdir -p $@ # Convenience macro which takes a source relative path, applies $(1) to the -# absolute path, and then replaces $(GAMMADIR) in the result with a -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. +# absolute path, and then replaces $(GAMMADIR) in the result with a +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) # This bit is needed to enable local rebuilds. @@ -284,8 +284,6 @@ flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - [ "$(TARGET)" = profiled ] && \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ ) > $@ @@ -376,7 +374,7 @@ jdkpath.sh: $(BUILDTREE_MAKE) $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo "JDK=${JAVA_HOME}"; \ - ) > $@ + ) > $@ FORCE: diff --git a/hotspot/make/linux/makefiles/debug.make b/hotspot/make/linux/makefiles/debug.make index e51d4c192d2..7c57280a12c 100644 --- a/hotspot/make/linux/makefiles/debug.make +++ b/hotspot/make/linux/makefiles/debug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. 
-# +# # # Sets make macros for making debug version of VM @@ -27,17 +27,16 @@ # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) -CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. # Linker mapfile MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug -_JUNK_ := $(shell echo -e >&2 ""\ - "----------------------------------------------------------------------\n" \ - "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \ - "Please use 'make jvmg' to build debug JVM. \n" \ - "----------------------------------------------------------------------\n") - VERSION = debug -SYSDEFS += -DASSERT -DDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/linux/makefiles/defs.make b/hotspot/make/linux/makefiles/defs.make index 9bdbdf226e6..778830814ac 100644 --- a/hotspot/make/linux/makefiles/defs.make +++ b/hotspot/make/linux/makefiles/defs.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot linux builds. 
@@ -92,7 +92,7 @@ ifneq (,$(findstring $(ARCH), amd64 x86_64)) VM_PLATFORM = linux_i486 HS_ARCH = x86 # We have to reset ARCH to i686 since SRCARCH relies on it - ARCH = i686 + ARCH = i686 endif endif @@ -240,9 +240,6 @@ JDK_INCLUDE_SUBDIR=linux # Library suffix LIBRARY_SUFFIX=so -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=jvmg - EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html # client and server subdirectories have symbolic links to ../libjsig.so @@ -279,7 +276,7 @@ ifeq ($(JVM_VARIANT_CLIENT),true) else EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo endif - endif + endif endif ifeq ($(JVM_VARIANT_MINIMAL1),true) @@ -292,15 +289,15 @@ ifeq ($(JVM_VARIANT_MINIMAL1),true) else EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo endif - endif + endif endif # Serviceability Binaries # No SA Support for PPC, IA64, ARM or zero ADD_SA_BINARIES/x86 = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ - $(EXPORT_LIB_DIR)/sa-jdi.jar + $(EXPORT_LIB_DIR)/sa-jdi.jar ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \ - $(EXPORT_LIB_DIR)/sa-jdi.jar + $(EXPORT_LIB_DIR)/sa-jdi.jar ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) ifeq ($(ZIP_DEBUGINFO_FILES),1) ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz @@ -310,10 +307,10 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo endif endif -ADD_SA_BINARIES/ppc = -ADD_SA_BINARIES/ia64 = -ADD_SA_BINARIES/arm = -ADD_SA_BINARIES/zero = +ADD_SA_BINARIES/ppc = +ADD_SA_BINARIES/ia64 = +ADD_SA_BINARIES/arm = +ADD_SA_BINARIES/zero = -include $(HS_ALT_MAKE)/linux/makefiles/defs.make diff --git a/hotspot/make/linux/makefiles/fastdebug.make b/hotspot/make/linux/makefiles/fastdebug.make index 86ffc36b7c2..abefd2b4ca0 100644 --- a/hotspot/make/linux/makefiles/fastdebug.make +++ b/hotspot/make/linux/makefiles/fastdebug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -59,5 +59,5 @@ CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug VERSION = optimized -SYSDEFS += -DASSERT -DFASTDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/linux/makefiles/jvmg.make b/hotspot/make/linux/makefiles/jvmg.make deleted file mode 100644 index 24047f7c358..00000000000 --- a/hotspot/make/linux/makefiles/jvmg.make +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making debug version of VM - -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) - -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ - -# Set the environment variable HOTSPARC_GENERIC to "true" -# to inhibit the effect of the previous line on CFLAGS. - -# Linker mapfile -MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug - -VERSION = debug -SYSDEFS += -DASSERT -DDEBUG -PICFLAGS = DEFAULT diff --git a/hotspot/make/linux/makefiles/profiled.make b/hotspot/make/linux/makefiles/profiled.make deleted file mode 100644 index fa5c9153b23..00000000000 --- a/hotspot/make/linux/makefiles/profiled.make +++ /dev/null @@ -1,30 +0,0 @@ -# -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making profiled version of Gamma VM -# (It is also optimized.) 
- -CFLAGS += -pg -AOUT_FLAGS += -pg -LDNOMAP = true diff --git a/hotspot/make/solaris/Makefile b/hotspot/make/solaris/Makefile index 7ae82d856f3..7ae2be418d3 100644 --- a/hotspot/make/solaris/Makefile +++ b/hotspot/make/solaris/Makefile @@ -120,37 +120,29 @@ endif # # debug compiler2 __compiler2/debug # fastdebug compiler2 __compiler2/fastdebug -# jvmg compiler2 __compiler2/jvmg # optimized compiler2 __compiler2/optimized -# profiled compiler2 __compiler2/profiled # product compiler2 __compiler2/product # # debug1 compiler1 __compiler1/debug # fastdebug1 compiler1 __compiler1/fastdebug -# jvmg1 compiler1 __compiler1/jvmg # optimized1 compiler1 __compiler1/optimized -# profiled1 compiler1 __compiler1/profiled # product1 compiler1 __compiler1/product # # debugcore core __core/debug # fastdebugcore core __core/fastdebug -# jvmgcore core __core/jvmg # optimizedcore core __core/optimized -# profiledcore core __core/profiled # productcore core __core/product # # What you get with each target: # -# debug* - "thin" libjvm - debug info linked into the gamma launcher +# debug* - debug compile with asserts enabled # fastdebug* - optimized compile, but with asserts enabled -# jvmg* - "fat" libjvm - debug info linked into libjvm.so # optimized* - optimized compile, no asserts -# profiled* - gprof # product* - the shippable thing: optimized compile, no asserts, -DPRODUCT # This target list needs to be coordinated with the usage message # in the build.sh script: -TARGETS = debug jvmg fastdebug optimized profiled product +TARGETS = debug fastdebug optimized product SUBDIR_DOCS = $(OSNAME)_$(BUILDARCH)_docs SUBDIRS_C1 = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS)) @@ -267,11 +259,21 @@ docs: checks $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs # Synonyms for win32-like targets. -compiler2: jvmg product +compiler2: debug product -compiler1: jvmg1 product1 +compiler1: debug1 product1 -core: jvmgcore productcore +core: debugcore productcore + +warn_jvmg_deprecated: + echo "Warning: The jvmg target has been replaced with debug" + echo "Warning: Please update your usage" + +jvmg: warn_jvmg_deprecated debug + +jvmg1: warn_jvmg_deprecated debug1 + +jvmgcore: warn_jvmg_deprecated debugcore clean_docs: rm -rf $(SUBDIR_DOCS) diff --git a/hotspot/make/solaris/makefiles/buildtree.make b/hotspot/make/solaris/makefiles/buildtree.make index a3ab0b5e52c..989470f293f 100644 --- a/hotspot/make/solaris/makefiles/buildtree.make +++ b/hotspot/make/solaris/makefiles/buildtree.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Usage: @@ -46,11 +46,11 @@ # Makefile - for "make foo" # flags.make - with macro settings # vm.make - to support making "$(MAKE) -v vm.make" in makefiles -# adlc.make - +# adlc.make - # jvmti.make - generate JVMTI bindings from the spec (JSR-163) # sa.make - generate SA jar file and natives # env.[ck]sh - environment settings -# +# # The makefiles are split this way so that "make foo" will run faster by not # having to read the dependency files for the vm. 
@@ -69,7 +69,7 @@ PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).gcc GCC_LIB = /usr/local/lib else PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH) -GCC_LIB = +GCC_LIB = endif ifdef FORCE_TIERED @@ -110,7 +110,7 @@ SIMPLE_DIRS = \ $(PLATFORM_DIR)/generated/adfiles \ $(PLATFORM_DIR)/generated/jvmtifiles -TARGETS = debug fastdebug jvmg optimized product profiled +TARGETS = debug fastdebug optimized product SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS)) # For dependencies and recursive makes. @@ -153,7 +153,7 @@ ifndef OPENJDK endif endif -BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) BUILDTREE = \ $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS) @@ -172,8 +172,8 @@ $(SIMPLE_DIRS): $(QUIETLY) mkdir -p $@ # Convenience macro which takes a source relative path, applies $(1) to the -# absolute path, and then replaces $(GAMMADIR) in the result with a -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. +# absolute path, and then replaces $(GAMMADIR) in the result with a +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile. gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2))) # This bit is needed to enable local rebuilds. @@ -274,8 +274,6 @@ flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo; \ - [ "$(TARGET)" = profiled ] && \ - echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \ echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \ ) > $@ @@ -366,7 +364,7 @@ jdkpath.sh: $(BUILDTREE_MAKE) $(QUIETLY) ( \ $(BUILDTREE_COMMENT); \ echo "JDK=${JAVA_HOME}"; \ - ) > $@ + ) > $@ FORCE: diff --git a/hotspot/make/solaris/makefiles/debug.make b/hotspot/make/solaris/makefiles/debug.make index 602c07dc366..3fba8e1239d 100644 --- a/hotspot/make/solaris/makefiles/debug.make +++ b/hotspot/make/solaris/makefiles/debug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -37,22 +37,20 @@ ifeq ($(COMPILER_REV_NUMERIC),508) endif endif -CFLAGS += $(DEBUG_CFLAGS/BYFILE) +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ + +# Set the environment variable HOTSPARC_GENERIC to "true" +# to inhibit the effect of the previous line on CFLAGS. # Linker mapfiles MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug -# This mapfile is only needed when compiling with dtrace support, +# This mapfile is only needed when compiling with dtrace support, # and mustn't be otherwise. MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) -_JUNK_ := $(shell echo >&2 ""\ - "-------------------------------------------------------------------------\n" \ - "WARNING: 'gnumake debug' is deprecated. 
It will be removed in the future.\n" \ - "Please use 'gnumake jvmg' to build debug JVM. \n" \ - "-------------------------------------------------------------------------\n") - VERSION = debug -SYSDEFS += -DASSERT -DDEBUG +SYSDEFS += -DASSERT PICFLAGS = DEFAULT diff --git a/hotspot/make/solaris/makefiles/defs.make b/hotspot/make/solaris/makefiles/defs.make index 14d0aced5a1..74ca7f70e2a 100644 --- a/hotspot/make/solaris/makefiles/defs.make +++ b/hotspot/make/solaris/makefiles/defs.make @@ -1,5 +1,5 @@ # -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot solaris builds. @@ -172,9 +172,6 @@ JDK_INCLUDE_SUBDIR=solaris # Library suffix LIBRARY_SUFFIX=so -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=jvmg - EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html # client and server subdirectories have symbolic links to ../libjsig.$(LIBRARY_SUFFIX) @@ -221,8 +218,8 @@ ifeq ($(JVM_VARIANT_SERVER),true) endif ifeq ($(JVM_VARIANT_CLIENT),true) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt - EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) - EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX) + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) + EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.$(LIBRARY_SUFFIX) ifeq ($(ARCH_DATA_MODEL),32) EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX) @@ -257,4 +254,4 @@ ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo endif endif -EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar +EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar diff --git a/hotspot/make/solaris/makefiles/fastdebug.make b/hotspot/make/solaris/makefiles/fastdebug.make index fdafd773bd3..3719c3edbda 100644 --- a/hotspot/make/solaris/makefiles/fastdebug.make +++ b/hotspot/make/solaris/makefiles/fastdebug.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Sets make macros for making debug version of VM @@ -118,10 +118,10 @@ CFLAGS += $(DEBUG_CFLAGS/BYFILE) MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug -# This mapfile is only needed when compiling with dtrace support, +# This mapfile is only needed when compiling with dtrace support, # and mustn't be otherwise. 
MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) VERSION = optimized -SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS PICFLAGS = DEFAULT diff --git a/hotspot/make/solaris/makefiles/jvmg.make b/hotspot/make/solaris/makefiles/jvmg.make deleted file mode 100644 index c9102393c8f..00000000000 --- a/hotspot/make/solaris/makefiles/jvmg.make +++ /dev/null @@ -1,56 +0,0 @@ -# -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making debug version of VM - -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) - -ifeq ("${Platform_compiler}", "sparcWorks") - -ifeq ($(COMPILER_REV_NUMERIC),508) - # SS11 SEGV when compiling with -g and -xarch=v8, using different backend - DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0 - DEBUG_CFLAGS/jvmtiTagMap.o = $(DEBUG_CFLAGS) -xO0 -endif -endif - -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ - -# Set the environment variable HOTSPARC_GENERIC to "true" -# to inhibit the effect of the previous line on CFLAGS. - -# Linker mapfiles -MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \ - $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug - -# This mapfile is only needed when compiling with dtrace support, -# and mustn't be otherwise. -MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE) - -VERSION = debug -SYSDEFS += -DASSERT -DDEBUG -PICFLAGS = DEFAULT diff --git a/hotspot/make/solaris/makefiles/profiled.make b/hotspot/make/solaris/makefiles/profiled.make deleted file mode 100644 index cbbdb03bcdb..00000000000 --- a/hotspot/make/solaris/makefiles/profiled.make +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. -# -# - -# Sets make macros for making profiled version of Gamma VM -# (It is also optimized.) - -CFLAGS += -pg - -# On x86 Solaris 2.6, 7, and 8 if LD_LIBRARY_PATH has /usr/lib in it then -# adlc linked with -pg puts out empty header files. To avoid linking adlc -# with -pg the profile flag is split out separately and used in rules.make - -PROF_AOUT_FLAGS += -pg - -# To do a profiled build of the product, such as for generating the -# reordering file, set PROFILE_PRODUCT. Otherwise the reordering file will -# contain references to functions which are not defined in the PRODUCT build. - -ifdef PROFILE_PRODUCT - SYSDEFS += -DPRODUCT -endif - -LDNOMAP = true diff --git a/hotspot/make/windows/build.make b/hotspot/make/windows/build.make index e2f50542523..c072a170135 100644 --- a/hotspot/make/windows/build.make +++ b/hotspot/make/windows/build.make @@ -1,5 +1,5 @@ # -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -235,18 +235,14 @@ product release optimized: checks $(variantDir) $(variantDir)\local.make sanity cd $(variantDir) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product ARCH=$(ARCH) -# The debug or jvmg (all the same thing) is an optional build -debug jvmg: checks $(variantDir) $(variantDir)\local.make sanity +# The debug build is an optional build +debug: checks $(variantDir) $(variantDir)\local.make sanity cd $(variantDir) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=debug ARCH=$(ARCH) fastdebug: checks $(variantDir) $(variantDir)\local.make sanity cd $(variantDir) nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=fastdebug ARCH=$(ARCH) -develop: checks $(variantDir) $(variantDir)\local.make sanity - cd $(variantDir) - nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH) - # target to create just the directory structure tree: checks $(variantDir) $(variantDir)\local.make sanity mkdir $(variantDir)\product diff --git a/hotspot/make/windows/makefiles/defs.make b/hotspot/make/windows/makefiles/defs.make index ca1f327be75..4bee0afa8b0 100644 --- a/hotspot/make/windows/makefiles/defs.make +++ b/hotspot/make/windows/makefiles/defs.make @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # The common definitions for hotspot windows builds. 
@@ -209,8 +209,6 @@ endif ifneq (,$(findstring MINGW,$(SYSTEM_UNAME))) USING_MINGW=true endif -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms -VM_DEBUG=debug # Windows wants particular paths due to nmake (must be after macros defined) # It is important that gnumake invokes nmake with C:\\...\\ formated @@ -292,7 +290,7 @@ ifeq ($(BUILD_WIN_SA), 1) MAKE_ARGS += BUILD_WIN_SA=1 endif -# Propagate compiler and tools paths from configure to nmake. +# Propagate compiler and tools paths from configure to nmake. # Need to make sure they contain \\ and not /. ifneq ($(SPEC),) ifeq ($(USING_CYGWIN), true) diff --git a/hotspot/make/windows/makefiles/vm.make b/hotspot/make/windows/makefiles/vm.make index bd02d42fa4f..9e7c64b8f33 100644 --- a/hotspot/make/windows/makefiles/vm.make +++ b/hotspot/make/windows/makefiles/vm.make @@ -31,11 +31,7 @@ COMMONSRC=$(WorkSpace)\src ALTSRC=$(WorkSpace)\src\closed !ifdef RELEASE -!ifdef DEVELOP -CXX_FLAGS=$(CXX_FLAGS) /D "DEBUG" -!else CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT" -!endif !else CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT" !endif @@ -186,7 +182,7 @@ VM_PATH={$(VM_PATH)} # Special case files not using precompiled header files. -c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp +c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp diff --git a/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp b/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp index a93b8c46eca..557cce42c76 100644 --- a/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp +++ b/hotspot/make/windows/projectfiles/compiler2/ADLCompiler.dsp @@ -67,7 +67,7 @@ LINK32=link.exe # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" # ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." 
/I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c # ADD BASE RSC /l 0x409 # ADD RSC /l 0x409 BSC32=bscmake.exe diff --git a/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp b/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp index a93b8c46eca..557cce42c76 100644 --- a/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp +++ b/hotspot/make/windows/projectfiles/tiered/ADLCompiler.dsp @@ -67,7 +67,7 @@ LINK32=link.exe # PROP Ignore_Export_Lib 0 # PROP Target_Dir "" # ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c # ADD BASE RSC /l 0x409 # ADD RSC /l 0x409 BSC32=bscmake.exe diff --git a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp index 1c368ffe84e..94cef1a9a25 100644 --- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -304,7 +304,7 @@ bool frame::safe_for_sender(JavaThread *thread) { // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else. 
// The cause of this is because at a save instruction the O7 we get is a leftover from an earlier - // window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the + // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding // that initial frame and retrying. diff --git a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp index a21245afdc1..21d979f3efb 100644 --- a/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp +++ b/hotspot/src/os/bsd/dtrace/generateJvmOffsets.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,7 @@ #define PR_MODEL_LP64 2 #ifdef COMPILER1 -#if defined(DEBUG) || defined(FASTDEBUG) +#ifdef ASSERT /* * To avoid the most part of potential link errors @@ -84,7 +84,7 @@ address StubRoutines::_call_stub_return_address = NULL; StubQueue* AbstractInterpreter::_code = NULL; -#endif /* defined(DEBUG) || defined(FASTDEBUG) */ +#endif /* ASSERT */ #endif /* COMPILER1 */ #define GEN_OFFS(Type,Name) \ diff --git a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp index 1a269539695..3fb13e5e97c 100644 --- a/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp +++ b/hotspot/src/os/solaris/dtrace/generateJvmOffsets.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,14 +55,14 @@ #include "utilities/accessFlags.hpp" #include "utilities/globalDefinitions.hpp" #ifdef COMPILER1 -#if defined(DEBUG) || defined(FASTDEBUG) +#ifdef ASSERT /* * To avoid the most part of potential link errors * we link this program with -z nodefs . * * But for 'debug1' and 'fastdebug1' we still have to provide - * a particular workaround for the following symbols bellow. + * a particular workaround for the following symbols below. * It will be good to find out a generic way in the future. 
*/ @@ -79,7 +79,7 @@ address StubRoutines::_call_stub_return_address = NULL; StubQueue* AbstractInterpreter::_code = NULL; -#endif /* defined(DEBUG) || defined(FASTDEBUG) */ +#endif /* ASSERT */ #endif /* COMPILER1 */ #define GEN_OFFS(Type,Name) \ diff --git a/hotspot/src/os/windows/vm/os_windows.cpp b/hotspot/src/os/windows/vm/os_windows.cpp index 0a8034d3217..a9b8136d305 100644 --- a/hotspot/src/os/windows/vm/os_windows.cpp +++ b/hotspot/src/os/windows/vm/os_windows.cpp @@ -4238,9 +4238,6 @@ char * os::native_path(char *path) { path[3] = '\0'; } - #ifdef DEBUG - jio_fprintf(stderr, "sysNativePath: %s\n", path); - #endif DEBUG return path; } diff --git a/hotspot/src/share/tools/hsdis/Makefile b/hotspot/src/share/tools/hsdis/Makefile index 3cfdf57e6bd..2a48f9a81d8 100644 --- a/hotspot/src/share/tools/hsdis/Makefile +++ b/hotspot/src/share/tools/hsdis/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # # Single gnu makefile for solaris, linux and windows (windows requires cygwin and mingw) @@ -66,7 +66,7 @@ ARCH=i386 endif CC = $(MINGW)-gcc CONFIGURE_ARGS= --host=$(MINGW) --target=$(MINGW) -else #linux +else #linux CPU = $(shell uname -m) ARCH1=$(CPU:x86_64=amd64) ARCH=$(ARCH1:i686=i386) @@ -116,7 +116,6 @@ OUTFLAGS += -o $@ else #Windows OS = windows CC = gcc -#CPPFLAGS += /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG" CFLAGS += /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi- CFLAGS += LIBARCH=\"$(LIBARCH)\" DLDFLAGS += /dll /subsystem:windows /incremental:no \ diff --git a/hotspot/src/share/vm/classfile/stackMapFrame.hpp b/hotspot/src/share/vm/classfile/stackMapFrame.hpp index bdeb956ba68..237accec0d4 100644 --- a/hotspot/src/share/vm/classfile/stackMapFrame.hpp +++ b/hotspot/src/share/vm/classfile/stackMapFrame.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -175,14 +175,14 @@ class StackMapFrame : public ResourceObj { ErrorContext* ctx, TRAPS) const; inline void set_mark() { -#ifdef DEBUG +#ifdef ASSERT // Put bogus type to indicate it's no longer valid. 
if (_stack_mark != -1) { for (int i = _stack_mark - 1; i >= _stack_size; --i) { _stack[i] = VerificationType::bogus_type(); } } -#endif // def DEBUG +#endif // def ASSERT _stack_mark = _stack_size; } diff --git a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp index c609f10f620..70a26089437 100644 --- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -6921,7 +6921,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( size = CompactibleFreeListSpace::adjustObjectSize( p->oop_iterate(_scanningClosure)); } - #ifdef DEBUG + #ifdef ASSERT size_t direct_size = CompactibleFreeListSpace::adjustObjectSize(p->size()); assert(size == direct_size, "Inconsistency in size"); @@ -6933,7 +6933,7 @@ size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( assert(_bitMap->isMarked(addr+size-1), "inconsistent Printezis mark"); } - #endif // DEBUG + #endif // ASSERT } else { // an unitialized object assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); @@ -7075,14 +7075,14 @@ bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) { HeapWord* addr = (HeapWord*)p; assert(_span.contains(addr), "we are scanning the CMS generation"); bool is_obj_array = false; - #ifdef DEBUG + #ifdef ASSERT if (!_parallel) { assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); assert(_collector->overflow_list_is_empty(), "overflow list should be empty"); } - #endif // DEBUG + #endif // ASSERT if (_bit_map->isMarked(addr)) { // Obj arrays are precisely marked, non-arrays are not; // so we scan objArrays precisely and non-arrays in their @@ -7102,14 +7102,14 @@ bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) { } } } - #ifdef DEBUG + #ifdef ASSERT if (!_parallel) { assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); assert(_collector->overflow_list_is_empty(), "overflow list should be empty"); } - #endif // DEBUG + #endif // ASSERT return is_obj_array; } @@ -8320,7 +8320,7 @@ size_t SweepClosure::do_live_chunk(FreeChunk* fc) { assert(size == CompactibleFreeListSpace::adjustObjectSize(size), "alignment problem"); -#ifdef DEBUG +#ifdef ASSERT if (oop(addr)->klass_or_null() != NULL) { // Ignore mark word because we are running concurrent with mutators assert(oop(addr)->is_oop(true), "live block should be an oop"); diff --git a/hotspot/src/share/vm/memory/allocation.hpp b/hotspot/src/share/vm/memory/allocation.hpp index bc01b0135b0..dd9a13503d8 100644 --- a/hotspot/src/share/vm/memory/allocation.hpp +++ b/hotspot/src/share/vm/memory/allocation.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -178,7 +178,7 @@ const bool NMT_track_callsite = false; #endif // INCLUDE_NMT // debug build does not inline -#if defined(_DEBUG_) +#if defined(_NMT_NOINLINE_) #define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0) #define CALLER_CALLER_PC (NMT_track_callsite ? 
os::get_caller_pc(3) : 0) diff --git a/hotspot/src/share/vm/runtime/vmThread.cpp b/hotspot/src/share/vm/runtime/vmThread.cpp index 286d457c997..8cfc2700835 100644 --- a/hotspot/src/share/vm/runtime/vmThread.cpp +++ b/hotspot/src/share/vm/runtime/vmThread.cpp @@ -123,7 +123,7 @@ VM_Operation* VMOperationQueue::queue_drain(int prio) { _queue[prio]->set_next(_queue[prio]); _queue[prio]->set_prev(_queue[prio]); assert(queue_empty(prio), "drain corrupted queue"); -#ifdef DEBUG +#ifdef ASSERT int len = 0; VM_Operation* cur; for(cur = r; cur != NULL; cur=cur->next()) len++; From 25c31f374227f5f6f9cd0213af40e019c8073867 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 12 Apr 2013 19:14:47 -0700 Subject: [PATCH 015/162] 6443505: Ideal() function for CmpLTMask Repair wrong code generation, added new matching rule Reviewed-by: kvn, twisti --- hotspot/src/cpu/sparc/vm/sparc.ad | 21 ++- hotspot/src/cpu/x86/vm/x86_32.ad | 125 ++++++++++-------- hotspot/src/cpu/x86/vm/x86_64.ad | 62 ++++++--- hotspot/src/share/vm/opto/cfgnode.cpp | 9 +- .../test/compiler/6443505/Test6443505.java | 107 +++++++++++++++ 5 files changed, 241 insertions(+), 83 deletions(-) create mode 100644 hotspot/test/compiler/6443505/Test6443505.java diff --git a/hotspot/src/cpu/sparc/vm/sparc.ad b/hotspot/src/cpu/sparc/vm/sparc.ad index 96accce0256..4107392e6b4 100644 --- a/hotspot/src/cpu/sparc/vm/sparc.ad +++ b/hotspot/src/cpu/sparc/vm/sparc.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -8223,10 +8223,25 @@ instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} - ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) ); - ins_pipe( cadd_cmpltmask ); + ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); + ins_pipe(cadd_cmpltmask); %} +instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ + match(Set p (AndI (CmpLTMask p q) y)); + effect(KILL ccr); + ins_cost(DEFAULT_COST*3); + + format %{ "CMP $p,$q\n\t" + "MOV $y,$p\n\t" + "MOVge G0,$p" %} + ins_encode %{ + __ cmp($p$$Register, $q$$Register); + __ mov($y$$Register, $p$$Register); + __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); + %} + ins_pipe(ialu_reg_reg_ialu); +%} //----------------------------------------------------------------- // Direct raw moves between float and general registers using VIS3. diff --git a/hotspot/src/cpu/x86/vm/x86_32.ad b/hotspot/src/cpu/x86/vm/x86_32.ad index 67f33d3ba27..4560fb2c526 100644 --- a/hotspot/src/cpu/x86/vm/x86_32.ad +++ b/hotspot/src/cpu/x86/vm/x86_32.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -2317,30 +2317,6 @@ encode %{ emit_rm(cbuf, 0x3, $p$$reg, tmpReg); %} - enc_class enc_cmpLTP_mem(rRegI p, rRegI q, memory mem, eCXRegI tmp) %{ // cadd_cmpLT - int tmpReg = $tmp$$reg; - - // SUB $p,$q - emit_opcode(cbuf,0x2B); - emit_rm(cbuf, 0x3, $p$$reg, $q$$reg); - // SBB $tmp,$tmp - emit_opcode(cbuf,0x1B); - emit_rm(cbuf, 0x3, tmpReg, tmpReg); - // AND $tmp,$y - cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand - emit_opcode(cbuf,0x23); - int reg_encoding = tmpReg; - int base = $mem$$base; - int index = $mem$$index; - int scale = $mem$$scale; - int displace = $mem$$disp; - relocInfo::relocType disp_reloc = $mem->disp_reloc(); - encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc); - // ADD $p,$tmp - emit_opcode(cbuf,0x03); - emit_rm(cbuf, 0x3, $p$$reg, tmpReg); - %} - enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{ // TEST shift,32 emit_opcode(cbuf,0xF7); @@ -8922,9 +8898,9 @@ instruct convP2B( rRegI dst, eRegP src, eFlagsReg cr ) %{ %} %} -instruct cmpLTMask( eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr ) %{ +instruct cmpLTMask(eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr) %{ match(Set dst (CmpLTMask p q)); - effect( KILL cr ); + effect(KILL cr); ins_cost(400); // SETlt can only use low byte of EAX,EBX, ECX, or EDX as destination @@ -8932,50 +8908,83 @@ instruct cmpLTMask( eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr ) %{ "CMP $p,$q\n\t" "SETlt $dst\n\t" "NEG $dst" %} - ins_encode( OpcRegReg(0x33,dst,dst), - OpcRegReg(0x3B,p,q), - setLT_reg(dst), neg_reg(dst) ); - ins_pipe( pipe_slow ); + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Rd = $dst$$Register; + Label done; + __ xorl(Rd, Rd); + __ cmpl(Rp, Rq); + __ setb(Assembler::less, Rd); + __ negl(Rd); + %} + + ins_pipe(pipe_slow); %} -instruct cmpLTMask0( rRegI dst, immI0 zero, eFlagsReg cr ) %{ +instruct cmpLTMask0(rRegI dst, immI0 zero, eFlagsReg cr) %{ match(Set dst (CmpLTMask dst zero)); - effect( DEF dst, KILL cr ); + effect(DEF dst, KILL cr); ins_cost(100); - format %{ "SAR $dst,31" %} - opcode(0xC1, 0x7); /* C1 /7 ib */ - ins_encode( RegOpcImm( dst, 0x1F ) ); - ins_pipe( ialu_reg ); + format %{ "SAR $dst,31\t# cmpLTMask0" %} + ins_encode %{ + __ sarl($dst$$Register, 31); + %} + ins_pipe(ialu_reg); %} - -instruct cadd_cmpLTMask( ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp, eFlagsReg cr ) %{ +/* better to save a register than avoid a branch */ +instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); - effect( KILL tmp, KILL cr ); + effect(KILL cr); ins_cost(400); - // annoyingly, $tmp has no edges so you cant ask for it in - // any format or encoding - format %{ "SUB $p,$q\n\t" - "SBB ECX,ECX\n\t" - "AND ECX,$y\n\t" - "ADD $p,ECX" %} - ins_encode( enc_cmpLTP(p,q,y,tmp) ); - ins_pipe( pipe_cmplt ); + format %{ "SUB $p,$q\t# cadd_cmpLTMask\n\t" + "JGE done\n\t" + "ADD $p,$y\n" + "done: " %} + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Ry = $y$$Register; + Label done; + __ subl(Rp, Rq); + __ jccb(Assembler::greaterEqual, done); + __ addl(Rp, Ry); + __ bind(done); + %} + + ins_pipe(pipe_cmplt); +%} + +/* better to save a register than avoid a branch */ +instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{ + match(Set y (AndI (CmpLTMask p q) y)); + effect(KILL cr); + + ins_cost(300); + + format %{ "CMPL $p, $q\t# 
and_cmpLTMask\n\t" + "JLT done\n\t" + "XORL $y, $y\n" + "done: " %} + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Ry = $y$$Register; + Label done; + __ cmpl(Rp, Rq); + __ jccb(Assembler::less, done); + __ xorl(Ry, Ry); + __ bind(done); + %} + + ins_pipe(pipe_cmplt); %} /* If I enable this, I encourage spilling in the inner loop of compress. -instruct cadd_cmpLTMask_mem( ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr ) %{ +instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q))); - effect( USE_KILL tmp, KILL cr ); - ins_cost(400); - - format %{ "SUB $p,$q\n\t" - "SBB ECX,ECX\n\t" - "AND ECX,$y\n\t" - "ADD $p,ECX" %} - ins_encode( enc_cmpLTP_mem(p,q,y,tmp) ); -%} */ //----------Long Instructions------------------------------------------------ diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad index a9b5820e273..2953b8bb41d 100644 --- a/hotspot/src/cpu/x86/vm/x86_64.ad +++ b/hotspot/src/cpu/x86/vm/x86_64.ad @@ -1,5 +1,5 @@ // -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. +// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. // // This code is free software; you can redistribute it and/or modify it @@ -9434,7 +9434,7 @@ instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr) match(Set dst (CmpLTMask p q)); effect(KILL cr); - ins_cost(400); // XXX + ins_cost(400); format %{ "cmpl $p, $q\t# cmpLTMask\n\t" "setlt $dst\n\t" "movzbl $dst, $dst\n\t" @@ -9452,37 +9452,63 @@ instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr) match(Set dst (CmpLTMask dst zero)); effect(KILL cr); - ins_cost(100); // XXX + ins_cost(100); format %{ "sarl $dst, #31\t# cmpLTMask0" %} - opcode(0xC1, 0x7); /* C1 /7 ib */ - ins_encode(reg_opc_imm(dst, 0x1F)); + ins_encode %{ + __ sarl($dst$$Register, 31); + %} ins_pipe(ialu_reg); %} - -instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr) +/* Better to save a register than avoid a branch */ +instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr) %{ match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); - effect(TEMP tmp, KILL cr); - - ins_cost(400); // XXX - format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t" - "sbbl $tmp, $tmp\n\t" - "andl $tmp, $y\n\t" - "addl $p, $tmp" %} + effect(KILL cr); + ins_cost(300); + format %{ "subl $p,$q\t# cadd_cmpLTMask\n\t" + "jge done\n\t" + "addl $p,$y\n" + "done: " %} ins_encode %{ Register Rp = $p$$Register; Register Rq = $q$$Register; Register Ry = $y$$Register; - Register Rt = $tmp$$Register; + Label done; __ subl(Rp, Rq); - __ sbbl(Rt, Rt); - __ andl(Rt, Ry); - __ addl(Rp, Rt); + __ jccb(Assembler::greaterEqual, done); + __ addl(Rp, Ry); + __ bind(done); %} ins_pipe(pipe_cmplt); %} +/* Better to save a register than avoid a branch */ +instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr) +%{ + match(Set y (AndI (CmpLTMask p q) y)); + effect(KILL cr); + + ins_cost(300); + + format %{ "cmpl $p, $q\t# and_cmpLTMask\n\t" + "jlt done\n\t" + "xorl $y, $y\n" + "done: " %} + ins_encode %{ + Register Rp = $p$$Register; + Register Rq = $q$$Register; + Register Ry = $y$$Register; + Label done; + __ cmpl(Rp, Rq); + __ jccb(Assembler::less, done); + __ xorl(Ry, Ry); + __ bind(done); + %} + ins_pipe(pipe_cmplt); +%} + + //---------- FP Instructions------------------------------------------------ 
instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) diff --git a/hotspot/src/share/vm/opto/cfgnode.cpp b/hotspot/src/share/vm/opto/cfgnode.cpp index 12bccd9a538..68fcc9cbb8e 100644 --- a/hotspot/src/share/vm/opto/cfgnode.cpp +++ b/hotspot/src/share/vm/opto/cfgnode.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1306,10 +1306,11 @@ static Node* is_cond_add(PhaseGVN *phase, PhiNode *phi, int true_path) { return NULL; Node *x = n2; - Node *y = n1->in(1); - if( n2 == n1->in(1) ) { + Node *y = NULL; + if( x == n1->in(1) ) { y = n1->in(2); - } else if( n2 == n1->in(1) ) { + } else if( x == n1->in(2) ) { + y = n1->in(1); } else return NULL; // Not so profitable if compare and add are constants diff --git a/hotspot/test/compiler/6443505/Test6443505.java b/hotspot/test/compiler/6443505/Test6443505.java new file mode 100644 index 00000000000..28461b5f7dc --- /dev/null +++ b/hotspot/test/compiler/6443505/Test6443505.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 6443505 + * @summary Some cases for CmpLTMask missed; also wrong code. 
+ * + * @run main/othervm -Xcomp -XX:CompileOnly="Test6443505.compiled" Test6443505 + */ + +public class Test6443505 { + + public static void main(String[] args) throws InterruptedException { + test(Integer.MIN_VALUE, 0); + test(0, Integer.MIN_VALUE); + test(Integer.MIN_VALUE, -1); + test(-1, Integer.MIN_VALUE); + test(Integer.MIN_VALUE, 1); + test(1, Integer.MIN_VALUE); + + test(Integer.MAX_VALUE, 0); + test(0, Integer.MAX_VALUE); + test(Integer.MAX_VALUE, -1); + test(-1, Integer.MAX_VALUE); + test(Integer.MAX_VALUE, 1); + test(1, Integer.MAX_VALUE); + + test(Integer.MIN_VALUE, Integer.MAX_VALUE); + test(Integer.MAX_VALUE, Integer.MIN_VALUE); + + test(1, -1); + test(1, 0); + test(1, 1); + test(-1, -1); + test(-1, 0); + test(-1, 1); + test(0, -1); + test(0, 0); + test(0, 1); + } + + public static void test(int a, int b) throws InterruptedException { + int C = compiled(4, a, b); + int I = interpreted(4, a, b); + if (C != I) { + System.err.println("#1 C = " + C + ", I = " + I); + System.err.println("#1 C != I, FAIL"); + System.exit(97); + } + + C = compiled(a, b, q, 4); + I = interpreted(a, b, q, 4); + if (C != I) { + System.err.println("#2 C = " + C + ", I = " + I); + System.err.println("#2 C != I, FAIL"); + System.exit(97); + } + + } + + static int q = 4; + + // If improperly compiled, uses carry/borrow bit, which is wrong. + // with -XX:+PrintOptoAssembly, look for cadd_cmpLTMask + static int compiled(int p, int x, int y) { + return (x < y) ? q + (x - y) : (x - y); + } + + // interpreted reference + static int interpreted(int p, int x, int y) { + return (x < y) ? q + (x - y) : (x - y); + } + + // Test new code with a range of cases + // with -XX:+PrintOptoAssembly, look for and_cmpLTMask + static int compiled(int x, int y, int q, int p) { + return (x < y) ? p + q : q; + } + + // interpreted reference + static int interpreted(int x, int y, int q, int p) { + return (x < y) ? 
p + q : q; + } + +} From 361d401c11d612c0b0a50cc28c754f21dc71e10a Mon Sep 17 00:00:00 2001 From: Roland Westrelin Date: Mon, 15 Apr 2013 09:42:46 +0200 Subject: [PATCH 016/162] 8011582: assert(nbits == 32 || (-(1 << nbits-1) <= x && x < ( 1 << nbits-1))) failed: value out of range C1 runtime's predicate_failed_trap should use jump_to on sparc Reviewed-by: kvn --- hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp index 6723ef2c352..be4ae63e6aa 100644 --- a/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp +++ b/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp @@ -1000,9 +1000,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); assert(deopt_blob != NULL, "deoptimization blob must have been created"); restore_live_registers(sasm); - __ restore(); - __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type); - __ delayed()->nop(); + + AddressLiteral dest(deopt_blob->unpack_with_reexecution()); + __ jump_to(dest, O0); + __ delayed()->restore(); } break; From 9f96eb3ed5caefc5d1a5dfbdf530852550437e21 Mon Sep 17 00:00:00 2001 From: Roland Westrelin Date: Mon, 15 Apr 2013 17:17:11 +0200 Subject: [PATCH 017/162] 8011648: C1: optimized build is broken after 7153771 Missing #ifdef ASSERT Reviewed-by: kvn --- hotspot/src/share/vm/c1/c1_Canonicalizer.cpp | 2 ++ hotspot/src/share/vm/c1/c1_Canonicalizer.hpp | 2 ++ hotspot/src/share/vm/c1/c1_Instruction.hpp | 2 ++ hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp | 2 ++ hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp | 2 ++ hotspot/src/share/vm/c1/c1_LIR.cpp | 4 ++++ hotspot/src/share/vm/c1/c1_LIR.hpp | 5 ++++- hotspot/src/share/vm/c1/c1_LIRGenerator.cpp | 5 ++--- hotspot/src/share/vm/c1/c1_LIRGenerator.hpp | 2 ++ hotspot/src/share/vm/c1/c1_Optimizer.cpp | 5 ++++- hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp | 2 ++ hotspot/src/share/vm/c1/c1_ValueMap.hpp | 2 ++ 12 files changed, 30 insertions(+), 5 deletions(-) diff --git a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp index a4cda5f904f..b80b199c9c2 100644 --- a/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp +++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.cpp @@ -938,5 +938,7 @@ void Canonicalizer::do_ProfileCall(ProfileCall* x) {} void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {} void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {} void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {} +#ifdef ASSERT void Canonicalizer::do_Assert(Assert* x) {} +#endif void Canonicalizer::do_MemBar(MemBar* x) {} diff --git a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp index b8bcfd7e65f..9e34ac79a31 100644 --- a/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp +++ b/hotspot/src/share/vm/c1/c1_Canonicalizer.hpp @@ -108,7 +108,9 @@ class Canonicalizer: InstructionVisitor { virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT virtual void do_Assert (Assert* x); +#endif }; #endif // SHARE_VM_C1_C1_CANONICALIZER_HPP diff --git a/hotspot/src/share/vm/c1/c1_Instruction.hpp b/hotspot/src/share/vm/c1/c1_Instruction.hpp index b93525bf502..6b1f6ddd380 100644 --- a/hotspot/src/share/vm/c1/c1_Instruction.hpp +++ 
b/hotspot/src/share/vm/c1/c1_Instruction.hpp @@ -111,7 +111,9 @@ class ProfileInvoke; class RuntimeCall; class MemBar; class RangeCheckPredicate; +#ifdef ASSERT class Assert; +#endif // A Value is a reference to the instruction creating the value typedef Instruction* Value; diff --git a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp index 4c88e50cb21..cfca00ab277 100644 --- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp +++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.cpp @@ -871,12 +871,14 @@ void InstructionPrinter::do_RangeCheckPredicate(RangeCheckPredicate* x) { } } +#ifdef ASSERT void InstructionPrinter::do_Assert(Assert* x) { output()->print("assert "); print_value(x->x()); output()->print(" %s ", cond_name(x->cond())); print_value(x->y()); } +#endif void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { print_unsafe_object_op(x, "UnsafePrefetchWrite"); diff --git a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp index d8d6502ebd6..8c80b6c7507 100644 --- a/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp +++ b/hotspot/src/share/vm/c1/c1_InstructionPrinter.hpp @@ -136,7 +136,9 @@ class InstructionPrinter: public InstructionVisitor { virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT virtual void do_Assert (Assert* x); +#endif }; #endif // PRODUCT diff --git a/hotspot/src/share/vm/c1/c1_LIR.cpp b/hotspot/src/share/vm/c1/c1_LIR.cpp index df0828ee555..f26d1812c44 100644 --- a/hotspot/src/share/vm/c1/c1_LIR.cpp +++ b/hotspot/src/share/vm/c1/c1_LIR.cpp @@ -1778,7 +1778,9 @@ const char * LIR_Op::name() const { // LIR_OpProfileCall case lir_profile_call: s = "profile_call"; break; // LIR_OpAssert +#ifdef ASSERT case lir_assert: s = "assert"; break; +#endif case lir_none: ShouldNotReachHere();break; default: s = "illegal_op"; break; } @@ -2025,12 +2027,14 @@ void LIR_OpLock::print_instr(outputStream* out) const { out->print("[lbl:0x%x]", stub()->entry()); } +#ifdef ASSERT void LIR_OpAssert::print_instr(outputStream* out) const { print_condition(out, condition()); out->print(" "); in_opr1()->print(out); out->print(" "); in_opr2()->print(out); out->print(", \""); out->print(msg()); out->print("\""); } +#endif void LIR_OpDelay::print_instr(outputStream* out) const { diff --git a/hotspot/src/share/vm/c1/c1_LIR.hpp b/hotspot/src/share/vm/c1/c1_LIR.hpp index 5bd0e57d6f9..61dd59e3fe9 100644 --- a/hotspot/src/share/vm/c1/c1_LIR.hpp +++ b/hotspot/src/share/vm/c1/c1_LIR.hpp @@ -881,8 +881,9 @@ class LIR_OpLock; class LIR_OpTypeCheck; class LIR_OpCompareAndSwap; class LIR_OpProfileCall; +#ifdef ASSERT class LIR_OpAssert; - +#endif // LIR operation codes enum LIR_Code { @@ -1139,7 +1140,9 @@ class LIR_Op: public CompilationResourceObj { virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } +#ifdef ASSERT virtual LIR_OpAssert* as_OpAssert() { return NULL; } +#endif virtual void verify() const {} }; diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp index ffb2965fdfc..1928df57836 100644 --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp @@ -3103,8 +3103,8 @@ void LIRGenerator::do_RuntimeCall(RuntimeCall* x) { } } -void 
LIRGenerator::do_Assert(Assert *x) { #ifdef ASSERT +void LIRGenerator::do_Assert(Assert *x) { ValueTag tag = x->x()->type()->tag(); If::Condition cond = x->cond(); @@ -3124,9 +3124,8 @@ void LIRGenerator::do_Assert(Assert *x) { LIR_Opr right = yin->result(); __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true); -#endif } - +#endif void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) { diff --git a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp index 4c70a9f64fd..d3c76865dbd 100644 --- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp @@ -537,7 +537,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure { virtual void do_RuntimeCall (RuntimeCall* x); virtual void do_MemBar (MemBar* x); virtual void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT virtual void do_Assert (Assert* x); +#endif }; diff --git a/hotspot/src/share/vm/c1/c1_Optimizer.cpp b/hotspot/src/share/vm/c1/c1_Optimizer.cpp index 74e9d2240db..90dc2797210 100644 --- a/hotspot/src/share/vm/c1/c1_Optimizer.cpp +++ b/hotspot/src/share/vm/c1/c1_Optimizer.cpp @@ -535,7 +535,9 @@ public: void do_RuntimeCall (RuntimeCall* x); void do_MemBar (MemBar* x); void do_RangeCheckPredicate(RangeCheckPredicate* x); +#ifdef ASSERT void do_Assert (Assert* x); +#endif }; @@ -718,8 +720,9 @@ void NullCheckVisitor::do_ProfileInvoke (ProfileInvoke* x) {} void NullCheckVisitor::do_RuntimeCall (RuntimeCall* x) {} void NullCheckVisitor::do_MemBar (MemBar* x) {} void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {} +#ifdef ASSERT void NullCheckVisitor::do_Assert (Assert* x) {} - +#endif void NullCheckEliminator::visit(Value* p) { assert(*p != NULL, "should not find NULL instructions"); diff --git a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp index af6d9d94815..ae1a2556881 100644 --- a/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp +++ b/hotspot/src/share/vm/c1/c1_RangeCheckElimination.hpp @@ -166,7 +166,9 @@ public: void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ }; void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ }; +#ifdef ASSERT void do_Assert (Assert* x) { /* nothing to do */ }; +#endif }; #ifdef ASSERT diff --git a/hotspot/src/share/vm/c1/c1_ValueMap.hpp b/hotspot/src/share/vm/c1/c1_ValueMap.hpp index c76ef46bef4..820d1909efa 100644 --- a/hotspot/src/share/vm/c1/c1_ValueMap.hpp +++ b/hotspot/src/share/vm/c1/c1_ValueMap.hpp @@ -207,7 +207,9 @@ class ValueNumberingVisitor: public InstructionVisitor { void do_RuntimeCall (RuntimeCall* x) { /* nothing to do */ }; void do_MemBar (MemBar* x) { /* nothing to do */ }; void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ }; +#ifdef ASSERT void do_Assert (Assert* x) { /* nothing to do */ }; +#endif }; From fbdab16d851ec916f10fad388ebb8341ed40c399 Mon Sep 17 00:00:00 2001 From: Christian Thalinger Date: Mon, 15 Apr 2013 16:20:05 -0700 Subject: [PATCH 018/162] 7172922: export_ makefile targets do not work unless all supported variants are built Reviewed-by: dholmes, kvn --- hotspot/make/Makefile | 307 +++++++++++++++++++++--------------------- 1 file changed, 156 insertions(+), 151 deletions(-) diff --git a/hotspot/make/Makefile b/hotspot/make/Makefile index e1fda57c4c6..e0d9826e468 100644 --- a/hotspot/make/Makefile +++ b/hotspot/make/Makefile @@ -287,186 +287,191 @@ 
export_debug_jdk:: # Export file copy rules XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt -DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs -C1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1 -C2_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2 -ZERO_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_zero -SHARK_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_shark -C1_DIR=$(C1_BASE_DIR)/$(VM_SUBDIR) -C2_DIR=$(C2_BASE_DIR)/$(VM_SUBDIR) -ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR) -SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR) -MINIMAL1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1 -MINIMAL1_DIR=$(MINIMAL1_BASE_DIR)/$(VM_SUBDIR) +DOCS_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_docs +C1_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1/$(VM_SUBDIR) +C2_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2/$(VM_SUBDIR) +MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1/$(VM_SUBDIR) +ZERO_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_zero/$(VM_SUBDIR) +SHARK_DIR =$(OUTPUTDIR)/$(VM_PLATFORM)_shark/$(VM_SUBDIR) +# Server (C2) ifeq ($(JVM_VARIANT_SERVER), true) - MISC_DIR=$(C2_DIR) - GEN_DIR=$(C2_BASE_DIR)/generated +# Common +$(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz + $(install-file) +$(EXPORT_LIB_DIR)/%.jar: $(C2_DIR)/../generated/%.jar + $(install-file) +$(EXPORT_INCLUDE_DIR)/%: $(C2_DIR)/../generated/jvmtifiles/% + $(install-file) +# Windows +$(EXPORT_SERVER_DIR)/%.dll: $(C2_DIR)/%.dll + $(install-file) +$(EXPORT_SERVER_DIR)/%.pdb: $(C2_DIR)/%.pdb + $(install-file) +$(EXPORT_SERVER_DIR)/%.map: $(C2_DIR)/%.map + $(install-file) +$(EXPORT_LIB_DIR)/%.lib: $(C2_DIR)/%.lib + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.diz: $(C2_DIR)/%.diz + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.dll: $(C2_DIR)/%.dll + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C2_DIR)/%.pdb + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.map: $(C2_DIR)/%.map + $(install-file) +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_DIR)/%.diz + $(install-file) +$(EXPORT_SERVER_DIR)/64/%.diz: $(C2_DIR)/%.diz + $(install-file) endif + +# Client (C1) ifeq ($(JVM_VARIANT_CLIENT), true) - MISC_DIR=$(C1_DIR) - GEN_DIR=$(C1_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_ZEROSHARK), true) - MISC_DIR=$(SHARK_DIR) - GEN_DIR=$(SHARK_BASE_DIR)/generated -endif -ifeq ($(JVM_VARIANT_ZERO), true) - MISC_DIR=$(ZERO_DIR) - GEN_DIR=$(ZERO_BASE_DIR)/generated +# Common +$(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz + $(install-file) +$(EXPORT_LIB_DIR)/%.jar: $(C1_DIR)/../generated/%.jar + $(install-file) +$(EXPORT_INCLUDE_DIR)/%: $(C1_DIR)/../generated/jvmtifiles/% + $(install-file) +# Windows +$(EXPORT_CLIENT_DIR)/%.dll: $(C1_DIR)/%.dll + $(install-file) +$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_DIR)/%.pdb + $(install-file) +$(EXPORT_CLIENT_DIR)/%.map: $(C1_DIR)/%.map + $(install-file) +$(EXPORT_LIB_DIR)/%.lib: $(C1_DIR)/%.lib + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.diz: $(C1_DIR)/%.diz + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.dll: $(C1_DIR)/%.dll + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(C1_DIR)/%.pdb + $(install-file) +$(EXPORT_JRE_BIN_DIR)/%.map: $(C1_DIR)/%.map + $(install-file) +# Unix 
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz + $(install-file) +$(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_DIR)/%.diz + $(install-file) endif + +# Minimal1 ifeq ($(JVM_VARIANT_MINIMAL1), true) - MISC_DIR=$(MINIMAL1_DIR) - GEN_DIR=$(MINIMAL1_BASE_DIR)/generated -endif - -# Bin files (windows) -ifeq ($(OSNAME),windows) - -# Get jvm.lib -$(EXPORT_LIB_DIR)/%.lib: $(MISC_DIR)/%.lib +# Common +$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz $(install-file) - -# Other libraries (like SA) -$(EXPORT_JRE_BIN_DIR)/%.diz: $(MISC_DIR)/%.diz +$(EXPORT_LIB_DIR)/%.jar: $(MINIMAL1_DIR)/../generated/%.jar $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.dll: $(MISC_DIR)/%.dll +$(EXPORT_INCLUDE_DIR)/%: $(MINIMAL1_DIR)/../generated/jvmtifiles/% $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MISC_DIR)/%.pdb +# Windows +$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll $(install-file) -$(EXPORT_JRE_BIN_DIR)/%.map: $(MISC_DIR)/%.map +$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb $(install-file) - -# Client files always come from C1 area -$(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz +$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map $(install-file) -$(EXPORT_CLIENT_DIR)/%.dll: $(C1_DIR)/%.dll +$(EXPORT_LIB_DIR)/%.lib: $(MINIMAL1_DIR)/%.lib $(install-file) -$(EXPORT_CLIENT_DIR)/%.pdb: $(C1_DIR)/%.pdb +$(EXPORT_JRE_BIN_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz $(install-file) -$(EXPORT_CLIENT_DIR)/%.map: $(C1_DIR)/%.map +$(EXPORT_JRE_BIN_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll $(install-file) - -# Server files always come from C2 area -$(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz +$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb $(install-file) -$(EXPORT_SERVER_DIR)/%.dll: $(C2_DIR)/%.dll +$(EXPORT_JRE_BIN_DIR)/%.map: $(MINIMAL1_DIR)/%.map $(install-file) -$(EXPORT_SERVER_DIR)/%.pdb: $(C2_DIR)/%.pdb +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) -$(EXPORT_SERVER_DIR)/%.map: $(C2_DIR)/%.map +$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz + $(install-file) +$(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz $(install-file) endif -# Minimal JVM files always come from minimal area -$(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz +# Zero +ifeq ($(JVM_VARIANT_ZERO), true) +# Common +$(EXPORT_LIB_DIR)/%.jar: $(ZERO_DIR)/../generated/%.jar $(install-file) -$(EXPORT_MINIMAL_DIR)/%.dll: $(MINIMAL1_DIR)/%.dll +$(EXPORT_INCLUDE_DIR)/%: $(ZERO_DIR)/../generated/jvmtifiles/% $(install-file) -$(EXPORT_MINIMAL_DIR)/%.pdb: $(MINIMAL1_DIR)/%.pdb +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): 
$(ZERO_DIR)/%.$(LIBRARY_SUFFIX) $(install-file) -$(EXPORT_MINIMAL_DIR)/%.map: $(MINIMAL1_DIR)/%.map +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz + $(install-file) +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz $(install-file) - -# Shared Library -ifneq ($(OSNAME),windows) - ifeq ($(JVM_VARIANT_SERVER), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.debuginfo: $(C2_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C2_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(C2_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/64/%.diz: $(C2_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_CLIENT), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_CLIENT_DIR)/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.debuginfo: $(C1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(C1_DIR)/%.diz - $(install-file) - $(EXPORT_CLIENT_DIR)/%.diz: $(C1_DIR)/%.diz - $(install-file) - $(EXPORT_CLIENT_DIR)/64/%.diz: $(C1_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_ZEROSHARK), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_ZERO), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz - $(install-file) - $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo - $(install-file) - $(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz - $(install-file) - endif - ifeq ($(JVM_VARIANT_MINIMAL1), true) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX) - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 
$(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.debuginfo: $(MINIMAL1_DIR)/%.debuginfo - $(install-file) - $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - $(EXPORT_MINIMAL_DIR)/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - $(EXPORT_MINIMAL_DIR)/64/%.diz: $(MINIMAL1_DIR)/%.diz - $(install-file) - endif endif -# Jar file (sa-jdi.jar) -$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar +# Shark +ifeq ($(JVM_VARIANT_ZEROSHARK), true) +# Common +$(EXPORT_LIB_DIR)/%.jar: $(SHARK_DIR)/../generated/%.jar $(install-file) - -# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h) -$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/% +$(EXPORT_INCLUDE_DIR)/%: $(SHARK_DIR)/../generated/jvmtifiles/% $(install-file) +# Unix +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo): $(SHARK_DIR)/%.debuginfo + $(install-file) +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz + $(install-file) +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX) + $(install-file) +$(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo + $(install-file) +$(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz + $(install-file) +endif $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/% $(install-file) From 09fdc18edc579a2c74468a8d627f9a31b1ff49a0 Mon Sep 17 00:00:00 2001 From: Niclas Adlertz Date: Tue, 16 Apr 2013 10:08:41 +0200 Subject: [PATCH 019/162] 8011621: live_ranges_in_separate_class.patch Reviewed-by: kvn, roland --- hotspot/make/bsd/makefiles/vm.make | 2 +- hotspot/make/linux/makefiles/vm.make | 2 +- hotspot/make/solaris/makefiles/vm.make | 2 +- hotspot/make/windows/create_obj_files.sh | 2 +- hotspot/src/os/bsd/vm/chaitin_bsd.cpp | 42 -- hotspot/src/os/linux/vm/chaitin_linux.cpp | 42 -- hotspot/src/os/solaris/vm/chaitin_solaris.cpp | 46 -- hotspot/src/os/windows/vm/chaitin_windows.cpp | 78 --- hotspot/src/share/vm/opto/chaitin.cpp | 448 ++++++++++++------ hotspot/src/share/vm/opto/chaitin.hpp | 185 ++++++-- hotspot/src/share/vm/opto/coalesce.cpp | 321 ++++--------- hotspot/src/share/vm/opto/coalesce.hpp | 14 +- hotspot/src/share/vm/opto/compile.cpp | 14 +- .../src/share/vm/opto/idealGraphPrinter.cpp | 2 +- hotspot/src/share/vm/opto/ifg.cpp | 74 +-- hotspot/src/share/vm/opto/live.cpp | 2 +- hotspot/src/share/vm/opto/live.hpp | 4 +- hotspot/src/share/vm/opto/postaloc.cpp | 45 +- hotspot/src/share/vm/opto/reg_split.cpp | 167 ++++--- hotspot/src/share/vm/opto/regalloc.hpp | 11 +- hotspot/src/share/vm/runtime/vmStructs.cpp | 1 - 21 files changed, 733 insertions(+), 771 deletions(-) delete mode 100644 hotspot/src/os/bsd/vm/chaitin_bsd.cpp delete mode 100644 hotspot/src/os/linux/vm/chaitin_linux.cpp delete mode 100644 hotspot/src/os/solaris/vm/chaitin_solaris.cpp delete mode 100644 hotspot/src/os/windows/vm/chaitin_windows.cpp diff --git a/hotspot/make/bsd/makefiles/vm.make b/hotspot/make/bsd/makefiles/vm.make index e93765dc50f..b9528e101ee 100644 --- a/hotspot/make/bsd/makefiles/vm.make +++ b/hotspot/make/bsd/makefiles/vm.make @@ -187,7 +187,7 @@ Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* 
SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero diff --git a/hotspot/make/linux/makefiles/vm.make b/hotspot/make/linux/makefiles/vm.make index b31064782f7..af060f8af0b 100644 --- a/hotspot/make/linux/makefiles/vm.make +++ b/hotspot/make/linux/makefiles/vm.make @@ -189,7 +189,7 @@ Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) $(SHARK_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero diff --git a/hotspot/make/solaris/makefiles/vm.make b/hotspot/make/solaris/makefiles/vm.make index 5aca8f05f76..62146d77f03 100644 --- a/hotspot/make/solaris/makefiles/vm.make +++ b/hotspot/make/solaris/makefiles/vm.make @@ -202,7 +202,7 @@ Src_Dirs/ZERO := $(CORE_PATHS) Src_Dirs/SHARK := $(CORE_PATHS) Src_Dirs := $(Src_Dirs/$(TYPE)) -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\* +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\* COMPILER1_SPECIFIC_FILES := c1_\* SHARK_SPECIFIC_FILES := shark ZERO_SPECIFIC_FILES := zero diff --git a/hotspot/make/windows/create_obj_files.sh b/hotspot/make/windows/create_obj_files.sh index 14a7087f2aa..257b3f140d2 100644 --- a/hotspot/make/windows/create_obj_files.sh +++ b/hotspot/make/windows/create_obj_files.sh @@ -114,7 +114,7 @@ case "${TYPE}" in "shark") Src_Dirs="${CORE_PATHS}" ;; esac -COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp chaitin* c2_* runtime_*" +COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*" COMPILER1_SPECIFIC_FILES="c1_*" SHARK_SPECIFIC_FILES="shark" ZERO_SPECIFIC_FILES="zero" diff --git a/hotspot/src/os/bsd/vm/chaitin_bsd.cpp b/hotspot/src/os/bsd/vm/chaitin_bsd.cpp deleted file mode 100644 index e4925644dbc..00000000000 --- a/hotspot/src/os/bsd/vm/chaitin_bsd.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -void PhaseRegAlloc::pd_preallocate_hook() { - // no action -} - -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action -} -#endif - - -// Reconciliation History -// chaitin_solaris.cpp 1.7 99/07/12 23:54:22 -// End diff --git a/hotspot/src/os/linux/vm/chaitin_linux.cpp b/hotspot/src/os/linux/vm/chaitin_linux.cpp deleted file mode 100644 index e4925644dbc..00000000000 --- a/hotspot/src/os/linux/vm/chaitin_linux.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -void PhaseRegAlloc::pd_preallocate_hook() { - // no action -} - -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action -} -#endif - - -// Reconciliation History -// chaitin_solaris.cpp 1.7 99/07/12 23:54:22 -// End diff --git a/hotspot/src/os/solaris/vm/chaitin_solaris.cpp b/hotspot/src/os/solaris/vm/chaitin_solaris.cpp deleted file mode 100644 index 92a437f9683..00000000000 --- a/hotspot/src/os/solaris/vm/chaitin_solaris.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -void PhaseRegAlloc::pd_preallocate_hook() { - // no action -} - -#ifdef ASSERT -void PhaseRegAlloc::pd_postallocate_verify_hook() { - // no action -} -#endif - - -//Reconciliation History -// 1.1 99/02/12 15:35:26 chaitin_win32.cpp -// 1.2 99/02/18 15:38:56 chaitin_win32.cpp -// 1.4 99/03/09 10:37:48 chaitin_win32.cpp -// 1.6 99/03/25 11:07:44 chaitin_win32.cpp -// 1.8 99/06/22 16:38:58 chaitin_win32.cpp -//End diff --git a/hotspot/src/os/windows/vm/chaitin_windows.cpp b/hotspot/src/os/windows/vm/chaitin_windows.cpp deleted file mode 100644 index bae10b3b5c4..00000000000 --- a/hotspot/src/os/windows/vm/chaitin_windows.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "opto/chaitin.hpp" -#include "opto/machnode.hpp" - -// Disallow the use of the frame pointer (EBP) for implicit null exceptions -// on win95/98. If we do not do this, the OS gets confused and gives a stack -// error. -void PhaseRegAlloc::pd_preallocate_hook() { -#ifndef _WIN64 - if (ImplicitNullChecks && !os::win32::is_nt()) { - for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) { - Block *block = _cfg._blocks[block_num]; - - Node *block_end = block->end(); - if (block_end->is_MachNullCheck() && - block_end->as_Mach()->ideal_Opcode() != Op_Con) { - // The last instruction in the block is an implicit null check. - // Fix its input so that it does not load into the frame pointer. - _matcher.pd_implicit_null_fixup(block_end->in(1)->as_Mach(), - block_end->as_MachNullCheck()->_vidx); - } - } - } -#else - // WIN64==itanium on XP -#endif -} - -#ifdef ASSERT -// Verify that no implicit null check uses the frame pointer (EBP) as -// its register on win95/98. Use of the frame pointer in an implicit -// null check confuses the OS, yielding a stack error. -void PhaseRegAlloc::pd_postallocate_verify_hook() { -#ifndef _WIN64 - if (ImplicitNullChecks && !os::win32::is_nt()) { - for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) { - Block *block = _cfg._blocks[block_num]; - - Node *block_end = block->_nodes[block->_nodes.size()-1]; - if (block_end->is_MachNullCheck() && block_end->as_Mach()->ideal_Opcode() != Op_Con) { - // The last instruction in the block is an implicit - // null check. Verify that this instruction does not - // use the frame pointer. 
- int reg = get_reg_first(block_end->in(1)->in(block_end->as_MachNullCheck()->_vidx)); - assert(reg != EBP_num, - "implicit null check using frame pointer on win95/98"); - } - } - } -#else - // WIN64==itanium on XP -#endif -} -#endif diff --git a/hotspot/src/share/vm/opto/chaitin.cpp b/hotspot/src/share/vm/opto/chaitin.cpp index 0fbe723c44f..9d69b0f3b71 100644 --- a/hotspot/src/share/vm/opto/chaitin.cpp +++ b/hotspot/src/share/vm/opto/chaitin.cpp @@ -145,6 +145,72 @@ void LRG_List::extend( uint nidx, uint lidx ) { #define NUMBUCKS 3 +// Straight out of Tarjan's union-find algorithm +uint LiveRangeMap::find_compress(uint lrg) { + uint cur = lrg; + uint next = _uf_map[cur]; + while (next != cur) { // Scan chain of equivalences + assert( next < cur, "always union smaller"); + cur = next; // until find a fixed-point + next = _uf_map[cur]; + } + + // Core of union-find algorithm: update chain of + // equivalences to be equal to the root. + while (lrg != next) { + uint tmp = _uf_map[lrg]; + _uf_map.map(lrg, next); + lrg = tmp; + } + return lrg; +} + +// Reset the Union-Find map to identity +void LiveRangeMap::reset_uf_map(uint max_lrg_id) { + _max_lrg_id= max_lrg_id; + // Force the Union-Find mapping to be at least this large + _uf_map.extend(_max_lrg_id, 0); + // Initialize it to be the ID mapping. + for (uint i = 0; i < _max_lrg_id; ++i) { + _uf_map.map(i, i); + } +} + +// Make all Nodes map directly to their final live range; no need for +// the Union-Find mapping after this call. +void LiveRangeMap::compress_uf_map_for_nodes() { + // For all Nodes, compress mapping + uint unique = _names.Size(); + for (uint i = 0; i < unique; ++i) { + uint lrg = _names[i]; + uint compressed_lrg = find(lrg); + if (lrg != compressed_lrg) { + _names.map(i, compressed_lrg); + } + } +} + +// Like Find above, but no path compress, so bad asymptotic behavior +uint LiveRangeMap::find_const(uint lrg) const { + if (!lrg) { + return lrg; // Ignore the zero LRG + } + + // Off the end? This happens during debugging dumps when you got + // brand new live ranges but have not told the allocator yet. 
+ if (lrg >= _max_lrg_id) { + return lrg; + } + + uint next = _uf_map[lrg]; + while (next != lrg) { // Scan chain of equivalences + assert(next < lrg, "always union smaller"); + lrg = next; // until find a fixed-point + next = _uf_map[lrg]; + } + return next; +} + //------------------------------Chaitin---------------------------------------- PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) : PhaseRegAlloc(unique, cfg, matcher, @@ -153,13 +219,13 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) #else NULL #endif - ), - _names(unique), _uf_map(unique), - _maxlrg(0), _live(0), - _spilled_once(Thread::current()->resource_area()), - _spilled_twice(Thread::current()->resource_area()), - _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0), - _oldphi(unique) + ) + , _lrg_map(unique) + , _live(0) + , _spilled_once(Thread::current()->resource_area()) + , _spilled_twice(Thread::current()->resource_area()) + , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0) + , _oldphi(unique) #ifndef PRODUCT , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling")) #endif @@ -168,7 +234,6 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq); - uint i,j; // Build a list of basic blocks, sorted by frequency _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); // Experiment with sorting strategies to speed compilation @@ -176,30 +241,30 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) Block **buckets[NUMBUCKS]; // Array of buckets uint buckcnt[NUMBUCKS]; // Array of bucket counters double buckval[NUMBUCKS]; // Array of bucket value cutoffs - for( i = 0; i < NUMBUCKS; i++ ) { - buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks ); + for (uint i = 0; i < NUMBUCKS; i++) { + buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks); buckcnt[i] = 0; // Bump by three orders of magnitude each time cutoff *= 0.001; buckval[i] = cutoff; - for( j = 0; j < _cfg._num_blocks; j++ ) { + for (uint j = 0; j < _cfg._num_blocks; j++) { buckets[i][j] = NULL; } } // Sort blocks into buckets - for( i = 0; i < _cfg._num_blocks; i++ ) { - for( j = 0; j < NUMBUCKS; j++ ) { - if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) { + for (uint i = 0; i < _cfg._num_blocks; i++) { + for (uint j = 0; j < NUMBUCKS; j++) { + if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) { // Assign block to end of list for appropriate bucket buckets[j][buckcnt[j]++] = _cfg._blocks[i]; - break; // kick out of inner loop + break; // kick out of inner loop } } } // Dump buckets into final block array uint blkcnt = 0; - for( i = 0; i < NUMBUCKS; i++ ) { - for( j = 0; j < buckcnt[i]; j++ ) { + for (uint i = 0; i < NUMBUCKS; i++) { + for (uint j = 0; j < buckcnt[i]; j++) { _blks[blkcnt++] = buckets[i][j]; } } @@ -207,6 +272,77 @@ PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher) assert(blkcnt == _cfg._num_blocks, "Block array not totally filled"); } +//------------------------------Union------------------------------------------ +// union 2 sets together. 
+void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { + uint src = _lrg_map.find(src_n); + uint dst = _lrg_map.find(dst_n); + assert(src, ""); + assert(dst, ""); + assert(src < _lrg_map.max_lrg_id(), "oob"); + assert(dst < _lrg_map.max_lrg_id(), "oob"); + assert(src < dst, "always union smaller"); + _lrg_map.uf_map(dst, src); +} + +//------------------------------new_lrg---------------------------------------- +void PhaseChaitin::new_lrg(const Node *x, uint lrg) { + // Make the Node->LRG mapping + _lrg_map.extend(x->_idx,lrg); + // Make the Union-Find mapping an identity function + _lrg_map.uf_extend(lrg, lrg); +} + + +bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) { + Block *bcon = _cfg._bbs[con->_idx]; + uint cindex = bcon->find_node(con); + Node *con_next = bcon->_nodes[cindex+1]; + if (con_next->in(0) != con || !con_next->is_MachProj()) { + return false; // No MachProj's follow + } + + // Copy kills after the cloned constant + Node *kills = con_next->clone(); + kills->set_req(0, copy); + b->_nodes.insert(idx, kills); + _cfg._bbs.map(kills->_idx, b); + new_lrg(kills, max_lrg_id); + return true; +} + +//------------------------------compact---------------------------------------- +// Renumber the live ranges to compact them. Makes the IFG smaller. +void PhaseChaitin::compact() { + // Current the _uf_map contains a series of short chains which are headed + // by a self-cycle. All the chains run from big numbers to little numbers. + // The Find() call chases the chains & shortens them for the next Find call. + // We are going to change this structure slightly. Numbers above a moving + // wave 'i' are unchanged. Numbers below 'j' point directly to their + // compacted live range with no further chaining. There are no chains or + // cycles below 'i', so the Find call no longer works. + uint j=1; + uint i; + for (i = 1; i < _lrg_map.max_lrg_id(); i++) { + uint lr = _lrg_map.uf_live_range_id(i); + // Ignore unallocated live ranges + if (!lr) { + continue; + } + assert(lr <= i, ""); + _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr)); + } + // Now change the Node->LR mapping to reflect the compacted names + uint unique = _lrg_map.size(); + for (i = 0; i < unique; i++) { + uint lrg_id = _lrg_map.live_range_id(i); + _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id)); + } + + // Reset the Union-Find mapping + _lrg_map.reset_uf_map(j); +} + void PhaseChaitin::Register_Allocate() { // Above the OLD FP (and in registers) are the incoming arguments. Stack @@ -231,14 +367,12 @@ void PhaseChaitin::Register_Allocate() { // all copy-related live ranges low and then using the max copy-related // live range as a cut-off for LIVE and the IFG. In other words, I can // build a subset of LIVE and IFG just for copies. - PhaseLive live(_cfg,_names,&live_arena); + PhaseLive live(_cfg, _lrg_map.names(), &live_arena); // Need IFG for coalescing and coloring - PhaseIFG ifg( &live_arena ); + PhaseIFG ifg(&live_arena); _ifg = &ifg; - if (C->unique() > _names.Size()) _names.extend(C->unique()-1, 0); - // Come out of SSA world to the Named world. Assign (virtual) registers to // Nodes. Use the same register for all inputs and the output of PhiNodes // - effectively ending SSA form. 
This requires either coalescing live @@ -258,9 +392,9 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; // Mark live as being not available rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); // Empty IFG + ifg.init(_lrg_map.max_lrg_id()); // Empty IFG gather_lrg_masks( false ); // Collect LRG masks - live.compute( _maxlrg ); // Compute liveness + live.compute(_lrg_map.max_lrg_id()); // Compute liveness _live = &live; // Mark LIVE as being available } @@ -270,19 +404,19 @@ void PhaseChaitin::Register_Allocate() { // across any GC point where the derived value is live. So this code looks // at all the GC points, and "stretches" the live range of any base pointer // to the GC point. - if( stretch_base_pointer_live_ranges(&live_arena) ) { - NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); ) + if (stretch_base_pointer_live_ranges(&live_arena)) { + NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);) // Since some live range stretched, I need to recompute live _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); - gather_lrg_masks( false ); - live.compute( _maxlrg ); + ifg.init(_lrg_map.max_lrg_id()); + gather_lrg_masks(false); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } // Create the interference graph using virtual copies - build_ifg_virtual( ); // Include stack slots this time + build_ifg_virtual(); // Include stack slots this time // Aggressive (but pessimistic) copy coalescing. // This pass works on virtual copies. Any virtual copies which are not @@ -296,8 +430,8 @@ void PhaseChaitin::Register_Allocate() { // given Node and search them for an instance, i.e., time O(#MaxLRG)). _ifg->SquareUp(); - PhaseAggressiveCoalesce coalesce( *this ); - coalesce.coalesce_driver( ); + PhaseAggressiveCoalesce coalesce(*this); + coalesce.coalesce_driver(); // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do // not match the Phi itself, insert a copy. 
coalesce.insert_copies(_matcher); @@ -310,28 +444,36 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); + ifg.init(_lrg_map.max_lrg_id()); gather_lrg_masks( true ); - live.compute( _maxlrg ); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } // Build physical interference graph uint must_spill = 0; - must_spill = build_ifg_physical( &live_arena ); + must_spill = build_ifg_physical(&live_arena); // If we have a guaranteed spill, might as well spill now - if( must_spill ) { - if( !_maxlrg ) return; + if (must_spill) { + if(!_lrg_map.max_lrg_id()) { + return; + } // Bail out if unique gets too large (ie - unique > MaxNodeLimit) C->check_node_count(10*must_spill, "out of nodes before split"); - if (C->failing()) return; - _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere + if (C->failing()) { + return; + } + + uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere + _lrg_map.set_max_lrg_id(new_max_lrg_id); // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor) // or we failed to split C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split"); - if (C->failing()) return; + if (C->failing()) { + return; + } - NOT_PRODUCT( C->verify_graph_edges(); ) + NOT_PRODUCT(C->verify_graph_edges();) compact(); // Compact LRGs; return new lower max lrg @@ -340,23 +482,23 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); // Build a new interference graph + ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph gather_lrg_masks( true ); // Collect intersect mask - live.compute( _maxlrg ); // Compute LIVE + live.compute(_lrg_map.max_lrg_id()); // Compute LIVE _live = &live; } - build_ifg_physical( &live_arena ); + build_ifg_physical(&live_arena); _ifg->SquareUp(); _ifg->Compute_Effective_Degree(); // Only do conservative coalescing if requested - if( OptoCoalesce ) { + if (OptoCoalesce) { // Conservative (and pessimistic) copy coalescing of those spills - PhaseConservativeCoalesce coalesce( *this ); + PhaseConservativeCoalesce coalesce(*this); // If max live ranges greater than cutoff, don't color the stack. // This cutoff can be larger than below since it is only done once. 
- coalesce.coalesce_driver( ); + coalesce.coalesce_driver(); } - compress_uf_map_for_nodes(); + _lrg_map.compress_uf_map_for_nodes(); #ifdef ASSERT verify(&live_arena, true); @@ -390,13 +532,18 @@ void PhaseChaitin::Register_Allocate() { } } - if( !_maxlrg ) return; - _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere + if (!_lrg_map.max_lrg_id()) { + return; + } + uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere + _lrg_map.set_max_lrg_id(new_max_lrg_id); // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor) - C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split"); - if (C->failing()) return; + C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split"); + if (C->failing()) { + return; + } - compact(); // Compact LRGs; return new lower max lrg + compact(); // Compact LRGs; return new lower max lrg // Nuke the live-ness and interference graph and LiveRanGe info { @@ -404,26 +551,26 @@ void PhaseChaitin::Register_Allocate() { _live = NULL; rm.reset_to_mark(); // Reclaim working storage IndexSet::reset_memory(C, &live_arena); - ifg.init(_maxlrg); + ifg.init(_lrg_map.max_lrg_id()); // Create LiveRanGe array. // Intersect register masks for all USEs and DEFs - gather_lrg_masks( true ); - live.compute( _maxlrg ); + gather_lrg_masks(true); + live.compute(_lrg_map.max_lrg_id()); _live = &live; } - must_spill = build_ifg_physical( &live_arena ); + must_spill = build_ifg_physical(&live_arena); _ifg->SquareUp(); _ifg->Compute_Effective_Degree(); // Only do conservative coalescing if requested - if( OptoCoalesce ) { + if (OptoCoalesce) { // Conservative (and pessimistic) copy coalescing - PhaseConservativeCoalesce coalesce( *this ); + PhaseConservativeCoalesce coalesce(*this); // Check for few live ranges determines how aggressive coalesce is. - coalesce.coalesce_driver( ); + coalesce.coalesce_driver(); } - compress_uf_map_for_nodes(); + _lrg_map.compress_uf_map_for_nodes(); #ifdef ASSERT verify(&live_arena, true); #endif @@ -435,7 +582,7 @@ void PhaseChaitin::Register_Allocate() { // Select colors by re-inserting LRGs back into the IFG in reverse order. // Return whether or not something spills. - spills = Select( ); + spills = Select(); } // Count number of Simplify-Select trips per coloring success. @@ -452,9 +599,12 @@ void PhaseChaitin::Register_Allocate() { // max_reg is past the largest *register* used. // Convert that to a frame_slot number. 
- if( _max_reg <= _matcher._new_SP ) + if (_max_reg <= _matcher._new_SP) { _framesize = C->out_preserve_stack_slots(); - else _framesize = _max_reg -_matcher._new_SP; + } + else { + _framesize = _max_reg -_matcher._new_SP; + } assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough"); // This frame must preserve the required fp alignment @@ -462,8 +612,9 @@ void PhaseChaitin::Register_Allocate() { assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" ); #ifndef PRODUCT _total_framesize += _framesize; - if( (int)_framesize > _max_framesize ) + if ((int)_framesize > _max_framesize) { _max_framesize = _framesize; + } #endif // Convert CISC spills @@ -475,15 +626,17 @@ void PhaseChaitin::Register_Allocate() { log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing()); } - if (C->failing()) return; + if (C->failing()) { + return; + } - NOT_PRODUCT( C->verify_graph_edges(); ) + NOT_PRODUCT(C->verify_graph_edges();) // Move important info out of the live_arena to longer lasting storage. - alloc_node_regs(_names.Size()); - for (uint i=0; i < _names.Size(); i++) { - if (_names[i]) { // Live range associated with Node? - LRG &lrg = lrgs(_names[i]); + alloc_node_regs(_lrg_map.size()); + for (uint i=0; i < _lrg_map.size(); i++) { + if (_lrg_map.live_range_id(i)) { // Live range associated with Node? + LRG &lrg = lrgs(_lrg_map.live_range_id(i)); if (!lrg.alive()) { set_bad(i); } else if (lrg.num_regs() == 1) { @@ -537,11 +690,11 @@ void PhaseChaitin::de_ssa() { Node *n = b->_nodes[j]; // Pre-color to the zero live range, or pick virtual register const RegMask &rm = n->out_RegMask(); - _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 ); + _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0); } } // Reset the Union-Find mapping to be identity - reset_uf_map(lr_counter); + _lrg_map.reset_uf_map(lr_counter); } @@ -551,7 +704,7 @@ void PhaseChaitin::de_ssa() { void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // Nail down the frame pointer live range - uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr)); + uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr)); lrgs(fp_lrg)._cost += 1e12; // Cost is infinite // For all blocks @@ -566,14 +719,14 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { uint idx = n->is_Copy(); // Get virtual register number, same as LiveRanGe index - uint vreg = n2lidx(n); + uint vreg = _lrg_map.live_range_id(n); LRG &lrg = lrgs(vreg); if( vreg ) { // No vreg means un-allocable (e.g. 
memory) // Collect has-copy bit if( idx ) { lrg._has_copy = 1; - uint clidx = n2lidx(n->in(idx)); + uint clidx = _lrg_map.live_range_id(n->in(idx)); LRG ©_src = lrgs(clidx); copy_src._has_copy = 1; } @@ -773,8 +926,10 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { } // Prepare register mask for each input for( uint k = input_edge_start; k < cnt; k++ ) { - uint vreg = n2lidx(n->in(k)); - if( !vreg ) continue; + uint vreg = _lrg_map.live_range_id(n->in(k)); + if (!vreg) { + continue; + } // If this instruction is CISC Spillable, add the flags // bit to its appropriate input @@ -857,7 +1012,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { } // end for all blocks // Final per-liverange setup - for (uint i2=0; i2<_maxlrg; i2++) { + for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) { LRG &lrg = lrgs(i2); assert(!lrg._is_vector || !lrg._fat_proj, "sanity"); if (lrg.num_regs() > 1 && !lrg._fat_proj) { @@ -879,7 +1034,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) { // The bit is checked in Simplify. void PhaseChaitin::set_was_low() { #ifdef ASSERT - for( uint i = 1; i < _maxlrg; i++ ) { + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { int size = lrgs(i).num_regs(); uint old_was_lo = lrgs(i)._was_lo; lrgs(i)._was_lo = 0; @@ -913,7 +1068,7 @@ void PhaseChaitin::set_was_low() { // Compute cost/area ratio, in case we spill. Build the lo-degree list. void PhaseChaitin::cache_lrg_info( ) { - for( uint i = 1; i < _maxlrg; i++ ) { + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { LRG &lrg = lrgs(i); // Check for being of low degree: means we can be trivially colored. @@ -949,10 +1104,10 @@ void PhaseChaitin::Pre_Simplify( ) { // Warm up the lo-degree no-copy list int lo_no_copy = 0; - for( uint i = 1; i < _maxlrg; i++ ) { - if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) || + for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) { + if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) || !lrgs(i).alive() || - lrgs(i)._must_spill ) { + lrgs(i)._must_spill) { lrgs(i)._next = lo_no_copy; lo_no_copy = i; } @@ -1163,7 +1318,7 @@ static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) { OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) { // Check for "at_risk" LRG's - uint risk_lrg = Find(lrg._risk_bias); + uint risk_lrg = _lrg_map.find(lrg._risk_bias); if( risk_lrg != 0 ) { // Walk the colored neighbors of the "at_risk" candidate // Choose a color which is both legal and already taken by a neighbor @@ -1179,7 +1334,7 @@ OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) { } } - uint copy_lrg = Find(lrg._copy_bias); + uint copy_lrg = _lrg_map.find(lrg._copy_bias); if( copy_lrg != 0 ) { // If he has a color, if( !(*(_ifg->_yanked))[copy_lrg] ) { @@ -1423,10 +1578,10 @@ uint PhaseChaitin::Select( ) { void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) { if( _spilled_once.test(src->_idx) ) { _spilled_once.set(dst->_idx); - lrgs(Find(dst))._was_spilled1 = 1; + lrgs(_lrg_map.find(dst))._was_spilled1 = 1; if( _spilled_twice.test(src->_idx) ) { _spilled_twice.set(dst->_idx); - lrgs(Find(dst))._was_spilled2 = 1; + lrgs(_lrg_map.find(dst))._was_spilled2 = 1; } } } @@ -1471,7 +1626,7 @@ void PhaseChaitin::fixup_spills() { MachNode *mach = n->as_Mach(); inp = mach->operand_index(inp); Node *src = n->in(inp); // Value to load or store - LRG &lrg_cisc = lrgs( Find_const(src) ); + LRG &lrg_cisc = lrgs(_lrg_map.find_const(src)); OptoReg::Name src_reg = lrg_cisc.reg(); // Doubles record the HIGH register of an adjacent pair. 
src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs()); @@ -1554,9 +1709,9 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive Block *startb = _cfg._bbs[C->top()->_idx]; startb->_nodes.insert(startb->find_node(C->top()), base ); _cfg._bbs.map( base->_idx, startb ); - assert (n2lidx(base) == 0, "should not have LRG yet"); + assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet"); } - if (n2lidx(base) == 0) { + if (_lrg_map.live_range_id(base) == 0) { new_lrg(base, maxlrg++); } assert(base->in(0) == _cfg._root && @@ -1566,7 +1721,7 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive } // Check for AddP-related opcodes - if( !derived->is_Phi() ) { + if (!derived->is_Phi()) { assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name())); Node *base = derived->in(AddPNode::Base); derived_base_map[derived->_idx] = base; @@ -1629,9 +1784,9 @@ Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derive // base pointer that is live across the Safepoint for oopmap building. The // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the // required edge set. -bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { +bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) { int must_recompute_live = false; - uint maxlrg = _maxlrg; + uint maxlrg = _lrg_map.max_lrg_id(); Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique()); memset( derived_base_map, 0, sizeof(Node*)*C->unique() ); @@ -1669,15 +1824,18 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { } // Get value being defined - uint lidx = n2lidx(n); - if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) { + uint lidx = _lrg_map.live_range_id(n); + // Ignore the occasional brand-new live range + if (lidx && lidx < _lrg_map.max_lrg_id()) { // Remove from live-out set liveout.remove(lidx); // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. uint idx = n->is_Copy(); - if( idx ) liveout.remove( n2lidx(n->in(idx)) ); + if (idx) { + liveout.remove(_lrg_map.live_range_id(n->in(idx))); + } } // Found a safepoint? @@ -1695,21 +1853,21 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity"); // If its an OOP with a non-zero offset, then it is derived. if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) { - Node *base = find_base_for_derived( derived_base_map, derived, maxlrg ); - assert( base->_idx < _names.Size(), "" ); + Node *base = find_base_for_derived(derived_base_map, derived, maxlrg); + assert(base->_idx < _lrg_map.size(), ""); // Add reaching DEFs of derived pointer and base pointer as a // pair of inputs - n->add_req( derived ); - n->add_req( base ); + n->add_req(derived); + n->add_req(base); // See if the base pointer is already live to this point. // Since I'm working on the SSA form, live-ness amounts to // reaching def's. So if I find the base's live range then // I know the base's def reaches here. 
- if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or - !liveout.member( n2lidx(base) ) ) && // not live) AND - (n2lidx(base) > 0) && // not a constant - _cfg._bbs[base->_idx] != b ) { // base not def'd in blk) + if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or + !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND + (_lrg_map.live_range_id(base) > 0) && // not a constant + _cfg._bbs[base->_idx] != b) { // base not def'd in blk) // Base pointer is not currently live. Since I stretched // the base pointer to here and it crosses basic-block // boundaries, the global live info is now incorrect. @@ -1721,11 +1879,12 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { } // End of if found a GC point // Make all inputs live - if( !n->is_Phi() ) { // Phi function uses come from prior block - for( uint k = 1; k < n->req(); k++ ) { - uint lidx = n2lidx(n->in(k)); - if( lidx < _maxlrg ) - liveout.insert( lidx ); + if (!n->is_Phi()) { // Phi function uses come from prior block + for (uint k = 1; k < n->req(); k++) { + uint lidx = _lrg_map.live_range_id(n->in(k)); + if (lidx < _lrg_map.max_lrg_id()) { + liveout.insert(lidx); + } } } @@ -1733,11 +1892,12 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { liveout.clear(); // Free the memory used by liveout. } // End of forall blocks - _maxlrg = maxlrg; + _lrg_map.set_max_lrg_id(maxlrg); // If I created a new live range I need to recompute live - if( maxlrg != _ifg->_maxlrg ) + if (maxlrg != _ifg->_maxlrg) { must_recompute_live = true; + } return must_recompute_live != 0; } @@ -1745,16 +1905,17 @@ bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) { //------------------------------add_reference---------------------------------- // Extend the node to LRG mapping -void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) { - _names.extend( node->_idx, n2lidx(old_node) ); + +void PhaseChaitin::add_reference(const Node *node, const Node *old_node) { + _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node)); } //------------------------------dump------------------------------------------- #ifndef PRODUCT -void PhaseChaitin::dump( const Node *n ) const { - uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0; +void PhaseChaitin::dump(const Node *n) const { + uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0; tty->print("L%d",r); - if( r && n->Opcode() != Op_Phi ) { + if (r && n->Opcode() != Op_Phi) { if( _node_regs ) { // Got a post-allocation copy of allocation? tty->print("["); OptoReg::Name second = get_reg_second(n); @@ -1775,11 +1936,13 @@ void PhaseChaitin::dump( const Node *n ) const { tty->print("/N%d\t",n->_idx); tty->print("%s === ", n->Name()); uint k; - for( k = 0; k < n->req(); k++) { + for (k = 0; k < n->req(); k++) { Node *m = n->in(k); - if( !m ) tty->print("_ "); + if (!m) { + tty->print("_ "); + } else { - uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0; + uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0; tty->print("L%d",r); // Data MultiNode's can have projections with no real registers. // Don't die while dumping them. @@ -1810,8 +1973,10 @@ void PhaseChaitin::dump( const Node *n ) const { if( k < n->len() && n->in(k) ) tty->print("| "); for( ; k < n->len(); k++ ) { Node *m = n->in(k); - if( !m ) break; - uint r = (m->_idx < _names.Size() ) ? 
Find_const(m) : 0; + if(!m) { + break; + } + uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0; tty->print("L%d",r); tty->print("/N%d ",m->_idx); } @@ -1839,7 +2004,7 @@ void PhaseChaitin::dump( const Block * b ) const { tty->print("{"); uint i; while ((i = elements.next()) != 0) { - tty->print("L%d ", Find_const(i)); + tty->print("L%d ", _lrg_map.find_const(i)); } tty->print_cr("}"); } @@ -1863,10 +2028,14 @@ void PhaseChaitin::dump() const { // Dump LRG array tty->print("--- Live RanGe Array ---\n"); - for(uint i2 = 1; i2 < _maxlrg; i2++ ) { + for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) { tty->print("L%d: ",i2); - if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( ); - else tty->print_cr("new LRG"); + if (i2 < _ifg->_maxlrg) { + lrgs(i2).dump(); + } + else { + tty->print_cr("new LRG"); + } } tty->print_cr(""); @@ -1939,7 +2108,7 @@ char *PhaseChaitin::dump_register( const Node *n, char *buf ) const { // Post allocation, use direct mappings, no LRG info available print_reg( get_reg_first(n), this, buf ); } else { - uint lidx = Find_const(n); // Grab LRG number + uint lidx = _lrg_map.find_const(n); // Grab LRG number if( !_ifg ) { sprintf(buf,"L%d",lidx); // No register binding yet } else if( !lidx ) { // Special, not allocated value @@ -1968,7 +2137,7 @@ void PhaseChaitin::dump_for_spill_split_recycle() const { if( WizardMode && (PrintCompilation || PrintOpto) ) { // Display which live ranges need to be split and the allocator's state tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt); - for( uint bidx = 1; bidx < _maxlrg; bidx++ ) { + for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) { if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { tty->print("L%d: ", bidx); lrgs(bidx).dump(); @@ -2099,14 +2268,17 @@ void PhaseChaitin::dump_bb( uint pre_order ) const { void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { tty->print_cr("---dump of L%d---",lidx); - if( _ifg ) { - if( lidx >= _maxlrg ) { + if (_ifg) { + if (lidx >= _lrg_map.max_lrg_id()) { tty->print("Attempt to print live range index beyond max live range.\n"); return; } tty->print("L%d: ",lidx); - if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( ); - else tty->print_cr("new LRG"); + if (lidx < _ifg->_maxlrg) { + lrgs(lidx).dump(); + } else { + tty->print_cr("new LRG"); + } } if( _ifg && lidx < _ifg->_maxlrg) { tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx)); @@ -2121,8 +2293,8 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { // For all instructions for( uint j = 0; j < b->_nodes.size(); j++ ) { Node *n = b->_nodes[j]; - if( Find_const(n) == lidx ) { - if( !dump_once++ ) { + if (_lrg_map.find_const(n) == lidx) { + if (!dump_once++) { tty->cr(); b->dump_head( &_cfg._bbs ); } @@ -2133,11 +2305,13 @@ void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { uint cnt = n->req(); for( uint k = 1; k < cnt; k++ ) { Node *m = n->in(k); - if (!m) continue; // be robust in the dumper - if( Find_const(m) == lidx ) { - if( !dump_once++ ) { + if (!m) { + continue; // be robust in the dumper + } + if (_lrg_map.find_const(m) == lidx) { + if (!dump_once++) { tty->cr(); - b->dump_head( &_cfg._bbs ); + b->dump_head(&_cfg._bbs); } dump(n); } diff --git a/hotspot/src/share/vm/opto/chaitin.hpp b/hotspot/src/share/vm/opto/chaitin.hpp index fc8010b851e..3455005f330 100644 --- a/hotspot/src/share/vm/opto/chaitin.hpp +++ b/hotspot/src/share/vm/opto/chaitin.hpp @@ -265,18 +265,118 @@ public: int effective_degree( uint lidx ) const; }; -// 
TEMPORARILY REPLACED WITH COMMAND LINE FLAG +// The LiveRangeMap class is responsible for storing node to live range id mapping. +// Each node is mapped to a live range id (a virtual register). Nodes that are +// not considered for register allocation are given live range id 0. +class LiveRangeMap VALUE_OBJ_CLASS_SPEC { -//// !!!!! Magic Constants need to move into ad file -#ifdef SPARC -//#define FLOAT_PRESSURE 30 /* SFLT_REG_mask.Size() - 1 */ -//#define INT_PRESSURE 23 /* NOTEMP_I_REG_mask.Size() - 1 */ -#define FLOAT_INCREMENT(regs) regs -#else -//#define FLOAT_PRESSURE 6 -//#define INT_PRESSURE 6 -#define FLOAT_INCREMENT(regs) 1 -#endif +private: + + uint _max_lrg_id; + + // Union-find map. Declared as a short for speed. + // Indexed by live-range number, it returns the compacted live-range number + LRG_List _uf_map; + + // Map from Nodes to live ranges + LRG_List _names; + + // Straight out of Tarjan's union-find algorithm + uint find_compress(const Node *node) { + uint lrg_id = find_compress(_names[node->_idx]); + _names.map(node->_idx, lrg_id); + return lrg_id; + } + + uint find_compress(uint lrg); + +public: + + const LRG_List& names() { + return _names; + } + + uint max_lrg_id() const { + return _max_lrg_id; + } + + void set_max_lrg_id(uint max_lrg_id) { + _max_lrg_id = max_lrg_id; + } + + uint size() const { + return _names.Size(); + } + + uint live_range_id(uint idx) const { + return _names[idx]; + } + + uint live_range_id(const Node *node) const { + return _names[node->_idx]; + } + + uint uf_live_range_id(uint lrg_id) const { + return _uf_map[lrg_id]; + } + + void map(uint idx, uint lrg_id) { + _names.map(idx, lrg_id); + } + + void uf_map(uint dst_lrg_id, uint src_lrg_id) { + _uf_map.map(dst_lrg_id, src_lrg_id); + } + + void extend(uint idx, uint lrg_id) { + _names.extend(idx, lrg_id); + } + + void uf_extend(uint dst_lrg_id, uint src_lrg_id) { + _uf_map.extend(dst_lrg_id, src_lrg_id); + } + + LiveRangeMap(uint unique) + : _names(unique) + , _uf_map(unique) + , _max_lrg_id(0) {} + + uint find_id( const Node *n ) { + uint retval = live_range_id(n); + assert(retval == find(n),"Invalid node to lidx mapping"); + return retval; + } + + // Reset the Union-Find map to identity + void reset_uf_map(uint max_lrg_id); + + // Make all Nodes map directly to their final live range; no need for + // the Union-Find mapping after this call. + void compress_uf_map_for_nodes(); + + uint find(uint lidx) { + uint uf_lidx = _uf_map[lidx]; + return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx); + } + + // Convert a Node into a Live Range Index - a lidx + uint find(const Node *node) { + uint lidx = live_range_id(node); + uint uf_lidx = _uf_map[lidx]; + return (uf_lidx == lidx) ? uf_lidx : find_compress(node); + } + + // Like Find above, but no path compress, so bad asymptotic behavior + uint find_const(uint lrg) const; + + // Like Find above, but no path compress, so bad asymptotic behavior + uint find_const(const Node *node) const { + if(node->_idx >= _names.Size()) { + return 0; // not mapped, usual for debug dump + } + return find_const(_names[node->_idx]); + } +}; //------------------------------Chaitin---------------------------------------- // Briggs-Chaitin style allocation, mostly. 
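
The LiveRangeMap added above pairs the node-to-live-range table (_names) with a Tarjan-style union-find over live-range ids (_uf_map): find()/find_compress() walk the map to a fixed point and then shorten the chain, while find_const() deliberately skips compression so it can be called from const dump code. A minimal standalone sketch of that path-compressing find, using a hypothetical ToyLiveRangeMap with std::vector storage standing in for LRG_List (an illustration only, not HotSpot code):

#include <cstdint>
#include <vector>

// Sketch: the union-find idea behind LiveRangeMap, with assumed names.
struct ToyLiveRangeMap {
  std::vector<uint32_t> uf;   // plays the role of _uf_map

  explicit ToyLiveRangeMap(uint32_t max_lrg) : uf(max_lrg) {
    for (uint32_t i = 0; i < max_lrg; i++) {
      uf[i] = i;              // reset_uf_map(): start as the identity mapping
    }
  }

  // Union-find with path compression (cf. find_compress).
  uint32_t find(uint32_t lrg) {
    uint32_t root = lrg;
    while (uf[root] != root) {   // scan the chain of equivalences
      root = uf[root];           // until a fixed point is reached
    }
    while (uf[lrg] != root) {    // second pass: compress the chain
      uint32_t next = uf[lrg];
      uf[lrg] = root;
      lrg = next;
    }
    return root;
  }

  // Union two live ranges, keeping the smaller id as the representative,
  // mirroring the "always union smaller" asserts in the allocator.
  void unite(uint32_t a, uint32_t b) {
    uint32_t ra = find(a);
    uint32_t rb = find(b);
    if (ra == rb) return;
    if (ra < rb) uf[rb] = ra; else uf[ra] = rb;
  }
};

Keeping the smaller id as the root is what lets compress_uf_map_for_nodes() later rewrite every node's entry to its final, fully compressed live-range id in a single pass.
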
@@ -286,7 +386,6 @@ class PhaseChaitin : public PhaseRegAlloc { int _trip_cnt; int _alternate; - uint _maxlrg; // Max live range number LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); } PhaseLive *_live; // Liveness, used in the interference graph PhaseIFG *_ifg; // Interference graph (for original chunk) @@ -294,16 +393,6 @@ class PhaseChaitin : public PhaseRegAlloc { VectorSet _spilled_once; // Nodes that have been spilled VectorSet _spilled_twice; // Nodes that have been spilled twice - LRG_List _names; // Map from Nodes to Live RanGes - - // Union-find map. Declared as a short for speed. - // Indexed by live-range number, it returns the compacted live-range number - LRG_List _uf_map; - // Reset the Union-Find map to identity - void reset_uf_map( uint maxlrg ); - // Remove the need for the Union-Find mapping - void compress_uf_map_for_nodes( ); - // Combine the Live Range Indices for these 2 Nodes into a single live // range. Future requests for any Node in either live range will // return the live range index for the combined live range. @@ -322,7 +411,34 @@ class PhaseChaitin : public PhaseRegAlloc { // Helper functions for Split() uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray splits, int slidx ); uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray splits, int slidx ); - int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ); + + bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) { + bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id()); + + if(found_projs) { + uint max_lrg_id = lrg_map.max_lrg_id(); + lrg_map.set_max_lrg_id(max_lrg_id + 1); + } + + return found_projs; + } + + //------------------------------clone_projs------------------------------------ + // After cloning some rematerialized instruction, clone any MachProj's that + // follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants + // use G3 as an address temp. + bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) { + bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id); + + if(found_projs) { + max_lrg_id++; + } + + return found_projs; + } + + bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id); + Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru); // True if lidx is used before any real register is def'd in the block @@ -349,20 +465,11 @@ public: PhaseChaitin( uint unique, PhaseCFG &cfg, Matcher &matcher ); ~PhaseChaitin() {} - // Convert a Node into a Live Range Index - a lidx - uint Find( const Node *n ) { - uint lidx = n2lidx(n); - uint uf_lidx = _uf_map[lidx]; - return (uf_lidx == lidx) ? uf_lidx : Find_compress(n); - } - uint Find_const( uint lrg ) const; - uint Find_const( const Node *n ) const; + LiveRangeMap _lrg_map; // Do all the real work of allocate void Register_Allocate(); - uint n2lidx( const Node *n ) const { return _names[n->_idx]; } - float high_frequency_lrg() const { return _high_frequency_lrg; } #ifndef PRODUCT @@ -374,18 +481,6 @@ private: // all inputs to a PhiNode, effectively coalescing live ranges. Insert // copies as needed. void de_ssa(); - uint Find_compress( const Node *n ); - uint Find( uint lidx ) { - uint uf_lidx = _uf_map[lidx]; - return (uf_lidx == lidx) ? 
uf_lidx : Find_compress(lidx); - } - uint Find_compress( uint lidx ); - - uint Find_id( const Node *n ) { - uint retval = n2lidx(n); - assert(retval == Find(n),"Invalid node to lidx mapping"); - return retval; - } // Add edge between reg and everything in the vector. // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask diff --git a/hotspot/src/share/vm/opto/coalesce.cpp b/hotspot/src/share/vm/opto/coalesce.cpp index 0811ea061fe..74618fb410c 100644 --- a/hotspot/src/share/vm/opto/coalesce.cpp +++ b/hotspot/src/share/vm/opto/coalesce.cpp @@ -34,160 +34,12 @@ #include "opto/matcher.hpp" #include "opto/regmask.hpp" -//============================================================================= -//------------------------------reset_uf_map----------------------------------- -void PhaseChaitin::reset_uf_map( uint maxlrg ) { - _maxlrg = maxlrg; - // Force the Union-Find mapping to be at least this large - _uf_map.extend(_maxlrg,0); - // Initialize it to be the ID mapping. - for( uint i=0; i<_maxlrg; i++ ) - _uf_map.map(i,i); -} - -//------------------------------compress_uf_map-------------------------------- -// Make all Nodes map directly to their final live range; no need for -// the Union-Find mapping after this call. -void PhaseChaitin::compress_uf_map_for_nodes( ) { - // For all Nodes, compress mapping - uint unique = _names.Size(); - for( uint i=0; i_idx]); - _names.map(n->_idx,lrg); - return lrg; -} - -//------------------------------Find_const------------------------------------- -// Like Find above, but no path compress, so bad asymptotic behavior -uint PhaseChaitin::Find_const( uint lrg ) const { - if( !lrg ) return lrg; // Ignore the zero LRG - // Off the end? This happens during debugging dumps when you got - // brand new live ranges but have not told the allocator yet. - if( lrg >= _maxlrg ) return lrg; - uint next = _uf_map[lrg]; - while( next != lrg ) { // Scan chain of equivalences - assert( next < lrg, "always union smaller" ); - lrg = next; // until find a fixed-point - next = _uf_map[lrg]; - } - return next; -} - -//------------------------------Find------------------------------------------- -// Like Find above, but no path compress, so bad asymptotic behavior -uint PhaseChaitin::Find_const( const Node *n ) const { - if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump - return Find_const( _names[n->_idx] ); -} - -//------------------------------Union------------------------------------------ -// union 2 sets together. -void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) { - uint src = Find(src_n); - uint dst = Find(dst_n); - assert( src, "" ); - assert( dst, "" ); - assert( src < _maxlrg, "oob" ); - assert( dst < _maxlrg, "oob" ); - assert( src < dst, "always union smaller" ); - _uf_map.map(dst,src); -} - -//------------------------------new_lrg---------------------------------------- -void PhaseChaitin::new_lrg( const Node *x, uint lrg ) { - // Make the Node->LRG mapping - _names.extend(x->_idx,lrg); - // Make the Union-Find mapping an identity function - _uf_map.extend(lrg,lrg); -} - -//------------------------------clone_projs------------------------------------ -// After cloning some rematerialized instruction, clone any MachProj's that -// follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants -// use G3 as an address temp. 
-int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) { - Block *bcon = _cfg._bbs[con->_idx]; - uint cindex = bcon->find_node(con); - Node *con_next = bcon->_nodes[cindex+1]; - if( con_next->in(0) != con || !con_next->is_MachProj() ) - return false; // No MachProj's follow - - // Copy kills after the cloned constant - Node *kills = con_next->clone(); - kills->set_req( 0, copy ); - b->_nodes.insert( idx, kills ); - _cfg._bbs.map( kills->_idx, b ); - new_lrg( kills, maxlrg++ ); - return true; -} - -//------------------------------compact---------------------------------------- -// Renumber the live ranges to compact them. Makes the IFG smaller. -void PhaseChaitin::compact() { - // Current the _uf_map contains a series of short chains which are headed - // by a self-cycle. All the chains run from big numbers to little numbers. - // The Find() call chases the chains & shortens them for the next Find call. - // We are going to change this structure slightly. Numbers above a moving - // wave 'i' are unchanged. Numbers below 'j' point directly to their - // compacted live range with no further chaining. There are no chains or - // cycles below 'i', so the Find call no longer works. - uint j=1; - uint i; - for( i=1; i < _maxlrg; i++ ) { - uint lr = _uf_map[i]; - // Ignore unallocated live ranges - if( !lr ) continue; - assert( lr <= i, "" ); - _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]); - } - if( false ) // PrintOptoCompactLiveRanges - printf("Compacted %d LRs from %d\n",i-j,i); - // Now change the Node->LR mapping to reflect the compacted names - uint unique = _names.Size(); - for( i=0; iprint("L%d/N%d ",r,n->_idx); } @@ -235,9 +87,9 @@ void PhaseCoalesce::dump() const { //------------------------------combine_these_two------------------------------ // Combine the live ranges def'd by these 2 Nodes. N2 is an input to N1. -void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) { - uint lr1 = _phc.Find(n1); - uint lr2 = _phc.Find(n2); +void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) { + uint lr1 = _phc._lrg_map.find(n1); + uint lr2 = _phc._lrg_map.find(n2); if( lr1 != lr2 && // Different live ranges already AND !_phc._ifg->test_edge_sq( lr1, lr2 ) ) { // Do not interfere LRG *lrg1 = &_phc.lrgs(lr1); @@ -306,14 +158,18 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui // I am about to clobber the dst_name, so the copy must be inserted // after the last use. Last use is really first-use on a backwards scan. uint i = b->end_idx()-1; - while( 1 ) { + while(1) { Node *n = b->_nodes[i]; // Check for end of virtual copies; this is also the end of the // parallel renaming effort. - if( n->_idx < _unique ) break; + if (n->_idx < _unique) { + break; + } uint idx = n->is_Copy(); assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" ); - if( idx && _phc.Find(n->in(idx)) == dst_name ) break; + if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) { + break; + } i--; } uint last_use_idx = i; @@ -324,24 +180,29 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui // There can be only 1 kill that exits any block and that is // the last kill. Thus it is the first kill on a backwards scan. i = b->end_idx()-1; - while( 1 ) { + while (1) { Node *n = b->_nodes[i]; // Check for end of virtual copies; this is also the end of the // parallel renaming effort. 
- if( n->_idx < _unique ) break; + if (n->_idx < _unique) { + break; + } assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" ); - if( _phc.Find(n) == src_name ) { + if (_phc._lrg_map.find(n) == src_name) { kill_src_idx = i; break; } i--; } // Need a temp? Last use of dst comes after the kill of src? - if( last_use_idx >= kill_src_idx ) { + if (last_use_idx >= kill_src_idx) { // Need to break a cycle with a temp uint idx = copy->is_Copy(); Node *tmp = copy->clone(); - _phc.new_lrg(tmp,_phc._maxlrg++); + uint max_lrg_id = _phc._lrg_map.max_lrg_id(); + _phc.new_lrg(tmp, max_lrg_id); + _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); + // Insert new temp between copy and source tmp ->set_req(idx,copy->in(idx)); copy->set_req(idx,tmp); @@ -359,14 +220,14 @@ void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, ui void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // We do LRGs compressing and fix a liveout data only here since the other // place in Split() is guarded by the assert which we never hit. - _phc.compress_uf_map_for_nodes(); + _phc._lrg_map.compress_uf_map_for_nodes(); // Fix block's liveout data for compressed live ranges. - for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) { - uint compressed_lrg = _phc.Find(lrg); - if( lrg != compressed_lrg ) { - for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) { + for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) { + uint compressed_lrg = _phc._lrg_map.find(lrg); + if (lrg != compressed_lrg) { + for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) { IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]); - if( liveout->member(lrg) ) { + if (liveout->member(lrg)) { liveout->remove(lrg); liveout->insert(compressed_lrg); } @@ -392,8 +253,9 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { uint cidx = copy->is_Copy(); if( cidx ) { Node *def = copy->in(cidx); - if( _phc.Find(copy) == _phc.Find(def) ) - n->set_req(k,def); + if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) { + n->set_req(k, def); + } } } @@ -401,7 +263,7 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { uint cidx = n->is_Copy(); if( cidx ) { Node *def = n->in(cidx); - if( _phc.Find(n) == _phc.Find(def) ) { + if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) { n->replace_by(def); n->set_req(cidx,NULL); b->_nodes.remove(l); @@ -410,16 +272,18 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { } } - if( n->is_Phi() ) { + if (n->is_Phi()) { // Get the chosen name for the Phi - uint phi_name = _phc.Find( n ); + uint phi_name = _phc._lrg_map.find(n); // Ignore the pre-allocated specials - if( !phi_name ) continue; + if (!phi_name) { + continue; + } // Check for mismatch inputs to Phi - for( uint j = 1; jin(j); - uint src_name = _phc.Find(m); - if( src_name != phi_name ) { + uint src_name = _phc._lrg_map.find(m); + if (src_name != phi_name) { Block *pred = _phc._cfg._bbs[b->pred(j)->_idx]; Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); @@ -430,18 +294,18 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // Insert the copy in the predecessor basic block pred->add_inst(copy); // Copy any flags as well - _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg ); + _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map); } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode(m,*rm,*rm); + copy = new (C) 
MachSpillCopyNode(m, *rm, *rm); // Find a good place to insert. Kinda tricky, use a subroutine insert_copy_with_overlap(pred,copy,phi_name,src_name); } // Insert the copy in the use-def chain - n->set_req( j, copy ); + n->set_req(j, copy); _phc._cfg._bbs.map( copy->_idx, pred ); // Extend ("register allocate") the names array for the copy. - _phc._names.extend( copy->_idx, phi_name ); + _phc._lrg_map.extend(copy->_idx, phi_name); } // End of if Phi names do not match } // End of for all inputs to Phi } else { // End of if Phi @@ -450,39 +314,40 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { uint idx; if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) { // Get the chosen name for the Node - uint name = _phc.Find( n ); - assert( name, "no 2-address specials" ); + uint name = _phc._lrg_map.find(n); + assert (name, "no 2-address specials"); // Check for name mis-match on the 2-address input Node *m = n->in(idx); - if( _phc.Find(m) != name ) { + if (_phc._lrg_map.find(m) != name) { Node *copy; assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach"); // At this point it is unsafe to extend live ranges (6550579). // Rematerialize only constants as we do for Phi above. - if( m->is_Mach() && m->as_Mach()->is_Con() && - m->as_Mach()->rematerialize() ) { + if(m->is_Mach() && m->as_Mach()->is_Con() && + m->as_Mach()->rematerialize()) { copy = m->clone(); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); - if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) ) + b->_nodes.insert(l++, copy); + if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) { l++; + } } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode( m, *rm, *rm ); + copy = new (C) MachSpillCopyNode(m, *rm, *rm); // Insert the copy in the basic block, just before us - b->_nodes.insert( l++, copy ); + b->_nodes.insert(l++, copy); } // Insert the copy in the use-def chain - n->set_req(idx, copy ); + n->set_req(idx, copy); // Extend ("register allocate") the names array for the copy. - _phc._names.extend( copy->_idx, name ); + _phc._lrg_map.extend(copy->_idx, name); _phc._cfg._bbs.map( copy->_idx, b ); } } // End of is two-adr // Insert a copy at a debug use for a lrg which has high frequency - if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) { + if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) { // Walk the debug inputs to the node and check for lrg freq JVMState* jvms = n->jvms(); uint debug_start = jvms ? jvms->debug_start() : 999999; @@ -490,9 +355,11 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) { // Do not split monitors; they are only needed for debug table // entries and need no code. - if( jvms->is_monitor_use(inpidx) ) continue; + if (jvms->is_monitor_use(inpidx)) { + continue; + } Node *inp = n->in(inpidx); - uint nidx = _phc.n2lidx(inp); + uint nidx = _phc._lrg_map.live_range_id(inp); LRG &lrg = lrgs(nidx); // If this lrg has a high frequency use/def @@ -519,8 +386,10 @@ void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) { // Insert the copy in the basic block, just before us b->_nodes.insert( l++, copy ); // Extend ("register allocate") the names array for the copy. 
- _phc.new_lrg( copy, _phc._maxlrg++ ); - _phc._cfg._bbs.map( copy->_idx, b ); + uint max_lrg_id = _phc._lrg_map.max_lrg_id(); + _phc.new_lrg(copy, max_lrg_id); + _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1); + _phc._cfg._bbs.map(copy->_idx, b); //tty->print_cr("Split a debug use in Aggressive Coalesce"); } // End of if high frequency use/def } // End of for all debug inputs @@ -583,17 +452,17 @@ void PhaseAggressiveCoalesce::coalesce( Block *b ) { uint idx; // 2-address instructions have a virtual Copy matching their input // to their output - if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) { + if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) { MachNode *mach = n->as_Mach(); - combine_these_two( mach, mach->in(idx) ); + combine_these_two(mach, mach->in(idx)); } } // End of for all instructions in block } //============================================================================= //------------------------------PhaseConservativeCoalesce---------------------- -PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) { - _ulr.initialize(_phc._maxlrg); +PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) { + _ulr.initialize(_phc._lrg_map.max_lrg_id()); } //------------------------------verify----------------------------------------- @@ -673,10 +542,14 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, // Else work back one in copy chain prev_copy = prev_copy->in(prev_copy->is_Copy()); } else { // Else collect interferences - uint lidx = _phc.Find(x); + uint lidx = _phc._lrg_map.find(x); // Found another def of live-range being stretched? - if( lidx == lr1 ) return max_juint; - if( lidx == lr2 ) return max_juint; + if(lidx == lr1) { + return max_juint; + } + if(lidx == lr2) { + return max_juint; + } // If we attempt to coalesce across a bound def if( lrgs(lidx).is_bound() ) { @@ -751,33 +624,43 @@ static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) { // See if I can coalesce a series of multiple copies together. I need the // final dest copy and the original src copy. They can be the same Node. // Compute the compatible register masks. -bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) { +bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) { - if( !dst_copy->is_SpillCopy() ) return false; - if( !src_copy->is_SpillCopy() ) return false; + if (!dst_copy->is_SpillCopy()) { + return false; + } + if (!src_copy->is_SpillCopy()) { + return false; + } Node *src_def = src_copy->in(src_copy->is_Copy()); - uint lr1 = _phc.Find(dst_copy); - uint lr2 = _phc.Find(src_def ); + uint lr1 = _phc._lrg_map.find(dst_copy); + uint lr2 = _phc._lrg_map.find(src_def); // Same live ranges already? - if( lr1 == lr2 ) return false; + if (lr1 == lr2) { + return false; + } // Interfere? - if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false; + if (_phc._ifg->test_edge_sq(lr1, lr2)) { + return false; + } // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK. - if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast + if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast return false; + } // Coalescing between an aligned live range and a mis-aligned live range? // No, no! Alignment changes how we count degree. 
- if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj ) + if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) { return false; + } // Sort; use smaller live-range number Node *lr1_node = dst_copy; Node *lr2_node = src_def; - if( lr1 > lr2 ) { + if (lr1 > lr2) { uint tmp = lr1; lr1 = lr2; lr2 = tmp; lr1_node = src_def; lr2_node = dst_copy; } @@ -916,17 +799,5 @@ void PhaseConservativeCoalesce::coalesce( Block *b ) { PhaseChaitin::_conserv_coalesce++; // Collect stats on success continue; } - - /* do not attempt pairs. About 1/2 of all pairs can be removed by - post-alloc. The other set are too few to bother. - Node *copy2 = copy1->in(idx1); - uint idx2 = copy2->is_Copy(); - if( !idx2 ) continue; - if( copy_copy(copy1,copy2,b,i) ) { - i--; // Retry, same location in block - PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success - continue; - } - */ } } diff --git a/hotspot/src/share/vm/opto/coalesce.hpp b/hotspot/src/share/vm/opto/coalesce.hpp index 904ce7f679f..a6359af101c 100644 --- a/hotspot/src/share/vm/opto/coalesce.hpp +++ b/hotspot/src/share/vm/opto/coalesce.hpp @@ -41,23 +41,25 @@ protected: public: // Coalesce copies - PhaseCoalesce( PhaseChaitin &chaitin ) : Phase(Coalesce), _phc(chaitin) { } + PhaseCoalesce(PhaseChaitin &phc) + : Phase(Coalesce) + , _phc(phc) {} virtual void verify() = 0; // Coalesce copies - void coalesce_driver( ); + void coalesce_driver(); // Coalesce copies in this block - virtual void coalesce( Block *b ) = 0; + virtual void coalesce(Block *b) = 0; // Attempt to coalesce live ranges defined by these 2 - void combine_these_two( Node *n1, Node *n2 ); + void combine_these_two(Node *n1, Node *n2); - LRG &lrgs( uint lidx ) { return _phc.lrgs(lidx); } + LRG &lrgs(uint lidx) { return _phc.lrgs(lidx); } #ifndef PRODUCT // Dump internally name - void dump( Node *n ) const; + void dump(Node *n) const; // Dump whole shebang void dump() const; #endif diff --git a/hotspot/src/share/vm/opto/compile.cpp b/hotspot/src/share/vm/opto/compile.cpp index 37b4b4cd5c6..dd32b77f7b7 100644 --- a/hotspot/src/share/vm/opto/compile.cpp +++ b/hotspot/src/share/vm/opto/compile.cpp @@ -2127,22 +2127,19 @@ void Compile::Code_Gen() { } NOT_PRODUCT( verify_graph_edges(); ) - PhaseChaitin regalloc(unique(),cfg,m); + PhaseChaitin regalloc(unique(), cfg, m); _regalloc = ®alloc; { TracePhase t2("regalloc", &_t_registerAllocation, true); - // Perform any platform dependent preallocation actions. This is used, - // for example, to avoid taking an implicit null pointer exception - // using the frame pointer on win95. - _regalloc->pd_preallocate_hook(); - // Perform register allocation. After Chaitin, use-def chains are // no longer accurate (at spill code) and so must be ignored. // Node->LRG->reg mappings are still accurate. _regalloc->Register_Allocate(); // Bail out if the allocator builds too many nodes - if (failing()) return; + if (failing()) { + return; + } } // Prior to register allocation we kept empty basic blocks in case the @@ -2160,9 +2157,6 @@ void Compile::Code_Gen() { cfg.fixup_flow(); } - // Perform any platform dependent postallocation verifications. 
- debug_only( _regalloc->pd_postallocate_verify_hook(); ) - // Apply peephole optimizations if( OptoPeephole ) { NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); ) diff --git a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp index 1f811b8f4b8..e6909054324 100644 --- a/hotspot/src/share/vm/opto/idealGraphPrinter.cpp +++ b/hotspot/src/share/vm/opto/idealGraphPrinter.cpp @@ -616,7 +616,7 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) { buffer[0] = 0; _chaitin->dump_register(node, buffer); print_prop("reg", buffer); - print_prop("lrg", _chaitin->n2lidx(node)); + print_prop("lrg", _chaitin->_lrg_map.live_range_id(node)); } node->_in_dump_cnt--; diff --git a/hotspot/src/share/vm/opto/ifg.cpp b/hotspot/src/share/vm/opto/ifg.cpp index c40265214dc..96c0957cffb 100644 --- a/hotspot/src/share/vm/opto/ifg.cpp +++ b/hotspot/src/share/vm/opto/ifg.cpp @@ -286,15 +286,14 @@ void PhaseIFG::verify( const PhaseChaitin *pc ) const { uint idx; uint last = 0; while ((idx = elements.next()) != 0) { - assert( idx != i, "Must have empty diagonal"); - assert( pc->Find_const(idx) == idx, "Must not need Find" ); - assert( _adjs[idx].member(i), "IFG not square" ); - assert( !(*_yanked)[idx], "No yanked neighbors" ); - assert( last < idx, "not sorted increasing"); + assert(idx != i, "Must have empty diagonal"); + assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find"); + assert(_adjs[idx].member(i), "IFG not square"); + assert(!(*_yanked)[idx], "No yanked neighbors"); + assert(last < idx, "not sorted increasing"); last = idx; } - assert( !lrgs(i)._degree_valid || - effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" ); + assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong"); } } #endif @@ -342,10 +341,10 @@ void PhaseChaitin::build_ifg_virtual( ) { Node *n = b->_nodes[j-1]; // Get value being defined - uint r = n2lidx(n); + uint r = _lrg_map.live_range_id(n); // Some special values do not allocate - if( r ) { + if (r) { // Remove from live-out set liveout->remove(r); @@ -353,16 +352,19 @@ void PhaseChaitin::build_ifg_virtual( ) { // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. 
uint idx = n->is_Copy(); - if( idx ) liveout->remove( n2lidx(n->in(idx)) ); + if (idx) { + liveout->remove(_lrg_map.live_range_id(n->in(idx))); + } // Interfere with everything live - interfere_with_live( r, liveout ); + interfere_with_live(r, liveout); } // Make all inputs live - if( !n->is_Phi() ) { // Phi function uses come from prior block - for( uint k = 1; k < n->req(); k++ ) - liveout->insert( n2lidx(n->in(k)) ); + if (!n->is_Phi()) { // Phi function uses come from prior block + for(uint k = 1; k < n->req(); k++) { + liveout->insert(_lrg_map.live_range_id(n->in(k))); + } } // 2-address instructions always have the defined value live @@ -394,11 +396,12 @@ void PhaseChaitin::build_ifg_virtual( ) { n->set_req( 2, tmp ); } // Defined value interferes with all inputs - uint lidx = n2lidx(n->in(idx)); - for( uint k = 1; k < n->req(); k++ ) { - uint kidx = n2lidx(n->in(k)); - if( kidx != lidx ) - _ifg->add_edge( r, kidx ); + uint lidx = _lrg_map.live_range_id(n->in(idx)); + for (uint k = 1; k < n->req(); k++) { + uint kidx = _lrg_map.live_range_id(n->in(k)); + if (kidx != lidx) { + _ifg->add_edge(r, kidx); + } } } } // End of forall instructions in block @@ -542,10 +545,10 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { Node *n = b->_nodes[j - 1]; // Get value being defined - uint r = n2lidx(n); + uint r = _lrg_map.live_range_id(n); // Some special values do not allocate - if( r ) { + if(r) { // A DEF normally costs block frequency; rematerialized values are // removed from the DEF sight, so LOWER costs here. lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq; @@ -556,9 +559,11 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { Node *def = n->in(0); if( !n->is_Proj() || // Could also be a flags-projection of a dead ADD or such. - (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) { + (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) { b->_nodes.remove(j - 1); - if( lrgs(r)._def == n ) lrgs(r)._def = 0; + if (lrgs(r)._def == n) { + lrgs(r)._def = 0; + } n->disconnect_inputs(NULL, C); _cfg._bbs.map(n->_idx,NULL); n->replace_by(C->top()); @@ -570,7 +575,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { // Fat-projections kill many registers which cannot be used to // hold live ranges. - if( lrgs(r)._fat_proj ) { + if (lrgs(r)._fat_proj) { // Count the int-only registers RegMask itmp = lrgs(r).mask(); itmp.AND(*Matcher::idealreg2regmask[Op_RegI]); @@ -636,12 +641,12 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { // Copies do not define a new value and so do not interfere. // Remove the copies source from the liveout set before interfering. uint idx = n->is_Copy(); - if( idx ) { - uint x = n2lidx(n->in(idx)); - if( liveout.remove( x ) ) { + if (idx) { + uint x = _lrg_map.live_range_id(n->in(idx)); + if (liveout.remove(x)) { lrgs(x)._area -= cost; // Adjust register pressure. - lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index ); + lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index); assert( pressure[0] == count_int_pressure (&liveout), "" ); assert( pressure[1] == count_float_pressure(&liveout), "" ); } @@ -727,18 +732,21 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) { // the flags and assumes it's dead. This keeps the (useless) // flag-setting behavior alive while also keeping the (useful) // memory update effect. - for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) { + for (uint k = ((n->Opcode() == Op_SCMemProj) ? 
0:1); k < n->req(); k++) { Node *def = n->in(k); - uint x = n2lidx(def); - if( !x ) continue; + uint x = _lrg_map.live_range_id(def); + if (!x) { + continue; + } LRG &lrg = lrgs(x); // No use-side cost for spilling debug info - if( k < debug_start ) + if (k < debug_start) { // A USE costs twice block frequency (once for the Load, once // for a Load-delay). Rematerialized uses only cost once. lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq)); + } // It is live now - if( liveout.insert( x ) ) { + if (liveout.insert(x)) { // Newly live things assumed live from here to top of block lrg._area += cost; // Adjust register pressure diff --git a/hotspot/src/share/vm/opto/live.cpp b/hotspot/src/share/vm/opto/live.cpp index 5da41891583..773dd1ea2e6 100644 --- a/hotspot/src/share/vm/opto/live.cpp +++ b/hotspot/src/share/vm/opto/live.cpp @@ -44,7 +44,7 @@ // block is put on the worklist. // The locally live-in stuff is computed once and added to predecessor // live-out sets. This separate compilation is done in the outer loop below. -PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { +PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { } void PhaseLive::compute(uint maxlrg) { diff --git a/hotspot/src/share/vm/opto/live.hpp b/hotspot/src/share/vm/opto/live.hpp index 8a266c19067..c2ebe758cf8 100644 --- a/hotspot/src/share/vm/opto/live.hpp +++ b/hotspot/src/share/vm/opto/live.hpp @@ -80,7 +80,7 @@ class PhaseLive : public Phase { Block_List *_worklist; // Worklist for iterative solution const PhaseCFG &_cfg; // Basic blocks - LRG_List &_names; // Mapping from Nodes to live ranges + const LRG_List &_names; // Mapping from Nodes to live ranges uint _maxlrg; // Largest live-range number Arena *_arena; @@ -91,7 +91,7 @@ class PhaseLive : public Phase { void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ); public: - PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ); + PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena); ~PhaseLive() {} // Compute liveness info void compute(uint maxlrg); diff --git a/hotspot/src/share/vm/opto/postaloc.cpp b/hotspot/src/share/vm/opto/postaloc.cpp index a27145b5c7b..c1b3fdbd231 100644 --- a/hotspot/src/share/vm/opto/postaloc.cpp +++ b/hotspot/src/share/vm/opto/postaloc.cpp @@ -56,7 +56,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const { int i; for( i=0; i < limit; i++ ) { if( def->is_Proj() && def->in(0)->is_Start() && - _matcher.is_save_on_entry(lrgs(n2lidx(def)).reg()) ) + _matcher.is_save_on_entry(lrgs(_lrg_map.live_range_id(def)).reg())) return true; // Direct use of callee-save proj if( def->is_Copy() ) // Copies carry value through def = def->in(def->is_Copy()); @@ -83,7 +83,7 @@ int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_ // Count 1 if deleting an instruction from the current block if( oldb == current_block ) blk_adjust++; _cfg._bbs.map(old->_idx,NULL); - OptoReg::Name old_reg = lrgs(n2lidx(old)).reg(); + OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg(); if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available? 
value->map(old_reg,NULL); // Yank from value/regnd maps regnd->map(old_reg,NULL); // This register's value is now unknown @@ -164,7 +164,7 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre // Not every pair of physical registers are assignment compatible, // e.g. on sparc floating point registers are not assignable to integer // registers. - const LRG &def_lrg = lrgs(n2lidx(def)); + const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def)); OptoReg::Name def_reg = def_lrg.reg(); const RegMask &use_mask = n->in_RegMask(idx); bool can_use = ( RegMask::can_represent(def_reg) ? (use_mask.Member(def_reg) != 0) @@ -209,11 +209,12 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre // Skip through any number of copies (that don't mod oop-i-ness) Node *PhaseChaitin::skip_copies( Node *c ) { int idx = c->is_Copy(); - uint is_oop = lrgs(n2lidx(c))._is_oop; + uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop; while (idx != 0) { guarantee(c->in(idx) != NULL, "must not resurrect dead copy"); - if (lrgs(n2lidx(c->in(idx)))._is_oop != is_oop) + if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) { break; // casting copy, not the same value + } c = c->in(idx); idx = c->is_Copy(); } @@ -225,8 +226,8 @@ Node *PhaseChaitin::skip_copies( Node *c ) { int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List ®nd, bool can_change_regs ) { int blk_adjust = 0; - uint nk_idx = n2lidx(n->in(k)); - OptoReg::Name nk_reg = lrgs(nk_idx ).reg(); + uint nk_idx = _lrg_map.live_range_id(n->in(k)); + OptoReg::Name nk_reg = lrgs(nk_idx).reg(); // Remove obvious same-register copies Node *x = n->in(k); @@ -234,9 +235,13 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v while( (idx=x->is_Copy()) != 0 ) { Node *copy = x->in(idx); guarantee(copy != NULL, "must not resurrect dead copy"); - if( lrgs(n2lidx(copy)).reg() != nk_reg ) break; + if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) { + break; + } blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd); - if( n->in(k) != copy ) break; // Failed for some cutout? + if (n->in(k) != copy) { + break; // Failed for some cutout? + } x = copy; // Progress, try again } @@ -256,7 +261,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v if (val == x && nk_idx != 0 && regnd[nk_reg] != NULL && regnd[nk_reg] != x && - n2lidx(x) == n2lidx(regnd[nk_reg])) { + _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) { // When rematerialzing nodes and stretching lifetimes, the // allocator will reuse the original def for multidef LRG instead // of the current reaching def because it can't know it's safe to @@ -270,7 +275,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v if (val == x) return blk_adjust; // No progress? int n_regs = RegMask::num_registers(val->ideal_reg()); - uint val_idx = n2lidx(val); + uint val_idx = _lrg_map.live_range_id(val); OptoReg::Name val_reg = lrgs(val_idx).reg(); // See if it happens to already be in the correct register! @@ -499,12 +504,12 @@ void PhaseChaitin::post_allocate_copy_removal() { for( j = 1; j < phi_dex; j++ ) { uint k; Node *phi = b->_nodes[j]; - uint pidx = n2lidx(phi); - OptoReg::Name preg = lrgs(n2lidx(phi)).reg(); + uint pidx = _lrg_map.live_range_id(phi); + OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg(); // Remove copies remaining on edges. Check for junk phi. 
Node *u = NULL; - for( k=1; kreq(); k++ ) { + for (k = 1; k < phi->req(); k++) { Node *x = phi->in(k); if( phi != x && u != x ) // Found a different input u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input @@ -555,10 +560,10 @@ void PhaseChaitin::post_allocate_copy_removal() { // alive and well at the use (or else the allocator fubar'd). Take // advantage of this info to set a reaching def for the use-reg. uint k; - for( k = 1; k < n->req(); k++ ) { + for (k = 1; k < n->req(); k++) { Node *def = n->in(k); // n->in(k) is a USE; def is the DEF for this USE guarantee(def != NULL, "no disconnected nodes at this point"); - uint useidx = n2lidx(def); // useidx is the live range index for this USE + uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE if( useidx ) { OptoReg::Name ureg = lrgs(useidx).reg(); @@ -566,7 +571,7 @@ void PhaseChaitin::post_allocate_copy_removal() { int idx; // Skip occasional useless copy while( (idx=def->is_Copy()) != 0 && def->in(idx) != NULL && // NULL should not happen - ureg == lrgs(n2lidx(def->in(idx))).reg() ) + ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg()) def = def->in(idx); Node *valdef = skip_copies(def); // tighten up val through non-useless copies value.map(ureg,valdef); // record improved reaching-def info @@ -594,8 +599,10 @@ void PhaseChaitin::post_allocate_copy_removal() { j -= elide_copy( n, k, b, value, regnd, two_adr!=k ); // Unallocated Nodes define no registers - uint lidx = n2lidx(n); - if( !lidx ) continue; + uint lidx = _lrg_map.live_range_id(n); + if (!lidx) { + continue; + } // Update the register defined by this instruction OptoReg::Name nreg = lrgs(lidx).reg(); diff --git a/hotspot/src/share/vm/opto/reg_split.cpp b/hotspot/src/share/vm/opto/reg_split.cpp index 1695dee6008..edd614987ea 100644 --- a/hotspot/src/share/vm/opto/reg_split.cpp +++ b/hotspot/src/share/vm/opto/reg_split.cpp @@ -318,9 +318,13 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint for( uint i = 1; i < def->req(); i++ ) { Node *in = def->in(i); // Check for single-def (LRG cannot redefined) - uint lidx = n2lidx(in); - if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy - if (lrgs(lidx).is_singledef()) continue; + uint lidx = _lrg_map.live_range_id(in); + if (lidx >= _lrg_map.max_lrg_id()) { + continue; // Value is a recent spill-copy + } + if (lrgs(lidx).is_singledef()) { + continue; + } Block *b_def = _cfg._bbs[def->_idx]; int idx_def = b_def->find_node(def); @@ -344,26 +348,28 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint if( spill->req() > 1 ) { for( uint i = 1; i < spill->req(); i++ ) { Node *in = spill->in(i); - uint lidx = Find_id(in); + uint lidx = _lrg_map.find_id(in); // Walk backwards thru spill copy node intermediates if (walkThru) { - while ( in->is_SpillCopy() && lidx >= _maxlrg ) { + while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) { in = in->in(1); - lidx = Find_id(in); + lidx = _lrg_map.find_id(in); } - if (lidx < _maxlrg && lrgs(lidx).is_multidef()) { + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) { // walkThru found a multidef LRG, which is unsafe to use, so // just keep the original def used in the clone. 
in = spill->in(i); - lidx = Find_id(in); + lidx = _lrg_map.find_id(in); } } - if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) { + if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) { Node *rdef = Reachblock[lrg2reach[lidx]]; - if( rdef ) spill->set_req(i,rdef); + if (rdef) { + spill->set_req(i, rdef); + } } } } @@ -382,7 +388,7 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint #endif // See if the cloned def kills any flags, and copy those kills as well uint i = insidx+1; - if( clone_projs( b, i, def, spill, maxlrg ) ) { + if( clone_projs( b, i, def, spill, maxlrg) ) { // Adjust the point where we go hi-pressure if( i <= b->_ihrp_index ) b->_ihrp_index++; if( i <= b->_fhrp_index ) b->_fhrp_index++; @@ -424,17 +430,25 @@ bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) { //------------------------------prompt_use--------------------------------- // True if lidx is used before any real register is def'd in the block bool PhaseChaitin::prompt_use( Block *b, uint lidx ) { - if( lrgs(lidx)._was_spilled2 ) return false; + if (lrgs(lidx)._was_spilled2) { + return false; + } // Scan block for 1st use. for( uint i = 1; i <= b->end_idx(); i++ ) { Node *n = b->_nodes[i]; // Ignore PHI use, these can be up or down - if( n->is_Phi() ) continue; - for( uint j = 1; j < n->req(); j++ ) - if( Find_id(n->in(j)) == lidx ) + if (n->is_Phi()) { + continue; + } + for (uint j = 1; j < n->req(); j++) { + if (_lrg_map.find_id(n->in(j)) == lidx) { return true; // Found 1st use! - if( n->out_RegMask().is_NotEmpty() ) return false; + } + } + if (n->out_RegMask().is_NotEmpty()) { + return false; + } } return false; } @@ -464,23 +478,23 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { bool u1, u2, u3; Block *b, *pred; PhiNode *phi; - GrowableArray lidxs(split_arena, _maxlrg, 0, 0); + GrowableArray lidxs(split_arena, maxlrg, 0, 0); // Array of counters to count splits per live range - GrowableArray splits(split_arena, _maxlrg, 0, 0); + GrowableArray splits(split_arena, maxlrg, 0, 0); #define NEW_SPLIT_ARRAY(type, size)\ (type*) split_arena->allocate_bytes((size) * sizeof(type)) //----------Setup Code---------- // Create a convenient mapping from lrg numbers to reaches/leaves indices - uint *lrg2reach = NEW_SPLIT_ARRAY( uint, _maxlrg ); + uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg); // Keep track of DEFS & Phis for later passes defs = new Node_List(); phis = new Node_List(); // Gather info on which LRG's are spilling, and build maps - for( bidx = 1; bidx < _maxlrg; bidx++ ) { - if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) { + for (bidx = 1; bidx < maxlrg; bidx++) { + if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) { assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color"); lrg2reach[bidx] = spill_cnt; spill_cnt++; @@ -629,7 +643,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { break; } // must be looking at a phi - if( Find_id(n1) == lidxs.at(slidx) ) { + if (_lrg_map.find_id(n1) == lidxs.at(slidx)) { // found the necessary phi needs_phi = false; has_phi = true; @@ -651,11 +665,11 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { Reachblock[slidx] = phi; // add node to block & node_to_block mapping - insert_proj( b, insidx++, phi, maxlrg++ ); + insert_proj(b, insidx++, phi, maxlrg++); non_phi++; // Reset new phi's mapping to be the spilling live range - _names.map(phi->_idx, lidx); - assert(Find_id(phi) == lidx,"Bad update on 
Union-Find mapping"); + _lrg_map.map(phi->_idx, lidx); + assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping"); } // end if not found correct phi // Here you have either found or created the Phi, so record it assert(phi != NULL,"Must have a Phi Node here"); @@ -721,12 +735,12 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { for( insidx = 1; insidx <= b->end_idx(); insidx++ ) { Node *n = b->_nodes[insidx]; // Find the defining Node's live range index - uint defidx = Find_id(n); + uint defidx = _lrg_map.find_id(n); uint cnt = n->req(); - if( n->is_Phi() ) { + if (n->is_Phi()) { // Skip phi nodes after removing dead copies. - if( defidx < _maxlrg ) { + if (defidx < _lrg_map.max_lrg_id()) { // Check for useless Phis. These appear if we spill, then // coalesce away copies. Dont touch Phis in spilling live // ranges; they are busy getting modifed in this pass. @@ -744,8 +758,8 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } } assert( u, "at least 1 valid input expected" ); - if( i >= cnt ) { // Found one unique input - assert(Find_id(n) == Find_id(u), "should be the same lrg"); + if (i >= cnt) { // Found one unique input + assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg"); n->replace_by(u); // Then replace with unique input n->disconnect_inputs(NULL, C); b->_nodes.remove(insidx); @@ -793,16 +807,24 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { while( insert_point > 0 ) { Node *n = b->_nodes[insert_point]; // Hit top of block? Quit going backwards - if( n->is_Phi() ) break; + if (n->is_Phi()) { + break; + } // Found a def? Better split after it. - if( n2lidx(n) == lidx ) break; + if (_lrg_map.live_range_id(n) == lidx) { + break; + } // Look for a use uint i; - for( i = 1; i < n->req(); i++ ) - if( n2lidx(n->in(i)) == lidx ) + for( i = 1; i < n->req(); i++ ) { + if (_lrg_map.live_range_id(n->in(i)) == lidx) { break; + } + } // Found a use? Better split after it. - if( i < n->req() ) break; + if (i < n->req()) { + break; + } insert_point--; } uint orig_eidx = b->end_idx(); @@ -812,8 +834,9 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { return 0; } // Spill of NULL check mem op goes into the following block. - if (b->end_idx() > orig_eidx) + if (b->end_idx() > orig_eidx) { insidx++; + } } // This is a new DEF, so update UP UPblock[slidx] = false; @@ -832,13 +855,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } // end if crossing HRP Boundry // If the LRG index is oob, then this is a new spillcopy, skip it. - if( defidx >= _maxlrg ) { + if (defidx >= _lrg_map.max_lrg_id()) { continue; } LRG &deflrg = lrgs(defidx); uint copyidx = n->is_Copy(); // Remove coalesced copy from CFG - if( copyidx && defidx == n2lidx(n->in(copyidx)) ) { + if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) { n->replace_by( n->in(copyidx) ); n->set_req( copyidx, NULL ); b->_nodes.remove(insidx--); @@ -864,13 +887,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // If inpidx > old_last, then one of these new inputs is being // handled. Skip the derived part of the pair, but process // the base like any other input. 
- if( inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED ) { + if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) { continue; // skip derived_debug added below } // Get lidx of input - uint useidx = Find_id(n->in(inpidx)); + uint useidx = _lrg_map.find_id(n->in(inpidx)); // Not a brand-new split, and it is a spill use - if( useidx < _maxlrg && lrgs(useidx).reg() >= LRG::SPILL_REG ) { + if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) { // Check for valid reaching DEF slidx = lrg2reach[useidx]; Node *def = Reachblock[slidx]; @@ -886,7 +909,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { return 0; } - _names.extend(def->_idx,0); + _lrg_map.extend(def->_idx, 0); _cfg._bbs.map(def->_idx,b); n->set_req(inpidx, def); continue; @@ -1186,10 +1209,10 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { // ********** Split Left Over Mem-Mem Moves ********** // Check for mem-mem copies and split them now. Do not do this // to copies about to be spilled; they will be Split shortly. - if( copyidx ) { + if (copyidx) { Node *use = n->in(copyidx); - uint useidx = Find_id(use); - if( useidx < _maxlrg && // This is not a new split + uint useidx = _lrg_map.find_id(use); + if (useidx < _lrg_map.max_lrg_id() && // This is not a new split OptoReg::is_stack(deflrg.reg()) && deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack LRG &uselrg = lrgs(useidx); @@ -1228,7 +1251,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { uint member; IndexSetIterator isi(liveout); while ((member = isi.next()) != 0) { - assert(defidx != Find_const(member), "Live out member has not been compressed"); + assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed"); } #endif Reachblock[slidx] = NULL; @@ -1261,7 +1284,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { assert(phi->is_Phi(),"This list must only contain Phi Nodes"); Block *b = _cfg._bbs[phi->_idx]; // Grab the live range number - uint lidx = Find_id(phi); + uint lidx = _lrg_map.find_id(phi); uint slidx = lrg2reach[lidx]; // Update node to lidx map new_lrg(phi, maxlrg++); @@ -1296,11 +1319,13 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { int insert = pred->end_idx(); while (insert >= 1 && pred->_nodes[insert - 1]->is_SpillCopy() && - Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { + _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { insert--; } - def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false ); - if( !def ) return 0; // Bail out + def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false); + if (!def) { + return 0; // Bail out + } } // Update the Phi's input edge array phi->set_req(i,def); @@ -1316,7 +1341,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { } // End for all inputs to the Phi } // End for all Phi Nodes // Update _maxlrg to save Union asserts - _maxlrg = maxlrg; + _lrg_map.set_max_lrg_id(maxlrg); //----------PASS 3---------- @@ -1328,47 +1353,51 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) { for( uint i = 1; i < phi->req(); i++ ) { // Grab the input node Node *n = phi->in(i); - assert( n, "" ); - uint lidx = Find(n); - uint pidx = Find(phi); - if( lidx < pidx ) + assert(n, "node should exist"); + uint lidx = _lrg_map.find(n); + uint pidx = 
_lrg_map.find(phi); + if (lidx < pidx) { Union(n, phi); - else if( lidx > pidx ) + } + else if(lidx > pidx) { Union(phi, n); + } } // End for all inputs to the Phi Node } // End for all Phi Nodes // Now union all two address instructions - for( insidx = 0; insidx < defs->size(); insidx++ ) { + for (insidx = 0; insidx < defs->size(); insidx++) { // Grab the def n1 = defs->at(insidx); // Set new lidx for DEF & handle 2-addr instructions - if( n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0) ) { - assert( Find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); + if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) { + assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index"); // Union the input and output live ranges - uint lr1 = Find(n1); - uint lr2 = Find(n1->in(twoidx)); - if( lr1 < lr2 ) + uint lr1 = _lrg_map.find(n1); + uint lr2 = _lrg_map.find(n1->in(twoidx)); + if (lr1 < lr2) { Union(n1, n1->in(twoidx)); - else if( lr1 > lr2 ) + } + else if (lr1 > lr2) { Union(n1->in(twoidx), n1); + } } // End if two address } // End for all defs // DEBUG #ifdef ASSERT // Validate all live range index assignments - for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) { + for (bidx = 0; bidx < _cfg._num_blocks; bidx++) { b = _cfg._blocks[bidx]; - for( insidx = 0; insidx <= b->end_idx(); insidx++ ) { + for (insidx = 0; insidx <= b->end_idx(); insidx++) { Node *n = b->_nodes[insidx]; - uint defidx = Find(n); - assert(defidx < _maxlrg,"Bad live range index in Split"); + uint defidx = _lrg_map.find(n); + assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split"); assert(defidx < maxlrg,"Bad live range index in Split"); } } // Issue a warning if splitting made no progress int noprogress = 0; - for( slidx = 0; slidx < spill_cnt; slidx++ ) { - if( PrintOpto && WizardMode && splits.at(slidx) == 0 ) { + for (slidx = 0; slidx < spill_cnt; slidx++) { + if (PrintOpto && WizardMode && splits.at(slidx) == 0) { tty->print_cr("Failed to split live range %d", lidxs.at(slidx)); //BREAKPOINT; } diff --git a/hotspot/src/share/vm/opto/regalloc.hpp b/hotspot/src/share/vm/opto/regalloc.hpp index d0a993e57c4..9bea94be5de 100644 --- a/hotspot/src/share/vm/opto/regalloc.hpp +++ b/hotspot/src/share/vm/opto/regalloc.hpp @@ -113,7 +113,7 @@ public: OptoReg::Name offset2reg( int stk_offset ) const; // Get the register encoding associated with the Node - int get_encode( const Node *n ) const { + int get_encode(const Node *n) const { assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array"); OptoReg::Name first = _node_regs[n->_idx].first(); OptoReg::Name second = _node_regs[n->_idx].second(); @@ -122,15 +122,6 @@ public: return Matcher::_regEncode[first]; } - // Platform dependent hook for actions prior to allocation - void pd_preallocate_hook(); - -#ifdef ASSERT - // Platform dependent hook for verification after allocation. Will - // only get called when compiling with asserts. 
- void pd_postallocate_verify_hook(); -#endif - #ifndef PRODUCT static int _total_framesize; static int _max_framesize; diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp index b00f6825ddd..fff9a6180ae 100644 --- a/hotspot/src/share/vm/runtime/vmStructs.cpp +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp @@ -1115,7 +1115,6 @@ typedef BinaryTreeDictionary MetablockTreeDictionary; c2_nonstatic_field(PhaseChaitin, _lo_stk_degree, uint) \ c2_nonstatic_field(PhaseChaitin, _hi_degree, uint) \ c2_nonstatic_field(PhaseChaitin, _simplified, uint) \ - c2_nonstatic_field(PhaseChaitin, _maxlrg, uint) \ \ c2_nonstatic_field(Block, _nodes, Node_List) \ c2_nonstatic_field(Block, _succs, Block_Array) \ From 33e3f6b59bc4fe0b908e150639661ffc7defe02a Mon Sep 17 00:00:00 2001 From: Igor Ignatyev Date: Tue, 16 Apr 2013 10:04:01 -0700 Subject: [PATCH 020/162] 8011971: WB API doesn't accept j.l.reflect.Constructor Reviewed-by: kvn, vlivanov --- hotspot/src/share/vm/prims/whitebox.cpp | 24 +- .../whitebox/ClearMethodStateTest.java | 53 ++-- .../whitebox/CompilerWhiteBoxTest.java | 262 +++++++++++++++--- .../compiler/whitebox/DeoptimizeAllTest.java | 24 +- .../whitebox/DeoptimizeMethodTest.java | 26 +- .../EnqueueMethodForCompilationTest.java | 64 +++-- .../whitebox/IsMethodCompilableTest.java | 73 +++-- .../whitebox/MakeMethodNotCompilableTest.java | 83 +++++- .../whitebox/SetDontInlineMethodTest.java | 36 ++- .../whitebox/SetForceInlineMethodTest.java | 36 ++- .../whitebox/sun/hotspot/WhiteBox.java | 29 +- 11 files changed, 535 insertions(+), 175 deletions(-) diff --git a/hotspot/src/share/vm/prims/whitebox.cpp b/hotspot/src/share/vm/prims/whitebox.cpp index 1dfcd3d39a4..36733b2b646 100644 --- a/hotspot/src/share/vm/prims/whitebox.cpp +++ b/hotspot/src/share/vm/prims/whitebox.cpp @@ -237,10 +237,10 @@ WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject meth WB_END -WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method)) +WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level)) jmethodID jmid = reflected_method_to_jmid(thread, env, method); methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid)); - mh->set_not_compilable(); + mh->set_not_compilable(comp_level, true /* report */, "WhiteBox"); WB_END WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value)) @@ -398,28 +398,28 @@ static JNINativeMethod methods[] = { {CC"NMTWaitForDataMerge", CC"()Z", (void*)&WB_NMTWaitForDataMerge}, #endif // INCLUDE_NMT {CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll }, - {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Method;)I", + {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_DeoptimizeMethod }, - {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Method;)Z", + {CC"isMethodCompiled", CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodCompiled }, - {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Method;I)Z", + {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_IsMethodCompilable}, {CC"isMethodQueuedForCompilation", - CC"(Ljava/lang/reflect/Method;)Z", (void*)&WB_IsMethodQueuedForCompilation}, + CC"(Ljava/lang/reflect/Executable;)Z", (void*)&WB_IsMethodQueuedForCompilation}, {CC"makeMethodNotCompilable", - CC"(Ljava/lang/reflect/Method;)V", (void*)&WB_MakeMethodNotCompilable}, + CC"(Ljava/lang/reflect/Executable;I)V", (void*)&WB_MakeMethodNotCompilable}, 
{CC"testSetDontInlineMethod", - CC"(Ljava/lang/reflect/Method;Z)Z", (void*)&WB_TestSetDontInlineMethod}, + CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetDontInlineMethod}, {CC"getMethodCompilationLevel", - CC"(Ljava/lang/reflect/Method;)I", (void*)&WB_GetMethodCompilationLevel}, + CC"(Ljava/lang/reflect/Executable;)I", (void*)&WB_GetMethodCompilationLevel}, {CC"getCompileQueuesSize", CC"()I", (void*)&WB_GetCompileQueuesSize}, {CC"testSetForceInlineMethod", - CC"(Ljava/lang/reflect/Method;Z)Z", (void*)&WB_TestSetForceInlineMethod}, + CC"(Ljava/lang/reflect/Executable;Z)Z", (void*)&WB_TestSetForceInlineMethod}, {CC"enqueueMethodForCompilation", - CC"(Ljava/lang/reflect/Method;I)Z", (void*)&WB_EnqueueMethodForCompilation}, + CC"(Ljava/lang/reflect/Executable;I)Z", (void*)&WB_EnqueueMethodForCompilation}, {CC"clearMethodState", - CC"(Ljava/lang/reflect/Method;)V", (void*)&WB_ClearMethodState}, + CC"(Ljava/lang/reflect/Executable;)V", (void*)&WB_ClearMethodState}, {CC"isInStringTable", CC"(Ljava/lang/String;)Z", (void*)&WB_IsInStringTable }, {CC"fullGC", CC"()V", (void*)&WB_FullGC }, }; diff --git a/hotspot/test/compiler/whitebox/ClearMethodStateTest.java b/hotspot/test/compiler/whitebox/ClearMethodStateTest.java index a117e1b3821..491650d3752 100644 --- a/hotspot/test/compiler/whitebox/ClearMethodStateTest.java +++ b/hotspot/test/compiler/whitebox/ClearMethodStateTest.java @@ -27,42 +27,61 @@ * @build ClearMethodStateTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ClearMethodStateTest + * @summary testing of WB::clearMethodState() * @author igor.ignatyev@oracle.com */ public class ClearMethodStateTest extends CompilerWhiteBoxTest { + public static void main(String[] args) throws Exception { - // to prevent inlining #method into #compile() and #test() - WHITE_BOX.testSetDontInlineMethod(METHOD, true); - new ClearMethodStateTest().runTest(); + for (TestCase test : TestCase.values()) { + new ClearMethodStateTest(test).runTest(); + } } + public ClearMethodStateTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + + /** + * Tests {@code WB::clearMethodState()} by calling it before/after + * compilation. For non-tiered, checks that counters will be rested after + * clearing of method state. + * + * @throws Exception if one of the checks fails. 
+ */ + @Override protected void test() throws Exception { - checkNotCompiled(METHOD); + checkNotCompiled(); compile(); - checkCompiled(METHOD); - WHITE_BOX.clearMethodState(METHOD); - WHITE_BOX.deoptimizeMethod(METHOD); - checkNotCompiled(METHOD); + WHITE_BOX.clearMethodState(method); + checkCompiled(); + WHITE_BOX.clearMethodState(method); + WHITE_BOX.deoptimizeMethod(method); + checkNotCompiled(); if (!TIERED_COMPILATION) { - WHITE_BOX.clearMethodState(METHOD); + WHITE_BOX.clearMethodState(method); compile(COMPILE_THRESHOLD); - checkCompiled(METHOD); + checkCompiled(); - WHITE_BOX.deoptimizeMethod(METHOD); - checkNotCompiled(METHOD); - WHITE_BOX.clearMethodState(METHOD); + WHITE_BOX.deoptimizeMethod(method); + checkNotCompiled(); + WHITE_BOX.clearMethodState(method); + // invoke method one less time than needed to compile if (COMPILE_THRESHOLD > 1) { compile(COMPILE_THRESHOLD - 1); - checkNotCompiled(METHOD); + checkNotCompiled(); } else { - System.err.println("Warning: 'CompileThreshold' <= 1"); + System.err.println("Warning: 'CompileThreshold' <= 1"); } - method(); - checkCompiled(METHOD); + compile(1); + checkCompiled(); } else { System.err.println( "Warning: part of test is not applicable in Tiered"); diff --git a/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java b/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java index 07b196588c3..a69d9ef8244 100644 --- a/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java +++ b/hotspot/test/compiler/whitebox/CompilerWhiteBoxTest.java @@ -21,68 +21,132 @@ * questions. */ +import com.sun.management.HotSpotDiagnosticMXBean; +import com.sun.management.VMOption; import sun.hotspot.WhiteBox; import sun.management.ManagementFactoryHelper; -import com.sun.management.HotSpotDiagnosticMXBean; +import java.lang.reflect.Constructor; +import java.lang.reflect.Executable; import java.lang.reflect.Method; +import java.util.Objects; +import java.util.concurrent.Callable; -/* +/** + * Abstract class for WhiteBox testing of JIT. + * * @author igor.ignatyev@oracle.com */ public abstract class CompilerWhiteBoxTest { + /** {@code CompLevel::CompLevel_none} -- Interpreter */ + protected static int COMP_LEVEL_NONE = 0; + /** {@code CompLevel::CompLevel_any}, {@code CompLevel::CompLevel_all} */ + protected static int COMP_LEVEL_ANY = -1; + /** Instance of WhiteBox */ protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); - protected static final Method METHOD = getMethod("method"); + /** Value of {@code -XX:CompileThreshold} */ protected static final int COMPILE_THRESHOLD = Integer.parseInt(getVMOption("CompileThreshold", "10000")); + /** Value of {@code -XX:BackgroundCompilation} */ protected static final boolean BACKGROUND_COMPILATION = Boolean.valueOf(getVMOption("BackgroundCompilation", "true")); + /** Value of {@code -XX:TieredCompilation} */ protected static final boolean TIERED_COMPILATION = Boolean.valueOf(getVMOption("TieredCompilation", "false")); + /** Value of {@code -XX:TieredStopAtLevel} */ + protected static final int TIERED_STOP_AT_LEVEL + = Integer.parseInt(getVMOption("TieredStopAtLevel", "0")); - protected static Method getMethod(String name) { - try { - return CompilerWhiteBoxTest.class.getDeclaredMethod(name); - } catch (NoSuchMethodException | SecurityException e) { - throw new RuntimeException( - "exception on getting method " + name, e); - } - } - + /** + * Returns value of VM option. 
+ * + * @param name option's name + * @return value of option or {@code null}, if option doesn't exist + * @throws NullPointerException if name is null + */ protected static String getVMOption(String name) { - String result; + Objects.requireNonNull(name); HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean(); - result = diagnostic.getVMOption(name).getValue(); - return result; + VMOption tmp; + try { + tmp = diagnostic.getVMOption(name); + } catch (IllegalArgumentException e) { + tmp = null; + } + return (tmp == null ? null : tmp.getValue()); } + /** + * Returns value of VM option or default value. + * + * @param name option's name + * @param defaultValue default value + * @return value of option or {@code defaultValue}, if option doesn't exist + * @throws NullPointerException if name is null + * @see #getVMOption(String) + */ protected static String getVMOption(String name, String defaultValue) { String result = getVMOption(name); return result == null ? defaultValue : result; } - protected final void runTest() throws RuntimeException { + /** tested method */ + protected final Executable method; + private final Callable callable; + + /** + * Constructor. + * + * @param testCase object, that contains tested method and way to invoke it. + */ + protected CompilerWhiteBoxTest(TestCase testCase) { + Objects.requireNonNull(testCase); + System.out.println("TEST CASE:" + testCase.name()); + method = testCase.executable; + callable = testCase.callable; + } + + /** + * Template method for testing. Prints tested method's info before + * {@linkplain #test()} and after {@linkplain #test()} or on thrown + * exception. + * + * @throws RuntimeException if method {@linkplain #test()} throws any + * exception + * @see #test() + */ + protected final void runTest() { if (ManagementFactoryHelper.getCompilationMXBean() == null) { System.err.println( "Warning: test is not applicable in interpreted mode"); return; } System.out.println("at test's start:"); - printInfo(METHOD); + printInfo(); try { test(); } catch (Exception e) { System.out.printf("on exception '%s':", e.getMessage()); - printInfo(METHOD); + printInfo(); e.printStackTrace(); + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } throw new RuntimeException(e); } System.out.println("at test's end:"); - printInfo(METHOD); + printInfo(); } - protected static void checkNotCompiled(Method method) { + /** + * Checks, that {@linkplain #method} is not compiled. + * + * @throws RuntimeException if {@linkplain #method} is in compiler queue or + * is compiled, or if {@linkplain #method} has zero + * compilation level. + */ + protected final void checkNotCompiled() { if (WHITE_BOX.isMethodQueuedForCompilation(method)) { throw new RuntimeException(method + " must not be in queue"); } @@ -94,10 +158,16 @@ public abstract class CompilerWhiteBoxTest { } } - protected static void checkCompiled(Method method) - throws InterruptedException { + /** + * Checks, that {@linkplain #method} is compiled. 
+ * + * @throws RuntimeException if {@linkplain #method} isn't in compiler queue + * and isn't compiled, or if {@linkplain #method} + * has nonzero compilation level + */ + protected final void checkCompiled() { final long start = System.currentTimeMillis(); - waitBackgroundCompilation(method); + waitBackgroundCompilation(); if (WHITE_BOX.isMethodQueuedForCompilation(method)) { System.err.printf("Warning: %s is still in queue after %dms%n", method, System.currentTimeMillis() - start); @@ -111,23 +181,30 @@ public abstract class CompilerWhiteBoxTest { } } - protected static void waitBackgroundCompilation(Method method) - throws InterruptedException { + /** + * Waits for completion of background compilation of {@linkplain #method}. + */ + protected final void waitBackgroundCompilation() { if (!BACKGROUND_COMPILATION) { return; } final Object obj = new Object(); - synchronized (obj) { - for (int i = 0; i < 10; ++i) { - if (!WHITE_BOX.isMethodQueuedForCompilation(method)) { - break; + for (int i = 0; i < 10 + && WHITE_BOX.isMethodQueuedForCompilation(method); ++i) { + synchronized (obj) { + try { + obj.wait(1000); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } - obj.wait(1000); } } } - protected static void printInfo(Method method) { + /** + * Prints information about {@linkplain #method}. + */ + protected final void printInfo() { System.out.printf("%n%s:%n", method); System.out.printf("\tcompilable:\t%b%n", WHITE_BOX.isMethodCompilable(method)); @@ -141,22 +218,139 @@ public abstract class CompilerWhiteBoxTest { WHITE_BOX.getCompileQueuesSize()); } + /** + * Executes testing. + */ protected abstract void test() throws Exception; + /** + * Tries to trigger compilation of {@linkplain #method} by call + * {@linkplain #callable} enough times. + * + * @return accumulated result + * @see #compile(int) + */ protected final int compile() { return compile(Math.max(COMPILE_THRESHOLD, 150000)); } + /** + * Tries to trigger compilation of {@linkplain #method} by call + * {@linkplain #callable} specified times. + * + * @param count invocation count + * @return accumulated result + */ protected final int compile(int count) { int result = 0; + Integer tmp; for (int i = 0; i < count; ++i) { - result += method(); + try { + tmp = callable.call(); + } catch (Exception e) { + tmp = null; + } + result += tmp == null ? 0 : tmp; } System.out.println("method was invoked " + count + " times"); return result; } +} - protected int method() { - return 42; +/** + * Utility structure containing tested method and object to invoke it. 
+ */ +enum TestCase { + /** constructor test case */ + CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE), + /** method test case */ + METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE), + /** static method test case */ + STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE); + + /** tested method */ + final Executable executable; + /** object to invoke {@linkplain #executable} */ + final Callable callable; + + private TestCase(Executable executable, Callable callable) { + this.executable = executable; + this.callable = callable; + } + + private static class Helper { + private static final Callable CONSTRUCTOR_CALLABLE + = new Callable() { + @Override + public Integer call() throws Exception { + return new Helper(1337).hashCode(); + } + }; + + private static final Callable METHOD_CALLABLE + = new Callable() { + private final Helper helper = new Helper(); + + @Override + public Integer call() throws Exception { + return helper.method(); + } + }; + + private static final Callable STATIC_CALLABLE + = new Callable() { + @Override + public Integer call() throws Exception { + return staticMethod(); + } + }; + + private static final Constructor CONSTRUCTOR; + private static final Method METHOD; + private static final Method STATIC; + + static { + try { + CONSTRUCTOR = Helper.class.getDeclaredConstructor(int.class); + } catch (NoSuchMethodException | SecurityException e) { + throw new RuntimeException( + "exception on getting method Helper.(int)", e); + } + try { + METHOD = Helper.class.getDeclaredMethod("method"); + } catch (NoSuchMethodException | SecurityException e) { + throw new RuntimeException( + "exception on getting method Helper.method()", e); + } + try { + STATIC = Helper.class.getDeclaredMethod("staticMethod"); + } catch (NoSuchMethodException | SecurityException e) { + throw new RuntimeException( + "exception on getting method Helper.staticMethod()", e); + } + } + + private static int staticMethod() { + return 1138; + } + + private int method() { + return 42; + } + + private final int x; + + public Helper() { + x = 0; + } + + private Helper(int x) { + this.x = x; + } + + @Override + public int hashCode() { + return x; + } } } diff --git a/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java b/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java index b6bdaf1f2ad..c831a23e58c 100644 --- a/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java +++ b/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java @@ -27,20 +27,34 @@ * @build DeoptimizeAllTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeAllTest + * @summary testing of WB::deoptimizeAll() * @author igor.ignatyev@oracle.com */ public class DeoptimizeAllTest extends CompilerWhiteBoxTest { public static void main(String[] args) throws Exception { - // to prevent inlining #method into #compile() - WHITE_BOX.testSetDontInlineMethod(METHOD, true); - new DeoptimizeAllTest().runTest(); + for (TestCase test : TestCase.values()) { + new DeoptimizeAllTest(test).runTest(); + } } + public DeoptimizeAllTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + /** + * Tests {@code WB::deoptimizeAll()} by calling it after + * compilation and checking that method isn't compiled. + * + * @throws Exception if one of the checks fails. 
+ */ + @Override protected void test() throws Exception { compile(); - checkCompiled(METHOD); + checkCompiled(); WHITE_BOX.deoptimizeAll(); - checkNotCompiled(METHOD); + checkNotCompiled(); } } diff --git a/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java b/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java index a5053ae9009..b6c84aeb313 100644 --- a/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java +++ b/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java @@ -27,20 +27,34 @@ * @build DeoptimizeMethodTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeMethodTest + * @summary testing of WB::deoptimizeMethod() * @author igor.ignatyev@oracle.com */ public class DeoptimizeMethodTest extends CompilerWhiteBoxTest { public static void main(String[] args) throws Exception { - // to prevent inlining #method into #compile() - WHITE_BOX.testSetDontInlineMethod(METHOD, true); - new DeoptimizeMethodTest().runTest(); + for (TestCase test : TestCase.values()) { + new DeoptimizeMethodTest(test).runTest(); + } } + public DeoptimizeMethodTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + /** + * Tests {@code WB::deoptimizeMethod()} by calling it after + * compilation and checking that method isn't compiled. + * + * @throws Exception if one of the checks fails. + */ + @Override protected void test() throws Exception { compile(); - checkCompiled(METHOD); - WHITE_BOX.deoptimizeMethod(METHOD); - checkNotCompiled(METHOD); + checkCompiled(); + WHITE_BOX.deoptimizeMethod(method); + checkNotCompiled(); } } diff --git a/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java b/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java index a4464f0e4e7..f87b3235697 100644 --- a/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java +++ b/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java @@ -27,48 +27,60 @@ * @build EnqueueMethodForCompilationTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. 
-Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI EnqueueMethodForCompilationTest + * @summary testing of WB::enqueueMethodForCompilation() * @author igor.ignatyev@oracle.com */ public class EnqueueMethodForCompilationTest extends CompilerWhiteBoxTest { + public static void main(String[] args) throws Exception { - // to prevent inlining #method into #compile() - WHITE_BOX.testSetDontInlineMethod(METHOD, true); - new EnqueueMethodForCompilationTest().runTest(); + for (TestCase test : TestCase.values()) { + new EnqueueMethodForCompilationTest(test).runTest(); + } } + public EnqueueMethodForCompilationTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + @Override protected void test() throws Exception { - checkNotCompiled(METHOD); + checkNotCompiled(); - WHITE_BOX.enqueueMethodForCompilation(METHOD, 0); - if (WHITE_BOX.isMethodCompilable(METHOD, 0)) { - throw new RuntimeException(METHOD + " is compilable at level 0"); + // method can not be compiled on level 'none' + WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_NONE); + if (WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_NONE)) { + throw new RuntimeException(method + + " is compilable at level COMP_LEVEL_NONE"); } - checkNotCompiled(METHOD); + checkNotCompiled(); - WHITE_BOX.enqueueMethodForCompilation(METHOD, -1); - checkNotCompiled(METHOD); + // COMP_LEVEL_ANY is inapplicable as level for compilation + WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_ANY); + checkNotCompiled(); - WHITE_BOX.enqueueMethodForCompilation(METHOD, 5); - if (!WHITE_BOX.isMethodCompilable(METHOD, 5)) { - checkNotCompiled(METHOD); - compile(); - checkCompiled(METHOD); + WHITE_BOX.enqueueMethodForCompilation(method, 5); + if (!WHITE_BOX.isMethodCompilable(method, 5)) { + checkNotCompiled(); + compile(); + checkCompiled(); } else { - checkCompiled(METHOD); + checkCompiled(); } - int compLevel = WHITE_BOX.getMethodCompilationLevel(METHOD); - WHITE_BOX.deoptimizeMethod(METHOD); - checkNotCompiled(METHOD); + int compLevel = WHITE_BOX.getMethodCompilationLevel(method); + WHITE_BOX.deoptimizeMethod(method); + checkNotCompiled(); - WHITE_BOX.enqueueMethodForCompilation(METHOD, compLevel); - checkCompiled(METHOD); - WHITE_BOX.deoptimizeMethod(METHOD); - checkNotCompiled(METHOD); + WHITE_BOX.enqueueMethodForCompilation(method, compLevel); + checkCompiled(); + WHITE_BOX.deoptimizeMethod(method); + checkNotCompiled(); compile(); - checkCompiled(METHOD); - WHITE_BOX.deoptimizeMethod(METHOD); - checkNotCompiled(METHOD); + checkCompiled(); + WHITE_BOX.deoptimizeMethod(method); + checkNotCompiled(); } } diff --git a/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java b/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java index 8f785c27272..e1cfaf4887b 100644 --- a/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java +++ b/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java @@ -28,9 +28,13 @@ * @build IsMethodCompilableTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm/timeout=600 -Xbootclasspath/a:. 
-XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IsMethodCompilableTest + * @summary testing of WB::isMethodCompilable() * @author igor.ignatyev@oracle.com */ public class IsMethodCompilableTest extends CompilerWhiteBoxTest { + /** + * Value of {@code -XX:PerMethodRecompilationCutoff} + */ protected static final long PER_METHOD_RECOMPILATION_CUTOFF; static { @@ -44,14 +48,28 @@ public class IsMethodCompilableTest extends CompilerWhiteBoxTest { } public static void main(String[] args) throws Exception { - // to prevent inlining #method into #compile() - WHITE_BOX.testSetDontInlineMethod(METHOD, true); - new IsMethodCompilableTest().runTest(); + for (TestCase test : TestCase.values()) { + new IsMethodCompilableTest(test).runTest(); + } } + public IsMethodCompilableTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + /** + * Tests {@code WB::isMethodCompilable()} by recompilation of tested method + * 'PerMethodRecompilationCutoff' times and checks compilation status. Also + * checks that WB::clearMethodState() clears no-compilable flags. + * + * @throws Exception if one of the checks fails. + */ + @Override protected void test() throws Exception { - if (!WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD + " must be compilable"); + if (!WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + " must be compilable"); } System.out.println("PerMethodRecompilationCutoff = " + PER_METHOD_RECOMPILATION_CUTOFF); @@ -61,46 +79,47 @@ public class IsMethodCompilableTest extends CompilerWhiteBoxTest { return; } - // deoptimze 'PerMethodRecompilationCutoff' times and clear state + // deoptimize 'PerMethodRecompilationCutoff' times and clear state for (long i = 0L, n = PER_METHOD_RECOMPILATION_CUTOFF - 1; i < n; ++i) { - compileAndDeoptimaze(); + compileAndDeoptimize(); } - if (!WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD + " is not compilable after " + if (!WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + " is not compilable after " + (PER_METHOD_RECOMPILATION_CUTOFF - 1) + " iterations"); } - WHITE_BOX.clearMethodState(METHOD); + WHITE_BOX.clearMethodState(method); - // deoptimze 'PerMethodRecompilationCutoff' + 1 times + // deoptimize 'PerMethodRecompilationCutoff' + 1 times long i; for (i = 0L; i < PER_METHOD_RECOMPILATION_CUTOFF - && WHITE_BOX.isMethodCompilable(METHOD); ++i) { - compileAndDeoptimaze(); + && WHITE_BOX.isMethodCompilable(method); ++i) { + compileAndDeoptimize(); } if (i != PER_METHOD_RECOMPILATION_CUTOFF) { - throw new RuntimeException(METHOD + " is not compilable after " - + i + " iterations, but must only after " - + PER_METHOD_RECOMPILATION_CUTOFF); + throw new RuntimeException(method + " is not compilable after " + + i + " iterations, but must only after " + + PER_METHOD_RECOMPILATION_CUTOFF); } - if (WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD + " is still compilable after " + if (WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + " is still compilable after " + PER_METHOD_RECOMPILATION_CUTOFF + " iterations"); } compile(); - checkNotCompiled(METHOD); + checkNotCompiled(); - WHITE_BOX.clearMethodState(METHOD); - if (!WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD - + " is compilable after clearMethodState()"); + // WB.clearMethodState() must reset no-compilable flags + WHITE_BOX.clearMethodState(method); 
+ if (!WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + + " is not compilable after clearMethodState()"); } compile(); - checkCompiled(METHOD); + checkCompiled(); } - private void compileAndDeoptimaze() throws Exception { + private void compileAndDeoptimize() throws Exception { compile(); - waitBackgroundCompilation(METHOD); - WHITE_BOX.deoptimizeMethod(METHOD); + waitBackgroundCompilation(); + WHITE_BOX.deoptimizeMethod(method); } } diff --git a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java index 19aaf515a57..59676ccfb37 100644 --- a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java +++ b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java @@ -27,28 +27,85 @@ * @build MakeMethodNotCompilableTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI MakeMethodNotCompilableTest + * @summary testing of WB::makeMethodNotCompilable() * @author igor.ignatyev@oracle.com */ public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest { public static void main(String[] args) throws Exception { - // to prevent inlining #method into #compile() - WHITE_BOX.testSetDontInlineMethod(METHOD, true); - new MakeMethodNotCompilableTest().runTest(); + if (args.length == 0) { + for (TestCase test : TestCase.values()) { + new MakeMethodNotCompilableTest(test).runTest(); + } + } else { + for (String name : args) { + new MakeMethodNotCompilableTest( + TestCase.valueOf(name)).runTest(); + } + } } - protected void test() throws Exception { - if (!WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD + " must be compilable"); + public MakeMethodNotCompilableTest(TestCase testCase) { + super(testCase); + // to prevent inlining of #method + WHITE_BOX.testSetDontInlineMethod(method, true); + } + + /** + * Tests {@code WB::makeMethodNotCompilable()} by calling it before + * compilation and checking that method isn't compiled. Also + * checks that WB::clearMethodState() clears no-compilable flags. For + * tiered, additional checks for all available levels are conducted. + * + * @throws Exception if one of the checks fails. 
+ */ + @Override + protected void test() throws Exception { + checkNotCompiled(); + if (!WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + " must be compilable"); } - WHITE_BOX.makeMethodNotCompilable(METHOD); - if (WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD + " must be not compilable"); + + if (TIERED_COMPILATION) { + for (int i = 1, n = TIERED_STOP_AT_LEVEL + 1; i < n; ++i) { + WHITE_BOX.makeMethodNotCompilable(method, i); + if (WHITE_BOX.isMethodCompilable(method, i)) { + throw new RuntimeException(method + + " must be not compilable at level" + i); + } + WHITE_BOX.enqueueMethodForCompilation(method, i); + checkNotCompiled(); + + if (!WHITE_BOX.isMethodCompilable(method)) { + System.out.println(method + + " is not compilable after level " + i); + } + } + + // WB.clearMethodState() must reset no-compilable flags + WHITE_BOX.clearMethodState(method); + if (!WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + + " is not compilable after clearMethodState()"); + } + } + WHITE_BOX.makeMethodNotCompilable(method); + if (WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + " must be not compilable"); + } + + compile(); + checkNotCompiled(); + if (WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + " must be not compilable"); + } + // WB.clearMethodState() must reset no-compilable flags + WHITE_BOX.clearMethodState(method); + if (!WHITE_BOX.isMethodCompilable(method)) { + throw new RuntimeException(method + + " is not compilable after clearMethodState()"); } compile(); - checkNotCompiled(METHOD); - if (WHITE_BOX.isMethodCompilable(METHOD)) { - throw new RuntimeException(METHOD + " must be not compilable"); - } + checkCompiled(); } } diff --git a/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java b/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java index a85421a63f9..6a8b61c373f 100644 --- a/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java +++ b/hotspot/test/compiler/whitebox/SetDontInlineMethodTest.java @@ -27,33 +27,47 @@ * @build SetDontInlineMethodTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetDontInlineMethodTest + * @summary testing of WB::testSetDontInlineMethod() * @author igor.ignatyev@oracle.com */ public class SetDontInlineMethodTest extends CompilerWhiteBoxTest { public static void main(String[] args) throws Exception { - new SetDontInlineMethodTest().runTest(); + for (TestCase test : TestCase.values()) { + new SetDontInlineMethodTest(test).runTest(); + } } + public SetDontInlineMethodTest(TestCase testCase) { + super(testCase); + } + + /** + * Tests {@code WB::testSetDontInlineMethod()} by sequential calling it and + * checking of return value. + * + * @throws Exception if one of the checks fails. 
+ */ + @Override protected void test() throws Exception { - if (WHITE_BOX.testSetDontInlineMethod(METHOD, true)) { - throw new RuntimeException("on start " + METHOD + if (WHITE_BOX.testSetDontInlineMethod(method, true)) { + throw new RuntimeException("on start " + method + " must be inlineable"); } - if (!WHITE_BOX.testSetDontInlineMethod(METHOD, true)) { - throw new RuntimeException("after first change to true " + METHOD + if (!WHITE_BOX.testSetDontInlineMethod(method, true)) { + throw new RuntimeException("after first change to true " + method + " must be not inlineable"); } - if (!WHITE_BOX.testSetDontInlineMethod(METHOD, false)) { - throw new RuntimeException("after second change to true " + METHOD + if (!WHITE_BOX.testSetDontInlineMethod(method, false)) { + throw new RuntimeException("after second change to true " + method + " must be still not inlineable"); } - if (WHITE_BOX.testSetDontInlineMethod(METHOD, false)) { - throw new RuntimeException("after first change to false" + METHOD + if (WHITE_BOX.testSetDontInlineMethod(method, false)) { + throw new RuntimeException("after first change to false" + method + " must be inlineable"); } - if (WHITE_BOX.testSetDontInlineMethod(METHOD, false)) { - throw new RuntimeException("after second change to false " + METHOD + if (WHITE_BOX.testSetDontInlineMethod(method, false)) { + throw new RuntimeException("after second change to false " + method + " must be inlineable"); } } diff --git a/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java b/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java index cb4a7b02044..ca3e54389bb 100644 --- a/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java +++ b/hotspot/test/compiler/whitebox/SetForceInlineMethodTest.java @@ -27,33 +27,47 @@ * @build SetForceInlineMethodTest * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetForceInlineMethodTest + * @summary testing of WB::testSetForceInlineMethod() * @author igor.ignatyev@oracle.com */ public class SetForceInlineMethodTest extends CompilerWhiteBoxTest { public static void main(String[] args) throws Exception { - new SetForceInlineMethodTest().runTest(); + for (TestCase test : TestCase.values()) { + new SetForceInlineMethodTest(test).runTest(); + } } + public SetForceInlineMethodTest(TestCase testCase) { + super(testCase); + } + + /** + * Tests {@code WB::testSetForceInlineMethod()} by sequential calling it and + * checking of return value. + * + * @throws Exception if one of the checks fails. 
+ */ + @Override protected void test() throws Exception { - if (WHITE_BOX.testSetForceInlineMethod(METHOD, true)) { - throw new RuntimeException("on start " + METHOD + if (WHITE_BOX.testSetForceInlineMethod(method, true)) { + throw new RuntimeException("on start " + method + " must be not force inlineable"); } - if (!WHITE_BOX.testSetForceInlineMethod(METHOD, true)) { - throw new RuntimeException("after first change to true " + METHOD + if (!WHITE_BOX.testSetForceInlineMethod(method, true)) { + throw new RuntimeException("after first change to true " + method + " must be force inlineable"); } - if (!WHITE_BOX.testSetForceInlineMethod(METHOD, false)) { - throw new RuntimeException("after second change to true " + METHOD + if (!WHITE_BOX.testSetForceInlineMethod(method, false)) { + throw new RuntimeException("after second change to true " + method + " must be still force inlineable"); } - if (WHITE_BOX.testSetForceInlineMethod(METHOD, false)) { - throw new RuntimeException("after first change to false" + METHOD + if (WHITE_BOX.testSetForceInlineMethod(method, false)) { + throw new RuntimeException("after first change to false" + method + " must be not force inlineable"); } - if (WHITE_BOX.testSetForceInlineMethod(METHOD, false)) { - throw new RuntimeException("after second change to false " + METHOD + if (WHITE_BOX.testSetForceInlineMethod(method, false)) { + throw new RuntimeException("after second change to false " + method + " must be not force inlineable"); } } diff --git a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java index 56e609254b4..e294b2344fa 100644 --- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java +++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java @@ -24,7 +24,7 @@ package sun.hotspot; -import java.lang.reflect.Method; +import java.lang.reflect.Executable; import java.security.BasicPermission; import sun.hotspot.parser.DiagnosticCommand; @@ -90,22 +90,25 @@ public class WhiteBox { // Compiler public native void deoptimizeAll(); - public native boolean isMethodCompiled(Method method); - public boolean isMethodCompilable(Method method) { + public native boolean isMethodCompiled(Executable method); + public boolean isMethodCompilable(Executable method) { return isMethodCompilable(method, -1 /*any*/); } - public native boolean isMethodCompilable(Method method, int compLevel); - public native boolean isMethodQueuedForCompilation(Method method); - public native int deoptimizeMethod(Method method); - public native void makeMethodNotCompilable(Method method); - public native int getMethodCompilationLevel(Method method); - public native boolean testSetDontInlineMethod(Method method, boolean value); + public native boolean isMethodCompilable(Executable method, int compLevel); + public native boolean isMethodQueuedForCompilation(Executable method); + public native int deoptimizeMethod(Executable method); + public void makeMethodNotCompilable(Executable method) { + makeMethodNotCompilable(method, -1 /*any*/); + } + public native void makeMethodNotCompilable(Executable method, int compLevel); + public native int getMethodCompilationLevel(Executable method); + public native boolean testSetDontInlineMethod(Executable method, boolean value); public native int getCompileQueuesSize(); - public native boolean testSetForceInlineMethod(Method method, boolean value); - public native boolean enqueueMethodForCompilation(Method method, int compLevel); - public native void clearMethodState(Method 
method); + public native boolean testSetForceInlineMethod(Executable method, boolean value); + public native boolean enqueueMethodForCompilation(Executable method, int compLevel); + public native void clearMethodState(Executable method); - //Intered strings + // Intered strings public native boolean isInStringTable(String str); // force Full GC From 92ef5fe748ef3156a17fce189270ce451d9abbbc Mon Sep 17 00:00:00 2001 From: Nils Eliasson Date: Thu, 11 Apr 2013 13:57:44 +0200 Subject: [PATCH 021/162] 8006952: Slow VM due to excessive code cache freelist iteration Remove continous free block requirement Reviewed-by: kvn --- hotspot/src/share/vm/code/codeBlob.cpp | 4 +- hotspot/src/share/vm/code/codeCache.cpp | 32 ++---- hotspot/src/share/vm/code/codeCache.hpp | 10 +- hotspot/src/share/vm/code/nmethod.cpp | 77 ++++++++------- .../src/share/vm/compiler/compileBroker.cpp | 2 +- hotspot/src/share/vm/memory/heap.cpp | 97 ++++++++++--------- hotspot/src/share/vm/memory/heap.hpp | 34 +++---- hotspot/src/share/vm/opto/output.cpp | 21 +--- 8 files changed, 123 insertions(+), 154 deletions(-) diff --git a/hotspot/src/share/vm/code/codeBlob.cpp b/hotspot/src/share/vm/code/codeBlob.cpp index 6120d3535ae..a855ade0f5f 100644 --- a/hotspot/src/share/vm/code/codeBlob.cpp +++ b/hotspot/src/share/vm/code/codeBlob.cpp @@ -348,14 +348,14 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name, void* RuntimeStub::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); + void* p = CodeCache::allocate(size, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } // operator new shared by all singletons: void* SingletonBlob::operator new(size_t s, unsigned size) { - void* p = CodeCache::allocate(size); + void* p = CodeCache::allocate(size, true); if (!p) fatal("Initial size of CodeCache is too small"); return p; } diff --git a/hotspot/src/share/vm/code/codeCache.cpp b/hotspot/src/share/vm/code/codeCache.cpp index d5b8f8f9a78..7d2bb575d9f 100644 --- a/hotspot/src/share/vm/code/codeCache.cpp +++ b/hotspot/src/share/vm/code/codeCache.cpp @@ -172,7 +172,7 @@ nmethod* CodeCache::next_nmethod (CodeBlob* cb) { static size_t maxCodeCacheUsed = 0; -CodeBlob* CodeCache::allocate(int size) { +CodeBlob* CodeCache::allocate(int size, bool is_critical) { // Do not seize the CodeCache lock here--if the caller has not // already done so, we are going to lose bigtime, since the code // cache will contain a garbage CodeBlob until the caller can @@ -183,7 +183,7 @@ CodeBlob* CodeCache::allocate(int size) { CodeBlob* cb = NULL; _number_of_blobs++; while (true) { - cb = (CodeBlob*)_heap->allocate(size); + cb = (CodeBlob*)_heap->allocate(size, is_critical); if (cb != NULL) break; if (!_heap->expand_by(CodeCacheExpansionSize)) { // Expansion failed @@ -192,8 +192,8 @@ CodeBlob* CodeCache::allocate(int size) { if (PrintCodeCacheExtension) { ResourceMark rm; tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)", - (intptr_t)_heap->begin(), (intptr_t)_heap->end(), - (address)_heap->end() - (address)_heap->begin()); + (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(), + (address)_heap->high() - (address)_heap->low_boundary()); } } maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() - @@ -608,13 +608,13 @@ void CodeCache::verify_oops() { address CodeCache::first_address() { assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->begin(); + return (address)_heap->low_boundary(); } address CodeCache::last_address() { 
assert_locked_or_safepoint(CodeCache_lock); - return (address)_heap->end(); + return (address)_heap->high(); } @@ -996,10 +996,9 @@ void CodeCache::print() { void CodeCache::print_summary(outputStream* st, bool detailed) { size_t total = (_heap->high_boundary() - _heap->low_boundary()); st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT - "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT - "Kb max_free_chunk=" SIZE_FORMAT "Kb", + "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb", total/K, (total - unallocated_capacity())/K, - maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K); + maxCodeCacheUsed/K, unallocated_capacity()/K); if (detailed) { st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]", @@ -1018,19 +1017,8 @@ void CodeCache::print_summary(outputStream* st, bool detailed) { void CodeCache::log_state(outputStream* st) { st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" - " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'" - " largest_free_block='" SIZE_FORMAT "'", + " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'", nof_blobs(), nof_nmethods(), nof_adapters(), - unallocated_capacity(), largest_free_block()); + unallocated_capacity()); } -size_t CodeCache::largest_free_block() { - // This is called both with and without CodeCache_lock held so - // handle both cases. - if (CodeCache_lock->owned_by_self()) { - return _heap->largest_free_block(); - } else { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - return _heap->largest_free_block(); - } -} diff --git a/hotspot/src/share/vm/code/codeCache.hpp b/hotspot/src/share/vm/code/codeCache.hpp index e19aec61b79..9bb0b839e14 100644 --- a/hotspot/src/share/vm/code/codeCache.hpp +++ b/hotspot/src/share/vm/code/codeCache.hpp @@ -70,7 +70,7 @@ class CodeCache : AllStatic { static void initialize(); // Allocation/administration - static CodeBlob* allocate(int size); // allocates a new CodeBlob + static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled static int alignment_unit(); // guaranteed alignment of all CodeBlobs static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) @@ -156,19 +156,13 @@ class CodeCache : AllStatic { static address low_bound() { return (address) _heap->low_boundary(); } static address high_bound() { return (address) _heap->high_boundary(); } - static bool has_space(int size) { - // Always leave some room in the CodeCache for I2C/C2I adapters - return largest_free_block() > (CodeCacheMinimumFreeSpace + size); - } - // Profiling static address first_address(); // first address used for CodeBlobs static address last_address(); // last address used for CodeBlobs static size_t capacity() { return _heap->capacity(); } static size_t max_capacity() { return _heap->max_capacity(); } static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } - static size_t largest_free_block(); - static bool needs_flushing() { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; } + static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; } static bool needs_cache_clean() { return _needs_cache_clean; } static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } diff --git a/hotspot/src/share/vm/code/nmethod.cpp 
b/hotspot/src/share/vm/code/nmethod.cpp index 55a2c05f5ff..60bc88b7734 100644 --- a/hotspot/src/share/vm/code/nmethod.cpp +++ b/hotspot/src/share/vm/code/nmethod.cpp @@ -501,18 +501,17 @@ nmethod* nmethod::new_native_nmethod(methodHandle method, { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - if (CodeCache::has_space(native_nmethod_size)) { - CodeOffsets offsets; - offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); - offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, - compile_id, &offsets, - code_buffer, frame_size, - basic_lock_owner_sp_offset, - basic_lock_sp_offset, oop_maps); - NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm)); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size, + compile_id, &offsets, + code_buffer, frame_size, + basic_lock_owner_sp_offset, + basic_lock_sp_offset, oop_maps); + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_native_nmethod(nm)); + if (PrintAssembly && nm != NULL) { + Disassembler::decode(nm); } } // verify nmethod @@ -538,18 +537,17 @@ nmethod* nmethod::new_dtrace_nmethod(methodHandle method, { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); int nmethod_size = allocation_size(code_buffer, sizeof(nmethod)); - if (CodeCache::has_space(nmethod_size)) { - CodeOffsets offsets; - offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); - offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); - offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); + CodeOffsets offsets; + offsets.set_value(CodeOffsets::Verified_Entry, vep_offset); + offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset); + offsets.set_value(CodeOffsets::Frame_Complete, frame_complete); - nm = new (nmethod_size) nmethod(method(), nmethod_size, - &offsets, code_buffer, frame_size); + nm = new (nmethod_size) nmethod(method(), nmethod_size, + &offsets, code_buffer, frame_size); - NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); - if (PrintAssembly && nm != NULL) - Disassembler::decode(nm); + NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); + if (PrintAssembly && nm != NULL) { + Disassembler::decode(nm); } } // verify nmethod @@ -591,16 +589,16 @@ nmethod* nmethod::new_nmethod(methodHandle method, + round_to(handler_table->size_in_bytes(), oopSize) + round_to(nul_chk_table->size_in_bytes(), oopSize) + round_to(debug_info->data_size() , oopSize); - if (CodeCache::has_space(nmethod_size)) { - nm = new (nmethod_size) - nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, - orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, - oop_maps, - handler_table, - nul_chk_table, - compiler, - comp_level); - } + + nm = new (nmethod_size) + nmethod(method(), nmethod_size, compile_id, entry_bci, offsets, + orig_pc_offset, debug_info, dependencies, code_buffer, frame_size, + oop_maps, + handler_table, + nul_chk_table, + compiler, + comp_level); + if (nm != NULL) { // To make dependency checking during class loading fast, record // the nmethod dependencies in the classes it is dependent on. 
@@ -612,15 +610,18 @@ nmethod* nmethod::new_nmethod(methodHandle method, // classes the slow way is too slow. for (Dependencies::DepStream deps(nm); deps.next(); ) { Klass* klass = deps.context_type(); - if (klass == NULL) continue; // ignore things like evol_method + if (klass == NULL) { + continue; // ignore things like evol_method + } // record this nmethod as dependent on this klass InstanceKlass::cast(klass)->add_dependent_nmethod(nm); } } NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); - if (PrintAssembly && nm != NULL) + if (PrintAssembly && nm != NULL) { Disassembler::decode(nm); + } } // verify nmethod @@ -798,13 +799,11 @@ nmethod::nmethod( } #endif // def HAVE_DTRACE_H -void* nmethod::operator new(size_t size, int nmethod_size) { - void* alloc = CodeCache::allocate(nmethod_size); - guarantee(alloc != NULL, "CodeCache should have enough space"); - return alloc; +void* nmethod::operator new(size_t size, int nmethod_size) throw () { + // Not critical, may return null if there is too little continuous memory + return CodeCache::allocate(nmethod_size); } - nmethod::nmethod( Method* method, int nmethod_size, diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp index de3df50bdf4..411f02b6771 100644 --- a/hotspot/src/share/vm/compiler/compileBroker.cpp +++ b/hotspot/src/share/vm/compiler/compileBroker.cpp @@ -1581,7 +1581,7 @@ void CompileBroker::compiler_thread_loop() { // We need this HandleMark to avoid leaking VM handles. HandleMark hm(thread); - if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) { + if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) { // the code cache is really full handle_full_code_cache(); } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) { diff --git a/hotspot/src/share/vm/memory/heap.cpp b/hotspot/src/share/vm/memory/heap.cpp index bf41864c6f3..727690b5c7f 100644 --- a/hotspot/src/share/vm/memory/heap.cpp +++ b/hotspot/src/share/vm/memory/heap.cpp @@ -42,7 +42,7 @@ CodeHeap::CodeHeap() { _log2_segment_size = 0; _next_segment = 0; _freelist = NULL; - _free_segments = 0; + _freelist_segments = 0; } @@ -115,8 +115,8 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size, } on_code_mapping(_memory.low(), _memory.committed_size()); - _number_of_committed_segments = number_of_segments(_memory.committed_size()); - _number_of_reserved_segments = number_of_segments(_memory.reserved_size()); + _number_of_committed_segments = size_to_segments(_memory.committed_size()); + _number_of_reserved_segments = size_to_segments(_memory.reserved_size()); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); // reserve space for _segmap @@ -149,8 +149,8 @@ bool CodeHeap::expand_by(size_t size) { if (!_memory.expand_by(dm)) return false; on_code_mapping(base, dm); size_t i = _number_of_committed_segments; - _number_of_committed_segments = number_of_segments(_memory.committed_size()); - assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change"); + _number_of_committed_segments = size_to_segments(_memory.committed_size()); + assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change"); assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking"); // expand _segmap space size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size(); 
@@ -176,33 +176,44 @@ void CodeHeap::clear() { } -void* CodeHeap::allocate(size_t size) { - size_t length = number_of_segments(size + sizeof(HeapBlock)); - assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList"); +void* CodeHeap::allocate(size_t instance_size, bool is_critical) { + size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock)); + assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList"); // First check if we can satify request from freelist debug_only(verify()); - HeapBlock* block = search_freelist(length); + HeapBlock* block = search_freelist(number_of_segments, is_critical); debug_only(if (VerifyCodeCacheOften) verify()); if (block != NULL) { - assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check"); + assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check"); assert(!block->free(), "must be marked free"); #ifdef ASSERT - memset((void *)block->allocated_space(), badCodeHeapNewVal, size); + memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size); #endif return block->allocated_space(); } - if (length < CodeCacheMinBlockLength) { - length = CodeCacheMinBlockLength; + // Ensure minimum size for allocation to the heap. + if (number_of_segments < CodeCacheMinBlockLength) { + number_of_segments = CodeCacheMinBlockLength; } - if (_next_segment + length <= _number_of_committed_segments) { - mark_segmap_as_used(_next_segment, _next_segment + length); + + if (!is_critical) { + // Make sure the allocation fits in the unallocated heap without using + // the CodeCacheMimimumFreeSpace that is reserved for critical allocations. + if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) { + // Fail allocation + return NULL; + } + } + + if (_next_segment + number_of_segments <= _number_of_committed_segments) { + mark_segmap_as_used(_next_segment, _next_segment + number_of_segments); HeapBlock* b = block_at(_next_segment); - b->initialize(length); - _next_segment += length; + b->initialize(number_of_segments); + _next_segment += number_of_segments; #ifdef ASSERT - memset((void *)b->allocated_space(), badCodeHeapNewVal, size); + memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size); #endif return b->allocated_space(); } else { @@ -219,7 +230,7 @@ void CodeHeap::deallocate(void* p) { #ifdef ASSERT memset((void *)b->allocated_space(), badCodeHeapFreeVal, - size(b->length()) - sizeof(HeapBlock)); + segments_to_size(b->length()) - sizeof(HeapBlock)); #endif add_to_freelist(b); @@ -299,32 +310,14 @@ size_t CodeHeap::max_capacity() const { } size_t CodeHeap::allocated_capacity() const { - // Start with the committed size in _memory; - size_t l = _memory.committed_size(); - - // Subtract the committed, but unused, segments - l -= size(_number_of_committed_segments - _next_segment); - - // Subtract the size of the freelist - l -= size(_free_segments); - - return l; + // size of used heap - size on freelist + return segments_to_size(_next_segment - _freelist_segments); } -size_t CodeHeap::largest_free_block() const { - // First check unused space excluding free blocks. - size_t free_sz = size(_free_segments); - size_t unused = max_capacity() - allocated_capacity() - free_sz; - if (unused >= free_sz) - return unused; - - // Now check largest free block. 
- size_t len = 0; - for (FreeBlock* b = _freelist; b != NULL; b = b->link()) { - if (b->length() > len) - len = b->length(); - } - return MAX2(unused, size(len)); +// Returns size of the unallocated heap block +size_t CodeHeap::heap_unallocated_capacity() const { + // Total number of segments - number currently used + return segments_to_size(_number_of_reserved_segments - _next_segment); } // Free list management @@ -365,7 +358,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) { assert(b != _freelist, "cannot be removed twice"); // Mark as free and update free space count - _free_segments += b->length(); + _freelist_segments += b->length(); b->set_free(); // First element in list? @@ -400,7 +393,7 @@ void CodeHeap::add_to_freelist(HeapBlock *a) { // Search freelist for an entry on the list with the best fit // Return NULL if no one was found -FreeBlock* CodeHeap::search_freelist(size_t length) { +FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) { FreeBlock *best_block = NULL; FreeBlock *best_prev = NULL; size_t best_length = 0; @@ -411,6 +404,16 @@ FreeBlock* CodeHeap::search_freelist(size_t length) { while(cur != NULL) { size_t l = cur->length(); if (l >= length && (best_block == NULL || best_length > l)) { + + // Non critical allocations are not allowed to use the last part of the code heap. + if (!is_critical) { + // Make sure the end of the allocation doesn't cross into the last part of the code heap + if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) { + // the freelist is sorted by address - if one fails, all consecutive will also fail. + break; + } + } + // Remember best block, its previous element, and its length best_block = cur; best_prev = prev; @@ -452,7 +455,7 @@ FreeBlock* CodeHeap::search_freelist(size_t length) { } best_block->set_used(); - _free_segments -= length; + _freelist_segments -= length; return best_block; } @@ -478,7 +481,7 @@ void CodeHeap::verify() { } // Verify that freelist contains the right amount of free space - // guarantee(len == _free_segments, "wrong freelist"); + // guarantee(len == _freelist_segments, "wrong freelist"); // Verify that the number of free blocks is not out of hand. static int free_block_threshold = 10000; diff --git a/hotspot/src/share/vm/memory/heap.hpp b/hotspot/src/share/vm/memory/heap.hpp index f1aa3ffdc33..725592e67be 100644 --- a/hotspot/src/share/vm/memory/heap.hpp +++ b/hotspot/src/share/vm/memory/heap.hpp @@ -91,11 +91,11 @@ class CodeHeap : public CHeapObj { size_t _next_segment; FreeBlock* _freelist; - size_t _free_segments; // No. of segments in freelist + size_t _freelist_segments; // No. 
of segments in freelist // Helper functions - size_t number_of_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } - size_t size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; } + size_t size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; } + size_t segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; } size_t segment_for(void* p) const { return ((char*)p - _memory.low()) >> _log2_segment_size; } HeapBlock* block_at(size_t i) const { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); } @@ -110,7 +110,7 @@ class CodeHeap : public CHeapObj { // Toplevel freelist management void add_to_freelist(HeapBlock *b); - FreeBlock* search_freelist(size_t length); + FreeBlock* search_freelist(size_t length, bool is_critical); // Iteration helpers void* next_free(HeapBlock* b) const; @@ -132,22 +132,19 @@ class CodeHeap : public CHeapObj { void clear(); // clears all heap contents // Memory allocation - void* allocate (size_t size); // allocates a block of size or returns NULL + void* allocate (size_t size, bool is_critical); // allocates a block of size or returns NULL void deallocate(void* p); // deallocates a block // Attributes - void* begin() const { return _memory.low (); } - void* end() const { return _memory.high(); } - bool contains(void* p) const { return begin() <= p && p < end(); } - void* find_start(void* p) const; // returns the block containing p or NULL - size_t alignment_unit() const; // alignment of any block - size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit - static size_t header_size(); // returns the header size for each heap block + char* low_boundary() const { return _memory.low_boundary (); } + char* high() const { return _memory.high(); } + char* high_boundary() const { return _memory.high_boundary(); } - // Returns reserved area high and low addresses - char *low_boundary() const { return _memory.low_boundary (); } - char *high() const { return _memory.high(); } - char *high_boundary() const { return _memory.high_boundary(); } + bool contains(const void* p) const { return low_boundary() <= p && p < high(); } + void* find_start(void* p) const; // returns the block containing p or NULL + size_t alignment_unit() const; // alignment of any block + size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit + static size_t header_size(); // returns the header size for each heap block // Iteration @@ -161,8 +158,11 @@ class CodeHeap : public CHeapObj { size_t max_capacity() const; size_t allocated_capacity() const; size_t unallocated_capacity() const { return max_capacity() - allocated_capacity(); } - size_t largest_free_block() const; +private: + size_t heap_unallocated_capacity() const; + +public: // Debugging void verify(); void print() PRODUCT_RETURN; diff --git a/hotspot/src/share/vm/opto/output.cpp b/hotspot/src/share/vm/opto/output.cpp index 178f3b717eb..8c42920995e 100644 --- a/hotspot/src/share/vm/opto/output.cpp +++ b/hotspot/src/share/vm/opto/output.cpp @@ -1044,21 +1044,6 @@ void NonSafepointEmitter::emit_non_safepoint() { debug_info->end_non_safepoint(pc_offset); } - - -// helper for fill_buffer bailout logic -static void turn_off_compiler(Compile* C) { - if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) { - // Do not turn off compilation if a single giant method has - // 
blown the code cache size. - C->record_failure("excessive request to CodeCache"); - } else { - // Let CompilerBroker disable further compilations. - C->record_failure("CodeCache is full"); - } -} - - //------------------------------init_buffer------------------------------------ CodeBuffer* Compile::init_buffer(uint* blk_starts) { @@ -1158,7 +1143,7 @@ CodeBuffer* Compile::init_buffer(uint* blk_starts) { // Have we run out of code space? if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return NULL; } // Configure the code buffer. @@ -1476,7 +1461,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // Verify that there is sufficient space remaining cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size); if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return; } @@ -1633,7 +1618,7 @@ void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) { // One last check for failed CodeBuffer::expand: if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) { - turn_off_compiler(this); + C->record_failure("CodeCache is full"); return; } From 9ce964ad3894cbb6d93cf18c3a5fadbcc872954f Mon Sep 17 00:00:00 2001 From: Jiangli Zhou Date: Thu, 11 Apr 2013 23:06:33 -0400 Subject: [PATCH 022/162] 8012052: java/lang/invoke/6987555/Test6987555.java crashes with assert(mcs != NULL) failed: MethodCounters cannot be NULL Skip counter decay if the MethodCounters is NULL in NonTieredCompPolicy::delay_compilation(). Reviewed-by: kvn, dholmes --- hotspot/src/share/vm/runtime/compilationPolicy.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hotspot/src/share/vm/runtime/compilationPolicy.cpp b/hotspot/src/share/vm/runtime/compilationPolicy.cpp index cec42ae9195..3d670d6a297 100644 --- a/hotspot/src/share/vm/runtime/compilationPolicy.cpp +++ b/hotspot/src/share/vm/runtime/compilationPolicy.cpp @@ -297,9 +297,10 @@ void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { // that it's recommended to delay the complation of this method. 
void NonTieredCompPolicy::delay_compilation(Method* method) { MethodCounters* mcs = method->method_counters(); - assert(mcs != NULL, "MethodCounters cannot be NULL"); - mcs->invocation_counter()->decay(); - mcs->backedge_counter()->decay(); + if (mcs != NULL) { + mcs->invocation_counter()->decay(); + mcs->backedge_counter()->decay(); + } } void NonTieredCompPolicy::disable_compilation(Method* method) { From 6aa1ba2f503e87931dfdf9c39ec297b4d83a6d78 Mon Sep 17 00:00:00 2001 From: Alejandro Murillo Date: Thu, 11 Apr 2013 21:54:46 -0700 Subject: [PATCH 023/162] 8011949: new hotspot build - hs25-b29 Reviewed-by: jcoomes --- hotspot/make/hotspot_version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotspot/make/hotspot_version b/hotspot/make/hotspot_version index 865a90932d1..2f5b12ddbc2 100644 --- a/hotspot/make/hotspot_version +++ b/hotspot/make/hotspot_version @@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013 HS_MAJOR_VER=25 HS_MINOR_VER=0 -HS_BUILD_NUMBER=28 +HS_BUILD_NUMBER=29 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 From a4bf4b9be6d70055b8875d93b90b5475bcba99eb Mon Sep 17 00:00:00 2001 From: Athijegannathan Sundararajan Date: Mon, 15 Apr 2013 20:12:50 +0530 Subject: [PATCH 024/162] 8012240: Array.prototype.map.call({length: -1, get 0(){throw 0}}, function(){}).length does not throw error Reviewed-by: lagergren, jlaskey --- .../internal/runtime/arrays/MapIterator.java | 3 +- nashorn/test/script/basic/JDK-8012240.js | 47 +++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) create mode 100644 nashorn/test/script/basic/JDK-8012240.js diff --git a/nashorn/src/jdk/nashorn/internal/runtime/arrays/MapIterator.java b/nashorn/src/jdk/nashorn/internal/runtime/arrays/MapIterator.java index be9bc23dd1e..fdf9d3a4047 100644 --- a/nashorn/src/jdk/nashorn/internal/runtime/arrays/MapIterator.java +++ b/nashorn/src/jdk/nashorn/internal/runtime/arrays/MapIterator.java @@ -66,8 +66,7 @@ class MapIterator extends ArrayLikeIterator { bumpIndex(); } - // special case - balk at iterating to infinity or MAX_UINT - return (length != JSType.MAX_UINT) && indexInArray(); + return indexInArray(); } @Override diff --git a/nashorn/test/script/basic/JDK-8012240.js b/nashorn/test/script/basic/JDK-8012240.js new file mode 100644 index 00000000000..2ac6eaf7237 --- /dev/null +++ b/nashorn/test/script/basic/JDK-8012240.js @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/** + * JDK-8012240: Array.prototype.map.call({length: -1, get 0(){throw 0}}, function(){}).length does not throw error + * + * @test + * @run + */ + +var in_getter_for_0 = false; + +try { + Array.prototype.map.call( + { + length: -1, + get 0() { + in_getter_for_0 = true; + throw 0; + } + }, + function(){}).length; +} catch (e) { + if (e !== 0 || !in_getter_for_0) { + fail("should have thrown error from getter for '0'th element"); + } +} From bad6728ea40d6e49e291d85b0635f45ffb93855e Mon Sep 17 00:00:00 2001 From: Roland Westrelin Date: Tue, 16 Apr 2013 17:06:39 +0200 Subject: [PATCH 025/162] 8011901: Unsafe.getAndAddLong(obj, off, delta) does not work properly with long deltas Instruct xaddL_no_res shouldn't allow 64 bit constants. Reviewed-by: kvn --- hotspot/src/cpu/x86/vm/x86_64.ad | 8 +-- .../test/compiler/8011901/Test8011901.java | 68 +++++++++++++++++++ 2 files changed, 69 insertions(+), 7 deletions(-) create mode 100644 hotspot/test/compiler/8011901/Test8011901.java diff --git a/hotspot/src/cpu/x86/vm/x86_64.ad b/hotspot/src/cpu/x86/vm/x86_64.ad index 2953b8bb41d..170e2ff1100 100644 --- a/hotspot/src/cpu/x86/vm/x86_64.ad +++ b/hotspot/src/cpu/x86/vm/x86_64.ad @@ -2222,12 +2222,6 @@ encode %{ $$$emit32$src$$constant; %} - enc_class Con64(immL src) - %{ - // Output immediate - emit_d64($src$$constant); - %} - enc_class Con32F_as_bits(immF src) %{ // Output Float immediate bits @@ -7608,7 +7602,7 @@ instruct xaddI( memory mem, rRegI newval, rFlagsReg cr) %{ ins_pipe( pipe_cmpxchg ); %} -instruct xaddL_no_res( memory mem, Universe dummy, immL add, rFlagsReg cr) %{ +instruct xaddL_no_res( memory mem, Universe dummy, immL32 add, rFlagsReg cr) %{ predicate(n->as_LoadStore()->result_not_used()); match(Set dummy (GetAndAddL mem add)); effect(KILL cr); diff --git a/hotspot/test/compiler/8011901/Test8011901.java b/hotspot/test/compiler/8011901/Test8011901.java new file mode 100644 index 00000000000..6ff0a932118 --- /dev/null +++ b/hotspot/test/compiler/8011901/Test8011901.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8011901 + * @summary instruct xaddL_no_res shouldn't allow 64 bit constants. 
+ * @run main/othervm -XX:-BackgroundCompilation Test8011901 + * + */ + +import java.lang.reflect.*; +import sun.misc.*; + +public class Test8011901 { + + private long ctl; + + private static final sun.misc.Unsafe U; + private static final long CTL; + + static { + try { + Field unsafe = sun.misc.Unsafe.class.getDeclaredField("theUnsafe"); + unsafe.setAccessible(true); + U = (sun.misc.Unsafe) unsafe.get(null); + CTL = U.objectFieldOffset(Test8011901.class.getDeclaredField("ctl")); + } catch (Exception e) { + throw new Error(e); + } + } + + public static void main(String[] args) { + for(int c = 0; c < 20000; c++) { + new Test8011901().makeTest(); + } + System.out.println("Test Passed"); + } + + public static final long EXPECTED = 1L << 42; + + public void makeTest() { + U.getAndAddLong(this, CTL, EXPECTED); + if (ctl != EXPECTED) { + throw new RuntimeException("Test failed. Expected: " + EXPECTED + ", but got = " + ctl); + } + } +} From 49eb3c0de1815c826f35cd803987c9ccceb5b450 Mon Sep 17 00:00:00 2001 From: Athijegannathan Sundararajan Date: Wed, 17 Apr 2013 16:52:06 +0530 Subject: [PATCH 026/162] 8012457: Function.prototype.apply should accept any array-like argument for function arguments Reviewed-by: lagergren, jlaskey --- .../internal/objects/NativeFunction.java | 14 +----- nashorn/test/script/basic/JDK-8012457.js | 46 +++++++++++++++++++ 2 files changed, 48 insertions(+), 12 deletions(-) create mode 100644 nashorn/test/script/basic/JDK-8012457.js diff --git a/nashorn/src/jdk/nashorn/internal/objects/NativeFunction.java b/nashorn/src/jdk/nashorn/internal/objects/NativeFunction.java index f5c0c290632..13c1bc152ad 100644 --- a/nashorn/src/jdk/nashorn/internal/objects/NativeFunction.java +++ b/nashorn/src/jdk/nashorn/internal/objects/NativeFunction.java @@ -81,23 +81,13 @@ public final class NativeFunction { Object[] args = null; - if (ScriptObject.isArray(array)) { - args = ((NativeArray)array).asObjectArray(); - } else if (array instanceof ScriptObject) { + if (array instanceof ScriptObject) { // look for array-like object final ScriptObject sobj = (ScriptObject)array; final Object len = sobj.getLength(); - - if (len == UNDEFINED || len == null) { - throw typeError("function.apply.expects.array"); - } - final int n = (int)JSType.toUint32(len); - if (n != JSType.toNumber(len)) { - throw typeError("function.apply.expects.array"); - } - args = new Object[(int)JSType.toUint32(len)]; + args = new Object[n]; for (int i = 0; i < args.length; i++) { args[i] = sobj.get(i); } diff --git a/nashorn/test/script/basic/JDK-8012457.js b/nashorn/test/script/basic/JDK-8012457.js new file mode 100644 index 00000000000..2f71a9a95e1 --- /dev/null +++ b/nashorn/test/script/basic/JDK-8012457.js @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * JDK-8012457: Function.prototype.apply should accept any array-like argument for function arguments + * + * @test + * @run + */ + +// no exception for these +Function().apply(null, {length: null}) +Function().apply(null, {length: 0.1}) + +// getter should be called +var getter_0_called = false; + +Function().apply(null, + Object.defineProperty([],"0", + { get: function(){ getter_0_called = true; return 0 } + }) +); + +if (! getter_0_called) { + fail("getter for '0' of arguments array not called"); +} From 54b7ae1ff969d744c13ad8be24aeeddc70740f90 Mon Sep 17 00:00:00 2001 From: Harold Seigel Date: Wed, 17 Apr 2013 08:20:02 -0400 Subject: [PATCH 027/162] 8009928: PSR:PERF Increase default string table size Increase default string table size to 60013 for 64-bit platforms. Reviewed-by: coleenp, dholmes --- hotspot/src/share/vm/runtime/arguments.cpp | 2 +- hotspot/src/share/vm/utilities/globalDefinitions.hpp | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hotspot/src/share/vm/runtime/arguments.cpp b/hotspot/src/share/vm/runtime/arguments.cpp index 6db5ff5820d..236a1272624 100644 --- a/hotspot/src/share/vm/runtime/arguments.cpp +++ b/hotspot/src/share/vm/runtime/arguments.cpp @@ -1901,7 +1901,7 @@ bool Arguments::check_vm_args_consistency() { // Divide by bucket size to prevent a large size from causing rollover when // calculating amount of memory needed to be allocated for the String table. - status = status && verify_interval(StringTableSize, defaultStringTableSize, + status = status && verify_interval(StringTableSize, minimumStringTableSize, (max_uintx / StringTable::bucket_size()), "StringTable size"); if (MinHeapFreeRatio > MaxHeapFreeRatio) { diff --git a/hotspot/src/share/vm/utilities/globalDefinitions.hpp b/hotspot/src/share/vm/utilities/globalDefinitions.hpp index 52258981857..5609ffdf431 100644 --- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp +++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp @@ -328,9 +328,10 @@ const int max_method_code_size = 64*K - 1; // JVM spec, 2nd ed. section 4.8.1 ( //---------------------------------------------------------------------------------------------------- -// Minimum StringTableSize value +// Default and minimum StringTableSize values -const int defaultStringTableSize=1009; +const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013); +const int minimumStringTableSize=1009; //---------------------------------------------------------------------------------------------------- From 9a94591b8e53aa191b065c2774ccce1bd394a9c0 Mon Sep 17 00:00:00 2001 From: Daniel Fuchs Date: Wed, 17 Apr 2013 15:23:19 +0200 Subject: [PATCH 028/162] 8005954: JAXP Plugability Layer should use java.util.ServiceLoader This fix replaces manual processing of files under META-INF/services in JAXP factories by calls to java.util.ServiceLoader. 
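As an illustrative aside, not part of this patch, the lookup pattern the message describes looks roughly like the sketch below. The class name and the unchecked wrapper exception are assumptions for illustration; each JAXP factory actually rethrows the error as its own checked configuration exception.

    import java.util.Iterator;
    import java.util.ServiceConfigurationError;
    import java.util.ServiceLoader;

    // Illustrative sketch only; the real per-package FactoryFinder classes differ.
    class ProviderLookup {
        // Returns the first provider registered under META-INF/services/<type-name>,
        // or null if none is found on the class path.
        static <T> T findFirstProvider(Class<T> type) {
            try {
                ServiceLoader<T> loader = ServiceLoader.load(type);
                Iterator<T> providers = loader.iterator();
                return providers.hasNext() ? providers.next() : null;
            } catch (ServiceConfigurationError error) {
                // JAXP factories surface this as their checked configuration exception,
                // e.g. DatatypeConfigurationException for DatatypeFactory.
                throw new IllegalStateException(error.getMessage(), error);
            }
        }
    }

Relying on ServiceLoader removes the hand parsing of META-INF/services files and gives uniform ServiceConfigurationError reporting across the factories.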
Reviewed-by: alanb, joehw, mchung
---
 .../javax/xml/datatype/DatatypeFactory.java   | 1484 +++++++++--------
 .../src/javax/xml/datatype/FactoryFinder.java |  192 +--
 .../xml/parsers/DocumentBuilderFactory.java   |  174 +-
 jaxp/src/javax/xml/parsers/FactoryFinder.java |  202 +--
 .../javax/xml/parsers/SAXParserFactory.java   |   95 +-
 jaxp/src/javax/xml/stream/FactoryFinder.java  |  230 ++-
 .../src/javax/xml/stream/XMLEventFactory.java |  102 +-
 .../src/javax/xml/stream/XMLInputFactory.java |   97 +-
 .../javax/xml/stream/XMLOutputFactory.java    |   99 +-
 .../javax/xml/transform/FactoryFinder.java    |  264 ++-
 .../xml/transform/TransformerFactory.java     |   46 +-
 .../javax/xml/validation/SchemaFactory.java   |   54 +-
 .../SchemaFactoryConfigurationError.java      |   80 +
 .../xml/validation/SchemaFactoryFinder.java   |  340 ++--
 jaxp/src/javax/xml/xpath/XPathFactory.java    |  144 +-
 .../javax/xml/xpath/XPathFactoryFinder.java   |  362 ++--
 16 files changed, 1829 insertions(+), 2136 deletions(-)
 create mode 100644 jaxp/src/javax/xml/validation/SchemaFactoryConfigurationError.java

diff --git a/jaxp/src/javax/xml/datatype/DatatypeFactory.java b/jaxp/src/javax/xml/datatype/DatatypeFactory.java
index 530d95818a8..4f9d6f67c23 100644
--- a/jaxp/src/javax/xml/datatype/DatatypeFactory.java
+++ b/jaxp/src/javax/xml/datatype/DatatypeFactory.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@ package javax.xml.datatype;
-import java.math.BigInteger;
 import java.math.BigDecimal;
+import java.math.BigInteger;
 import java.util.GregorianCalendar;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -34,12 +34,12 @@ import java.util.regex.Pattern;
 /**
  *

Factory that creates new javax.xml.datatype Objects that map XML to/from Java Objects.

* - *

{@link #newInstance()} is used to create a new DatatypeFactory. - * The following implementation resolution mechanisms are used in the following order:

+ *

A new instance of the DatatypeFactory is created through the {@link #newInstance()} method + * that uses the following implementation resolution mechanisms to determine an implementation:

*
    *
  1. * If the system property specified by {@link #DATATYPEFACTORY_PROPERTY}, "javax.xml.datatype.DatatypeFactory", - * exists, a class with the name of the property's value is instantiated. + * exists, a class with the name of the property value is instantiated. * Any Exception thrown during the instantiation process is wrapped as a {@link DatatypeConfigurationException}. *
  2. *
  3. @@ -48,8 +48,12 @@ import java.util.regex.Pattern; * and processed as documented in the prior step. *
  4. *
  5. - * The services resolution mechanism is used, e.g. META-INF/services/java.xml.datatype.DatatypeFactory. - * Any Exception thrown during the instantiation process is wrapped as a {@link DatatypeConfigurationException}. + * Uses the service-provider loading facilities, defined by the {@link java.util.ServiceLoader} class, to attempt + * to locate and load an implementation of the service. + *
    + * In case of {@link java.util.ServiceConfigurationError service + * configuration error} a {@link javax.xml.datatype.DatatypeConfigurationException} + * will be thrown. *
  6. *
  7. * The final mechanism is to attempt to instantiate the Class specified by @@ -67,26 +71,33 @@ import java.util.regex.Pattern; */ public abstract class DatatypeFactory { - /** - *

    Default property name as defined in JSR 206: Java(TM) API for XML Processing (JAXP) 1.3.

    - * - *

    Default value is javax.xml.datatype.DatatypeFactory.

    - */ - public static final String DATATYPEFACTORY_PROPERTY = "javax.xml.datatype.DatatypeFactory"; + /** + *

    Default property name as defined in JSR 206: Java(TM) API for XML Processing (JAXP) 1.3.

    + * + *

    Default value is javax.xml.datatype.DatatypeFactory.

    + */ + public static final String DATATYPEFACTORY_PROPERTY = + // We use a String constant here, rather than calling + // DatatypeFactory.class.getName() - in order to make javadoc + // generate a See Also: Constant Field Value link. + "javax.xml.datatype.DatatypeFactory"; - /** - *

    Default implementation class name as defined in - * JSR 206: Java(TM) API for XML Processing (JAXP) 1.3.

    - * - *

    Implementers should specify the name of an appropriate class - * to be instantiated if no other implementation resolution mechanism - * succeeds.

    - * - *

    Users should not refer to this field; it is intended only to - * document a factory implementation detail. - *

    - */ - public static final String DATATYPEFACTORY_IMPLEMENTATION_CLASS = new String("com.sun.org.apache.xerces.internal.jaxp.datatype.DatatypeFactoryImpl"); + /** + *

    Default implementation class name as defined in + * JSR 206: Java(TM) API for XML Processing (JAXP) 1.3.

    + * + *

    Implementers should specify the name of an appropriate class + * to be instantiated if no other implementation resolution mechanism + * succeeds.

    + * + *

    Users should not refer to this field; it is intended only to + * document a factory implementation detail. + *

    + */ + public static final String DATATYPEFACTORY_IMPLEMENTATION_CLASS = + // We use new String() here to prevent javadoc from generating + // a See Also: Constant Field Value link. + new String("com.sun.org.apache.xerces.internal.jaxp.datatype.DatatypeFactoryImpl"); /** * http://www.w3.org/TR/xpath-datamodel/#xdtschema defines two regexps @@ -101,40 +112,36 @@ public abstract class DatatypeFactory { private static final Pattern XDTSCHEMA_DTD = Pattern.compile("[^YM]*[DT].*"); - /** - *

    Protected constructor to prevent instaniation outside of package.

    - * - *

    Use {@link #newInstance()} to create a DatatypeFactory.

    - */ - protected DatatypeFactory() { - } + /** + *

    Protected constructor to prevent instaniation outside of package.

    + * + *

    Use {@link #newInstance()} to create a DatatypeFactory.

    + */ + protected DatatypeFactory() { + } - /** - *

    Obtain a new instance of a DatatypeFactory.

    - * + /** + *

    Obtain a new instance of a DatatypeFactory.

    + * *

    The implementation resolution mechanisms are defined in this * Class's documentation.

    - * - * @return New instance of a DatatypeFactory - * - * @throws DatatypeConfigurationException If the implementation is not - * available or cannot be instantiated. + * + * @return New instance of a DatatypeFactory + * + * @throws DatatypeConfigurationException If the implementation is not + * available or cannot be instantiated. * * @see #newInstance(String factoryClassName, ClassLoader classLoader) - */ - public static DatatypeFactory newInstance() - throws DatatypeConfigurationException { + */ + public static DatatypeFactory newInstance() + throws DatatypeConfigurationException { - try { - return (DatatypeFactory) FactoryFinder.find( - /* The default property name according to the JAXP spec */ - DATATYPEFACTORY_PROPERTY, - /* The fallback implementation class name */ - DATATYPEFACTORY_IMPLEMENTATION_CLASS); - } catch (FactoryFinder.ConfigurationError e) { - throw new DatatypeConfigurationException(e.getMessage(), e.getException()); - } - } + return FactoryFinder.find( + /* The default property name according to the JAXP spec */ + DatatypeFactory.class, + /* The fallback implementation class name */ + DATATYPEFACTORY_IMPLEMENTATION_CLASS); + } /** *

    Obtain a new instance of a DatatypeFactory from class name. @@ -172,57 +179,54 @@ public abstract class DatatypeFactory { */ public static DatatypeFactory newInstance(String factoryClassName, ClassLoader classLoader) throws DatatypeConfigurationException { - try { - return (DatatypeFactory) FactoryFinder.newInstance(factoryClassName, classLoader, false); - } catch (FactoryFinder.ConfigurationError e) { - throw new DatatypeConfigurationException(e.getMessage(), e.getException()); - } - } + return FactoryFinder.newInstance(DatatypeFactory.class, + factoryClassName, classLoader, false); + } - /** - *

    Obtain a new instance of a Duration - * specifying the Duration as its string representation, "PnYnMnDTnHnMnS", - * as defined in XML Schema 1.0 section 3.2.6.1.

    - * - *

    XML Schema Part 2: Datatypes, 3.2.6 duration, defines duration as:

    - *
    - * duration represents a duration of time. - * The value space of duration is a six-dimensional space where the coordinates designate the - * Gregorian year, month, day, hour, minute, and second components defined in Section 5.5.3.2 of [ISO 8601], respectively. - * These components are ordered in their significance by their order of appearance i.e. as - * year, month, day, hour, minute, and second. - *
    - *

    All six values are set and availabe from the created {@link Duration}

    + /** + *

    Obtain a new instance of a Duration + * specifying the Duration as its string representation, "PnYnMnDTnHnMnS", + * as defined in XML Schema 1.0 section 3.2.6.1.

    + * + *

    XML Schema Part 2: Datatypes, 3.2.6 duration, defines duration as:

    + *
    + * duration represents a duration of time. + * The value space of duration is a six-dimensional space where the coordinates designate the + * Gregorian year, month, day, hour, minute, and second components defined in Section 5.5.3.2 of [ISO 8601], respectively. + * These components are ordered in their significance by their order of appearance i.e. as + * year, month, day, hour, minute, and second. + *
    + *

    All six values are set and available from the created {@link Duration}

    * *

    The XML Schema specification states that values can be of an arbitrary size. * Implementations may chose not to or be incapable of supporting arbitrarily large and/or small values. * An {@link UnsupportedOperationException} will be thrown with a message indicating implementation limits * if implementation capacities are exceeded.

    - * - * @param lexicalRepresentation String representation of a Duration. - * - * @return New Duration created from parsing the lexicalRepresentation. - * - * @throws IllegalArgumentException If lexicalRepresentation is not a valid representation of a Duration. - * @throws UnsupportedOperationException If implementation cannot support requested values. - * @throws NullPointerException if lexicalRepresentation is null. - */ - public abstract Duration newDuration(final String lexicalRepresentation); + * + * @param lexicalRepresentation String representation of a Duration. + * + * @return New Duration created from parsing the lexicalRepresentation. + * + * @throws IllegalArgumentException If lexicalRepresentation is not a valid representation of a Duration. + * @throws UnsupportedOperationException If implementation cannot support requested values. + * @throws NullPointerException if lexicalRepresentation is null. + */ + public abstract Duration newDuration(final String lexicalRepresentation); - /** - *

    Obtain a new instance of a Duration - * specifying the Duration as milliseconds.

    - * - *

    XML Schema Part 2: Datatypes, 3.2.6 duration, defines duration as:

    - *
    - * duration represents a duration of time. - * The value space of duration is a six-dimensional space where the coordinates designate the - * Gregorian year, month, day, hour, minute, and second components defined in Section 5.5.3.2 of [ISO 8601], respectively. - * These components are ordered in their significance by their order of appearance i.e. as - * year, month, day, hour, minute, and second. - *
    + /** + *

    Obtain a new instance of a Duration + * specifying the Duration as milliseconds.

    + * + *

    XML Schema Part 2: Datatypes, 3.2.6 duration, defines duration as:

    + *
    + * duration represents a duration of time. + * The value space of duration is a six-dimensional space where the coordinates designate the + * Gregorian year, month, day, hour, minute, and second components defined in Section 5.5.3.2 of [ISO 8601], respectively. + * These components are ordered in their significance by their order of appearance i.e. as + * year, month, day, hour, minute, and second. + *
    *

    All six values are set by computing their values from the specified milliseconds - * and are availabe using the get methods of the created {@link Duration}. + * and are available using the get methods of the created {@link Duration}. * The values conform to and are defined by:

    *
      *
    • ISO 8601:2000(E) Section 5.5.3.2 Alternative format
    • @@ -231,25 +235,25 @@ public abstract class DatatypeFactory { * *
    • {@link XMLGregorianCalendar} Date/Time Datatype Field Mapping Between XML Schema 1.0 and Java Representation
    • *
    - * - *

    The default start instance is defined by {@link GregorianCalendar}'s use of the start of the epoch: i.e., - * {@link java.util.Calendar#YEAR} = 1970, - * {@link java.util.Calendar#MONTH} = {@link java.util.Calendar#JANUARY}, - * {@link java.util.Calendar#DATE} = 1, etc. - * This is important as there are variations in the Gregorian Calendar, - * e.g. leap years have different days in the month = {@link java.util.Calendar#FEBRUARY} - * so the result of {@link Duration#getMonths()} and {@link Duration#getDays()} can be influenced.

    - * - * @param durationInMilliSeconds Duration in milliseconds to create. - * - * @return New Duration representing durationInMilliSeconds. - */ - public abstract Duration newDuration(final long durationInMilliSeconds); + * + *

    The default start instance is defined by {@link GregorianCalendar}'s use of the start of the epoch: i.e., + * {@link java.util.Calendar#YEAR} = 1970, + * {@link java.util.Calendar#MONTH} = {@link java.util.Calendar#JANUARY}, + * {@link java.util.Calendar#DATE} = 1, etc. + * This is important as there are variations in the Gregorian Calendar, + * e.g. leap years have different days in the month = {@link java.util.Calendar#FEBRUARY} + * so the result of {@link Duration#getMonths()} and {@link Duration#getDays()} can be influenced.

    + * + * @param durationInMilliSeconds Duration in milliseconds to create. + * + * @return New Duration representing durationInMilliSeconds. + */ + public abstract Duration newDuration(final long durationInMilliSeconds); - /** - *

    Obtain a new instance of a Duration - * specifying the Duration as isPositive, years, months, days, hours, minutes, seconds.

    - * + /** + *

    Obtain a new instance of a Duration + * specifying the Duration as isPositive, years, months, days, hours, minutes, seconds.

    + * *

    The XML Schema specification states that values can be of an arbitrary size. * Implementations may chose not to or be incapable of supporting arbitrarily large and/or small values. * An {@link UnsupportedOperationException} will be thrown with a message indicating implementation limits @@ -257,35 +261,35 @@ public abstract class DatatypeFactory { * *

    A null value indicates that field is not set.

    * - * @param isPositive Set to false to create a negative duration. When the length - * of the duration is zero, this parameter will be ignored. - * @param years of this Duration - * @param months of this Duration - * @param days of this Duration - * @param hours of this Duration - * @param minutes of this Duration - * @param seconds of this Duration - * - * @return New Duration created from the specified values. - * - * @throws IllegalArgumentException If the values are not a valid representation of a - * Duration: if all the fields (years, months, ...) are null or - * if any of the fields is negative. - * @throws UnsupportedOperationException If implementation cannot support requested values. - */ - public abstract Duration newDuration( - final boolean isPositive, - final BigInteger years, - final BigInteger months, - final BigInteger days, - final BigInteger hours, - final BigInteger minutes, - final BigDecimal seconds); + * @param isPositive Set to false to create a negative duration. When the length + * of the duration is zero, this parameter will be ignored. + * @param years of this Duration + * @param months of this Duration + * @param days of this Duration + * @param hours of this Duration + * @param minutes of this Duration + * @param seconds of this Duration + * + * @return New Duration created from the specified values. + * + * @throws IllegalArgumentException If the values are not a valid representation of a + * Duration: if all the fields (years, months, ...) are null or + * if any of the fields is negative. + * @throws UnsupportedOperationException If implementation cannot support requested values. + */ + public abstract Duration newDuration( + final boolean isPositive, + final BigInteger years, + final BigInteger months, + final BigInteger days, + final BigInteger hours, + final BigInteger minutes, + final BigDecimal seconds); - /** - *

    Obtain a new instance of a Duration - * specifying the Duration as isPositive, years, months, days, hours, minutes, seconds.

    - * + /** + *

    Obtain a new instance of a Duration + * specifying the Duration as isPositive, years, months, days, hours, minutes, seconds.

    + * *

    A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

    * * @param isPositive Set to false to create a negative duration. When the length @@ -297,113 +301,113 @@ public abstract class DatatypeFactory { * @param minutes of this Duration * @param seconds of this Duration * - * @return New Duration created from the specified values. - * - * @throws IllegalArgumentException If the values are not a valid representation of a - * Duration: if any of the fields is negative. - * - * @see #newDuration( - * boolean isPositive, - * BigInteger years, - * BigInteger months, - * BigInteger days, - * BigInteger hours, - * BigInteger minutes, - * BigDecimal seconds) - */ - public Duration newDuration( - final boolean isPositive, - final int years, - final int months, - final int days, - final int hours, - final int minutes, - final int seconds) { + * @return New Duration created from the specified values. + * + * @throws IllegalArgumentException If the values are not a valid representation of a + * Duration: if any of the fields is negative. + * + * @see #newDuration( + * boolean isPositive, + * BigInteger years, + * BigInteger months, + * BigInteger days, + * BigInteger hours, + * BigInteger minutes, + * BigDecimal seconds) + */ + public Duration newDuration( + final boolean isPositive, + final int years, + final int months, + final int days, + final int hours, + final int minutes, + final int seconds) { - // years may not be set - BigInteger realYears = (years != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) years) : null; + // years may not be set + BigInteger realYears = (years != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) years) : null; - // months may not be set - BigInteger realMonths = (months != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) months) : null; + // months may not be set + BigInteger realMonths = (months != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) months) : null; - // days may not be set - BigInteger realDays = (days != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) days) : null; + // days may not be set + BigInteger realDays = (days != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) days) : null; - // hours may not be set - BigInteger realHours = (hours != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) hours) : null; + // hours may not be set + BigInteger realHours = (hours != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) hours) : null; - // minutes may not be set - BigInteger realMinutes = (minutes != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) minutes) : null; + // minutes may not be set + BigInteger realMinutes = (minutes != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) minutes) : null; - // seconds may not be set - BigDecimal realSeconds = (seconds != DatatypeConstants.FIELD_UNDEFINED) ? BigDecimal.valueOf((long) seconds) : null; + // seconds may not be set + BigDecimal realSeconds = (seconds != DatatypeConstants.FIELD_UNDEFINED) ? BigDecimal.valueOf((long) seconds) : null; - return newDuration( - isPositive, - realYears, - realMonths, - realDays, - realHours, - realMinutes, - realSeconds - ); - } + return newDuration( + isPositive, + realYears, + realMonths, + realDays, + realHours, + realMinutes, + realSeconds + ); + } - /** - *

    Create a Duration of type xdt:dayTimeDuration by parsing its String representation, - * "PnDTnHnMnS", - * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    - * - *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration - * whose lexical representation contains only day, hour, minute, and second components. - * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    - * - *

    All four values are set and availabe from the created {@link Duration}

    - * + /** + *

    Create a Duration of type xdt:dayTimeDuration by parsing its String representation, + * "PnDTnHnMnS", + * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    + * + *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration + * whose lexical representation contains only day, hour, minute, and second components. + * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    + * + *

    All four values are set and available from the created {@link Duration}

    + * *

    The XML Schema specification states that values can be of an arbitrary size. * Implementations may chose not to or be incapable of supporting arbitrarily large and/or small values. * An {@link UnsupportedOperationException} will be thrown with a message indicating implementation limits * if implementation capacities are exceeded.

    * - * @param lexicalRepresentation Lexical representation of a duration. - * - * @return New Duration created using the specified lexicalRepresentation. - * - * @throws IllegalArgumentException If lexicalRepresentation is not a valid representation of a Duration expressed only in terms of days and time. - * @throws UnsupportedOperationException If implementation cannot support requested values. - * @throws NullPointerException If lexicalRepresentation is null. - */ - public Duration newDurationDayTime(final String lexicalRepresentation) { - // lexicalRepresentation must be non-null - if (lexicalRepresentation == null) { - throw new NullPointerException( - "Trying to create an xdt:dayTimeDuration with an invalid" - + " lexical representation of \"null\""); - } - - // test lexicalRepresentation against spec regex - Matcher matcher = XDTSCHEMA_DTD.matcher(lexicalRepresentation); - if (!matcher.matches()) { - throw new IllegalArgumentException( - "Trying to create an xdt:dayTimeDuration with an invalid" - + " lexical representation of \"" + lexicalRepresentation - + "\", data model requires years and months only."); - } - - return newDuration(lexicalRepresentation); + * @param lexicalRepresentation Lexical representation of a duration. + * + * @return New Duration created using the specified lexicalRepresentation. + * + * @throws IllegalArgumentException If lexicalRepresentation is not a valid representation of a Duration expressed only in terms of days and time. + * @throws UnsupportedOperationException If implementation cannot support requested values. + * @throws NullPointerException If lexicalRepresentation is null. + */ + public Duration newDurationDayTime(final String lexicalRepresentation) { + // lexicalRepresentation must be non-null + if (lexicalRepresentation == null) { + throw new NullPointerException( + "Trying to create an xdt:dayTimeDuration with an invalid" + + " lexical representation of \"null\""); } - /** - *

    Create a Duration of type xdt:dayTimeDuration using the specified milliseconds as defined in - * - * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    - * - *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration - * whose lexical representation contains only day, hour, minute, and second components. - * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    - * + // test lexicalRepresentation against spec regex + Matcher matcher = XDTSCHEMA_DTD.matcher(lexicalRepresentation); + if (!matcher.matches()) { + throw new IllegalArgumentException( + "Trying to create an xdt:dayTimeDuration with an invalid" + + " lexical representation of \"" + lexicalRepresentation + + "\", data model requires years and months only."); + } + + return newDuration(lexicalRepresentation); + } + + /** + *

    Create a Duration of type xdt:dayTimeDuration using the specified milliseconds as defined in + * + * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    + * + *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration + * whose lexical representation contains only day, hour, minute, and second components. + * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    + * *

    All four values are set by computing their values from the specified milliseconds - * and are availabe using the get methods of the created {@link Duration}. + * and are available using the get methods of the created {@link Duration}. * The values conform to and are defined by:

    *
      *
    • ISO 8601:2000(E) Section 5.5.3.2 Alternative format
    • @@ -412,39 +416,39 @@ public abstract class DatatypeFactory { * *
    • {@link XMLGregorianCalendar} Date/Time Datatype Field Mapping Between XML Schema 1.0 and Java Representation
    • *
    - * - *

    The default start instance is defined by {@link GregorianCalendar}'s use of the start of the epoch: i.e., - * {@link java.util.Calendar#YEAR} = 1970, - * {@link java.util.Calendar#MONTH} = {@link java.util.Calendar#JANUARY}, - * {@link java.util.Calendar#DATE} = 1, etc. - * This is important as there are variations in the Gregorian Calendar, - * e.g. leap years have different days in the month = {@link java.util.Calendar#FEBRUARY} - * so the result of {@link Duration#getDays()} can be influenced.

    - * + * + *

    The default start instance is defined by {@link GregorianCalendar}'s use of the start of the epoch: i.e., + * {@link java.util.Calendar#YEAR} = 1970, + * {@link java.util.Calendar#MONTH} = {@link java.util.Calendar#JANUARY}, + * {@link java.util.Calendar#DATE} = 1, etc. + * This is important as there are variations in the Gregorian Calendar, + * e.g. leap years have different days in the month = {@link java.util.Calendar#FEBRUARY} + * so the result of {@link Duration#getDays()} can be influenced.

    + * *

    Any remaining milliseconds after determining the day, hour, minute and second are discarded.

    * - * @param durationInMilliseconds Milliseconds of Duration to create. - * - * @return New Duration created with the specified durationInMilliseconds. - * - * @see - * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration - */ - public Duration newDurationDayTime(final long durationInMilliseconds) { + * @param durationInMilliseconds Milliseconds of Duration to create. + * + * @return New Duration created with the specified durationInMilliseconds. + * + * @see + * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration + */ + public Duration newDurationDayTime(final long durationInMilliseconds) { - return newDuration(durationInMilliseconds); - } + return newDuration(durationInMilliseconds); + } - /** - *

    Create a Duration of type xdt:dayTimeDuration using the specified - * day, hour, minute and second as defined in - * - * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    - * - *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration - * whose lexical representation contains only day, hour, minute, and second components. - * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    - * + /** + *

    Create a Duration of type xdt:dayTimeDuration using the specified + * day, hour, minute and second as defined in + * + * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    + * + *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration + * whose lexical representation contains only day, hour, minute, and second components. + * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    + * *

    The XML Schema specification states that values can be of an arbitrary size. * Implementations may chose not to or be incapable of supporting arbitrarily large and/or small values. * An {@link UnsupportedOperationException} will be thrown with a message indicating implementation limits @@ -454,102 +458,102 @@ public abstract class DatatypeFactory { * * @param isPositive Set to false to create a negative duration. When the length * of the duration is zero, this parameter will be ignored. - * @param day Day of Duration. - * @param hour Hour of Duration. - * @param minute Minute of Duration. - * @param second Second of Duration. - * - * @return New Duration created with the specified day, hour, minute - * and second. - * - * @throws IllegalArgumentException If the values are not a valid representation of a - * Duration: if all the fields (day, hour, ...) are null or - * if any of the fields is negative. - * @throws UnsupportedOperationException If implementation cannot support requested values. - */ - public Duration newDurationDayTime( - final boolean isPositive, - final BigInteger day, - final BigInteger hour, - final BigInteger minute, - final BigInteger second) { + * @param day Day of Duration. + * @param hour Hour of Duration. + * @param minute Minute of Duration. + * @param second Second of Duration. + * + * @return New Duration created with the specified day, hour, minute + * and second. + * + * @throws IllegalArgumentException If the values are not a valid representation of a + * Duration: if all the fields (day, hour, ...) are null or + * if any of the fields is negative. + * @throws UnsupportedOperationException If implementation cannot support requested values. + */ + public Duration newDurationDayTime( + final boolean isPositive, + final BigInteger day, + final BigInteger hour, + final BigInteger minute, + final BigInteger second) { - return newDuration( - isPositive, - null, // years - null, // months - day, - hour, - minute, - (second != null)? new BigDecimal(second):null - ); - } + return newDuration( + isPositive, + null, // years + null, // months + day, + hour, + minute, + (second != null)? new BigDecimal(second):null + ); + } - /** - *

    Create a Duration of type xdt:dayTimeDuration using the specified - * day, hour, minute and second as defined in - * - * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    - * - *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration - * whose lexical representation contains only day, hour, minute, and second components. - * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    - * + /** + *

    Create a Duration of type xdt:dayTimeDuration using the specified + * day, hour, minute and second as defined in + * + * XQuery 1.0 and XPath 2.0 Data Model, xdt:dayTimeDuration.

    + * + *

    The datatype xdt:dayTimeDuration is a subtype of xs:duration + * whose lexical representation contains only day, hour, minute, and second components. + * This datatype resides in the namespace http://www.w3.org/2003/11/xpath-datatypes.

    + * *

    A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

    * * @param isPositive Set to false to create a negative duration. When the length * of the duration is zero, this parameter will be ignored. - * @param day Day of Duration. - * @param hour Hour of Duration. - * @param minute Minute of Duration. - * @param second Second of Duration. - * - * @return New Duration created with the specified day, hour, minute - * and second. - * - * @throws IllegalArgumentException If the values are not a valid representation of a - * Duration: if any of the fields (day, hour, ...) is negative. - */ - public Duration newDurationDayTime( - final boolean isPositive, - final int day, - final int hour, - final int minute, - final int second) { + * @param day Day of Duration. + * @param hour Hour of Duration. + * @param minute Minute of Duration. + * @param second Second of Duration. + * + * @return New Duration created with the specified day, hour, minute + * and second. + * + * @throws IllegalArgumentException If the values are not a valid representation of a + * Duration: if any of the fields (day, hour, ...) is negative. + */ + public Duration newDurationDayTime( + final boolean isPositive, + final int day, + final int hour, + final int minute, + final int second) { - return newDurationDayTime( - isPositive, - BigInteger.valueOf((long) day), - BigInteger.valueOf((long) hour), - BigInteger.valueOf((long) minute), - BigInteger.valueOf((long) second) - ); - } + return newDurationDayTime( + isPositive, + BigInteger.valueOf((long) day), + BigInteger.valueOf((long) hour), + BigInteger.valueOf((long) minute), + BigInteger.valueOf((long) second) + ); + } - /** - *
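    A sketch of the int convenience overload, for illustration only (imports and the enclosing method are elided; the values are invented). Passing isPositive=false yields a negative duration; the flag is ignored when every field is zero.

        DatatypeFactory df = DatatypeFactory.newInstance();    // may throw DatatypeConfigurationException
        Duration overdue = df.newDurationDayTime(false, 0, 36, 0, 0);
        System.out.println(overdue.getSign());                 // -1
        System.out.println(overdue.getHours());                // 36 (fields are not normalized)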

    Create a Duration of type xdt:yearMonthDuration by parsing its String representation, - * "PnYnM", - * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    - * - *

    The datatype xdt:yearMonthDuration is a subtype of xs:duration - * whose lexical representation contains only year and month components. - * This datatype resides in the namespace {@link javax.xml.XMLConstants#W3C_XPATH_DATATYPE_NS_URI}.

    - * - *

    Both values are set and availabe from the created {@link Duration}

    - * + /** + *

    Create a Duration of type xdt:yearMonthDuration by parsing its String representation, + * "PnYnM", + * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    + * + *

    The datatype xdt:yearMonthDuration is a subtype of xs:duration + * whose lexical representation contains only year and month components. + * This datatype resides in the namespace {@link javax.xml.XMLConstants#W3C_XPATH_DATATYPE_NS_URI}.

    + * + *

    Both values are set and available from the created {@link Duration}

    + * *

    The XML Schema specification states that values can be of an arbitrary size. * Implementations may chose not to or be incapable of supporting arbitrarily large and/or small values. * An {@link UnsupportedOperationException} will be thrown with a message indicating implementation limits * if implementation capacities are exceeded.

    * - * @param lexicalRepresentation Lexical representation of a duration. - * - * @return New Duration created using the specified lexicalRepresentation. - * - * @throws IllegalArgumentException If lexicalRepresentation is not a valid representation of a Duration expressed only in terms of years and months. - * @throws UnsupportedOperationException If implementation cannot support requested values. - * @throws NullPointerException If lexicalRepresentation is null. - */ + * @param lexicalRepresentation Lexical representation of a duration. + * + * @return New Duration created using the specified lexicalRepresentation. + * + * @throws IllegalArgumentException If lexicalRepresentation is not a valid representation of a Duration expressed only in terms of years and months. + * @throws UnsupportedOperationException If implementation cannot support requested values. + * @throws NullPointerException If lexicalRepresentation is null. + */ public Duration newDurationYearMonth( final String lexicalRepresentation) { @@ -572,17 +576,17 @@ public abstract class DatatypeFactory { return newDuration(lexicalRepresentation); } - /** - *
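    A small usage sketch for the lexical form, not part of the patch (imports and the enclosing method are elided; "P1Y2M" is an invented value meaning one year and two months):

        DatatypeFactory df = DatatypeFactory.newInstance();
        Duration ym = df.newDurationYearMonth("P1Y2M");
        int years  = ym.getYears();    // 1
        int months = ym.getMonths();   // 2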

    Create a Duration of type xdt:yearMonthDuration using the specified milliseconds as defined in - * - * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    - * - *

    The datatype xdt:yearMonthDuration is a subtype of xs:duration - * whose lexical representation contains only year and month components. - * This datatype resides in the namespace {@link javax.xml.XMLConstants#W3C_XPATH_DATATYPE_NS_URI}.

    - * + /** + *

    Create a Duration of type xdt:yearMonthDuration using the specified milliseconds as defined in + * + * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    + * + *

    The datatype xdt:yearMonthDuration is a subtype of xs:duration + * whose lexical representation contains only year and month components. + * This datatype resides in the namespace {@link javax.xml.XMLConstants#W3C_XPATH_DATATYPE_NS_URI}.

    + * *

    Both values are set by computing their values from the specified milliseconds - * and are availabe using the get methods of the created {@link Duration}. + * and are available using the get methods of the created {@link Duration}. * The values conform to and are defined by:

    • ISO 8601:2000(E) Section 5.5.3.2 Alternative format
    @@ -592,20 +596,20 @@ public abstract class DatatypeFactory {
    • {@link XMLGregorianCalendar} Date/Time Datatype Field Mapping Between XML Schema 1.0 and Java Representation

    The default start instance is defined by {@link GregorianCalendar}'s use of the start of the epoch: i.e., - * {@link java.util.Calendar#YEAR} = 1970, - * {@link java.util.Calendar#MONTH} = {@link java.util.Calendar#JANUARY}, - * {@link java.util.Calendar#DATE} = 1, etc. - * This is important as there are variations in the Gregorian Calendar, - * e.g. leap years have different days in the month = {@link java.util.Calendar#FEBRUARY} - * so the result of {@link Duration#getMonths()} can be influenced.

    - * + *

    The default start instance is defined by {@link GregorianCalendar}'s use of the start of the epoch: i.e., + * {@link java.util.Calendar#YEAR} = 1970, + * {@link java.util.Calendar#MONTH} = {@link java.util.Calendar#JANUARY}, + * {@link java.util.Calendar#DATE} = 1, etc. + * This is important as there are variations in the Gregorian Calendar, + * e.g. leap years have different days in the month = {@link java.util.Calendar#FEBRUARY} + * so the result of {@link Duration#getMonths()} can be influenced.

    + * *

    Any remaining milliseconds after determining the year and month are discarded.

    - * - * @param durationInMilliseconds Milliseconds of Duration to create. - * - * @return New Duration created using the specified durationInMilliseconds. - */ + * + * @param durationInMilliseconds Milliseconds of Duration to create. + * + * @return New Duration created using the specified durationInMilliseconds. + */ public Duration newDurationYearMonth( final long durationInMilliseconds) { @@ -624,12 +628,12 @@ public abstract class DatatypeFactory { return newDurationYearMonth(isPositive, years, months); } - /** - *
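    A sketch of the millisecond-based overload, for illustration only (imports and the enclosing method are elided). The year and month are computed against the start of the epoch, so the exact values depend on the Gregorian calendar; leftover milliseconds are simply dropped.

        DatatypeFactory df = DatatypeFactory.newInstance();
        long aboutFourteenMonths = 426L * 24 * 60 * 60 * 1000;   // roughly 426 days, in milliseconds
        Duration ym = df.newDurationYearMonth(aboutFourteenMonths);
        System.out.println(ym.getYears() + "Y " + ym.getMonths() + "M");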

    Create a Duration of type xdt:yearMonthDuration using the specified - * year and month as defined in - * - * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    - * + /** + *

    Create a Duration of type xdt:yearMonthDuration using the specified + * year and month as defined in + * + * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    + * *

    The XML Schema specification states that values can be of an arbitrary size. * Implementations may chose not to or be incapable of supporting arbitrarily large and/or small values. * An {@link UnsupportedOperationException} will be thrown with a message indicating implementation limits @@ -639,74 +643,74 @@ public abstract class DatatypeFactory { * * @param isPositive Set to false to create a negative duration. When the length * of the duration is zero, this parameter will be ignored. - * @param year Year of Duration. - * @param month Month of Duration. - * - * @return New Duration created using the specified year and month. - * - * @throws IllegalArgumentException If the values are not a valid representation of a - * Duration: if all of the fields (year, month) are null or - * if any of the fields is negative. - * @throws UnsupportedOperationException If implementation cannot support requested values. - */ - public Duration newDurationYearMonth( - final boolean isPositive, - final BigInteger year, - final BigInteger month) { + * @param year Year of Duration. + * @param month Month of Duration. + * + * @return New Duration created using the specified year and month. + * + * @throws IllegalArgumentException If the values are not a valid representation of a + * Duration: if all of the fields (year, month) are null or + * if any of the fields is negative. + * @throws UnsupportedOperationException If implementation cannot support requested values. + */ + public Duration newDurationYearMonth( + final boolean isPositive, + final BigInteger year, + final BigInteger month) { - return newDuration( - isPositive, - year, - month, - null, // days - null, // hours - null, // minutes - null // seconds - ); - } + return newDuration( + isPositive, + year, + month, + null, // days + null, // hours + null, // minutes + null // seconds + ); + } - /** - *

    Create a Duration of type xdt:yearMonthDuration using the specified - * year and month as defined in - * - * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    - * + /** + *

    Create a Duration of type xdt:yearMonthDuration using the specified + * year and month as defined in + * + * XQuery 1.0 and XPath 2.0 Data Model, xdt:yearMonthDuration.

    + * *

    A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

    * * @param isPositive Set to false to create a negative duration. When the length * of the duration is zero, this parameter will be ignored. - * @param year Year of Duration. - * @param month Month of Duration. - * - * @return New Duration created using the specified year and month. - * - * @throws IllegalArgumentException If the values are not a valid representation of a - * Duration: if any of the fields (year, month) is negative. - */ - public Duration newDurationYearMonth( - final boolean isPositive, - final int year, - final int month) { + * @param year Year of Duration. + * @param month Month of Duration. + * + * @return New Duration created using the specified year and month. + * + * @throws IllegalArgumentException If the values are not a valid representation of a + * Duration: if any of the fields (year, month) is negative. + */ + public Duration newDurationYearMonth( + final boolean isPositive, + final int year, + final int month) { - return newDurationYearMonth( - isPositive, - BigInteger.valueOf((long) year), - BigInteger.valueOf((long) month)); - } + return newDurationYearMonth( + isPositive, + BigInteger.valueOf((long) year), + BigInteger.valueOf((long) month)); + } - /** - *
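    For completeness, a sketch of the int overload (imports and the enclosing method are elided; the values are invented):

        DatatypeFactory df = DatatypeFactory.newInstance();
        Duration back = df.newDurationYearMonth(false, 1, 6);          // a negative duration, -P1Y6M
        System.out.println(back.getSign());                            // -1
        System.out.println(back.getYears() + "/" + back.getMonths());  // 1/6 (magnitudes)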

    Create a new instance of an XMLGregorianCalendar.

    - * + /** + *

    Create a new instance of an XMLGregorianCalendar.

    + * *

    All date/time datatype fields set to {@link DatatypeConstants#FIELD_UNDEFINED} or null.

    * * @return New XMLGregorianCalendar with all date/time datatype fields set to * {@link DatatypeConstants#FIELD_UNDEFINED} or null. - */ - public abstract XMLGregorianCalendar newXMLGregorianCalendar(); + */ + public abstract XMLGregorianCalendar newXMLGregorianCalendar(); - /** - *
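    A minimal sketch of building a calendar field by field, for illustration only (imports and the enclosing method are elided; the date is invented):

        DatatypeFactory df = DatatypeFactory.newInstance();
        XMLGregorianCalendar cal = df.newXMLGregorianCalendar();   // all fields start as FIELD_UNDEFINED/null
        cal.setYear(2013);
        cal.setMonth(DatatypeConstants.MARCH);
        cal.setDay(1);
        System.out.println(cal.toXMLFormat());                     // an xsd:date such as 2013-03-01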

    Create a new XMLGregorianCalendar by parsing the String as a lexical representation.

    - * + /** + *

    Create a new XMLGregorianCalendar by parsing the String as a lexical representation.

    + * *

    Parsing the lexical string representation is defined in * XML Schema 1.0 Part 2, Section 3.2.[7-14].1, * Lexical Representation.

    @@ -721,344 +725,344 @@ public abstract class DatatypeFactory { *

    Except for the noted lexical/canonical representation mismatches * listed in * XML Schema 1.0 errata, Section 3.2.7.2.

    - * - * @param lexicalRepresentation Lexical representation of one the eight XML Schema date/time datatypes. - * - * @return XMLGregorianCalendar created from the lexicalRepresentation. - * - * @throws IllegalArgumentException If the lexicalRepresentation is not a valid XMLGregorianCalendar. - * @throws NullPointerException If lexicalRepresentation is null. - */ - public abstract XMLGregorianCalendar newXMLGregorianCalendar(final String lexicalRepresentation); + * + * @param lexicalRepresentation Lexical representation of one the eight XML Schema date/time datatypes. + * + * @return XMLGregorianCalendar created from the lexicalRepresentation. + * + * @throws IllegalArgumentException If the lexicalRepresentation is not a valid XMLGregorianCalendar. + * @throws NullPointerException If lexicalRepresentation is null. + */ + public abstract XMLGregorianCalendar newXMLGregorianCalendar(final String lexicalRepresentation); - /** - *
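    A usage sketch for the lexical form, for illustration only (imports and the enclosing method are elided; the timestamp is invented):

        DatatypeFactory df = DatatypeFactory.newInstance();
        XMLGregorianCalendar parsed =
                df.newXMLGregorianCalendar("2013-03-01T10:19:29-08:00");   // an xsd:dateTime
        System.out.println(parsed.getYear());       // 2013
        System.out.println(parsed.getTimezone());   // -480, the offset in minutes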

    Create an XMLGregorianCalendar from a {@link GregorianCalendar}.

    - * Field by Field Conversion from {@link GregorianCalendar} to an {@link XMLGregorianCalendar}:
    - *   java.util.GregorianCalendar field                     javax.xml.datatype.XMLGregorianCalendar field
    - *   ERA == GregorianCalendar.BC ? -YEAR : YEAR            {@link XMLGregorianCalendar#setYear(int year)}
    - *   MONTH + 1                                             {@link XMLGregorianCalendar#setMonth(int month)}
    - *   DAY_OF_MONTH                                          {@link XMLGregorianCalendar#setDay(int day)}
    - *   HOUR_OF_DAY, MINUTE, SECOND, MILLISECOND              {@link XMLGregorianCalendar#setTime(int hour, int minute, int second, BigDecimal fractional)}
    - *   (ZONE_OFFSET + DST_OFFSET) / (60*1000) (in minutes)   {@link XMLGregorianCalendar#setTimezone(int offset)}*

    *conversion loss of information. It is not possible to represent - * a java.util.GregorianCalendar daylight savings timezone id in the - * XML Schema 1.0 date/time datatype representation.

    - * - *

    To compute the return value's TimeZone field, - *

      - *
    • when this.getTimezone() != FIELD_UNDEFINED, - * create a java.util.TimeZone with a custom timezone id - * using the this.getTimezone().
    • - *
    • else use the GregorianCalendar default timezone value - * for the host is defined as specified by - * java.util.TimeZone.getDefault().
    • - * - * @param cal java.util.GregorianCalendar used to create XMLGregorianCalendar - * - * @return XMLGregorianCalendar created from java.util.GregorianCalendar - * - * @throws NullPointerException If cal is null. - */ - public abstract XMLGregorianCalendar newXMLGregorianCalendar(final GregorianCalendar cal); + /** + *

      Create an XMLGregorianCalendar from a {@link GregorianCalendar}.

      + * Field by Field Conversion from {@link GregorianCalendar} to an {@link XMLGregorianCalendar}:
      + *   java.util.GregorianCalendar field                     javax.xml.datatype.XMLGregorianCalendar field
      + *   ERA == GregorianCalendar.BC ? -YEAR : YEAR            {@link XMLGregorianCalendar#setYear(int year)}
      + *   MONTH + 1                                             {@link XMLGregorianCalendar#setMonth(int month)}
      + *   DAY_OF_MONTH                                          {@link XMLGregorianCalendar#setDay(int day)}
      + *   HOUR_OF_DAY, MINUTE, SECOND, MILLISECOND              {@link XMLGregorianCalendar#setTime(int hour, int minute, int second, BigDecimal fractional)}
      + *   (ZONE_OFFSET + DST_OFFSET) / (60*1000) (in minutes)   {@link XMLGregorianCalendar#setTimezone(int offset)}*

      *conversion loss of information. It is not possible to represent + * a java.util.GregorianCalendar daylight savings timezone id in the + * XML Schema 1.0 date/time datatype representation.

      + * + *

      To compute the return value's TimeZone field, + *

        + *
      • when this.getTimezone() != FIELD_UNDEFINED, + * create a java.util.TimeZone with a custom timezone id + * using the this.getTimezone().
      • + *
      • else use the GregorianCalendar default timezone value + * for the host is defined as specified by + * java.util.TimeZone.getDefault().
      • + * + * @param cal java.util.GregorianCalendar used to create XMLGregorianCalendar + * + * @return XMLGregorianCalendar created from java.util.GregorianCalendar + * + * @throws NullPointerException If cal is null. + */ + public abstract XMLGregorianCalendar newXMLGregorianCalendar(final GregorianCalendar cal); - /** - *
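      A short sketch of the GregorianCalendar conversion, for illustration only (imports and the enclosing method are elided):

          DatatypeFactory df = DatatypeFactory.newInstance();
          GregorianCalendar now = new GregorianCalendar();            // current date/time, default time zone
          XMLGregorianCalendar xml = df.newXMLGregorianCalendar(now);
          // GregorianCalendar.MONTH is 0-based; the mapping above applies MONTH + 1
          System.out.println(xml.toXMLFormat());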

        Constructor allowing for complete value spaces allowed by - * W3C XML Schema 1.0 recommendation for xsd:dateTime and related - * builtin datatypes. Note that year parameter supports - * arbitrarily large numbers and fractionalSecond has infinite - * precision.

        - * + /** + *

        Constructor allowing for complete value spaces allowed by + * W3C XML Schema 1.0 recommendation for xsd:dateTime and related + * builtin datatypes. Note that year parameter supports + * arbitrarily large numbers and fractionalSecond has infinite + * precision.

        + * *

        A null value indicates that field is not set.

        * - * @param year of XMLGregorianCalendar to be created. - * @param month of XMLGregorianCalendar to be created. - * @param day of XMLGregorianCalendar to be created. - * @param hour of XMLGregorianCalendar to be created. - * @param minute of XMLGregorianCalendar to be created. - * @param second of XMLGregorianCalendar to be created. - * @param fractionalSecond of XMLGregorianCalendar to be created. - * @param timezone of XMLGregorianCalendar to be created. - * - * @return XMLGregorianCalendar created from specified values. - * - * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field - * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} - * or if the composite values constitute an invalid XMLGregorianCalendar instance - * as determined by {@link XMLGregorianCalendar#isValid()}. - */ - public abstract XMLGregorianCalendar newXMLGregorianCalendar( - final BigInteger year, - final int month, - final int day, - final int hour, - final int minute, - final int second, - final BigDecimal fractionalSecond, - final int timezone); + * @param year of XMLGregorianCalendar to be created. + * @param month of XMLGregorianCalendar to be created. + * @param day of XMLGregorianCalendar to be created. + * @param hour of XMLGregorianCalendar to be created. + * @param minute of XMLGregorianCalendar to be created. + * @param second of XMLGregorianCalendar to be created. + * @param fractionalSecond of XMLGregorianCalendar to be created. + * @param timezone of XMLGregorianCalendar to be created. + * + * @return XMLGregorianCalendar created from specified values. + * + * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field + * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} + * or if the composite values constitute an invalid XMLGregorianCalendar instance + * as determined by {@link XMLGregorianCalendar#isValid()}. + */ + public abstract XMLGregorianCalendar newXMLGregorianCalendar( + final BigInteger year, + final int month, + final int day, + final int hour, + final int minute, + final int second, + final BigDecimal fractionalSecond, + final int timezone); - /** - *
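        A sketch of the wide-range constructor, for illustration only (imports and the enclosing method are elided; the values are invented to exercise the large-year and fractional-second support described above):

            DatatypeFactory df = DatatypeFactory.newInstance();
            XMLGregorianCalendar farFuture = df.newXMLGregorianCalendar(
                    new BigInteger("1000000000"),      // year may exceed the int range
                    1, 1, 0, 0, 0,                     // month, day, hour, minute, second
                    new BigDecimal("0.000000001"),     // fractionalSecond has arbitrary precision
                    0);                                // timezone offset in minutes (UTC)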

        Constructor of value spaces that a - * java.util.GregorianCalendar instance would need to convert to an - * XMLGregorianCalendar instance.

        - * - *

        XMLGregorianCalendar eon and - * fractionalSecond are set to null

        - * + /** + *

        Constructor of value spaces that a + * java.util.GregorianCalendar instance would need to convert to an + * XMLGregorianCalendar instance.

        + * + *

        XMLGregorianCalendar eon and + * fractionalSecond are set to null

        + * *

        A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

        * - * @param year of XMLGregorianCalendar to be created. - * @param month of XMLGregorianCalendar to be created. - * @param day of XMLGregorianCalendar to be created. - * @param hour of XMLGregorianCalendar to be created. - * @param minute of XMLGregorianCalendar to be created. - * @param second of XMLGregorianCalendar to be created. - * @param millisecond of XMLGregorianCalendar to be created. - * @param timezone of XMLGregorianCalendar to be created. - * - * @return XMLGregorianCalendar created from specified values. - * - * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field - * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} - * or if the composite values constitute an invalid XMLGregorianCalendar instance - * as determined by {@link XMLGregorianCalendar#isValid()}. - */ - public XMLGregorianCalendar newXMLGregorianCalendar( - final int year, - final int month, - final int day, - final int hour, - final int minute, - final int second, - final int millisecond, - final int timezone) { + * @param year of XMLGregorianCalendar to be created. + * @param month of XMLGregorianCalendar to be created. + * @param day of XMLGregorianCalendar to be created. + * @param hour of XMLGregorianCalendar to be created. + * @param minute of XMLGregorianCalendar to be created. + * @param second of XMLGregorianCalendar to be created. + * @param millisecond of XMLGregorianCalendar to be created. + * @param timezone of XMLGregorianCalendar to be created. + * + * @return XMLGregorianCalendar created from specified values. + * + * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field + * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} + * or if the composite values constitute an invalid XMLGregorianCalendar instance + * as determined by {@link XMLGregorianCalendar#isValid()}. + */ + public XMLGregorianCalendar newXMLGregorianCalendar( + final int year, + final int month, + final int day, + final int hour, + final int minute, + final int second, + final int millisecond, + final int timezone) { - // year may be undefined - BigInteger realYear = (year != DatatypeConstants.FIELD_UNDEFINED) ? BigInteger.valueOf((long) year) : null; + // year may be undefined + BigInteger realYear = (year != DatatypeConstants.FIELD_UNDEFINED) ? 
BigInteger.valueOf((long) year) : null; - // millisecond may be undefined - // millisecond must be >= 0 millisecond <= 1000 - BigDecimal realMillisecond = null; // undefined value - if (millisecond != DatatypeConstants.FIELD_UNDEFINED) { - if (millisecond < 0 || millisecond > 1000) { - throw new IllegalArgumentException( - "javax.xml.datatype.DatatypeFactory#newXMLGregorianCalendar(" - + "int year, int month, int day, int hour, int minute, int second, int millisecond, int timezone)" - + "with invalid millisecond: " + millisecond - ); - } + // millisecond may be undefined + // millisecond must be >= 0 millisecond <= 1000 + BigDecimal realMillisecond = null; // undefined value + if (millisecond != DatatypeConstants.FIELD_UNDEFINED) { + if (millisecond < 0 || millisecond > 1000) { + throw new IllegalArgumentException( + "javax.xml.datatype.DatatypeFactory#newXMLGregorianCalendar(" + + "int year, int month, int day, int hour, int minute, int second, int millisecond, int timezone)" + + "with invalid millisecond: " + millisecond + ); + } - realMillisecond = BigDecimal.valueOf((long) millisecond).movePointLeft(3); - } + realMillisecond = BigDecimal.valueOf((long) millisecond).movePointLeft(3); + } - return newXMLGregorianCalendar( - realYear, - month, - day, - hour, - minute, - second, - realMillisecond, - timezone - ); - } + return newXMLGregorianCalendar( + realYear, + month, + day, + hour, + minute, + second, + realMillisecond, + timezone + ); + } - /** - *
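        The int convenience overload converts milliseconds to a fractional second exactly as shown above, via BigDecimal.valueOf(millisecond).movePointLeft(3). A small sketch, for illustration only (imports and the enclosing method are elided; the values are invented):

            DatatypeFactory df = DatatypeFactory.newInstance();
            XMLGregorianCalendar ts = df.newXMLGregorianCalendar(
                    2013, 3, 1, 10, 19, 29, 500, -480);   // 500 ms -> 0.500; timezone -480 minutes = UTC-8
            System.out.println(ts.toXMLFormat());          // e.g. 2013-03-01T10:19:29.500-08:00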

        Create a Java representation of XML Schema builtin datatype date or g*.

        - * - *

        For example, an instance of gYear can be created invoking this factory - * with month and day parameters set to - * {@link DatatypeConstants#FIELD_UNDEFINED}.

        - * + /** + *

        Create a Java representation of XML Schema builtin datatype date or g*.

        + * + *

        For example, an instance of gYear can be created invoking this factory + * with month and day parameters set to + * {@link DatatypeConstants#FIELD_UNDEFINED}.

        + * *

        A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

        * - * @param year of XMLGregorianCalendar to be created. - * @param month of XMLGregorianCalendar to be created. - * @param day of XMLGregorianCalendar to be created. - * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. - * - * @return XMLGregorianCalendar created from parameter values. - * - * @see DatatypeConstants#FIELD_UNDEFINED - * - * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field - * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} - * or if the composite values constitute an invalid XMLGregorianCalendar instance - * as determined by {@link XMLGregorianCalendar#isValid()}. - */ - public XMLGregorianCalendar newXMLGregorianCalendarDate( - final int year, - final int month, - final int day, - final int timezone) { + * @param year of XMLGregorianCalendar to be created. + * @param month of XMLGregorianCalendar to be created. + * @param day of XMLGregorianCalendar to be created. + * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. + * + * @return XMLGregorianCalendar created from parameter values. + * + * @see DatatypeConstants#FIELD_UNDEFINED + * + * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field + * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} + * or if the composite values constitute an invalid XMLGregorianCalendar instance + * as determined by {@link XMLGregorianCalendar#isValid()}. + */ + public XMLGregorianCalendar newXMLGregorianCalendarDate( + final int year, + final int month, + final int day, + final int timezone) { - return newXMLGregorianCalendar( - year, - month, - day, - DatatypeConstants.FIELD_UNDEFINED, // hour - DatatypeConstants.FIELD_UNDEFINED, // minute - DatatypeConstants.FIELD_UNDEFINED, // second - DatatypeConstants.FIELD_UNDEFINED, // millisecond - timezone); - } + return newXMLGregorianCalendar( + year, + month, + day, + DatatypeConstants.FIELD_UNDEFINED, // hour + DatatypeConstants.FIELD_UNDEFINED, // minute + DatatypeConstants.FIELD_UNDEFINED, // second + DatatypeConstants.FIELD_UNDEFINED, // millisecond + timezone); + } - /** - *
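        A usage sketch for dates and the g* types, for illustration only (imports and the enclosing method are elided; the dates are invented):

            DatatypeFactory df = DatatypeFactory.newInstance();
            int undef = DatatypeConstants.FIELD_UNDEFINED;
            XMLGregorianCalendar date  = df.newXMLGregorianCalendarDate(2013, 3, 1, undef);          // xsd:date
            XMLGregorianCalendar gYear = df.newXMLGregorianCalendarDate(2013, undef, undef, undef);  // xsd:gYear
            System.out.println(date.toXMLFormat());    // 2013-03-01
            System.out.println(gYear.toXMLFormat());   // 2013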

        Create a Java instance of XML Schema builtin datatype time.

        - * + /** + *

        Create a Java instance of XML Schema builtin datatype time.

        + * *

        A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

        * - * @param hours number of hours - * @param minutes number of minutes - * @param seconds number of seconds - * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. - * - * @return XMLGregorianCalendar created from parameter values. - * - * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field - * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} - * or if the composite values constitute an invalid XMLGregorianCalendar instance - * as determined by {@link XMLGregorianCalendar#isValid()}. - * - * @see DatatypeConstants#FIELD_UNDEFINED - */ - public XMLGregorianCalendar newXMLGregorianCalendarTime( - final int hours, - final int minutes, - final int seconds, - final int timezone) { + * @param hours number of hours + * @param minutes number of minutes + * @param seconds number of seconds + * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. + * + * @return XMLGregorianCalendar created from parameter values. + * + * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field + * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} + * or if the composite values constitute an invalid XMLGregorianCalendar instance + * as determined by {@link XMLGregorianCalendar#isValid()}. + * + * @see DatatypeConstants#FIELD_UNDEFINED + */ + public XMLGregorianCalendar newXMLGregorianCalendarTime( + final int hours, + final int minutes, + final int seconds, + final int timezone) { - return newXMLGregorianCalendar( - DatatypeConstants.FIELD_UNDEFINED, // Year - DatatypeConstants.FIELD_UNDEFINED, // Month - DatatypeConstants.FIELD_UNDEFINED, // Day - hours, - minutes, - seconds, - DatatypeConstants.FIELD_UNDEFINED, //Millisecond - timezone); - } + return newXMLGregorianCalendar( + DatatypeConstants.FIELD_UNDEFINED, // Year + DatatypeConstants.FIELD_UNDEFINED, // Month + DatatypeConstants.FIELD_UNDEFINED, // Day + hours, + minutes, + seconds, + DatatypeConstants.FIELD_UNDEFINED, //Millisecond + timezone); + } - /** - *
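        A minimal sketch of the time factory, for illustration only (imports and the enclosing method are elided; the time is invented):

            DatatypeFactory df = DatatypeFactory.newInstance();
            XMLGregorianCalendar time = df.newXMLGregorianCalendarTime(
                    10, 19, 29, DatatypeConstants.FIELD_UNDEFINED);   // no timezone
            System.out.println(time.toXMLFormat());                   // 10:19:29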

        Create a Java instance of XML Schema builtin datatype time.

        - * + /** + *

        Create a Java instance of XML Schema builtin datatype time.

        + * *

        A null value indicates that field is not set.

        *

        A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

        * - * @param hours number of hours - * @param minutes number of minutes - * @param seconds number of seconds - * @param fractionalSecond value of null indicates that this optional field is not set. - * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. - * - * @return XMLGregorianCalendar created from parameter values. - * - * @see DatatypeConstants#FIELD_UNDEFINED - * - * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field - * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} - * or if the composite values constitute an invalid XMLGregorianCalendar instance - * as determined by {@link XMLGregorianCalendar#isValid()}. - */ - public XMLGregorianCalendar newXMLGregorianCalendarTime( - final int hours, - final int minutes, - final int seconds, - final BigDecimal fractionalSecond, - final int timezone) { + * @param hours number of hours + * @param minutes number of minutes + * @param seconds number of seconds + * @param fractionalSecond value of null indicates that this optional field is not set. + * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. + * + * @return XMLGregorianCalendar created from parameter values. + * + * @see DatatypeConstants#FIELD_UNDEFINED + * + * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field + * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} + * or if the composite values constitute an invalid XMLGregorianCalendar instance + * as determined by {@link XMLGregorianCalendar#isValid()}. + */ + public XMLGregorianCalendar newXMLGregorianCalendarTime( + final int hours, + final int minutes, + final int seconds, + final BigDecimal fractionalSecond, + final int timezone) { - return newXMLGregorianCalendar( - null, // year - DatatypeConstants.FIELD_UNDEFINED, // month - DatatypeConstants.FIELD_UNDEFINED, // day - hours, - minutes, - seconds, - fractionalSecond, - timezone); - } + return newXMLGregorianCalendar( + null, // year + DatatypeConstants.FIELD_UNDEFINED, // month + DatatypeConstants.FIELD_UNDEFINED, // day + hours, + minutes, + seconds, + fractionalSecond, + timezone); + } - /** - *

        Create a Java instance of XML Schema builtin datatype time.

        - * + /** + *

        Create a Java instance of XML Schema builtin datatype time.

        + * *

        A {@link DatatypeConstants#FIELD_UNDEFINED} value indicates that field is not set.

        * - * @param hours number of hours - * @param minutes number of minutes - * @param seconds number of seconds - * @param milliseconds number of milliseconds - * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. - * - * @return XMLGregorianCalendar created from parameter values. - * - * @see DatatypeConstants#FIELD_UNDEFINED - * - * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field - * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} - * or if the composite values constitute an invalid XMLGregorianCalendar instance - * as determined by {@link XMLGregorianCalendar#isValid()}. - */ - public XMLGregorianCalendar newXMLGregorianCalendarTime( - final int hours, - final int minutes, - final int seconds, - final int milliseconds, - final int timezone) { + * @param hours number of hours + * @param minutes number of minutes + * @param seconds number of seconds + * @param milliseconds number of milliseconds + * @param timezone offset in minutes. {@link DatatypeConstants#FIELD_UNDEFINED} indicates optional field is not set. + * + * @return XMLGregorianCalendar created from parameter values. + * + * @see DatatypeConstants#FIELD_UNDEFINED + * + * @throws IllegalArgumentException If any individual parameter's value is outside the maximum value constraint for the field + * as determined by the Date/Time Data Mapping table in {@link XMLGregorianCalendar} + * or if the composite values constitute an invalid XMLGregorianCalendar instance + * as determined by {@link XMLGregorianCalendar#isValid()}. + */ + public XMLGregorianCalendar newXMLGregorianCalendarTime( + final int hours, + final int minutes, + final int seconds, + final int milliseconds, + final int timezone) { - // millisecond may be undefined - // millisecond must be >= 0 millisecond <= 1000 - BigDecimal realMilliseconds = null; // undefined value - if (milliseconds != DatatypeConstants.FIELD_UNDEFINED) { - if (milliseconds < 0 || milliseconds > 1000) { - throw new IllegalArgumentException( - "javax.xml.datatype.DatatypeFactory#newXMLGregorianCalendarTime(" - + "int hours, int minutes, int seconds, int milliseconds, int timezone)" - + "with invalid milliseconds: " + milliseconds - ); - } + // millisecond may be undefined + // millisecond must be >= 0 millisecond <= 1000 + BigDecimal realMilliseconds = null; // undefined value + if (milliseconds != DatatypeConstants.FIELD_UNDEFINED) { + if (milliseconds < 0 || milliseconds > 1000) { + throw new IllegalArgumentException( + "javax.xml.datatype.DatatypeFactory#newXMLGregorianCalendarTime(" + + "int hours, int minutes, int seconds, int milliseconds, int timezone)" + + "with invalid milliseconds: " + milliseconds + ); + } - realMilliseconds = BigDecimal.valueOf((long) milliseconds).movePointLeft(3); - } + realMilliseconds = BigDecimal.valueOf((long) milliseconds).movePointLeft(3); + } - return newXMLGregorianCalendarTime( - hours, - minutes, - seconds, - realMilliseconds, - timezone - ); - } + return newXMLGregorianCalendarTime( + hours, + minutes, + seconds, + realMilliseconds, + timezone + ); + } } diff --git a/jaxp/src/javax/xml/datatype/FactoryFinder.java b/jaxp/src/javax/xml/datatype/FactoryFinder.java index deb47eaa11c..ce982157240 100644 --- a/jaxp/src/javax/xml/datatype/FactoryFinder.java +++ b/jaxp/src/javax/xml/datatype/FactoryFinder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2006, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,14 +26,12 @@ package javax.xml.datatype; import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; - +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Iterator; import java.util.Properties; -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.net.URL; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; /** *

        Implements pluggable Datatypes.
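        The hunks below replace the hand-rolled META-INF/services parsing with java.util.ServiceLoader. Stripped of the AccessController.doPrivileged wrapper and the error translation that the real code keeps, the new lookup amounts to the following sketch (imports elided):

            // a provider jar advertises its implementation class, one name per line, in
            //   META-INF/services/javax.xml.datatype.DatatypeFactory
            ServiceLoader<DatatypeFactory> loader = ServiceLoader.load(DatatypeFactory.class);
            Iterator<DatatypeFactory> it = loader.iterator();
            DatatypeFactory provider = it.hasNext() ? it.next() : null;   // first provider wins, else null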

        @@ -54,19 +52,19 @@ class FactoryFinder { /** * Cache for properties in java.home/lib/jaxp.properties */ - static Properties cacheProps = new Properties(); + private final static Properties cacheProps = new Properties(); /** * Flag indicating if properties from java.home/lib/jaxp.properties * have been cached. */ - static volatile boolean firstTime = true; + private static volatile boolean firstTime = true; /** * Security support class use to check access control before * getting certain system resources. */ - static SecuritySupport ss = new SecuritySupport(); + private final static SecuritySupport ss = new SecuritySupport(); // Define system property "jaxp.debug" to get output static { @@ -99,31 +97,31 @@ class FactoryFinder { * * Use bootstrap classLoader if cl = null and useBSClsLoader is true */ - static private Class getProviderClass(String className, ClassLoader cl, + static private Class getProviderClass(String className, ClassLoader cl, boolean doFallback, boolean useBSClsLoader) throws ClassNotFoundException { try { if (cl == null) { if (useBSClsLoader) { - return Class.forName(className, true, FactoryFinder.class.getClassLoader()); + return Class.forName(className, false, FactoryFinder.class.getClassLoader()); } else { cl = ss.getContextClassLoader(); if (cl == null) { throw new ClassNotFoundException(); } else { - return cl.loadClass(className); + return Class.forName(className, false, cl); } } } else { - return cl.loadClass(className); + return Class.forName(className, false, cl); } } catch (ClassNotFoundException e1) { if (doFallback) { // Use current class loader - should always be bootstrap CL - return Class.forName(className, true, FactoryFinder.class.getClassLoader()); + return Class.forName(className, false, FactoryFinder.class.getClassLoader()); } else { throw e1; @@ -135,6 +133,9 @@ class FactoryFinder { * Create an instance of a class. Delegates to method * getProviderClass() in order to load the class. * + * @param type Base class / Service interface of the factory to + * instantiate. + * * @param className Name of the concrete class corresponding to the * service provider * @@ -144,16 +145,19 @@ class FactoryFinder { * @param doFallback True if the current ClassLoader should be tried as * a fallback if the class is not found using cl */ - static Object newInstance(String className, ClassLoader cl, boolean doFallback) - throws ConfigurationError + static T newInstance(Class type, String className, ClassLoader cl, boolean doFallback) + throws DatatypeConfigurationException { - return newInstance(className, cl, doFallback, false); + return newInstance(type, className, cl, doFallback, false); } /** * Create an instance of a class. Delegates to method * getProviderClass() in order to load the class. * + * @param type Base class / Service interface of the factory to + * instantiate. + * * @param className Name of the concrete class corresponding to the * service provider * @@ -166,9 +170,12 @@ class FactoryFinder { * @param useBSClsLoader True if cl=null actually meant bootstrap classLoader. This parameter * is needed since DocumentBuilderFactory/SAXParserFactory defined null as context classLoader. 
*/ - static Object newInstance(String className, ClassLoader cl, boolean doFallback, boolean useBSClsLoader) - throws ConfigurationError + static T newInstance(Class type, String className, ClassLoader cl, + boolean doFallback, boolean useBSClsLoader) + throws DatatypeConfigurationException { + assert type != null; + // make sure we have access to restricted packages if (System.getSecurityManager() != null) { if (className != null && className.startsWith(DEFAULT_PACKAGE)) { @@ -178,20 +185,23 @@ class FactoryFinder { } try { - Class providerClass = getProviderClass(className, cl, doFallback, useBSClsLoader); + Class providerClass = getProviderClass(className, cl, doFallback, useBSClsLoader); + if (!type.isAssignableFrom(providerClass)) { + throw new ClassCastException(className + " cannot be cast to " + type.getName()); + } Object instance = providerClass.newInstance(); if (debug) { // Extra check to avoid computing cl strings dPrint("created new instance of " + providerClass + " using ClassLoader: " + cl); } - return instance; + return type.cast(instance); } catch (ClassNotFoundException x) { - throw new ConfigurationError( + throw new DatatypeConfigurationException( "Provider " + className + " not found", x); } catch (Exception x) { - throw new ConfigurationError( + throw new DatatypeConfigurationException( "Provider " + className + " could not be instantiated: " + x, x); } @@ -202,16 +212,17 @@ class FactoryFinder { * entry point. * @return Class object of factory, never null * - * @param factoryId Name of the factory to find, same as - * a property name + * @param type Base class / Service interface of the + * factory to find. * @param fallbackClassName Implementation class name, if nothing else * is found. Use null to mean no fallback. * * Package private so this code can be shared. 
*/ - static Object find(String factoryId, String fallbackClassName) - throws ConfigurationError + static T find(Class type, String fallbackClassName) + throws DatatypeConfigurationException { + final String factoryId = type.getName(); dPrint("find factoryId =" + factoryId); // Use the system property first @@ -219,7 +230,7 @@ class FactoryFinder { String systemProp = ss.getSystemProperty(factoryId); if (systemProp != null) { dPrint("found system property, value=" + systemProp); - return newInstance(systemProp, null, true); + return newInstance(type, systemProp, null, true); } } catch (SecurityException se) { @@ -228,7 +239,6 @@ class FactoryFinder { // try to read from $java.home/lib/jaxp.properties try { - String factoryClassName = null; if (firstTime) { synchronized (cacheProps) { if (firstTime) { @@ -243,11 +253,11 @@ class FactoryFinder { } } } - factoryClassName = cacheProps.getProperty(factoryId); + final String factoryClassName = cacheProps.getProperty(factoryId); if (factoryClassName != null) { dPrint("found in $java.home/jaxp.properties, value=" + factoryClassName); - return newInstance(factoryClassName, null, true); + return newInstance(type, factoryClassName, null, true); } } catch (Exception ex) { @@ -255,112 +265,46 @@ class FactoryFinder { } // Try Jar Service Provider Mechanism - Object provider = findJarServiceProvider(factoryId); + final T provider = findServiceProvider(type); if (provider != null) { return provider; } if (fallbackClassName == null) { - throw new ConfigurationError( - "Provider for " + factoryId + " cannot be found", null); + throw new DatatypeConfigurationException( + "Provider for " + factoryId + " cannot be found"); } dPrint("loaded from fallback value: " + fallbackClassName); - return newInstance(fallbackClassName, null, true); + return newInstance(type, fallbackClassName, null, true); } /* - * Try to find provider using Jar Service Provider Mechanism + * Try to find provider using the ServiceLoader API + * + * @param type Base class / Service interface of the factory to find. 
* * @return instance of provider class if found or null */ - private static Object findJarServiceProvider(String factoryId) - throws ConfigurationError + private static T findServiceProvider(final Class type) + throws DatatypeConfigurationException { - String serviceId = "META-INF/services/" + factoryId; - InputStream is = null; - - // First try the Context ClassLoader - ClassLoader cl = ss.getContextClassLoader(); - boolean useBSClsLoader = false; - if (cl != null) { - is = ss.getResourceAsStream(cl, serviceId); - - // If no provider found then try the current ClassLoader - if (is == null) { - cl = FactoryFinder.class.getClassLoader(); - is = ss.getResourceAsStream(cl, serviceId); - useBSClsLoader = true; - } - } else { - // No Context ClassLoader, try the current ClassLoader - cl = FactoryFinder.class.getClassLoader(); - is = ss.getResourceAsStream(cl, serviceId); - useBSClsLoader = true; - } - - if (is == null) { - // No provider found - return null; - } - - if (debug) { // Extra check to avoid computing cl strings - dPrint("found jar resource=" + serviceId + " using ClassLoader: " + cl); - } - - BufferedReader rd; try { - rd = new BufferedReader(new InputStreamReader(is, "UTF-8")); - } - catch (java.io.UnsupportedEncodingException e) { - rd = new BufferedReader(new InputStreamReader(is)); - } - - String factoryClassName = null; - try { - // XXX Does not handle all possible input as specified by the - // Jar Service Provider specification - factoryClassName = rd.readLine(); - rd.close(); - } catch (IOException x) { - // No provider found - return null; - } - - if (factoryClassName != null && !"".equals(factoryClassName)) { - dPrint("found in resource, value=" + factoryClassName); - - // Note: here we do not want to fall back to the current - // ClassLoader because we want to avoid the case where the - // resource file was found using one ClassLoader and the - // provider class was instantiated using a different one. - return newInstance(factoryClassName, cl, false, useBSClsLoader); - } - - // No provider found - return null; - } - - static class ConfigurationError extends Error { - private Exception exception; - - /** - * Construct a new instance with the specified detail string and - * exception. - */ - ConfigurationError(String msg, Exception x) { - super(msg); - this.exception = x; - } - - Exception getException() { - return exception; - } - /** - * use the exception chaining mechanism of JDK1.4 - */ - @Override - public Throwable getCause() { - return exception; + return AccessController.doPrivileged(new PrivilegedAction() { + public T run() { + final ServiceLoader serviceLoader = ServiceLoader.load(type); + final Iterator iterator = serviceLoader.iterator(); + if (iterator.hasNext()) { + return iterator.next(); + } else { + return null; + } + } + }); + } catch(ServiceConfigurationError e) { + final DatatypeConfigurationException error = + new DatatypeConfigurationException( + "Provider for " + type + " cannot be found", e); + throw error; } } diff --git a/jaxp/src/javax/xml/parsers/DocumentBuilderFactory.java b/jaxp/src/javax/xml/parsers/DocumentBuilderFactory.java index 98db9beed5d..0ef1c13cecb 100644 --- a/jaxp/src/javax/xml/parsers/DocumentBuilderFactory.java +++ b/jaxp/src/javax/xml/parsers/DocumentBuilderFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,9 +40,6 @@ import javax.xml.validation.Schema; public abstract class DocumentBuilderFactory { - /** The default property name according to the JAXP spec */ - private static final String DEFAULT_PROPERTY_NAME = "javax.xml.parsers.DocumentBuilderFactory"; - private boolean validating = false; private boolean namespaceAware = false; private boolean whitespace = false; @@ -50,8 +47,6 @@ public abstract class DocumentBuilderFactory { private boolean ignoreComments = false; private boolean coalescing = false; - private boolean canonicalState = false; - /** *

        Protected constructor to prevent instantiation. * Use {@link #newInstance()}.

        @@ -85,14 +80,12 @@ public abstract class DocumentBuilderFactory { * of any property in jaxp.properties after it has been read for the first time. * *
      • - * Use the Services API (as detailed in the JAR specification), if - * available, to determine the classname. The Services API will look - * for a classname in the file - * META-INF/services/javax.xml.parsers.DocumentBuilderFactory - * in jars available to the runtime. + * Uses the service-provider loading facilities, defined by the + * {@link java.util.ServiceLoader} class, to attempt to locate and load an + * implementation of the service. *
      • *
      • - * Platform default DocumentBuilderFactory instance. + * Otherwise, the system-default implementation is returned. *
      • *
      * @@ -113,21 +106,16 @@ public abstract class DocumentBuilderFactory { * * @return New instance of a DocumentBuilderFactory * - * @throws FactoryConfigurationError if the implementation is not - * available or cannot be instantiated. + * @throws FactoryConfigurationError in case of {@linkplain + * java.util.ServiceConfigurationError service configuration error} or if + * the implementation is not available or cannot be instantiated. */ public static DocumentBuilderFactory newInstance() { - try { - return (DocumentBuilderFactory) FactoryFinder.find( + return FactoryFinder.find( /* The default property name according to the JAXP spec */ - "javax.xml.parsers.DocumentBuilderFactory", + DocumentBuilderFactory.class, // "javax.xml.parsers.DocumentBuilderFactory" /* The fallback implementation class name */ "com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl"); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } - } /** @@ -165,13 +153,9 @@ public abstract class DocumentBuilderFactory { * @since 1.6 */ public static DocumentBuilderFactory newInstance(String factoryClassName, ClassLoader classLoader){ - try { //do not fallback if given classloader can't find the class, throw exception - return (DocumentBuilderFactory) FactoryFinder.newInstance(factoryClassName, classLoader, false); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } + return FactoryFinder.newInstance(DocumentBuilderFactory.class, + factoryClassName, classLoader, false); } /** @@ -391,75 +375,64 @@ public abstract class DocumentBuilderFactory { public abstract Object getAttribute(String name) throws IllegalArgumentException; - /** - *
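      For illustration, callers of the rewritten newInstance() are unchanged; the sketch below (invented class name and file path) also enables the secure-processing feature documented next:

          import java.io.File;
          import javax.xml.XMLConstants;
          import javax.xml.parsers.DocumentBuilder;
          import javax.xml.parsers.DocumentBuilderFactory;
          import org.w3c.dom.Document;

          public class SecureParseDemo {
              public static void main(String[] args) throws Exception {
                  DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();  // lookup order as above
                  dbf.setNamespaceAware(true);
                  dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
                  DocumentBuilder builder = dbf.newDocumentBuilder();
                  Document doc = builder.parse(new File("example.xml"));              // illustrative path
                  System.out.println(doc.getDocumentElement().getNodeName());
              }
          }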

      Set a feature for this DocumentBuilderFactory and DocumentBuilders created by this factory.

      - * - *

      - * Feature names are fully qualified {@link java.net.URI}s. - * Implementations may define their own features. - * A {@link ParserConfigurationException} is thrown if this DocumentBuilderFactory or the - * DocumentBuilders it creates cannot support the feature. - * It is possible for a DocumentBuilderFactory to expose a feature value but be unable to change its state. - *

      - * - *

      - * All implementations are required to support the {@link javax.xml.XMLConstants#FEATURE_SECURE_PROCESSING} feature. - * When the feature is:

      - *
        - *
      • - * true: the implementation will limit XML processing to conform to implementation limits. - * Examples include enity expansion limits and XML Schema constructs that would consume large amounts of resources. - * If XML processing is limited for security reasons, it will be reported via a call to the registered - * {@link org.xml.sax.ErrorHandler#fatalError(SAXParseException exception)}. - * See {@link DocumentBuilder#setErrorHandler(org.xml.sax.ErrorHandler errorHandler)}. - *
      • - *
      • - * false: the implementation will processing XML according to the XML specifications without - * regard to possible implementation limits. - *
      • - *
      - * - * @param name Feature name. - * @param value Is feature state true or false. - * - * @throws ParserConfigurationException if this DocumentBuilderFactory or the DocumentBuilders - * it creates cannot support this feature. - * @throws NullPointerException If the name parameter is null. - */ - public abstract void setFeature(String name, boolean value) - throws ParserConfigurationException; - - /** - *

      Get the state of the named feature.

      - * - *

      - * Feature names are fully qualified {@link java.net.URI}s. - * Implementations may define their own features. - * An {@link ParserConfigurationException} is thrown if this DocumentBuilderFactory or the - * DocumentBuilders it creates cannot support the feature. - * It is possible for an DocumentBuilderFactory to expose a feature value but be unable to change its state. - *

      - * - * @param name Feature name. - * - * @return State of the named feature. - * - * @throws ParserConfigurationException if this DocumentBuilderFactory - * or the DocumentBuilders it creates cannot support this feature. - */ - public abstract boolean getFeature(String name) - throws ParserConfigurationException; - - - /**

      Get current state of canonicalization.

      + /** + *

      Set a feature for this DocumentBuilderFactory and DocumentBuilders created by this factory.

      * - * @return current state canonicalization control + *

      + * Feature names are fully qualified {@link java.net.URI}s. + * Implementations may define their own features. + * A {@link ParserConfigurationException} is thrown if this DocumentBuilderFactory or the + * DocumentBuilders it creates cannot support the feature. + * It is possible for a DocumentBuilderFactory to expose a feature value but be unable to change its state. + *

      + * + *

      + * All implementations are required to support the {@link javax.xml.XMLConstants#FEATURE_SECURE_PROCESSING} feature. + * When the feature is:

      + *
        + *
      • + * true: the implementation will limit XML processing to conform to implementation limits. + * Examples include enity expansion limits and XML Schema constructs that would consume large amounts of resources. + * If XML processing is limited for security reasons, it will be reported via a call to the registered + * {@link org.xml.sax.ErrorHandler#fatalError(SAXParseException exception)}. + * See {@link DocumentBuilder#setErrorHandler(org.xml.sax.ErrorHandler errorHandler)}. + *
      • + *
      • + * false: the implementation will processing XML according to the XML specifications without + * regard to possible implementation limits. + *
      • + *
      + * + * @param name Feature name. + * @param value Is feature state true or false. + * + * @throws ParserConfigurationException if this DocumentBuilderFactory or the DocumentBuilders + * it creates cannot support this feature. + * @throws NullPointerException If the name parameter is null. */ - /* - public boolean getCanonicalization() { - return canonicalState; - } - */ + public abstract void setFeature(String name, boolean value) + throws ParserConfigurationException; + + /** + *

      Get the state of the named feature.

      + * + *

      + * Feature names are fully qualified {@link java.net.URI}s. + * Implementations may define their own features. + * An {@link ParserConfigurationException} is thrown if this DocumentBuilderFactory or the + * DocumentBuilders it creates cannot support the feature. + * It is possible for an DocumentBuilderFactory to expose a feature value but be unable to change its state. + *

      + * + * @param name Feature name. + * + * @return State of the named feature. + * + * @throws ParserConfigurationException if this DocumentBuilderFactory + * or the DocumentBuilders it creates cannot support this feature. + */ + public abstract boolean getFeature(String name) + throws ParserConfigurationException; /** @@ -488,17 +461,6 @@ public abstract class DocumentBuilderFactory { } - /*

      Set canonicalization control to true or - * false.

      - * - * @param state of canonicalization - */ - /* - public void setCanonicalization(boolean state) { - canonicalState = state; - } - */ - /** *

      Set the {@link Schema} to be used by parsers created * from this factory. diff --git a/jaxp/src/javax/xml/parsers/FactoryFinder.java b/jaxp/src/javax/xml/parsers/FactoryFinder.java index 214f87f2239..9e186f512ef 100644 --- a/jaxp/src/javax/xml/parsers/FactoryFinder.java +++ b/jaxp/src/javax/xml/parsers/FactoryFinder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,15 +25,16 @@ package javax.xml.parsers; -import java.io.BufferedReader; import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Iterator; import java.util.Properties; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; /** - *

Implements pluggable Datatypes.
+ * Implements pluggable Parsers.
  *
  * This class is duplicated for each JAXP subpackage so keep it in
  * sync. It is package private for secure class loading.
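The hunks below replace FactoryFinder's hand-rolled META-INF/services parsing with java.util.ServiceLoader. As a rough sketch of that lookup pattern (the helper name firstProviderOrNull is hypothetical):

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Iterator;
import java.util.ServiceLoader;

class ServiceLookupSketch {
    // Returns the first provider registered under META-INF/services/<type name>,
    // or null so the caller can fall back to the platform default implementation.
    static <T> T firstProviderOrNull(final Class<T> type) {
        return AccessController.doPrivileged(new PrivilegedAction<T>() {
            public T run() {
                final ServiceLoader<T> loader = ServiceLoader.load(type);
                final Iterator<T> it = loader.iterator();
                return it.hasNext() ? it.next() : null;
            }
        });
    }
}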

      @@ -51,7 +52,7 @@ class FactoryFinder { /** * Cache for properties in java.home/lib/jaxp.properties */ - static Properties cacheProps = new Properties(); + private static final Properties cacheProps = new Properties(); /** * Flag indicating if properties from java.home/lib/jaxp.properties @@ -63,7 +64,7 @@ class FactoryFinder { * Security support class use to check access control before * getting certain system resources. */ - static SecuritySupport ss = new SecuritySupport(); + private static final SecuritySupport ss = new SecuritySupport(); // Define system property "jaxp.debug" to get output static { @@ -96,31 +97,31 @@ class FactoryFinder { * * Use bootstrap classLoader if cl = null and useBSClsLoader is true */ - static private Class getProviderClass(String className, ClassLoader cl, + static private Class getProviderClass(String className, ClassLoader cl, boolean doFallback, boolean useBSClsLoader) throws ClassNotFoundException { try { if (cl == null) { if (useBSClsLoader) { - return Class.forName(className, true, FactoryFinder.class.getClassLoader()); + return Class.forName(className, false, FactoryFinder.class.getClassLoader()); } else { cl = ss.getContextClassLoader(); if (cl == null) { throw new ClassNotFoundException(); } else { - return cl.loadClass(className); + return Class.forName(className, false, cl); } } } else { - return cl.loadClass(className); + return Class.forName(className, false, cl); } } catch (ClassNotFoundException e1) { if (doFallback) { // Use current class loader - should always be bootstrap CL - return Class.forName(className, true, FactoryFinder.class.getClassLoader()); + return Class.forName(className, false, FactoryFinder.class.getClassLoader()); } else { throw e1; @@ -132,6 +133,9 @@ class FactoryFinder { * Create an instance of a class. Delegates to method * getProviderClass() in order to load the class. * + * @param type Base class / Service interface of the factory to + * instantiate. + * * @param className Name of the concrete class corresponding to the * service provider * @@ -141,16 +145,20 @@ class FactoryFinder { * @param doFallback True if the current ClassLoader should be tried as * a fallback if the class is not found using cl */ - static Object newInstance(String className, ClassLoader cl, boolean doFallback) - throws ConfigurationError + static T newInstance(Class type, String className, ClassLoader cl, + boolean doFallback) + throws FactoryConfigurationError { - return newInstance(className, cl, doFallback, false); + return newInstance(type, className, cl, doFallback, false); } /** * Create an instance of a class. Delegates to method * getProviderClass() in order to load the class. * + * @param type Base class / Service interface of the factory to + * instantiate. + * * @param className Name of the concrete class corresponding to the * service provider * @@ -163,9 +171,11 @@ class FactoryFinder { * @param useBSClsLoader True if cl=null actually meant bootstrap classLoader. This parameter * is needed since DocumentBuilderFactory/SAXParserFactory defined null as context classLoader. 
*/ - static Object newInstance(String className, ClassLoader cl, boolean doFallback, boolean useBSClsLoader) - throws ConfigurationError + static T newInstance(Class type, String className, ClassLoader cl, + boolean doFallback, boolean useBSClsLoader) + throws FactoryConfigurationError { + assert type != null; // make sure we have access to restricted packages if (System.getSecurityManager() != null) { if (className != null && className.startsWith(DEFAULT_PACKAGE)) { @@ -175,22 +185,24 @@ class FactoryFinder { } try { - Class providerClass = getProviderClass(className, cl, doFallback, useBSClsLoader); + Class providerClass = getProviderClass(className, cl, doFallback, useBSClsLoader); + if (!type.isAssignableFrom(providerClass)) { + throw new ClassCastException(className + " cannot be cast to " + type.getName()); + } Object instance = providerClass.newInstance(); if (debug) { // Extra check to avoid computing cl strings dPrint("created new instance of " + providerClass + " using ClassLoader: " + cl); } - return instance; + return type.cast(instance); } catch (ClassNotFoundException x) { - throw new ConfigurationError( - "Provider " + className + " not found", x); + throw new FactoryConfigurationError(x, + "Provider " + className + " not found"); } catch (Exception x) { - throw new ConfigurationError( - "Provider " + className + " could not be instantiated: " + x, - x); + throw new FactoryConfigurationError(x, + "Provider " + className + " could not be instantiated: " + x); } } @@ -199,16 +211,17 @@ class FactoryFinder { * entry point. * @return Class object of factory, never null * - * @param factoryId Name of the factory to find, same as - * a property name + * @param type Base class / Service interface of the + * factory to find. * @param fallbackClassName Implementation class name, if nothing else * is found. Use null to mean no fallback. * * Package private so this code can be shared. 
*/ - static Object find(String factoryId, String fallbackClassName) - throws ConfigurationError + static T find(Class type, String fallbackClassName) + throws FactoryConfigurationError { + final String factoryId = type.getName(); dPrint("find factoryId =" + factoryId); // Use the system property first @@ -216,7 +229,7 @@ class FactoryFinder { String systemProp = ss.getSystemProperty(factoryId); if (systemProp != null) { dPrint("found system property, value=" + systemProp); - return newInstance(systemProp, null, true); + return newInstance(type, systemProp, null, true); } } catch (SecurityException se) { @@ -225,7 +238,6 @@ class FactoryFinder { // try to read from $java.home/lib/jaxp.properties try { - String factoryClassName = null; if (firstTime) { synchronized (cacheProps) { if (firstTime) { @@ -240,11 +252,11 @@ class FactoryFinder { } } } - factoryClassName = cacheProps.getProperty(factoryId); + final String factoryClassName = cacheProps.getProperty(factoryId); if (factoryClassName != null) { dPrint("found in $java.home/jaxp.properties, value=" + factoryClassName); - return newInstance(factoryClassName, null, true); + return newInstance(type, factoryClassName, null, true); } } catch (Exception ex) { @@ -252,112 +264,52 @@ class FactoryFinder { } // Try Jar Service Provider Mechanism - Object provider = findJarServiceProvider(factoryId); + T provider = findServiceProvider(type); if (provider != null) { return provider; } if (fallbackClassName == null) { - throw new ConfigurationError( - "Provider for " + factoryId + " cannot be found", null); + throw new FactoryConfigurationError( + "Provider for " + factoryId + " cannot be found"); } dPrint("loaded from fallback value: " + fallbackClassName); - return newInstance(fallbackClassName, null, true); + return newInstance(type, fallbackClassName, null, true); } /* - * Try to find provider using Jar Service Provider Mechanism + * Try to find provider using the ServiceLoader API + * + * @param type Base class / Service interface of the factory to find. 
* * @return instance of provider class if found or null */ - private static Object findJarServiceProvider(String factoryId) - throws ConfigurationError - { - String serviceId = "META-INF/services/" + factoryId; - InputStream is = null; - - // First try the Context ClassLoader - ClassLoader cl = ss.getContextClassLoader(); - boolean useBSClsLoader = false; - if (cl != null) { - is = ss.getResourceAsStream(cl, serviceId); - - // If no provider found then try the current ClassLoader - if (is == null) { - cl = FactoryFinder.class.getClassLoader(); - is = ss.getResourceAsStream(cl, serviceId); - useBSClsLoader = true; - } - } else { - // No Context ClassLoader, try the current ClassLoader - cl = FactoryFinder.class.getClassLoader(); - is = ss.getResourceAsStream(cl, serviceId); - useBSClsLoader = true; - } - - if (is == null) { - // No provider found - return null; - } - - if (debug) { // Extra check to avoid computing cl strings - dPrint("found jar resource=" + serviceId + " using ClassLoader: " + cl); - } - - BufferedReader rd; + private static T findServiceProvider(final Class type) { try { - rd = new BufferedReader(new InputStreamReader(is, "UTF-8")); - } - catch (java.io.UnsupportedEncodingException e) { - rd = new BufferedReader(new InputStreamReader(is)); - } - - String factoryClassName = null; - try { - // XXX Does not handle all possible input as specified by the - // Jar Service Provider specification - factoryClassName = rd.readLine(); - rd.close(); - } catch (IOException x) { - // No provider found - return null; - } - - if (factoryClassName != null && !"".equals(factoryClassName)) { - dPrint("found in resource, value=" + factoryClassName); - - // Note: here we do not want to fall back to the current - // ClassLoader because we want to avoid the case where the - // resource file was found using one ClassLoader and the - // provider class was instantiated using a different one. - return newInstance(factoryClassName, cl, false, useBSClsLoader); - } - - // No provider found - return null; - } - - static class ConfigurationError extends Error { - private Exception exception; - - /** - * Construct a new instance with the specified detail string and - * exception. - */ - ConfigurationError(String msg, Exception x) { - super(msg); - this.exception = x; - } - - Exception getException() { - return exception; - } - /** - * use the exception chaining mechanism of JDK1.4 - */ - @Override - public Throwable getCause() { - return exception; + return AccessController.doPrivileged(new PrivilegedAction() { + public T run() { + final ServiceLoader serviceLoader = ServiceLoader.load(type); + final Iterator iterator = serviceLoader.iterator(); + if (iterator.hasNext()) { + return iterator.next(); + } else { + return null; + } + } + }); + } catch(ServiceConfigurationError e) { + // It is not possible to wrap an error directly in + // FactoryConfigurationError - so we need to wrap the + // ServiceConfigurationError in a RuntimeException. + // The alternative would be to modify the logic in + // FactoryConfigurationError to allow setting a + // Throwable as the cause, but that could cause + // compatibility issues down the road. 
+ final RuntimeException x = new RuntimeException( + "Provider for " + type + " cannot be created", e); + final FactoryConfigurationError error = + new FactoryConfigurationError(x, x.getMessage()); + throw error; } } diff --git a/jaxp/src/javax/xml/parsers/SAXParserFactory.java b/jaxp/src/javax/xml/parsers/SAXParserFactory.java index 95c78042767..a7aef97bdf3 100644 --- a/jaxp/src/javax/xml/parsers/SAXParserFactory.java +++ b/jaxp/src/javax/xml/parsers/SAXParserFactory.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,6 @@ package javax.xml.parsers; import javax.xml.validation.Schema; - import org.xml.sax.SAXException; import org.xml.sax.SAXNotRecognizedException; import org.xml.sax.SAXNotSupportedException; @@ -42,8 +41,6 @@ import org.xml.sax.SAXNotSupportedException; * */ public abstract class SAXParserFactory { - /** The default property name according to the JAXP spec */ - private static final String DEFAULT_PROPERTY_NAME = "javax.xml.parsers.SAXParserFactory"; /** *

Should Parsers be validating?

@@ -87,14 +84,12 @@ public abstract class SAXParserFactory {
     * of any property in jaxp.properties after it has been read for the first time.
     *
     * <li>
-     * Use the Services API (as detailed in the JAR specification), if
-     * available, to determine the classname. The Services API will look
-     * for a classname in the file
-     * META-INF/services/javax.xml.parsers.SAXParserFactory
-     * in jars available to the runtime.
+     * Use the service-provider loading facilities, defined by the
+     * {@link java.util.ServiceLoader} class, to attempt to locate and load an
+     * implementation of the service.
     * </li>
     * <li>
-     * Platform default SAXParserFactory instance.
+     * Otherwise the system-default implementation is returned.
     * </li>
     * </ul>
     *
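Caller-side illustration of the lookup order listed above (a sketch only; it relies on the default chain and simply prints whichever implementation the lookup resolves):

import javax.xml.parsers.SAXParserFactory;

public class LookupOrderExample {
    public static void main(String[] args) {
        // Resolution order: the javax.xml.parsers.SAXParserFactory system property,
        // then $java.home/lib/jaxp.properties, then ServiceLoader providers,
        // then the bundled default implementation.
        SAXParserFactory spf = SAXParserFactory.newInstance();
        System.out.println(spf.getClass().getName());
    }
}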

    * - *

    If you have problems loading {@link DocumentBuilder}s, try:

    + *

    If you have problems loading {@link SAXParser}s, try:

    *
          * java -Djaxp.debug=1 YourProgram ....
          * 
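To see where that lookup actually finds the factory, the jaxp.debug switch described above can be used. A small sketch, assuming the property is set before the JAXP classes are first loaded (normally it is passed as java -Djaxp.debug=1 on the command line):

public class JaxpDebugExample {
    public static void main(String[] args) {
        System.setProperty("jaxp.debug", "1");            // same effect as -Djaxp.debug=1
        javax.xml.parsers.SAXParserFactory.newInstance(); // lookup steps are reported on System.err
    }
}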
    @@ -117,21 +112,17 @@ public abstract class SAXParserFactory { * * @return A new instance of a SAXParserFactory. * - * @throws FactoryConfigurationError if the implementation is - * not available or cannot be instantiated. + * @throws FactoryConfigurationError in case of {@linkplain + * java.util.ServiceConfigurationError service configuration error} or if + * the implementation is not available or cannot be instantiated. */ public static SAXParserFactory newInstance() { - try { - return (SAXParserFactory) FactoryFinder.find( + return FactoryFinder.find( /* The default property name according to the JAXP spec */ - "javax.xml.parsers.SAXParserFactory", + SAXParserFactory.class, /* The fallback implementation class name */ "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl"); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } } /** @@ -169,13 +160,9 @@ public abstract class SAXParserFactory { * @since 1.6 */ public static SAXParserFactory newInstance(String factoryClassName, ClassLoader classLoader){ - try { //do not fallback if given classloader can't find the class, throw exception - return (SAXParserFactory) FactoryFinder.newInstance(factoryClassName, classLoader, false); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } + return FactoryFinder.newInstance(SAXParserFactory.class, + factoryClassName, classLoader, false); } /** @@ -266,22 +253,22 @@ public abstract class SAXParserFactory { * A list of the core features and properties can be found at * http://www.saxproject.org/

     *
-     * <p>All implementations are required to support the {@link javax.xml.XMLConstants#FEATURE_SECURE_PROCESSING} feature.
-     * When the feature is</p>
-     * <ul>
-     *   <li>
-     *     true: the implementation will limit XML processing to conform to implementation limits.
-     *     Examples include enity expansion limits and XML Schema constructs that would consume large amounts of resources.
-     *     If XML processing is limited for security reasons, it will be reported via a call to the registered
-     *     {@link org.xml.sax.ErrorHandler#fatalError(SAXParseException exception)}.
-     *     See {@link SAXParser} parse methods for handler specification.
-     *   </li>
-     *   <li>
-     *     When the feature is false, the implementation will processing XML according to the XML specifications without
-     *     regard to possible implementation limits.
-     *   </li>
-     * </ul>
-     *
+     * <p>All implementations are required to support the {@link javax.xml.XMLConstants#FEATURE_SECURE_PROCESSING} feature.
+     * When the feature is</p>
+     * <ul>
+     *   <li>
+     *     true: the implementation will limit XML processing to conform to implementation limits.
+     *     Examples include entity expansion limits and XML Schema constructs that would consume large amounts of resources.
+     *     If XML processing is limited for security reasons, it will be reported via a call to the registered
+     *     {@link org.xml.sax.ErrorHandler#fatalError(SAXParseException exception)}.
+     *     See {@link SAXParser} parse methods for handler specification.
+     *   </li>
+     *   <li>
+     *     When the feature is false, the implementation will process XML according to the XML specifications without
+     *     regard to possible implementation limits.
+     *   </li>
+     * </ul>
+     *
     * @param name The name of the feature to be set.
     * @param value The value of the feature to be set.
     *
@@ -320,17 +307,6 @@ public abstract class SAXParserFactory {
                 SAXNotSupportedException;

-
-    /* Get current state of canonicalization.
-     *
-     * @return current state canonicalization control
-     */
-    /*
-    public boolean getCanonicalization() {
-        return canonicalState;
-    }
-    */
-
    /**
     * Gets the {@link Schema} object specified through
     * the {@link #setSchema(Schema schema)} method.
@@ -357,17 +333,6 @@ public abstract class SAXParserFactory {
        );
    }

-    /** Set canonicalization control to true or
-     * false.
-     *
-     * @param state of canonicalization
-     */
-    /*
-    public void setCanonicalization(boolean state) {
-        canonicalState = state;
-    }
-    */
-
    /**
     * <p>Set the {@link Schema} to be used by parsers created
     * from this factory.
    @@ -400,7 +365,7 @@ public abstract class SAXParserFactory { * Such configuration will cause a {@link SAXException} * exception when those properties are set on a {@link SAXParser}.

     *
-     * Note for implmentors
+     * Note for implementors
     *
    * A parser must be able to work with any {@link Schema} * implementation. However, parsers and schemas are allowed diff --git a/jaxp/src/javax/xml/stream/FactoryFinder.java b/jaxp/src/javax/xml/stream/FactoryFinder.java index bcfeba10e04..68f4ef21d66 100644 --- a/jaxp/src/javax/xml/stream/FactoryFinder.java +++ b/jaxp/src/javax/xml/stream/FactoryFinder.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,15 +25,16 @@ package javax.xml.stream; -import java.io.BufferedReader; import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Iterator; import java.util.Properties; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader; /** - *

Implements pluggable Datatypes.
+ * Implements pluggable streams.
  *
  * This class is duplicated for each JAXP subpackage so keep it in
  * sync. It is package private for secure class loading.
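The same ServiceLoader rewrite is applied to the StAX FactoryFinder below. For orientation, a minimal usage sketch of the factories whose lookup it implements (default chain only, no custom factoryId; class name StaxFactoriesExample is made up):

import javax.xml.stream.XMLEventFactory;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLOutputFactory;

public class StaxFactoriesExample {
    public static void main(String[] args) {
        // Each call resolves via: system property, lib/stax.properties,
        // ServiceLoader, then the JDK default implementation.
        XMLInputFactory in = XMLInputFactory.newFactory();
        XMLOutputFactory out = XMLOutputFactory.newFactory();
        XMLEventFactory events = XMLEventFactory.newFactory();
        System.out.println(in.getClass().getName());
        System.out.println(out.getClass().getName());
        System.out.println(events.getClass().getName());
    }
}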

    @@ -52,19 +53,19 @@ class FactoryFinder { /** * Cache for properties in java.home/lib/jaxp.properties */ - static Properties cacheProps = new Properties(); + final private static Properties cacheProps = new Properties(); /** * Flag indicating if properties from java.home/lib/jaxp.properties * have been cached. */ - static volatile boolean firstTime = true; + private static volatile boolean firstTime = true; /** * Security support class use to check access control before * getting certain system resources. */ - static SecuritySupport ss = new SecuritySupport(); + final private static SecuritySupport ss = new SecuritySupport(); // Define system property "jaxp.debug" to get output static { @@ -103,25 +104,25 @@ class FactoryFinder { try { if (cl == null) { if (useBSClsLoader) { - return Class.forName(className, true, FactoryFinder.class.getClassLoader()); + return Class.forName(className, false, FactoryFinder.class.getClassLoader()); } else { cl = ss.getContextClassLoader(); if (cl == null) { throw new ClassNotFoundException(); } else { - return cl.loadClass(className); + return Class.forName(className, false, cl); } } } else { - return cl.loadClass(className); + return Class.forName(className, false, cl); } } catch (ClassNotFoundException e1) { if (doFallback) { // Use current class loader - should always be bootstrap CL - return Class.forName(className, true, FactoryFinder.class.getClassLoader()); + return Class.forName(className, false, FactoryFinder.class.getClassLoader()); } else { throw e1; @@ -133,6 +134,9 @@ class FactoryFinder { * Create an instance of a class. Delegates to method * getProviderClass() in order to load the class. * + * @param type Base class / Service interface of the factory to + * instantiate. + * * @param className Name of the concrete class corresponding to the * service provider * @@ -142,16 +146,19 @@ class FactoryFinder { * @param doFallback True if the current ClassLoader should be tried as * a fallback if the class is not found using cl */ - static Object newInstance(String className, ClassLoader cl, boolean doFallback) - throws ConfigurationError + static T newInstance(Class type, String className, ClassLoader cl, boolean doFallback) + throws FactoryConfigurationError { - return newInstance(className, cl, doFallback, false); + return newInstance(type, className, cl, doFallback, false); } /** * Create an instance of a class. Delegates to method * getProviderClass() in order to load the class. * + * @param type Base class / Service interface of the factory to + * instantiate. + * * @param className Name of the concrete class corresponding to the * service provider * @@ -164,9 +171,12 @@ class FactoryFinder { * @param useBSClsLoader True if cl=null actually meant bootstrap classLoader. This parameter * is needed since DocumentBuilderFactory/SAXParserFactory defined null as context classLoader. 
*/ - static Object newInstance(String className, ClassLoader cl, boolean doFallback, boolean useBSClsLoader) - throws ConfigurationError + static T newInstance(Class type, String className, ClassLoader cl, + boolean doFallback, boolean useBSClsLoader) + throws FactoryConfigurationError { + assert type != null; + // make sure we have access to restricted packages if (System.getSecurityManager() != null) { if (className != null && className.startsWith(DEFAULT_PACKAGE)) { @@ -176,20 +186,23 @@ class FactoryFinder { } try { - Class providerClass = getProviderClass(className, cl, doFallback, useBSClsLoader); + Class providerClass = getProviderClass(className, cl, doFallback, useBSClsLoader); + if (!type.isAssignableFrom(providerClass)) { + throw new ClassCastException(className + " cannot be cast to " + type.getName()); + } Object instance = providerClass.newInstance(); if (debug) { // Extra check to avoid computing cl strings dPrint("created new instance of " + providerClass + " using ClassLoader: " + cl); } - return instance; + return type.cast(instance); } catch (ClassNotFoundException x) { - throw new ConfigurationError( + throw new FactoryConfigurationError( "Provider " + className + " not found", x); } catch (Exception x) { - throw new ConfigurationError( + throw new FactoryConfigurationError( "Provider " + className + " could not be instantiated: " + x, x); } @@ -200,17 +213,18 @@ class FactoryFinder { * * @return Class object of factory, never null * - * @param factoryId Name of the factory to find, same as - * a property name + * @param type Base class / Service interface of the + * factory to find. + * * @param fallbackClassName Implementation class name, if nothing else * is found. Use null to mean no fallback. * * Package private so this code can be shared. */ - static Object find(String factoryId, String fallbackClassName) - throws ConfigurationError + static T find(Class type, String fallbackClassName) + throws FactoryConfigurationError { - return find(factoryId, null, fallbackClassName); + return find(type, type.getName(), null, fallbackClassName); } /** @@ -218,6 +232,9 @@ class FactoryFinder { * entry point. * @return Class object of factory, never null * + * @param type Base class / Service interface of the + * factory to find. + * * @param factoryId Name of the factory to find, same as * a property name * @@ -229,8 +246,8 @@ class FactoryFinder { * * Package private so this code can be shared. */ - static Object find(String factoryId, ClassLoader cl, String fallbackClassName) - throws ConfigurationError + static T find(Class type, String factoryId, ClassLoader cl, String fallbackClassName) + throws FactoryConfigurationError { dPrint("find factoryId =" + factoryId); @@ -239,7 +256,9 @@ class FactoryFinder { String systemProp = ss.getSystemProperty(factoryId); if (systemProp != null) { dPrint("found system property, value=" + systemProp); - return newInstance(systemProp, null, true); + // There's a bug here - because 'cl' is ignored. + // This will be handled separately. 
+ return newInstance(type, systemProp, null, true); } } catch (SecurityException se) { @@ -250,7 +269,6 @@ class FactoryFinder { // $java.home/lib/jaxp.properties if former not present String configFile = null; try { - String factoryClassName = null; if (firstTime) { synchronized (cacheProps) { if (firstTime) { @@ -269,130 +287,80 @@ class FactoryFinder { if (ss.doesFileExist(f)) { dPrint("Read properties file "+f); cacheProps.load(ss.getFileInputStream(f)); + } + } } } } - } - } - factoryClassName = cacheProps.getProperty(factoryId); + final String factoryClassName = cacheProps.getProperty(factoryId); if (factoryClassName != null) { dPrint("found in " + configFile + " value=" + factoryClassName); - return newInstance(factoryClassName, null, true); + // There's a bug here - because 'cl' is ignored. + // This will be handled separately. + return newInstance(type, factoryClassName, null, true); } } catch (Exception ex) { if (debug) ex.printStackTrace(); } - // Try Jar Service Provider Mechanism - Object provider = findJarServiceProvider(factoryId); - if (provider != null) { - return provider; + if (type.getName().equals(factoryId)) { + // Try Jar Service Provider Mechanism + final T provider = findServiceProvider(type); + if (provider != null) { + return provider; + } + } else { + // We're in the case where a 'custom' factoryId was provided, + // and in every case where that happens, we expect that + // fallbackClassName will be null. + assert fallbackClassName == null; } if (fallbackClassName == null) { - throw new ConfigurationError( + throw new FactoryConfigurationError( "Provider for " + factoryId + " cannot be found", null); } dPrint("loaded from fallback value: " + fallbackClassName); - return newInstance(fallbackClassName, cl, true); + return newInstance(type, fallbackClassName, cl, true); } /* - * Try to find provider using Jar Service Provider Mechanism + * Try to find provider using the ServiceLoader API + * + * @param type Base class / Service interface of the factory to find. 
* * @return instance of provider class if found or null */ - private static Object findJarServiceProvider(String factoryId) - throws ConfigurationError - { - String serviceId = "META-INF/services/" + factoryId; - InputStream is = null; - - // First try the Context ClassLoader - ClassLoader cl = ss.getContextClassLoader(); - boolean useBSClsLoader = false; - if (cl != null) { - is = ss.getResourceAsStream(cl, serviceId); - - // If no provider found then try the current ClassLoader - if (is == null) { - cl = FactoryFinder.class.getClassLoader(); - is = ss.getResourceAsStream(cl, serviceId); - useBSClsLoader = true; - } - } else { - // No Context ClassLoader, try the current ClassLoader - cl = FactoryFinder.class.getClassLoader(); - is = ss.getResourceAsStream(cl, serviceId); - useBSClsLoader = true; - } - - if (is == null) { - // No provider found - return null; - } - - if (debug) { // Extra check to avoid computing cl strings - dPrint("found jar resource=" + serviceId + " using ClassLoader: " + cl); - } - - BufferedReader rd; + private static T findServiceProvider(final Class type) { try { - rd = new BufferedReader(new InputStreamReader(is, "UTF-8")); - } - catch (java.io.UnsupportedEncodingException e) { - rd = new BufferedReader(new InputStreamReader(is)); - } - - String factoryClassName = null; - try { - // XXX Does not handle all possible input as specified by the - // Jar Service Provider specification - factoryClassName = rd.readLine(); - rd.close(); - } catch (IOException x) { - // No provider found - return null; - } - - if (factoryClassName != null && !"".equals(factoryClassName)) { - dPrint("found in resource, value=" + factoryClassName); - - // Note: here we do not want to fall back to the current - // ClassLoader because we want to avoid the case where the - // resource file was found using one ClassLoader and the - // provider class was instantiated using a different one. - return newInstance(factoryClassName, cl, false, useBSClsLoader); - } - - // No provider found - return null; - } - - static class ConfigurationError extends Error { - private Exception exception; - - /** - * Construct a new instance with the specified detail string and - * exception. - */ - ConfigurationError(String msg, Exception x) { - super(msg); - this.exception = x; - } - - Exception getException() { - return exception; - } - /** - * use the exception chaining mechanism of JDK1.4 - */ - @Override - public Throwable getCause() { - return exception; - } - } + return AccessController.doPrivileged(new PrivilegedAction() { + @Override + public T run() { + final ServiceLoader serviceLoader = ServiceLoader.load(type); + final Iterator iterator = serviceLoader.iterator(); + if (iterator.hasNext()) { + return iterator.next(); + } else { + return null; + } + } + }); + } catch(ServiceConfigurationError e) { + // It is not possible to wrap an error directly in + // FactoryConfigurationError - so we need to wrap the + // ServiceConfigurationError in a RuntimeException. + // The alternative would be to modify the logic in + // FactoryConfigurationError to allow setting a + // Throwable as the cause, but that could cause + // compatibility issues down the road. 
+ final RuntimeException x = new RuntimeException( + "Provider for " + type + " cannot be created", e); + final FactoryConfigurationError error = + new FactoryConfigurationError(x, x.getMessage()); + throw error; + } + } } diff --git a/jaxp/src/javax/xml/stream/XMLEventFactory.java b/jaxp/src/javax/xml/stream/XMLEventFactory.java index f92d77805f7..d9e47ef0727 100644 --- a/jaxp/src/javax/xml/stream/XMLEventFactory.java +++ b/jaxp/src/javax/xml/stream/XMLEventFactory.java @@ -23,14 +23,14 @@ */ /* - * Copyright (c) 2009 by Oracle Corporation. All Rights Reserved. + * Copyright (c) 2009, 2013, by Oracle Corporation. All Rights Reserved. */ package javax.xml.stream; -import javax.xml.stream.events.*; +import java.util.Iterator; import javax.xml.namespace.NamespaceContext; import javax.xml.namespace.QName; -import java.util.Iterator; +import javax.xml.stream.events.*; /** * This interface defines a utility class for creating instances of * XMLEvents @@ -54,48 +54,59 @@ public abstract class XMLEventFactory { /** - * Create a new instance of the factory + * Creates a new instance of the factory in exactly the same manner as the + * {@link #newFactory()} method. * @throws FactoryConfigurationError if an instance of this factory cannot be loaded */ public static XMLEventFactory newInstance() throws FactoryConfigurationError { - return (XMLEventFactory) FactoryFinder.find( - JAXPFACTORYID, - DEFAULIMPL); + return FactoryFinder.find(XMLEventFactory.class, DEFAULIMPL); } /** * Create a new instance of the factory. + *

     * This static method creates a new factory instance.
     * This method uses the following ordered lookup procedure to determine
     * the XMLEventFactory implementation class to load:
+     *
+     * <ul>
+     * <li>
     *   Use the javax.xml.stream.XMLEventFactory system property.
+     * </li>
+     * <li>
     *   Use the properties file "lib/stax.properties" in the JRE directory.
     *   This configuration file is in standard java.util.Properties format
     *   and contains the fully qualified name of the implementation class
     *   with the key being the system property defined above.
-     *   Use the Services API (as detailed in the JAR specification), if available,
-     *   to determine the classname. The Services API will look for a classname
-     *   in the file META-INF/services/javax.xml.stream.XMLEventFactory in jars
-     *   available to the runtime.
-     *   Platform default XMLEventFactory instance.
-     *
+     * </li>
+     * <li>
+     *   Use the service-provider loading facilities, defined by the
+     *   {@link java.util.ServiceLoader} class, to attempt to locate and load an
+     *   implementation of the service.
+     * </li>
+     * <li>
+     *   Otherwise, the system-default implementation is returned.
+     * </li>
+     * </ul>
+     * <p>
     *   Once an application has obtained a reference to a XMLEventFactory it
     *   can use the factory to configure and obtain stream instances.
-     *
+     * </p>
+     * <p>
     *   Note that this is a new method that replaces the deprecated newInstance() method.
     *   No changes in behavior are defined by this replacement method relative to
     *   the deprecated method.
-     *
-     * @throws FactoryConfigurationError if an instance of this factory cannot be loaded
+     * </p>
    + * @throws FactoryConfigurationError in case of {@linkplain + * java.util.ServiceConfigurationError service configuration error} or if + * the implementation is not available or cannot be instantiated. */ public static XMLEventFactory newFactory() throws FactoryConfigurationError { - return (XMLEventFactory) FactoryFinder.find( - JAXPFACTORYID, - DEFAULIMPL); + return FactoryFinder.find(XMLEventFactory.class, DEFAULIMPL); } /** @@ -116,40 +127,59 @@ public abstract class XMLEventFactory { public static XMLEventFactory newInstance(String factoryId, ClassLoader classLoader) throws FactoryConfigurationError { - try { - //do not fallback if given classloader can't find the class, throw exception - return (XMLEventFactory) FactoryFinder.find(factoryId, classLoader, null); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } + //do not fallback if given classloader can't find the class, throw exception + return FactoryFinder.find(XMLEventFactory.class, factoryId, classLoader, null); } /** * Create a new instance of the factory. * If the classLoader argument is null, then the ContextClassLoader is used. + *

+     * This method uses the following ordered lookup procedure to determine
+     * the XMLEventFactory implementation class to load:
+     * <ul>
+     * <li>
+     *   Use the value of the system property identified by {@code factoryId}.
+     * </li>
+     * <li>
+     *   Use the properties file "lib/stax.properties" in the JRE directory.
+     *   This configuration file is in standard java.util.Properties format
+     *   and contains the fully qualified name of the implementation class
+     *   with the key being the given {@code factoryId}.
+     * </li>
+     * <li>
+     *   If {@code factoryId} is "javax.xml.stream.XMLEventFactory",
+     *   use the service-provider loading facilities, defined by the
+     *   {@link java.util.ServiceLoader} class, to attempt to locate and load an
+     *   implementation of the service.
+     * </li>
+     * <li>
+     *   Otherwise, throws a {@link FactoryConfigurationError}.
+     * </li>
+     * </ul>
     *
+     * <p>
     * Note that this is a new method that replaces the deprecated
-     *   newInstance(String factoryId, ClassLoader classLoader) method.
+     *   {@link #newInstance(java.lang.String, java.lang.ClassLoader)
+     *   newInstance(String factoryId, ClassLoader classLoader)} method.
     * No changes in behavior are defined by this replacement method relative
     * to the deprecated method.
+     * </p>
    * * @param factoryId Name of the factory to find, same as * a property name * @param classLoader classLoader to use * @return the factory implementation - * @throws FactoryConfigurationError if an instance of this factory cannot be loaded + * @throws FactoryConfigurationError in case of {@linkplain + * java.util.ServiceConfigurationError service configuration error} or if + * the implementation is not available or cannot be instantiated. */ public static XMLEventFactory newFactory(String factoryId, - ClassLoader classLoader) + ClassLoader classLoader) throws FactoryConfigurationError { - try { - //do not fallback if given classloader can't find the class, throw exception - return (XMLEventFactory) FactoryFinder.find(factoryId, classLoader, null); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } + //do not fallback if given classloader can't find the class, throw exception + return FactoryFinder.find(XMLEventFactory.class, factoryId, classLoader, null); } /** diff --git a/jaxp/src/javax/xml/stream/XMLInputFactory.java b/jaxp/src/javax/xml/stream/XMLInputFactory.java index 48dc3675fa5..2bfbad5d461 100644 --- a/jaxp/src/javax/xml/stream/XMLInputFactory.java +++ b/jaxp/src/javax/xml/stream/XMLInputFactory.java @@ -23,13 +23,13 @@ */ /* - * Copyright (c) 2009 by Oracle Corporation. All Rights Reserved. + * Copyright (c) 2009, 2013, by Oracle Corporation. All Rights Reserved. */ package javax.xml.stream; -import javax.xml.transform.Source; import javax.xml.stream.util.XMLEventAllocator; +import javax.xml.transform.Source; /** * Defines an abstract implementation of a factory for getting streams. @@ -144,48 +144,59 @@ public abstract class XMLInputFactory { protected XMLInputFactory(){} /** - * Create a new instance of the factory. + * Creates a new instance of the factory in exactly the same manner as the + * {@link #newFactory()} method. * @throws FactoryConfigurationError if an instance of this factory cannot be loaded */ public static XMLInputFactory newInstance() throws FactoryConfigurationError { - return (XMLInputFactory) FactoryFinder.find( - "javax.xml.stream.XMLInputFactory", - DEFAULIMPL); + return FactoryFinder.find(XMLInputFactory.class, DEFAULIMPL); } /** * Create a new instance of the factory. + *

     * This static method creates a new factory instance.
     * This method uses the following ordered lookup procedure to determine
     * the XMLInputFactory implementation class to load:
+     *
+     * <ul>
+     * <li>
     *   Use the javax.xml.stream.XMLInputFactory system property.
+     * </li>
+     * <li>
     *   Use the properties file "lib/stax.properties" in the JRE directory.
     *   This configuration file is in standard java.util.Properties format
     *   and contains the fully qualified name of the implementation class
     *   with the key being the system property defined above.
-     *   Use the Services API (as detailed in the JAR specification), if available,
-     *   to determine the classname. The Services API will look for a classname
-     *   in the file META-INF/services/javax.xml.stream.XMLInputFactory in jars
-     *   available to the runtime.
-     *   Platform default XMLInputFactory instance.
-     *
+     * </li>
+     * <li>
+     *   Use the service-provider loading facilities, defined by the
+     *   {@link java.util.ServiceLoader} class, to attempt to locate and load an
+     *   implementation of the service.
+     * </li>
+     * <li>
+     *   Otherwise, the system-default implementation is returned.
+     * </li>
+     * </ul>
+     * <p>
     *   Once an application has obtained a reference to a XMLInputFactory it
     *   can use the factory to configure and obtain stream instances.
-     *
+     * </p>
+     * <p>
     *   Note that this is a new method that replaces the deprecated newInstance() method.
     *   No changes in behavior are defined by this replacement method relative to
     *   the deprecated method.
-     *
-     * @throws FactoryConfigurationError if an instance of this factory cannot be loaded
+     * </p>
    + * @throws FactoryConfigurationError in case of {@linkplain + * java.util.ServiceConfigurationError service configuration error} or if + * the implementation is not available or cannot be instantiated. */ public static XMLInputFactory newFactory() throws FactoryConfigurationError { - return (XMLInputFactory) FactoryFinder.find( - "javax.xml.stream.XMLInputFactory", - DEFAULIMPL); + return FactoryFinder.find(XMLInputFactory.class, DEFAULIMPL); } /** @@ -206,40 +217,60 @@ public abstract class XMLInputFactory { public static XMLInputFactory newInstance(String factoryId, ClassLoader classLoader) throws FactoryConfigurationError { - try { - //do not fallback if given classloader can't find the class, throw exception - return (XMLInputFactory) FactoryFinder.find(factoryId, classLoader, null); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } + //do not fallback if given classloader can't find the class, throw exception + return FactoryFinder.find(XMLInputFactory.class, factoryId, classLoader, null); } /** * Create a new instance of the factory. * If the classLoader argument is null, then the ContextClassLoader is used. + *

+     * This method uses the following ordered lookup procedure to determine
+     * the XMLInputFactory implementation class to load:
+     * <ul>
+     * <li>
+     *   Use the value of the system property identified by {@code factoryId}.
+     * </li>
+     * <li>
+     *   Use the properties file "lib/stax.properties" in the JRE directory.
+     *   This configuration file is in standard java.util.Properties format
+     *   and contains the fully qualified name of the implementation class
+     *   with the key being the given {@code factoryId}.
+     * </li>
+     * <li>
+     *   If {@code factoryId} is "javax.xml.stream.XMLInputFactory",
+     *   use the service-provider loading facilities, defined by the
+     *   {@link java.util.ServiceLoader} class, to attempt to locate and load an
+     *   implementation of the service.
+     * </li>
+     * <li>
+     *   Otherwise, throws a {@link FactoryConfigurationError}.
+     * </li>
+     * </ul>
     *
+     * <p>
     * Note that this is a new method that replaces the deprecated
-     *   newInstance(String factoryId, ClassLoader classLoader) method.
+     *   {@link #newInstance(java.lang.String, java.lang.ClassLoader)
+     *   newInstance(String factoryId, ClassLoader classLoader)} method.
     * No changes in behavior are defined by this replacement method relative
     * to the deprecated method.
+     * </p>
    * * @param factoryId Name of the factory to find, same as * a property name * @param classLoader classLoader to use * @return the factory implementation + * @throws FactoryConfigurationError in case of {@linkplain + * java.util.ServiceConfigurationError service configuration error} or if + * the implementation is not available or cannot be instantiated. * @throws FactoryConfigurationError if an instance of this factory cannot be loaded */ public static XMLInputFactory newFactory(String factoryId, ClassLoader classLoader) throws FactoryConfigurationError { - try { - //do not fallback if given classloader can't find the class, throw exception - return (XMLInputFactory) FactoryFinder.find(factoryId, classLoader, null); - } catch (FactoryFinder.ConfigurationError e) { - throw new FactoryConfigurationError(e.getException(), - e.getMessage()); - } + //do not fallback if given classloader can't find the class, throw exception + return FactoryFinder.find(XMLInputFactory.class, factoryId, classLoader, null); } /** diff --git a/jaxp/src/javax/xml/stream/XMLOutputFactory.java b/jaxp/src/javax/xml/stream/XMLOutputFactory.java index b0d4f0b309f..a5a593c9cd0 100644 --- a/jaxp/src/javax/xml/stream/XMLOutputFactory.java +++ b/jaxp/src/javax/xml/stream/XMLOutputFactory.java @@ -23,7 +23,7 @@ */ /* - * Copyright (c) 2009 by Oracle Corporation. All Rights Reserved. + * Copyright (c) 2009, 2013, by Oracle Corporation. All Rights Reserved. */ package javax.xml.stream; @@ -120,46 +120,58 @@ public abstract class XMLOutputFactory { protected XMLOutputFactory(){} /** - * Create a new instance of the factory. + * Creates a new instance of the factory in exactly the same manner as the + * {@link #newFactory()} method. * @throws FactoryConfigurationError if an instance of this factory cannot be loaded */ public static XMLOutputFactory newInstance() throws FactoryConfigurationError { - return (XMLOutputFactory) FactoryFinder.find("javax.xml.stream.XMLOutputFactory", - DEFAULIMPL); + return FactoryFinder.find(XMLOutputFactory.class, DEFAULIMPL); } /** * Create a new instance of the factory. + *

     * This static method creates a new factory instance. This method uses the
     * following ordered lookup procedure to determine the XMLOutputFactory
     * implementation class to load:
+     *
+     * <ul>