-By default symbols are also located using PATH. However, there are also Java properties that can be used to specify both the location of the binaries, and also separately the location of symbols. Use sun.jvm.hotspot.debugger.windbg.imagePath for the location of binaries, and use sun.jvm.hotspot.debugger.windbg.symbolPath for the location of symbols. imagePath defaults to PATH if not set, and symbolPath defaults to imagePath. The advantage of using these propeties is that you don't need to change your PATH setting, and they allow for binaries to be located separately from symbols.
+By default symbols are also located using PATH. However, there are also Java properties that
+can be used to specify both the location of the binaries, and also separately the location of
+symbols. Use sun.jvm.hotspot.debugger.windbg.imagePath for the location of binaries, and use
+sun.jvm.hotspot.debugger.windbg.symbolPath for the location of symbols. imagePath
+defaults to PATH if not set, and symbolPath defaults to imagePath. The
+advantage of using these properties is that you don't need to change your PATH setting, and
+they allow for binaries to be located separately from symbols.
-How you set these properties will depend on the SA tool being used. The following example demonstrates how to set one of the properties when launching the clhsdb tool:
+How you set these properties will depend on the SA tool being used. The following example
+demonstrates how to set one of the properties when launching the clhsdb tool:
If you are not seeing symbols for Windows libraries, try setting sun.jvm.hotspot.debugger.windbg.symbolPath to include "srv*https://msdl.microsoft.com/download/symbols". Also include PATH so SA will still find your JVM and JNI symbols. For example:
+
If you are not seeing symbols for Windows libraries, try setting
+sun.jvm.hotspot.debugger.windbg.symbolPath to include
+"srv*https://msdl.microsoft.com/download/symbols". Also include PATH so SA will still
+find your JVM and JNI symbols. For example:
For locating the user JNI libraries, SA uses DYLD_LIBRARY_PATH. It can contain
more than one directory separated by a colon. DYLD_LIBRARY_PATH can also be
- used for locating the JDK libraries, but it needs to specify the full path to the libraries. SA will
- not automatically search subdirs such as lib/server as it does for JAVA_HOME.
+ used for locating the JDK libraries, but it needs to specify the full path to the libraries. SA
+ will not automatically search subdirs such as lib/server as it does for JAVA_HOME.
For locating the macOS libraries, SA uses SA_ALTROOT similar to the linux support,
- except it does not use it to map all the subdirs. It just appends SA_ALTROOT to the
+ except it does not use it to map all the subdirs. It just prepends SA_ALTROOT to the
full path of each macOS library. So if you specify SA_ALTROOT=/altroot, SA will
prepend /altroot to the full path of each macOS library. Note however, due to
JDK-8249779 , SA will not
From ff5c9a4ddecbe3ee453a30fcfd49fd677c174f06 Mon Sep 17 00:00:00 2001
From: Thomas Schatzl
Date: Thu, 11 Apr 2024 11:35:07 +0000
Subject: [PATCH 005/486] 8329528: G1 does not update TAMS correctly when
dropping retained regions during Concurrent Start pause
Reviewed-by: ayang, iwalulya
---
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp | 2 +-
src/hotspot/share/gc/g1/g1YoungCollector.cpp | 13 ++--
.../share/gc/g1/g1YoungGCPreEvacuateTasks.hpp | 1 +
.../pinnedobjs/TestDroppedRetainedTAMS.java | 72 +++++++++++++++++++
4 files changed, 80 insertions(+), 8 deletions(-)
create mode 100644 test/hotspot/jtreg/gc/g1/pinnedobjs/TestDroppedRetainedTAMS.java
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index f9ebc74805b..4b20bb43317 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -869,7 +869,7 @@ public:
NoteStartOfMarkHRClosure() : HeapRegionClosure(), _cm(G1CollectedHeap::heap()->concurrent_mark()) { }
bool do_heap_region(HeapRegion* r) override {
- if (r->is_old_or_humongous() && !r->is_collection_set_candidate()) {
+ if (r->is_old_or_humongous() && !r->is_collection_set_candidate() && !r->in_collection_set()) {
_cm->update_top_at_mark_start(r);
}
return false;
diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp
index 4fa3b928763..906d9854d8d 100644
--- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp
@@ -484,13 +484,8 @@ void G1YoungCollector::set_young_collection_default_active_worker_threads(){
}
void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
-
- // Must be before collection set calculation, requires collection set to not
- // be calculated yet.
- if (collector_state()->in_concurrent_start_gc()) {
- concurrent_mark()->pre_concurrent_start(_gc_cause);
- }
-
+ // Flush various data in thread-local buffers to be able to determine the collection
+ // set
{
Ticks start = Ticks::now();
G1PreEvacuateCollectionSetBatchTask cl;
@@ -501,6 +496,10 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info)
// Needs log buffers flushed.
calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());
+ if (collector_state()->in_concurrent_start_gc()) {
+ concurrent_mark()->pre_concurrent_start(_gc_cause);
+ }
+
// Please see comment in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() to see how
// reference processing currently works in G1.
diff --git a/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.hpp b/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.hpp
index effefa7a1eb..912941fa2a2 100644
--- a/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.hpp
+++ b/src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.hpp
@@ -29,6 +29,7 @@
// Set of pre evacuate collection set tasks containing ("s" means serial):
// - Retire TLAB and Flush Logs (Java threads)
+// - Flush pin count cache (Java threads)
// - Flush Logs (s) (Non-Java threads)
class G1PreEvacuateCollectionSetBatchTask : public G1BatchedTask {
class JavaThreadRetireTLABAndFlushLogs;
diff --git a/test/hotspot/jtreg/gc/g1/pinnedobjs/TestDroppedRetainedTAMS.java b/test/hotspot/jtreg/gc/g1/pinnedobjs/TestDroppedRetainedTAMS.java
new file mode 100644
index 00000000000..f650e53a25f
--- /dev/null
+++ b/test/hotspot/jtreg/gc/g1/pinnedobjs/TestDroppedRetainedTAMS.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ * @summary Check that TAMSes are correctly updated for regions dropped from
+ * the retained collection set candidates during a Concurrent Start pause.
+ * @requires vm.gc.G1
+ * @requires vm.flagless
+ * @library /test/lib
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
+ -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx32m -XX:G1NumCollectionsKeepPinned=1
+ -XX:+VerifyBeforeGC -XX:+VerifyAfterGC -XX:G1MixedGCLiveThresholdPercent=100
+ -XX:G1HeapWastePercent=0 -Xlog:gc,gc+ergo+cset=trace gc.g1.pinnedobjs.TestDroppedRetainedTAMS
+ */
+
+package gc.g1.pinnedobjs;
+
+import jdk.test.whitebox.WhiteBox;
+
+public class TestDroppedRetainedTAMS {
+
+ private static final WhiteBox wb = WhiteBox.getWhiteBox();
+
+ private static final char[] dummy = new char[100];
+
+ public static void main(String[] args) {
+ wb.fullGC(); // Move the target dummy object to old gen.
+
+ wb.pinObject(dummy);
+
+ // After this concurrent cycle the pinned region will be in the (marking)
+ // collection set candidates.
+ wb.g1RunConcurrentGC();
+
+ // Pass the Prepare mixed gc which will not do anything about the marking
+ // candidates.
+ wb.youngGC();
+ // Mixed GC. Will complete. That pinned region is now retained. The mixed gcs
+ // will end here.
+ wb.youngGC();
+
+ // The pinned region will be dropped from the retained candidates during the
+ // Concurrent Start GC, leaving that region's TAMS broken.
+ wb.g1RunConcurrentGC();
+
+ // Verification will find a lot of broken objects.
+ wb.youngGC();
+ System.out.println(dummy);
+ }
+}
From ecc603ca9b441cbb7ad27fbc2529fcb0b1da1992 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20Jeli=C5=84ski?=
Date: Thu, 11 Apr 2024 12:42:24 +0000
Subject: [PATCH 006/486] 8201183: sjavac build failures: "Connection attempt
failed: Connection refused"
Reviewed-by: erikj, ihse
---
.../tools/javacserver/shared/PortFile.java | 44 ++++++++++---------
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/make/langtools/tools/javacserver/shared/PortFile.java b/make/langtools/tools/javacserver/shared/PortFile.java
index 2e4283a22b3..b31e97cfeea 100644
--- a/make/langtools/tools/javacserver/shared/PortFile.java
+++ b/make/langtools/tools/javacserver/shared/PortFile.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,17 +50,16 @@ public class PortFile {
// Followed by a 4 byte int, with the port nr.
// Followed by a 8 byte long, with cookie nr.
- private String filename;
- private File file;
- private File stopFile;
+ private final String filename;
+ private final File file;
+ private final File stopFile;
private RandomAccessFile rwfile;
- private FileChannel channel;
// FileLock used to solve inter JVM synchronization, lockSem used to avoid
// JVM internal OverlappingFileLockExceptions.
// Class invariant: lock.isValid() <-> lockSem.availablePermits() == 0
private FileLock lock;
- private Semaphore lockSem = new Semaphore(1);
+ private final Semaphore lockSem = new Semaphore(1);
private boolean containsPortInfo;
private int serverPort;
@@ -89,17 +88,18 @@ public class PortFile {
}
// The rwfile should only be readable by the owner of the process
// and no other! How do we do that on a RandomAccessFile?
- channel = rwfile.getChannel();
}
/**
* Lock the port file.
*/
public void lock() throws IOException, InterruptedException {
- if (channel == null) {
- initializeChannel();
- }
lockSem.acquire();
+ if (rwfile != null) {
+ throw new IllegalStateException("rwfile not null");
+ }
+ initializeChannel();
+ FileChannel channel = rwfile.getChannel();
lock = channel.lock();
}
@@ -110,8 +110,7 @@ public class PortFile {
public void getValues() {
containsPortInfo = false;
if (lock == null) {
- // Not locked, remain ignorant about port file contents.
- return;
+ throw new IllegalStateException("Must lock before calling getValues");
}
try {
if (rwfile.length()>0) {
@@ -156,6 +155,9 @@ public class PortFile {
* Store the values into the locked port file.
*/
public void setValues(int port, long cookie) throws IOException {
+ if (lock == null) {
+ throw new IllegalStateException("Must lock before calling setValues");
+ }
rwfile.seek(0);
// Write the magic nr that identifies a port file.
rwfile.writeInt(magicNr);
@@ -169,19 +171,19 @@ public class PortFile {
* Delete the port file.
*/
public void delete() throws IOException, InterruptedException {
- // Access to file must be closed before deleting.
- rwfile.close();
-
- file.delete();
-
- // Wait until file has been deleted (deletes are asynchronous on Windows!) otherwise we
+ if (!file.exists()) { // file deleted already
+ return;
+ }
+ // Keep trying until file has been deleted, otherwise we
// might shutdown the server and prevent another one from starting.
- for (int i = 0; i < 10 && file.exists(); i++) {
+ for (int i = 0; i < 10 && file.exists() && !file.delete(); i++) {
Thread.sleep(1000);
}
if (file.exists()) {
throw new IOException("Failed to delete file.");
}
+ // allow some time for late clients to connect
+ Thread.sleep(1000);
}
/**
@@ -210,10 +212,12 @@ public class PortFile {
*/
public void unlock() throws IOException {
if (lock == null) {
- return;
+ throw new IllegalStateException("Not locked");
}
lock.release();
lock = null;
+ rwfile.close();
+ rwfile = null;
lockSem.release();
}
From 63684cd18300d862f3128bd13995e5c82307b50c Mon Sep 17 00:00:00 2001
From: Coleen Phillimore
Date: Thu, 11 Apr 2024 13:17:48 +0000
Subject: [PATCH 007/486] 8327250: assert(!method->is_old()) failed: Should not
be installing old methods
Reviewed-by: eosterlund, sspitsyn
---
.../share/interpreter/interpreterRuntime.cpp | 9 ++++---
.../share/interpreter/linkResolver.cpp | 24 ++++++++++++++++++-
.../share/interpreter/linkResolver.hpp | 6 ++---
3 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp
index ff7c453e01e..5f8dcbbf2ad 100644
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp
@@ -864,13 +864,12 @@ void InterpreterRuntime::resolve_invoke(JavaThread* current, Bytecodes::Code byt
return;
}
- if (JvmtiExport::can_hotswap_or_post_breakpoint() && info.resolved_method()->is_old()) {
- resolved_method = methodHandle(current, info.resolved_method()->get_new_method());
- } else {
- resolved_method = methodHandle(current, info.resolved_method());
- }
+ resolved_method = methodHandle(current, info.resolved_method());
} // end JvmtiHideSingleStepping
+ // Don't allow safepoints until the method is cached.
+ NoSafepointVerifier nsv;
+
// check if link resolution caused cpCache to be updated
if (cache->resolved_method_entry_at(method_index)->is_resolved(bytecode)) return;
diff --git a/src/hotspot/share/interpreter/linkResolver.cpp b/src/hotspot/share/interpreter/linkResolver.cpp
index 38bef3e1e98..7b5b62ac075 100644
--- a/src/hotspot/share/interpreter/linkResolver.cpp
+++ b/src/hotspot/share/interpreter/linkResolver.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@
#include "oops/oop.inline.hpp"
#include "oops/resolvedIndyEntry.hpp"
#include "oops/symbolHandle.hpp"
+#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
@@ -112,6 +113,27 @@ void CallInfo::set_handle(Klass* resolved_klass,
_resolved_appendix = resolved_appendix;
}
+// Redefinition safepoint may have updated the method. Make sure the new version of the method is returned.
+// Callers are responsible for not safepointing and storing this method somewhere safe where redefinition
+// can replace it if it runs again. Safe places are constant pool cache and code cache metadata.
+// The old method is safe in CallInfo since it's a methodHandle (it won't get deleted), and accessed with these
+// accessors.
+Method* CallInfo::resolved_method() const {
+ if (JvmtiExport::can_hotswap_or_post_breakpoint() && _resolved_method->is_old()) {
+ return _resolved_method->get_new_method();
+ } else {
+ return _resolved_method();
+ }
+}
+
+Method* CallInfo::selected_method() const {
+ if (JvmtiExport::can_hotswap_or_post_breakpoint() && _selected_method->is_old()) {
+ return _selected_method->get_new_method();
+ } else {
+ return _selected_method();
+ }
+}
+
void CallInfo::set_common(Klass* resolved_klass,
const methodHandle& resolved_method,
const methodHandle& selected_method,
diff --git a/src/hotspot/share/interpreter/linkResolver.hpp b/src/hotspot/share/interpreter/linkResolver.hpp
index c7a4c0eaba7..e18749cd6a5 100644
--- a/src/hotspot/share/interpreter/linkResolver.hpp
+++ b/src/hotspot/share/interpreter/linkResolver.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -92,8 +92,8 @@ class CallInfo : public StackObj {
CallInfo(Method* resolved_method, Klass* resolved_klass, TRAPS);
Klass* resolved_klass() const { return _resolved_klass; }
- Method* resolved_method() const { return _resolved_method(); }
- Method* selected_method() const { return _selected_method(); }
+ Method* resolved_method() const;
+ Method* selected_method() const;
Handle resolved_appendix() const { return _resolved_appendix; }
Handle resolved_method_name() const { return _resolved_method_name; }
// Materialize a java.lang.invoke.ResolvedMethodName for this resolved_method
From 2e3682a7f2983cd58b9564253dc698760faba4b8 Mon Sep 17 00:00:00 2001
From: Mahendra Chhipa
Date: Thu, 11 Apr 2024 13:51:07 +0000
Subject: [PATCH 008/486] 8319678: Several tests from corelibs areas ignore VM
flags
Reviewed-by: naoto, jpai
---
.../lang/Thread/UncaughtExceptionsTest.java | 16 ++--
.../java/lang/annotation/LoaderLeakTest.java | 4 +-
.../reliability/benchmark/bench/rmi/Main.java | 51 +++++------
.../java/time/chrono/HijrahConfigTest.java | 18 ++--
.../spi/providers/InitialContextTest.java | 85 +++++++++----------
test/jdk/sun/misc/EscapePath.java | 38 ++++++---
6 files changed, 105 insertions(+), 107 deletions(-)
diff --git a/test/jdk/java/lang/Thread/UncaughtExceptionsTest.java b/test/jdk/java/lang/Thread/UncaughtExceptionsTest.java
index 42d85593088..915d1cb6b76 100644
--- a/test/jdk/java/lang/Thread/UncaughtExceptionsTest.java
+++ b/test/jdk/java/lang/Thread/UncaughtExceptionsTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,15 +23,13 @@
import java.util.stream.Stream;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.MethodSource;
-import org.junit.jupiter.params.provider.Arguments;
-
import static java.lang.System.err;
import static java.lang.System.out;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
/*
* @test
@@ -85,7 +83,7 @@ class UncaughtExceptionsTest {
@MethodSource("testCases")
void test(String className, int exitValue, String stdOutMatch, String stdErrMatch) throws Throwable {
String cmd = "UncaughtExitSimulator$" + className;
- ProcessBuilder processBuilder = ProcessTools.createLimitedTestJavaProcessBuilder(cmd);
+ ProcessBuilder processBuilder = ProcessTools.createTestJavaProcessBuilder(cmd);
OutputAnalyzer outputAnalyzer = ProcessTools.executeCommand(processBuilder);
outputAnalyzer.shouldHaveExitValue(exitValue);
outputAnalyzer.stderrShouldMatch(stdErrMatch);
diff --git a/test/jdk/java/lang/annotation/LoaderLeakTest.java b/test/jdk/java/lang/annotation/LoaderLeakTest.java
index ed230d8df0e..762a08f9990 100644
--- a/test/jdk/java/lang/annotation/LoaderLeakTest.java
+++ b/test/jdk/java/lang/annotation/LoaderLeakTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@ public class LoaderLeakTest {
}
private void runJavaProcessExpectSuccessExitCode(String ... command) throws Throwable {
- var processBuilder = ProcessTools.createLimitedTestJavaProcessBuilder(command)
+ var processBuilder = ProcessTools.createTestJavaProcessBuilder(command)
.directory(Paths.get(Utils.TEST_CLASSES).toFile());
ProcessTools.executeCommand(processBuilder).shouldHaveExitValue(0);
}
diff --git a/test/jdk/java/rmi/reliability/benchmark/bench/rmi/Main.java b/test/jdk/java/rmi/reliability/benchmark/bench/rmi/Main.java
index b4d8e289075..6e7405df896 100644
--- a/test/jdk/java/rmi/reliability/benchmark/bench/rmi/Main.java
+++ b/test/jdk/java/rmi/reliability/benchmark/bench/rmi/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,13 +25,13 @@
* @test
* @summary The RMI benchmark test. This java class is used to run the test
* under JTREG.
- * @library ../../../../testlibrary ../../
+ * @library ../../../../testlibrary ../../ /test/lib
* @modules java.desktop
* java.rmi/sun.rmi.registry
* java.rmi/sun.rmi.server
* java.rmi/sun.rmi.transport
* java.rmi/sun.rmi.transport.tcp
- * @build TestLibrary bench.BenchInfo bench.HtmlReporter bench.Util
+ * @build TestLibrary bench.BenchInfo bench.HtmlReporter bench.Util jdk.test.lib.process.ProcessTools
* bench.Benchmark bench.Reporter bench.XmlReporter bench.ConfigFormatException
* bench.Harness bench.TextReporter bench.rmi.BenchServer
* bench.rmi.DoubleArrayCalls bench.rmi.LongCalls bench.rmi.ShortCalls
@@ -51,19 +51,12 @@
package bench.rmi;
-import bench.ConfigFormatException;
-import bench.Harness;
-import bench.HtmlReporter;
-import bench.Reporter;
-import bench.TextReporter;
-import bench.XmlReporter;
-import static bench.rmi.Main.OutputFormat.*;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
-import java.io.InputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.rmi.AlreadyBoundException;
@@ -77,6 +70,18 @@ import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
+import bench.ConfigFormatException;
+import bench.Harness;
+import bench.HtmlReporter;
+import bench.Reporter;
+import bench.TextReporter;
+import bench.XmlReporter;
+import static bench.rmi.Main.OutputFormat.HTML;
+import static bench.rmi.Main.OutputFormat.TEXT;
+import static bench.rmi.Main.OutputFormat.XML;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
/**
* RMI/Serialization benchmark tests.
*/
@@ -234,13 +239,6 @@ public class Main {
//Setup for client mode, server will fork client process
//after its initiation.
List clientProcessStr = new ArrayList<>();
- clientProcessStr.add(System.getProperty("test.jdk") +
- File.separator + "bin" + File.separator + "java");
- String classpath = System.getProperty("java.class.path");
- if (classpath != null) {
- clientProcessStr.add("-cp");
- clientProcessStr.add(classpath);
- }
clientProcessStr.add("-Djava.security.policy=" + TEST_SRC_PATH + "policy.all");
clientProcessStr.add("-Djava.security.manager=allow");
clientProcessStr.add("-Dtest.src=" + TEST_SRC_PATH);
@@ -276,20 +274,13 @@ public class Main {
}
try {
- Process client = new ProcessBuilder(clientProcessStr).
- inheritIO().start();
- try {
- client.waitFor();
- int exitValue = client.exitValue();
- if (0 != exitValue) {
- die("Error: error happened in client process, exitValue = " + exitValue);
- }
- } finally {
- client.destroyForcibly();
- }
+ ProcessBuilder pb = ProcessTools.createTestJavaProcessBuilder(clientProcessStr);
+ OutputAnalyzer outputAnalyzer = ProcessTools.executeProcess(pb);
+ System.out.println(outputAnalyzer.getOutput());
+ outputAnalyzer.shouldHaveExitValue(0);
} catch (IOException ex) {
die("Error: Unable start client process, ex=" + ex.getMessage());
- } catch (InterruptedException ex) {
+ } catch (Exception ex) {
die("Error: Error happening to client process, ex=" + ex.getMessage());
}
break;
diff --git a/test/jdk/java/time/nontestng/java/time/chrono/HijrahConfigTest.java b/test/jdk/java/time/nontestng/java/time/chrono/HijrahConfigTest.java
index 1caaf68153b..b76848e4bb1 100644
--- a/test/jdk/java/time/nontestng/java/time/chrono/HijrahConfigTest.java
+++ b/test/jdk/java/time/nontestng/java/time/chrono/HijrahConfigTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,8 @@
import java.nio.file.Files;
import java.nio.file.Path;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
import tests.Helper;
import tests.JImageGenerator;
@@ -32,12 +34,12 @@ import tests.JImageGenerator;
* @summary Tests whether a custom Hijrah configuration properties file works correctly
* @bug 8187987
* @requires (vm.compMode != "Xcomp" & os.maxMemory >= 2g)
- * @library /tools/lib
+ * @library /tools/lib /test/lib
* @enablePreview
* @modules java.base/jdk.internal.jimage
* jdk.jlink/jdk.tools.jimage
* jdk.compiler
- * @build HijrahConfigCheck tests.*
+ * @build HijrahConfigCheck tests.* jdk.test.lib.compiler.CompilerUtils jdk.test.lib.process.ProcessTools
* @run main/othervm -Xmx1g HijrahConfigTest
*/
public class HijrahConfigTest {
@@ -66,13 +68,7 @@ public class HijrahConfigTest {
// Run tests
Path launcher = outputPath.resolve("bin").resolve("java");
- ProcessBuilder builder = new ProcessBuilder(
- launcher.toAbsolutePath().toString(), "-ea", "-esa", "HijrahConfigCheck");
- Process p = builder.inheritIO().start();
- p.waitFor();
- int exitValue = p.exitValue();
- if (exitValue != 0) {
- throw new RuntimeException("HijrahConfigTest failed. Exit value: " + exitValue);
- }
+ OutputAnalyzer analyzer = ProcessTools.executeCommand(launcher.toAbsolutePath().toString(), "-ea", "-esa", "HijrahConfigCheck");
+ analyzer.shouldHaveExitValue(0);
}
}
diff --git a/test/jdk/javax/naming/spi/providers/InitialContextTest.java b/test/jdk/javax/naming/spi/providers/InitialContextTest.java
index 054365878a1..f1858586d17 100644
--- a/test/jdk/javax/naming/spi/providers/InitialContextTest.java
+++ b/test/jdk/javax/naming/spi/providers/InitialContextTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -21,23 +21,38 @@
* questions.
*/
-import javax.naming.Context;
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.io.SequenceInputStream;
+import java.io.StringWriter;
+import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.util.*;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import static java.lang.String.format;
+import javax.naming.Context;
+
import static java.util.Arrays.asList;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonMap;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
/*
* @test
@@ -45,6 +60,9 @@ import static java.util.Collections.singletonMap;
* @summary Examines different ways JNDI providers can hook up themselves and
* become available. Each case mimics the most straightforward way of
* executing scenarios.
+ * @library /test/lib
+ * @build jdk.test.lib.process.ProcessTools
+ * @run main InitialContextTest
*/
public class InitialContextTest {
@@ -243,9 +261,13 @@ public class InitialContextTest {
private static void jar(Path jarName, Path jarRoot) {
String jar = getJDKTool("jar");
- ProcessBuilder p = new ProcessBuilder(jar, "cf", jarName.toString(),
- "-C", jarRoot.toString(), ".");
- quickFail(run(p));
+ String [] commands = {jar, "cf", jarName.toString(),"-C", jarRoot.toString(), "."};
+ try {
+ OutputAnalyzer outputAnalyzer = ProcessTools.executeCommand(commands);
+ outputAnalyzer.shouldHaveExitValue(0);
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
}
private static void javac(Path compilationOutput, Path... sourceFiles) {
@@ -256,22 +278,18 @@ public class InitialContextTest {
commands.addAll(paths.stream()
.map(Path::toString)
.collect(Collectors.toList()));
- quickFail(run(new ProcessBuilder(commands)));
- }
-
- private static void quickFail(Result r) {
- if (r.exitValue != 0)
- throw new RuntimeException(r.output);
+ try {
+ OutputAnalyzer outputAnalyzer = ProcessTools.executeCommand(commands.toArray(new String[commands.size()]));
+ outputAnalyzer.shouldHaveExitValue(0);
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
}
private static Result java(Map properties,
Collection classpath,
String classname) {
-
- String java = getJDKTool("java");
-
List commands = new ArrayList<>();
- commands.add(java);
commands.addAll(properties.entrySet()
.stream()
.map(e -> "-D" + e.getKey() + "=" + e.getValue())
@@ -283,35 +301,14 @@ public class InitialContextTest {
commands.add("-cp");
commands.add(cp);
commands.add(classname);
-
- return run(new ProcessBuilder(commands));
- }
-
- private static Result run(ProcessBuilder b) {
- Process p = null;
- try {
- p = b.start();
- } catch (IOException e) {
- throw new RuntimeException(
- format("Couldn't start process '%s'", b.command()), e);
- }
-
- String output;
- try {
- output = toString(p.getInputStream(), p.getErrorStream());
- } catch (IOException e) {
- throw new RuntimeException(
- format("Couldn't read process output '%s'", b.command()), e);
- }
+ ProcessBuilder pb = ProcessTools.createTestJavaProcessBuilder(commands);
try {
- p.waitFor();
- } catch (InterruptedException e) {
- throw new RuntimeException(
- format("Process hasn't finished '%s'", b.command()), e);
+ OutputAnalyzer outputAnalyzer = ProcessTools.executeProcess(pb);
+ return new Result(outputAnalyzer.getExitValue(), outputAnalyzer.getOutput());
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
}
-
- return new Result(p.exitValue(), output);
}
private static String getJDKTool(String name) {
diff --git a/test/jdk/sun/misc/EscapePath.java b/test/jdk/sun/misc/EscapePath.java
index ced4fc54475..bbe7e62db5a 100644
--- a/test/jdk/sun/misc/EscapePath.java
+++ b/test/jdk/sun/misc/EscapePath.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,19 @@
/* @test
* @bug 4359123
* @summary Test loading of classes with # in the path
+ * @library /test/lib
+ * @build jdk.test.lib.process.ProcessTools
+ * @run main EscapePath
*/
-import java.io.*;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
public class EscapePath {
@@ -75,14 +86,19 @@ public class EscapePath {
fos.close();
}
- private static void invokeJava() throws Exception {
- String command = System.getProperty("java.home") +
- File.separator + "bin" + File.separator +
- "java -classpath " + "a#b/ Hello";
- Process p = Runtime.getRuntime().exec(command);
- p.waitFor();
- int result = p.exitValue();
- if (result != 0)
- throw new RuntimeException("Path encoding failure.");
+ private static void invokeJava() {
+ List commands = new ArrayList<>();
+
+ commands.add("-classpath");
+ commands.add("a#b");
+ commands.add("Hello");
+ ProcessBuilder pb = ProcessTools.createTestJavaProcessBuilder(commands);
+
+ try {
+ OutputAnalyzer outputAnalyzer = ProcessTools.executeProcess(pb);
+ outputAnalyzer.shouldHaveExitValue(0);
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
}
}
From 16061874ffdd1b018fe1cad7e6d8ba8bdbdbbee1 Mon Sep 17 00:00:00 2001
From: Magnus Ihse Bursie
Date: Thu, 11 Apr 2024 14:15:34 +0000
Subject: [PATCH 009/486] 8326947: Minimize MakeBase.gmk
Reviewed-by: erikj
---
make/Bundles.gmk | 5 +-
make/CompileDemos.gmk | 2 +
make/CompileInterimLangtools.gmk | 2 +
make/CompileToolsJdk.gmk | 2 +
make/CopyImportModules.gmk | 2 +
make/CopyInterimTZDB.gmk | 2 +
make/CreateJmods.gmk | 2 +
make/Docs.gmk | 2 +
make/GenerateLinkOptData.gmk | 2 +
make/GraalBuilderImage.gmk | 2 +
make/Images.gmk | 2 +
make/InitSupport.gmk | 25 ++
make/JrtfsJar.gmk | 2 +
make/MacBundles.gmk | 2 +
make/ModuleWrapper.gmk | 2 +
make/SourceRevision.gmk | 12 +-
make/StaticLibsImage.gmk | 2 +
make/autoconf/spec.gmk.template | 4 +
make/common/FileUtils.gmk | 303 +++++++++++++++++
make/common/JarArchive.gmk | 4 +-
make/common/JavaCompilation.gmk | 4 +-
make/common/MakeBase.gmk | 289 +---------------
make/common/MakeIO.gmk | 29 --
make/common/NativeCompilation.gmk | 1 +
make/common/Utils.gmk | 321 +++++++++---------
make/common/modules/CopyCommon.gmk | 2 +
make/common/modules/LauncherCommon.gmk | 1 +
make/hotspot/CopyToExplodedJdk.gmk | 2 +
make/hotspot/gensrc/GensrcJvmti.gmk | 2 +
make/hotspot/lib/CompileJvm.gmk | 2 +
make/hotspot/test/GtestImage.gmk | 2 +
make/modules/java.base/Lib.gmk | 1 +
.../java.desktop/lib/Awt2dLibraries.gmk | 2 +
make/modules/jdk.compiler/Gendata.gmk | 1 +
make/test/BuildFailureHandler.gmk | 2 +
make/test/BuildJtregTestThreadFactory.gmk | 2 +
make/test/BuildMicrobenchmark.gmk | 2 +
make/test/BuildTestLib.gmk | 2 +
make/test/BuildTestLibNative.gmk | 2 +
make/test/JtregNativeHotspot.gmk | 2 +
make/test/JtregNativeJdk.gmk | 2 +
make/test/JtregNativeLibTest.gmk | 2 +
test/make/TestCopyFiles.gmk | 3 +-
43 files changed, 577 insertions(+), 482 deletions(-)
create mode 100644 make/common/FileUtils.gmk
diff --git a/make/Bundles.gmk b/make/Bundles.gmk
index 33d51d3f613..0901e415a8a 100644
--- a/make/Bundles.gmk
+++ b/make/Bundles.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,9 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
+include MakeIO.gmk
+
PRODUCT_TARGETS :=
LEGACY_TARGETS :=
TEST_TARGETS :=
diff --git a/make/CompileDemos.gmk b/make/CompileDemos.gmk
index 3aee428602d..9339250c4fb 100644
--- a/make/CompileDemos.gmk
+++ b/make/CompileDemos.gmk
@@ -31,6 +31,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
include TextFileProcessing.gmk
include ZipArchive.gmk
diff --git a/make/CompileInterimLangtools.gmk b/make/CompileInterimLangtools.gmk
index 990ec06aca2..1a8b6382f81 100644
--- a/make/CompileInterimLangtools.gmk
+++ b/make/CompileInterimLangtools.gmk
@@ -28,6 +28,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
include Modules.gmk
diff --git a/make/CompileToolsJdk.gmk b/make/CompileToolsJdk.gmk
index 13101c7cccf..50ffe73a096 100644
--- a/make/CompileToolsJdk.gmk
+++ b/make/CompileToolsJdk.gmk
@@ -27,6 +27,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
include TextFileProcessing.gmk
diff --git a/make/CopyImportModules.gmk b/make/CopyImportModules.gmk
index 34baaf9d37b..52515ebd314 100644
--- a/make/CopyImportModules.gmk
+++ b/make/CopyImportModules.gmk
@@ -31,6 +31,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
+
LIBS_DIR := $(wildcard $(addsuffix /$(MODULE), $(IMPORT_MODULES_LIBS)))
CMDS_DIR := $(wildcard $(addsuffix /$(MODULE), $(IMPORT_MODULES_CMDS)))
CONF_DIR := $(wildcard $(addsuffix /$(MODULE), $(IMPORT_MODULES_CONF)))
diff --git a/make/CopyInterimTZDB.gmk b/make/CopyInterimTZDB.gmk
index 6ce41865e2c..ac390580aa9 100644
--- a/make/CopyInterimTZDB.gmk
+++ b/make/CopyInterimTZDB.gmk
@@ -28,6 +28,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
+
##########################################################################################
### TZDB tool needs files from java.time.zone package
diff --git a/make/CreateJmods.gmk b/make/CreateJmods.gmk
index d1e83c8c8e0..fd36554358b 100644
--- a/make/CreateJmods.gmk
+++ b/make/CreateJmods.gmk
@@ -27,6 +27,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include Execute.gmk
include Modules.gmk
diff --git a/make/Docs.gmk b/make/Docs.gmk
index d0c01f0283d..2977f6f66f9 100644
--- a/make/Docs.gmk
+++ b/make/Docs.gmk
@@ -26,6 +26,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include Execute.gmk
include Modules.gmk
include ModuleTools.gmk
diff --git a/make/GenerateLinkOptData.gmk b/make/GenerateLinkOptData.gmk
index 5dd766c8c07..e7d6962fb79 100644
--- a/make/GenerateLinkOptData.gmk
+++ b/make/GenerateLinkOptData.gmk
@@ -31,6 +31,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
################################################################################
diff --git a/make/GraalBuilderImage.gmk b/make/GraalBuilderImage.gmk
index 8c4d66aa9d5..7fa90c66019 100644
--- a/make/GraalBuilderImage.gmk
+++ b/make/GraalBuilderImage.gmk
@@ -31,6 +31,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
+
################################################################################
TARGETS :=
diff --git a/make/Images.gmk b/make/Images.gmk
index 3048eb3eda9..bfad1ad563c 100644
--- a/make/Images.gmk
+++ b/make/Images.gmk
@@ -27,6 +27,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include Execute.gmk
include Modules.gmk
include Utils.gmk
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index 2bdab7f907d..c59e685bf4f 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -536,6 +536,31 @@ else # $(HAS_SPEC)=true
endef
endif
+ ##############################################################################
+ # Functions for timers
+ ##############################################################################
+
+ # Store the build times in this directory.
+ BUILDTIMESDIR=$(OUTPUTDIR)/make-support/build-times
+
+ # Record starting time for build of a sub repository.
+ define RecordStartTime
+ $(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_start_$(strip $1) && \
+ $(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_start_$(strip $1)_human_readable
+ endef
+
+ # Record ending time and calculate the difference and store it in an
+ # easy to read format. Handles builds that cross midnight. Expects
+ # that a build will never take 24 hours or more.
+ define RecordEndTime
+ $(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)
+ $(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)_human_readable
+ $(ECHO) `$(CAT) $(BUILDTIMESDIR)/build_time_start_$(strip $1)` `$(CAT) $(BUILDTIMESDIR)/build_time_end_$(strip $1)` $1 | \
+ $(AWK) '{ F=$$7; T=$$14; if (F > T) { T+=3600*24 }; D=T-F; H=int(D/3600); \
+ M=int((D-H*3600)/60); S=D-H*3600-M*60; printf("%02d:%02d:%02d %s\n",H,M,S,$$15); }' \
+ > $(BUILDTIMESDIR)/build_time_diff_$(strip $1)
+ endef
+
define StartGlobalTimer
$(RM) -r $(BUILDTIMESDIR) 2> /dev/null && \
$(MKDIR) -p $(BUILDTIMESDIR) && \
diff --git a/make/JrtfsJar.gmk b/make/JrtfsJar.gmk
index b0b7ed6ce08..990c8a7e1ed 100644
--- a/make/JrtfsJar.gmk
+++ b/make/JrtfsJar.gmk
@@ -27,6 +27,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
include JarArchive.gmk
include TextFileProcessing.gmk
diff --git a/make/MacBundles.gmk b/make/MacBundles.gmk
index 39da6c9cdb3..8ed6476c506 100644
--- a/make/MacBundles.gmk
+++ b/make/MacBundles.gmk
@@ -25,6 +25,8 @@
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include TextFileProcessing.gmk
default: bundles
diff --git a/make/ModuleWrapper.gmk b/make/ModuleWrapper.gmk
index d83af819a9b..14298d25a53 100644
--- a/make/ModuleWrapper.gmk
+++ b/make/ModuleWrapper.gmk
@@ -35,6 +35,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
+
MODULE_SRC := $(TOPDIR)/src/$(MODULE)
# All makefiles should add the targets to be built to this variable.
diff --git a/make/SourceRevision.gmk b/make/SourceRevision.gmk
index 9a47ae456e2..63bbe5b62fb 100644
--- a/make/SourceRevision.gmk
+++ b/make/SourceRevision.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,16 @@ $(eval $(call IncludeCustomExtension, SourceRevision-pre.gmk))
STORED_SOURCE_REVISION := $(TOPDIR)/.src-rev
+# Locate all sourcecode repositories included in the forest, as absolute paths
+FindAllReposAbs = \
+ $(strip $(sort $(dir $(filter-out $(TOPDIR)/build/%, $(wildcard \
+ $(addprefix $(TOPDIR)/, .git */.git */*/.git */*/*/.git */*/*/*/.git) \
+ )))))
+
+# Locate all sourcecode repositories included in the forest, as relative paths
+FindAllReposRel = \
+ $(strip $(subst $(TOPDIR)/,.,$(patsubst $(TOPDIR)/%/, %, $(FindAllReposAbs))))
+
USE_SCM := false
ifneq ($(and $(GIT), $(wildcard $(TOPDIR)/.git)), )
USE_SCM := true
diff --git a/make/StaticLibsImage.gmk b/make/StaticLibsImage.gmk
index 44a4ba56782..84e97b13b29 100644
--- a/make/StaticLibsImage.gmk
+++ b/make/StaticLibsImage.gmk
@@ -30,6 +30,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include Modules.gmk
ALL_MODULES = $(call FindAllModules)
diff --git a/make/autoconf/spec.gmk.template b/make/autoconf/spec.gmk.template
index e90d9aabf04..2f2525a662a 100644
--- a/make/autoconf/spec.gmk.template
+++ b/make/autoconf/spec.gmk.template
@@ -677,6 +677,10 @@ BUILD_JAR = @FIXPATH@ $(BUILD_JDK)/bin/jar
DOCS_REFERENCE_JAVADOC := @DOCS_REFERENCE_JAVADOC@
+# A file containing a way to uniquely identify the source code revision that
+# the build was created from
+SOURCE_REVISION_TRACKER := $(SUPPORT_OUTPUTDIR)/src-rev/source-revision-tracker
+
# Interim langtools modules and arguments
INTERIM_LANGTOOLS_BASE_MODULES := java.compiler jdk.compiler jdk.javadoc
INTERIM_LANGTOOLS_MODULES := $(addsuffix .interim, $(INTERIM_LANGTOOLS_BASE_MODULES))
diff --git a/make/common/FileUtils.gmk b/make/common/FileUtils.gmk
new file mode 100644
index 00000000000..114f3adefbe
--- /dev/null
+++ b/make/common/FileUtils.gmk
@@ -0,0 +1,303 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+ifeq (,$(_MAKEBASE_GMK))
+ $(error You must include MakeBase.gmk prior to including FileUtils.gmk)
+endif
+
+################################################################################
+#
+# Common file utility functions
+#
+################################################################################
+
+################################################################################
+# Replace question marks with space in string. This macro needs to be called on
+# files from FindFiles in case any of them contains space in their file name,
+# since FindFiles replaces space with ?.
+# Param 1 - String to replace in
+DecodeSpace = \
+ $(subst ?,$(SPACE),$(strip $1))
+
+EncodeSpace = \
+ $(subst $(SPACE),?,$(strip $1))
+
+################################################################################
+# Take two paths and return the path of the last common directory.
+# Ex: /foo/bar/baz, /foo/bar/banan -> /foo/bar
+# foo/bar/baz, /foo/bar ->
+#
+# The x prefix is used to preserve the presence of the initial slash
+# On Windows paths are treated as case-insensitive
+#
+# $1 - Path to compare
+# $2 - Other path to compare
+FindCommonPathPrefix = \
+ $(call DecodeSpace,$(patsubst x%,%,$(subst $(SPACE),/,$(strip \
+ $(call FindCommonPathPrefixHelper1, \
+ $(subst /,$(SPACE),x$(call EncodeSpace,$(strip $1))), \
+ $(subst /,$(SPACE),x$(call EncodeSpace,$(strip $2)))) \
+ ))))
+
+FindCommonPathPrefixHelper1 = \
+ $(if $(filter $(OPENJDK_TARGET_OS), windows), \
+ $(call FindCommonPathPrefixHelper2,$(call uppercase,$1),$(call uppercase,$2),$1), \
+ $(call FindCommonPathPrefixHelper2,$1,$2,$1))
+
+FindCommonPathPrefixHelper2 = \
+ $(if $(call equals, $(firstword $1), $(firstword $2)), \
+ $(if $(call equals, $(firstword $1),),, \
+ $(firstword $3) \
+ $(call FindCommonPathPrefixHelper2, \
+ $(wordlist 2, $(words $1), $1), \
+ $(wordlist 2, $(words $2), $2), \
+ $(wordlist 2, $(words $3), $3) \
+ ) \
+ ) \
+ )
+
+# Computes the relative path from a directory to a file
+# $1 - File to compute the relative path to
+# $2 - Directory to compute the relative path from
+RelativePath = \
+ $(call DecodeSpace,$(strip $(call RelativePathHelper,$(call EncodeSpace \
+ ,$(strip $1)),$(call EncodeSpace \
+ ,$(strip $2)),$(call EncodeSpace \
+ ,$(call FindCommonPathPrefix,$1,$2)))))
+
+RelativePathHelper = \
+ $(eval $3_prefix_length := $(words $(subst /,$(SPACE),$3))) \
+ $(eval $1_words := $(subst /,$(SPACE),$1)) \
+ $(eval $2_words := $(subst /,$(SPACE),$2)) \
+ $(if $(call equals,$($3_prefix_length),0),, \
+ $(eval $1_words := $(wordlist 2,$(words $($1_words)),$(wordlist \
+ $($3_prefix_length),$(words $($1_words)),$($1_words)))) \
+ $(eval $2_words := $(wordlist 2,$(words $($2_words)),$(wordlist \
+ $($3_prefix_length),$(words $($2_words)),$($2_words)))) \
+ ) \
+ $(eval $1_suffix := $(subst $(SPACE),/,$($1_words))) \
+ $(eval $2_dotdots := $(subst $(SPACE),/,$(foreach d,$($2_words),..))) \
+ $(if $($1_suffix), \
+ $(if $($2_dotdots), $($2_dotdots)/$($1_suffix), $($1_suffix)), \
+ $(if $($2_dotdots), $($2_dotdots), .))
+
+# Make directory for target file. Should handle spaces in filenames. Just
+# calling $(call MakeDir $(@D)) will not work if the directory contains a space
+# and the target file already exists. In that case, the target file will have
+# its wildcard ? resolved and the $(@D) will evaluate each space separated dir
+# part on its own.
+MakeTargetDir = \
+ $(call MakeDir, $(dir $(call EncodeSpace, $@)))
+
+################################################################################
+# All install-file and related macros automatically call DecodeSpace when needed.
+
+ifeq ($(call isTargetOs, macosx), true)
+ # On mac, extended attributes sometimes creep into the source files, which may later
+ # cause the creation of ._* files which confuses testing. Clear these with xattr if
+ # set. Some files get their write permissions removed after being copied to the
+ # output dir. When these are copied again to images, xattr would fail. By only clearing
+ # attributes when they are present, failing on this is avoided.
+ #
+ # If copying a soft link to a directory, need to delete the target first to avoid
+ # weird errors.
+ define install-file
+ $(call MakeTargetDir)
+ $(RM) '$(call DecodeSpace, $@)'
+ # Work around a weirdness with cp on Macosx. When copying a symlink, if
+ # the target of the link is write protected (e.g. 444), cp will add
+ # write permission for the user on the target file (644). Avoid this by
+ # using ln to create a new link instead.
+ if [ -h '$(call DecodeSpace, $<)' ]; then \
+ $(LN) -s "`$(READLINK) '$(call DecodeSpace, $<)'`" '$(call DecodeSpace, $@)'; \
+ else \
+ $(CP) -fRP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'; \
+ fi
+ if [ -n "`$(XATTR) -ls '$(call DecodeSpace, $@)'`" ]; then \
+ $(XATTR) -cs '$(call DecodeSpace, $@)'; \
+ fi
+ endef
+else
+ define install-file
+ $(call MakeTargetDir)
+ $(CP) -fP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
+ endef
+endif
+
+# Variant of install file that does not preserve symlinks
+define install-file-nolink
+ $(call MakeTargetDir)
+ $(CP) -f '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
+endef
+
+################################################################################
+# link-file-* works similarly to install-file but creates a symlink instead.
+# There are two versions, either creating a relative or an absolute link. Be
+# careful when using this on Windows since the symlink created is only valid in
+# the unix emulation environment.
+# In msys2 we use mklink /J because its ln would perform a deep copy of the target.
+# This inhibits performance and can lead to issues with long paths. With mklink /J
+# relative linking does not work, so we handle the link as absolute path.
+ifeq ($(OPENJDK_BUILD_OS_ENV), windows.msys2)
+ define link-file-relative
+ $(call MakeTargetDir)
+ $(RM) '$(call DecodeSpace, $@)'
+ cmd //c "mklink /J $(call FixPath, $(call DecodeSpace, $@)) $(call FixPath, $(call DecodeSpace, $<))"
+ endef
+else
+ define link-file-relative
+ $(call MakeTargetDir)
+ $(RM) '$(call DecodeSpace, $@)'
+ $(LN) -s '$(call DecodeSpace, $(call RelativePath, $<, $(@D)))' '$(call DecodeSpace, $@)'
+ endef
+endif
+
+ifeq ($(OPENJDK_BUILD_OS_ENV), windows.msys2)
+ define link-file-absolute
+ $(call MakeTargetDir)
+ $(RM) '$(call DecodeSpace, $@)'
+ cmd //c "mklink /J $(call FixPath, $(call DecodeSpace, $@)) $(call FixPath, $(call DecodeSpace, $<))"
+ endef
+else
+ define link-file-absolute
+ $(call MakeTargetDir)
+ $(RM) '$(call DecodeSpace, $@)'
+ $(LN) -s '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
+ endef
+endif
+
+################################################################################
+
+# Recursive wildcard function. Walks down directories recursively and matches
+# files with the search patterns. Patterns use standard file wildcards (* and
+# ?).
+#
+# $1 - Directories to start search in
+# $2 - Search patterns
+rwildcard = \
+ $(strip \
+ $(foreach d, \
+ $(patsubst %/,%,$(sort $(dir $(wildcard $(addsuffix /*/*, $(strip $1)))))), \
+ $(call rwildcard,$d,$2) \
+ ) \
+ $(call DoubleDollar, $(wildcard $(foreach p, $2, $(addsuffix /$(strip $p), $(strip $1))))) \
+ )
+
+# Find non directories using recursive wildcard function. This function may
+# be used directly when a small number of directories is expected to be
+# searched and caching is not expected to be of use.
+#
+# $1 - Directory to start search in
+# $2 - Optional search patterns, defaults to '*'.
+WildcardFindFiles = \
+ $(sort $(strip \
+ $(eval WildcardFindFiles_result := $(call rwildcard,$(patsubst %/,%,$1),$(if $(strip $2),$2,*))) \
+ $(filter-out $(patsubst %/,%,$(sort $(dir $(WildcardFindFiles_result)))), \
+ $(WildcardFindFiles_result) \
+ ) \
+ ))
+
+# Find non directories using the find utility in the shell. Safe to call for
+# non existing directories, or directories containing wildcards.
+#
+# Files containing space will get spaces replaced with ? because GNU Make
+# cannot handle lists of files with space in them. By using ?, make will match
+# the wildcard to space in many situations so we don't need to replace back
+# to space on every use. While not a complete solution it does allow some uses
+# of FindFiles to function with spaces in file names, including for
+# SetupCopyFiles. Unfortunately this does not work for WildcardFindFiles so
+# if files with spaces are anticipated, use ShellFindFiles directly.
+#
+# $1 - Directories to start search in.
+# $2 - Optional search patterns, empty means find everything. Patterns use
+# standard file wildcards (* and ?) and should not be quoted.
+# $3 - Optional options to find.
+ShellFindFiles = \
+ $(if $(wildcard $1), \
+ $(sort \
+ $(shell $(FIND) $3 $(patsubst %/,%,$(wildcard $1)) \( -type f -o -type l \) \
+ $(if $(strip $2), -a \( -name "$(firstword $2)" \
+ $(foreach p, $(filter-out $(firstword $2), $2), -o -name "$(p)") \)) \
+ | $(TR) ' ' '?' \
+ ) \
+ ) \
+ )
+
+# Find non directories using the method most likely to work best for the
+# current build host
+#
+# $1 - Directory to start search in
+# $2 - Optional search patterns, defaults to '*'.
+ifeq ($(OPENJDK_BUILD_OS)-$(RWILDCARD_WORKS), windows-true)
+ DirectFindFiles = $(WildcardFindFiles)
+else
+ DirectFindFiles = $(ShellFindFiles)
+endif
+
+# Finds files using a cache that is populated by FillFindCache below. If any of
+# the directories given have not been cached, DirectFindFiles is used for
+# everything. Caching is especially useful in Cygwin, where file finds are very
+# costly.
+#
+# $1 - Directories to start search in.
+# $2 - Optional search patterns. If used, no caching is done.
+CacheFindFiles_CACHED_DIRS :=
+CacheFindFiles_CACHED_FILES :=
+CacheFindFiles = \
+ $(if $2, \
+ $(call DirectFindFiles, $1, $2) \
+ , \
+ $(if $(filter-out $(addsuffix /%, $(CacheFindFiles_CACHED_DIRS)) \
+ $(CacheFindFiles_CACHED_DIRS), $1), \
+ $(call DirectFindFiles, $1) \
+ , \
+ $(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(CacheFindFiles_CACHED_FILES)) \
+ ) \
+ )
+
+# Explicitly adds files to the find cache used by CacheFindFiles.
+#
+# $1 - Directories to start search in
+FillFindCache = \
+ $(eval CacheFindFiles_NEW_DIRS := $$(filter-out $$(addsuffix /%,\
+ $$(CacheFindFiles_CACHED_DIRS)) $$(CacheFindFiles_CACHED_DIRS), $1)) \
+ $(if $(CacheFindFiles_NEW_DIRS), \
+ $(eval CacheFindFiles_CACHED_DIRS += $$(patsubst %/,%,$$(CacheFindFiles_NEW_DIRS))) \
+ $(eval CacheFindFiles_CACHED_FILES := $$(sort $$(CacheFindFiles_CACHED_FILES) \
+ $$(call DirectFindFiles, $$(CacheFindFiles_NEW_DIRS)))) \
+ )
+
+# FindFiles is the default macro that should be used to find files in the file
+# system. This function does not always support files with spaces in the names.
+# If files with spaces are anticipated, use ShellFindFiles directly.
+#
+# $1 - Directories to start search in.
+# $2 - Optional search patterns, empty means find everything. Patterns use
+# standard file wildcards (* and ?) and should not be quoted.
+ifeq ($(DISABLE_CACHE_FIND), true)
+ FindFiles = $(DirectFindFiles)
+else
+ FindFiles = $(CacheFindFiles)
+endif
diff --git a/make/common/JarArchive.gmk b/make/common/JarArchive.gmk
index 684736cb289..25b42186666 100644
--- a/make/common/JarArchive.gmk
+++ b/make/common/JarArchive.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@ ifeq (,$(_MAKEBASE_GMK))
$(error You must include MakeBase.gmk prior to including JarArchive.gmk)
endif
+include MakeIO.gmk
+
FALSE_FIND_PATTERN:=-name FILE_NAME_THAT_DOESNT_EXIST
# Setup make rules for creating a jar archive.
diff --git a/make/common/JavaCompilation.gmk b/make/common/JavaCompilation.gmk
index ff7c90e5785..259c1834da3 100644
--- a/make/common/JavaCompilation.gmk
+++ b/make/common/JavaCompilation.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@ ifeq (,$(_MAKEBASE_GMK))
$(error You must include MakeBase.gmk prior to including JavaCompilation.gmk)
endif
+include MakeIO.gmk
+
# Java compilation needs SetupJarArchive and/or SetupZipArchive, if we're
# generating a jar file or a source zip.
include JarArchive.gmk
diff --git a/make/common/MakeBase.gmk b/make/common/MakeBase.gmk
index 3858b652ee6..5af1967e54b 100644
--- a/make/common/MakeBase.gmk
+++ b/make/common/MakeBase.gmk
@@ -24,9 +24,10 @@
#
################################################################
-#
-# Setup common utility functions.
-#
+# MakeBase provides the core functionality needed and used by all makefiles. It
+# should be included by all makefiles. MakeBase provides essential
+# functionality for named parameter functions, variable dependency, tool
+# execution, logging and fixpath functionality.
################################################################
ifndef _MAKEBASE_GMK
@@ -64,9 +65,6 @@ define NEWLINE
endef
-# Make sure we have a value (could be overridden on command line by caller)
-CREATING_BUILDJDK ?= false
-
# Certain features only work in newer version of GNU Make. The build will still
# function in 3.81, but will be less performant.
ifeq (4.0, $(firstword $(sort 4.0 $(MAKE_VERSION))))
@@ -75,67 +73,16 @@ ifeq (4.0, $(firstword $(sort 4.0 $(MAKE_VERSION))))
RWILDCARD_WORKS := true
endif
-
# For convenience, MakeBase.gmk continues to include these separate files, at
# least for now.
-
+# Utils.gmk must be included before FileUtils.gmk, since it uses some of the
+# basic utility functions there.
include $(TOPDIR)/make/common/Utils.gmk
-include $(TOPDIR)/make/common/MakeIO.gmk
-include $(TOPDIR)/make/common/CopyFiles.gmk
+include $(TOPDIR)/make/common/FileUtils.gmk
################################################################################
-# Functions for timers
-################################################################################
-
-# Store the build times in this directory.
-BUILDTIMESDIR=$(OUTPUTDIR)/make-support/build-times
-
-# Record starting time for build of a sub repository.
-define RecordStartTime
- $(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_start_$(strip $1) && \
- $(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_start_$(strip $1)_human_readable
-endef
-
-# Record ending time and calculate the difference and store it in a
-# easy to read format. Handles builds that cross midnight. Expects
-# that a build will never take 24 hours or more.
-define RecordEndTime
- $(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)
- $(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)_human_readable
- $(ECHO) `$(CAT) $(BUILDTIMESDIR)/build_time_start_$(strip $1)` `$(CAT) $(BUILDTIMESDIR)/build_time_end_$(strip $1)` $1 | \
- $(AWK) '{ F=$$7; T=$$14; if (F > T) { T+=3600*24 }; D=T-F; H=int(D/3600); \
- M=int((D-H*3600)/60); S=D-H*3600-M*60; printf("%02d:%02d:%02d %s\n",H,M,S,$$15); }' \
- > $(BUILDTIMESDIR)/build_time_diff_$(strip $1)
-endef
-
-# Hook to be called when starting to execute a top-level target
-define TargetEnter
- $(PRINTF) "## Starting $(patsubst %-only,%,$@)\n"
- $(call RecordStartTime,$(patsubst %-only,%,$@))
-endef
-
-# Hook to be called when finish executing a top-level target
-define TargetExit
- $(call RecordEndTime,$(patsubst %-only,%,$@))
- $(PRINTF) "## Finished $(patsubst %-only,%,$@) (build time %s)\n\n" \
- "`$(CAT) $(BUILDTIMESDIR)/build_time_diff_$(patsubst %-only,%,$@) | $(CUT) -f 1 -d ' '`"
-endef
-
-################################################################################
-
-# A file containing a way to uniquely identify the source code revision that
-# the build was created from
-SOURCE_REVISION_TRACKER := $(SUPPORT_OUTPUTDIR)/src-rev/source-revision-tracker
-
-# Locate all sourcecode repositories included in the forest, as absolute paths
-FindAllReposAbs = \
- $(strip $(sort $(dir $(filter-out $(TOPDIR)/build/%, $(wildcard \
- $(addprefix $(TOPDIR)/, .git */.git */*/.git */*/*/.git */*/*/*/.git) \
- )))))
-
-# Locate all sourcecode repositories included in the forest, as relative paths
-FindAllReposRel = \
- $(strip $(subst $(TOPDIR)/,.,$(patsubst $(TOPDIR)/%/, %, $(FindAllReposAbs))))
+# Make sure we have a value (could be overridden on command line by caller)
+CREATING_BUILDJDK ?= false
################################################################################
@@ -237,224 +184,6 @@ define NamedParamsMacroTemplate
$(call $(0)Body,$(strip $1))
endef
-################################################################################
-# Make directory without forking mkdir if not needed.
-#
-# If a directory with an encoded space is provided, the wildcard function
-# sometimes returns false answers (typically if the dir existed when the
-# makefile was parsed, but was deleted by a previous rule). In that case, always
-# call mkdir regardless of what wildcard says.
-#
-# 1: List of directories to create
-MakeDir = \
- $(strip \
- $(eval MakeDir_dirs_to_make := $(strip $(foreach d, $1, \
- $(if $(findstring ?, $d), '$(call DecodeSpace, $d)', \
- $(if $(wildcard $d), , $d) \
- ) \
- ))) \
- $(if $(MakeDir_dirs_to_make), $(shell $(MKDIR) -p $(MakeDir_dirs_to_make))) \
- )
-
-# Make directory for target file. Should handle spaces in filenames. Just
-# calling $(call MakeDir $(@D)) will not work if the directory contains a space
-# and the target file already exists. In that case, the target file will have
-# its wildcard ? resolved and the $(@D) will evaluate each space separated dir
-# part on its own.
-MakeTargetDir = \
- $(call MakeDir, $(dir $(call EncodeSpace, $@)))
-
-################################################################################
-# All install-file and related macros automatically call DecodeSpace when needed.
-
-ifeq ($(call isTargetOs, macosx), true)
- # On mac, extended attributes sometimes creep into the source files, which may later
- # cause the creation of ._* files which confuses testing. Clear these with xattr if
- # set. Some files get their write permissions removed after being copied to the
- # output dir. When these are copied again to images, xattr would fail. By only clearing
- # attributes when they are present, failing on this is avoided.
- #
- # If copying a soft link to a directory, need to delete the target first to avoid
- # weird errors.
- define install-file
- $(call MakeTargetDir)
- $(RM) '$(call DecodeSpace, $@)'
- # Work around a weirdness with cp on Macosx. When copying a symlink, if
- # the target of the link is write protected (e.g. 444), cp will add
- # write permission for the user on the target file (644). Avoid this by
- # using ln to create a new link instead.
- if [ -h '$(call DecodeSpace, $<)' ]; then \
- $(LN) -s "`$(READLINK) '$(call DecodeSpace, $<)'`" '$(call DecodeSpace, $@)'; \
- else \
- $(CP) -fRP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'; \
- fi
- if [ -n "`$(XATTR) -ls '$(call DecodeSpace, $@)'`" ]; then \
- $(XATTR) -cs '$(call DecodeSpace, $@)'; \
- fi
- endef
-else
- define install-file
- $(call MakeTargetDir)
- $(CP) -fP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
- endef
-endif
-
-# Variant of install file that does not preserve symlinks
-define install-file-nolink
- $(call MakeTargetDir)
- $(CP) -f '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
-endef
-
-################################################################################
-# link-file-* works similarly to install-file but creates a symlink instead.
-# There are two versions, either creating a relative or an absolute link. Be
-# careful when using this on Windows since the symlink created is only valid in
-# the unix emulation environment.
-# In msys2 we use mklink /J because its ln would perform a deep copy of the target.
-# This inhibits performance and can lead to issues with long paths. With mklink /J
-# relative linking does not work, so we handle the link as absolute path.
-ifeq ($(OPENJDK_BUILD_OS_ENV), windows.msys2)
- define link-file-relative
- $(call MakeTargetDir)
- $(RM) '$(call DecodeSpace, $@)'
- cmd //c "mklink /J $(call FixPath, $(call DecodeSpace, $@)) $(call FixPath, $(call DecodeSpace, $<))"
- endef
-else
- define link-file-relative
- $(call MakeTargetDir)
- $(RM) '$(call DecodeSpace, $@)'
- $(LN) -s '$(call DecodeSpace, $(call RelativePath, $<, $(@D)))' '$(call DecodeSpace, $@)'
- endef
-endif
-
-ifeq ($(OPENJDK_BUILD_OS_ENV), windows.msys2)
- define link-file-absolute
- $(call MakeTargetDir)
- $(RM) '$(call DecodeSpace, $@)'
- cmd //c "mklink /J $(call FixPath, $(call DecodeSpace, $@)) $(call FixPath, $(call DecodeSpace, $<))"
- endef
-else
- define link-file-absolute
- $(call MakeTargetDir)
- $(RM) '$(call DecodeSpace, $@)'
- $(LN) -s '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
- endef
-endif
-
-################################################################################
-
-# Recursive wildcard function. Walks down directories recursively and matches
-# files with the search patterns. Patterns use standard file wildcards (* and
-# ?).
-#
-# $1 - Directories to start search in
-# $2 - Search patterns
-rwildcard = \
- $(strip \
- $(foreach d, \
- $(patsubst %/,%,$(sort $(dir $(wildcard $(addsuffix /*/*, $(strip $1)))))), \
- $(call rwildcard,$d,$2) \
- ) \
- $(call DoubleDollar, $(wildcard $(foreach p, $2, $(addsuffix /$(strip $p), $(strip $1))))) \
- )
-
-# Find non directories using recursive wildcard function. This function may
-# be used directly when a small amount of directories is expected to be
-# searched and caching is not expected to be of use.
-#
-# $1 - Directory to start search in
-# $2 - Optional search patterns, defaults to '*'.
-WildcardFindFiles = \
- $(sort $(strip \
- $(eval WildcardFindFiles_result := $(call rwildcard,$(patsubst %/,%,$1),$(if $(strip $2),$2,*))) \
- $(filter-out $(patsubst %/,%,$(sort $(dir $(WildcardFindFiles_result)))), \
- $(WildcardFindFiles_result) \
- ) \
- ))
-
-# Find non directories using the find utility in the shell. Safe to call for
-# non existing directories, or directories containing wildcards.
-#
-# Files containing space will get spaces replaced with ? because GNU Make
-# cannot handle lists of files with space in them. By using ?, make will match
-# the wildcard to space in many situations so we don't need to replace back
-# to space on every use. While not a complete solution it does allow some uses
-# of FindFiles to function with spaces in file names, including for
-# SetupCopyFiles. Unfortunately this does not work for WildcardFindFiles so
-# if files with spaces are anticipated, use ShellFindFiles directly.
-#
-# $1 - Directories to start search in.
-# $2 - Optional search patterns, empty means find everything. Patterns use
-# standard file wildcards (* and ?) and should not be quoted.
-# $3 - Optional options to find.
-ShellFindFiles = \
- $(if $(wildcard $1), \
- $(sort \
- $(shell $(FIND) $3 $(patsubst %/,%,$(wildcard $1)) \( -type f -o -type l \) \
- $(if $(strip $2), -a \( -name "$(firstword $2)" \
- $(foreach p, $(filter-out $(firstword $2), $2), -o -name "$(p)") \)) \
- | $(TR) ' ' '?' \
- ) \
- ) \
- )
-
-# Find non directories using the method most likely to work best for the
-# current build host
-#
-# $1 - Directory to start search in
-# $2 - Optional search patterns, defaults to '*'.
-ifeq ($(OPENJDK_BUILD_OS)-$(RWILDCARD_WORKS), windows-true)
- DirectFindFiles = $(WildcardFindFiles)
-else
- DirectFindFiles = $(ShellFindFiles)
-endif
-
-# Finds files using a cache that is populated by FillFindCache below. If any of
-# the directories given have not been cached, DirectFindFiles is used for
-# everything. Caching is especially useful in Cygwin, where file finds are very
-# costly.
-#
-# $1 - Directories to start search in.
-# $2 - Optional search patterns. If used, no caching is done.
-CacheFindFiles_CACHED_DIRS :=
-CacheFindFiles_CACHED_FILES :=
-CacheFindFiles = \
- $(if $2, \
- $(call DirectFindFiles, $1, $2) \
- , \
- $(if $(filter-out $(addsuffix /%, $(CacheFindFiles_CACHED_DIRS)) \
- $(CacheFindFiles_CACHED_DIRS), $1), \
- $(call DirectFindFiles, $1) \
- , \
- $(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(CacheFindFiles_CACHED_FILES)) \
- ) \
- )
-
-# Explicitly adds files to the find cache used by CacheFindFiles.
-#
-# $1 - Directories to start search in
-FillFindCache = \
- $(eval CacheFindFiles_NEW_DIRS := $$(filter-out $$(addsuffix /%,\
- $$(CacheFindFiles_CACHED_DIRS)) $$(CacheFindFiles_CACHED_DIRS), $1)) \
- $(if $(CacheFindFiles_NEW_DIRS), \
- $(eval CacheFindFiles_CACHED_DIRS += $$(patsubst %/,%,$$(CacheFindFiles_NEW_DIRS))) \
- $(eval CacheFindFiles_CACHED_FILES := $$(sort $$(CacheFindFiles_CACHED_FILES) \
- $$(call DirectFindFiles, $$(CacheFindFiles_NEW_DIRS)))) \
- )
-
-# Findfiles is the default macro that should be used to find files in the file
-# system. This function does not always support files with spaces in the names.
-# If files with spaces are anticipated, use ShellFindFiles directly.
-#
-# $1 - Directories to start search in.
-# $2 - Optional search patterns, empty means find everything. Patterns use
-# standard file wildcards (* and ?) and should not be quoted.
-ifeq ($(DISABLE_CACHE_FIND), true)
- FindFiles = $(DirectFindFiles)
-else
- FindFiles = $(CacheFindFiles)
-endif
-
################################################################################
# FixPath
#
diff --git a/make/common/MakeIO.gmk b/make/common/MakeIO.gmk
index 75a387c34d9..865c5cae2e5 100644
--- a/make/common/MakeIO.gmk
+++ b/make/common/MakeIO.gmk
@@ -241,32 +241,3 @@ else # HAS_FILE_FUNCTION = false
$$(call ListPathsSafely_IfPrintf,$1,$2,29751,30000)
endef
endif # HAS_FILE_FUNCTION
-
-################################################################################
-# Write to and read from file
-
-# Param 1 - File to read
-ReadFile = \
- $(shell $(CAT) $1)
-
-# Param 1 - Text to write
-# Param 2 - File to write to
-ifeq ($(HAS_FILE_FUNCTION), true)
- WriteFile = \
- $(file >$2,$(strip $1))
-else
- # Use printf to get consistent behavior on all platforms.
- WriteFile = \
- $(shell $(PRINTF) "%s\n" $(strip $(call ShellQuote, $1)) > $2)
-endif
-
-# Param 1 - Text to write
-# Param 2 - File to write to
-ifeq ($(HAS_FILE_FUNCTION), true)
- AppendFile = \
- $(file >>$2,$(strip $1))
-else
- # Use printf to get consistent behavior on all platforms.
- AppendFile = \
- $(shell $(PRINTF) "%s\n" $(strip $(call ShellQuote, $1)) >> $2)
-endif
diff --git a/make/common/NativeCompilation.gmk b/make/common/NativeCompilation.gmk
index afae920f755..3f31865c247 100644
--- a/make/common/NativeCompilation.gmk
+++ b/make/common/NativeCompilation.gmk
@@ -36,6 +36,7 @@ ifeq ($(_MAKEBASE_GMK), )
$(error You must include MakeBase.gmk prior to including NativeCompilation.gmk)
endif
+include MakeIO.gmk
include native/CompileFile.gmk
include native/DebugSymbols.gmk
include native/Flags.gmk
diff --git a/make/common/Utils.gmk b/make/common/Utils.gmk
index 00e73f7dd6c..7f1cbd61f38 100644
--- a/make/common/Utils.gmk
+++ b/make/common/Utils.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -29,29 +29,34 @@ endif
################################################################################
#
-# Common utility functions
+# Basic utility functions available to MakeBase.gmk itself
#
################################################################################
-### Debug functions
+# String equals
+equals = \
+ $(if $(strip $1)$(strip $2),$(strip \
+ $(and $(findstring $(strip $1),$(strip $2)),\
+ $(findstring $(strip $2),$(strip $1)))), \
+ true \
+ )
-# Prints the name and value of a variable
-PrintVar = \
- $(info $(strip $1) >$($(strip $1))<)
+# Convert the string given to upper case, without any $(shell)
+# Inspired by http://lists.gnu.org/archive/html/help-make/2013-09/msg00009.html
+uppercase_table := a,A b,B c,C d,D e,E f,F g,G h,H i,I j,J k,K l,L m,M n,N o,O \
+ p,P q,Q r,R s,S t,T u,U v,V w,W x,X y,Y z,Z
-################################################################################
-# This macro translates $ into \$ to protect the $ from expansion in the shell.
-# To make this macro resilient against already escaped strings, first remove
-# any present escapes before escaping so that no double escapes are added.
-EscapeDollar = $(subst $$,\$$,$(subst \$$,$$,$(strip $1)))
+uppercase_internal = \
+ $(if $(strip $1), $$(subst $(firstword $1), $(call uppercase_internal, \
+ $(wordlist 2, $(words $1), $1), $2)), $2)
-################################################################################
-# This macro works just like EscapeDollar above, but for #.
-EscapeHash = $(subst \#,\\\#,$(subst \\\#,\#,$(strip $1)))
-
-################################################################################
-# This macro translates $ into $$ to protect the string from make itself.
-DoubleDollar = $(subst $$,$$$$,$(strip $1))
+# Convert a string to upper case. Works only on a-z.
+# $1 - The string to convert
+uppercase = \
+ $(strip \
+ $(eval uppercase_result := $(call uppercase_internal, $(uppercase_table), $1)) \
+ $(uppercase_result) \
+ )
################################################################################
# Creates a sequence of increasing numbers (inclusive).
@@ -68,23 +73,142 @@ _sequence-do = \
$(words $(SEQUENCE_COUNT)) \
$(call _sequence-do,$1))
+################################################################################
+# This macro translates $ into \$ to protect the $ from expansion in the shell.
+# To make this macro resilient against already escaped strings, first remove
+# any present escapes before escaping so that no double escapes are added.
+EscapeDollar = $(subst $$,\$$,$(subst \$$,$$,$(strip $1)))
+
+################################################################################
+# This macro works just like EscapeDollar above, but for #.
+EscapeHash = $(subst \#,\\\#,$(subst \\\#,\#,$(strip $1)))
+
+################################################################################
+# This macro translates $ into $$ to protect the string from make itself.
+DoubleDollar = $(subst $$,$$$$,$(strip $1))
+
+################################################################################
+# ShellQuote
+#
+# Quotes a string with single quotes and replaces single quotes with '\'' so
+# that the contents survives being given to the shell.
+ShellQuote = \
+ $(SQUOTE)$(subst $(SQUOTE),$(SQUOTE)\$(SQUOTE)$(SQUOTE),$(strip $1))$(SQUOTE)
+
+################################################################################
+# Write to and read from file
+
+# Param 1 - File to read
+ReadFile = \
+ $(shell $(CAT) $1)
+
+# Param 1 - Text to write
+# Param 2 - File to write to
+ifeq ($(HAS_FILE_FUNCTION), true)
+ WriteFile = \
+ $(file >$2,$(strip $1))
+else
+ # Use printf to get consistent behavior on all platforms.
+ WriteFile = \
+ $(shell $(PRINTF) "%s\n" $(strip $(call ShellQuote, $1)) > $2)
+endif
+
+# Param 1 - Text to write
+# Param 2 - File to write to
+ifeq ($(HAS_FILE_FUNCTION), true)
+ AppendFile = \
+ $(file >>$2,$(strip $1))
+else
+ # Use printf to get consistent behavior on all platforms.
+ AppendFile = \
+ $(shell $(PRINTF) "%s\n" $(strip $(call ShellQuote, $1)) >> $2)
+endif
+
+################################################################################
+# Make directory without forking mkdir if not needed.
+#
+# If a directory with an encoded space is provided, the wildcard function
+# sometimes returns false answers (typically if the dir existed when the
+# makefile was parsed, but was deleted by a previous rule). In that case, always
+# call mkdir regardless of what wildcard says.
+#
+# 1: List of directories to create
+MakeDir = \
+ $(strip \
+ $(eval MakeDir_dirs_to_make := $(strip $(foreach d, $1, \
+ $(if $(findstring ?, $d), '$(call DecodeSpace, $d)', \
+ $(if $(wildcard $d), , $d) \
+ ) \
+ ))) \
+ $(if $(MakeDir_dirs_to_make), $(shell $(MKDIR) -p $(MakeDir_dirs_to_make))) \
+ )
+
+################################################################################
+# Check if our build or target conforms to certain restrictions. This set of
+# functions all work in similar ways, testing the property that the name
+# implies, so e.g. isTargetCpu test the CPU of the target system.
+#
+# $1 - A property, or a space separated list of properties to test for.
+#
+# Returns true if the actual property matches one of the properties in the list,
+# and false otherwise.
+#
+# Examples: $(call isTargetOs, linux windows) will return true when executed
+# on either linux or windows, and false otherwise.
+# $(call isBuildCpuArch, x86) will return true iff the build CPU Arch is x86.
+
+isTargetOs = \
+ $(strip $(if $(filter $(OPENJDK_TARGET_OS), $1), true, false))
+
+isTargetOsType = \
+ $(strip $(if $(filter $(OPENJDK_TARGET_OS_TYPE), $1), true, false))
+
+isTargetCpu = \
+ $(strip $(if $(filter $(OPENJDK_TARGET_CPU), $1), true, false))
+
+isTargetCpuArch = \
+ $(strip $(if $(filter $(OPENJDK_TARGET_CPU_ARCH), $1), true, false))
+
+isTargetCpuBits = \
+ $(strip $(if $(filter $(OPENJDK_TARGET_CPU_BITS), $1), true, false))
+
+isBuildOs = \
+ $(strip $(if $(filter $(OPENJDK_BUILD_OS), $1), true, false))
+
+isBuildOsType = \
+ $(strip $(if $(filter $(OPENJDK_BUILD_OS_TYPE), $1), true, false))
+
+isBuildOsEnv = \
+ $(strip $(if $(filter $(OPENJDK_BUILD_OS_ENV), $1), true, false))
+
+isBuildCpu = \
+ $(strip $(if $(filter $(OPENJDK_BUILD_CPU), $1), true, false))
+
+isBuildCpuArch = \
+ $(strip $(if $(filter $(OPENJDK_BUILD_CPU_ARCH), $1), true, false))
+
+isCompiler = \
+ $(strip $(if $(filter $(TOOLCHAIN_TYPE), $1), true, false))
+
+################################################################################
+#
+# Common utility functions
+#
+################################################################################
+
+### Debug functions
+
+# Prints the name and value of a variable
+PrintVar = \
+ $(info $(strip $1) >$($(strip $1))<)
+
+
################################################################################
# Strip both arguments. Append the first argument to the second argument. If the
# first argument is empty, return the empty string.
IfAppend = \
$(if $(strip $1),$(strip $1)$(strip $2),)
-################################################################################
-# Replace question marks with space in string. This macro needs to be called on
-# files from FindFiles in case any of them contains space in their file name,
-# since FindFiles replaces space with ?.
-# Param 1 - String to replace in
-DecodeSpace = \
- $(subst ?,$(SPACE),$(strip $1))
-
-EncodeSpace = \
- $(subst $(SPACE),?,$(strip $1))
-
################################################################################
# Assign a variable only if it is empty
# Param 1 - Variable to assign
@@ -92,65 +216,6 @@ EncodeSpace = \
SetIfEmpty = \
$(if $($(strip $1)),,$(eval $(strip $1) := $2))
-################################################################################
-# Take two paths and return the path of the last common directory.
-# Ex: /foo/bar/baz, /foo/bar/banan -> /foo/bar
-# foo/bar/baz, /foo/bar ->
-#
-# The x prefix is used to preserve the presence of the initial slash
-# On Windows paths are treated as case-insensitive
-#
-# $1 - Path to compare
-# $2 - Other path to compare
-FindCommonPathPrefix = \
- $(call DecodeSpace,$(patsubst x%,%,$(subst $(SPACE),/,$(strip \
- $(call FindCommonPathPrefixHelper1, \
- $(subst /,$(SPACE),x$(call EncodeSpace,$(strip $1))), \
- $(subst /,$(SPACE),x$(call EncodeSpace,$(strip $2)))) \
- ))))
-
-FindCommonPathPrefixHelper1 = \
- $(if $(filter $(OPENJDK_TARGET_OS), windows), \
- $(call FindCommonPathPrefixHelper2,$(call uppercase,$1),$(call uppercase,$2),$1), \
- $(call FindCommonPathPrefixHelper2,$1,$2,$1))
-
-FindCommonPathPrefixHelper2 = \
- $(if $(call equals, $(firstword $1), $(firstword $2)), \
- $(if $(call equals, $(firstword $1),),, \
- $(firstword $3) \
- $(call FindCommonPathPrefixHelper2, \
- $(wordlist 2, $(words $1), $1), \
- $(wordlist 2, $(words $2), $2), \
- $(wordlist 2, $(words $3), $3) \
- ) \
- ) \
- )
-
-# Computes the relative path from a directory to a file
-# $1 - File to compute the relative path to
-# $2 - Directory to compute the relative path from
-RelativePath = \
- $(call DecodeSpace,$(strip $(call RelativePathHelper,$(call EncodeSpace \
- ,$(strip $1)),$(call EncodeSpace \
- ,$(strip $2)),$(call EncodeSpace \
- ,$(call FindCommonPathPrefix,$1,$2)))))
-
-RelativePathHelper = \
- $(eval $3_prefix_length := $(words $(subst /,$(SPACE),$3))) \
- $(eval $1_words := $(subst /,$(SPACE),$1)) \
- $(eval $2_words := $(subst /,$(SPACE),$2)) \
- $(if $(call equals,$($3_prefix_length),0),, \
- $(eval $1_words := $(wordlist 2,$(words $($1_words)),$(wordlist \
- $($3_prefix_length),$(words $($1_words)),$($1_words)))) \
- $(eval $2_words := $(wordlist 2,$(words $($2_words)),$(wordlist \
- $($3_prefix_length),$(words $($2_words)),$($2_words)))) \
- ) \
- $(eval $1_suffix := $(subst $(SPACE),/,$($1_words))) \
- $(eval $2_dotdots := $(subst $(SPACE),/,$(foreach d,$($2_words),..))) \
- $(if $($1_suffix), \
- $(if $($2_dotdots), $($2_dotdots)/$($1_suffix), $($1_suffix)), \
- $(if $($2_dotdots), $($2_dotdots), .))
-
################################################################################
# Filter out duplicate sub strings while preserving order. Keeps the first occurrence.
uniq = \
@@ -173,14 +238,6 @@ dups = \
$(strip $(foreach v, $(sort $1), $(if $(filter-out 1, \
$(words $(filter $v, $1))), $v)))
-# String equals
-equals = \
- $(if $(strip $1)$(strip $2),$(strip \
- $(and $(findstring $(strip $1),$(strip $2)),\
- $(findstring $(strip $2),$(strip $1)))), \
- true \
- )
-
# Remove a whole list of prefixes
# $1 - List of prefixes
# $2 - List of elements to process
@@ -188,23 +245,6 @@ remove-prefixes = \
$(strip $(if $1,$(patsubst $(firstword $1)%,%,\
$(call remove-prefixes,$(filter-out $(firstword $1),$1),$2)),$2))
-# Convert the string given to upper case, without any $(shell)
-# Inspired by http://lists.gnu.org/archive/html/help-make/2013-09/msg00009.html
-uppercase_table := a,A b,B c,C d,D e,E f,F g,G h,H i,I j,J k,K l,L m,M n,N o,O \
- p,P q,Q r,R s,S t,T u,U v,V w,W x,X y,Y z,Z
-
-uppercase_internal = \
- $(if $(strip $1), $$(subst $(firstword $1), $(call uppercase_internal, \
- $(wordlist 2, $(words $1), $1), $2)), $2)
-
-# Convert a string to upper case. Works only on a-z.
-# $1 - The string to convert
-uppercase = \
- $(strip \
- $(eval uppercase_result := $(call uppercase_internal, $(uppercase_table), $1)) \
- $(uppercase_result) \
- )
-
################################################################################
# Boolean operators.
@@ -290,14 +330,6 @@ define ParseKeywordVariableBody
endif
endef
-################################################################################
-# ShellQuote
-#
-# Quotes a string with single quotes and replaces single quotes with '\'' so
-# that the contents survives being given to the shell.
-ShellQuote = \
- $(SQUOTE)$(subst $(SQUOTE),$(SQUOTE)\$(SQUOTE)$(SQUOTE),$(strip $1))$(SQUOTE)
-
################################################################################
# Find lib dir for module
# Param 1 - module name
@@ -329,53 +361,6 @@ check-jvm-variant = \
$(error Internal error: Invalid variant tested: $1)) \
$(if $(filter $1, $(JVM_VARIANTS)), true, false))
-################################################################################
-# Check if our build or target conforms to certain restrictions. This set of
-# functions all work in similar ways, testing the property that the name
-# implies, so e.g. isTargetCpu test the CPU of the target system.
-#
-# $1 - A property, or a space separated list of properties to test for.
-#
-# Returns true if the actual property matches one of the properties in the list,
-# and false otherwise.
-#
-# Examples: $(call isTargetOs, linux windows) will return true when executed
-# on either linux or windows, and false otherwise.
-# $(call isBuildCpuArch, x86) will return true iff the build CPU Arch is x86.
-
-isTargetOs = \
- $(strip $(if $(filter $(OPENJDK_TARGET_OS), $1), true, false))
-
-isTargetOsType = \
- $(strip $(if $(filter $(OPENJDK_TARGET_OS_TYPE), $1), true, false))
-
-isTargetCpu = \
- $(strip $(if $(filter $(OPENJDK_TARGET_CPU), $1), true, false))
-
-isTargetCpuArch = \
- $(strip $(if $(filter $(OPENJDK_TARGET_CPU_ARCH), $1), true, false))
-
-isTargetCpuBits = \
- $(strip $(if $(filter $(OPENJDK_TARGET_CPU_BITS), $1), true, false))
-
-isBuildOs = \
- $(strip $(if $(filter $(OPENJDK_BUILD_OS), $1), true, false))
-
-isBuildOsType = \
- $(strip $(if $(filter $(OPENJDK_BUILD_OS_TYPE), $1), true, false))
-
-isBuildOsEnv = \
- $(strip $(if $(filter $(OPENJDK_BUILD_OS_ENV), $1), true, false))
-
-isBuildCpu = \
- $(strip $(if $(filter $(OPENJDK_BUILD_CPU), $1), true, false))
-
-isBuildCpuArch = \
- $(strip $(if $(filter $(OPENJDK_BUILD_CPU_ARCH), $1), true, false))
-
-isCompiler = \
- $(strip $(if $(filter $(TOOLCHAIN_TYPE), $1), true, false))
-
################################################################################
# Converts a space separated list to a comma separated list.
#
diff --git a/make/common/modules/CopyCommon.gmk b/make/common/modules/CopyCommon.gmk
index b8cf880d673..75bfb0d62b1 100644
--- a/make/common/modules/CopyCommon.gmk
+++ b/make/common/modules/CopyCommon.gmk
@@ -23,6 +23,8 @@
# questions.
#
+include CopyFiles.gmk
+
LIB_DST_DIR := $(SUPPORT_OUTPUTDIR)/modules_libs/$(MODULE)
CONF_DST_DIR := $(SUPPORT_OUTPUTDIR)/modules_conf/$(MODULE)
LEGAL_DST_DIR := $(SUPPORT_OUTPUTDIR)/modules_legal/$(MODULE)
diff --git a/make/common/modules/LauncherCommon.gmk b/make/common/modules/LauncherCommon.gmk
index f072cbfb9bf..a801631a9c9 100644
--- a/make/common/modules/LauncherCommon.gmk
+++ b/make/common/modules/LauncherCommon.gmk
@@ -23,6 +23,7 @@
# questions.
#
+include CopyFiles.gmk
include JdkNativeCompilation.gmk
include Modules.gmk
include ProcessMarkdown.gmk
diff --git a/make/hotspot/CopyToExplodedJdk.gmk b/make/hotspot/CopyToExplodedJdk.gmk
index d8012f6878f..4462af0dc3b 100644
--- a/make/hotspot/CopyToExplodedJdk.gmk
+++ b/make/hotspot/CopyToExplodedJdk.gmk
@@ -23,6 +23,8 @@
# questions.
#
+include CopyFiles.gmk
+
# Copy all built libraries into exploded jdk
LIB_TARGETS := $(filter $(LIB_OUTPUTDIR)/%, $(TARGETS))
ifeq ($(call isTargetOs, windows), true)
diff --git a/make/hotspot/gensrc/GensrcJvmti.gmk b/make/hotspot/gensrc/GensrcJvmti.gmk
index b31a6f52292..718766cacae 100644
--- a/make/hotspot/gensrc/GensrcJvmti.gmk
+++ b/make/hotspot/gensrc/GensrcJvmti.gmk
@@ -23,6 +23,8 @@
# questions.
#
+include CopyFiles.gmk
+
$(eval $(call IncludeCustomExtension, hotspot/gensrc/GensrcJvmti.gmk))
################################################################################
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index e887885db2c..47f987092f9 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -23,6 +23,8 @@
# questions.
#
+include CopyFiles.gmk
+
# Include support files that will setup compiler flags due to the selected
# jvm feature set, specific file overrides, and general flags.
include lib/JvmFeatures.gmk
diff --git a/make/hotspot/test/GtestImage.gmk b/make/hotspot/test/GtestImage.gmk
index 9b2a37962cd..b6256cd575b 100644
--- a/make/hotspot/test/GtestImage.gmk
+++ b/make/hotspot/test/GtestImage.gmk
@@ -28,6 +28,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
+
$(foreach v, $(JVM_VARIANTS), \
$(eval $(call SetupCopyFiles, COPY_GTEST_$v, \
SRC := $(HOTSPOT_OUTPUTDIR)/variant-$v/libjvm/gtest, \
diff --git a/make/modules/java.base/Lib.gmk b/make/modules/java.base/Lib.gmk
index d09e33d489f..3a24c5e4550 100644
--- a/make/modules/java.base/Lib.gmk
+++ b/make/modules/java.base/Lib.gmk
@@ -23,6 +23,7 @@
# questions.
#
+include CopyFiles.gmk
include LibCommon.gmk
# Hook to include the corresponding custom file, if present.
diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
index c30901e1543..868e6fb7a7c 100644
--- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk
+++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
@@ -23,6 +23,8 @@
# questions.
#
+include CopyFiles.gmk
+
LIBAWT_DEFAULT_HEADER_DIRS := \
common/awt/utility \
libawt/awt/image \
diff --git a/make/modules/jdk.compiler/Gendata.gmk b/make/modules/jdk.compiler/Gendata.gmk
index e9ff3c439be..32fa70462b2 100644
--- a/make/modules/jdk.compiler/Gendata.gmk
+++ b/make/modules/jdk.compiler/Gendata.gmk
@@ -23,6 +23,7 @@
# questions.
#
+include CopyFiles.gmk
include JarArchive.gmk
include JavaCompilation.gmk
include Modules.gmk
diff --git a/make/test/BuildFailureHandler.gmk b/make/test/BuildFailureHandler.gmk
index 6c9876f80c7..005cbb330f6 100644
--- a/make/test/BuildFailureHandler.gmk
+++ b/make/test/BuildFailureHandler.gmk
@@ -27,6 +27,8 @@ default: build
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
TARGETS :=
diff --git a/make/test/BuildJtregTestThreadFactory.gmk b/make/test/BuildJtregTestThreadFactory.gmk
index b096ae303ea..aa9808dd584 100644
--- a/make/test/BuildJtregTestThreadFactory.gmk
+++ b/make/test/BuildJtregTestThreadFactory.gmk
@@ -27,6 +27,8 @@ default: build
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
TARGETS :=
diff --git a/make/test/BuildMicrobenchmark.gmk b/make/test/BuildMicrobenchmark.gmk
index ba502a56128..bb1e6111baa 100644
--- a/make/test/BuildMicrobenchmark.gmk
+++ b/make/test/BuildMicrobenchmark.gmk
@@ -28,6 +28,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
include TestFilesCompilation.gmk
diff --git a/make/test/BuildTestLib.gmk b/make/test/BuildTestLib.gmk
index 54c4c61642c..00642a1e7cf 100644
--- a/make/test/BuildTestLib.gmk
+++ b/make/test/BuildTestLib.gmk
@@ -33,6 +33,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include JavaCompilation.gmk
################################################################################
diff --git a/make/test/BuildTestLibNative.gmk b/make/test/BuildTestLibNative.gmk
index 455936d163f..63dcbbf008b 100644
--- a/make/test/BuildTestLibNative.gmk
+++ b/make/test/BuildTestLibNative.gmk
@@ -33,6 +33,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include TestFilesCompilation.gmk
################################################################################
diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk
index 33532f77974..887b530bfca 100644
--- a/make/test/JtregNativeHotspot.gmk
+++ b/make/test/JtregNativeHotspot.gmk
@@ -33,6 +33,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, test/JtregNativeHotspot.gmk))
diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk
index ae830501635..46eeb7684b6 100644
--- a/make/test/JtregNativeJdk.gmk
+++ b/make/test/JtregNativeJdk.gmk
@@ -33,6 +33,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, test/JtregNativeJdk.gmk))
diff --git a/make/test/JtregNativeLibTest.gmk b/make/test/JtregNativeLibTest.gmk
index 7ab45cce878..838d37f8145 100644
--- a/make/test/JtregNativeLibTest.gmk
+++ b/make/test/JtregNativeLibTest.gmk
@@ -33,6 +33,8 @@ default: all
include $(SPEC)
include MakeBase.gmk
+
+include CopyFiles.gmk
include TestFilesCompilation.gmk
$(eval $(call IncludeCustomExtension, test/JtregNativeLibTest.gmk))
diff --git a/test/make/TestCopyFiles.gmk b/test/make/TestCopyFiles.gmk
index cbfa1cc003c..cc05f89db23 100644
--- a/test/make/TestCopyFiles.gmk
+++ b/test/make/TestCopyFiles.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@ default: all
include $(SPEC)
include MakeBase.gmk
+include CopyFiles.gmk
include UtilsForTests.gmk
THIS_FILE := $(TOPDIR)/test/make/TestCopyFiles.gmk
From 0656f0809208160f83a7dd1ae91d9f09b582ce35 Mon Sep 17 00:00:00 2001
From: Guoxiong Li
Date: Thu, 11 Apr 2024 15:01:41 +0000
Subject: [PATCH 010/486] 8329469: Generational ZGC: Move the methods
forwarding_[index|find|insert] from zRelocate.cpp to ZForwarding
Reviewed-by: stefank, eosterlund
---
src/hotspot/share/gc/z/zForwarding.hpp | 17 ++++++--
src/hotspot/share/gc/z/zForwarding.inline.hpp | 39 ++++++++++++++---
src/hotspot/share/gc/z/zRelocate.cpp | 42 +++----------------
3 files changed, 52 insertions(+), 46 deletions(-)
diff --git a/src/hotspot/share/gc/z/zForwarding.hpp b/src/hotspot/share/gc/z/zForwarding.hpp
index a99473322d4..ee37508903f 100644
--- a/src/hotspot/share/gc/z/zForwarding.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,6 +83,14 @@ private:
ZForwardingEntry first(uintptr_t from_index, ZForwardingCursor* cursor) const;
ZForwardingEntry next(ZForwardingCursor* cursor) const;
+ uintptr_t index(zoffset from_offset);
+
+ ZForwardingEntry find(uintptr_t from_index, ZForwardingCursor* cursor) const;
+ zaddress find(zoffset from_offset, ZForwardingCursor* cursor);
+
+ zoffset insert(uintptr_t from_index, zoffset to_offset, ZForwardingCursor* cursor);
+ zaddress insert(zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor);
+
template
void object_iterate_forwarded_via_livemap(Function function);
@@ -140,10 +148,11 @@ public:
void mark_done();
bool is_done() const;
- zaddress find(zaddress_unsafe addr);
+ zaddress find(zaddress from_addr, ZForwardingCursor* cursor);
+ zaddress find(zaddress_unsafe from_addr, ZForwardingCursor* cursor);
+ zaddress find(zaddress_unsafe from_addr);
- ZForwardingEntry find(uintptr_t from_index, ZForwardingCursor* cursor) const;
- zoffset insert(uintptr_t from_index, zoffset to_offset, ZForwardingCursor* cursor);
+ zaddress insert(zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor);
// Relocated remembered set fields support
void relocated_remembered_fields_register(volatile zpointer* p);
diff --git a/src/hotspot/share/gc/z/zForwarding.inline.hpp b/src/hotspot/share/gc/z/zForwarding.inline.hpp
index b0cb67a70cd..eb5f4a36161 100644
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -218,11 +218,8 @@ inline ZForwardingEntry ZForwarding::next(ZForwardingCursor* cursor) const {
return at(cursor);
}
-inline zaddress ZForwarding::find(zaddress_unsafe addr) {
- const uintptr_t from_index = (ZAddress::offset(addr) - start()) >> object_alignment_shift();
- ZForwardingCursor cursor;
- const ZForwardingEntry entry = find(from_index, &cursor);
- return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null;
+inline uintptr_t ZForwarding::index(zoffset from_offset) {
+ return (from_offset - start()) >> object_alignment_shift();
}
inline ZForwardingEntry ZForwarding::find(uintptr_t from_index, ZForwardingCursor* cursor) const {
@@ -243,6 +240,25 @@ inline ZForwardingEntry ZForwarding::find(uintptr_t from_index, ZForwardingCurso
return entry;
}
+inline zaddress ZForwarding::find(zoffset from_offset, ZForwardingCursor* cursor) {
+ const uintptr_t from_index = index(from_offset);
+ const ZForwardingEntry entry = find(from_index, cursor);
+ return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null;
+}
+
+inline zaddress ZForwarding::find(zaddress from_addr, ZForwardingCursor* cursor) {
+ return find(ZAddress::offset(from_addr), cursor);
+}
+
+inline zaddress ZForwarding::find(zaddress_unsafe from_addr, ZForwardingCursor* cursor) {
+ return find(ZAddress::offset(from_addr), cursor);
+}
+
+inline zaddress ZForwarding::find(zaddress_unsafe from_addr) {
+ ZForwardingCursor cursor;
+ return find(from_addr, &cursor);
+}
+
inline zoffset ZForwarding::insert(uintptr_t from_index, zoffset to_offset, ZForwardingCursor* cursor) {
const ZForwardingEntry new_entry(from_index, untype(to_offset));
const ZForwardingEntry old_entry; // Empty
@@ -271,6 +287,17 @@ inline zoffset ZForwarding::insert(uintptr_t from_index, zoffset to_offset, ZFor
}
}
+inline zaddress ZForwarding::insert(zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor) {
+ const uintptr_t from_index = index(from_offset);
+ const zoffset to_offset = ZAddress::offset(to_addr);
+ const zoffset to_offset_final = insert(from_index, to_offset, cursor);
+ return ZOffset::address(to_offset_final);
+}
+
+inline zaddress ZForwarding::insert(zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor) {
+ return insert(ZAddress::offset(from_addr), to_addr, cursor);
+}
+
inline void ZForwarding::relocated_remembered_fields_register(volatile zpointer* p) {
// Invariant: Page is being retained
assert(ZGeneration::young()->is_phase_mark(), "Only called when");
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index 78efa7cdb12..b55a1863bde 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,35 +52,6 @@
static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);
-static uintptr_t forwarding_index(ZForwarding* forwarding, zoffset from_offset) {
- return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
-}
-
-static zaddress forwarding_find(ZForwarding* forwarding, zoffset from_offset, ZForwardingCursor* cursor) {
- const uintptr_t from_index = forwarding_index(forwarding, from_offset);
- const ZForwardingEntry entry = forwarding->find(from_index, cursor);
- return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null;
-}
-
-static zaddress forwarding_find(ZForwarding* forwarding, zaddress_unsafe from_addr, ZForwardingCursor* cursor) {
- return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
-}
-
-static zaddress forwarding_find(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
- return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
-}
-
-static zaddress forwarding_insert(ZForwarding* forwarding, zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor) {
- const uintptr_t from_index = forwarding_index(forwarding, from_offset);
- const zoffset to_offset = ZAddress::offset(to_addr);
- const zoffset to_offset_final = forwarding->insert(from_index, to_offset, cursor);
- return ZOffset::address(to_offset_final);
-}
-
-static zaddress forwarding_insert(ZForwarding* forwarding, zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor) {
- return forwarding_insert(forwarding, ZAddress::offset(from_addr), to_addr, cursor);
-}
-
ZRelocateQueue::ZRelocateQueue()
: _lock(),
_queue(),
@@ -368,7 +339,7 @@ static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_add
ZUtils::object_copy_disjoint(from_addr, to_addr, size);
// Insert forwarding
- const zaddress to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);
+ const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);
if (to_addr_final != to_addr) {
// Already relocated, try undo allocation
@@ -382,7 +353,7 @@ zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe fro
ZForwardingCursor cursor;
// Lookup forwarding
- zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
+ zaddress to_addr = forwarding->find(from_addr, &cursor);
if (!is_null(to_addr)) {
// Already relocated
return to_addr;
@@ -409,8 +380,7 @@ zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe fro
}
zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
- ZForwardingCursor cursor;
- const zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
+ const zaddress to_addr = forwarding->find(from_addr);
assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
return to_addr;
}
@@ -626,7 +596,7 @@ private:
// Lookup forwarding
{
- const zaddress to_addr = forwarding_find(_forwarding, from_addr, &cursor);
+ const zaddress to_addr = _forwarding->find(from_addr, &cursor);
if (!is_null(to_addr)) {
// Already relocated
increase_other_forwarded(size);
@@ -650,7 +620,7 @@ private:
}
// Insert forwarding
- const zaddress to_addr = forwarding_insert(_forwarding, from_addr, allocated_addr, &cursor);
+ const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
if (to_addr != allocated_addr) {
// Already relocated, undo allocation
_allocator->undo_alloc_object(to_page, to_addr, size);
From 31ee5108e059afae0a3809947adb7b91e19baec6 Mon Sep 17 00:00:00 2001
From: Cesar Soares Lucas
Date: Thu, 11 Apr 2024 15:44:49 +0000
Subject: [PATCH 011/486] 8241503: C2: Share MacroAssembler between mach nodes
during code emission
Reviewed-by: kvn, mdoerr, amitkumar, lucy
---
src/hotspot/cpu/aarch64/aarch64.ad | 232 +--
src/hotspot/cpu/aarch64/aarch64_vector.ad | 28 +-
src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 | 28 +-
src/hotspot/cpu/aarch64/ad_encode.m4 | 9 +-
.../cpu/aarch64/compiledIC_aarch64.cpp | 14 +-
.../gc/shenandoah/shenandoah_aarch64.ad | 26 +-
src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad | 22 +-
src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad | 62 +-
.../cpu/aarch64/sharedRuntime_aarch64.cpp | 6 +-
src/hotspot/cpu/arm/arm.ad | 188 +-
src/hotspot/cpu/arm/compiledIC_arm.cpp | 8 +-
src/hotspot/cpu/ppc/compiledIC_ppc.cpp | 10 +-
.../cpu/ppc/gc/shenandoah/shenandoah_ppc.ad | 16 +-
src/hotspot/cpu/ppc/gc/x/x_ppc.ad | 32 +-
src/hotspot/cpu/ppc/gc/z/z_ppc.ad | 58 +-
src/hotspot/cpu/ppc/ppc.ad | 198 +-
src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp | 6 +-
src/hotspot/cpu/riscv/compiledIC_riscv.cpp | 14 +-
.../riscv/gc/shenandoah/shenandoah_riscv.ad | 24 +-
src/hotspot/cpu/riscv/gc/x/x_riscv.ad | 20 +-
src/hotspot/cpu/riscv/gc/z/z_riscv.ad | 52 +-
src/hotspot/cpu/riscv/riscv.ad | 105 +-
src/hotspot/cpu/riscv/riscv_v.ad | 18 +-
src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp | 6 +-
src/hotspot/cpu/s390/compiledIC_s390.cpp | 10 +-
src/hotspot/cpu/s390/s390.ad | 208 +-
src/hotspot/cpu/x86/assembler_x86.cpp | 13 +
.../cpu/x86/c2_intelJccErratum_x86.cpp | 4 +-
.../cpu/x86/c2_intelJccErratum_x86.hpp | 6 +-
src/hotspot/cpu/x86/compiledIC_x86.cpp | 10 +-
.../x86/gc/shenandoah/shenandoah_x86_32.ad | 4 +-
.../x86/gc/shenandoah/shenandoah_x86_64.ad | 8 +-
.../cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp | 8 +-
src/hotspot/cpu/x86/gc/x/x_x86_64.ad | 24 +-
.../cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp | 10 +-
src/hotspot/cpu/x86/gc/z/z_x86_64.ad | 46 +-
src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp | 6 +-
src/hotspot/cpu/x86/x86.ad | 27 +-
src/hotspot/cpu/x86/x86_32.ad | 1717 ++++++++---------
src/hotspot/cpu/x86/x86_64.ad | 168 +-
.../cpu/zero/c2_MacroAssembler_zero.hpp | 31 +
src/hotspot/cpu/zero/compiledIC_zero.cpp | 2 +-
src/hotspot/share/adlc/adlparse.cpp | 8 -
src/hotspot/share/adlc/output_c.cpp | 19 +-
src/hotspot/share/adlc/output_h.cpp | 2 +-
src/hotspot/share/asm/assembler.hpp | 19 +-
src/hotspot/share/code/compiledIC.hpp | 3 +-
.../share/jvmci/jvmciCodeInstaller.cpp | 3 +-
src/hotspot/share/opto/c2_CodeStubs.cpp | 9 +-
src/hotspot/share/opto/c2_CodeStubs.hpp | 2 +-
src/hotspot/share/opto/cfgnode.hpp | 2 +-
src/hotspot/share/opto/constantTable.cpp | 46 +-
src/hotspot/share/opto/constantTable.hpp | 8 +-
src/hotspot/share/opto/locknode.hpp | 2 +-
src/hotspot/share/opto/machnode.cpp | 4 +-
src/hotspot/share/opto/machnode.hpp | 32 +-
src/hotspot/share/opto/node.cpp | 4 +-
src/hotspot/share/opto/node.hpp | 6 +-
src/hotspot/share/opto/output.cpp | 84 +-
src/hotspot/share/opto/output.hpp | 2 +-
60 files changed, 1713 insertions(+), 2026 deletions(-)
create mode 100644 src/hotspot/cpu/zero/c2_MacroAssembler_zero.hpp
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 171ce00ae56..2d134475739 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1148,8 +1148,8 @@ class HandlerImpl {
public:
- static int emit_exception_handler(CodeBuffer &cbuf);
- static int emit_deopt_handler(CodeBuffer& cbuf);
+ static int emit_exception_handler(C2_MacroAssembler *masm);
+ static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_codestub_branch_size();
@@ -1602,7 +1602,7 @@ bool needs_acquiring_load_exclusive(const Node *n)
return true;
}
-#define __ _masm.
+#define __ masm->
// advance declarations for helper functions to convert register
// indices to register objects
@@ -1657,8 +1657,7 @@ void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ brk(0);
}
@@ -1674,8 +1673,7 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
#endif
- void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
- C2_MacroAssembler _masm(&cbuf);
+ void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
for (int i = 0; i < _count; i++) {
__ nop();
}
@@ -1697,7 +1695,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
ShouldNotReachHere();
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@@ -1751,9 +1749,8 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
// n.b. frame size includes space for return pc and rfp
const int framesize = C->output()->frame_size_in_bytes();
@@ -1802,7 +1799,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
guard = &stub->guard();
}
// In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
- bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
+ bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
}
}
@@ -1810,7 +1807,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Unimplemented();
}
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@@ -1864,9 +1861,8 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_slots() << LogBytesPerInt;
__ remove_frame(framesize);
@@ -1938,7 +1934,7 @@ static enum RC rc_class(OptoReg::Name reg) {
return rc_stack;
}
-uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
+uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
Compile* C = ra_->C;
// Get registers to move.
@@ -1971,8 +1967,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
uint ireg = ideal_reg();
- if (ireg == Op_VecA && cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (ireg == Op_VecA && masm) {
int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
@@ -1991,9 +1986,8 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
} else {
ShouldNotReachHere();
}
- } else if (cbuf) {
+ } else if (masm) {
assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
- C2_MacroAssembler _masm(cbuf);
assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
@@ -2020,8 +2014,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
ShouldNotReachHere();
}
}
- } else if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ } else if (masm) {
switch (src_lo_rc) {
case rc_int:
if (dst_lo_rc == rc_int) { // gpr --> gpr copy
@@ -2029,7 +2022,6 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
__ mov(as_Register(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
} else {
- C2_MacroAssembler _masm(cbuf);
__ movw(as_Register(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
}
@@ -2157,8 +2149,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation(&cbuf, ra_, false, nullptr);
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -2176,9 +2168,7 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
-
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
@@ -2217,10 +2207,8 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
-void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
- // This is the unverified entry point.
- C2_MacroAssembler _masm(&cbuf);
__ ic_check(InteriorEntryAlignment);
}
@@ -2234,13 +2222,12 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
-int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
+int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// mov rscratch1 #exception_blob_entry_point
// br rscratch1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2254,11 +2241,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
}
// Emit deopt handler code.
-int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
+int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2677,7 +2663,6 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack,
}
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN) \
- C2_MacroAssembler _masm(&cbuf); \
{ \
guarantee(INDEX == -1, "mode not permitted for volatile"); \
guarantee(DISP == 0, "mode not permitted for volatile"); \
@@ -2722,7 +2707,7 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
// Used for all non-volatile memory accesses. The use of
// $mem->opcode() to discover whether this pattern uses sign-extended
// offsets is something of a kludge.
- static void loadStore(C2_MacroAssembler masm, mem_insn insn,
+ static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
Register reg, int opcode,
Register base, int index, int scale, int disp,
int size_in_memory)
@@ -2732,12 +2717,12 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
/* Fix up any out-of-range offsets. */
assert_different_registers(rscratch1, base);
assert_different_registers(rscratch1, reg);
- addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
+ addr = __ legitimize_address(addr, size_in_memory, rscratch1);
}
- (masm.*insn)(reg, addr);
+ (masm->*insn)(reg, addr);
}
- static void loadStore(C2_MacroAssembler masm, mem_float_insn insn,
+ static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
FloatRegister reg, int opcode,
Register base, int index, int size, int disp,
int size_in_memory)
@@ -2760,23 +2745,23 @@ typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
/* Fix up any out-of-range offsets. */
assert_different_registers(rscratch1, base);
Address addr = Address(base, disp);
- addr = masm.legitimize_address(addr, size_in_memory, rscratch1);
- (masm.*insn)(reg, addr);
+ addr = __ legitimize_address(addr, size_in_memory, rscratch1);
+ (masm->*insn)(reg, addr);
} else {
assert(disp == 0, "unsupported address mode: disp = %d", disp);
- (masm.*insn)(reg, Address(base, as_Register(index), scale));
+ (masm->*insn)(reg, Address(base, as_Register(index), scale));
}
}
- static void loadStore(C2_MacroAssembler masm, mem_vector_insn insn,
+ static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
int opcode, Register base, int index, int size, int disp)
{
if (index == -1) {
- (masm.*insn)(reg, T, Address(base, disp));
+ (masm->*insn)(reg, T, Address(base, disp));
} else {
assert(disp == 0, "unsupported address mode");
- (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
+ (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
}
}
@@ -2821,7 +2806,6 @@ encode %{
// catch all for unimplemented encodings
enc_class enc_unimplemented %{
- C2_MacroAssembler _masm(&cbuf);
__ unimplemented("C2 catch all");
%}
@@ -2831,7 +2815,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@@ -2839,7 +2823,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@@ -2847,7 +2831,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@@ -2855,7 +2839,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@@ -2863,7 +2847,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@@ -2871,7 +2855,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@@ -2879,7 +2863,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@@ -2887,7 +2871,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@@ -2895,7 +2879,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@@ -2903,7 +2887,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@@ -2911,7 +2895,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@@ -2919,7 +2903,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
Register dst_reg = as_Register($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
@@ -2927,7 +2911,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@@ -2935,7 +2919,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
@@ -2943,15 +2927,14 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
Register src_reg = as_Register($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0(memory1 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@@ -2959,15 +2942,14 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
Register src_reg = as_Register($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strh0(memory2 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
@@ -2975,15 +2957,14 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
Register src_reg = as_Register($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strw0(memory4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@@ -2994,20 +2975,18 @@ encode %{
// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
- C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
}
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_str0(memory8 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
@@ -3015,7 +2994,7 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
@@ -3023,16 +3002,15 @@ encode %{
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
__ membar(Assembler::StoreStore);
- loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
@@ -3041,49 +3019,49 @@ encode %{
// Vector loads and stores
enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
+ loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
+ loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
+ loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
+ loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvH(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::H,
+ loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvS(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S,
+ loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvD(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D,
+ loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q,
+ loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -3199,7 +3177,6 @@ encode %{
// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
- C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
@@ -3215,7 +3192,6 @@ encode %{
enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
{
- C2_MacroAssembler _masm(&cbuf);
FloatRegister src_reg = as_FloatRegister($src$$reg);
__ fmovs(rscratch2, src_reg);
}
@@ -3225,7 +3201,6 @@ encode %{
enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
{
- C2_MacroAssembler _masm(&cbuf);
FloatRegister src_reg = as_FloatRegister($src$$reg);
__ fmovd(rscratch2, src_reg);
}
@@ -3236,7 +3211,6 @@ encode %{
// synchronized read/update encodings
enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register base = as_Register($mem$$base);
int index = $mem$$index;
@@ -3265,7 +3239,6 @@ encode %{
%}
enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
- C2_MacroAssembler _masm(&cbuf);
Register src_reg = as_Register($src$$reg);
Register base = as_Register($mem$$base);
int index = $mem$$index;
@@ -3295,7 +3268,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
@@ -3303,7 +3275,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
@@ -3311,7 +3282,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
@@ -3319,7 +3289,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
@@ -3332,7 +3301,6 @@ encode %{
// CompareAndSwap sequence to serve as a barrier on acquiring a
// lock.
enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
@@ -3340,7 +3308,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
@@ -3348,7 +3315,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
@@ -3356,7 +3322,6 @@ encode %{
%}
enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
- C2_MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
@@ -3365,7 +3330,6 @@ encode %{
// auxiliary used for CompareAndSwapX to set result register
enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
- C2_MacroAssembler _masm(&cbuf);
Register res_reg = as_Register($res$$reg);
__ cset(res_reg, Assembler::EQ);
%}
@@ -3373,7 +3337,6 @@ encode %{
// prefetch encodings
enc_class aarch64_enc_prefetchw(memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
Register base = as_Register($mem$$base);
int index = $mem$$index;
int scale = $mem$$scale;
@@ -3394,7 +3357,6 @@ encode %{
/// mov envcodings
enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
- C2_MacroAssembler _masm(&cbuf);
uint32_t con = (uint32_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
if (con == 0) {
@@ -3405,7 +3367,6 @@ encode %{
%}
enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
uint64_t con = (uint64_t)$src$$constant;
if (con == 0) {
@@ -3416,7 +3377,6 @@ encode %{
%}
enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr || con == (address)1) {
@@ -3442,24 +3402,20 @@ encode %{
%}
enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, zr);
%}
enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, (uint64_t)1);
%}
enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
- C2_MacroAssembler _masm(&cbuf);
__ load_byte_map_base($dst$$Register);
%}
enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@@ -3472,13 +3428,11 @@ encode %{
%}
enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, zr);
%}
enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@@ -3493,7 +3447,6 @@ encode %{
// arithmetic encodings
enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src_reg = as_Register($src1$$reg);
int32_t con = (int32_t)$src2$$constant;
@@ -3507,7 +3460,6 @@ encode %{
%}
enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src_reg = as_Register($src1$$reg);
int32_t con = (int32_t)$src2$$constant;
@@ -3521,7 +3473,6 @@ encode %{
%}
enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -3529,7 +3480,6 @@ encode %{
%}
enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -3537,7 +3487,6 @@ encode %{
%}
enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -3545,7 +3494,6 @@ encode %{
%}
enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -3555,14 +3503,12 @@ encode %{
// compare instruction encodings
enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmpw(reg1, reg2);
%}
enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src1$$reg);
int32_t val = $src2$$constant;
if (val >= 0) {
@@ -3573,7 +3519,6 @@ encode %{
%}
enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
uint32_t val = (uint32_t)$src2$$constant;
__ movw(rscratch1, val);
@@ -3581,14 +3526,12 @@ encode %{
%}
enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmp(reg1, reg2);
%}
enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src1$$reg);
int64_t val = $src2$$constant;
if (val >= 0) {
@@ -3603,7 +3546,6 @@ encode %{
%}
enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
uint64_t val = (uint64_t)$src2$$constant;
__ mov(rscratch1, val);
@@ -3611,45 +3553,38 @@ encode %{
%}
enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmp(reg1, reg2);
%}
enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmpw(reg1, reg2);
%}
enc_class aarch64_enc_testp(iRegP src) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src$$reg);
__ cmp(reg, zr);
%}
enc_class aarch64_enc_testn(iRegN src) %{
- C2_MacroAssembler _masm(&cbuf);
Register reg = as_Register($src$$reg);
__ cmpw(reg, zr);
%}
enc_class aarch64_enc_b(label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label *L = $lbl$$label;
__ b(*L);
%}
enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label *L = $lbl$$label;
__ br ((Assembler::Condition)$cmp$$cmpcode, *L);
%}
enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label *L = $lbl$$label;
__ br ((Assembler::Condition)$cmp$$cmpcode, *L);
%}
@@ -3662,7 +3597,6 @@ encode %{
Register result_reg = as_Register($result$$reg);
Label miss;
- C2_MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
nullptr, &miss,
/*set_cond_codes:*/ true);
@@ -3673,8 +3607,6 @@ encode %{
%}
enc_class aarch64_enc_java_static_call(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
-
address addr = (address)$meth$$method;
address call;
if (!_method) {
@@ -3690,7 +3622,7 @@ encode %{
__ nop();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
call = __ trampoline_call(Address(addr, rspec));
@@ -3701,10 +3633,10 @@ encode %{
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
- cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
+ __ code()->shared_stub_to_interp_for(_method, call - __ begin());
} else {
// Emit stub for static call
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -3721,8 +3653,7 @@ encode %{
%}
enc_class aarch64_enc_java_dynamic_call(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
address call = __ ic_call((address)$meth$$method, method_index);
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -3735,7 +3666,6 @@ encode %{
%}
enc_class aarch64_enc_call_epilog() %{
- C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
__ call_Unimplemented();
@@ -3743,8 +3673,6 @@ encode %{
%}
enc_class aarch64_enc_java_to_runtime(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
-
// some calls to generated routines (arraycopy code) are scheduled
// by C2 as runtime calls. if so we can call them using a br (they
// will be in a reachable segment) otherwise we have to use a blr
@@ -3775,12 +3703,10 @@ encode %{
%}
enc_class aarch64_enc_rethrow() %{
- C2_MacroAssembler _masm(&cbuf);
__ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
%}
enc_class aarch64_enc_ret() %{
- C2_MacroAssembler _masm(&cbuf);
#ifdef ASSERT
if (Compile::current()->max_vector_size() > 0) {
__ verify_ptrue();
@@ -3790,13 +3716,11 @@ encode %{
%}
enc_class aarch64_enc_tail_call(iRegP jump_target) %{
- C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
__ br(target_reg);
%}
enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
- C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
// exception oop should be in r0
// ret addr has been popped into lr
@@ -4565,7 +4489,7 @@ operand immP()
interface(CONST_INTER);
%}
-// Null Pointer Immediate
+// nullptr Pointer Immediate
operand immP0()
%{
predicate(n->get_ptr() == 0);
@@ -4673,7 +4597,7 @@ operand immN()
interface(CONST_INTER);
%}
-// Narrow Null Pointer Immediate
+// Narrow nullptr Pointer Immediate
operand immN0()
%{
predicate(n->get_narrowcon() == 0);
@@ -6768,7 +6692,7 @@ instruct loadConP0(iRegPNoSp dst, immP0 con)
match(Set dst con);
ins_cost(INSN_COST);
- format %{ "mov $dst, $con\t# null pointer" %}
+ format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p0(dst, con));
@@ -6782,7 +6706,7 @@ instruct loadConP1(iRegPNoSp dst, immP_1 con)
match(Set dst con);
ins_cost(INSN_COST);
- format %{ "mov $dst, $con\t# null pointer" %}
+ format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p1(dst, con));
@@ -6824,7 +6748,7 @@ instruct loadConN0(iRegNNoSp dst, immN0 con)
match(Set dst con);
ins_cost(INSN_COST);
- format %{ "mov $dst, $con\t# compressed null pointer" %}
+ format %{ "mov $dst, $con\t# compressed nullptr ptr" %}
ins_encode(aarch64_enc_mov_n0(dst, con));
@@ -7735,7 +7659,7 @@ instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
@@ -7776,7 +7700,7 @@ instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
@@ -16870,7 +16794,7 @@ instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
"mov $dst, $tdst"
%}
ins_encode %{
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ ldrs($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
@@ -16907,7 +16831,7 @@ instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
"mov $dst, $tdst"
%}
ins_encode %{
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ ldrd($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
@@ -16944,7 +16868,7 @@ instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
"mov $dst, $tdst"
%}
ins_encode %{
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ ldrs($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
@@ -16982,7 +16906,7 @@ instruct expandBitsL_memcon(iRegINoSp dst, memory8 mem, immL mask,
"mov $dst, $tdst"
%}
ins_encode %{
- loadStore(C2_MacroAssembler(&cbuf), &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ ldrd($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad b/src/hotspot/cpu/aarch64/aarch64_vector.ad
index d611c14f403..467d6ec2250 100644
--- a/src/hotspot/cpu/aarch64/aarch64_vector.ad
+++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad
@@ -94,7 +94,7 @@ source %{
PRegister Pg, const Address &adr);
// Predicated load/store, with optional ptrue to all elements of given predicate register.
- static void loadStoreA_predicated(C2_MacroAssembler masm, bool is_store, FloatRegister reg,
+ static void loadStoreA_predicated(C2_MacroAssembler* masm, bool is_store, FloatRegister reg,
PRegister pg, BasicType mem_elem_bt, BasicType vector_elem_bt,
int opcode, Register base, int index, int size, int disp) {
sve_mem_insn_predicate insn;
@@ -119,7 +119,7 @@ source %{
ShouldNotReachHere();
}
int imm4 = disp / mesize / Matcher::scalable_vector_reg_size(vector_elem_bt);
- (masm.*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
+ (masm->*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
} else {
assert(false, "unimplemented");
ShouldNotReachHere();
@@ -422,7 +422,7 @@ instruct loadV(vReg dst, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false,
+ loadStoreA_predicated(masm, /* is_store */ false,
$dst$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -439,7 +439,7 @@ instruct storeV(vReg src, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true,
+ loadStoreA_predicated(masm, /* is_store */ true,
$src$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -454,7 +454,7 @@ instruct loadV_masked(vReg dst, vmemA mem, pRegGov pg) %{
format %{ "loadV_masked $dst, $pg, $mem" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false, $dst$$FloatRegister,
+ loadStoreA_predicated(masm, /* is_store */ false, $dst$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -467,7 +467,7 @@ instruct storeV_masked(vReg src, vmemA mem, pRegGov pg) %{
format %{ "storeV_masked $mem, $pg, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true, $src$$FloatRegister,
+ loadStoreA_predicated(masm, /* is_store */ true, $src$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -4929,7 +4929,7 @@ instruct vloadmask_loadV(pReg dst, indirect mem, vReg tmp, rFlagsReg cr) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -4950,7 +4950,7 @@ instruct vloadmask_loadV_masked(pReg dst, indirect mem, pRegGov pg,
// Load valid mask values which are boolean type, and extend them to the
// defined vector element type. Convert the vector to predicate.
BasicType bt = Matcher::vector_element_basic_type(this);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -4977,7 +4977,7 @@ instruct vloadmask_loadVMasked(pReg dst, vmemA mem, pRegGov pg, vReg tmp, rFlags
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -5005,7 +5005,7 @@ instruct vloadmask_loadVMasked_masked(pReg dst, vmemA mem, pRegGov pg1, pRegGov
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg2$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -5030,7 +5030,7 @@ instruct storeV_vstoremask(indirect mem, pReg src, immI_gt_1 esize, vReg tmp) %{
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -5052,7 +5052,7 @@ instruct storeV_vstoremask_masked(indirect mem, pReg src, immI_gt_1 esize,
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -5078,7 +5078,7 @@ instruct storeVMasked_vstoremask(vmemA mem, pReg src, pRegGov pg, immI_gt_1 esiz
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type.");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -5105,7 +5105,7 @@ instruct storeVMasked_vstoremask_masked(vmemA mem, pReg src, pRegGov pg, immI_gt
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
index 5c4e13d432f..d31a4e05799 100644
--- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
+++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
@@ -84,7 +84,7 @@ source %{
PRegister Pg, const Address &adr);
// Predicated load/store, with optional ptrue to all elements of given predicate register.
- static void loadStoreA_predicated(C2_MacroAssembler masm, bool is_store, FloatRegister reg,
+ static void loadStoreA_predicated(C2_MacroAssembler* masm, bool is_store, FloatRegister reg,
PRegister pg, BasicType mem_elem_bt, BasicType vector_elem_bt,
int opcode, Register base, int index, int size, int disp) {
sve_mem_insn_predicate insn;
@@ -109,7 +109,7 @@ source %{
ShouldNotReachHere();
}
int imm4 = disp / mesize / Matcher::scalable_vector_reg_size(vector_elem_bt);
- (masm.*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
+ (masm->*insn)(reg, Assembler::elemType_to_regVariant(vector_elem_bt), pg, Address(base, imm4));
} else {
assert(false, "unimplemented");
ShouldNotReachHere();
@@ -361,7 +361,7 @@ instruct loadV(vReg dst, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false,
+ loadStoreA_predicated(masm, /* is_store */ false,
$dst$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -378,7 +378,7 @@ instruct storeV(vReg src, vmemA mem) %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
uint length_in_bytes = Matcher::vector_length_in_bytes(this, $src);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true,
+ loadStoreA_predicated(masm, /* is_store */ true,
$src$$FloatRegister, ptrue, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -393,7 +393,7 @@ instruct loadV_masked(vReg dst, vmemA mem, pRegGov pg) %{
format %{ "loadV_masked $dst, $pg, $mem" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ false, $dst$$FloatRegister,
+ loadStoreA_predicated(masm, /* is_store */ false, $dst$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -406,7 +406,7 @@ instruct storeV_masked(vReg src, vmemA mem, pRegGov pg) %{
format %{ "storeV_masked $mem, $pg, $src" %}
ins_encode %{
BasicType bt = Matcher::vector_element_basic_type(this, $src);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), /* is_store */ true, $src$$FloatRegister,
+ loadStoreA_predicated(masm, /* is_store */ true, $src$$FloatRegister,
$pg$$PRegister, bt, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -3321,7 +3321,7 @@ instruct vloadmask_loadV(pReg dst, indirect mem, vReg tmp, rFlagsReg cr) %{
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -3342,7 +3342,7 @@ instruct vloadmask_loadV_masked(pReg dst, indirect mem, pRegGov pg,
// Load valid mask values which are boolean type, and extend them to the
// defined vector element type. Convert the vector to predicate.
BasicType bt = Matcher::vector_element_basic_type(this);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -3369,7 +3369,7 @@ instruct vloadmask_loadVMasked(pReg dst, vmemA mem, pRegGov pg, vReg tmp, rFlags
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -3397,7 +3397,7 @@ instruct vloadmask_loadVMasked_masked(pReg dst, vmemA mem, pRegGov pg1, pRegGov
BasicType bt = Matcher::vector_element_basic_type(this);
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
assert(length_in_bytes == MaxVectorSize, "invalid vector length");
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), false, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, false, $tmp$$FloatRegister,
$pg2$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ sve_cmp(Assembler::NE, $dst$$PRegister, __ elemType_to_regVariant(bt),
@@ -3422,7 +3422,7 @@ instruct storeV_vstoremask(indirect mem, pReg src, immI_gt_1 esize, vReg tmp) %{
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -3444,7 +3444,7 @@ instruct storeV_vstoremask_masked(indirect mem, pReg src, immI_gt_1 esize,
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -3470,7 +3470,7 @@ instruct storeVMasked_vstoremask(vmemA mem, pReg src, pRegGov pg, immI_gt_1 esiz
assert(type2aelembytes(bt) == (int)$esize$$constant, "unsupported type.");
Assembler::SIMD_RegVariant size = __ elemBytes_to_regVariant($esize$$constant);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
ptrue, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
@@ -3497,7 +3497,7 @@ instruct storeVMasked_vstoremask_masked(vmemA mem, pReg src, pRegGov pg, immI_gt
Assembler::SIMD_RegVariant size = __ elemType_to_regVariant(bt);
__ sve_cpy($tmp$$FloatRegister, size, $src$$PRegister, 1, false);
__ sve_gen_mask_imm($pgtmp$$PRegister, bt, Matcher::vector_length(this, $src));
- loadStoreA_predicated(C2_MacroAssembler(&cbuf), true, $tmp$$FloatRegister,
+ loadStoreA_predicated(masm, true, $tmp$$FloatRegister,
$pgtmp$$PRegister, T_BOOLEAN, bt, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
diff --git a/src/hotspot/cpu/aarch64/ad_encode.m4 b/src/hotspot/cpu/aarch64/ad_encode.m4
index 4897998d870..008dbd2c936 100644
--- a/src/hotspot/cpu/aarch64/ad_encode.m4
+++ b/src/hotspot/cpu/aarch64/ad_encode.m4
@@ -29,7 +29,7 @@ define(choose, `loadStore($1, &MacroAssembler::$3, $2, $4,
%}')dnl
define(access, `
$3Register $1_reg = as_$3Register($$1$$reg);
- $4choose(C2_MacroAssembler(&cbuf), $1_reg,$2,$mem->opcode(),
+ $4choose(masm, $1_reg,$2,$mem->opcode(),
as_Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$5)')dnl
define(load,`
// This encoding class is generated automatically from ad_encode.m4.
@@ -59,8 +59,7 @@ define(STORE0,`
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_$2`'0(memory$4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
- choose(_masm,zr,$2,$mem->opcode(),
+ choose(masm,zr,$2,$mem->opcode(),
as_$3Register($mem$$base),$mem$$index,$mem$$scale,$mem$$disp,$4)')dnl
STORE(iRegI,strb,,,1)
STORE0(iRegI,strb,,1)
@@ -72,7 +71,6 @@ STORE(iRegL,str,,
`// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
- C2_MacroAssembler _masm(&cbuf);
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
@@ -85,8 +83,7 @@ STORE(vRegD,strd,Float,,8)
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
__ membar(Assembler::StoreStore);
- loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
+ loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
index 23c08f11d1a..03ee729b767 100644
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -34,10 +34,10 @@
// ----------------------------------------------------------------------------
-#define __ _masm.
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
- precond(cbuf.stubs()->start() != badAddress);
- precond(cbuf.stubs()->end() != badAddress);
+#define __ masm->
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
+ precond(__ code()->stubs()->start() != badAddress);
+ precond(__ code()->stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
@@ -45,13 +45,9 @@ address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// jmp -4 # to self
if (mark == nullptr) {
- mark = cbuf.insts_mark(); // Get mark within main instrs section.
+ mark = __ inst_mark(); // Get mark within main instrs section.
}
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
address base = __ start_a_stub(to_interp_stub_size());
int offset = __ offset();
if (base == nullptr) {
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
index 0572e7d8d11..d5dcf7f9534 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad
@@ -29,20 +29,18 @@ source_hpp %{
encode %{
enc_class aarch64_enc_cmpxchg_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
- MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
enc_class aarch64_enc_cmpxchg_acq_oop_shenandoah(memory mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, iRegINoSp res) %{
- MacroAssembler _masm(&cbuf);
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
%}
@@ -77,7 +75,7 @@ instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -115,7 +113,7 @@ instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -131,7 +129,7 @@ instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -147,7 +145,7 @@ instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -164,7 +162,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -181,7 +179,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ true, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -199,7 +197,7 @@ instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldva
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -216,7 +214,7 @@ instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldva
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ false, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -235,7 +233,7 @@ instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN ol
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -254,7 +252,7 @@ instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP ol
Register tmp = $tmp$$Register;
__ mov(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not currently supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
/*acquire*/ true, /*release*/ true, /*is_cae*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
diff --git a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad
index a8ef3ce9f13..c7c7165affb 100644
--- a/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/x/x_aarch64.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@ source_hpp %{
source %{
-static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
+static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
@@ -42,7 +42,7 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ bind(*stub->continuation());
}
-static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ b(*stub->entry());
__ bind(*stub->continuation());
@@ -64,7 +64,7 @@ instruct xLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
ins_encode %{
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ ldr($dst$$Register, ref_addr);
- x_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
+ x_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(iload_reg_mem);
@@ -83,7 +83,7 @@ instruct xLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
ins_encode %{
__ ldar($dst$$Register, $mem$$Register);
- x_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
+ x_load_barrier(masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
@@ -110,7 +110,7 @@ instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, rscratch2);
__ cbz(rscratch1, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
@@ -142,7 +142,7 @@ instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, rscratch2);
__ cbz(rscratch1, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, rscratch2);
__ cset($res$$Register, Assembler::EQ);
@@ -171,7 +171,7 @@ instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, $res$$Register);
__ cbz(rscratch1, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
__ bind(good);
@@ -199,7 +199,7 @@ instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
__ ldr(rscratch1, Address(rthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(rscratch1, rscratch1, $res$$Register);
__ cbz(rscratch1, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
__ bind(good);
@@ -220,7 +220,7 @@ instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
- x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
+ x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
@@ -237,7 +237,7 @@ instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr)
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
- x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
+ x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}
diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
index 23564a3f23c..92181e2b6b9 100644
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
@@ -33,40 +33,40 @@ source %{
#include "gc/z/zBarrierSetAssembler.hpp"
-static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src) {
+static void z_color(MacroAssembler* masm, const MachNode* node, Register dst, Register src) {
assert_different_registers(src, dst);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBeforeMov);
__ movzw(dst, barrier_Relocation::unpatched);
__ orr(dst, dst, src, Assembler::LSL, ZPointerLoadShift);
}
-static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
+static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
__ lsr(ref, ref, ZPointerLoadShift);
}
-static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+static void z_keep_alive_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadBeforeMov);
__ movzw(tmp, barrier_Relocation::unpatched);
__ tst(ref, tmp);
ZLoadBarrierStubC2Aarch64* const stub = ZLoadBarrierStubC2Aarch64::create(node, ref_addr, ref);
__ br(Assembler::NE, *stub->entry());
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
-static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
- Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
+static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+ Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
if (on_non_strong) {
- z_keep_alive_load_barrier(_masm, node, ref_addr, ref, tmp);
+ z_keep_alive_load_barrier(masm, node, ref_addr, ref, tmp);
return;
}
if (node->barrier_data() == ZBarrierElided) {
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
return;
}
@@ -81,19 +81,19 @@ static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ b(*stub->entry());
__ bind(good);
}
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
-static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
- Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
+static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
+ Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
- z_color(_masm, node, rnew_zpointer, rnew_zaddress);
+ z_color(masm, node, rnew_zpointer, rnew_zaddress);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2Aarch64* const stub = ZStoreBarrierStubC2Aarch64::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
- bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
+ bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
@@ -113,7 +113,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
ins_encode %{
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
__ ldr($dst$$Register, ref_addr);
- z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1);
+ z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
%}
ins_pipe(iload_reg_mem);
@@ -133,7 +133,7 @@ instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg
ins_encode %{
const Address ref_addr = Address($mem$$Register);
__ ldar($dst$$Register, $mem$$Register);
- z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch1);
+ z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
%}
ins_pipe(pipe_serial);
@@ -150,7 +150,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
format %{ "movq $mem, $src\t# ptr" %}
ins_encode %{
const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
- z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
+ z_store_barrier(masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
__ str($tmp$$Register, ref_addr);
%}
ins_pipe(pipe_serial);
@@ -167,7 +167,7 @@ instruct zStorePVolatile(indirect mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
format %{ "movq $mem, $src\t# ptr" %}
ins_encode %{
const Address ref_addr = Address($mem$$Register);
- z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
+ z_store_barrier(masm, this, ref_addr, $src$$Register, $tmp$$Register, rscratch2, false /* is_atomic */);
__ stlr($tmp$$Register, $mem$$Register);
%}
ins_pipe(pipe_serial);
@@ -187,8 +187,8 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, noreg);
__ cset($res$$Register, Assembler::EQ);
@@ -211,8 +211,8 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, noreg);
__ cset($res$$Register, Assembler::EQ);
@@ -235,11 +235,11 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
false /* acquire */, true /* release */, false /* weak */, $res$$Register);
- z_uncolor(_masm, this, $res$$Register);
+ z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -258,11 +258,11 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
ins_encode %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, rscratch2, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::xword,
true /* acquire */, true /* release */, false /* weak */, $res$$Register);
- z_uncolor(_masm, this, $res$$Register);
+ z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -278,9 +278,9 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
- z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
+ z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
__ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
- z_uncolor(_masm, this, $prev$$Register);
+ z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
@@ -296,9 +296,9 @@ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr)
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
- z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
+ z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, rscratch2, true /* is_atomic */);
__ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
- z_uncolor(_masm, this, $prev$$Register);
+ z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 97a10afde7a..b200fb4c4b0 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1108,8 +1108,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(exit);
- CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1173,8 +1172,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ br(r1); // the exception handler
}
- CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad
index 07c5b953254..638c48ad5aa 100644
--- a/src/hotspot/cpu/arm/arm.ad
+++ b/src/hotspot/cpu/arm/arm.ad
@@ -105,8 +105,8 @@ class HandlerImpl {
public:
- static int emit_exception_handler(CodeBuffer &cbuf);
- static int emit_deopt_handler(CodeBuffer& cbuf);
+ static int emit_exception_handler(C2_MacroAssembler *masm);
+ static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return ( 3 * 4 );
@@ -138,7 +138,7 @@ bool assert_not_var_shift(const Node *n) {
return true;
}
-#define __ _masm.
+#define __ masm->
static FloatRegister reg_to_FloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);
@@ -159,8 +159,7 @@ int MachNode::compute_padding(int current_offset) const {
// REQUIRED FUNCTIONALITY
// emit an interrupt that is caught by the debugger (for debugging compiler)
-void emit_break(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+void emit_break(C2_MacroAssembler *masm) {
__ breakpoint();
}
@@ -170,8 +169,8 @@ void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
}
#endif
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- emit_break(cbuf);
+void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ emit_break(masm);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
@@ -179,16 +178,14 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
-void emit_nop(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+void emit_nop(C2_MacroAssembler *masm) {
__ nop();
}
-void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
+void emit_call_reloc(C2_MacroAssembler *masm, const MachCallNode *n, MachOper *m, RelocationHolder const& rspec) {
int ret_addr_offset0 = n->as_MachCall()->ret_addr_offset();
- int call_site_offset = cbuf.insts()->mark_off();
- C2_MacroAssembler _masm(&cbuf);
+ int call_site_offset = __ code()->insts()->mark_off();
__ set_inst_mark(); // needed in emit_to_interp_stub() to locate the call
address target = (address)m->method();
assert(n->as_MachCall()->entry_point() == target, "sanity");
@@ -210,8 +207,8 @@ void emit_call_reloc(CodeBuffer &cbuf, const MachCallNode *n, MachOper *m, Reloc
//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
-void emit_lo(CodeBuffer &cbuf, int val) { }
-void emit_hi(CodeBuffer &cbuf, int val) { }
+void emit_lo(C2_MacroAssembler *masm, int val) { }
+void emit_hi(C2_MacroAssembler *masm, int val) { }
//=============================================================================
@@ -232,10 +229,9 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
ShouldNotReachHere();
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
Compile* C = ra_->C;
ConstantTable& constant_table = C->output()->constant_table();
- C2_MacroAssembler _masm(&cbuf);
Register r = as_Register(ra_->get_encode(this));
CodeSection* consts_section = __ code()->consts();
@@ -303,9 +299,8 @@ void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
for (int i = 0; i < OptoPrologueNops; i++) {
__ nop();
@@ -334,11 +329,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->nmethod_entry_barrier(&_masm);
+ bs->nmethod_entry_barrier(masm);
}
// offset from scratch buffer is not valid
- if (strcmp(cbuf.name(), "Compile::Fill_buffer") == 0) {
+ if (strcmp(__ code()->name(), "Compile::Fill_buffer") == 0) {
C->output()->set_frame_complete( __ offset() );
}
@@ -379,8 +374,7 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
size_t framesize = C->output()->frame_size_in_bytes();
@@ -430,7 +424,7 @@ static inline bool is_iRegLd_memhd(OptoReg::Name src_first, OptoReg::Name src_se
return (rlo&1)==0 && (rlo+1 == rhi) && is_memoryHD(offset);
}
-uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
+uint MachSpillCopyNode::implementation( C2_MacroAssembler *masm,
PhaseRegAlloc *ra_,
bool do_size,
outputStream* st ) const {
@@ -463,14 +457,12 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Bailout only for real instruction emit.
// This requires a single comment change in shared code. ( see output.cpp "Normal" instruction case )
- C2_MacroAssembler _masm(cbuf);
-
// --------------------------------------
// Check for mem-mem move. Load into unused float registers and fall into
// the float-store case.
if (src_first_rc == rc_stack && dst_first_rc == rc_stack) {
int offset = ra_->reg2offset(src_first);
- if (cbuf && !is_memoryfp(offset)) {
+ if (masm && !is_memoryfp(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
@@ -480,7 +472,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
src_second = OptoReg::Name(R_mem_copy_hi_num);
src_first_rc = rc_float;
src_second_rc = rc_float;
- if (cbuf) {
+ if (masm) {
__ ldr_double(Rmemcopy, Address(SP, offset));
} else if (!do_size) {
st->print(LDR_DOUBLE " R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
@@ -488,7 +480,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
} else {
src_first = OptoReg::Name(R_mem_copy_lo_num);
src_first_rc = rc_float;
- if (cbuf) {
+ if (masm) {
__ ldr_float(Rmemcopy, Address(SP, offset));
} else if (!do_size) {
st->print(LDR_FLOAT " R_%s,[R_SP + #%d]\t! spill",OptoReg::regname(src_first),offset);
@@ -507,7 +499,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
if (src_first_rc == rc_int && dst_first_rc == rc_int) {
// Else normal reg-reg copy
assert( src_second != dst_first, "smashed second before evacuating it" );
- if (cbuf) {
+ if (masm) {
__ mov(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -522,13 +514,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for integer store
if (src_first_rc == rc_int && dst_first_rc == rc_stack) {
int offset = ra_->reg2offset(dst_first);
- if (cbuf && !is_memoryI(offset)) {
+ if (masm && !is_memoryI(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
if (src_second_rc != rc_bad && is_iRegLd_memhd(src_first, src_second, offset)) {
assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
- if (cbuf) {
+ if (masm) {
__ str_64(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -538,7 +530,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
- if (cbuf) {
+ if (masm) {
__ str_32(reg_to_register_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -554,13 +546,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for integer load
if (dst_first_rc == rc_int && src_first_rc == rc_stack) {
int offset = ra_->reg2offset(src_first);
- if (cbuf && !is_memoryI(offset)) {
+ if (masm && !is_memoryI(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
if (src_second_rc != rc_bad && is_iRegLd_memhd(dst_first, dst_second, offset)) {
assert((src_first&1)==0 && src_first+1 == src_second, "pair of registers must be aligned/contiguous");
- if (cbuf) {
+ if (masm) {
__ ldr_64(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -570,7 +562,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
- if (cbuf) {
+ if (masm) {
__ ldr_32(reg_to_register_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -587,7 +579,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
if (src_first_rc == rc_float && dst_first_rc == rc_float) {
if (src_second_rc != rc_bad) {
assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
- if (cbuf) {
+ if (masm) {
__ mov_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -598,7 +590,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return 4;
}
- if (cbuf) {
+ if (masm) {
__ mov_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -613,14 +605,14 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for float store
if (src_first_rc == rc_float && dst_first_rc == rc_stack) {
int offset = ra_->reg2offset(dst_first);
- if (cbuf && !is_memoryfp(offset)) {
+ if (masm && !is_memoryfp(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
// Further check for aligned-adjacent pair, so we can use a double store
if (src_second_rc != rc_bad) {
assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
- if (cbuf) {
+ if (masm) {
__ str_double(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -630,7 +622,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
- if (cbuf) {
+ if (masm) {
__ str_float(reg_to_FloatRegister_object(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -646,14 +638,14 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for float load
if (dst_first_rc == rc_float && src_first_rc == rc_stack) {
int offset = ra_->reg2offset(src_first);
- if (cbuf && !is_memoryfp(offset)) {
+ if (masm && !is_memoryfp(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
// Further check for aligned-adjacent pair, so we can use a double store
if (src_second_rc != rc_bad) {
assert((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers and stack slots must be aligned/contiguous");
- if (cbuf) {
+ if (masm) {
__ ldr_double(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -663,7 +655,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
- if (cbuf) {
+ if (masm) {
__ ldr_float(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -683,7 +675,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
assert(src_second_rc == rc_int && dst_second_rc == rc_float, "unsupported");
- if (cbuf) {
+ if (masm) {
__ fmdrr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]), reg_to_register_object(Matcher::_regEncode[src_second]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -693,7 +685,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
- if (cbuf) {
+ if (masm) {
__ fmsr(reg_to_FloatRegister_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -712,7 +704,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
assert((src_first&1)==0 && src_first+1 == src_second, "pairs of registers must be aligned/contiguous");
assert((dst_first&1)==0 && dst_first+1 == dst_second, "pairs of registers must be aligned/contiguous");
assert(src_second_rc == rc_float && dst_second_rc == rc_int, "unsupported");
- if (cbuf) {
+ if (masm) {
__ fmrrd(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -722,7 +714,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
}
return size + 4;
} else {
- if (cbuf) {
+ if (masm) {
__ fmrs(reg_to_register_object(Matcher::_regEncode[dst_first]), reg_to_FloatRegister_object(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -746,7 +738,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// register (else it's a hi-bits-to-hi-bits copy which should have
// happened already as part of a 64-bit move)
if (src_second_rc == rc_int && dst_second_rc == rc_int) {
- if (cbuf) {
+ if (masm) {
__ mov(reg_to_register_object(Matcher::_regEncode[dst_second]), reg_to_register_object(Matcher::_regEncode[src_second]));
#ifndef PRODUCT
} else if (!do_size) {
@@ -763,11 +755,11 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
if (src_second_rc == rc_int && dst_second_rc == rc_stack) {
int offset = ra_->reg2offset(dst_second);
- if (cbuf && !is_memoryP(offset)) {
+ if (masm && !is_memoryP(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
- if (cbuf) {
+ if (masm) {
__ str(reg_to_register_object(Matcher::_regEncode[src_second]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -782,11 +774,11 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
// Check for high word integer load
if (dst_second_rc == rc_int && src_second_rc == rc_stack) {
int offset = ra_->reg2offset(src_second);
- if (cbuf && !is_memoryP(offset)) {
+ if (masm && !is_memoryP(offset)) {
ra_->C->record_method_not_compilable("unable to handle large constant offsets");
return 0;
} else {
- if (cbuf) {
+ if (masm) {
__ ldr(reg_to_register_object(Matcher::_regEncode[dst_second]), Address(SP, offset));
#ifndef PRODUCT
} else if (!do_size) {
@@ -804,12 +796,12 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
#ifndef PRODUCT
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
- implementation(nullptr, ra_, false, st );
+ implementation( nullptr, ra_, false, st );
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation( &cbuf, ra_, false, nullptr );
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation( masm, ra_, false, nullptr );
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -823,8 +815,7 @@ void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
}
#endif
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc * ) const {
for(int i = 0; i < _count; i += 1) {
__ nop();
}
@@ -844,8 +835,7 @@ void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
Register dst = reg_to_register_object(reg);
@@ -875,8 +865,7 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
}
#endif
-void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ ic_check(InteriorEntryAlignment);
}
@@ -888,9 +877,7 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
// Emit exception handler code.
-int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
- C2_MacroAssembler _masm(&cbuf);
-
+int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -909,11 +896,9 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
return offset;
}
-int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Can't use any of the current frame's registers as we may have deopted
// at a poll and everything can be live.
- C2_MacroAssembler _masm(&cbuf);
-
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -1208,13 +1193,25 @@ bool maybe_far_call(const MachCallNode *n) {
// tertiary opcode. Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
+ // Set instruction mark in MacroAssembler. This is used only in
+ // instructions that emit bytes directly to the CodeBuffer wrapped
+ // in the MacroAssembler. Should go away once all "instruct" are
+ // patched to emit bytes only using methods in MacroAssembler.
+ enc_class SetInstMark %{
+ __ set_inst_mark();
+ %}
+
+ enc_class ClearInstMark %{
+ __ clear_inst_mark();
+ %}
+
enc_class call_epilog %{
// nothing
%}
enc_class Java_To_Runtime (method meth) %{
// CALL directly to the runtime
- emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
+ emit_call_reloc(masm, as_MachCall(), $meth, runtime_call_Relocation::spec());
%}
enc_class Java_Static_Call (method meth) %{
@@ -1222,15 +1219,15 @@ encode %{
// who we intended to call.
if ( !_method) {
- emit_call_reloc(cbuf, as_MachCall(), $meth, runtime_call_Relocation::spec());
+ emit_call_reloc(masm, as_MachCall(), $meth, runtime_call_Relocation::spec());
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
- emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);
+ emit_call_reloc(masm, as_MachCall(), $meth, rspec);
// Emit stubs for static call.
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -1240,39 +1237,35 @@ encode %{
enc_class save_last_PC %{
// preserve mark
- address mark = cbuf.insts()->mark();
- debug_only(int off0 = cbuf.insts_size());
- C2_MacroAssembler _masm(&cbuf);
+ address mark = __ inst_mark();
+ debug_only(int off0 = __ offset());
int ret_addr_offset = as_MachCall()->ret_addr_offset();
__ adr(LR, mark + ret_addr_offset);
__ str(LR, Address(Rthread, JavaThread::last_Java_pc_offset()));
- debug_only(int off1 = cbuf.insts_size());
+ debug_only(int off1 = __ offset());
assert(off1 - off0 == 2 * Assembler::InstructionSize, "correct size prediction");
// restore mark
- cbuf.insts()->set_mark(mark);
+ __ set_inst_mark(mark);
%}
enc_class preserve_SP %{
// preserve mark
- address mark = cbuf.insts()->mark();
- debug_only(int off0 = cbuf.insts_size());
- C2_MacroAssembler _masm(&cbuf);
+ address mark = __ inst_mark();
+ debug_only(int off0 = __ offset());
// FP is preserved across all calls, even compiled calls.
// Use it to preserve SP in places where the callee might change the SP.
__ mov(Rmh_SP_save, SP);
- debug_only(int off1 = cbuf.insts_size());
+ debug_only(int off1 = __ offset());
assert(off1 - off0 == 4, "correct size prediction");
// restore mark
- cbuf.insts()->set_mark(mark);
+ __ set_inst_mark(mark);
%}
enc_class restore_SP %{
- C2_MacroAssembler _masm(&cbuf);
__ mov(SP, Rmh_SP_save);
%}
enc_class Java_Dynamic_Call (method meth) %{
- C2_MacroAssembler _masm(&cbuf);
Register R8_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
assert(R8_ic_reg == Ricklass, "should be");
__ set_inst_mark();
@@ -1281,9 +1274,9 @@ encode %{
address virtual_call_oop_addr = __ inst_mark();
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
- emit_call_reloc(cbuf, as_MachCall(), $meth, RelocationHolder::none);
+ emit_call_reloc(masm, as_MachCall(), $meth, RelocationHolder::none);
%}
enc_class LdReplImmI(immI src, regD dst, iRegI tmp, int cnt, int wth) %{
@@ -1300,7 +1293,6 @@ encode %{
val |= (val << bit_width);
}
}
- C2_MacroAssembler _masm(&cbuf);
if (val == -1) {
__ mvn($tmp$$Register, 0);
@@ -1317,7 +1309,6 @@ encode %{
// Replicate float con 2 times and pack into vector (8 bytes) in regD.
float fval = $src$$constant;
int val = *((int*)&fval);
- C2_MacroAssembler _masm(&cbuf);
if (val == -1) {
__ mvn($tmp$$Register, 0);
@@ -1332,7 +1323,6 @@ encode %{
enc_class enc_String_Compare(R0RegP str1, R1RegP str2, R2RegI cnt1, R3RegI cnt2, iRegI result, iRegI tmp1, iRegI tmp2) %{
Label Ldone, Lloop;
- C2_MacroAssembler _masm(&cbuf);
Register str1_reg = $str1$$Register;
Register str2_reg = $str2$$Register;
@@ -1424,7 +1414,6 @@ encode %{
enc_class enc_String_Equals(R0RegP str1, R1RegP str2, R2RegI cnt, iRegI result, iRegI tmp1, iRegI tmp2) %{
Label Lchar, Lchar_loop, Ldone, Lequal;
- C2_MacroAssembler _masm(&cbuf);
Register str1_reg = $str1$$Register;
Register str2_reg = $str2$$Register;
@@ -1486,7 +1475,6 @@ encode %{
enc_class enc_Array_Equals(R0RegP ary1, R1RegP ary2, iRegI tmp1, iRegI tmp2, iRegI tmp3, iRegI result) %{
Label Ldone, Lloop, Lequal;
- C2_MacroAssembler _masm(&cbuf);
Register ary1_reg = $ary1$$Register;
Register ary2_reg = $ary2$$Register;
@@ -8847,7 +8835,7 @@ instruct CallStaticJavaDirect( method meth ) %{
ins_cost(CALL_COST);
format %{ "CALL,static ==> " %}
- ins_encode( Java_Static_Call( meth ), call_epilog );
+ ins_encode( SetInstMark, Java_Static_Call( meth ), call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@@ -8861,7 +8849,7 @@ instruct CallStaticJavaHandle( method meth ) %{
ins_cost(CALL_COST);
format %{ "CALL,static/MethodHandle ==> " %}
- ins_encode( preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog );
+ ins_encode( SetInstMark, preserve_SP, Java_Static_Call( meth ), restore_SP, call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@@ -8873,7 +8861,7 @@ instruct CallDynamicJavaDirect( method meth ) %{
ins_cost(CALL_COST);
format %{ "MOV_OOP (empty),R_R8\n\t"
"CALL,dynamic ; NOP ==> " %}
- ins_encode( Java_Dynamic_Call( meth ), call_epilog );
+ ins_encode( SetInstMark, Java_Dynamic_Call( meth ), call_epilog, ClearInstMark );
ins_pipe(call);
%}
@@ -8883,8 +8871,8 @@ instruct CallRuntimeDirect(method meth) %{
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL,runtime" %}
- ins_encode( Java_To_Runtime( meth ),
- call_epilog );
+ ins_encode( SetInstMark, Java_To_Runtime( meth ),
+ call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@@ -8895,8 +8883,8 @@ instruct CallLeafDirect(method meth) %{
ins_cost(CALL_COST);
format %{ "CALL,runtime leaf" %}
// TODO: need save_last_PC here?
- ins_encode( Java_To_Runtime( meth ),
- call_epilog );
+ ins_encode( SetInstMark, Java_To_Runtime( meth ),
+ call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
@@ -8907,8 +8895,8 @@ instruct CallLeafNoFPDirect(method meth) %{
ins_cost(CALL_COST);
format %{ "CALL,runtime leaf nofp" %}
// TODO: need save_last_PC here?
- ins_encode( Java_To_Runtime( meth ),
- call_epilog );
+ ins_encode( SetInstMark, Java_To_Runtime( meth ),
+ call_epilog, ClearInstMark );
ins_pipe(simple_call);
%}
diff --git a/src/hotspot/cpu/arm/compiledIC_arm.cpp b/src/hotspot/cpu/arm/compiledIC_arm.cpp
index 71389d2353d..a2c37e2907c 100644
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp
@@ -34,20 +34,18 @@
// ----------------------------------------------------------------------------
#if COMPILER2_OR_JVMCI
-#define __ _masm.
+#define __ masm->
// emit call stub, compiled java to interpreter
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
// set (empty), R9
// b -1
if (mark == nullptr) {
- mark = cbuf.insts_mark(); // get mark within main instrs section
+ mark = __ inst_mark(); // get mark within main instrs section
}
- MacroAssembler _masm(&cbuf);
-
address base = __ start_a_stub(to_interp_stub_size());
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed
diff --git a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
index 355ac4815d5..0780837e8e5 100644
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
@@ -77,18 +77,14 @@
// Usage of r1 and r2 in the stubs allows to distinguish them.
const int IC_pos_in_java_to_interp_stub = 8;
-#define __ _masm.
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
+#define __ masm->
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark/* = nullptr*/) {
#ifdef COMPILER2
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
- mark = cbuf.insts_mark();
+ mark = __ inst_mark();
}
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
// Start the stub.
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
diff --git a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoah_ppc.ad b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoah_ppc.ad
index 4825ca9cf81..eb4894656e2 100644
--- a/src/hotspot/cpu/ppc/gc/shenandoah/shenandoah_ppc.ad
+++ b/src/hotspot/cpu/ppc/gc/shenandoah/shenandoah_ppc.ad
@@ -44,7 +44,7 @@ instruct compareAndSwapP_shenandoah(iRegIdst res, indirect mem, iRegPsrc oldval,
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@@ -65,7 +65,7 @@ instruct compareAndSwapN_shenandoah(iRegIdst res, indirect mem, iRegNsrc oldval,
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@@ -86,7 +86,7 @@ instruct compareAndSwapP_acq_shenandoah(iRegIdst res, indirect mem, iRegPsrc old
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@@ -112,7 +112,7 @@ instruct compareAndSwapN_acq_shenandoah(iRegIdst res, indirect mem, iRegNsrc old
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false, $res$$Register
@@ -137,7 +137,7 @@ instruct compareAndExchangeP_shenandoah(iRegPdst res, indirect mem, iRegPsrc old
format %{ "CMPXCHGD $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
@@ -157,7 +157,7 @@ instruct compareAndExchangeN_shenandoah(iRegNdst res, indirect mem, iRegNsrc old
format %{ "CMPXCHGD $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
@@ -177,7 +177,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPdst res, indirect mem, iRegPsrc
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
@@ -202,7 +202,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNdst res, indirect mem, iRegNsrc
format %{ "CMPXCHGD acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
ShenandoahBarrierSet::assembler()->cmpxchg_oop(
- &_masm,
+ masm,
$mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true, $res$$Register
diff --git a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
index 644fb3def38..b206b6593fb 100644
--- a/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
+++ b/src/hotspot/cpu/ppc/gc/x/x_ppc.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2021 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -32,7 +32,7 @@ source_hpp %{
source %{
-static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
+static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
@@ -45,14 +45,14 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ bind(*stub->continuation());
}
-static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref,
+static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref,
Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ b(*stub->entry());
__ bind(*stub->continuation());
}
-static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
+static void x_compare_and_swap(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp_xchg, Register tmp_mask,
bool weak, bool acquire) {
@@ -70,7 +70,7 @@ static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
__ beq(CCR0, skip_barrier);
// CAS must have failed because pointer in memory is bad.
- x_load_barrier_slow_path(_masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
+ x_load_barrier_slow_path(masm, node, Address(mem), tmp_xchg, res /* used as tmp */);
__ cmpxchgd(CCR0, tmp_xchg, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true, weak);
@@ -89,7 +89,7 @@ static void x_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
}
}
-static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
+static void x_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval, Register tmp,
bool weak, bool acquire) {
// z-specific load barrier requires strong CAS operations.
@@ -104,7 +104,7 @@ static void x_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
__ and_(tmp, tmp, res);
__ beq(CCR0, skip_barrier);
- x_load_barrier_slow_path(_masm, node, Address(mem), res, tmp);
+ x_load_barrier_slow_path(masm, node, Address(mem), res, tmp);
__ cmpxchgd(CCR0, res, oldval, newval, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true, weak);
@@ -138,7 +138,7 @@ instruct xLoadP(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
ins_encode %{
assert($mem$$index == 0, "sanity");
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
- x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
+ x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
%}
ins_pipe(pipe_class_default);
%}
@@ -156,7 +156,7 @@ instruct xLoadP_acq(iRegPdst dst, memoryAlg4 mem, iRegPdst tmp, flagsRegCR0 cr0)
format %{ "LD acq $dst, $mem" %}
ins_encode %{
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
- x_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
+ x_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register, $tmp$$Register, barrier_data());
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
@@ -175,7 +175,7 @@ instruct xCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
- x_compare_and_swap(_masm, this,
+ x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
false /* weak */, false /* acquire */);
@@ -193,7 +193,7 @@ instruct xCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
- x_compare_and_swap(_masm, this,
+ x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
false /* weak */, true /* acquire */);
@@ -211,7 +211,7 @@ instruct xCompareAndSwapPWeak(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "weak CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
- x_compare_and_swap(_masm, this,
+ x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
true /* weak */, false /* acquire */);
@@ -229,7 +229,7 @@ instruct xCompareAndSwapPWeak_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, i
format %{ "weak CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
- x_compare_and_swap(_masm, this,
+ x_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp_xchg$$Register, $tmp_mask$$Register,
true /* weak */, true /* acquire */);
@@ -250,7 +250,7 @@ instruct xCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
- x_compare_and_exchange(_masm, this,
+ x_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* weak */, false /* acquire */);
%}
@@ -270,7 +270,7 @@ instruct xCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
- x_compare_and_exchange(_masm, this,
+ x_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* weak */, true /* acquire */);
%}
@@ -286,7 +286,7 @@ instruct xGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{
__ getandsetd($res$$Register, $newval$$Register, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
- x_load_barrier(_masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
+ x_load_barrier(masm, this, Address(noreg, (intptr_t) 0), $res$$Register, $tmp$$Register, barrier_data());
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ isync();
diff --git a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad
index 777e5a785a7..017574d40ff 100644
--- a/src/hotspot/cpu/ppc/gc/z/z_ppc.ad
+++ b/src/hotspot/cpu/ppc/gc/z/z_ppc.ad
@@ -34,7 +34,7 @@ source %{
#include "gc/z/zBarrierSetAssembler.hpp"
-static void z_color(MacroAssembler& _masm, Register dst, Register src) {
+static void z_color(MacroAssembler* masm, Register dst, Register src) {
assert_different_registers(dst, src);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
__ li(dst, barrier_Relocation::unpatched); // Load color bits.
@@ -47,55 +47,55 @@ static void z_color(MacroAssembler& _masm, Register dst, Register src) {
}
}
-static void z_uncolor(MacroAssembler& _masm, Register ref) {
+static void z_uncolor(MacroAssembler* masm, Register ref) {
__ srdi(ref, ref, ZPointerLoadShift);
}
-static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong) {
+static void check_color(MacroAssembler* masm, Register ref, bool on_non_strong) {
int relocFormat = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
: ZBarrierRelocationFormatLoadBadMask;
__ relocate(barrier_Relocation::spec(), relocFormat);
__ andi_(R0, ref, barrier_Relocation::unpatched);
}
-static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
- Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
+static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
+ Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
- z_uncolor(_masm, ref);
+ z_uncolor(masm, ref);
} else {
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
- check_color(_masm, ref, on_non_strong);
+ check_color(masm, ref, on_non_strong);
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
- z_uncolor(_masm, ref);
+ z_uncolor(masm, ref);
__ bind(*stub->continuation());
}
}
-static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Register ref_base, intptr_t disp, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
- Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
+static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Register ref_base, intptr_t disp, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
+ Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
- z_color(_masm, rnew_zpointer, rnew_zaddress);
+ z_color(masm, rnew_zpointer, rnew_zaddress);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, Address(ref_base, disp), rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
- bs_asm->store_barrier_fast(&_masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
+ bs_asm->store_barrier_fast(masm, ref_base, disp, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
-static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
+static void z_compare_and_swap(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp1, Register tmp2, bool acquire) {
Register rold_zpointer = tmp1, rnew_zpointer = tmp2;
- z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
- z_color(_masm, rold_zpointer, oldval);
+ z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
+ z_color(masm, rold_zpointer, oldval);
__ cmpxchgd(CCR0, R0, rold_zpointer, rnew_zpointer, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
false /* we could support weak, but benefit is questionable */);
@@ -111,17 +111,17 @@ static void z_compare_and_swap(MacroAssembler& _masm, const MachNode* node,
}
}
-static void z_compare_and_exchange(MacroAssembler& _masm, const MachNode* node,
+static void z_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
Register res, Register mem, Register oldval, Register newval,
Register tmp, bool acquire) {
Register rold_zpointer = R0, rnew_zpointer = tmp;
- z_store_barrier(_masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
- z_color(_masm, rold_zpointer, oldval);
+ z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
+ z_color(masm, rold_zpointer, oldval);
__ cmpxchgd(CCR0, res, rold_zpointer, rnew_zpointer, mem,
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
false /* we could support weak, but benefit is questionable */);
- z_uncolor(_masm, res);
+ z_uncolor(masm, res);
if (acquire) {
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -149,7 +149,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
ins_encode %{
assert($mem$$index == 0, "sanity");
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
- z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
+ z_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
%}
ins_pipe(pipe_class_default);
%}
@@ -167,7 +167,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
format %{ "LD acq $dst, $mem" %}
ins_encode %{
__ ld($dst$$Register, $mem$$disp, $mem$$base$$Register);
- z_load_barrier(_masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
+ z_load_barrier(masm, this, Address($mem$$base$$Register, $mem$$disp), $dst$$Register);
// Uses the isync instruction as an acquire barrier.
// This exploits the compare and the branch in the z load barrier (load, compare and branch, isync).
@@ -186,7 +186,7 @@ instruct zStoreP(memoryAlg4 mem, iRegPsrc src, iRegPdst tmp, flagsRegCR0 cr0)
ins_cost(2 * MEMORY_REF_COST);
format %{ "std $mem, $src\t# ptr" %}
ins_encode %{
- z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, $src$$Register, $tmp$$Register, false /* is_atomic */);
+ z_store_barrier(masm, this, $mem$$base$$Register, $mem$$disp, $src$$Register, $tmp$$Register, false /* is_atomic */);
__ std($tmp$$Register, $mem$$disp, $mem$$base$$Register);
%}
ins_pipe(pipe_class_default);
@@ -200,7 +200,7 @@ instruct zStorePNull(memoryAlg4 mem, immP_0 zero, iRegPdst tmp, flagsRegCR0 cr0)
ins_cost(MEMORY_REF_COST);
format %{ "std $mem, null\t# ptr" %}
ins_encode %{
- z_store_barrier(_masm, this, $mem$$base$$Register, $mem$$disp, noreg, $tmp$$Register, false /* is_atomic */);
+ z_store_barrier(masm, this, $mem$$base$$Register, $mem$$disp, noreg, $tmp$$Register, false /* is_atomic */);
__ std($tmp$$Register, $mem$$disp, $mem$$base$$Register);
%}
ins_pipe(pipe_class_default);
@@ -217,7 +217,7 @@ instruct zCompareAndSwapP(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegPsrc
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
- z_compare_and_swap(_masm, this,
+ z_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
false /* acquire */);
@@ -236,7 +236,7 @@ instruct zCompareAndSwapP_acq(iRegIdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as bool; ptr" %}
ins_encode %{
- z_compare_and_swap(_masm, this,
+ z_compare_and_swap(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register,
$tmp1$$Register, $tmp2$$Register,
true /* acquire */);
@@ -257,7 +257,7 @@ instruct zCompareAndExchangeP(iRegPdst res, iRegPdst mem, iRegPsrc oldval, iRegP
format %{ "CMPXCHG $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
- z_compare_and_exchange(_masm, this,
+ z_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
false /* acquire */);
%}
@@ -277,7 +277,7 @@ instruct zCompareAndExchangeP_acq(iRegPdst res, iRegPdst mem, iRegPsrc oldval, i
format %{ "CMPXCHG acq $res, $mem, $oldval, $newval; as ptr; ptr" %}
ins_encode %{
- z_compare_and_exchange(_masm, this,
+ z_compare_and_exchange(masm, this,
$res$$Register, $mem$$Register, $oldval$$Register, $newval$$Register, $tmp$$Register,
true /* acquire */);
%}
@@ -293,9 +293,9 @@ instruct zGetAndSetP(iRegPdst res, iRegPdst mem, iRegPsrc newval, iRegPdst tmp,
format %{ "GetAndSetP $res, $mem, $newval" %}
ins_encode %{
Register rnew_zpointer = $tmp$$Register, result = $res$$Register;
- z_store_barrier(_masm, this, $mem$$Register, 0, $newval$$Register, rnew_zpointer, true /* is_atomic */);
+ z_store_barrier(masm, this, $mem$$Register, 0, $newval$$Register, rnew_zpointer, true /* is_atomic */);
__ getandsetd(result, rnew_zpointer, $mem$$Register, MacroAssembler::cmpxchgx_hint_atomic_update());
- z_uncolor(_masm, result);
+ z_uncolor(masm, result);
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
__ isync();
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 773e60e3f4f..cbe28deb516 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -1079,7 +1079,7 @@ bool followed_by_acquire(const Node *load) {
return false;
}
-#define __ _masm.
+#define __ masm->
// Tertiary op of a LoadP or StoreP encoding.
#define REGP_OP true
@@ -1189,8 +1189,7 @@ int cmprb_Whitespace_reg_reg_prefixedNode::compute_padding(int current_offset) c
//=============================================================================
// Emit an interrupt that is caught by the debugger (for debugging compiler).
-void emit_break(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+void emit_break(C2_MacroAssembler *masm) {
__ illtrap();
}
@@ -1200,8 +1199,8 @@ void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- emit_break(cbuf);
+void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ emit_break(masm);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
@@ -1210,14 +1209,13 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
//=============================================================================
-void emit_nop(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+void emit_nop(C2_MacroAssembler *masm) {
__ nop();
}
-static inline void emit_long(CodeBuffer &cbuf, int value) {
- *((int*)(cbuf.insts_end())) = value;
- cbuf.set_insts_end(cbuf.insts_end() + BytesPerInstWord);
+static inline void emit_long(C2_MacroAssembler *masm, int value) {
+ *((int*)(__ pc())) = value;
+ __ set_inst_end(__ pc() + BytesPerInstWord);
}
//=============================================================================
@@ -1237,7 +1235,7 @@ class CallStubImpl {
public:
// Emit call stub, compiled java to interpreter.
- static void emit_trampoline_stub(C2_MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset);
+ static void emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset);
// Size of call trampoline stub.
// This doesn't need to be accurate to the byte, but it
@@ -1268,7 +1266,7 @@ source %{
// load the call target from the constant pool
// branch via CTR (LR/link still points to the call-site above)
-void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
+void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset) {
address stub = __ emit_trampoline_stub(destination_toc_offset, insts_call_instruction_offset);
if (stub == nullptr) {
ciEnv::current()->record_out_of_memory_failure();
@@ -1299,7 +1297,7 @@ typedef struct {
// - Add a relocation at the branch-and-link instruction.
// - Emit a branch-and-link.
// - Remember the return pc offset.
-EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler &_masm, address entry_point, relocInfo::relocType rtype) {
+EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler *masm, address entry_point, relocInfo::relocType rtype) {
EmitCallOffsets offsets = { -1, -1 };
const int start_offset = __ offset();
offsets.insts_call_instruction_offset = __ offset();
@@ -1316,7 +1314,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler &_masm, address
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
- CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+ CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
__ relocate(rtype);
@@ -1366,7 +1364,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
nodes->push(m2);
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Is postalloc expanded.
ShouldNotReachHere();
}
@@ -1404,9 +1402,8 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
const long framesize = C->output()->frame_size_in_bytes();
assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");
@@ -1556,10 +1553,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->nmethod_entry_barrier(&_masm, push_frame_temp);
+ bs->nmethod_entry_barrier(masm, push_frame_temp);
}
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@@ -1588,9 +1585,8 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
const long framesize = ((long)C->output()->frame_slots()) << LogBytesPerInt;
assert(framesize >= 0, "negative frame-size?");
@@ -1668,7 +1664,7 @@ static enum RC rc_class(OptoReg::Name reg) {
return rc_stack;
}
-static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int reg, int offset,
+static int ld_st_helper(C2_MacroAssembler *masm, const char *op_str, uint opcode, int reg, int offset,
bool do_print, Compile* C, outputStream *st) {
assert(opcode == Assembler::LD_OPCODE ||
@@ -1681,12 +1677,12 @@ static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int r
opcode == Assembler::STFS_OPCODE,
"opcode not supported");
- if (cbuf) {
+ if (masm) {
int d =
(Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
: Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
- emit_long(*cbuf, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
+ emit_long(masm, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
}
#ifndef PRODUCT
else if (do_print) {
@@ -1699,7 +1695,7 @@ static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int r
return 4; // size
}
-uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
+uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
Compile* C = ra_->C;
// Get registers to move.
@@ -1729,8 +1725,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
__ ld(R0, src_offset, R1_SP);
__ std(R0, dst_offset, R1_SP);
__ ld(R0, src_offset+8, R1_SP);
@@ -1742,8 +1737,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
int dst_offset = ra_->reg2offset(dst_lo);
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
__ addi(R0, R1_SP, dst_offset);
__ stxvd2x(Rsrc, R0);
}
@@ -1753,8 +1747,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
int src_offset = ra_->reg2offset(src_lo);
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
__ addi(R0, R1_SP, src_offset);
__ lxvd2x(Rdst, R0);
}
@@ -1764,8 +1757,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
__ xxlor(Rdst, Rsrc, Rsrc);
}
size += 4;
@@ -1784,13 +1776,13 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
- size += ld_st_helper(cbuf, "LD ", Assembler::LD_OPCODE, R0_num, src_offset, !do_size, C, st);
- if (!cbuf && !do_size) st->print("\n\t");
- size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "LD ", Assembler::LD_OPCODE, R0_num, src_offset, !do_size, C, st);
+ if (!masm && !do_size) st->print("\n\t");
+ size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
} else {
- size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
- if (!cbuf && !do_size) st->print("\n\t");
- size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
+ if (!masm && !do_size) st->print("\n\t");
+ size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
}
return size;
}
@@ -1808,8 +1800,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
size = (Rsrc != Rdst) ? 4 : 0;
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
if (size) {
__ mr(Rdst, Rsrc);
}
@@ -1832,9 +1823,9 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
- size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
} else {
- size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
}
return size;
}
@@ -1845,17 +1836,16 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
"expected same type of move for high parts");
- size += ld_st_helper(cbuf, "LD ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "LD ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
} else {
- size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
}
return size;
}
// Check for float reg-reg copy.
if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
__ fmr(Rdst, Rsrc);
@@ -1874,9 +1864,9 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
"expected same type of move for high parts");
- size += ld_st_helper(cbuf, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
} else {
- size += ld_st_helper(cbuf, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
}
return size;
}
@@ -1887,9 +1877,9 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (src_hi != OptoReg::Bad) {
assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
"expected same type of move for high parts");
- size += ld_st_helper(cbuf, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
} else {
- size += ld_st_helper(cbuf, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
+ size += ld_st_helper(masm, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
}
return size;
}
@@ -1914,8 +1904,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation(&cbuf, ra_, false, nullptr);
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -1928,8 +1918,7 @@ void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *) const {
// _count contains the number of nops needed for padding.
for (int i = 0; i < _count; i++) {
__ nop();
@@ -1949,9 +1938,7 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
-
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
@@ -1974,10 +1961,8 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// This is the unverified entry point.
- C2_MacroAssembler _masm(&cbuf);
-
__ ic_check(CodeEntryAlignment);
// Argument is valid and klass is as expected, continue.
}
@@ -1997,8 +1982,8 @@ class HandlerImpl {
public:
- static int emit_exception_handler(CodeBuffer &cbuf);
- static int emit_deopt_handler(CodeBuffer& cbuf);
+ static int emit_exception_handler(C2_MacroAssembler *masm);
+ static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// The exception_handler is a b64_patchable.
@@ -2023,9 +2008,7 @@ public:
source %{
-int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
-
+int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2043,9 +2026,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
-int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
- C2_MacroAssembler _masm(&cbuf);
-
+int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2421,27 +2402,23 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
// needs for encoding need to be specified.
encode %{
enc_class enc_unimplemented %{
- C2_MacroAssembler _masm(&cbuf);
__ unimplemented("Unimplemented mach node encoding in AD file.", 13);
%}
enc_class enc_untested %{
#ifdef ASSERT
- C2_MacroAssembler _masm(&cbuf);
__ untested("Untested mach node encoding in AD file.");
#else
#endif
%}
enc_class enc_lbz(iRegIdst dst, memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lbz($dst$$Register, Idisp, $mem$$base$$Register);
%}
// Load acquire.
enc_class enc_lbz_ac(iRegIdst dst, memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lbz($dst$$Register, Idisp, $mem$$base$$Register);
__ twi_0($dst$$Register);
@@ -2449,16 +2426,12 @@ encode %{
%}
enc_class enc_lhz(iRegIdst dst, memory mem) %{
-
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lhz($dst$$Register, Idisp, $mem$$base$$Register);
%}
// Load acquire.
enc_class enc_lhz_ac(iRegIdst dst, memory mem) %{
-
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lhz($dst$$Register, Idisp, $mem$$base$$Register);
__ twi_0($dst$$Register);
@@ -2466,16 +2439,12 @@ encode %{
%}
enc_class enc_lwz(iRegIdst dst, memory mem) %{
-
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lwz($dst$$Register, Idisp, $mem$$base$$Register);
%}
// Load acquire.
enc_class enc_lwz_ac(iRegIdst dst, memory mem) %{
-
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lwz($dst$$Register, Idisp, $mem$$base$$Register);
__ twi_0($dst$$Register);
@@ -2483,7 +2452,6 @@ encode %{
%}
enc_class enc_ld(iRegLdst dst, memoryAlg4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
// Operand 'ds' requires 4-alignment.
assert((Idisp & 0x3) == 0, "unaligned offset");
@@ -2492,7 +2460,6 @@ encode %{
// Load acquire.
enc_class enc_ld_ac(iRegLdst dst, memoryAlg4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
// Operand 'ds' requires 4-alignment.
assert((Idisp & 0x3) == 0, "unaligned offset");
@@ -2502,14 +2469,11 @@ encode %{
%}
enc_class enc_lfd(RegF dst, memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
%}
enc_class enc_load_long_constL(iRegLdst dst, immL src, iRegLdst toc) %{
-
- C2_MacroAssembler _masm(&cbuf);
int toc_offset = 0;
address const_toc_addr;
@@ -2531,9 +2495,6 @@ encode %{
%}
enc_class enc_load_long_constL_hi(iRegLdst dst, iRegLdst toc, immL src) %{
-
- C2_MacroAssembler _masm(&cbuf);
-
if (!ra_->C->output()->in_scratch_emit_size()) {
address const_toc_addr;
// Create a non-oop constant, no relocation needed.
@@ -2765,8 +2726,6 @@ encode %{
%}
enc_class enc_load_long_constP(iRegLdst dst, immP src, iRegLdst toc) %{
-
- C2_MacroAssembler _masm(&cbuf);
int toc_offset = 0;
intptr_t val = $src$$constant;
@@ -2799,8 +2758,6 @@ encode %{
%}
enc_class enc_load_long_constP_hi(iRegLdst dst, immP src, iRegLdst toc) %{
-
- C2_MacroAssembler _masm(&cbuf);
if (!ra_->C->output()->in_scratch_emit_size()) {
intptr_t val = $src$$constant;
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
@@ -2935,13 +2892,11 @@ encode %{
%}
enc_class enc_stw(iRegIsrc src, memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ stw($src$$Register, Idisp, $mem$$base$$Register);
%}
enc_class enc_std(iRegIsrc src, memoryAlg4 mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
// Operand 'ds' requires 4-alignment.
assert((Idisp & 0x3) == 0, "unaligned offset");
@@ -2949,13 +2904,11 @@ encode %{
%}
enc_class enc_stfs(RegF src, memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ stfs($src$$FloatRegister, Idisp, $mem$$base$$Register);
%}
enc_class enc_stfd(RegF src, memory mem) %{
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
__ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
%}
@@ -3154,8 +3107,6 @@ encode %{
%}
enc_class enc_cmove_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src, cmpOp cmp) %{
-
- C2_MacroAssembler _masm(&cbuf);
int cc = $cmp$$cmpcode;
int flags_reg = $crx$$reg;
Label done;
@@ -3167,8 +3118,6 @@ encode %{
%}
enc_class enc_cmove_imm(iRegIdst dst, flagsRegSrc crx, immI16 src, cmpOp cmp) %{
-
- C2_MacroAssembler _masm(&cbuf);
Label done;
assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
// Branch if not (cmp crx).
@@ -3180,14 +3129,10 @@ encode %{
// This enc_class is needed so that scheduler gets proper
// input mapping for latency computation.
enc_class enc_andc(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
- C2_MacroAssembler _masm(&cbuf);
__ andc($dst$$Register, $src1$$Register, $src2$$Register);
%}
enc_class enc_convI2B_regI__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
-
- C2_MacroAssembler _masm(&cbuf);
-
Label done;
__ cmpwi($crx$$CondRegister, $src$$Register, 0);
__ li($dst$$Register, $zero$$constant);
@@ -3197,9 +3142,6 @@ encode %{
%}
enc_class enc_convP2B_regP__cmove(iRegIdst dst, iRegPsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
-
- C2_MacroAssembler _masm(&cbuf);
-
Label done;
__ cmpdi($crx$$CondRegister, $src$$Register, 0);
__ li($dst$$Register, $zero$$constant);
@@ -3209,8 +3151,6 @@ encode %{
%}
enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL mem ) %{
-
- C2_MacroAssembler _masm(&cbuf);
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
Label done;
__ bso($crx$$CondRegister, done);
@@ -3219,8 +3159,6 @@ encode %{
%}
enc_class enc_cmove_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
-
- C2_MacroAssembler _masm(&cbuf);
Label done;
__ bso($crx$$CondRegister, done);
__ mffprd($dst$$Register, $src$$FloatRegister);
@@ -3228,8 +3166,6 @@ encode %{
%}
enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
-
- C2_MacroAssembler _masm(&cbuf);
Label d; // dummy
__ bind(d);
Label* p = ($lbl$$label);
@@ -3257,8 +3193,6 @@ encode %{
enc_class enc_bc_far(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
// The scheduler doesn't know about branch shortening, so we set the opcode
// to ppc64Opcode_bc in order to hide this detail from the scheduler.
-
- C2_MacroAssembler _masm(&cbuf);
Label d; // dummy
__ bind(d);
Label* p = ($lbl$$label);
@@ -3333,7 +3267,6 @@ encode %{
// Fake operand dst needed for PPC scheduler.
assert($dst$$constant == 0x0, "dst must be 0x0");
- C2_MacroAssembler _masm(&cbuf);
// Mark the code position where the load from the safepoint
// polling page was emitted as relocInfo::poll_type.
__ relocate(relocInfo::poll_type);
@@ -3387,13 +3320,11 @@ encode %{
//
// Usage of r1 and r2 in the stubs allows to distinguish them.
enc_class enc_java_static_call(method meth) %{
-
- C2_MacroAssembler _masm(&cbuf);
address entry_point = (address)$meth$$method;
if (!_method) {
// A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
- emit_call_with_trampoline_stub(_masm, entry_point, relocInfo::runtime_call_type);
+ emit_call_with_trampoline_stub(masm, entry_point, relocInfo::runtime_call_type);
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
} else {
// Remember the offset not the address.
@@ -3413,9 +3344,9 @@ encode %{
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
// Emit the trampoline stub which will be related to the branch-and-link below.
- CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+ CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, start_offset);
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
__ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index));
@@ -3423,11 +3354,12 @@ encode %{
// Note: At this point we do not have the address of the trampoline
// stub, and the entry point might be too far away for bl, so __ pc()
// serves as dummy and the bl will be patched later.
- cbuf.set_insts_mark();
+ __ set_inst_mark();
__ bl(__ pc()); // Emits a relocation.
// The stub for call to interpreter.
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm);
+ __ clear_inst_mark();
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -3438,9 +3370,6 @@ encode %{
// Second node of expanded dynamic call - the call.
enc_class enc_java_dynamic_call_sched(method meth) %{
-
- C2_MacroAssembler _masm(&cbuf);
-
if (!ra_->C->output()->in_scratch_emit_size()) {
// Create a call trampoline stub for the given method.
const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
@@ -3450,7 +3379,7 @@ encode %{
return;
}
const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
- CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
+ CallStubImpl::emit_trampoline_stub(masm, entry_point_const_toc_offset, __ offset());
if (ra_->C->env()->failing()) { return; } // Code cache may be full.
// Build relocation at call site with ic position as data.
@@ -3466,7 +3395,7 @@ encode %{
const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
"should be load from TOC");
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
}
@@ -3541,7 +3470,6 @@ encode %{
// Toc is only passed so that it can be used in ins_encode statement.
// In the code we have to use $constanttablebase.
enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
- C2_MacroAssembler _masm(&cbuf);
int start_offset = __ offset();
Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
@@ -3564,7 +3492,7 @@ encode %{
// CALL to fixup routine. Fixup routine uses ScopeDesc info
// to determine who we intended to call.
__ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
- emit_call_with_trampoline_stub(_masm, (address)$meth$$method, relocInfo::none);
+ emit_call_with_trampoline_stub(masm, (address)$meth$$method, relocInfo::none);
if (ciEnv::current()->failing()) { return; } // Code cache may be full.
assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
"Fix constant in ret_addr_offset(), expected %d", __ offset() - start_offset);
@@ -3595,8 +3523,6 @@ encode %{
// a runtime call
enc_class enc_java_to_runtime_call (method meth) %{
-
- C2_MacroAssembler _masm(&cbuf);
const address start_pc = __ pc();
#if defined(ABI_ELFv2)
@@ -3630,7 +3556,6 @@ encode %{
// This enc_class is needed so that scheduler gets proper
// input mapping for latency computation.
enc_class enc_leaf_call_mtctr(iRegLsrc src) %{
- C2_MacroAssembler _masm(&cbuf);
__ mtctr($src$$Register);
%}
@@ -4085,7 +4010,7 @@ operand immN() %{
interface(CONST_INTER);
%}
-// Null Pointer Immediate
+// Null Pointer Immediate
operand immN_0() %{
predicate(n->get_narrowcon() == 0);
match(ConN);
@@ -14581,8 +14506,9 @@ instruct RethrowException() %{
format %{ "Jmp rethrow_stub" %}
ins_encode %{
- cbuf.set_insts_mark();
+ __ set_inst_mark();
__ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type);
+ __ clear_inst_mark();
%}
ins_pipe(pipe_class_call);
%}
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 66b19794b05..666b46817f9 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -1760,8 +1760,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(L_exit);
// static stub for the call above
- CodeBuffer* cbuf = masm->code_section()->outer();
- stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
+ stub = CompiledDirectCall::emit_to_interp_stub(masm, c2i_call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@@ -1853,8 +1852,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ blr();
// static stub for the call above
- CodeBuffer* cbuf = masm->code_section()->outer();
- stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, call_pc);
+ stub = CompiledDirectCall::emit_to_interp_stub(masm, call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
diff --git a/src/hotspot/cpu/riscv/compiledIC_riscv.cpp b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
index fdb2bcb06ff..60dceb3ada7 100644
--- a/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
+++ b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
@@ -35,23 +35,19 @@
// ----------------------------------------------------------------------------
-#define __ _masm.
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
- precond(cbuf.stubs()->start() != badAddress);
- precond(cbuf.stubs()->end() != badAddress);
+#define __ masm->
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
+ precond(__ code()->stubs()->start() != badAddress);
+ precond(__ code()->stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// mv xmethod, 0
// jalr -4 # to self
if (mark == nullptr) {
- mark = cbuf.insts_mark(); // Get mark within main instrs section.
+ mark = __ inst_mark(); // Get mark within main instrs section.
}
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
address base = __ start_a_stub(to_interp_stub_size());
int offset = __ offset();
if (base == nullptr) {
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv.ad b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv.ad
index 6c855f23c2a..81bcd3c1362 100644
--- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv.ad
+++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoah_riscv.ad
@@ -41,7 +41,7 @@ instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -62,7 +62,7 @@ instruct compareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldval, i
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -84,7 +84,7 @@ instruct compareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -106,7 +106,7 @@ instruct compareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN oldval
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -126,7 +126,7 @@ instruct compareAndExchangeN_shenandoah(iRegNNoSp res, indirect mem, iRegN oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@@ -146,7 +146,7 @@ instruct compareAndExchangeP_shenandoah(iRegPNoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@@ -168,7 +168,7 @@ instruct weakCompareAndSwapN_shenandoah(iRegINoSp res, indirect mem, iRegN oldva
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not current supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -189,7 +189,7 @@ instruct compareAndExchangeNAcq_shenandoah(iRegNNoSp res, indirect mem, iRegN ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register);
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@@ -210,7 +210,7 @@ instruct compareAndExchangePAcq_shenandoah(iRegPNoSp res, indirect mem, iRegP ol
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register);
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
true /* is_cae */, $res$$Register);
%}
@@ -230,7 +230,7 @@ instruct weakCompareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldva
ins_encode %{
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::relaxed /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -253,7 +253,7 @@ instruct weakCompareAndSwapNAcq_shenandoah(iRegINoSp res, indirect mem, iRegN ol
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not current supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
@@ -276,7 +276,7 @@ instruct weakCompareAndSwapPAcq_shenandoah(iRegINoSp res, indirect mem, iRegP ol
Register tmp = $tmp$$Register;
__ mv(tmp, $oldval$$Register); // Must not clobber oldval.
// Weak is not current supported by ShenandoahBarrierSet::cmpxchg_oop
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm, $mem$$Register, tmp, $newval$$Register,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm, $mem$$Register, tmp, $newval$$Register,
Assembler::aq /* acquire */, Assembler::rl /* release */,
false /* is_cae */, $res$$Register);
%}
diff --git a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
index 3d0273109ac..ef02f301c6a 100644
--- a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
+++ b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
@@ -32,7 +32,7 @@ source_hpp %{
source %{
-static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
+static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, int barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
@@ -43,7 +43,7 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
__ bind(*stub->continuation());
}
-static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+static void x_load_barrier_slow_path(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
__ j(*stub->entry());
__ bind(*stub->continuation());
@@ -65,7 +65,7 @@ instruct xLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp)
ins_encode %{
const Address ref_addr (as_Register($mem$$base), $mem$$disp);
__ ld($dst$$Register, ref_addr);
- x_load_barrier(_masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
+ x_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(iload_reg_mem);
@@ -94,7 +94,7 @@ instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $tmp$$Register);
__ beqz(t0, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result_as_bool */);
@@ -128,7 +128,7 @@ instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $tmp$$Register);
__ beqz(t0, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result_as_bool */);
@@ -157,7 +157,7 @@ instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $res$$Register);
__ beqz(t0, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
__ bind(good);
@@ -185,7 +185,7 @@ instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
__ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
__ andr(t0, t0, $res$$Register);
__ beqz(t0, good);
- x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
+ x_load_barrier_slow_path(masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
__ bind(good);
@@ -206,7 +206,7 @@ instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
- x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
+ x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
@@ -223,7 +223,7 @@ instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp)
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
- x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
+ x_load_barrier(masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
%}
ins_pipe(pipe_serial);
%}
diff --git a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
index 29de8234029..4c94e504475 100644
--- a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
+++ b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
@@ -33,7 +33,7 @@ source_hpp %{
source %{
#include "gc/z/zBarrierSetAssembler.hpp"
-static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, Register src, Register tmp) {
+static void z_color(MacroAssembler* masm, const MachNode* node, Register dst, Register src, Register tmp) {
assert_different_registers(dst, tmp);
__ relocate(barrier_Relocation::spec(), [&] {
@@ -43,11 +43,11 @@ static void z_color(MacroAssembler& _masm, const MachNode* node, Register dst, R
__ orr(dst, dst, tmp);
}
-static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
+static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
__ srli(ref, ref, ZPointerLoadShift);
}
-static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong, Register result) {
+static void check_color(MacroAssembler* masm, Register ref, bool on_non_strong, Register result) {
int format = on_non_strong ? ZBarrierRelocationFormatMarkBadMask
: ZBarrierRelocationFormatLoadBadMask;
__ relocate(barrier_Relocation::spec(), [&] {
@@ -56,35 +56,35 @@ static void check_color(MacroAssembler& _masm, Register ref, bool on_non_strong,
__ andr(result, ref, result);
}
-static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
if (node->barrier_data() == ZBarrierElided) {
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
return;
}
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
Label good;
- check_color(_masm, ref, on_non_strong, tmp);
+ check_color(masm, ref, on_non_strong, tmp);
__ beqz(tmp, good);
__ j(*stub->entry());
__ bind(good);
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
-static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
+static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
if (node->barrier_data() == ZBarrierElided) {
- z_color(_masm, node, rnew_zpointer, rnew_zaddress, tmp);
+ z_color(masm, node, rnew_zpointer, rnew_zaddress, tmp);
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
- bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
+ bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, tmp, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
%}
@@ -103,7 +103,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp)
ins_encode %{
const Address ref_addr(as_Register($mem$$base), $mem$$disp);
__ ld($dst$$Register, ref_addr);
- z_load_barrier(_masm, this, ref_addr, $dst$$Register, $tmp$$Register);
+ z_load_barrier(masm, this, ref_addr, $dst$$Register, $tmp$$Register);
%}
ins_pipe(iload_reg_mem);
@@ -120,7 +120,7 @@ instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2)
format %{ "sd $mem, $src\t# ptr" %}
ins_encode %{
const Address ref_addr(as_Register($mem$$base), $mem$$disp);
- z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp1$$Register, $tmp2$$Register, false /* is_atomic */);
+ z_store_barrier(masm, this, ref_addr, $src$$Register, $tmp1$$Register, $tmp2$$Register, false /* is_atomic */);
__ sd($tmp1$$Register, ref_addr);
%}
ins_pipe(pipe_serial);
@@ -141,8 +141,8 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
%}
@@ -164,8 +164,8 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
%}
@@ -185,10 +185,10 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
- z_uncolor(_masm, this, $res$$Register);
+ z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -207,10 +207,10 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
ins_encode %{
guarantee($mem$$disp == 0, "impossible encoding");
Address ref_addr($mem$$Register);
- z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
- z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
+ z_color(masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+ z_store_barrier(masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
__ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
- z_uncolor(_masm, this, $res$$Register);
+ z_uncolor(masm, this, $res$$Register);
%}
ins_pipe(pipe_slow);
@@ -226,9 +226,9 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}
ins_encode %{
- z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
+ z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
__ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
- z_uncolor(_masm, this, $prev$$Register);
+ z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
@@ -244,9 +244,9 @@ instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp)
format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}
ins_encode %{
- z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
+ z_store_barrier(masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
__ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
- z_uncolor(_masm, this, $prev$$Register);
+ z_uncolor(masm, this, $prev$$Register);
%}
ins_pipe(pipe_serial);
%}
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 56a2d71bb5f..2c69486a0e5 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1060,8 +1060,8 @@ class HandlerImpl {
public:
- static int emit_exception_handler(CodeBuffer &cbuf);
- static int emit_deopt_handler(CodeBuffer& cbuf);
+ static int emit_exception_handler(C2_MacroAssembler *masm);
+ static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_branch_size();
@@ -1207,7 +1207,7 @@ bool needs_acquiring_load_reserved(const Node *n)
// so we can just return true here
return true;
}
-#define __ _masm.
+#define __ masm->
// advance declarations for helper functions to convert register
// indices to register objects
@@ -1291,8 +1291,7 @@ void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ ebreak();
}
@@ -1308,9 +1307,8 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
#endif
- void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
- C2_MacroAssembler _masm(&cbuf);
- Assembler::CompressibleRegion cr(&_masm); // nops shall be 2-byte under RVC for alignment purposes.
+ void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
+ Assembler::CompressibleRegion cr(masm); // nops shall be 2-byte under RVC for alignment purposes.
for (int i = 0; i < _count; i++) {
__ nop();
}
@@ -1332,7 +1330,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
ShouldNotReachHere();
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@@ -1376,10 +1374,9 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
assert_cond(ra_ != nullptr);
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
// n.b. frame size includes space for return pc and fp
const int framesize = C->output()->frame_size_in_bytes();
@@ -1387,7 +1384,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// insert a nop at the start of the prolog so we can patch in a
// branch if we need to invalidate the method later
{
- Assembler::IncompressibleRegion ir(&_masm); // keep the nop as 4 bytes for patching.
+ Assembler::IncompressibleRegion ir(masm); // keep the nop as 4 bytes for patching.
MacroAssembler::assert_alignment(__ pc());
__ nop(); // 4 bytes
}
@@ -1431,7 +1428,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
guard = &stub->guard();
}
// In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
- bs->nmethod_entry_barrier(&_masm, slow_path, continuation, guard);
+ bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
}
}
@@ -1439,7 +1436,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Unimplemented();
}
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@@ -1490,10 +1487,9 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
assert_cond(ra_ != nullptr);
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
assert_cond(C != nullptr);
int framesize = C->output()->frame_size_in_bytes();
@@ -1567,7 +1563,7 @@ static enum RC rc_class(OptoReg::Name reg) {
return rc_stack;
}
-uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
+uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
assert_cond(ra_ != nullptr);
Compile* C = ra_->C;
@@ -1601,8 +1597,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (bottom_type()->isa_vect() != nullptr) {
uint ireg = ideal_reg();
- if (ireg == Op_VecA && cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (ireg == Op_VecA && masm) {
int vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack to stack
@@ -1620,8 +1615,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
} else {
ShouldNotReachHere();
}
- } else if (bottom_type()->isa_vectmask() && cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ } else if (bottom_type()->isa_vectmask() && masm) {
int vmask_size_in_bytes = Matcher::scalable_predicate_reg_slots() * 32 / 8;
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack to stack
@@ -1640,8 +1634,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
ShouldNotReachHere();
}
}
- } else if (cbuf != nullptr) {
- C2_MacroAssembler _masm(cbuf);
+ } else if (masm != nullptr) {
switch (src_lo_rc) {
case rc_int:
if (dst_lo_rc == rc_int) { // gpr --> gpr copy
@@ -1753,8 +1746,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation(&cbuf, ra_, false, nullptr);
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -1773,9 +1766,8 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
- Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see BoxLockNode::size()
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ Assembler::IncompressibleRegion ir(masm); // Fixed length: see BoxLockNode::size()
assert_cond(ra_ != nullptr);
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
@@ -1820,10 +1812,9 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
}
#endif
-void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
+void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
- C2_MacroAssembler _masm(&cbuf);
__ ic_check(CodeEntryAlignment);
// Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
@@ -1842,13 +1833,12 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const
//=============================================================================
// Emit exception handler code.
-int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
+int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// auipc t0, #exception_blob_entry_point
// jr (offset)t0
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -1862,11 +1852,8 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf)
}
// Emit deopt handler code.
-int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf)
+int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a handler.
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2201,14 +2188,12 @@ encode %{
// BEGIN Non-volatile memory access
enc_class riscv_enc_mov_imm(iRegIorL dst, immIorL src) %{
- C2_MacroAssembler _masm(&cbuf);
int64_t con = (int64_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
__ mv(dst_reg, con);
%}
enc_class riscv_enc_mov_p(iRegP dst, immP src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr || con == (address)1) {
@@ -2227,18 +2212,15 @@ encode %{
%}
enc_class riscv_enc_mov_p1(iRegP dst) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mv(dst_reg, 1);
%}
enc_class riscv_enc_mov_byte_map_base(iRegP dst) %{
- C2_MacroAssembler _masm(&cbuf);
__ load_byte_map_base($dst$$Register);
%}
enc_class riscv_enc_mov_n(iRegN dst, immN src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@@ -2251,13 +2233,11 @@ encode %{
%}
enc_class riscv_enc_mov_zero(iRegNorP dst) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
__ mv(dst_reg, zr);
%}
enc_class riscv_enc_mov_nk(iRegN dst, immNKlass src) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
@@ -2270,42 +2250,36 @@ encode %{
%}
enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
- C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
- C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
- C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
- C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
- C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
- C2_MacroAssembler _masm(&cbuf);
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
@@ -2314,13 +2288,11 @@ encode %{
// compare and branch instruction encodings
enc_class riscv_enc_j(label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label* L = $lbl$$label;
__ j(*L);
%}
enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label* L = $lbl$$label;
switch ($cmp$$cmpcode) {
case(BoolTest::ge):
@@ -2344,7 +2316,6 @@ encode %{
Label miss;
Label done;
- C2_MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
nullptr, &miss);
if ($primary) {
@@ -2363,8 +2334,7 @@ encode %{
%}
enc_class riscv_enc_java_static_call(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
- Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset
+ Assembler::IncompressibleRegion ir(masm); // Fixed length: see ret_addr_offset
address addr = (address)$meth$$method;
address call = nullptr;
@@ -2382,7 +2352,7 @@ encode %{
__ nop();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
call = __ trampoline_call(Address(addr, rspec));
@@ -2394,10 +2364,10 @@ encode %{
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
- cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
+ __ code()->shared_stub_to_interp_for(_method, call - (__ begin()));
} else {
// Emit stub for static call
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -2409,9 +2379,8 @@ encode %{
%}
enc_class riscv_enc_java_dynamic_call(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
- Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset
- int method_index = resolved_method_index(cbuf);
+ Assembler::IncompressibleRegion ir(masm); // Fixed length: see ret_addr_offset
+ int method_index = resolved_method_index(masm);
address call = __ ic_call((address)$meth$$method, method_index);
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2422,7 +2391,6 @@ encode %{
%}
enc_class riscv_enc_call_epilog() %{
- C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
__ call_Unimplemented();
@@ -2430,8 +2398,7 @@ encode %{
%}
enc_class riscv_enc_java_to_runtime(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
- Assembler::IncompressibleRegion ir(&_masm); // Fixed length: see ret_addr_offset
+ Assembler::IncompressibleRegion ir(masm); // Fixed length: see ret_addr_offset
// some calls to generated routines (arraycopy code) are scheduled
// by C2 as runtime calls. if so we can call them using a jr (they
@@ -2463,7 +2430,6 @@ encode %{
// arithmetic encodings
enc_class riscv_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2471,7 +2437,6 @@ encode %{
%}
enc_class riscv_enc_divuw(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2479,7 +2444,6 @@ encode %{
%}
enc_class riscv_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2487,7 +2451,6 @@ encode %{
%}
enc_class riscv_enc_divu(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2495,7 +2458,6 @@ encode %{
%}
enc_class riscv_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2503,7 +2465,6 @@ encode %{
%}
enc_class riscv_enc_moduw(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2511,7 +2472,6 @@ encode %{
%}
enc_class riscv_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2519,7 +2479,6 @@ encode %{
%}
enc_class riscv_enc_modu(iRegI dst, iRegI src1, iRegI src2) %{
- C2_MacroAssembler _masm(&cbuf);
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
@@ -2527,13 +2486,11 @@ encode %{
%}
enc_class riscv_enc_tail_call(iRegP jump_target) %{
- C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
__ jr(target_reg);
%}
enc_class riscv_enc_tail_jmp(iRegP jump_target) %{
- C2_MacroAssembler _masm(&cbuf);
Register target_reg = as_Register($jump_target$$reg);
// exception oop should be in x10
// ret addr has been popped into ra
@@ -2543,12 +2500,10 @@ encode %{
%}
enc_class riscv_enc_rethrow() %{
- C2_MacroAssembler _masm(&cbuf);
__ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
%}
enc_class riscv_enc_ret() %{
- C2_MacroAssembler _masm(&cbuf);
__ ret();
%}
diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad
index 25ad0ba39ea..36f4d499c38 100644
--- a/src/hotspot/cpu/riscv/riscv_v.ad
+++ b/src/hotspot/cpu/riscv/riscv_v.ad
@@ -30,19 +30,19 @@ opclass vmemA(indirect);
source %{
- static void loadStore(C2_MacroAssembler masm, bool is_store,
+ static void loadStore(C2_MacroAssembler* masm, bool is_store,
VectorRegister reg, BasicType bt, Register base,
uint vector_length, Assembler::VectorMask vm = Assembler::unmasked) {
Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
- masm.vsetvli_helper(bt, vector_length);
+ __ vsetvli_helper(bt, vector_length);
if (is_store) {
- masm.vsex_v(reg, base, sew, vm);
+ __ vsex_v(reg, base, sew, vm);
} else {
if (vm == Assembler::v0_t) {
- masm.vxor_vv(reg, reg, reg);
+ __ vxor_vv(reg, reg, reg);
}
- masm.vlex_v(reg, base, sew, vm);
+ __ vlex_v(reg, base, sew, vm);
}
}
@@ -108,7 +108,7 @@ instruct loadV(vReg dst, vmemA mem) %{
format %{ "loadV $dst, $mem\t# vector (rvv)" %}
ins_encode %{
VectorRegister dst_reg = as_VectorRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), false, dst_reg,
+ loadStore(masm, false, dst_reg,
Matcher::vector_element_basic_type(this), as_Register($mem$$base), Matcher::vector_length(this));
%}
ins_pipe(pipe_slow);
@@ -120,7 +120,7 @@ instruct storeV(vReg src, vmemA mem) %{
format %{ "storeV $mem, $src\t# vector (rvv)" %}
ins_encode %{
VectorRegister src_reg = as_VectorRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), true, src_reg,
+ loadStore(masm, true, src_reg,
Matcher::vector_element_basic_type(this, $src), as_Register($mem$$base), Matcher::vector_length(this, $src));
%}
ins_pipe(pipe_slow);
@@ -3154,7 +3154,7 @@ instruct loadV_masked(vReg dst, vmemA mem, vRegMask_V0 v0) %{
format %{ "loadV_masked $dst, $mem, $v0" %}
ins_encode %{
VectorRegister dst_reg = as_VectorRegister($dst$$reg);
- loadStore(C2_MacroAssembler(&cbuf), false, dst_reg,
+ loadStore(masm, false, dst_reg,
Matcher::vector_element_basic_type(this), as_Register($mem$$base),
Matcher::vector_length(this), Assembler::v0_t);
%}
@@ -3166,7 +3166,7 @@ instruct storeV_masked(vReg src, vmemA mem, vRegMask_V0 v0) %{
format %{ "storeV_masked $mem, $src, $v0" %}
ins_encode %{
VectorRegister src_reg = as_VectorRegister($src$$reg);
- loadStore(C2_MacroAssembler(&cbuf), true, src_reg,
+ loadStore(masm, true, src_reg,
Matcher::vector_element_basic_type(this, $src), as_Register($mem$$base),
Matcher::vector_length(this, $src), Assembler::v0_t);
%}
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 5945f9d5fe2..b84e31b40a7 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -974,8 +974,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ j(exit);
- CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1040,8 +1039,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ jr(x11); // the exception handler
}
- CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
diff --git a/src/hotspot/cpu/s390/compiledIC_s390.cpp b/src/hotspot/cpu/s390/compiledIC_s390.cpp
index 3adcfbc85f1..9c9073cb93d 100644
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp
@@ -37,22 +37,18 @@
// ----------------------------------------------------------------------------
#undef __
-#define __ _masm.
+#define __ masm->
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
- mark = cbuf.insts_mark();
+ mark = __ inst_mark();
}
assert(mark != nullptr, "mark must not be null");
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed.
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index e2a11733cd6..28cac16864d 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -584,7 +584,7 @@ source %{
#define BIND(label) __ bind(label); BLOCK_COMMENT(#label ":")
#endif
-#define __ _masm.
+#define __ masm->
#define Z_DISP_SIZE Immediate::is_uimm12((long)opnd_array(1)->disp(ra_,this,2)) ? 4 : 6
#define Z_DISP3_SIZE 6
@@ -666,14 +666,12 @@ int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
return (12 - current_offset) & 2;
}
-void emit_nop(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+void emit_nop(C2_MacroAssembler *masm) {
__ z_nop();
}
// Emit an interrupt that is caught by the debugger (for debugging compiler).
-void emit_break(CodeBuffer &cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+void emit_break(C2_MacroAssembler *masm) {
__ z_illtrap();
}
@@ -683,51 +681,45 @@ void MachBreakpointNode::format(PhaseRegAlloc *, outputStream *os) const {
}
#endif
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- emit_break(cbuf);
+void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ emit_break(masm);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
-static inline void z_emit16(CodeBuffer &cbuf, long value) {
- C2_MacroAssembler _masm(&cbuf);
+static inline void z_emit16(C2_MacroAssembler *masm, long value) {
__ emit_instruction((unsigned long)value, 2);
}
-static inline void z_emit32(CodeBuffer &cbuf, long value) {
- C2_MacroAssembler _masm(&cbuf);
+static inline void z_emit32(C2_MacroAssembler *masm, long value) {
__ emit_instruction((unsigned long)value, 4);
}
-static inline void z_emit48(CodeBuffer &cbuf, long value) {
- C2_MacroAssembler _masm(&cbuf);
+static inline void z_emit48(C2_MacroAssembler *masm, long value) {
__ emit_instruction((unsigned long)value, 6);
}
-static inline unsigned int z_emit_inst(CodeBuffer &cbuf, long value) {
+static inline unsigned int z_emit_inst(C2_MacroAssembler *masm, long value) {
if (value < 0) {
// There obviously has been an unintended sign extension (int->long). Revert it.
value = (long)((unsigned long)((unsigned int)value));
}
- C2_MacroAssembler _masm(&cbuf);
int len = __ emit_instruction((unsigned long)value, 0);
return len;
}
// Check effective address (at runtime) for required alignment.
-static inline void z_assert_aligned(CodeBuffer &cbuf, int disp, Register index, Register base, int alignment) {
- C2_MacroAssembler _masm(&cbuf);
-
+static inline void z_assert_aligned(C2_MacroAssembler *masm, int disp, Register index, Register base, int alignment) {
__ z_lay(Z_R0, disp, index, base);
__ z_nill(Z_R0, alignment-1);
__ z_brc(Assembler::bcondEqual, +3);
__ z_illtrap();
}
-int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, relocInfo::relocType rtype,
+int emit_call_reloc(C2_MacroAssembler *masm, intptr_t entry_point, relocInfo::relocType rtype,
PhaseRegAlloc* ra_, bool is_native_call = false) {
__ set_inst_mark(); // Used in z_enc_java_static_call() and emit_java_to_interp().
address old_mark = __ inst_mark();
@@ -758,7 +750,7 @@ int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, relocInfo::r
return (ret_off - start_off);
}
-static int emit_call_reloc(C2_MacroAssembler &_masm, intptr_t entry_point, RelocationHolder const& rspec) {
+static int emit_call_reloc(C2_MacroAssembler *masm, intptr_t entry_point, RelocationHolder const& rspec) {
__ set_inst_mark(); // Used in z_enc_java_static_call() and emit_java_to_interp().
address old_mark = __ inst_mark();
unsigned int start_off = __ offset();
@@ -790,8 +782,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
// Even with PC-relative TOC addressing, we still need this node.
// Float loads/stores do not support PC-relative addresses.
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
Register Rtoc = as_Register(ra_->get_encode(this));
__ load_toc(Rtoc);
}
@@ -841,9 +832,8 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
}
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
size_t framesize = C->output()->frame_size_in_bytes();
size_t bangsize = C->output()->bang_size_in_bytes();
@@ -892,10 +882,10 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->nmethod_entry_barrier(&_masm);
+ bs->nmethod_entry_barrier(masm);
}
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
@@ -921,8 +911,7 @@ void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
// If this does safepoint polling, then do it here.
@@ -990,15 +979,15 @@ static enum RC rc_class(OptoReg::Name reg) {
}
// Returns size as obtained from z_emit_instr.
-static unsigned int z_ld_st_helper(CodeBuffer *cbuf, const char *op_str, unsigned long opcode,
+static unsigned int z_ld_st_helper(C2_MacroAssembler *masm, const char *op_str, unsigned long opcode,
int reg, int offset, bool do_print, outputStream *os) {
- if (cbuf) {
+ if (masm) {
if (opcode > (1L<<32)) {
- return z_emit_inst(*cbuf, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 48) |
+ return z_emit_inst(masm, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 48) |
Assembler::simm20(offset) | Assembler::reg(Z_R0, 12, 48) | Assembler::regz(Z_SP, 16, 48));
} else {
- return z_emit_inst(*cbuf, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 32) |
+ return z_emit_inst(masm, opcode | Assembler::reg(Matcher::_regEncode[reg], 8, 32) |
Assembler::uimm12(offset, 20, 32) | Assembler::reg(Z_R0, 12, 32) | Assembler::regz(Z_SP, 16, 32));
}
}
@@ -1011,9 +1000,8 @@ static unsigned int z_ld_st_helper(CodeBuffer *cbuf, const char *op_str, unsigne
return (opcode > (1L << 32)) ? 6 : 4;
}
-static unsigned int z_mvc_helper(CodeBuffer *cbuf, int len, int dst_off, int src_off, bool do_print, outputStream *os) {
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+static unsigned int z_mvc_helper(C2_MacroAssembler *masm, int len, int dst_off, int src_off, bool do_print, outputStream *os) {
+ if (masm) {
__ z_mvc(dst_off, len-1, Z_SP, src_off, Z_SP);
}
@@ -1026,7 +1014,7 @@ static unsigned int z_mvc_helper(CodeBuffer *cbuf, int len, int dst_off, int src
return 6;
}
-uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *os) const {
+uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *os) const {
// Get registers to move.
OptoReg::Name src_hi = ra_->get_reg_second(in(1));
OptoReg::Name src_lo = ra_->get_reg_first(in(1));
@@ -1066,17 +1054,17 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
"expected same type of move for high parts");
if (src12 && dst12) {
- return z_mvc_helper(cbuf, is64 ? 8 : 4, dst_offset, src_offset, print, os);
+ return z_mvc_helper(masm, is64 ? 8 : 4, dst_offset, src_offset, print, os);
}
int r0 = Z_R0_num;
if (is64) {
- return z_ld_st_helper(cbuf, "LG ", LG_ZOPC, r0, src_offset, print, os) +
- z_ld_st_helper(cbuf, "STG ", STG_ZOPC, r0, dst_offset, print, os);
+ return z_ld_st_helper(masm, "LG ", LG_ZOPC, r0, src_offset, print, os) +
+ z_ld_st_helper(masm, "STG ", STG_ZOPC, r0, dst_offset, print, os);
}
- return z_ld_st_helper(cbuf, "LY ", LY_ZOPC, r0, src_offset, print, os) +
- z_ld_st_helper(cbuf, "STY ", STY_ZOPC, r0, dst_offset, print, os);
+ return z_ld_st_helper(masm, "LY ", LY_ZOPC, r0, src_offset, print, os) +
+ z_ld_st_helper(masm, "STY ", STY_ZOPC, r0, dst_offset, print, os);
}
// Check for float->int copy. Requires a trip through memory.
@@ -1086,8 +1074,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
// Check for integer reg-reg copy.
if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
__ z_lgr(Rdst, Rsrc);
@@ -1108,14 +1095,14 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
"expected same type of move for high parts");
if (is64) {
- return z_ld_st_helper(cbuf, "STG ", STG_ZOPC, src_lo, dst_offset, print, os);
+ return z_ld_st_helper(masm, "STG ", STG_ZOPC, src_lo, dst_offset, print, os);
}
// else
mnemo = dst12 ? "ST " : "STY ";
opc = dst12 ? ST_ZOPC : STY_ZOPC;
- return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
+ return z_ld_st_helper(masm, mnemo, opc, src_lo, dst_offset, print, os);
}
// Check for integer load
@@ -1128,13 +1115,12 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
mnemo = is64 ? "LG " : "LLGF";
opc = is64 ? LG_ZOPC : LLGF_ZOPC;
- return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
+ return z_ld_st_helper(masm, mnemo, opc, dst_lo, src_offset, print, os);
}
// Check for float reg-reg copy.
if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
__ z_ldr(Rdst, Rsrc);
@@ -1157,13 +1143,13 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (is64) {
mnemo = dst12 ? "STD " : "STDY ";
opc = dst12 ? STD_ZOPC : STDY_ZOPC;
- return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
+ return z_ld_st_helper(masm, mnemo, opc, src_lo, dst_offset, print, os);
}
// else
mnemo = dst12 ? "STE " : "STEY ";
opc = dst12 ? STE_ZOPC : STEY_ZOPC;
- return z_ld_st_helper(cbuf, mnemo, opc, src_lo, dst_offset, print, os);
+ return z_ld_st_helper(masm, mnemo, opc, src_lo, dst_offset, print, os);
}
// Check for float load.
@@ -1174,13 +1160,13 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
if (is64) {
mnemo = src12 ? "LD " : "LDY ";
opc = src12 ? LD_ZOPC : LDY_ZOPC;
- return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
+ return z_ld_st_helper(masm, mnemo, opc, dst_lo, src_offset, print, os);
}
// else
mnemo = src12 ? "LE " : "LEY ";
opc = src12 ? LE_ZOPC : LEY_ZOPC;
- return z_ld_st_helper(cbuf, mnemo, opc, dst_lo, src_offset, print, os);
+ return z_ld_st_helper(masm, mnemo, opc, dst_lo, src_offset, print, os);
}
// --------------------------------------------------------------------
@@ -1216,8 +1202,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation(&cbuf, ra_, false, nullptr);
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -1232,12 +1218,10 @@ void MachNopNode::format(PhaseRegAlloc *, outputStream *os) const {
}
#endif
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ra_) const {
- C2_MacroAssembler _masm(&cbuf);
-
+void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc * ra_) const {
int rem_space = 0;
if (!(ra_->C->output()->in_scratch_emit_size())) {
- rem_space = cbuf.insts()->remaining();
+ rem_space = __ code()->insts()->remaining();
if (rem_space <= _count*2 + 8) {
tty->print("NopNode: _count = %3.3d, remaining space before = %d", _count, rem_space);
}
@@ -1249,7 +1233,7 @@ void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ra_) const {
if (!(ra_->C->output()->in_scratch_emit_size())) {
if (rem_space <= _count*2 + 8) {
- int rem_space2 = cbuf.insts()->remaining();
+ int rem_space2 = __ code()->insts()->remaining();
tty->print_cr(", after = %d", rem_space2);
}
}
@@ -1272,9 +1256,7 @@ void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
#endif
// Take care of the size function, if you make changes here!
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- C2_MacroAssembler _masm(&cbuf);
-
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
__ z_lay(as_Register(reg), offset, Z_SP);
@@ -1340,9 +1322,8 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
}
#endif
-void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// This is Unverified Entry Point
- C2_MacroAssembler _masm(&cbuf);
__ ic_check(CodeEntryAlignment);
}
@@ -1360,8 +1341,8 @@ source_hpp %{ // Header information of the source block.
class HandlerImpl {
public:
- static int emit_exception_handler(CodeBuffer &cbuf);
- static int emit_deopt_handler(CodeBuffer& cbuf);
+ static int emit_exception_handler(C2_MacroAssembler *masm);
+ static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return NativeJump::max_instruction_size();
@@ -1399,9 +1380,8 @@ source %{
// 3) The handler will get patched such that it does not jump to the
// exception blob, but to an entry in the deoptimization blob being
// aware of the exception.
-int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
+int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
Register temp_reg = Z_R1;
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
@@ -1422,8 +1402,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
}
// Emit deopt handler code.
-int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
- C2_MacroAssembler _masm(&cbuf);
+int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
@@ -1701,13 +1680,11 @@ bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack,
// needs for encoding need to be specified.
encode %{
enc_class enc_unimplemented %{
- C2_MacroAssembler _masm(&cbuf);
__ unimplemented("Unimplemented mach node encoding in AD file.", 13);
%}
enc_class enc_untested %{
#ifdef ASSERT
- C2_MacroAssembler _masm(&cbuf);
__ untested("Untested mach node encoding in AD file.");
#endif
%}
@@ -1715,21 +1692,21 @@ encode %{
enc_class z_rrform(iRegI dst, iRegI src) %{
assert((($primary >> 14) & 0x03) == 0, "Instruction format error");
assert( ($primary >> 16) == 0, "Instruction format error");
- z_emit16(cbuf, $primary |
+ z_emit16(masm, $primary |
Assembler::reg($dst$$reg,8,16) |
Assembler::reg($src$$reg,12,16));
%}
enc_class z_rreform(iRegI dst1, iRegI src2) %{
assert((($primary >> 30) & 0x03) == 2, "Instruction format error");
- z_emit32(cbuf, $primary |
+ z_emit32(masm, $primary |
Assembler::reg($dst1$$reg,24,32) |
Assembler::reg($src2$$reg,28,32));
%}
enc_class z_rrfform(iRegI dst1, iRegI src2, iRegI src3) %{
assert((($primary >> 30) & 0x03) == 2, "Instruction format error");
- z_emit32(cbuf, $primary |
+ z_emit32(masm, $primary |
Assembler::reg($dst1$$reg,24,32) |
Assembler::reg($src2$$reg,28,32) |
Assembler::reg($src3$$reg,16,32));
@@ -1737,21 +1714,21 @@ encode %{
enc_class z_riform_signed(iRegI dst, immI16 src) %{
assert((($primary>>30) & 0x03) == 2, "Instruction format error");
- z_emit32(cbuf, $primary |
+ z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::simm16($src$$constant,16,32));
%}
enc_class z_riform_unsigned(iRegI dst, uimmI16 src) %{
assert((($primary>>30) & 0x03) == 2, "Instruction format error");
- z_emit32(cbuf, $primary |
+ z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::uimm16($src$$constant,16,32));
%}
enc_class z_rieform_d(iRegI dst1, iRegI src3, immI src2) %{
assert((($primary>>46) & 0x03) == 3, "Instruction format error");
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst1$$reg,8,48) |
Assembler::reg($src3$$reg,12,48) |
Assembler::simm16($src2$$constant,16,48));
@@ -1759,27 +1736,27 @@ encode %{
enc_class z_rilform_signed(iRegI dst, immL32 src) %{
assert((($primary>>46) & 0x03) == 3, "Instruction format error");
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::simm32($src$$constant,16,48));
%}
enc_class z_rilform_unsigned(iRegI dst, uimmL32 src) %{
assert((($primary>>46) & 0x03) == 3, "Instruction format error");
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::uimm32($src$$constant,16,48));
%}
enc_class z_rsyform_const(iRegI dst, iRegI src1, immI src2) %{
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src1$$reg,12,48) |
Assembler::simm20($src2$$constant));
%}
enc_class z_rsyform_reg_reg(iRegI dst, iRegI src, iRegI shft) %{
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src$$reg,12,48) |
Assembler::reg($shft$$reg,16,48) |
@@ -1788,7 +1765,7 @@ encode %{
enc_class z_rxform_imm_reg_reg(iRegL dst, immL con, iRegL src1, iRegL src2) %{
assert((($primary>>30) & 0x03) == 1, "Instruction format error");
- z_emit32(cbuf, $primary |
+ z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::reg($src1$$reg,12,32) |
Assembler::reg($src2$$reg,16,32) |
@@ -1797,14 +1774,14 @@ encode %{
enc_class z_rxform_imm_reg(iRegL dst, immL con, iRegL src) %{
assert((($primary>>30) & 0x03) == 1, "Instruction format error");
- z_emit32(cbuf, $primary |
+ z_emit32(masm, $primary |
Assembler::reg($dst$$reg,8,32) |
Assembler::reg($src$$reg,16,32) |
Assembler::uimm12($con$$constant,20,32));
%}
enc_class z_rxyform_imm_reg_reg(iRegL dst, immL con, iRegL src1, iRegL src2) %{
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src1$$reg,12,48) |
Assembler::reg($src2$$reg,16,48) |
@@ -1812,7 +1789,7 @@ encode %{
%}
enc_class z_rxyform_imm_reg(iRegL dst, immL con, iRegL src) %{
- z_emit48(cbuf, $primary |
+ z_emit48(masm, $primary |
Assembler::reg($dst$$reg,8,48) |
Assembler::reg($src$$reg,16,48) |
Assembler::simm20($con$$constant));
@@ -1825,14 +1802,14 @@ encode %{
int con = $src$$constant;
assert(VM_Version::has_MemWithImmALUOps(), "unsupported CPU");
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::regz(base,16,48) |
Assembler::simm20(disp) |
Assembler::simm8(con,8,48));
%}
enc_class z_silform(memoryRS mem, immI16 src) %{
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::regz(reg_to_register_object($mem$$base),16,48) |
Assembler::uimm12($mem$$disp,20,48) |
Assembler::simm16($src$$constant,32,48));
@@ -1843,13 +1820,13 @@ encode %{
Register Ridx = $mem$$index$$Register;
if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
if ($primary > (1L << 32)) {
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 48) |
Assembler::uimm12($mem$$disp, 20, 48) |
Assembler::reg(Ridx, 12, 48) |
Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
} else {
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 32) |
Assembler::uimm12($mem$$disp, 20, 32) |
Assembler::reg(Ridx, 12, 32) |
@@ -1861,13 +1838,13 @@ encode %{
Register Ridx = $mem$$index$$Register;
if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
if ($primary > (1L<<32)) {
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 48) |
Assembler::simm20($mem$$disp) |
Assembler::reg(Ridx, 12, 48) |
Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
} else {
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 32) |
Assembler::uimm12($mem$$disp, 20, 32) |
Assembler::reg(Ridx, 12, 32) |
@@ -1881,22 +1858,21 @@ encode %{
if (Ridx == noreg) { Ridx = Z_R0; } // Index is 0.
if (Displacement::is_shortDisp((long)$mem$$disp)) {
- z_emit_inst(cbuf, $secondary |
+ z_emit_inst(masm, $secondary |
Assembler::reg($dst$$reg, 8, isize) |
Assembler::uimm12($mem$$disp, 20, isize) |
Assembler::reg(Ridx, 12, isize) |
Assembler::regz(reg_to_register_object($mem$$base), 16, isize));
} else if (Displacement::is_validDisp((long)$mem$$disp)) {
- z_emit_inst(cbuf, $primary |
+ z_emit_inst(masm, $primary |
Assembler::reg($dst$$reg, 8, 48) |
Assembler::simm20($mem$$disp) |
Assembler::reg(Ridx, 12, 48) |
Assembler::regz(reg_to_register_object($mem$$base), 16, 48));
} else {
- C2_MacroAssembler _masm(&cbuf);
__ load_const_optimized(Z_R1_scratch, $mem$$disp);
if (Ridx != Z_R0) { __ z_agr(Z_R1_scratch, Ridx); }
- z_emit_inst(cbuf, $secondary |
+ z_emit_inst(masm, $secondary |
Assembler::reg($dst$$reg, 8, isize) |
Assembler::uimm12(0, 20, isize) |
Assembler::reg(Z_R1_scratch, 12, isize) |
@@ -1905,7 +1881,6 @@ encode %{
%}
enc_class z_enc_brul(Label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -1918,7 +1893,6 @@ encode %{
%}
enc_class z_enc_bru(Label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -1931,7 +1905,6 @@ encode %{
%}
enc_class z_enc_branch_con_far(cmpOp cmp, Label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -1944,7 +1917,6 @@ encode %{
%}
enc_class z_enc_branch_con_short(cmpOp cmp, Label lbl) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -1957,7 +1929,6 @@ encode %{
%}
enc_class z_enc_cmpb_regreg(iRegI src1, iRegI src2, Label lbl, cmpOpT cmp) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -1981,7 +1952,6 @@ encode %{
%}
enc_class z_enc_cmpb_regregFar(iRegI src1, iRegI src2, Label lbl, cmpOpT cmp) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -2007,7 +1977,6 @@ encode %{
%}
enc_class z_enc_cmpb_regimm(iRegI src1, immI8 src2, Label lbl, cmpOpT cmp) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -2032,7 +2001,6 @@ encode %{
%}
enc_class z_enc_cmpb_regimmFar(iRegI src1, immI8 src2, Label lbl, cmpOpT cmp) %{
- C2_MacroAssembler _masm(&cbuf);
Label* p = $lbl$$label;
// 'p' is `nullptr' when this encoding class is used only to
@@ -2059,8 +2027,6 @@ encode %{
// Call from Java to runtime.
enc_class z_enc_java_to_runtime_call(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
-
// Save return pc before call to the place where we need it, since
// callee doesn't.
unsigned int start_off = __ offset();
@@ -2087,36 +2053,37 @@ encode %{
enc_class z_enc_java_static_call(method meth) %{
// Call to fixup routine. Fixup routine uses ScopeDesc info to determine
// whom we intended to call.
- C2_MacroAssembler _masm(&cbuf);
int ret_offset = 0;
if (!_method) {
- ret_offset = emit_call_reloc(_masm, $meth$$method,
+ ret_offset = emit_call_reloc(masm, $meth$$method,
relocInfo::runtime_call_w_cp_type, ra_);
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
if (_optimized_virtual) {
- ret_offset = emit_call_reloc(_masm, $meth$$method,
+ ret_offset = emit_call_reloc(masm, $meth$$method,
opt_virtual_call_Relocation::spec(method_index));
} else {
- ret_offset = emit_call_reloc(_masm, $meth$$method,
+ ret_offset = emit_call_reloc(masm, $meth$$method,
static_call_Relocation::spec(method_index));
}
}
assert(__ inst_mark() != nullptr, "emit_call_reloc must set_inst_mark()");
if (_method) { // Emit stub for static call.
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm);
if (stub == nullptr) {
+ __ clear_inst_mark();
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
+
+ __ clear_inst_mark();
%}
// Java dynamic call
enc_class z_enc_java_dynamic_call(method meth) %{
- C2_MacroAssembler _masm(&cbuf);
unsigned int start_off = __ offset();
int vtable_index = this->_vtable_index;
@@ -2134,11 +2101,12 @@ encode %{
// Call to fixup routine. Fixup routine uses ScopeDesc info
// to determine who we intended to call.
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
__ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
unsigned int ret_off = __ offset();
assert(__ offset() - start_off == 6, "bad prelude len: %d", __ offset() - start_off);
- ret_off += emit_call_reloc(_masm, $meth$$method, relocInfo::none, ra_);
+ ret_off += emit_call_reloc(masm, $meth$$method, relocInfo::none, ra_);
+ __ clear_inst_mark();
assert(_method, "lazy_constant may be wrong when _method==null");
} else {
assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
@@ -2171,7 +2139,6 @@ encode %{
%}
enc_class z_enc_cmov_reg(cmpOp cmp, iRegI dst, iRegI src) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
Register Rsrc = reg_to_register_object($src$$reg);
@@ -2192,7 +2159,6 @@ encode %{
%}
enc_class z_enc_cmov_imm(cmpOp cmp, iRegI dst, immI16 src) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
int Csrc = $src$$constant;
Assembler::branch_condition cc = (Assembler::branch_condition)$cmp$$cmpcode;
@@ -2209,7 +2175,6 @@ encode %{
%}
enc_class z_enc_cctobool(iRegI res) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rres = reg_to_register_object($res$$reg);
if (VM_Version::has_LoadStoreConditional()) {
@@ -2226,7 +2191,6 @@ encode %{
%}
enc_class z_enc_casI(iRegI compare_value, iRegI exchange_value, iRegP addr_ptr) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rcomp = reg_to_register_object($compare_value$$reg);
Register Rnew = reg_to_register_object($exchange_value$$reg);
Register Raddr = reg_to_register_object($addr_ptr$$reg);
@@ -2235,7 +2199,6 @@ encode %{
%}
enc_class z_enc_casL(iRegL compare_value, iRegL exchange_value, iRegP addr_ptr) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rcomp = reg_to_register_object($compare_value$$reg);
Register Rnew = reg_to_register_object($exchange_value$$reg);
Register Raddr = reg_to_register_object($addr_ptr$$reg);
@@ -2244,7 +2207,6 @@ encode %{
%}
enc_class z_enc_SwapI(memoryRSY mem, iRegI dst, iRegI tmp) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
Register Rtmp = reg_to_register_object($tmp$$reg);
guarantee(Rdst != Rtmp, "Fix match rule to use TEMP_DEF");
@@ -2260,7 +2222,6 @@ encode %{
%}
enc_class z_enc_SwapL(memoryRSY mem, iRegL dst, iRegL tmp) %{
- C2_MacroAssembler _masm(&cbuf);
Register Rdst = reg_to_register_object($dst$$reg);
Register Rtmp = reg_to_register_object($tmp$$reg);
guarantee(Rdst != Rtmp, "Fix match rule to use TEMP_DEF");
@@ -9558,9 +9519,10 @@ instruct RethrowException() %{
// TODO: s390 port size(VARIABLE_SIZE);
format %{ "Jmp rethrow_stub" %}
ins_encode %{
- cbuf.set_insts_mark();
+ __ set_inst_mark();
__ load_const_optimized(Z_R1_scratch, (address)OptoRuntime::rethrow_stub());
__ z_br(Z_R1_scratch);
+ __ clear_inst_mark();
%}
ins_pipe(pipe_class_dummy);
%}
diff --git a/src/hotspot/cpu/x86/assembler_x86.cpp b/src/hotspot/cpu/x86/assembler_x86.cpp
index bb04ae12fa8..0896fbd8bf5 100644
--- a/src/hotspot/cpu/x86/assembler_x86.cpp
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp
@@ -4260,6 +4260,7 @@ void Assembler::vpermb(XMMRegister dst, XMMRegister nds, XMMRegister src, int ve
void Assembler::vpermb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
assert(VM_Version::supports_avx512_vbmi(), "");
+ InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_is_evex_instruction();
vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
@@ -4695,6 +4696,7 @@ void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4712,6 +4714,7 @@ void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4729,6 +4732,7 @@ void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4746,6 +4750,7 @@ void Assembler::pextrb(Register dst, XMMRegister src, int imm8) {
void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4763,6 +4768,7 @@ void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4787,6 +4793,7 @@ void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -4811,6 +4818,7 @@ void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse2(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
@@ -4828,6 +4836,7 @@ void Assembler::vpinsrw(XMMRegister dst, XMMRegister nds, Register src, int imm8
void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
+ InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
@@ -13318,21 +13327,25 @@ void Assembler::decq(Address dst) {
}
void Assembler::fxrstor(Address src) {
+ InstructionMark im(this);
emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(1), src, 0);
}
void Assembler::xrstor(Address src) {
+ InstructionMark im(this);
emit_int24(get_prefixq(src), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(5), src, 0);
}
void Assembler::fxsave(Address dst) {
+ InstructionMark im(this);
emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(0), dst, 0);
}
void Assembler::xsave(Address dst) {
+ InstructionMark im(this);
emit_int24(get_prefixq(dst), 0x0F, (unsigned char)0xAE);
emit_operand(as_Register(4), dst, 0);
}
diff --git a/src/hotspot/cpu/x86/c2_intelJccErratum_x86.cpp b/src/hotspot/cpu/x86/c2_intelJccErratum_x86.cpp
index fce09c31b9c..7b71a6bdbfc 100644
--- a/src/hotspot/cpu/x86/c2_intelJccErratum_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_intelJccErratum_x86.cpp
@@ -114,13 +114,13 @@ int IntelJccErratum::compute_padding(uintptr_t current_offset, const MachNode* m
}
}
-#define __ _masm.
+#define __ _masm->
uintptr_t IntelJccErratumAlignment::pc() {
return (uintptr_t)__ pc();
}
-IntelJccErratumAlignment::IntelJccErratumAlignment(MacroAssembler& masm, int jcc_size) :
+IntelJccErratumAlignment::IntelJccErratumAlignment(MacroAssembler* masm, int jcc_size) :
_masm(masm),
_start_pc(pc()) {
if (!VM_Version::has_intel_jcc_erratum()) {
diff --git a/src/hotspot/cpu/x86/c2_intelJccErratum_x86.hpp b/src/hotspot/cpu/x86/c2_intelJccErratum_x86.hpp
index 415d8a99933..485a2d17c16 100644
--- a/src/hotspot/cpu/x86/c2_intelJccErratum_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_intelJccErratum_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,13 +54,13 @@ public:
class IntelJccErratumAlignment {
private:
- MacroAssembler& _masm;
+ MacroAssembler* _masm;
uintptr_t _start_pc;
uintptr_t pc();
public:
- IntelJccErratumAlignment(MacroAssembler& masm, int jcc_size);
+ IntelJccErratumAlignment(MacroAssembler* masm, int jcc_size);
~IntelJccErratumAlignment();
};
diff --git a/src/hotspot/cpu/x86/compiledIC_x86.cpp b/src/hotspot/cpu/x86/compiledIC_x86.cpp
index 95b41f62b6a..e46f892b388 100644
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp
@@ -34,21 +34,17 @@
// ----------------------------------------------------------------------------
-#define __ _masm.
-address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+#define __ masm->
+address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// movq rbx, 0
// jmp -5 # to self
if (mark == nullptr) {
- mark = cbuf.insts_mark(); // Get mark within main instrs section.
+ mark = __ inst_mark(); // Get mark within main instrs section.
}
- // Note that the code buffer's insts_mark is always relative to insts.
- // That's why we must use the macroassembler to generate a stub.
- MacroAssembler _masm(&cbuf);
-
address base = __ start_a_stub(to_interp_stub_size());
if (base == nullptr) {
return nullptr; // CodeBuffer::expand failed.
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad
index 8675a34324f..3cf82bf9fb1 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad
@@ -40,7 +40,7 @@ instruct compareAndSwapP_shenandoah(rRegI res,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
$res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
false, // swap
$tmp1$$Register, $tmp2$$Register
@@ -61,7 +61,7 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
index cb595f16168..c580d21c9b8 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad
@@ -40,7 +40,7 @@ instruct compareAndSwapP_shenandoah(rRegI res,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
$res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
false, // swap
$tmp1$$Register, $tmp2$$Register
@@ -61,7 +61,7 @@ instruct compareAndSwapN_shenandoah(rRegI res,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
$res$$Register, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
false, // swap
$tmp1$$Register, $tmp2$$Register
@@ -80,7 +80,7 @@ instruct compareAndExchangeN_shenandoah(memory mem_ptr,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
@@ -101,7 +101,7 @@ instruct compareAndExchangeP_shenandoah(memory mem_ptr,
format %{ "shenandoah_cas_oop $mem_ptr,$newval" %}
ins_encode %{
- ShenandoahBarrierSet::assembler()->cmpxchg_oop(&_masm,
+ ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm,
noreg, $mem_ptr$$Address, $oldval$$Register, $newval$$Register,
true, // exchange
$tmp1$$Register, $tmp2$$Register
diff --git a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp
index 38129a9fc81..4805b213084 100644
--- a/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/x/xBarrierSetAssembler_x86.cpp
@@ -375,7 +375,7 @@ OptoReg::Name XBarrierSetAssembler::refine_register(const Node* node, OptoReg::N
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
-extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
+extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
@@ -437,13 +437,15 @@ private:
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
- vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+ C2_MacroAssembler c2_masm(__ code());
+ vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
- vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+ C2_MacroAssembler c2_masm(__ code());
+ vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}
diff --git a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad b/src/hotspot/cpu/x86/gc/x/x_x86_64.ad
index c33a994a4b8..116fb3cbc6d 100644
--- a/src/hotspot/cpu/x86/gc/x/x_x86_64.ad
+++ b/src/hotspot/cpu/x86/gc/x/x_x86_64.ad
@@ -1,5 +1,5 @@
//
-// Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -33,34 +33,34 @@ source %{
#include "c2_intelJccErratum_x86.hpp"
-static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
+static void x_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, uint8_t barrier_data) {
if (barrier_data == XLoadBarrierElided) {
return;
}
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
{
- IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
+ IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::notZero, *stub->entry());
}
__ bind(*stub->continuation());
}
-static void x_load_barrier_cmpxchg(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
+static void x_load_barrier_cmpxchg(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, Label& good) {
XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
{
- IntelJccErratumAlignment intel_alignment(_masm, 10 /* jcc_size */);
+ IntelJccErratumAlignment intel_alignment(masm, 10 /* jcc_size */);
__ testptr(ref, Address(r15_thread, XThreadLocalData::address_bad_mask_offset()));
__ jcc(Assembler::zero, good);
}
{
- IntelJccErratumAlignment intel_alignment(_masm, 5 /* jcc_size */);
+ IntelJccErratumAlignment intel_alignment(masm, 5 /* jcc_size */);
__ jmp(*stub->entry());
}
__ bind(*stub->continuation());
}
-static void x_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
+static void x_cmpxchg_common(MacroAssembler* masm, const MachNode* node, Register mem_reg, Register newval, Register tmp) {
// Compare value (oldval) is in rax
const Address mem = Address(mem_reg, 0);
@@ -73,7 +73,7 @@ static void x_cmpxchg_common(MacroAssembler& _masm, const MachNode* node, Regist
if (node->barrier_data() != XLoadBarrierElided) {
Label good;
- x_load_barrier_cmpxchg(_masm, node, mem, rax, tmp, good);
+ x_load_barrier_cmpxchg(masm, node, mem, rax, tmp, good);
__ movptr(rax, tmp);
__ lock();
__ cmpxchgptr(newval, mem);
@@ -96,7 +96,7 @@ instruct xLoadP(rRegP dst, memory mem, rFlagsReg cr)
ins_encode %{
__ movptr($dst$$Register, $mem$$Address);
- x_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
+ x_load_barrier(masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, barrier_data());
%}
ins_pipe(ialu_reg_mem);
@@ -112,7 +112,7 @@ instruct xCompareAndExchangeP(indirect mem, rax_RegP oldval, rRegP newval, rRegP
ins_encode %{
precond($oldval$$Register == rax);
- x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
+ x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
%}
ins_pipe(pipe_cmpxchg);
@@ -131,7 +131,7 @@ instruct xCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rFla
ins_encode %{
precond($oldval$$Register == rax);
- x_cmpxchg_common(_masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
+ x_cmpxchg_common(masm, this, $mem$$Register, $newval$$Register, $tmp$$Register);
if (barrier_data() != XLoadBarrierElided) {
__ cmpptr($tmp$$Register, rax);
}
@@ -151,7 +151,7 @@ instruct xXChgP(indirect mem, rRegP newval, rFlagsReg cr) %{
ins_encode %{
__ xchgptr($newval$$Register, Address($mem$$Register, 0));
- x_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
+ x_load_barrier(masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, barrier_data());
%}
ins_pipe(pipe_cmpxchg);
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
index 6cb16a09d55..5f0b73bf23f 100644
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
@@ -356,7 +356,7 @@ static void emit_store_fast_path_check_c2(MacroAssembler* masm, Address ref_addr
// This is a JCC erratum mitigation wrapper for calling the inner check
int size = store_fast_path_check_size(masm, ref_addr, is_atomic, medium_path);
// Emit JCC erratum mitigation nops with the right size
- IntelJccErratumAlignment intel_alignment(*masm, size);
+ IntelJccErratumAlignment intel_alignment(masm, size);
// Emit the JCC erratum mitigation guarded code
emit_store_fast_path_check(masm, ref_addr, is_atomic, medium_path);
#endif
@@ -1184,7 +1184,7 @@ OptoReg::Name ZBarrierSetAssembler::refine_register(const Node* node, OptoReg::N
}
// We use the vec_spill_helper from the x86.ad file to avoid reinventing this wheel
-extern void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
+extern void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
#undef __
@@ -1246,13 +1246,15 @@ private:
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
_spill_offset -= reg_data._size;
- vec_spill_helper(__ code(), false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+ C2_MacroAssembler c2_masm(__ code());
+ vec_spill_helper(&c2_masm, false /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
}
void xmm_register_restore(const XMMRegisterData& reg_data) {
const OptoReg::Name opto_reg = OptoReg::as_OptoReg(reg_data._reg->as_VMReg());
const uint ideal_reg = xmm_ideal_reg_for_size(reg_data._size);
- vec_spill_helper(__ code(), true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
+ C2_MacroAssembler c2_masm(__ code());
+ vec_spill_helper(&c2_masm, true /* is_load */, _spill_offset, opto_reg, ideal_reg, tty);
_spill_offset += reg_data._size;
}
diff --git a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
index 0cc2ea03b35..1a4499c3d44 100644
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
@@ -34,66 +34,66 @@ source %{
#include "c2_intelJccErratum_x86.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
-static void z_color(MacroAssembler& _masm, const MachNode* node, Register ref) {
+static void z_color(MacroAssembler* masm, const MachNode* node, Register ref) {
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
__ shlq(ref, barrier_Relocation::unpatched);
__ orq_imm32(ref, barrier_Relocation::unpatched);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr);
}
-static void z_uncolor(MacroAssembler& _masm, const MachNode* node, Register ref) {
+static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
__ shrq(ref, barrier_Relocation::unpatched);
}
-static void z_keep_alive_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
+static void z_keep_alive_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
__ Assembler::testl(ref, barrier_Relocation::unpatched);
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadAfterTest);
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
__ jcc(Assembler::notEqual, *stub->entry());
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
__ bind(*stub->continuation());
}
-static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref) {
- Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
+static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
+ Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
const bool on_non_strong =
((node->barrier_data() & ZBarrierWeak) != 0) ||
((node->barrier_data() & ZBarrierPhantom) != 0);
if (on_non_strong) {
- z_keep_alive_load_barrier(_masm, node, ref_addr, ref);
+ z_keep_alive_load_barrier(masm, node, ref_addr, ref);
return;
}
- z_uncolor(_masm, node, ref);
+ z_uncolor(masm, node, ref);
if (node->barrier_data() == ZBarrierElided) {
return;
}
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
{
- IntelJccErratumAlignment intel_alignment(_masm, 6);
+ IntelJccErratumAlignment intel_alignment(masm, 6);
__ jcc(Assembler::above, *stub->entry());
}
__ bind(*stub->continuation());
}
-static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
- Assembler::InlineSkippedInstructionsCounter skipped_counter(&_masm);
+static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
+ Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
if (node->barrier_data() == ZBarrierElided) {
if (rnew_zaddress != noreg) {
// noreg means null; no need to color
__ movptr(rnew_zpointer, rnew_zaddress);
- z_color(_masm, node, rnew_zpointer);
+ z_color(masm, node, rnew_zpointer);
}
} else {
bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
- bs_asm->store_barrier_fast(&_masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
+ bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
}
}
@@ -124,7 +124,7 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
ins_encode %{
__ movptr($dst$$Register, $mem$$Address);
- z_load_barrier(_masm, this, $mem$$Address, $dst$$Register);
+ z_load_barrier(masm, this, $mem$$Address, $dst$$Register);
%}
ins_pipe(ialu_reg_mem);
@@ -156,7 +156,7 @@ instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
ins_cost(125); // XXX
format %{ "movq $mem, $src\t# ptr" %}
ins_encode %{
- z_store_barrier(_masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */);
+ z_store_barrier(masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */);
__ movq($mem$$Address, $tmp$$Register);
%}
ins_pipe(ialu_mem_reg);
@@ -172,7 +172,7 @@ instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
ins_cost(125); // XXX
format %{ "movq $mem, 0\t# ptr" %}
ins_encode %{
- z_store_barrier(_masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */);
+ z_store_barrier(masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */);
// Store a colored null - barrier code above does not need to color
__ movq($mem$$Address, barrier_Relocation::unpatched);
// The relocation cant be fully after the mov, as that is the beginning of a random subsequent
@@ -194,11 +194,11 @@ instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_R
assert_different_registers($oldval$$Register, $mem$$Register);
assert_different_registers($oldval$$Register, $newval$$Register);
const Address mem_addr = Address($mem$$Register, 0);
- z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
- z_color(_masm, this, $oldval$$Register);
+ z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
+ z_color(masm, this, $oldval$$Register);
__ lock();
__ cmpxchgptr($tmp$$Register, mem_addr);
- z_uncolor(_masm, this, $oldval$$Register);
+ z_uncolor(masm, this, $oldval$$Register);
%}
ins_pipe(pipe_cmpxchg);
@@ -218,8 +218,8 @@ instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_
ins_encode %{
assert_different_registers($oldval$$Register, $mem$$Register);
const Address mem_addr = Address($mem$$Register, 0);
- z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
- z_color(_masm, this, $oldval$$Register);
+ z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
+ z_color(masm, this, $oldval$$Register);
__ lock();
__ cmpxchgptr($tmp$$Register, mem_addr);
__ setb(Assembler::equal, $res$$Register);
@@ -239,10 +239,10 @@ instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{
ins_encode %{
assert_different_registers($mem$$Register, $newval$$Register);
const Address mem_addr = Address($mem$$Register, 0);
- z_store_barrier(_masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
+ z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
__ movptr($newval$$Register, $tmp$$Register);
__ xchgptr($newval$$Register, mem_addr);
- z_uncolor(_masm, this, $newval$$Register);
+ z_uncolor(masm, this, $newval$$Register);
%}
ins_pipe(pipe_cmpxchg);
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index c666f982d0f..0c1dc865c78 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1441,8 +1441,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// Make sure the call is patchable
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
- CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1478,8 +1477,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
- CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index 54306d02ea0..0b262bb9c37 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1187,8 +1187,8 @@ class HandlerImpl {
public:
- static int emit_exception_handler(CodeBuffer &cbuf);
- static int emit_deopt_handler(CodeBuffer& cbuf);
+ static int emit_exception_handler(C2_MacroAssembler *masm);
+ static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
// NativeCall instruction size is the same as NativeJump.
@@ -1306,11 +1306,10 @@ int MachNode::compute_padding(int current_offset) const {
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
-int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -1324,11 +1323,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
}
// Emit deopt handler code.
-int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
+int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
- C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
@@ -2523,14 +2521,13 @@ static inline Assembler::ComparisonPredicateFP booltest_pred_to_comparison_pred_
}
// Helper methods for MachSpillCopyNode::implementation().
-static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
+static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st) {
assert(ireg == Op_VecS || // 32bit vector
((src_lo & 1) == 0 && (src_lo + 1) == src_hi &&
(dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi),
"no non-adjacent vector moves" );
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
switch (ireg) {
case Op_VecS: // copy whole register
case Op_VecD:
@@ -2581,10 +2578,9 @@ static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
}
}
-void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
+void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st) {
- if (cbuf) {
- C2_MacroAssembler _masm(cbuf);
+ if (masm) {
if (is_load) {
switch (ireg) {
case Op_VecS:
@@ -2742,8 +2738,7 @@ static inline jlong high_bit_set(BasicType bt) {
}
#endif
- void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
- C2_MacroAssembler _masm(&cbuf);
+ void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
__ nop(_count);
}
@@ -2757,8 +2752,7 @@ static inline jlong high_bit_set(BasicType bt) {
}
#endif
- void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
- C2_MacroAssembler _masm(&cbuf);
+ void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc* ra_) const {
__ int3();
}
@@ -2771,7 +2765,6 @@ static inline jlong high_bit_set(BasicType bt) {
encode %{
enc_class call_epilog %{
- C2_MacroAssembler _masm(&cbuf);
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find majik cookie on stack
int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad
index 36ec4d283ed..39c09efc49c 100644
--- a/src/hotspot/cpu/x86/x86_32.ad
+++ b/src/hotspot/cpu/x86/x86_32.ad
@@ -252,7 +252,7 @@ source %{
#define RELOC_IMM32 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand
-#define __ _masm.
+#define __ masm->
// How to find the high register of a Long pair, given the low register
#define HIGH_FROM_LOW(x) (as_Register((x)->encoding()+2))
@@ -337,107 +337,107 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
}
// EMIT_RM()
-void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
+void emit_rm(C2_MacroAssembler *masm, int f1, int f2, int f3) {
unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
- cbuf.insts()->emit_int8(c);
+ __ emit_int8(c);
}
// EMIT_CC()
-void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
+void emit_cc(C2_MacroAssembler *masm, int f1, int f2) {
unsigned char c = (unsigned char)( f1 | f2 );
- cbuf.insts()->emit_int8(c);
+ __ emit_int8(c);
}
// EMIT_OPCODE()
-void emit_opcode(CodeBuffer &cbuf, int code) {
- cbuf.insts()->emit_int8((unsigned char) code);
+void emit_opcode(C2_MacroAssembler *masm, int code) {
+ __ emit_int8((unsigned char) code);
}
// EMIT_OPCODE() w/ relocation information
-void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) {
- cbuf.relocate(cbuf.insts_mark() + offset, reloc);
- emit_opcode(cbuf, code);
+void emit_opcode(C2_MacroAssembler *masm, int code, relocInfo::relocType reloc, int offset = 0) {
+ __ relocate(__ inst_mark() + offset, reloc);
+ emit_opcode(masm, code);
}
// EMIT_D8()
-void emit_d8(CodeBuffer &cbuf, int d8) {
- cbuf.insts()->emit_int8((unsigned char) d8);
+void emit_d8(C2_MacroAssembler *masm, int d8) {
+ __ emit_int8((unsigned char) d8);
}
// EMIT_D16()
-void emit_d16(CodeBuffer &cbuf, int d16) {
- cbuf.insts()->emit_int16(d16);
+void emit_d16(C2_MacroAssembler *masm, int d16) {
+ __ emit_int16(d16);
}
// EMIT_D32()
-void emit_d32(CodeBuffer &cbuf, int d32) {
- cbuf.insts()->emit_int32(d32);
+void emit_d32(C2_MacroAssembler *masm, int d32) {
+ __ emit_int32(d32);
}
// emit 32 bit value and construct relocation entry from relocInfo::relocType
-void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
+void emit_d32_reloc(C2_MacroAssembler *masm, int d32, relocInfo::relocType reloc,
int format) {
- cbuf.relocate(cbuf.insts_mark(), reloc, format);
- cbuf.insts()->emit_int32(d32);
+ __ relocate(__ inst_mark(), reloc, format);
+ __ emit_int32(d32);
}
// emit 32 bit value and construct relocation entry from RelocationHolder
-void emit_d32_reloc(CodeBuffer &cbuf, int d32, RelocationHolder const& rspec,
+void emit_d32_reloc(C2_MacroAssembler *masm, int d32, RelocationHolder const& rspec,
int format) {
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
assert(oopDesc::is_oop(cast_to_oop(d32)), "cannot embed broken oops in code");
}
#endif
- cbuf.relocate(cbuf.insts_mark(), rspec, format);
- cbuf.insts()->emit_int32(d32);
+ __ relocate(__ inst_mark(), rspec, format);
+ __ emit_int32(d32);
}
// Access stack slot for load or store
-void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp) {
- emit_opcode( cbuf, opcode ); // (e.g., FILD [ESP+src])
+void store_to_stackslot(C2_MacroAssembler *masm, int opcode, int rm_field, int disp) {
+ emit_opcode( masm, opcode ); // (e.g., FILD [ESP+src])
if( -128 <= disp && disp <= 127 ) {
- emit_rm( cbuf, 0x01, rm_field, ESP_enc ); // R/M byte
- emit_rm( cbuf, 0x00, ESP_enc, ESP_enc); // SIB byte
- emit_d8 (cbuf, disp); // Displacement // R/M byte
+ emit_rm( masm, 0x01, rm_field, ESP_enc ); // R/M byte
+ emit_rm( masm, 0x00, ESP_enc, ESP_enc); // SIB byte
+ emit_d8 (masm, disp); // Displacement // R/M byte
} else {
- emit_rm( cbuf, 0x02, rm_field, ESP_enc ); // R/M byte
- emit_rm( cbuf, 0x00, ESP_enc, ESP_enc); // SIB byte
- emit_d32(cbuf, disp); // Displacement // R/M byte
+ emit_rm( masm, 0x02, rm_field, ESP_enc ); // R/M byte
+ emit_rm( masm, 0x00, ESP_enc, ESP_enc); // SIB byte
+ emit_d32(masm, disp); // Displacement // R/M byte
}
}
// rRegI ereg, memory mem) %{ // emit_reg_mem
-void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int scale, int displace, relocInfo::relocType disp_reloc ) {
+void encode_RegMem( C2_MacroAssembler *masm, int reg_encoding, int base, int index, int scale, int displace, relocInfo::relocType disp_reloc ) {
// There is no index & no scale, use form without SIB byte
if ((index == 0x4) &&
(scale == 0) && (base != ESP_enc)) {
// If no displacement, mode is 0x0; unless base is [EBP]
if ( (displace == 0) && (base != EBP_enc) ) {
- emit_rm(cbuf, 0x0, reg_encoding, base);
+ emit_rm(masm, 0x0, reg_encoding, base);
}
else { // If 8-bit displacement, mode 0x1
if ((displace >= -128) && (displace <= 127)
&& (disp_reloc == relocInfo::none) ) {
- emit_rm(cbuf, 0x1, reg_encoding, base);
- emit_d8(cbuf, displace);
+ emit_rm(masm, 0x1, reg_encoding, base);
+ emit_d8(masm, displace);
}
else { // If 32-bit displacement
if (base == -1) { // Special flag for absolute address
- emit_rm(cbuf, 0x0, reg_encoding, 0x5);
+ emit_rm(masm, 0x0, reg_encoding, 0x5);
// (manual lies; no SIB needed here)
if ( disp_reloc != relocInfo::none ) {
- emit_d32_reloc(cbuf, displace, disp_reloc, 1);
+ emit_d32_reloc(masm, displace, disp_reloc, 1);
} else {
- emit_d32 (cbuf, displace);
+ emit_d32 (masm, displace);
}
}
else { // Normal base + offset
- emit_rm(cbuf, 0x2, reg_encoding, base);
+ emit_rm(masm, 0x2, reg_encoding, base);
if ( disp_reloc != relocInfo::none ) {
- emit_d32_reloc(cbuf, displace, disp_reloc, 1);
+ emit_d32_reloc(masm, displace, disp_reloc, 1);
} else {
- emit_d32 (cbuf, displace);
+ emit_d32 (masm, displace);
}
}
}
@@ -446,28 +446,28 @@ void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int
else { // Else, encode with the SIB byte
// If no displacement, mode is 0x0; unless base is [EBP]
if (displace == 0 && (base != EBP_enc)) { // If no displacement
- emit_rm(cbuf, 0x0, reg_encoding, 0x4);
- emit_rm(cbuf, scale, index, base);
+ emit_rm(masm, 0x0, reg_encoding, 0x4);
+ emit_rm(masm, scale, index, base);
}
else { // If 8-bit displacement, mode 0x1
if ((displace >= -128) && (displace <= 127)
&& (disp_reloc == relocInfo::none) ) {
- emit_rm(cbuf, 0x1, reg_encoding, 0x4);
- emit_rm(cbuf, scale, index, base);
- emit_d8(cbuf, displace);
+ emit_rm(masm, 0x1, reg_encoding, 0x4);
+ emit_rm(masm, scale, index, base);
+ emit_d8(masm, displace);
}
else { // If 32-bit displacement
if (base == 0x04 ) {
- emit_rm(cbuf, 0x2, reg_encoding, 0x4);
- emit_rm(cbuf, scale, index, 0x04);
+ emit_rm(masm, 0x2, reg_encoding, 0x4);
+ emit_rm(masm, scale, index, 0x04);
} else {
- emit_rm(cbuf, 0x2, reg_encoding, 0x4);
- emit_rm(cbuf, scale, index, base);
+ emit_rm(masm, 0x2, reg_encoding, 0x4);
+ emit_rm(masm, scale, index, base);
}
if ( disp_reloc != relocInfo::none ) {
- emit_d32_reloc(cbuf, displace, disp_reloc, 1);
+ emit_d32_reloc(masm, displace, disp_reloc, 1);
} else {
- emit_d32 (cbuf, displace);
+ emit_d32 (masm, displace);
}
}
}
@@ -475,16 +475,16 @@ void encode_RegMem( CodeBuffer &cbuf, int reg_encoding, int base, int index, int
}
-void encode_Copy( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
+void encode_Copy( C2_MacroAssembler *masm, int dst_encoding, int src_encoding ) {
if( dst_encoding == src_encoding ) {
// reg-reg copy, use an empty encoding
} else {
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, dst_encoding, src_encoding );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, dst_encoding, src_encoding );
}
}
-void emit_cmpfp_fixup(MacroAssembler& _masm) {
+void emit_cmpfp_fixup(MacroAssembler* masm) {
Label exit;
__ jccb(Assembler::noParity, exit);
__ pushf();
@@ -504,7 +504,7 @@ void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
-static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
+static void emit_cmpfp3(MacroAssembler* masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@@ -527,7 +527,7 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray *nodes, Phase
ShouldNotReachHere();
}
-void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
@@ -607,16 +607,15 @@ void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
#endif
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
- C2_MacroAssembler _masm(&cbuf);
int framesize = C->output()->frame_size_in_bytes();
int bangsize = C->output()->bang_size_in_bytes();
__ verified_entry(framesize, C->output()->need_stack_bang(bangsize)?bangsize:0, C->in_24_bit_fp_mode(), C->stub_function() != nullptr);
- C->output()->set_frame_complete(cbuf.insts_size());
+ C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
@@ -664,18 +663,17 @@ void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
}
#endif
-void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
- MacroAssembler _masm(&cbuf);
if (C->max_vector_size() > 16) {
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
- _masm.vzeroupper();
+ __ vzeroupper();
}
// If method set FPU control word, restore to standard control word
if (C->in_24_bit_fp_mode()) {
- _masm.fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
+ __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
}
int framesize = C->output()->frame_size_in_bytes();
@@ -686,16 +684,16 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here
if (framesize >= 128) {
- emit_opcode(cbuf, 0x81); // add SP, #framesize
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d32(cbuf, framesize);
+ emit_opcode(masm, 0x81); // add SP, #framesize
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d32(masm, framesize);
} else if (framesize) {
- emit_opcode(cbuf, 0x83); // add SP, #framesize
- emit_rm(cbuf, 0x3, 0x00, ESP_enc);
- emit_d8(cbuf, framesize);
+ emit_opcode(masm, 0x83); // add SP, #framesize
+ emit_rm(masm, 0x3, 0x00, ESP_enc);
+ emit_d8(masm, framesize);
}
- emit_opcode(cbuf, 0x58 | EBP_enc);
+ emit_opcode(masm, 0x58 | EBP_enc);
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check();
@@ -703,7 +701,6 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (do_polling() && C->is_method_compilation()) {
Register thread = as_Register(EBX_enc);
- MacroAssembler masm(&cbuf);
__ get_thread(thread);
Label dummy_label;
Label* code_stub = &dummy_label;
@@ -712,7 +709,9 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C->output()->add_stub(stub);
code_stub = &stub->entry();
}
+ __ set_inst_mark();
__ relocate(relocInfo::poll_return_type);
+ __ clear_inst_mark();
__ safepoint_poll(*code_stub, thread, true /* at_return */, true /* in_nmethod */);
}
}
@@ -749,11 +748,13 @@ static enum RC rc_class( OptoReg::Name reg ) {
return rc_xmm;
}
-static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset, int reg,
+static int impl_helper( C2_MacroAssembler *masm, bool do_size, bool is_load, int offset, int reg,
int opcode, const char *op_str, int size, outputStream* st ) {
- if( cbuf ) {
- emit_opcode (*cbuf, opcode );
- encode_RegMem(*cbuf, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
+ if( masm ) {
+ masm->set_inst_mark();
+ emit_opcode (masm, opcode );
+ encode_RegMem(masm, Matcher::_regEncode[reg], ESP_enc, 0x4, 0, offset, relocInfo::none);
+ masm->clear_inst_mark();
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -770,7 +771,7 @@ static int impl_helper( CodeBuffer *cbuf, bool do_size, bool is_load, int offset
}
// Helper for XMM registers. Extra opcode bits, limited syntax.
-static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
+static int impl_x_helper( C2_MacroAssembler *masm, bool do_size, bool is_load,
int offset, int reg_lo, int reg_hi, int size, outputStream* st ) {
int in_size_in_bits = Assembler::EVEX_32bit;
int evex_encoding = 0;
@@ -778,11 +779,10 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
in_size_in_bits = Assembler::EVEX_64bit;
evex_encoding = Assembler::VEX_W;
}
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: Compressed displacemement is better than AVX on spill mem operations,
// it maps more cases to single byte displacement
- _masm.set_managed();
+ __ set_managed();
if (reg_lo+1 == reg_hi) { // double move?
if (is_load) {
__ movdbl(as_XMMRegister(Matcher::_regEncode[reg_lo]), Address(rsp, offset));
@@ -829,12 +829,11 @@ static int impl_x_helper( CodeBuffer *cbuf, bool do_size, bool is_load,
}
-static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+static int impl_movx_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size, outputStream* st ) {
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
- _masm.set_managed();
+ __ set_managed();
if (src_lo+1 == src_hi && dst_lo+1 == dst_hi) { // double move?
__ movdbl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
as_XMMRegister(Matcher::_regEncode[src_lo]));
@@ -868,13 +867,12 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
return size + sz;
}
-static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+static int impl_movgpr2x_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size, outputStream* st ) {
// 32-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
- _masm.set_managed();
+ __ set_managed();
__ movdl(as_XMMRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
#ifndef PRODUCT
@@ -886,13 +884,12 @@ static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
}
-static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+static int impl_movx2gpr_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int dst_lo,
int src_hi, int dst_hi, int size, outputStream* st ) {
// 32-bit
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
// EVEX spills remain EVEX: logic complex between full EVEX, partial and AVX, manage EVEX spill code one way.
- _masm.set_managed();
+ __ set_managed();
__ movdl(as_Register(Matcher::_regEncode[dst_lo]),
as_XMMRegister(Matcher::_regEncode[src_lo]));
#ifndef PRODUCT
@@ -903,10 +900,10 @@ static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
return (UseAVX> 2) ? 6 : 4;
}
-static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
- if( cbuf ) {
- emit_opcode(*cbuf, 0x8B );
- emit_rm (*cbuf, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
+static int impl_mov_helper( C2_MacroAssembler *masm, bool do_size, int src, int dst, int size, outputStream* st ) {
+ if( masm ) {
+ emit_opcode(masm, 0x8B );
+ emit_rm (masm, 0x3, Matcher::_regEncode[dst], Matcher::_regEncode[src] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -916,12 +913,12 @@ static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, in
return size+2;
}
-static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
+static int impl_fp_store_helper( C2_MacroAssembler *masm, bool do_size, int src_lo, int src_hi, int dst_lo, int dst_hi,
int offset, int size, outputStream* st ) {
if( src_lo != FPR1L_num ) { // Move value to top of FP stack, if not already there
- if( cbuf ) {
- emit_opcode( *cbuf, 0xD9 ); // FLD (i.e., push it)
- emit_d8( *cbuf, 0xC0-1+Matcher::_regEncode[src_lo] );
+ if( masm ) {
+ emit_opcode( masm, 0xD9 ); // FLD (i.e., push it)
+ emit_d8( masm, 0xC0-1+Matcher::_regEncode[src_lo] );
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -943,20 +940,19 @@ static int impl_fp_store_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int
assert( !OptoReg::is_valid(src_hi) && !OptoReg::is_valid(dst_hi), "no non-adjacent float-stores" );
}
- return impl_helper(cbuf,do_size,false,offset,st_op,op,op_str,size, st);
+ return impl_helper(masm,do_size,false,offset,st_op,op,op_str,size, st);
}
// Next two methods are shared by 32- and 64-bit VM. They are defined in x86.ad.
-static void vec_mov_helper(CodeBuffer *cbuf, int src_lo, int dst_lo,
+static void vec_mov_helper(C2_MacroAssembler *masm, int src_lo, int dst_lo,
int src_hi, int dst_hi, uint ireg, outputStream* st);
-void vec_spill_helper(CodeBuffer *cbuf, bool is_load,
+void vec_spill_helper(C2_MacroAssembler *masm, bool is_load,
int stack_offset, int reg, uint ireg, outputStream* st);
-static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
+static void vec_stack_to_stack_helper(C2_MacroAssembler *masm, int src_offset,
int dst_offset, uint ireg, outputStream* st) {
- if (cbuf) {
- MacroAssembler _masm(cbuf);
+ if (masm) {
switch (ireg) {
case Op_VecS:
__ pushl(Address(rsp, src_offset));
@@ -1032,7 +1028,7 @@ static void vec_stack_to_stack_helper(CodeBuffer *cbuf, int src_offset,
}
}
-uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
+uint MachSpillCopyNode::implementation( C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
// Get registers to move
OptoReg::Name src_second = ra_->get_reg_second(in(1));
OptoReg::Name src_first = ra_->get_reg_first(in(1));
@@ -1061,15 +1057,15 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// mem -> mem
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
- vec_stack_to_stack_helper(cbuf, src_offset, dst_offset, ireg, st);
+ vec_stack_to_stack_helper(masm, src_offset, dst_offset, ireg, st);
} else if (src_first_rc == rc_xmm && dst_first_rc == rc_xmm ) {
- vec_mov_helper(cbuf, src_first, dst_first, src_second, dst_second, ireg, st);
+ vec_mov_helper(masm, src_first, dst_first, src_second, dst_second, ireg, st);
} else if (src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
int stack_offset = ra_->reg2offset(dst_first);
- vec_spill_helper(cbuf, false, stack_offset, src_first, ireg, st);
+ vec_spill_helper(masm, false, stack_offset, src_first, ireg, st);
} else if (src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
int stack_offset = ra_->reg2offset(src_first);
- vec_spill_helper(cbuf, true, stack_offset, dst_first, ireg, st);
+ vec_spill_helper(masm, true, stack_offset, dst_first, ireg, st);
} else {
ShouldNotReachHere();
}
@@ -1081,16 +1077,16 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
if( src_second == dst_first ) { // overlapping stack copy ranges
assert( src_second_rc == rc_stack && dst_second_rc == rc_stack, "we only expect a stk-stk copy here" );
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
src_second_rc = dst_second_rc = rc_bad; // flag as already moved the second bits
}
// move low bits
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size, st);
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_first),ESI_num,0xFF,"PUSH ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_first),EAX_num,0x8F,"POP ",size, st);
if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { // mov second bits
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_second),ESI_num,0xFF,"PUSH ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_second),EAX_num,0x8F,"POP ",size, st);
}
return size;
}
@@ -1098,41 +1094,41 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// --------------------------------------
// Check for integer reg-reg copy
if( src_first_rc == rc_int && dst_first_rc == rc_int )
- size = impl_mov_helper(cbuf,do_size,src_first,dst_first,size, st);
+ size = impl_mov_helper(masm,do_size,src_first,dst_first,size, st);
// Check for integer store
if( src_first_rc == rc_int && dst_first_rc == rc_stack )
- size = impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
+ size = impl_helper(masm,do_size,false,ra_->reg2offset(dst_first),src_first,0x89,"MOV ",size, st);
// Check for integer load
if( src_first_rc == rc_stack && dst_first_rc == rc_int )
- size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
+ size = impl_helper(masm,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);
// Check for integer reg-xmm reg copy
if( src_first_rc == rc_int && dst_first_rc == rc_xmm ) {
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
"no 64 bit integer-float reg moves" );
- return impl_movgpr2x_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+ return impl_movgpr2x_helper(masm,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// --------------------------------------
// Check for float reg-reg copy
if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
(src_first+1 == src_second && dst_first+1 == dst_second), "no non-adjacent float-moves" );
- if( cbuf ) {
+ if( masm ) {
// Note the mucking with the register encode to compensate for the 0/1
// indexing issue mentioned in a comment in the reg_def sections
// for FPR registers many lines above here.
if( src_first != FPR1L_num ) {
- emit_opcode (*cbuf, 0xD9 ); // FLD ST(i)
- emit_d8 (*cbuf, 0xC0+Matcher::_regEncode[src_first]-1 );
- emit_opcode (*cbuf, 0xDD ); // FSTP ST(i)
- emit_d8 (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
+ emit_opcode (masm, 0xD9 ); // FLD ST(i)
+ emit_d8 (masm, 0xC0+Matcher::_regEncode[src_first]-1 );
+ emit_opcode (masm, 0xDD ); // FSTP ST(i)
+ emit_d8 (masm, 0xD8+Matcher::_regEncode[dst_first] );
} else {
- emit_opcode (*cbuf, 0xDD ); // FST ST(i)
- emit_d8 (*cbuf, 0xD0+Matcher::_regEncode[dst_first]-1 );
+ emit_opcode (masm, 0xDD ); // FST ST(i)
+ emit_d8 (masm, 0xD0+Matcher::_regEncode[dst_first]-1 );
}
#ifndef PRODUCT
} else if( !do_size ) {
@@ -1146,7 +1142,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// Check for float store
if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
- return impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
+ return impl_fp_store_helper(masm,do_size,src_first,src_second,dst_first,dst_second,ra_->reg2offset(dst_first),size, st);
}
// Check for float load
@@ -1162,11 +1158,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
op = 0xD9;
assert( src_second_rc == rc_bad && dst_second_rc == rc_bad, "no non-adjacent float-loads" );
}
- if( cbuf ) {
- emit_opcode (*cbuf, op );
- encode_RegMem(*cbuf, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
- emit_opcode (*cbuf, 0xDD ); // FSTP ST(i)
- emit_d8 (*cbuf, 0xD8+Matcher::_regEncode[dst_first] );
+ if( masm ) {
+ masm->set_inst_mark();
+ emit_opcode (masm, op );
+ encode_RegMem(masm, 0x0, ESP_enc, 0x4, 0, offset, relocInfo::none);
+ emit_opcode (masm, 0xDD ); // FSTP ST(i)
+ emit_d8 (masm, 0xD8+Matcher::_regEncode[dst_first] );
+ masm->clear_inst_mark();
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1182,35 +1180,35 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad) ||
(src_first+1 == src_second && dst_first+1 == dst_second),
"no non-adjacent float-moves" );
- return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+ return impl_movx_helper(masm,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// Check for xmm reg-integer reg copy
if( src_first_rc == rc_xmm && dst_first_rc == rc_int ) {
assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
"no 64 bit float-integer reg moves" );
- return impl_movx2gpr_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+ return impl_movx2gpr_helper(masm,do_size,src_first,dst_first,src_second, dst_second, size, st);
}
// Check for xmm store
if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
- return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first), src_first, src_second, size, st);
+ return impl_x_helper(masm,do_size,false,ra_->reg2offset(dst_first), src_first, src_second, size, st);
}
// Check for float xmm load
if( src_first_rc == rc_stack && dst_first_rc == rc_xmm ) {
- return impl_x_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
+ return impl_x_helper(masm,do_size,true ,ra_->reg2offset(src_first),dst_first, dst_second, size, st);
}
// Copy from float reg to xmm reg
if( src_first_rc == rc_float && dst_first_rc == rc_xmm ) {
// copy to the top of stack from floating point reg
// and use LEA to preserve flags
- if( cbuf ) {
- emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP-8]
- emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
- emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
- emit_d8(*cbuf,0xF8);
+ if( masm ) {
+ emit_opcode(masm,0x8D); // LEA ESP,[ESP-8]
+ emit_rm(masm, 0x1, ESP_enc, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d8(masm,0xF8);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1219,16 +1217,16 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
}
size += 4;
- size = impl_fp_store_helper(cbuf,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
+ size = impl_fp_store_helper(masm,do_size,src_first,src_second,dst_first,dst_second,0,size, st);
// Copy from the temp memory to the xmm reg.
- size = impl_x_helper(cbuf,do_size,true ,0,dst_first, dst_second, size, st);
+ size = impl_x_helper(masm,do_size,true ,0,dst_first, dst_second, size, st);
- if( cbuf ) {
- emit_opcode(*cbuf,0x8D); // LEA ESP,[ESP+8]
- emit_rm(*cbuf, 0x1, ESP_enc, 0x04);
- emit_rm(*cbuf, 0x0, 0x04, ESP_enc);
- emit_d8(*cbuf,0x08);
+ if( masm ) {
+ emit_opcode(masm,0x8D); // LEA ESP,[ESP+8]
+ emit_rm(masm, 0x1, ESP_enc, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d8(masm,0x08);
#ifndef PRODUCT
} else if( !do_size ) {
if( size != 0 ) st->print("\n\t");
@@ -1244,8 +1242,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair");
assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair");
int offset = ra_->reg2offset(src_first);
- if (cbuf != nullptr) {
- MacroAssembler _masm(cbuf);
+ if (masm != nullptr) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), Address(rsp, offset));
#ifndef PRODUCT
} else {
@@ -1259,8 +1256,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair");
assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair");
int offset = ra_->reg2offset(dst_first);
- if (cbuf != nullptr) {
- MacroAssembler _masm(cbuf);
+ if (masm != nullptr) {
__ kmov(Address(rsp, offset), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1283,8 +1279,7 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
if (src_first_rc == rc_kreg && dst_first_rc == rc_kreg) {
assert((src_first & 1) == 0 && src_first + 1 == src_second, "invalid register pair");
assert((dst_first & 1) == 0 && dst_first + 1 == dst_second, "invalid register pair");
- if (cbuf != nullptr) {
- MacroAssembler _masm(cbuf);
+ if (masm != nullptr) {
__ kmov(as_KRegister(Matcher::_regEncode[dst_first]), as_KRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
@@ -1304,15 +1299,15 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
// Check for second word int-int move
if( src_second_rc == rc_int && dst_second_rc == rc_int )
- return impl_mov_helper(cbuf,do_size,src_second,dst_second,size, st);
+ return impl_mov_helper(masm,do_size,src_second,dst_second,size, st);
// Check for second word integer store
if( src_second_rc == rc_int && dst_second_rc == rc_stack )
- return impl_helper(cbuf,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
+ return impl_helper(masm,do_size,false,ra_->reg2offset(dst_second),src_second,0x89,"MOV ",size, st);
// Check for second word integer load
if( dst_second_rc == rc_int && src_second_rc == rc_stack )
- return impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
+ return impl_helper(masm,do_size,true ,ra_->reg2offset(src_second),dst_second,0x8B,"MOV ",size, st);
Unimplemented();
return 0; // Mute compiler
@@ -1324,8 +1319,8 @@ void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
}
#endif
-void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- implementation( &cbuf, ra_, false, nullptr );
+void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ implementation( masm, ra_, false, nullptr );
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
@@ -1342,20 +1337,20 @@ void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
}
#endif
-void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
if( offset >= 128 ) {
- emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
- emit_rm(cbuf, 0x2, reg, 0x04);
- emit_rm(cbuf, 0x0, 0x04, ESP_enc);
- emit_d32(cbuf, offset);
+ emit_opcode(masm, 0x8D); // LEA reg,[SP+offset]
+ emit_rm(masm, 0x2, reg, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d32(masm, offset);
}
else {
- emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
- emit_rm(cbuf, 0x1, reg, 0x04);
- emit_rm(cbuf, 0x0, 0x04, ESP_enc);
- emit_d8(cbuf, offset);
+ emit_opcode(masm, 0x8D); // LEA reg,[SP+offset]
+ emit_rm(masm, 0x1, reg, 0x04);
+ emit_rm(masm, 0x0, 0x04, ESP_enc);
+ emit_d8(masm, offset);
}
}
@@ -1381,9 +1376,8 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
}
#endif
-void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
- MacroAssembler masm(&cbuf);
- masm.ic_check(CodeEntryAlignment);
+void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
+ __ ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -1528,37 +1522,49 @@ encode %{
// adding a syntax that specifies the sizes of fields in an order,
// so that the adlc can build the emit functions automagically
+ // Set instruction mark in MacroAssembler. This is used only in
+ // instructions that emit bytes directly to the CodeBuffer wrapped
+ // in the MacroAssembler. Should go away once all "instruct" are
+ // patched to emit bytes only using methods in MacroAssembler.
+ enc_class SetInstMark %{
+ __ set_inst_mark();
+ %}
+
+ enc_class ClearInstMark %{
+ __ clear_inst_mark();
+ %}
+
// Emit primary opcode
enc_class OpcP %{
- emit_opcode(cbuf, $primary);
+ emit_opcode(masm, $primary);
%}
// Emit secondary opcode
enc_class OpcS %{
- emit_opcode(cbuf, $secondary);
+ emit_opcode(masm, $secondary);
%}
// Emit opcode directly
enc_class Opcode(immI d8) %{
- emit_opcode(cbuf, $d8$$constant);
+ emit_opcode(masm, $d8$$constant);
%}
enc_class SizePrefix %{
- emit_opcode(cbuf,0x66);
+ emit_opcode(masm,0x66);
%}
enc_class RegReg (rRegI dst, rRegI src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class OpcRegReg (immI opcode, rRegI dst, rRegI src) %{ // OpcRegReg(Many)
- emit_opcode(cbuf,$opcode$$constant);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,$opcode$$constant);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class mov_r32_imm0( rRegI dst ) %{
- emit_opcode( cbuf, 0xB8 + $dst$$reg ); // 0xB8+ rd -- MOV r32 ,imm32
- emit_d32 ( cbuf, 0x0 ); // imm32==0x0
+ emit_opcode( masm, 0xB8 + $dst$$reg ); // 0xB8+ rd -- MOV r32 ,imm32
+ emit_d32 ( masm, 0x0 ); // imm32==0x0
%}
enc_class cdq_enc %{
@@ -1585,26 +1591,26 @@ encode %{
// F7 F9 idiv rax,ecx
// done:
//
- emit_opcode(cbuf,0x81); emit_d8(cbuf,0xF8);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x80); // cmp rax,80000000h
- emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x85);
- emit_opcode(cbuf,0x0B); emit_d8(cbuf,0x00);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00); // jne normal_case
- emit_opcode(cbuf,0x33); emit_d8(cbuf,0xD2); // xor rdx,edx
- emit_opcode(cbuf,0x83); emit_d8(cbuf,0xF9); emit_d8(cbuf,0xFF); // cmp rcx,0FFh
- emit_opcode(cbuf,0x0F); emit_d8(cbuf,0x84);
- emit_opcode(cbuf,0x03); emit_d8(cbuf,0x00);
- emit_opcode(cbuf,0x00); emit_d8(cbuf,0x00); // je done
+ emit_opcode(masm,0x81); emit_d8(masm,0xF8);
+ emit_opcode(masm,0x00); emit_d8(masm,0x00);
+ emit_opcode(masm,0x00); emit_d8(masm,0x80); // cmp rax,80000000h
+ emit_opcode(masm,0x0F); emit_d8(masm,0x85);
+ emit_opcode(masm,0x0B); emit_d8(masm,0x00);
+ emit_opcode(masm,0x00); emit_d8(masm,0x00); // jne normal_case
+ emit_opcode(masm,0x33); emit_d8(masm,0xD2); // xor rdx,edx
+ emit_opcode(masm,0x83); emit_d8(masm,0xF9); emit_d8(masm,0xFF); // cmp rcx,0FFh
+ emit_opcode(masm,0x0F); emit_d8(masm,0x84);
+ emit_opcode(masm,0x03); emit_d8(masm,0x00);
+ emit_opcode(masm,0x00); emit_d8(masm,0x00); // je done
// normal_case:
- emit_opcode(cbuf,0x99); // cdq
+ emit_opcode(masm,0x99); // cdq
// idiv (note: must be emitted by the user of this rule)
// normal:
%}
// Dense encoding for older common ops
enc_class Opc_plus(immI opcode, rRegI reg) %{
- emit_opcode(cbuf, $opcode$$constant + $reg$$reg);
+ emit_opcode(masm, $opcode$$constant + $reg$$reg);
%}
@@ -1612,10 +1618,10 @@ encode %{
enc_class OpcSE (immI imm) %{ // Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
- emit_opcode(cbuf, $primary | 0x02);
+ emit_opcode(masm, $primary | 0x02);
}
else { // If 32-bit immediate
- emit_opcode(cbuf, $primary);
+ emit_opcode(masm, $primary);
}
%}
@@ -1623,12 +1629,12 @@ encode %{
// Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
if (($imm$$constant >= -128) && ($imm$$constant <= 127)) {
- emit_opcode(cbuf, $primary | 0x02); }
+ emit_opcode(masm, $primary | 0x02); }
else { // If 32-bit immediate
- emit_opcode(cbuf, $primary);
+ emit_opcode(masm, $primary);
}
// Emit r/m byte with secondary opcode, after primary opcode.
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
%}
enc_class Con8or32 (immI imm) %{ // Con8or32(storeImmI), 8 or 32 bits
@@ -1646,62 +1652,62 @@ encode %{
// Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
int con = (int)$imm$$constant; // Throw away top bits
- emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
+ emit_opcode(masm, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
// Emit r/m byte with secondary opcode, after primary opcode.
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
- if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
- else emit_d32(cbuf,con);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
+ if ((con >= -128) && (con <= 127)) emit_d8 (masm,con);
+ else emit_d32(masm,con);
%}
enc_class Long_OpcSErm_Hi(eRegL dst, immL imm) %{
// Emit primary opcode and set sign-extend bit
// Check for 8-bit immediate, and set sign extend bit in opcode
int con = (int)($imm$$constant >> 32); // Throw away bottom bits
- emit_opcode(cbuf, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
+ emit_opcode(masm, ((con >= -128) && (con <= 127)) ? ($primary | 0x02) : $primary);
// Emit r/m byte with tertiary opcode, after primary opcode.
- emit_rm(cbuf, 0x3, $tertiary, HIGH_FROM_LOW_ENC($dst$$reg));
- if ((con >= -128) && (con <= 127)) emit_d8 (cbuf,con);
- else emit_d32(cbuf,con);
+ emit_rm(masm, 0x3, $tertiary, HIGH_FROM_LOW_ENC($dst$$reg));
+ if ((con >= -128) && (con <= 127)) emit_d8 (masm,con);
+ else emit_d32(masm,con);
%}
enc_class OpcSReg (rRegI dst) %{ // BSWAP
- emit_cc(cbuf, $secondary, $dst$$reg );
+ emit_cc(masm, $secondary, $dst$$reg );
%}
enc_class bswap_long_bytes(eRegL dst) %{ // BSWAP
int destlo = $dst$$reg;
int desthi = HIGH_FROM_LOW_ENC(destlo);
// bswap lo
- emit_opcode(cbuf, 0x0F);
- emit_cc(cbuf, 0xC8, destlo);
+ emit_opcode(masm, 0x0F);
+ emit_cc(masm, 0xC8, destlo);
// bswap hi
- emit_opcode(cbuf, 0x0F);
- emit_cc(cbuf, 0xC8, desthi);
+ emit_opcode(masm, 0x0F);
+ emit_cc(masm, 0xC8, desthi);
// xchg lo and hi
- emit_opcode(cbuf, 0x87);
- emit_rm(cbuf, 0x3, destlo, desthi);
+ emit_opcode(masm, 0x87);
+ emit_rm(masm, 0x3, destlo, desthi);
%}
enc_class RegOpc (rRegI div) %{ // IDIV, IMOD, JMP indirect, ...
- emit_rm(cbuf, 0x3, $secondary, $div$$reg );
+ emit_rm(masm, 0x3, $secondary, $div$$reg );
%}
enc_class enc_cmov(cmpOp cop ) %{ // CMOV
$$$emit8$primary;
- emit_cc(cbuf, $secondary, $cop$$cmpcode);
+ emit_cc(masm, $secondary, $cop$$cmpcode);
%}
enc_class enc_cmov_dpr(cmpOp cop, regDPR src ) %{ // CMOV
int op = 0xDA00 + $cop$$cmpcode + ($src$$reg-1);
- emit_d8(cbuf, op >> 8 );
- emit_d8(cbuf, op & 255);
+ emit_d8(masm, op >> 8 );
+ emit_d8(masm, op & 255);
%}
// emulate a CMOV with a conditional branch around a MOV
enc_class enc_cmov_branch( cmpOp cop, immI brOffs ) %{ // CMOV
// Invert sense of branch from sense of CMOV
- emit_cc( cbuf, 0x70, ($cop$$cmpcode^1) );
- emit_d8( cbuf, $brOffs$$constant );
+ emit_cc( masm, 0x70, ($cop$$cmpcode^1) );
+ emit_d8( masm, $brOffs$$constant );
%}
enc_class enc_PartialSubtypeCheck( ) %{
@@ -1711,7 +1717,6 @@ encode %{
Register Resi = as_Register(ESI_enc); // sub class
Label miss;
- MacroAssembler _masm(&cbuf);
__ check_klass_subtype_slow_path(Resi, Reax, Recx, Redi,
nullptr, &miss,
/*set_cond_codes:*/ true);
@@ -1722,43 +1727,40 @@ encode %{
%}
enc_class FFree_Float_Stack_All %{ // Free_Float_Stack_All
- MacroAssembler masm(&cbuf);
- int start = masm.offset();
+ int start = __ offset();
if (UseSSE >= 2) {
if (VerifyFPU) {
- masm.verify_FPU(0, "must be empty in SSE2+ mode");
+ __ verify_FPU(0, "must be empty in SSE2+ mode");
}
} else {
// External c_calling_convention expects the FPU stack to be 'clean'.
// Compiled code leaves it dirty. Do cleanup now.
- masm.empty_FPU_stack();
+ __ empty_FPU_stack();
}
if (sizeof_FFree_Float_Stack_All == -1) {
- sizeof_FFree_Float_Stack_All = masm.offset() - start;
+ sizeof_FFree_Float_Stack_All = __ offset() - start;
} else {
- assert(masm.offset() - start == sizeof_FFree_Float_Stack_All, "wrong size");
+ assert(__ offset() - start == sizeof_FFree_Float_Stack_All, "wrong size");
}
%}
enc_class Verify_FPU_For_Leaf %{
if( VerifyFPU ) {
- MacroAssembler masm(&cbuf);
- masm.verify_FPU( -3, "Returning from Runtime Leaf call");
+ __ verify_FPU( -3, "Returning from Runtime Leaf call");
}
%}
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
// This is the instruction starting address for relocation info.
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
+ __ set_inst_mark();
$$$emit8$primary;
// CALL directly to the runtime
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
+ emit_d32_reloc(masm, ($meth$$method - (int)(__ pc()) - 4),
runtime_call_Relocation::spec(), RELOC_IMM32 );
+ __ clear_inst_mark();
__ post_call_nop();
if (UseSSE >= 2) {
- MacroAssembler _masm(&cbuf);
BasicType rt = tf()->return_type();
if ((rt == T_FLOAT || rt == T_DOUBLE) && !return_value_is_used()) {
@@ -1783,54 +1785,53 @@ encode %{
enc_class pre_call_resets %{
// If method sets FPU control word restore it here
- debug_only(int off0 = cbuf.insts_size());
+ debug_only(int off0 = __ offset());
if (ra_->C->in_24_bit_fp_mode()) {
- MacroAssembler _masm(&cbuf);
__ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_std()));
}
// Clear upper bits of YMM registers when current compiled code uses
// wide vectors to avoid AVX <-> SSE transition penalty during call.
- MacroAssembler _masm(&cbuf);
__ vzeroupper();
- debug_only(int off1 = cbuf.insts_size());
+ debug_only(int off1 = __ offset());
assert(off1 - off0 == pre_call_resets_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
// If method sets FPU control word do it here also
if (Compile::current()->in_24_bit_fp_mode()) {
- MacroAssembler masm(&cbuf);
- masm.fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_24()));
+ __ fldcw(ExternalAddress(StubRoutines::x86::addr_fpu_cntrl_wrd_24()));
}
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
+ __ set_inst_mark();
$$$emit8$primary;
if (!_method) {
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
+ emit_d32_reloc(masm, ($meth$$method - (int)(__ pc()) - 4),
runtime_call_Relocation::spec(),
RELOC_IMM32);
+ __ clear_inst_mark();
__ post_call_nop();
} else {
- int method_index = resolved_method_index(cbuf);
+ int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
- emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
+ emit_d32_reloc(masm, ($meth$$method - (int)(__ pc()) - 4),
rspec, RELOC_DISP32);
__ post_call_nop();
- address mark = cbuf.insts_mark();
+ address mark = __ inst_mark();
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
- cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
+ __ code()->shared_stub_to_interp_for(_method, __ code()->insts()->mark_off());
+ __ clear_inst_mark();
} else {
// Emit stubs for static call.
- address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(masm, mark);
+ __ clear_inst_mark();
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -1840,8 +1841,7 @@ encode %{
%}
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
- MacroAssembler _masm(&cbuf);
- __ ic_call((address)$meth$$method, resolved_method_index(cbuf));
+ __ ic_call((address)$meth$$method, resolved_method_index(masm));
__ post_call_nop();
%}
@@ -1850,57 +1850,31 @@ encode %{
assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small");
// CALL *[EAX+in_bytes(Method::from_compiled_code_entry_point_offset())]
- MacroAssembler _masm(&cbuf);
- cbuf.set_insts_mark();
+ __ set_inst_mark();
$$$emit8$primary;
- emit_rm(cbuf, 0x01, $secondary, EAX_enc ); // R/M byte
- emit_d8(cbuf, disp); // Displacement
+ emit_rm(masm, 0x01, $secondary, EAX_enc ); // R/M byte
+ emit_d8(masm, disp); // Displacement
+ __ clear_inst_mark();
__ post_call_nop();
%}
-// Following encoding is no longer used, but may be restored if calling
-// convention changes significantly.
-// Became: Xor_Reg(EBP), Java_To_Runtime( labl )
-//
-// enc_class Java_Interpreter_Call (label labl) %{ // JAVA INTERPRETER CALL
-// // int ic_reg = Matcher::inline_cache_reg();
-// // int ic_encode = Matcher::_regEncode[ic_reg];
-// // int imo_reg = Matcher::interpreter_method_reg();
-// // int imo_encode = Matcher::_regEncode[imo_reg];
-//
-// // // Interpreter expects method_ptr in EBX, currently a callee-saved register,
-// // // so we load it immediately before the call
-// // emit_opcode(cbuf, 0x8B); // MOV imo_reg,ic_reg # method_ptr
-// // emit_rm(cbuf, 0x03, imo_encode, ic_encode ); // R/M byte
-//
-// // xor rbp,ebp
-// emit_opcode(cbuf, 0x33);
-// emit_rm(cbuf, 0x3, EBP_enc, EBP_enc);
-//
-// // CALL to interpreter.
-// cbuf.set_insts_mark();
-// $$$emit8$primary;
-// emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4),
-// runtime_call_Relocation::spec(), RELOC_IMM32 );
-// %}
-
enc_class RegOpcImm (rRegI dst, immI8 shift) %{ // SHL, SAR, SHR
$$$emit8$primary;
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
$$$emit8$shift$$constant;
%}
enc_class LdImmI (rRegI dst, immI src) %{ // Load Immediate
// Load immediate does not have a zero or sign extended version
// for 8-bit immediates
- emit_opcode(cbuf, 0xB8 + $dst$$reg);
+ emit_opcode(masm, 0xB8 + $dst$$reg);
$$$emit32$src$$constant;
%}
enc_class LdImmP (rRegI dst, immI src) %{ // Load Immediate
// Load immediate does not have a zero or sign extended version
// for 8-bit immediates
- emit_opcode(cbuf, $primary + $dst$$reg);
+ emit_opcode(masm, $primary + $dst$$reg);
$$$emit32$src$$constant;
%}
@@ -1911,11 +1885,11 @@ encode %{
int src_con = $src$$constant & 0x0FFFFFFFFL;
if (src_con == 0) {
// xor dst, dst
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, dst_enc, dst_enc);
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, dst_enc, dst_enc);
} else {
- emit_opcode(cbuf, $primary + dst_enc);
- emit_d32(cbuf, src_con);
+ emit_opcode(masm, $primary + dst_enc);
+ emit_d32(masm, src_con);
}
%}
@@ -1926,48 +1900,48 @@ encode %{
int src_con = ((julong)($src$$constant)) >> 32;
if (src_con == 0) {
// xor dst, dst
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, dst_enc, dst_enc);
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, dst_enc, dst_enc);
} else {
- emit_opcode(cbuf, $primary + dst_enc);
- emit_d32(cbuf, src_con);
+ emit_opcode(masm, $primary + dst_enc);
+ emit_d32(masm, src_con);
}
%}
// Encode a reg-reg copy. If it is useless, then empty encoding.
enc_class enc_Copy( rRegI dst, rRegI src ) %{
- encode_Copy( cbuf, $dst$$reg, $src$$reg );
+ encode_Copy( masm, $dst$$reg, $src$$reg );
%}
enc_class enc_CopyL_Lo( rRegI dst, eRegL src ) %{
- encode_Copy( cbuf, $dst$$reg, $src$$reg );
+ encode_Copy( masm, $dst$$reg, $src$$reg );
%}
enc_class RegReg (rRegI dst, rRegI src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class RegReg_Lo(eRegL dst, eRegL src) %{ // RegReg(Many)
$$$emit8$primary;
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class RegReg_Hi(eRegL dst, eRegL src) %{ // RegReg(Many)
$$$emit8$secondary;
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class RegReg_Lo2(eRegL dst, eRegL src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class RegReg_Hi2(eRegL dst, eRegL src) %{ // RegReg(Many)
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class RegReg_HiLo( eRegL src, rRegI dst ) %{
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($src$$reg));
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($src$$reg));
%}
enc_class Con32 (immI src) %{ // Con32(storeImmI)
@@ -1979,14 +1953,14 @@ encode %{
// Output Float immediate bits
jfloat jf = $src$$constant;
int jf_as_bits = jint_cast( jf );
- emit_d32(cbuf, jf_as_bits);
+ emit_d32(masm, jf_as_bits);
%}
enc_class Con32F_as_bits(immF src) %{ // storeX_imm
// Output Float immediate bits
jfloat jf = $src$$constant;
int jf_as_bits = jint_cast( jf );
- emit_d32(cbuf, jf_as_bits);
+ emit_d32(masm, jf_as_bits);
%}
enc_class Con16 (immI src) %{ // Con16(storeImmI)
@@ -1995,17 +1969,17 @@ encode %{
%}
enc_class Con_d32(immI src) %{
- emit_d32(cbuf,$src$$constant);
+ emit_d32(masm,$src$$constant);
%}
enc_class conmemref (eRegP t1) %{ // Con32(storeImmI)
// Output immediate memory reference
- emit_rm(cbuf, 0x00, $t1$$reg, 0x05 );
- emit_d32(cbuf, 0x00);
+ emit_rm(masm, 0x00, $t1$$reg, 0x05 );
+ emit_d32(masm, 0x00);
%}
enc_class lock_prefix( ) %{
- emit_opcode(cbuf,0xF0); // [Lock]
+ emit_opcode(masm,0xF0); // [Lock]
%}
// Cmp-xchg long value.
@@ -2016,71 +1990,67 @@ encode %{
enc_class enc_cmpxchg8(eSIRegP mem_ptr) %{
// XCHG rbx,ecx
- emit_opcode(cbuf,0x87);
- emit_opcode(cbuf,0xD9);
+ emit_opcode(masm,0x87);
+ emit_opcode(masm,0xD9);
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// CMPXCHG8 [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xC7);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xC7);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
// XCHG rbx,ecx
- emit_opcode(cbuf,0x87);
- emit_opcode(cbuf,0xD9);
+ emit_opcode(masm,0x87);
+ emit_opcode(masm,0xD9);
%}
enc_class enc_cmpxchg(eSIRegP mem_ptr) %{
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// CMPXCHG [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xB1);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xB1);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_cmpxchgb(eSIRegP mem_ptr) %{
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// CMPXCHGB [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xB0);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xB0);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_cmpxchgw(eSIRegP mem_ptr) %{
// [Lock]
- emit_opcode(cbuf,0xF0);
+ emit_opcode(masm,0xF0);
// 16-bit mode
- emit_opcode(cbuf, 0x66);
+ emit_opcode(masm, 0x66);
// CMPXCHGW [Eptr]
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xB1);
- emit_rm( cbuf, 0x0, 1, $mem_ptr$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xB1);
+ emit_rm( masm, 0x0, 1, $mem_ptr$$reg );
%}
enc_class enc_flags_ne_to_boolean( iRegI res ) %{
int res_encoding = $res$$reg;
// MOV res,0
- emit_opcode( cbuf, 0xB8 + res_encoding);
- emit_d32( cbuf, 0 );
+ emit_opcode( masm, 0xB8 + res_encoding);
+ emit_d32( masm, 0 );
// JNE,s fail
- emit_opcode(cbuf,0x75);
- emit_d8(cbuf, 5 );
+ emit_opcode(masm,0x75);
+ emit_d8(masm, 5 );
// MOV res,1
- emit_opcode( cbuf, 0xB8 + res_encoding);
- emit_d32( cbuf, 1 );
+ emit_opcode( masm, 0xB8 + res_encoding);
+ emit_d32( masm, 1 );
// fail:
%}
- enc_class set_instruction_start( ) %{
- cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand
- %}
-
enc_class RegMem (rRegI ereg, memory mem) %{ // emit_reg_mem
int reg_encoding = $ereg$$reg;
int base = $mem$$base;
@@ -2088,7 +2058,7 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
relocInfo::relocType disp_reloc = $mem->disp_reloc();
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class RegMem_Hi(eRegL ereg, memory mem) %{ // emit_reg_mem
@@ -2098,33 +2068,33 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp + 4; // Offset is 4 further in memory
assert( $mem->disp_reloc() == relocInfo::none, "Cannot add 4 to oop" );
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, relocInfo::none);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, relocInfo::none);
%}
enc_class move_long_small_shift( eRegL dst, immI_1_31 cnt ) %{
int r1, r2;
if( $tertiary == 0xA4 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW_ENC($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW_ENC($dst$$reg); }
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,$tertiary);
- emit_rm(cbuf, 0x3, r1, r2);
- emit_d8(cbuf,$cnt$$constant);
- emit_d8(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, r1);
- emit_d8(cbuf,$cnt$$constant);
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,$tertiary);
+ emit_rm(masm, 0x3, r1, r2);
+ emit_d8(masm,$cnt$$constant);
+ emit_d8(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, r1);
+ emit_d8(masm,$cnt$$constant);
%}
enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{
- emit_opcode( cbuf, 0x8B ); // Move
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode( masm, 0x8B ); // Move
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
if( $cnt$$constant > 32 ) { // Shift, if not by zero
- emit_d8(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
- emit_d8(cbuf,$cnt$$constant-32);
+ emit_d8(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, $dst$$reg);
+ emit_d8(masm,$cnt$$constant-32);
}
- emit_d8(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW_ENC($dst$$reg));
- emit_d8(cbuf,31);
+ emit_d8(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_d8(masm,31);
%}
enc_class move_long_big_shift_clr( eRegL dst, immI_32_63 cnt ) %{
@@ -2132,28 +2102,28 @@ encode %{
if( $secondary == 0x5 ) { r1 = $dst$$reg; r2 = HIGH_FROM_LOW_ENC($dst$$reg); }
else { r2 = $dst$$reg; r1 = HIGH_FROM_LOW_ENC($dst$$reg); }
- emit_opcode( cbuf, 0x8B ); // Move r1,r2
- emit_rm(cbuf, 0x3, r1, r2);
+ emit_opcode( masm, 0x8B ); // Move r1,r2
+ emit_rm(masm, 0x3, r1, r2);
if( $cnt$$constant > 32 ) { // Shift, if not by zero
- emit_opcode(cbuf,$primary);
- emit_rm(cbuf, 0x3, $secondary, r1);
- emit_d8(cbuf,$cnt$$constant-32);
+ emit_opcode(masm,$primary);
+ emit_rm(masm, 0x3, $secondary, r1);
+ emit_d8(masm,$cnt$$constant-32);
}
- emit_opcode(cbuf,0x33); // XOR r2,r2
- emit_rm(cbuf, 0x3, r2, r2);
+ emit_opcode(masm,0x33); // XOR r2,r2
+ emit_rm(masm, 0x3, r2, r2);
%}
// Clone of RegMem but accepts an extra parameter to access each
// half of a double in memory; it never needs relocation info.
enc_class Mov_MemD_half_to_Reg (immI opcode, memory mem, immI disp_for_half, rRegI rm_reg) %{
- emit_opcode(cbuf,$opcode$$constant);
+ emit_opcode(masm,$opcode$$constant);
int reg_encoding = $rm_reg$$reg;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int displace = $mem$$disp + $disp_for_half$$constant;
relocInfo::relocType disp_reloc = relocInfo::none;
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
%}
// !!!!! Special Custom Code used by MemMove, and stack access instructions !!!!!
@@ -2168,7 +2138,7 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
assert( $mem->disp_reloc() == relocInfo::none, "No oops here because no reloc info allowed" );
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
+ encode_RegMem(masm, rm_byte_opcode, base, index, scale, displace, relocInfo::none);
%}
enc_class RMopc_Mem (immI rm_opcode, memory mem) %{
@@ -2178,7 +2148,7 @@ encode %{
int scale = $mem$$scale;
int displace = $mem$$disp;
relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
- encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, rm_byte_opcode, base, index, scale, displace, disp_reloc);
%}
enc_class RegLea (rRegI dst, rRegI src0, immI src1 ) %{ // emit_reg_lea
@@ -2188,31 +2158,31 @@ encode %{
int scale = 0x00; // 0x00 indicates no scale
int displace = $src1$$constant; // 0x00 indicates no displacement
relocInfo::relocType disp_reloc = relocInfo::none;
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
%}
enc_class min_enc (rRegI dst, rRegI src) %{ // MIN
// Compare dst,src
- emit_opcode(cbuf,0x3B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x3B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
// jmp dst < src around move
- emit_opcode(cbuf,0x7C);
- emit_d8(cbuf,2);
+ emit_opcode(masm,0x7C);
+ emit_d8(masm,2);
// move dst,src
- emit_opcode(cbuf,0x8B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x8B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class max_enc (rRegI dst, rRegI src) %{ // MAX
// Compare dst,src
- emit_opcode(cbuf,0x3B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x3B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
// jmp dst > src around move
- emit_opcode(cbuf,0x7F);
- emit_d8(cbuf,2);
+ emit_opcode(masm,0x7F);
+ emit_d8(masm,2);
// move dst,src
- emit_opcode(cbuf,0x8B);
- emit_rm(cbuf, 0x3, $dst$$reg, $src$$reg);
+ emit_opcode(masm,0x8B);
+ emit_rm(masm, 0x3, $dst$$reg, $src$$reg);
%}
enc_class enc_FPR_store(memory mem, regDPR src) %{
@@ -2226,115 +2196,116 @@ encode %{
relocInfo::relocType disp_reloc = $mem->disp_reloc(); // disp-as-oop when working with static globals
if( $src$$reg != FPR1L_enc ) {
reg_encoding = 0x3; // Store & pop
- emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it)
- emit_d8( cbuf, 0xC0-1+$src$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD (i.e., push it)
+ emit_d8( masm, 0xC0-1+$src$$reg );
}
- cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand
- emit_opcode(cbuf,$primary);
- encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
+ __ set_inst_mark(); // Mark start of opcode for reloc info in mem operand
+ emit_opcode(masm,$primary);
+ encode_RegMem(masm, reg_encoding, base, index, scale, displace, disp_reloc);
+ __ clear_inst_mark();
%}
enc_class neg_reg(rRegI dst) %{
// NEG $dst
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0x03, $dst$$reg );
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0x03, $dst$$reg );
%}
enc_class setLT_reg(eCXRegI dst) %{
// SETLT $dst
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0x9C);
- emit_rm( cbuf, 0x3, 0x4, $dst$$reg );
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0x9C);
+ emit_rm( masm, 0x3, 0x4, $dst$$reg );
%}
enc_class enc_cmpLTP(ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp) %{ // cadd_cmpLT
int tmpReg = $tmp$$reg;
// SUB $p,$q
- emit_opcode(cbuf,0x2B);
- emit_rm(cbuf, 0x3, $p$$reg, $q$$reg);
+ emit_opcode(masm,0x2B);
+ emit_rm(masm, 0x3, $p$$reg, $q$$reg);
// SBB $tmp,$tmp
- emit_opcode(cbuf,0x1B);
- emit_rm(cbuf, 0x3, tmpReg, tmpReg);
+ emit_opcode(masm,0x1B);
+ emit_rm(masm, 0x3, tmpReg, tmpReg);
// AND $tmp,$y
- emit_opcode(cbuf,0x23);
- emit_rm(cbuf, 0x3, tmpReg, $y$$reg);
+ emit_opcode(masm,0x23);
+ emit_rm(masm, 0x3, tmpReg, $y$$reg);
// ADD $p,$tmp
- emit_opcode(cbuf,0x03);
- emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
+ emit_opcode(masm,0x03);
+ emit_rm(masm, 0x3, $p$$reg, tmpReg);
%}
enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{
// TEST shift,32
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0, ECX_enc);
- emit_d32(cbuf,0x20);
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0, ECX_enc);
+ emit_d32(masm,0x20);
// JEQ,s small
- emit_opcode(cbuf, 0x74);
- emit_d8(cbuf, 0x04);
+ emit_opcode(masm, 0x74);
+ emit_d8(masm, 0x04);
// MOV $dst.hi,$dst.lo
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg );
// CLR $dst.lo
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, $dst$$reg, $dst$$reg);
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, $dst$$reg, $dst$$reg);
// small:
// SHLD $dst.hi,$dst.lo,$shift
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xA5);
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xA5);
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg));
// SHL $dst.lo,$shift"
- emit_opcode(cbuf,0xD3);
- emit_rm(cbuf, 0x3, 0x4, $dst$$reg );
+ emit_opcode(masm,0xD3);
+ emit_rm(masm, 0x3, 0x4, $dst$$reg );
%}
enc_class shift_right_long( eRegL dst, eCXRegI shift ) %{
// TEST shift,32
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0, ECX_enc);
- emit_d32(cbuf,0x20);
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0, ECX_enc);
+ emit_d32(masm,0x20);
// JEQ,s small
- emit_opcode(cbuf, 0x74);
- emit_d8(cbuf, 0x04);
+ emit_opcode(masm, 0x74);
+ emit_d8(masm, 0x04);
// MOV $dst.lo,$dst.hi
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// CLR $dst.hi
- emit_opcode(cbuf, 0x33);
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($dst$$reg));
+ emit_opcode(masm, 0x33);
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), HIGH_FROM_LOW_ENC($dst$$reg));
// small:
// SHRD $dst.lo,$dst.hi,$shift
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xAD);
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xAD);
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
// SHR $dst.hi,$shift"
- emit_opcode(cbuf,0xD3);
- emit_rm(cbuf, 0x3, 0x5, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode(masm,0xD3);
+ emit_rm(masm, 0x3, 0x5, HIGH_FROM_LOW_ENC($dst$$reg) );
%}
enc_class shift_right_arith_long( eRegL dst, eCXRegI shift ) %{
// TEST shift,32
- emit_opcode(cbuf,0xF7);
- emit_rm(cbuf, 0x3, 0, ECX_enc);
- emit_d32(cbuf,0x20);
+ emit_opcode(masm,0xF7);
+ emit_rm(masm, 0x3, 0, ECX_enc);
+ emit_d32(masm,0x20);
// JEQ,s small
- emit_opcode(cbuf, 0x74);
- emit_d8(cbuf, 0x05);
+ emit_opcode(masm, 0x74);
+ emit_d8(masm, 0x05);
// MOV $dst.lo,$dst.hi
- emit_opcode( cbuf, 0x8B );
- emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode( masm, 0x8B );
+ emit_rm(masm, 0x3, $dst$$reg, HIGH_FROM_LOW_ENC($dst$$reg) );
// SAR $dst.hi,31
- emit_opcode(cbuf, 0xC1);
- emit_rm(cbuf, 0x3, 7, HIGH_FROM_LOW_ENC($dst$$reg) );
- emit_d8(cbuf, 0x1F );
+ emit_opcode(masm, 0xC1);
+ emit_rm(masm, 0x3, 7, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_d8(masm, 0x1F );
// small:
// SHRD $dst.lo,$dst.hi,$shift
- emit_opcode(cbuf,0x0F);
- emit_opcode(cbuf,0xAD);
- emit_rm(cbuf, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
+ emit_opcode(masm,0x0F);
+ emit_opcode(masm,0xAD);
+ emit_rm(masm, 0x3, HIGH_FROM_LOW_ENC($dst$$reg), $dst$$reg);
// SAR $dst.hi,$shift"
- emit_opcode(cbuf,0xD3);
- emit_rm(cbuf, 0x3, 0x7, HIGH_FROM_LOW_ENC($dst$$reg) );
+ emit_opcode(masm,0xD3);
+ emit_rm(masm, 0x3, 0x7, HIGH_FROM_LOW_ENC($dst$$reg) );
%}
@@ -2342,136 +2313,135 @@ encode %{
// May leave result in FPU-TOS or FPU reg depending on opcodes
enc_class OpcReg_FPR(regFPR src) %{ // FMUL, FDIV
$$$emit8$primary;
- emit_rm(cbuf, 0x3, $secondary, $src$$reg );
+ emit_rm(masm, 0x3, $secondary, $src$$reg );
%}
// Pop argument in FPR0 with FSTP ST(0)
enc_class PopFPU() %{
- emit_opcode( cbuf, 0xDD );
- emit_d8( cbuf, 0xD8 );
+ emit_opcode( masm, 0xDD );
+ emit_d8( masm, 0xD8 );
%}
// !!!!! equivalent to Pop_Reg_F
enc_class Pop_Reg_DPR( regDPR dst ) %{
- emit_opcode( cbuf, 0xDD ); // FSTP ST(i)
- emit_d8( cbuf, 0xD8+$dst$$reg );
+ emit_opcode( masm, 0xDD ); // FSTP ST(i)
+ emit_d8( masm, 0xD8+$dst$$reg );
%}
enc_class Push_Reg_DPR( regDPR dst ) %{
- emit_opcode( cbuf, 0xD9 );
- emit_d8( cbuf, 0xC0-1+$dst$$reg ); // FLD ST(i-1)
+ emit_opcode( masm, 0xD9 );
+ emit_d8( masm, 0xC0-1+$dst$$reg ); // FLD ST(i-1)
%}
enc_class strictfp_bias1( regDPR dst ) %{
- emit_opcode( cbuf, 0xDB ); // FLD m80real
- emit_opcode( cbuf, 0x2D );
- emit_d32( cbuf, (int)StubRoutines::x86::addr_fpu_subnormal_bias1() );
- emit_opcode( cbuf, 0xDE ); // FMULP ST(dst), ST0
- emit_opcode( cbuf, 0xC8+$dst$$reg );
+ emit_opcode( masm, 0xDB ); // FLD m80real
+ emit_opcode( masm, 0x2D );
+ emit_d32( masm, (int)StubRoutines::x86::addr_fpu_subnormal_bias1() );
+ emit_opcode( masm, 0xDE ); // FMULP ST(dst), ST0
+ emit_opcode( masm, 0xC8+$dst$$reg );
%}
enc_class strictfp_bias2( regDPR dst ) %{
- emit_opcode( cbuf, 0xDB ); // FLD m80real
- emit_opcode( cbuf, 0x2D );
- emit_d32( cbuf, (int)StubRoutines::x86::addr_fpu_subnormal_bias2() );
- emit_opcode( cbuf, 0xDE ); // FMULP ST(dst), ST0
- emit_opcode( cbuf, 0xC8+$dst$$reg );
+ emit_opcode( masm, 0xDB ); // FLD m80real
+ emit_opcode( masm, 0x2D );
+ emit_d32( masm, (int)StubRoutines::x86::addr_fpu_subnormal_bias2() );
+ emit_opcode( masm, 0xDE ); // FMULP ST(dst), ST0
+ emit_opcode( masm, 0xC8+$dst$$reg );
%}
// Special case for moving an integer register to a stack slot.
enc_class OpcPRegSS( stackSlotI dst, rRegI src ) %{ // RegSS
- store_to_stackslot( cbuf, $primary, $src$$reg, $dst$$disp );
+ store_to_stackslot( masm, $primary, $src$$reg, $dst$$disp );
%}
// Special case for moving a register to a stack slot.
enc_class RegSS( stackSlotI dst, rRegI src ) %{ // RegSS
// Opcode already emitted
- emit_rm( cbuf, 0x02, $src$$reg, ESP_enc ); // R/M byte
- emit_rm( cbuf, 0x00, ESP_enc, ESP_enc); // SIB byte
- emit_d32(cbuf, $dst$$disp); // Displacement
+ emit_rm( masm, 0x02, $src$$reg, ESP_enc ); // R/M byte
+ emit_rm( masm, 0x00, ESP_enc, ESP_enc); // SIB byte
+ emit_d32(masm, $dst$$disp); // Displacement
%}
// Push the integer in stackSlot 'src' onto FP-stack
enc_class Push_Mem_I( memory src ) %{ // FILD [ESP+src]
- store_to_stackslot( cbuf, $primary, $secondary, $src$$disp );
+ store_to_stackslot( masm, $primary, $secondary, $src$$disp );
%}
// Push FPU's TOS float to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_FPR( stackSlotF dst ) %{ // FSTP_S [ESP+dst]
- store_to_stackslot( cbuf, 0xD9, 0x03, $dst$$disp );
+ store_to_stackslot( masm, 0xD9, 0x03, $dst$$disp );
%}
// Same as Pop_Mem_F except for opcode
// Push FPU's TOS double to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_DPR( stackSlotD dst ) %{ // FSTP_D [ESP+dst]
- store_to_stackslot( cbuf, 0xDD, 0x03, $dst$$disp );
+ store_to_stackslot( masm, 0xDD, 0x03, $dst$$disp );
%}
enc_class Pop_Reg_FPR( regFPR dst ) %{
- emit_opcode( cbuf, 0xDD ); // FSTP ST(i)
- emit_d8( cbuf, 0xD8+$dst$$reg );
+ emit_opcode( masm, 0xDD ); // FSTP ST(i)
+ emit_d8( masm, 0xD8+$dst$$reg );
%}
enc_class Push_Reg_FPR( regFPR dst ) %{
- emit_opcode( cbuf, 0xD9 ); // FLD ST(i-1)
- emit_d8( cbuf, 0xC0-1+$dst$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD ST(i-1)
+ emit_d8( masm, 0xC0-1+$dst$$reg );
%}
// Push FPU's float to a stack-slot, and pop FPU-stack
enc_class Pop_Mem_Reg_FPR( stackSlotF dst, regFPR src ) %{
int pop = 0x02;
if ($src$$reg != FPR1L_enc) {
- emit_opcode( cbuf, 0xD9 ); // FLD ST(i-1)
- emit_d8( cbuf, 0xC0-1+$src$$reg );
+ emit_opcode( masm, 0xD9 ); // FLD ST(i-1)
+ emit_d8( masm, 0xC0-1+$src$$reg );
pop = 0x03;
}
- store_to_stackslot( cbuf, 0xD9, pop, $dst$$disp ); // FST