From c3f6630f6a3eb0ba38b2f57475d19afa361df03d Mon Sep 17 00:00:00 2001 From: loongson-jvm Date: Wed, 30 Oct 2024 16:18:22 +0800 Subject: [PATCH] Update (2024.10.30, 2nd) 34958: LoongArch64: Generate comments in -XX:+PrintInterpreter to link to source code 34840: New Object to ObjectMonitor mapping --- .../c1_MacroAssembler_loongarch_64.cpp | 2 +- .../loongarch/c2_MacroAssembler_loongarch.cpp | 181 +++++++++++------- .../loongarch/c2_MacroAssembler_loongarch.hpp | 4 +- .../loongarch/interp_masm_loongarch_64.cpp | 2 +- src/hotspot/cpu/loongarch/loongarch_64.ad | 4 +- .../loongarch/macroAssembler_loongarch.cpp | 9 +- .../loongarch/macroAssembler_loongarch.hpp | 2 +- .../cpu/loongarch/methodHandles_loongarch.cpp | 3 +- .../loongarch/sharedRuntime_loongarch_64.cpp | 2 +- ...templateInterpreterGenerator_loongarch.cpp | 25 ++- .../loongarch/templateTable_loongarch_64.cpp | 3 +- .../share/runtime/basicLock.inline.hpp | 8 +- 12 files changed, 155 insertions(+), 90 deletions(-) diff --git a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp index 102a1cdf63465..a577b5c8cae7b 100644 --- a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp +++ b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp @@ -58,7 +58,7 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr } if (LockingMode == LM_LIGHTWEIGHT) { - lightweight_lock(obj, hdr, SCR1, SCR2, slow_case); + lightweight_lock(disp_hdr, obj, hdr, SCR1, SCR2, slow_case); } else if (LockingMode == LM_LEGACY) { Label done; // Load object header diff --git a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp index dad723b0f9172..38f4e307ba205 100644 --- a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp +++ b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.cpp @@ -214,9 +214,9 @@ void C2_MacroAssembler::fast_unlock_c2(Register oop, 
Register box, Register flag bind(no_count); } -void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register flag, Register tmp1, Register tmp2, Register tmp3) { +void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register flag, Register tmp1, Register tmp2, Register tmp3) { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - assert_different_registers(obj, tmp1, tmp2, tmp3, flag); + assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag); // Handle inflated monitor. Label inflated; @@ -225,6 +225,11 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register flag, Regis // Finish fast lock unsuccessfully. slow_path MUST branch to with flag == 0 Label slow_path; + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + st_d(R0, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + } + move(flag, R0); if (DiagnoseSyncOnValueBasedClasses != 0) { load_klass(tmp1, obj); @@ -234,6 +239,7 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register flag, Regis } const Register tmp1_mark = tmp1; + const Register tmp3_t = tmp3; { // Lightweight locking @@ -241,7 +247,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register flag, Regis Label push; const Register tmp2_top = tmp2; - const Register tmp3_t = tmp3; // Check if lock-stack is full. ld_wu(tmp2_top, Address(TREG, JavaThread::lock_stack_top_offset())); @@ -278,29 +283,67 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register flag, Regis { // Handle inflated monitor. bind(inflated); + const Register tmp1_monitor = tmp1; if (!UseObjectMonitorTable) { - // mark contains the tagged ObjectMonitor*. 
- const Register tmp1_tagged_monitor = tmp1_mark; - const uintptr_t monitor_tag = markWord::monitor_value; - const Register tmp2_owner_addr = tmp2; - const Register tmp3_owner = tmp3; + assert(tmp1_monitor == tmp1_mark, "should be the same here"); + } else { + Label monitor_found; - // Compute owner address. - lea(tmp2_owner_addr, Address(tmp1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag))); + // Load cache address + lea(tmp3_t, Address(TREG, JavaThread::om_cache_oops_offset())); - move(tmp3_owner, R0); - // CAS owner (null => current thread). - cmpxchg(Address(tmp2_owner_addr, 0), tmp3_owner, TREG, flag, true, true /* acquire */); - bnez(flag, locked); + const int num_unrolled = 2; + for (int i = 0; i < num_unrolled; i++) { + ld_d(tmp1, Address(tmp3_t)); + beq(obj, tmp1, monitor_found); + addi_d(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference())); + } - // Check if recursive. - bne(tmp3_owner, TREG, slow_path); + Label loop; - // Recursive. - increment(Address(tmp1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1); - } else { - // OMCache lookup not supported yet. Take the slowpath. + // Search for obj in cache. + bind(loop); + + // Check for match. + ld_d(tmp1, tmp3_t, 0); + beq(obj, tmp1, monitor_found); + + // Search until null encountered, guaranteed _null_sentinel at end. + addi_d(tmp3_t, tmp3_t, in_bytes(OMCache::oop_to_oop_difference())); + bnez(tmp1, loop); + // Cache Miss. Take the slowpath. b(slow_path); + + bind(monitor_found); + ld_d(tmp1_monitor, Address(tmp3_t, OMCache::oop_to_monitor_difference())); + } + + const Register tmp2_owner_addr = tmp2; + const Register tmp3_owner = tmp3; + + const ByteSize monitor_tag = in_ByteSize(UseObjectMonitorTable ? 
0 : checked_cast<int>(markWord::monitor_value)); + const Address owner_address(tmp1_monitor, ObjectMonitor::owner_offset() - monitor_tag); + const Address recursions_address(tmp1_monitor, ObjectMonitor::recursions_offset() - monitor_tag); + + Label monitor_locked; + + // Compute owner address. + lea(tmp2_owner_addr, owner_address); + + move(tmp3_owner, R0); + // CAS owner (null => current thread). + cmpxchg(Address(tmp2_owner_addr, 0), tmp3_owner, TREG, flag, true, true /* acquire */); + bnez(flag, monitor_locked); + + // Check if recursive. + bne(tmp3_owner, TREG, slow_path); + + // Recursive. + increment(recursions_address, 1); + + bind(monitor_locked); + if (UseObjectMonitorTable) { + st_d(tmp1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); } } @@ -326,12 +369,12 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register flag, Regis // C2 uses the value of flag (0 vs !0) to determine the continuation. } -void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register flag, Register tmp1, Register tmp2, Register tmp3) { +void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register flag, Register tmp1, Register tmp2, Register tmp3) { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - assert_different_registers(obj, tmp1, tmp2, tmp3, flag, AT); + assert_different_registers(obj, box, tmp1, tmp2, tmp3, flag, AT); // Handle inflated monitor. - Label inflated, inflated_load_monitor; + Label inflated, inflated_load_mark; // Finish fast unlock successfully. unlocked MUST branch to with flag == 0 Label unlocked; // Finish fast unlock unsuccessfully. MUST branch to with flag != 0 @@ -343,14 +386,14 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register flag, Reg move(flag, R0); { // Lightweight unlock - Label push_and_slow; + Label push_and_slow_path; // Check if obj is top of lock-stack. 
ld_wu(tmp2_top, Address(TREG, JavaThread::lock_stack_top_offset())); addi_w(tmp2_top, tmp2_top, -oopSize); ldx_d(tmp3_t, TREG, tmp2_top); // Top of lock stack was not obj. Must be monitor. - bne(obj, tmp3_t, inflated_load_monitor); + bne(obj, tmp3_t, inflated_load_mark); // Pop lock-stack. DEBUG_ONLY(stx_d(R0, TREG, tmp2_top);) @@ -366,12 +409,11 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register flag, Reg ld_d(tmp1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); // Check header for monitor (0b10). + // Because we got here by popping (meaning we pushed in locked) + // there will be no monitor in the box. So we need to push back the obj + // so that the runtime can fix any potential anonymous owner. test_bit(tmp3_t, tmp1_mark, exact_log2(markWord::monitor_value)); - if (!UseObjectMonitorTable) { - bnez(tmp3_t, inflated); - } else { - bnez(tmp3_t, push_and_slow); - } + bnez(tmp3_t, UseObjectMonitorTable ? push_and_slow_path : inflated); // Try to unlock. Transition lock bits 0b00 => 0b01 assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea"); @@ -379,7 +421,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register flag, Reg cmpxchg(Address(obj, 0), tmp1_mark, tmp3_t, flag, false, false /* acquire */); bnez(flag, unlocked); - bind(push_and_slow); + bind(push_and_slow_path); // Compare and exchange failed. // Restore lock-stack and handle the unlock in runtime. DEBUG_ONLY(stx_d(obj, TREG, tmp2_top);) @@ -389,7 +431,7 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register flag, Reg } { // Handle inflated monitor. 
- bind(inflated_load_monitor); + bind(inflated_load_mark); ld_d(tmp1_mark, Address(obj, oopDesc::mark_offset_in_bytes())); #ifdef ASSERT test_bit(tmp3_t, tmp1_mark, exact_log2(markWord::monitor_value)); @@ -410,54 +452,55 @@ void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register flag, Reg bind(check_done); #endif - if (!UseObjectMonitorTable) { - // mark contains the tagged ObjectMonitor*. - const Register tmp1_monitor = tmp1_mark; - const intptr_t monitor_tag = markWord::monitor_value; + const Register tmp1_monitor = tmp1; + if (!UseObjectMonitorTable) { + assert(tmp1_monitor == tmp1_mark, "should be the same here"); // Untag the monitor. - addi_d(tmp1_monitor, tmp1_mark, -monitor_tag); + addi_d(tmp1_monitor, tmp1_mark, -(int)markWord::monitor_value); + } else { + ld_d(tmp1_monitor, Address(box, BasicLock::object_monitor_cache_offset_in_bytes())); + // No valid pointer below alignof(ObjectMonitor*). Take the slow path. + li(tmp3_t, alignof(ObjectMonitor*)); + bltu(tmp1_monitor, tmp3_t, slow_path); + } - const Register tmp2_recursions = tmp2; - Label not_recursive; + const Register tmp2_recursions = tmp2; + Label not_recursive; - // Check if recursive. - ld_d(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); - beqz(tmp2_recursions, not_recursive); + // Check if recursive. + ld_d(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); + beqz(tmp2_recursions, not_recursive); - // Recursive unlock. - addi_d(tmp2_recursions, tmp2_recursions, -1); - st_d(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); - b(unlocked); + // Recursive unlock. + addi_d(tmp2_recursions, tmp2_recursions, -1); + st_d(tmp2_recursions, Address(tmp1_monitor, ObjectMonitor::recursions_offset())); + b(unlocked); - bind(not_recursive); + bind(not_recursive); - Label release; - const Register tmp2_owner_addr = tmp2; + Label release; + const Register tmp2_owner_addr = tmp2; - // Compute owner address. 
- lea(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset())); + // Compute owner address. + lea(tmp2_owner_addr, Address(tmp1_monitor, ObjectMonitor::owner_offset())); - // Check if the entry lists are empty. - ld_d(AT, Address(tmp1_monitor, ObjectMonitor::EntryList_offset())); - ld_d(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset())); - orr(AT, AT, tmp3_t); - beqz(AT, release); + // Check if the entry lists are empty. + ld_d(AT, Address(tmp1_monitor, ObjectMonitor::EntryList_offset())); + ld_d(tmp3_t, Address(tmp1_monitor, ObjectMonitor::cxq_offset())); + orr(AT, AT, tmp3_t); + beqz(AT, release); - // The owner may be anonymous and we removed the last obj entry in - // the lock-stack. This loses the information about the owner. - // Write the thread to the owner field so the runtime knows the owner. - st_d(TREG, Address(tmp2_owner_addr, 0)); - b(slow_path); + // The owner may be anonymous and we removed the last obj entry in + // the lock-stack. This loses the information about the owner. + // Write the thread to the owner field so the runtime knows the owner. + st_d(TREG, tmp2_owner_addr, 0); + b(slow_path); - bind(release); - // Set owner to null. - membar(Assembler::Membar_mask_bits(MacroAssembler::LoadStore | MacroAssembler::StoreStore)); - st_d(R0, Address(tmp2_owner_addr)); - } else { - // OMCache lookup not supported yet. Take the slowpath. - b(slow_path); - } + bind(release); + // Set owner to null. 
+ membar(Assembler::Membar_mask_bits(MacroAssembler::LoadStore | MacroAssembler::StoreStore)); + st_d(R0, tmp2_owner_addr, 0); } bind(unlocked); diff --git a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.hpp index 8e4f0150897e7..7a19a6f9949e6 100644 --- a/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.hpp +++ b/src/hotspot/cpu/loongarch/c2_MacroAssembler_loongarch.hpp @@ -38,9 +38,9 @@ void fast_unlock_c2(Register oop, Register box, Register flag, Register disp_hdr, Register tmp); // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file. - void fast_lock_lightweight(Register object, Register flag, + void fast_lock_lightweight(Register object, Register box, Register flag, Register tmp1, Register tmp2, Register tmp3); - void fast_unlock_lightweight(Register object, Register flag, + void fast_unlock_lightweight(Register object, Register box, Register flag, Register tmp1, Register tmp2, Register tmp3); // Compare strings. 
diff --git a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp index 5db61066cf13d..ee9d1d7fd191b 100644 --- a/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp +++ b/src/hotspot/cpu/loongarch/interp_masm_loongarch_64.cpp @@ -782,7 +782,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) { } if (LockingMode == LM_LIGHTWEIGHT) { - lightweight_lock(scr_reg, tmp_reg, SCR1, SCR2, slow_case); + lightweight_lock(lock_reg, scr_reg, tmp_reg, SCR1, SCR2, slow_case); b(count); } else if (LockingMode == LM_LEGACY) { // Load (object->mark() | 1) into tmp_reg diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad index 41a7cc0f587d1..f72c6baba4473 100644 --- a/src/hotspot/cpu/loongarch/loongarch_64.ad +++ b/src/hotspot/cpu/loongarch/loongarch_64.ad @@ -11109,7 +11109,7 @@ instruct cmpFastLockLightweight(FlagsReg cr, no_CR_mRegP object, no_CR_mRegP box format %{ "FASTLOCK $cr <-- $object, $box, $tmp1, $tmp2, $tmp3 #@ cmpFastLockLightweight" %} ins_encode %{ - __ fast_lock_lightweight($object$$Register, $cr$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); + __ fast_lock_lightweight($object$$Register, $box$$Register, $cr$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); %} ins_pipe( pipe_slow ); @@ -11123,7 +11123,7 @@ instruct cmpFastUnlockLightweight(FlagsReg cr, no_CR_mRegP object, no_CR_mRegP b format %{ "FASTUNLOCK $cr <-- $object, $box, $tmp1, $tmp2, $tmp3 #@ cmpFastUnlockLightweight" %} ins_encode %{ - __ fast_unlock_lightweight($object$$Register, $cr$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); + __ fast_unlock_lightweight($object$$Register, $box$$Register, $cr$$Register, $tmp1$$Register, $tmp2$$Register, $tmp3$$Register); %} ins_pipe( pipe_slow ); diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp index 
40d4680d873ff..0c2f3dfe7a65d 100644 --- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp +++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.cpp @@ -4444,9 +4444,9 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) { // - obj: the object to be locked // - tmp1, tmp2, tmp3: temporary registers, will be destroyed // - slow: branched to if locking fails -void MacroAssembler::lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) { +void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow) { assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking"); - assert_different_registers(obj, tmp1, tmp2, tmp3); + assert_different_registers(basic_lock, obj, tmp1, tmp2, tmp3); Label _push, _tmp; const Register top = tmp1; @@ -4457,6 +4457,11 @@ void MacroAssembler::lightweight_lock(Register obj, Register tmp1, Register tmp2 // instruction emitted as it is part of C1's null check semantics. ld_d(mark, Address(obj, oopDesc::mark_offset_in_bytes())); + if (UseObjectMonitorTable) { + // Clear cache in case fast locking succeeds. + st_d(R0, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes())))); + } + // Check if the lock-stack is full. 
ld_wu(top, Address(TREG, JavaThread::lock_stack_top_offset())); li(t, (unsigned)LockStack::end_offset()); diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp index 344506a05092e..db5e1c2a5a9ca 100644 --- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp +++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp @@ -776,7 +776,7 @@ class MacroAssembler: public Assembler { } } - void lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); + void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); #if INCLUDE_ZGC diff --git a/src/hotspot/cpu/loongarch/methodHandles_loongarch.cpp b/src/hotspot/cpu/loongarch/methodHandles_loongarch.cpp index 85896921a2ec0..f2bfc86f9b08e 100644 --- a/src/hotspot/cpu/loongarch/methodHandles_loongarch.cpp +++ b/src/hotspot/cpu/loongarch/methodHandles_loongarch.cpp @@ -27,6 +27,7 @@ #include "asm/macroAssembler.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/vmClasses.hpp" +#include "compiler/disassembler.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "memory/allocation.inline.hpp" @@ -36,7 +37,7 @@ #include "runtime/stubRoutines.hpp" #include "utilities/preserveException.hpp" -#define __ _masm-> +#define __ Disassembler::hook(__FILE__, __LINE__, _masm)-> #ifdef PRODUCT #define BLOCK_COMMENT(str) // nothing diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp index 01ac1de705c78..8ef6a63a56f6a 100644 --- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp +++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp @@ -1787,7 +1787,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, 
assert(LockingMode == LM_LIGHTWEIGHT, "must be"); // FIXME Register tmp = T1; - __ lightweight_lock(obj_reg, swap_reg, tmp, SCR1, slow_path_lock); + __ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, SCR1, slow_path_lock); } __ bind(count); diff --git a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp index 14d130bf6cc28..4787d0e562056 100644 --- a/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp +++ b/src/hotspot/cpu/loongarch/templateInterpreterGenerator_loongarch.cpp @@ -26,6 +26,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" #include "classfile/javaClasses.hpp" +#include "compiler/disassembler.hpp" #include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/bytecodeHistogram.hpp" #include "interpreter/interp_masm.hpp" @@ -54,7 +55,7 @@ #include "runtime/vframeArray.hpp" #include "utilities/debug.hpp" -#define __ _masm-> +#define __ Disassembler::hook(__FILE__, __LINE__, _masm)-> int TemplateInterpreter::InterpreterCodeSize = 500 * K; @@ -2022,13 +2023,21 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& vep) { assert(t->is_valid() && t->tos_in() == vtos, "illegal template"); Label L; - fep = __ pc(); __ push(ftos); __ b(L); - dep = __ pc(); __ push(dtos); __ b(L); - lep = __ pc(); __ push(ltos); __ b(L); - aep =__ pc(); __ push(atos); __ b(L); - bep = cep = sep = - iep = __ pc(); __ push(itos); - vep = __ pc(); + aep = __ pc(); // atos entry point + __ push(atos); + __ b(L); + fep = __ pc(); // ftos entry point + __ push(ftos); + __ b(L); + dep = __ pc(); // dtos entry point + __ push(dtos); + __ b(L); + lep = __ pc(); // ltos entry point + __ push(ltos); + __ b(L); + bep = cep = sep = iep = __ pc(); // [bcsi]tos entry point + __ push(itos); + vep = __ pc(); // vtos entry point __ bind(L); generate_and_dispatch(t); } diff --git a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp 
b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp index a3fc79c0aae68..be0a8f997536c 100644 --- a/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp +++ b/src/hotspot/cpu/loongarch/templateTable_loongarch_64.cpp @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "asm/macroAssembler.hpp" +#include "compiler/disassembler.hpp" #include "interpreter/interpreter.hpp" #include "interpreter/interpreterRuntime.hpp" #include "interpreter/interp_masm.hpp" @@ -48,7 +49,7 @@ #include "utilities/macros.hpp" -#define __ _masm-> +#define __ Disassembler::hook(__FILE__, __LINE__, _masm)-> // Address computation: local variables diff --git a/src/hotspot/share/runtime/basicLock.inline.hpp b/src/hotspot/share/runtime/basicLock.inline.hpp index c04c8e5b11706..f78537372057d 100644 --- a/src/hotspot/share/runtime/basicLock.inline.hpp +++ b/src/hotspot/share/runtime/basicLock.inline.hpp @@ -22,6 +22,12 @@ * */ +/* + * This file has been modified by Loongson Technology in 2024. These + * modifications are Copyright (c) 2024, Loongson Technology, and are made + * available on the same license terms set forth above. + */ + #ifndef SHARE_RUNTIME_BASICLOCK_INLINE_HPP #define SHARE_RUNTIME_BASICLOCK_INLINE_HPP @@ -39,7 +45,7 @@ inline void BasicLock::set_displaced_header(markWord header) { inline ObjectMonitor* BasicLock::object_monitor_cache() const { assert(UseObjectMonitorTable, "must be"); -#if defined(X86) || defined(AARCH64) || defined(RISCV64) +#if defined(X86) || defined(AARCH64) || defined(RISCV64) || defined(LOONGARCH64) return reinterpret_cast<ObjectMonitor*>(get_metadata()); #else // Other platforms do not make use of the cache yet,