diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp
index a54d04c757d4e..78c71448ae6f3 100644
--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp
@@ -557,7 +557,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
       break;
     case T_OBJECT:
     case T_ARRAY:
-      assert(c->as_jobject() == 0, "should be");
+      assert(c->as_jobject() == nullptr, "should be");
       if (UseCompressedOops && !wide) {
         insn = &Assembler::st_w;
       } else {
diff --git a/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp
index aaa38f590e389..5cf3083f738ba 100644
--- a/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp
@@ -1063,7 +1063,4 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
 
 #undef __
 
-const char *Runtime1::pd_name_for_address(address entry) {
-  Unimplemented();
-  return 0;
-}
+const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return nullptr; }
\ No newline at end of file
diff --git a/src/hotspot/cpu/loongarch/frame_loongarch.cpp b/src/hotspot/cpu/loongarch/frame_loongarch.cpp
index 79c889c4ebd1c..35adcf57e3fd1 100644
--- a/src/hotspot/cpu/loongarch/frame_loongarch.cpp
+++ b/src/hotspot/cpu/loongarch/frame_loongarch.cpp
@@ -272,7 +272,7 @@ void frame::patch_pc(Thread* thread, address pc) {
 
   // Either the return address is the original one or we are going to
   // patch in the same address that's already there.
-  assert(_pc == pc_old || pc == pc_old || pc_old == 0, "must be");
+  assert(_pc == pc_old || pc == pc_old || pc_old == nullptr, "must be");
   DEBUG_ONLY(address old_pc = _pc;)
   *pc_addr = pc;
   _pc = pc; // must be set before call to get_deopt_original_pc
@@ -499,10 +499,10 @@ frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   // These are reasonable sanity checks
-  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
+  if (fp() == nullptr || (intptr_t(fp()) & (wordSize-1)) != 0) {
     return false;
   }
-  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
+  if (sp() == nullptr || (intptr_t(sp()) & (wordSize-1)) != 0) {
     return false;
   }
   if (fp() + interpreter_frame_initial_sp_offset < sp()) {
diff --git a/src/hotspot/cpu/loongarch/loongarch_64.ad b/src/hotspot/cpu/loongarch/loongarch_64.ad
index 4e61af245b700..c072569bade1e 100644
--- a/src/hotspot/cpu/loongarch/loongarch_64.ad
+++ b/src/hotspot/cpu/loongarch/loongarch_64.ad
@@ -1140,6 +1140,10 @@ bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
   return false;
 }
 
+bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen) {
+  return false;
+}
+
 const RegMask* Matcher::predicate_reg_mask(void) {
   return nullptr;
 }
@@ -15209,36 +15213,6 @@ instruct populateIndexV(vReg dst, mRegI src1, immI_1 src2) %{
   ins_pipe( pipe_slow );
 %}
 
-// ---------------------------- LOAD_SHUFFLE ----------------------------------
-
-instruct loadShuffleVB(vReg dst) %{
-  predicate(Matcher::vector_element_basic_type(n) == T_BYTE);
-  match(Set dst (VectorLoadShuffle dst));
-  format %{ "(x)vld_shuffle $dst\t# @loadShuffleVB" %}
-  ins_encode %{
-    // empty
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct loadShuffleV(vReg dst, vReg src) %{
-  predicate(Matcher::vector_element_basic_type(n) != T_BYTE);
-  match(Set dst (VectorLoadShuffle src));
-  format %{ "(x)vld_shuffle $dst, $src\t# @loadShuffleV" %}
-  ins_encode %{
-    switch (Matcher::vector_element_basic_type(this)) {
-      case T_SHORT : __ vext2xv_hu_bu($dst$$FloatRegister, $src$$FloatRegister); break;
-      case T_FLOAT :
-      case T_INT   : __ vext2xv_wu_bu($dst$$FloatRegister, $src$$FloatRegister); break;
-      case T_DOUBLE:
-      case T_LONG  : __ vext2xv_du_bu($dst$$FloatRegister, $src$$FloatRegister); break;
-      default:
-        ShouldNotReachHere();
-    }
-  %}
-  ins_pipe( pipe_slow );
-%}
-
 // ---------------------------- Rearrange -------------------------------------
 
 instruct rearrangeV(vReg dst, vReg src, vReg tmp) %{
diff --git a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp
index e89d339b3d3fa..15b665468564b 100644
--- a/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp
+++ b/src/hotspot/cpu/loongarch/macroAssembler_loongarch.hpp
@@ -556,7 +556,7 @@ class MacroAssembler: public Assembler {
 
   void patchable_jump_far(Register ra, jlong offs);
   void patchable_jump(address target, bool force_patchable = false);
-  void patchable_call(address target, address call_size = 0);
+  void patchable_call(address target, address call_size = nullptr);
 
   // Floating
   void generate_dsin_dcos(bool isCos, address npio2_hw, address two_over_pi,
diff --git a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp
index ad3eadaa19808..8d62e4528412c 100644
--- a/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp
+++ b/src/hotspot/cpu/loongarch/nativeInst_loongarch.hpp
@@ -163,7 +163,7 @@ class NativeCall: public NativeInstruction {
     return next_instruction_address();
   }
 
-  address target_addr_for_bl(address orig_addr = 0) const;
+  address target_addr_for_bl(address orig_addr = nullptr) const;
   address destination() const;
   void set_destination(address dest);
 
@@ -245,7 +245,7 @@ class NativeFarCall: public NativeInstruction {
   }
 
   // Returns the NativeFarCall's destination.
-  address destination(address orig_addr = 0) const;
+  address destination(address orig_addr = nullptr) const;
 
   // Sets the NativeFarCall's destination, not necessarily mt-safe.
   // Used when relocating code.
@@ -403,7 +403,7 @@ class NativeJump: public NativeInstruction {
   bool is_far();
 
   address instruction_address() const { return addr_at(instruction_offset); }
-  address jump_destination(address orig_addr = 0);
+  address jump_destination(address orig_addr = nullptr);
   void set_jump_destination(address dest);
 
   // Creation
diff --git a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp
index 9903c839d4d1d..3cd420f7c2045 100644
--- a/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/sharedRuntime_loongarch_64.cpp
@@ -1417,7 +1417,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
   VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
 
-  BasicType* in_elem_bt = nullptr;
 
   int argc = 0;
   out_sig_bt[argc++] = T_ADDRESS;
@@ -1842,8 +1841,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ cast_primitive_type(ret_type, V0);
   }
 
-  Label after_transition;
-
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates:
@@ -1908,7 +1905,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   } else {
     __ st_w(AT, TREG, in_bytes(JavaThread::thread_state_offset()));
   }
-  __ bind(after_transition);
 
   if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
     // Check preemption for Object.wait()
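
Note on the 0 -> nullptr conversions in the hunks above: initializing or comparing a pointer with the literal 0 relies on the implicit null-pointer-constant conversion, whereas nullptr keeps the pointer type explicit and cannot silently bind to an integer overload. A minimal standalone sketch of the difference (plain C++, not HotSpot code; all names are illustrative):

#include <cassert>
#include <cstdio>

struct Node { Node* next = nullptr; };

// With both overloads in scope, the literal 0 binds to f(int),
// while nullptr unambiguously selects the pointer overload.
void f(int)   { std::puts("f(int)"); }
void f(Node*) { std::puts("f(Node*)"); }

int main() {
  Node n;
  assert(n.next == nullptr);  // type-correct null check, as in the asserts above
  f(0);                       // prints f(int)
  f(nullptr);                 // prints f(Node*)
  return 0;
}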
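For context on the LOAD_SHUFFLE removal in loongarch_64.ad: the newly added Matcher::vector_rearrange_requires_load_shuffle hook returns false, which tells C2 that a rearrange on this port can consume the byte-typed shuffle index vector as-is, so no separate VectorLoadShuffle widening step (the removed vext2xv_*_bu patterns) needs to be matched; how the remaining rearrangeV pattern consumes those indices is not shown in this hunk. A scalar model of that contract (standalone C++; none of these identifiers are HotSpot names):

#include <array>
#include <cstdint>
#include <cstdio>

// Rearrange a vector of T by a byte-typed index vector, used as-is:
// no per-element widening of the indices happens before the gather.
template <typename T, std::size_t N>
std::array<T, N> rearrange(const std::array<T, N>& src,
                           const std::array<std::uint8_t, N>& idx) {
  std::array<T, N> dst{};
  for (std::size_t i = 0; i < N; i++) {
    dst[i] = src[idx[i]];  // byte index selects the source lane directly
  }
  return dst;
}

int main() {
  const std::array<int, 4> v{10, 20, 30, 40};
  const std::array<std::uint8_t, 4> shuffle{3, 2, 1, 0};
  const auto r = rearrange(v, shuffle);
  std::printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  // prints: 40 30 20 10
  return 0;
}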