From 8c74573cd9315d5f12b9aea789cec18b907a49ec Mon Sep 17 00:00:00 2001
From: loongson-jvm
Date: Thu, 31 Oct 2024 15:01:53 +0800
Subject: [PATCH] Update (2024.10.31, 2nd)

34933: LA port of 8339849: Enumerate opto and C1 stubs, generate enums, names, fields and generator calls
---
 .../loongarch/c1_CodeStubs_loongarch_64.cpp   |  46 ++++----
 .../c1_LIRAssembler_loongarch_64.cpp          |  34 +++---
 .../c1_LIRGenerator_loongarch_64.cpp          |   6 +-
 .../c1_MacroAssembler_loongarch_64.cpp        |   4 +-
 .../loongarch/c1_Runtime1_loongarch_64.cpp    | 104 +++++++++---------
 5 files changed, 97 insertions(+), 97 deletions(-)

diff --git a/src/hotspot/cpu/loongarch/c1_CodeStubs_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_CodeStubs_loongarch_64.cpp
index e1cda84acc946..b737e4ee46b3c 100644
--- a/src/hotspot/cpu/loongarch/c1_CodeStubs_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_CodeStubs_loongarch_64.cpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021, 2023, Loongson Technology. All rights reserved.
+ * Copyright (c) 2021, 2024, Loongson Technology. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ mov_metadata(SCR2, m);
   ce->store_parameter(SCR2, 1);
   ce->store_parameter(_bci, 0);
-  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::counter_overflow_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -65,7 +65,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     __ call(a, relocInfo::runtime_call_type);
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
@@ -78,13 +78,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   } else {
     __ li(SCR1, _index->as_jint());
   }
-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
     assert(_array != LIR_Opr::nullOpr(), "sanity");
     __ move(SCR2, _array->as_pointer_register());
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
   }
   __ call(Runtime1::entry_for(stub_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
@@ -98,7 +98,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {

 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   __ call(a, relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -110,7 +110,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::throw_div0_exception_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 #ifdef ASSERT
@@ -121,14 +121,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 // Implementation of NewInstanceStub

 NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass,
-                                 CodeEmitInfo* info, Runtime1::StubID stub_id) {
+                                 CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_id ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_id ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -159,7 +159,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == S0, "length must in S0,");
   assert(_klass_reg->as_register() == A3, "klass_reg must in A3");
-  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::new_type_array_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == A0, "result must in A0");
@@ -181,7 +181,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == S0, "length must in S0,");
   assert(_klass_reg->as_register() == A3, "klass_reg must in A3");
-  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::new_object_array_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == A0, "result must in A0");
@@ -193,11 +193,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(), 1);
   ce->store_parameter(_lock_reg->as_register(), 0);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
@@ -213,11 +213,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   __ lipc(RA, _continuation);
   __ jmp(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
@@ -243,7 +243,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_trap_request, 0);
-  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::deoptimize_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -252,9 +252,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }

   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
diff --git a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp
index 538a31018811a..29805acc3b678 100644
--- a/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_LIRAssembler_loongarch_64.cpp
@@ -278,16 +278,16 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {

   switch (patching_id(info)) {
   case PatchingStub::access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    target = Runtime1::entry_for(C1StubId::access_field_patching_id);
     break;
   case PatchingStub::load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
     break;
   case PatchingStub::load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
     break;
   case PatchingStub::load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
     break;
   default: ShouldNotReachHere();
   }
@@ -325,7 +325,7 @@ int LIR_Assembler::emit_exception_handler() {
   __ verify_not_null_oop(A0);

   // search an exception handler (A0: exception oop, A1: throwing pc)
-  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id), relocInfo::runtime_call_type);
   __ should_not_reach_here();
   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();
@@ -379,7 +379,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // remove the activation and dispatch to the unwind handler
   __ block_comment("remove_frame and dispatch to the unwind handler");
   __ remove_frame(initial_frame_size_in_bytes());
-  __ jmp(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
+  __ jmp(Runtime1::entry_for(C1StubId::unwind_exception_id), relocInfo::runtime_call_type);

   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -760,16 +760,16 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {

   switch (patching_id(info)) {
   case PatchingStub::access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    target = Runtime1::entry_for(C1StubId::access_field_patching_id);
     break;
   case PatchingStub::load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
     break;
   case PatchingStub::load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
     break;
   case PatchingStub::load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
     break;
   default: ShouldNotReachHere();
   }
@@ -1337,7 +1337,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success,
     __ addi_d(SP, SP, -2 * wordSize);
     __ st_d(k_RInfo, Address(SP, 0 * wordSize));
     __ st_d(klass_RInfo, Address(SP, 1 * wordSize));
-    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ ld_d(klass_RInfo, Address(SP, 0 * wordSize));
     __ addi_d(SP, SP, 2 * wordSize);
     // result is a boolean
@@ -1351,7 +1351,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success,
     __ addi_d(SP, SP, -2 * wordSize);
     __ st_d(k_RInfo, Address(SP, 0 * wordSize));
     __ st_d(klass_RInfo, Address(SP, 1 * wordSize));
-    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ ld_d(k_RInfo, Address(SP, 0 * wordSize));
     __ ld_d(klass_RInfo, Address(SP, 1 * wordSize));
     __ addi_d(SP, SP, 2 * wordSize);
@@ -1431,7 +1431,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     __ addi_d(SP, SP, -2 * wordSize);
     __ st_d(k_RInfo, Address(SP, 0 * wordSize));
     __ st_d(klass_RInfo, Address(SP, 1 * wordSize));
-    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ ld_d(k_RInfo, Address(SP, 0 * wordSize));
     __ ld_d(klass_RInfo, Address(SP, 1 * wordSize));
     __ addi_d(SP, SP, 2 * wordSize);
@@ -2319,7 +2319,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
-  Runtime1::StubID unwind_id;
+  C1StubId unwind_id;

   // get current pc information
   // pc is only needed if the method has an exception handler, the unwind code does not need it.
@@ -2339,9 +2339,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(A0);
   // search an exception handler (A0: exception oop, A1: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = Runtime1::handle_exception_id;
+    unwind_id = C1StubId::handle_exception_id;
   } else {
-    unwind_id = Runtime1::handle_exception_nofpu_id;
+    unwind_id = C1StubId::handle_exception_nofpu_id;
   }
   __ call(Runtime1::entry_for(unwind_id), relocInfo::runtime_call_type);
@@ -2600,7 +2600,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ addi_d(SP, SP, -2 * wordSize);
     __ st_d(dst, Address(SP, 0 * wordSize));
     __ st_d(src, Address(SP, 1 * wordSize));
-    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
     __ ld_d(dst, Address(SP, 0 * wordSize));
     __ ld_d(src, Address(SP, 1 * wordSize));
     __ addi_d(SP, SP, 2 * wordSize);
diff --git a/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp
index 16feea2bd5b5c..8294c45c12d01 100644
--- a/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_LIRGenerator_loongarch_64.cpp
@@ -1241,7 +1241,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);

@@ -1274,7 +1274,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
@@ -1282,7 +1282,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
                                    Deoptimization::Reason_class_check,
                                    Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id,
                                    obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
diff --git a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp
index 21e10e5e00232..ddd15e3e14699 100644
--- a/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_MacroAssembler_loongarch_64.cpp
@@ -245,7 +245,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register

   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == A0, "must be");
-    call(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id), relocInfo::runtime_call_type);
+    call(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id), relocInfo::runtime_call_type);
   }

   verify_oop(obj);
@@ -286,7 +286,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,

   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == A0, "must be");
-    call(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id), relocInfo::runtime_call_type);
+    call(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id), relocInfo::runtime_call_type);
   }

   verify_oop(obj);
diff --git a/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp b/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp
index d616e05e63801..548fa9039292c 100644
--- a/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp
+++ b/src/hotspot/cpu/loongarch/c1_Runtime1_loongarch_64.cpp
@@ -96,10 +96,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     if (frame_size() == no_frame_size) {
       leave();
       jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      jmp(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type);
+      jmp(Runtime1::entry_for(C1StubId::forward_exception_id), relocInfo::runtime_call_type);
     }
     bind(L);
   }
@@ -353,7 +353,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
   return oop_maps;
 }

-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");

   // incoming parameters
@@ -365,7 +365,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* oop_map = nullptr;
   switch (id) {
-  case forward_exception_id:
+  case C1StubId::forward_exception_id:
     // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
     // an exception lookup in the caller and dispatch to the handler
@@ -384,12 +384,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
     __ st_d(R0, Address(TREG, JavaThread::vm_result_offset()));
     __ st_d(R0, Address(TREG, JavaThread::vm_result_2_offset()));
     break;
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id: {
+  case C1StubId::handle_exception_from_callee_id: {
     // At this point all registers except exception oop (A0) and
     // exception pc (RA) are dead.
     const int frame_size = 2 /*fp, return address*/;
@@ -445,13 +445,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ st_d(A0, Address(FP, frame::return_addr_offset * BytesPerWord));

   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id != handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     break;
   default: ShouldNotReachHere();
   }
@@ -597,7 +597,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   return oop_maps;
 }

-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   // for better readability
   const bool must_gc_arguments = true;
   const bool dont_gc_arguments = false;
@@ -611,7 +611,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   OopMap* oop_map = nullptr;
   switch (id) {
     {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ jr(RA);
      }
      break;

-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
      {
        StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
      {
        StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
      {
        Register klass = A3; // Incoming
        Register obj   = A0; // Result

-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }
@@ -664,7 +664,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      break;

-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
      {
        Register bci = A0, method = A1;
        __ enter();
@@ -682,14 +682,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
      {
        Register length = S0; // Incoming
        Register klass  = A3; // Incoming
        Register obj    = A0; // Result

-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }
@@ -702,7 +702,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        Register t0 = obj;
        __ ld_w(t0, Address(klass, Klass::layout_helper_offset()));
        __ srai_w(t0, t0, Klass::_lh_array_tag_shift);
-        int tag = ((id == new_type_array_id)
+        int tag = ((id == C1StubId::new_type_array_id)
                   ? Klass::_lh_array_tag_type_value
                   : Klass::_lh_array_tag_obj_value);
        __ li(SCR1, tag);
@@ -716,7 +716,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -734,7 +734,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
      {
        StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // A0,: klass
@@ -755,7 +755,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

@@ -788,21 +788,21 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
      {
        StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
      {
        StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
@@ -855,10 +855,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -876,10 +876,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -899,7 +899,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
        OopMap* oop_map = save_live_registers(sasm);
@@ -916,14 +916,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
      {
        StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
      {
        __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
@@ -932,7 +932,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
      {
        StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
@@ -940,7 +940,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
      {
        StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
@@ -948,7 +948,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
      {
        StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
@@ -956,7 +956,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
      {
        StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
        // we should set up register map
@@ -964,29 +964,29 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
      {
        StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
      {
        StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
      {
        StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
      {
        StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
@@ -995,7 +995,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
@@ -1013,7 +1013,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
      }
      break;

-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
      { // A0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
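
For context on the rename above: upstream JDK-8339849 replaces Runtime1's unscoped StubID enum with a scoped enum class C1StubId, which is why every stub reference in this port gains a C1StubId:: qualifier and the integer comparison in StubAssembler::call_RT gains an explicit (int) cast. Below is a minimal C++ sketch of that shape, assuming the upstream pattern; the member list is abbreviated and this is not the verbatim declaration generated in hotspot's c1_Runtime1.hpp.

```cpp
// Sketch only: the real C1StubId is generated from a macro over all C1
// stubs; these members are an abbreviated, illustrative subset.
#include <cstdio>

enum class C1StubId : int {
  NO_STUBID = -1,        // assumed sentinel, mirroring the upstream pattern
  forward_exception_id,
  counter_overflow_id,
  // ... remaining C1 stub ids elided ...
  NUM_STUBIDS
};

int main() {
  C1StubId id = C1StubId::forward_exception_id;
  // Unlike the old unscoped StubID, a scoped enum never converts to int
  // implicitly, so call sites such as StubAssembler::call_RT must cast.
  std::printf("forward_exception_id = %d\n", static_cast<int>(id));
  return 0;
}
```

Note that the stub bodies in generate_code_for() are untouched by the patch; only the qualification of the case labels and the StubID -> C1StubId signature changes differ, so the port is mechanical.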