Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-29 12:38:24 +00:00
8320275: assert(_chunk->bitmap().at(index)) failed: Bit not set at index
Reviewed-by: dlong, fparain
parent da1091eed9
commit e9e694f4ef
@@ -310,7 +310,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 
   uint int_args = 0;
   uint fp_args = 0;
-  uint stk_args = 0; // inc by 2 each time
+  uint stk_args = 0;
 
   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
@@ -322,8 +322,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_VOID:
@@ -340,6 +341,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -348,8 +350,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_DOUBLE:
@@ -357,6 +360,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -367,7 +371,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }
 
-  return align_up(stk_args, 2);
+  return stk_args;
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
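
The hunks above change SharedRuntime::java_calling_convention so that a 32-bit value spilled to the stack takes a single VMRegImpl slot, a 64-bit value first aligns the slot counter to 2 and then takes two slots, and the raw (unrounded) slot count is returned. A minimal standalone sketch of that packing rule, using made-up register counts and type names (Kind, stack_slots_needed, kIntRegs/kFpRegs are illustrative, not HotSpot identifiers):

#include <cstddef>

enum class Kind { Int32, Int64, Float32, Float64 };

// 2-slot alignment, as align_up(x, 2) does in the diff.
static unsigned align_up2(unsigned x) { return (x + 1) & ~1u; }

unsigned stack_slots_needed(const Kind* sig, size_t n,
                            unsigned kIntRegs = 8, unsigned kFpRegs = 8) {
  unsigned int_args = 0, fp_args = 0, stk_args = 0;
  for (size_t i = 0; i < n; i++) {
    switch (sig[i]) {
    case Kind::Int32:   // 32-bit values now take one slot when spilled
      if (int_args < kIntRegs) { int_args++; } else { stk_args += 1; }
      break;
    case Kind::Int64:   // 64-bit values align first, then take two slots
      if (int_args < kIntRegs) { int_args++; } else { stk_args = align_up2(stk_args); stk_args += 2; }
      break;
    case Kind::Float32:
      if (fp_args < kFpRegs) { fp_args++; } else { stk_args += 1; }
      break;
    case Kind::Float64:
      if (fp_args < kFpRegs) { fp_args++; } else { stk_args = align_up2(stk_args); stk_args += 2; }
      break;
    }
  }
  return stk_args;  // unrounded; callers that need an even count round it themselves
}

Callers that still want a 2-slot-aligned figure now apply align_up at the call site, as the c1 FrameMap and ForeignGlobals hunks further down show.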
@@ -441,7 +441,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }
 
-  if (slot & 1) slot++;
   return slot;
 }
 
@@ -734,7 +734,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       ShouldNotReachHere();
     }
   }
-  return align_up(stk, 2);
+  return stk;
 }
 
 #if defined(COMPILER1) || defined(COMPILER2)
@@ -266,7 +266,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 
   uint int_args = 0;
   uint fp_args = 0;
-  uint stk_args = 0; // inc by 2 each time
+  uint stk_args = 0;
 
   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
@@ -278,8 +278,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_VOID:
@@ -295,6 +296,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -303,8 +305,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_DOUBLE:
@@ -312,6 +315,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -321,7 +325,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }
 
-  return align_up(stk_args, 2);
+  return stk_args;
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -755,7 +755,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       ShouldNotReachHere();
     }
   }
-  return align_up(stk, 2);
+  return stk;
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
@@ -528,8 +528,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }
 
-  // return value can be odd number of VMRegImpl stack slots make multiple of 2
-  return align_up(stack, 2);
+  return stack;
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -498,7 +498,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
 
   uint int_args = 0;
   uint fp_args = 0;
-  uint stk_args = 0; // inc by 2 each time
+  uint stk_args = 0;
 
   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
@@ -510,8 +510,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_VOID:
@@ -528,6 +529,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -536,8 +538,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_DOUBLE:
@@ -545,6 +548,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
      if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -555,7 +559,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }
 
-  return align_up(stk_args, 2);
+  return stk_args;
 }
 
 // Patch the callers callsite with entry to compiled code if it exists.
@@ -72,7 +72,7 @@ CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signa
     }
   }
 
-  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+  intptr_t out_preserve = align_up(SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs), 2);
   LIR_OprList* args = new LIR_OprList(signature->length());
   for (i = 0; i < sizeargs;) {
     BasicType t = sig_bt[i];
@@ -3019,7 +3019,7 @@ void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bo
     assert(sig_index == sizeargs, "");
   }
   const char* spname = "sp"; // make arch-specific?
-  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+  SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
   int stack_slot_offset = this->frame_size() * wordSize;
   int tab1 = 14, tab2 = 24;
   int sig_index = 0;
@@ -395,7 +395,8 @@ public:
   void remove_unshareable_flags() NOT_CDS_RETURN;
 
   // the number of argument reg slots that the compiled method uses on the stack.
-  int num_stack_arg_slots() const { return constMethod()->num_stack_arg_slots(); }
+  int num_stack_arg_slots(bool rounded = true) const {
+    return rounded ? align_up(constMethod()->num_stack_arg_slots(), 2) : constMethod()->num_stack_arg_slots(); }
 
   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
   virtual MetaspaceObj::Type type() const { return MethodType; }
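
A worked illustration of the two views the new `rounded` parameter provides (the slot count is hypothetical):

// Suppose constMethod()->num_stack_arg_slots() == 5 (an odd, unrounded count).
// m->num_stack_arg_slots()       -> align_up(5, 2) == 6   // rounded, matches the old behaviour
// m->num_stack_arg_slots(false)  -> 5                     // exact slots occupied by stack arguments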
@@ -155,7 +155,7 @@ public:
 
   inline void* gc_data() const;
   inline BitMapView bitmap() const;
-  inline BitMap::idx_t bit_index_for(intptr_t* p) const;
+  inline BitMap::idx_t bit_index_for(address p) const;
   inline intptr_t* address_for_bit(BitMap::idx_t index) const;
   template <typename OopT> inline BitMap::idx_t bit_index_for(OopT* p) const;
   template <typename OopT> inline OopT* address_for_bit(BitMap::idx_t index) const;
@@ -256,12 +256,13 @@ inline BitMapView stackChunkOopDesc::bitmap() const {
   return bitmap;
 }
 
-inline BitMap::idx_t stackChunkOopDesc::bit_index_for(intptr_t* p) const {
+inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
   return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
 }
 
 template <typename OopT>
 inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
   assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
+  assert(p >= (OopT*)start_address(), "Address not in chunk");
   return p - (OopT*)start_address();
 }
@@ -140,7 +140,7 @@ int ForeignGlobals::compute_out_arg_bytes(const GrowableArray<VMStorage>& out_re
 
 int ForeignGlobals::java_calling_convention(const BasicType* signature, int num_args, GrowableArray<VMStorage>& out_regs) {
   VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
-  int slots = SharedRuntime::java_calling_convention(signature, vm_regs, num_args);
+  int slots = align_up(SharedRuntime::java_calling_convention(signature, vm_regs, num_args), 2);
   for (int i = 0; i < num_args; i++) {
     VMRegPair pair = vm_regs[i];
     // note, we ignore second here. Signature should consist of register-size values. So there should be
@@ -1775,7 +1775,7 @@ private:
   inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
   inline void after_thaw_java_frame(const frame& f, bool bottom);
   inline void patch(frame& f, const frame& caller, bool bottom);
-  void clear_bitmap_bits(intptr_t* start, int range);
+  void clear_bitmap_bits(address start, address end);
 
   NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
   void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
@@ -2166,13 +2166,22 @@ inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
   assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
 }
 
-void ThawBase::clear_bitmap_bits(intptr_t* start, int range) {
+void ThawBase::clear_bitmap_bits(address start, address end) {
+  assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
+  assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));
+
   // we need to clear the bits that correspond to arguments as they reside in the caller frame
-  // or they will keep objects that are otherwise unreachable alive
-  log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(start+range));
+  // or they will keep objects that are otherwise unreachable alive.
+
+  // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
+  // `end` could be at an odd number of stack slots from `start`, i.e might not be oop aligned.
+  // If that's the case the bit range corresponding to the last stack slot should not have bits set
+  // anyways and we assert that before returning.
+  address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
+  log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
   stackChunkOop chunk = _cont.tail();
-  chunk->bitmap().clear_range(chunk->bit_index_for(start),
-                              chunk->bit_index_for(start+range));
+  chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
+  assert(chunk->bitmap().count_one_bits(chunk->bit_index_for(effective_end), chunk->bit_index_for(end)) == 0, "bits should not be set");
 }
 
 NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
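
A rough standalone sketch of the bit-index arithmetic behind the effective_end adjustment, assuming 4-byte slots with compressed oops and 8-byte slots without (this models the idea, it is not the stackChunkOopDesc implementation):

#include <cstddef>
#include <cstdint>

// One bitmap bit per oop-sized stack slot.
static size_t bit_index_for(const char* chunk_start, const char* p, bool compressed_oops) {
  const size_t slot_bytes = compressed_oops ? sizeof(uint32_t) : sizeof(uint64_t);
  return static_cast<size_t>(p - chunk_start) / slot_bytes;
}

// Without compressed oops, an `end` that sits only one 4-byte stack slot past a word
// boundary is not a whole number of 8-byte slots from `start`; aligning it down by one
// half-word keeps the index arithmetic on whole slots, and the assert in the hunk above
// checks that the skipped half-word had no bits set anyway.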
@@ -2225,7 +2234,9 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c
     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
   } else if (_cont.tail()->has_bitmap() && locals > 0) {
     assert(hf.is_heap_frame(), "should be");
-    clear_bitmap_bits(heap_frame_bottom - locals, locals);
+    address start = (address)(heap_frame_bottom - locals);
+    address end = (address)heap_frame_bottom;
+    clear_bitmap_bits(start, end);
   }
 
   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
@@ -2298,7 +2309,10 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
     // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
     _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
   } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
-    clear_bitmap_bits(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top, added_argsize);
+    address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
+    int stack_args_slots = f.cb()->as_compiled_method()->method()->num_stack_arg_slots(false /* rounded */);
+    int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
+    clear_bitmap_bits(start, start + argsize_in_bytes);
   }
 
   DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
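
As the hunk suggests, the cleared range is now derived from the exact (unrounded) argument size. A small worked example with assumed numbers:

// stack_args_slots = 3 (unrounded), VMRegImpl::stack_slot_size = 4 bytes
// argsize_in_bytes = 3 * 4 = 12, so bits are cleared for [start, start + 12) only.
// Clearing a rounded size (4 slots = 16 bytes) could reach into the next slot and drop
// the bit of an oop still live in the caller frame, which is presumably what the
// "Bit not set at index" assert in the bug title tripped over.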
@@ -1439,7 +1439,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
       assert(sig_index == sizeargs, "");
     }
     int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
-    assert(stack_arg_slots == m->num_stack_arg_slots(), "");
+    assert(stack_arg_slots == m->num_stack_arg_slots(false /* rounded */), "");
     int out_preserve = SharedRuntime::out_preserve_stack_slots();
     int sig_index = 0;
     int arg_index = (m->is_static() ? 0 : -1);
@@ -2004,7 +2004,7 @@ void SharedRuntime::check_member_name_argument_is_last_argument(const methodHand
   assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
   assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
 
-  int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
+  java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
 
   for (int i = 0; i < member_arg_pos; i++) {
     VMReg a = regs_with_member_name[i].first();
@@ -3102,7 +3102,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
       BasicType ret_type = si.return_type();
 
       // Now get the compiled-Java arguments layout.
-      int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
+      SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
 
       // Generate the compiled-to-native wrapper code
       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
@@ -178,7 +178,6 @@ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) {
   }
 
 #if defined(_LP64) && !defined(ZERO)
-  _stack_arg_slots = align_up(_stack_arg_slots, 2);
 #ifdef ASSERT
   int dbg_stack_arg_slots = compute_num_stack_arg_slots(_signature, _param_size, static_flag);
   assert(_stack_arg_slots == dbg_stack_arg_slots, "fingerprinter: %d full: %d", _stack_arg_slots, dbg_stack_arg_slots);
@@ -235,14 +234,17 @@ void Fingerprinter::do_type_calling_convention(BasicType type) {
   case T_BYTE:
   case T_SHORT:
   case T_INT:
-#if defined(PPC64) || defined(S390)
     if (_int_args < Argument::n_int_register_parameters_j) {
       _int_args++;
     } else {
+#if defined(PPC64) || defined(S390)
       _stack_arg_slots += 1;
+#else
+      _stack_arg_slots = align_up(_stack_arg_slots, 2);
+      _stack_arg_slots += 1;
+#endif // defined(PPC64) || defined(S390)
     }
     break;
-#endif // defined(PPC64) || defined(S390)
   case T_LONG:
   case T_OBJECT:
   case T_ARRAY:
@@ -250,26 +252,27 @@ void Fingerprinter::do_type_calling_convention(BasicType type) {
     if (_int_args < Argument::n_int_register_parameters_j) {
       _int_args++;
     } else {
-      PPC64_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
-      S390_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
+      _stack_arg_slots = align_up(_stack_arg_slots, 2);
      _stack_arg_slots += 2;
     }
     break;
   case T_FLOAT:
-#if defined(PPC64) || defined(S390)
     if (_fp_args < Argument::n_float_register_parameters_j) {
       _fp_args++;
     } else {
+#if defined(PPC64) || defined(S390)
       _stack_arg_slots += 1;
+#else
+      _stack_arg_slots = align_up(_stack_arg_slots, 2);
+      _stack_arg_slots += 1;
+#endif // defined(PPC64) || defined(S390)
     }
     break;
-#endif // defined(PPC64) || defined(S390)
   case T_DOUBLE:
     if (_fp_args < Argument::n_float_register_parameters_j) {
       _fp_args++;
     } else {
-      PPC64_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
-      S390_ONLY(_stack_arg_slots = align_up(_stack_arg_slots, 2));
+      _stack_arg_slots = align_up(_stack_arg_slots, 2);
       _stack_arg_slots += 2;
     }
     break;
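
With these changes the fingerprinter's per-type accounting follows the same rule as java_calling_convention above; a worked example (hypothetical machine with 8 integer argument registers):

// Signature: nine ints followed by one long.
//   ints 1-8 -> registers               (_int_args = 8, _stack_arg_slots = 0)
//   int 9    -> stack, one slot         (_stack_arg_slots = 1)
//   long     -> align to 2, two slots   (_stack_arg_slots = 4)
// The unrounded java_calling_convention result for the same signature is also 4, so the
// ASSERT cross-check in compute_fingerprint_and_return_type agrees without the old
// trailing align_up.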