 */

 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/codeBlob.hpp"
+#include "code/codeCache.hpp"
+#include "code/vmreg.inline.hpp"
+#include "compiler/oopMap.hpp"
+#include "logging/logStream.hpp"
+#include "memory/resourceArea.hpp"
 #include "prims/downcallLinker.hpp"
-#include "utilities/debug.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+
+#define __ _masm->
+
+class DowncallStubGenerator : public StubCodeGenerator {
+  BasicType* _signature;
+  int _num_args;
+  BasicType _ret_bt;
+  const ABIDescriptor& _abi;
+
+  const GrowableArray<VMStorage>& _input_registers;
+  const GrowableArray<VMStorage>& _output_registers;
+
+  bool _needs_return_buffer;
+  int _captured_state_mask;
+  bool _needs_transition;
+
+  int _frame_complete;
+  int _frame_size_slots;
+  OopMapSet* _oop_maps;
+public:
+  DowncallStubGenerator(CodeBuffer* buffer,
+                        BasicType* signature,
+                        int num_args,
+                        BasicType ret_bt,
+                        const ABIDescriptor& abi,
+                        const GrowableArray<VMStorage>& input_registers,
+                        const GrowableArray<VMStorage>& output_registers,
+                        bool needs_return_buffer,
+                        int captured_state_mask,
+                        bool needs_transition)
+    : StubCodeGenerator(buffer, PrintMethodHandleStubs),
+      _signature(signature),
+      _num_args(num_args),
+      _ret_bt(ret_bt),
+      _abi(abi),
+      _input_registers(input_registers),
+      _output_registers(output_registers),
+      _needs_return_buffer(needs_return_buffer),
+      _captured_state_mask(captured_state_mask),
+      _needs_transition(needs_transition),
+      _frame_complete(0),
+      _frame_size_slots(0),
+      _oop_maps(nullptr) {
+  }
+  void generate();
+  int frame_complete() const {
+    return _frame_complete;
+  }
+
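+  // _frame_size_slots is in 32-bit stack slots; convert to words here,
+  // which is what RuntimeStub::new_runtime_stub expects.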
+  int framesize() const {
+    return (_frame_size_slots >> (LogBytesPerWord - LogBytesPerInt));
+  }
+
+  OopMapSet* oop_maps() const {
+    return _oop_maps;
+  }
+};
+
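+// Rough estimate of the generated stub's code size: a fixed base plus a
+// per-argument allowance for the argument-shuffle code (see make_downcall_stub).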
+static const int native_invoker_code_base_size = 512;
+static const int native_invoker_size_per_args = 8;

 RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
                                                 int num_args,
@@ -35,6 +103,197 @@ RuntimeStub* DowncallLinker::make_downcall_stub(BasicType* signature,
                                                 bool needs_return_buffer,
                                                 int captured_state_mask,
                                                 bool needs_transition) {
-  Unimplemented();
-  return nullptr;
+
+  int code_size = native_invoker_code_base_size + (num_args * native_invoker_size_per_args);
+  int locs_size = 1; // must be non zero
+  CodeBuffer code("nep_invoker_blob", code_size, locs_size);
+
+  DowncallStubGenerator g(&code, signature, num_args, ret_bt, abi,
+                          input_registers, output_registers,
+                          needs_return_buffer, captured_state_mask,
+                          needs_transition);
+  g.generate();
+  code.log_section_sizes("nep_invoker_blob");
+
+  RuntimeStub* stub =
+    RuntimeStub::new_runtime_stub("nep_invoker_blob",
+                                  &code,
+                                  g.frame_complete(),
+                                  g.framesize(),
+                                  g.oop_maps(), false);
+
+#ifndef PRODUCT
+  LogTarget(Trace, foreign, downcall) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+    stub->print_on(&ls);
+  }
+#endif
+
+  return stub;
+}
+
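+// Emit the downcall stub: build a wrapper frame, transition the thread to
+// native (if requested), shuffle the Java arguments into their native-ABI
+// locations, call the target, optionally capture errno-like state, and
+// transition back with safepoint and stack-reguard checks on the return path.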
+void DowncallStubGenerator::generate() {
+  Register call_target_address = Z_R1_scratch,
+           tmp = Z_R0_scratch;
+
+  VMStorage shuffle_reg = _abi._scratch1;
+
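+  // Compute how each argument moves from its Java location to its native
+  // location; shuffle_reg serves as a temporary register for the moves.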
+  JavaCallingConvention in_conv;
+  NativeCallingConvention out_conv(_input_registers);
+  ArgumentShuffle arg_shuffle(_signature, _num_args, _signature, _num_args, &in_conv, &out_conv, shuffle_reg);
+
+#ifndef PRODUCT
+  LogTarget(Trace, foreign, downcall) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+    arg_shuffle.print_on(&ls);
+  }
+#endif
+
+  assert(_abi._shadow_space_bytes == frame::z_abi_160_size, "expected space according to ABI");
+  int allocated_frame_size = _abi._shadow_space_bytes;
+  allocated_frame_size += arg_shuffle.out_arg_bytes();
+
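+  // Reserve a slot for spilling the native result registers around the
+  // runtime calls made on the slow paths below.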
+  assert(!_needs_return_buffer, "unexpected needs_return_buffer");
+  RegSpiller out_reg_spiller(_output_registers);
+  int spill_offset = allocated_frame_size;
+  allocated_frame_size += BytesPerWord;
+
+  StubLocations locs;
+  locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch2);
+
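+  // If errno/last-error capture was requested, reserve a frame slot that
+  // will hold the address of the capture buffer.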
+  if (_captured_state_mask != 0) {
+    __ block_comment("{ _captured_state_mask is set");
+    locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size);
+    allocated_frame_size += BytesPerWord;
+    __ block_comment("} _captured_state_mask is set");
+  }
+
+  allocated_frame_size = align_up(allocated_frame_size, StackAlignmentInBytes);
+  _frame_size_slots = allocated_frame_size >> LogBytesPerInt;
+
+  _oop_maps = _needs_transition ? new OopMapSet() : nullptr;
+  address start = __ pc();
+
+  __ save_return_pc();
+  __ push_frame(allocated_frame_size, Z_R11); // Create a new frame for the wrapper.
+
+  _frame_complete = __ pc() - start; // frame build complete.
+
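+  // Java -> native transition: record the last Java frame and an (empty)
+  // OopMap for this PC, then mark the thread as _thread_in_native.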
+  if (_needs_transition) {
+    __ block_comment("{ thread java2native");
+    __ get_PC(Z_R1_scratch);
+    address the_pc = __ pc();
+    __ set_last_Java_frame(Z_SP, Z_R1_scratch);
+
+    OopMap* map = new OopMap(_frame_size_slots, 0);
+    _oop_maps->add_gc_map(the_pc - start, map);
+
+    // State transition
+    __ set_thread_state(_thread_in_native);
+    __ block_comment("} thread java2native");
+  }
+  __ block_comment("{ argument shuffle");
+  arg_shuffle.generate(_masm, shuffle_reg, frame::z_jit_out_preserve_size, _abi._shadow_space_bytes, locs);
+  __ block_comment("} argument shuffle");
+
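+  // Call the native target; its address was routed to _abi._scratch2
+  // (TARGET_ADDRESS) as part of the argument shuffle.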
+  __ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS)));
+
+  //////////////////////////////////////////////////////////////////////////////
+
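+  // Capture the thread-local error state (e.g. errno) right after the call,
+  // before it can be clobbered; the native result registers are preserved
+  // around the capture_state call.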
+  if (_captured_state_mask != 0) {
+    __ block_comment("{ save thread local");
+
+    out_reg_spiller.generate_spill(_masm, spill_offset);
+
+    __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, DowncallLinker::capture_state));
+    __ z_lg(Z_ARG1, Address(Z_SP, locs.data_offset(StubLocations::CAPTURED_STATE_BUFFER)));
+    __ load_const_optimized(Z_ARG2, _captured_state_mask);
+    __ call(call_target_address);
+
+    out_reg_spiller.generate_fill(_masm, spill_offset);
+
+    __ block_comment("} save thread local");
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+
+  Label L_after_safepoint_poll;
+  Label L_safepoint_poll_slow_path;
+  Label L_reguard;
+  Label L_after_reguard;
+
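+  // Native -> Java transition: enter _thread_in_native_trans, poll for a
+  // safepoint or pending suspend, then return to _thread_in_Java; restore
+  // the yellow guard pages if they were unguarded while in native code.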
+  if (_needs_transition) {
+    __ block_comment("{ thread native2java");
+    __ set_thread_state(_thread_in_native_trans);
+
+    if (!UseSystemMemoryBarrier) {
+      __ z_fence(); // Order state change wrt. safepoint poll.
+    }
+
+    __ safepoint_poll(L_safepoint_poll_slow_path, tmp);
+
+    __ load_and_test_int(tmp, Address(Z_thread, JavaThread::suspend_flags_offset()));
+    __ z_brne(L_safepoint_poll_slow_path);
+
+    __ bind(L_after_safepoint_poll);
+
+    // change thread state
+    __ set_thread_state(_thread_in_Java);
+
+    __ block_comment("reguard stack check");
+    __ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)),
+             StackOverflow::stack_guard_yellow_reserved_disabled);
+    __ z_bre(L_reguard);
+    __ bind(L_after_reguard);
+
+    __ reset_last_Java_frame();
+    __ block_comment("} thread native2java");
+  }
+
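+  // Normal return: tear down the wrapper frame and return to the Java
+  // caller with the native result in the ABI return register(s).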
+  __ pop_frame();
+  __ restore_return_pc(); // This is the way back to the caller.
+  __ z_br(Z_R14);
+
+  //////////////////////////////////////////////////////////////////////////////
+
+  if (_needs_transition) {
+    __ block_comment("{ L_safepoint_poll_slow_path");
+    __ bind(L_safepoint_poll_slow_path);
+
+    // Need to save the native result registers around any runtime calls.
+    out_reg_spiller.generate_spill(_masm, spill_offset);
+
+    __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, JavaThread::check_special_condition_for_native_trans));
+    __ z_lgr(Z_ARG1, Z_thread);
+    __ call(call_target_address);
+
+    out_reg_spiller.generate_fill(_masm, spill_offset);
+
+    __ z_bru(L_after_safepoint_poll);
+    __ block_comment("} L_safepoint_poll_slow_path");
+
+    //////////////////////////////////////////////////////////////////////////////
+    __ block_comment("{ L_reguard");
+    __ bind(L_reguard);
+
+    // Need to save the native result registers around any runtime calls.
+    out_reg_spiller.generate_spill(_masm, spill_offset);
+
+    __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, SharedRuntime::reguard_yellow_pages));
+    __ call(call_target_address);
+
+    out_reg_spiller.generate_fill(_masm, spill_offset);
+
+    __ z_bru(L_after_reguard);
+
+    __ block_comment("} L_reguard");
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+
+  __ flush();
 }