@@ -514,7 +514,39 @@ static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
514
514
515
515
extern " C" {
516
516
int SpinPause () {
517
- return 0 ;
517
+ // We don't use StubRoutines::aarch64::spin_wait stub in order to
518
+ // avoid a costly call to os::current_thread_enable_wx() on MacOS.
519
+ // We should return 1 if SpinPause is implemented, and since there
520
+ // will be a sequence of 11 instructions for NONE and YIELD and 12
521
+ // instructions for NOP and ISB, SpinPause will always return 1.
522
+ uint64_t br_dst;
523
+ const int instructions_per_case = 2 ;
524
+ int64_t off = VM_Version::spin_wait_desc ().inst () * instructions_per_case * Assembler::instruction_size;
525
+
526
+ assert (VM_Version::spin_wait_desc ().inst () >= SpinWait::NONE &&
527
+ VM_Version::spin_wait_desc ().inst () <= SpinWait::YIELD, " must be" );
528
+ assert (-1 == SpinWait::NONE, " must be" );
529
+ assert ( 0 == SpinWait::NOP, " must be" );
530
+ assert ( 1 == SpinWait::ISB, " must be" );
531
+ assert ( 2 == SpinWait::YIELD, " must be" );
532
+
533
+ asm volatile (
534
+ " adr %[d], 20 \n " // 20 == PC here + 5 instructions => address
535
+ // to entry for case SpinWait::NOP
536
+ " add %[d], %[d], %[o] \n "
537
+ " br %[d] \n "
538
+ " b SpinPause_return \n " // case SpinWait::NONE (-1)
539
+ " nop \n " // padding
540
+ " nop \n " // case SpinWait::NOP ( 0)
541
+ " b SpinPause_return \n "
542
+ " isb \n " // case SpinWait::ISB ( 1)
543
+ " b SpinPause_return \n "
544
+ " yield \n " // case SpinWait::YIELD ( 2)
545
+ " SpinPause_return: \n "
546
+ : [d]" =&r" (br_dst)
547
+ : [o]" r" (off)
548
+ : " memory" );
549
+ return 1 ;
518
550
}
519
551
520
552
void _Copy_conjoint_jshorts_atomic (const jshort* from, jshort* to, size_t count) {
1 commit comments
openjdk-notifier[bot] commented on Jan 8, 2024
Review
Issues