8290324: Move atomic operations outside of os_xxx.hpp
Reviewed-by: dholmes, kbarrett
iklam committed Jul 21, 2022
1 parent e8975be commit 2c73a1f
Showing 15 changed files with 176 additions and 188 deletions.
src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp (9 changes: 7 additions & 2 deletions)
@@ -549,6 +549,10 @@ void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
}

+static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
+  *(jlong *) dst = *(const jlong *) src;
+}
+
extern "C" {
int SpinPause() {
return 0;
@@ -582,18 +586,19 @@ extern "C" {
*(to--) = *(from--);
}
}

void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
if (from > to) {
const jlong *end = from + count;
while (from < end)
-os::atomic_copy64(from++, to++);
+atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
-os::atomic_copy64(from--, to--);
+atomic_copy64(from--, to--);
}
}

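The direction test in _Copy_conjoint_jlongs_atomic above is what keeps the element-wise copy correct for overlapping ranges: when the destination is below the source the copy runs front-to-back, and when it is above it runs back-to-front, otherwise earlier stores would clobber elements not yet read. A small self-contained illustration of that reasoning (ordinary C++, names invented here, not HotSpot code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static void copy_forward(const int64_t* from, int64_t* to, size_t n) {
  for (size_t i = 0; i < n; i++) to[i] = from[i];     // safe when to is below from
}

static void copy_backward(const int64_t* from, int64_t* to, size_t n) {
  for (size_t i = n; i-- > 0; ) to[i] = from[i];      // safe when to is above from
}

int main() {
  int64_t a[5] = {1, 2, 3, 4, 5};
  copy_backward(a, a + 1, 4);                         // shift right within the same array
  for (int64_t v : a) printf("%lld ", (long long)v);  // prints: 1 1 2 3 4
  return 0;
}

Using copy_forward for the same overlapping shift would smear a[0] across the whole array, which is exactly the case the conjoint copy guards against.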
src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp (7 changes: 1 addition & 6 deletions)
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -35,9 +35,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }

-// Atomically copy 64 bits of data
-static void atomic_copy64(const volatile void *src, volatile void *dst) {
-  *(jlong *) dst = *(const jlong *) src;
-}
-
#endif // OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP
src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp (25 changes: 22 additions & 3 deletions)
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -285,12 +285,31 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
return value;
}

+// Atomically copy 64 bits of data
+static void atomic_copy64(const volatile void *src, volatile void *dst) {
+#if defined(PPC32)
+  double tmp;
+  asm volatile ("lfd %0, 0(%1)\n"
+                "stfd %0, 0(%2)\n"
+                : "=f"(tmp)
+                : "b"(src), "b"(dst));
+#elif defined(S390) && !defined(_LP64)
+  double tmp;
+  asm volatile ("ld %0, 0(%1)\n"
+                "std %0, 0(%2)\n"
+                : "=r"(tmp)
+                : "a"(src), "a"(dst));
+#else
+  *(jlong *) dst = *(const jlong *) src;
+#endif
+}
+
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
volatile int64_t dest;
-os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
+atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
return PrimitiveConversions::cast<T>(dest);
}

@@ -299,7 +318,7 @@ template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
-os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
+atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
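
For context, a minimal usage sketch (not part of this commit) of how the PlatformLoad<8>/PlatformStore<8> specializations above are reached in a bsd_zero build; it assumes the usual Atomic::load/Atomic::store entry points from runtime/atomic.hpp, and the field name is invented for illustration:

#include "runtime/atomic.hpp"

static volatile int64_t _example_counter = 0;

void bump_example_counter() {
  int64_t v = Atomic::load(&_example_counter);   // dispatches to PlatformLoad<8>, which calls atomic_copy64()
  Atomic::store(&_example_counter, v + 1);       // dispatches to PlatformStore<8>, which calls atomic_copy64()
}
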
src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp (5 changes: 3 additions & 2 deletions)
@@ -31,6 +31,7 @@
// no precompiled headers
#include "jvm.h"
#include "asm/assembler.inline.hpp"
#include "atomic_bsd_zero.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
@@ -295,14 +296,14 @@ extern "C" {
if (from > to) {
const jlong *end = from + count;
while (from < end)
-os::atomic_copy64(from++, to++);
+atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
-os::atomic_copy64(from--, to--);
+atomic_copy64(from--, to--);
}
}

src/hotspot/os_cpu/bsd_zero/os_bsd_zero.hpp (21 changes: 1 addition & 20 deletions)
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,23 +32,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }

-// Atomically copy 64 bits of data
-static void atomic_copy64(const volatile void *src, volatile void *dst) {
-#if defined(PPC32)
-  double tmp;
-  asm volatile ("lfd %0, 0(%1)\n"
-                "stfd %0, 0(%2)\n"
-                : "=f"(tmp)
-                : "b"(src), "b"(dst));
-#elif defined(S390) && !defined(_LP64)
-  double tmp;
-  asm volatile ("ld %0, 0(%1)\n"
-                "std %0, 0(%2)\n"
-                : "=r"(tmp)
-                : "a"(src), "a"(dst));
-#else
-  *(jlong *) dst = *(const jlong *) src;
-#endif
-}
-
#endif // OS_CPU_BSD_ZERO_OS_BSD_ZERO_HPP
src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp (9 changes: 7 additions & 2 deletions)
@@ -387,6 +387,10 @@ int os::extra_bang_size_in_bytes() {
return 0;
}

+static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
+  *(jlong *) dst = *(const jlong *) src;
+}
+
extern "C" {
int SpinPause() {
using spin_wait_func_ptr_t = void (*)();
@@ -433,18 +437,19 @@ extern "C" {
*(to--) = *(from--);
}
}

void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
if (from > to) {
const jlong *end = from + count;
while (from < end)
-os::atomic_copy64(from++, to++);
+atomic_copy64(from++, to++);
}
else if (from < to) {
const jlong *end = from;
from += count - 1;
to += count - 1;
while (from >= end)
-os::atomic_copy64(from--, to--);
+atomic_copy64(from--, to--);
}
}

src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.hpp (7 changes: 1 addition & 6 deletions)
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,9 +36,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }

-// Atomically copy 64 bits of data
-static void atomic_copy64(const volatile void *src, volatile void *dst) {
-  *(jlong *) dst = *(const jlong *) src;
-}
-
#endif // OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_HPP
src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp (46 changes: 39 additions & 7 deletions)
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,43 @@
#ifndef OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP

#include "memory/allStatic.hpp"
#include "runtime/os.hpp"
#include "runtime/vm_version.hpp"

// Implementation of class atomic

+class ARMAtomicFuncs : AllStatic {
+public:
+  typedef int64_t (*cmpxchg_long_func_t)(int64_t, int64_t, volatile int64_t*);
+  typedef int64_t (*load_long_func_t)(const volatile int64_t*);
+  typedef void (*store_long_func_t)(int64_t, volatile int64_t*);
+  typedef int32_t (*atomic_add_func_t)(int32_t add_value, volatile int32_t *dest);
+  typedef int32_t (*atomic_xchg_func_t)(int32_t exchange_value, volatile int32_t *dest);
+  typedef int32_t (*cmpxchg_func_t)(int32_t, int32_t, volatile int32_t*);
+
+  static cmpxchg_long_func_t _cmpxchg_long_func;
+  static load_long_func_t   _load_long_func;
+  static store_long_func_t  _store_long_func;
+  static atomic_add_func_t  _add_func;
+  static atomic_xchg_func_t _xchg_func;
+  static cmpxchg_func_t     _cmpxchg_func;
+
+  static int64_t cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
+
+  static int64_t load_long_bootstrap(const volatile int64_t*);
+
+  static void store_long_bootstrap(int64_t, volatile int64_t*);
+
+  static int32_t add_bootstrap(int32_t add_value, volatile int32_t *dest);
+
+  static int32_t xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
+
+  static int32_t cmpxchg_bootstrap(int32_t compare_value,
+                                   int32_t exchange_value,
+                                   volatile int32_t *dest);
+};
+
/*
* Atomic long operations on 32-bit ARM
* ARM v7 supports LDREXD/STREXD synchronization instructions so no problem.
@@ -49,15 +81,15 @@ template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
STATIC_ASSERT(8 == sizeof(T));
return PrimitiveConversions::cast<T>(
-(*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
+(*ARMAtomicFuncs::_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
T store_value) const {
STATIC_ASSERT(8 == sizeof(T));
-(*os::atomic_store_long_func)(
+(*ARMAtomicFuncs::_store_long_func)(
PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
}

@@ -83,7 +115,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
-return add_using_helper<int32_t>(os::atomic_add_func, dest, add_value);
+return add_using_helper<int32_t>(ARMAtomicFuncs::_add_func, dest, add_value);
}


@@ -93,7 +125,7 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(4 == sizeof(T));
-return xchg_using_helper<int32_t>(os::atomic_xchg_func, dest, exchange_value);
+return xchg_using_helper<int32_t>(ARMAtomicFuncs::_xchg_func, dest, exchange_value);
}


@@ -108,15 +140,15 @@ inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
int32_t volatile* dest,
int32_t compare_value) {
// Warning: Arguments are swapped to avoid moving them for kernel call
-return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
+return (*ARMAtomicFuncs::_cmpxchg_func)(compare_value, exchange_value, dest);
}

inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
int64_t volatile* dest,
int64_t compare_value) {
assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
// Warning: Arguments are swapped to avoid moving them for kernel call
-return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
+return (*ARMAtomicFuncs::_cmpxchg_long_func)(compare_value, exchange_value, dest);
}


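The ARMAtomicFuncs function pointers follow HotSpot's usual bootstrap idiom: each pointer starts out aimed at the matching *_bootstrap routine, which installs the real implementation and then performs the operation, so later calls through the pointer skip the selection step. The definitions live in os_linux_arm.cpp and are not shown in this hunk; the following is an illustrative, self-contained sketch of the idiom, with names and the __atomic builtin fallback assumed here rather than taken from the commit:

#include <cstdint>

struct Funcs {
  typedef int32_t (*add_func_t)(int32_t, volatile int32_t*);
  static add_func_t _add_func;
  static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest);
  static int32_t add_real(int32_t add_value, volatile int32_t* dest);
};

// The pointer starts at the bootstrap routine ...
Funcs::add_func_t Funcs::_add_func = Funcs::add_bootstrap;

int32_t Funcs::add_real(int32_t add_value, volatile int32_t* dest) {
  return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
}

// ... which swaps in the real implementation on first use and forwards to it.
int32_t Funcs::add_bootstrap(int32_t add_value, volatile int32_t* dest) {
  _add_func = add_real;   // a real build would select a platform stub here
  return add_real(add_value, dest);
}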