author     ptitSeb <sebastien.chev@gmail.com>   2023-03-12 14:04:57 +0100
committer  GitHub <noreply@github.com>          2023-03-12 14:04:57 +0100
commit     262ec3ed3c9fdf8f5028c55f616565266fc53e4b (patch)
tree       e4713396644f8dcc524643b5b4e9778ed9d27fea /src/dynarec/native_lock.h
parent     38a5e55745b0e2474dcda1ff4636424af2f37bad (diff)
download   box64-262ec3ed3c9fdf8f5028c55f616565266fc53e4b.tar.gz
           box64-262ec3ed3c9fdf8f5028c55f616565266fc53e4b.zip
Rv64 dynarec (#550)
* [RV64_DYNAREC] Pushed the rv64_lock.h
* [RV64_DYNAREC] Add initial support for atomic functions
* [RV64_DYNAREC] Added some basic infrastructure for the Dynarec (and 1 opcode)
* [RV64_DYNAREC] Add a disassembler for RV64 instructions
* [RV64_DYNAREC] Added 86 MOV opcode, and some fixes too
* [RV64_DYNAREC] Added 8D LEA opcode
* [RV64_DYNAREC] Added POP reg opcode
* [RV64_DYNAREC] Various fixes and small optims

---------

Co-authored-by: Yang Liu <liuyang22@iscas.ac.cn>
Diffstat (limited to 'src/dynarec/native_lock.h')
-rwxr-xr-x  src/dynarec/native_lock.h  40
1 file changed, 40 insertions, 0 deletions
diff --git a/src/dynarec/native_lock.h b/src/dynarec/native_lock.h
index 32cf56fa..2134673e 100755
--- a/src/dynarec/native_lock.h
+++ b/src/dynarec/native_lock.h
@@ -27,6 +27,46 @@
 #define native_lock_decifnot0(A)            arm64_lock_decifnot0(A)
 #define native_lock_store(A, B)             arm64_lock_store(A, B)
 
+#elif defined(RV64)
+#include "rv64/rv64_lock.h"
+
+#define USE_CAS
+// RV64 is quite strict (or at least strongly recommends being strict) about what you can do between an LD.A and an SD.A
+// That basically forbids calling a function in between, so READ / WRITE cannot be separated
+// and a Compare-and-Swap mechanism has to be used instead
+
+// no byte or 2-byte atomic access on RISC-V
+#define native_lock_xchg(A, B)              rv64_lock_xchg(A, B)
+#define native_lock_xchg_d(A, B)            rv64_lock_xchg_d(A, B)
+#define native_lock_storeifref(A, B, C)     rv64_lock_storeifref(A, B, C)
+#define native_lock_storeifref_d(A, B, C)   rv64_lock_storeifref_d(A, B, C)
+#define native_lock_storeifref2_d(A, B, C)  rv64_lock_storeifref2_d(A, B, C)
+#define native_lock_storeifnull(A, B)       rv64_lock_storeifnull(A, B)
+#define native_lock_storeifnull_d(A, B)     rv64_lock_storeifnull_d(A, B)
+#define native_lock_decifnot0b(A)           rv64_lock_decifnot0b(A)
+#define native_lock_storeb(A, B)            rv64_lock_storeb(A, B)
+#define native_lock_incif0(A)               rv64_lock_incif0(A)
+#define native_lock_decifnot0(A)            rv64_lock_decifnot0(A)
+#define native_lock_store(A, B)             rv64_lock_store(A, B)
+#define native_lock_cas_d(A, B, C)          rv64_lock_cas_d(A, B, C)
+#define native_lock_cas_dd(A, B, C)         rv64_lock_cas_dd(A, B, C)
+
+#define native_lock_xchg_b(A, B)            rv64_lock_xchg_b(A, B)
+#define native_lock_cas_b(A, B, C)          rv64_lock_cas_b(A, B, C)
+#define native_lock_cas_h(A, B, C)          rv64_lock_cas_h(A, B, C)
+
+#define native_lock_read_b(A)               tmpcas=*(uint8_t*)(A)
+#define native_lock_write_b(A, B)           rv64_lock_cas_b(A, tmpcas, B)
+#define native_lock_read_h(A)               tmpcas=*(uint16_t*)(A)
+#define native_lock_write_h(A, B)           rv64_lock_cas_h(A, tmpcas, B)
+#define native_lock_read_d(A)               tmpcas=*(uint32_t*)(A)
+#define native_lock_write_d(A, B)           rv64_lock_cas_d(A, tmpcas, B)
+#define native_lock_read_dd(A)              tmpcas=*(uint64_t*)(A)
+#define native_lock_write_dd(A, B)          rv64_lock_cas_dd(A, tmpcas, B)
+// there is no 16-byte atomic move, so fake it
+#define native_lock_read_dq(A, B, C)        *A=tmpcas=((uint64_t*)(C))[0]; *B=((uint64_t*)(C))[1];
+#define native_lock_write_dq(A, B, C)       rv64_lock_cas_dq(C, A, tmpcas, B);
+
 #else
 #error Unsupported architecture
 #endif
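
Note on the CAS-based read/write pair: unlike the arm64 backend, the RV64 macros above do not perform a separate atomic load and atomic store; native_lock_read_* captures the old value into tmpcas, and the matching native_lock_write_* only succeeds if memory still holds that value, so callers are expected to retry. The following is a minimal sketch of that usage pattern, not code from the patch. It assumes tmpcas is an integer scratch variable visible to the macros (here a hypothetical local) and that the rv64_lock_cas_* helpers return non-zero when the compare fails; neither assumption is confirmed by this diff.

#include <stdint.h>
#include "native_lock.h"   /* pulls in rv64/rv64_lock.h when RV64 is defined */

/* Hypothetical helper, not part of the patch: emulate a locked 32-bit
 * read-modify-write on top of the CAS-based read/write macro pair. */
static uint32_t locked_add32(uint32_t* addr, uint32_t val)
{
    uint64_t tmpcas;   /* scratch slot the macros assign to / compare against (assumed to be a caller-provided local) */
    uint32_t old;
    do {
        old = native_lock_read_d(addr);             /* expands to: tmpcas = *(uint32_t*)(addr) */
    } while (native_lock_write_d(addr, old + val)); /* CAS of addr against tmpcas; assumed non-zero on failure, so retry */
    return old;
}

The _dq pair follows the same idea but, as its comment says, only fakes 16-byte atomicity: the read stores the first 64-bit half into tmpcas, which rv64_lock_cas_dq presumably uses as the compare witness when writing both halves back.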