about summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
authorWANG Guidong <61500878+wangguidong1999@users.noreply.github.com>2024-08-26 14:19:06 +0800
committerGitHub <noreply@github.com>2024-08-26 08:19:06 +0200
commit9beb745765e9c99bad6410094a97bf0bf9ebc1eb (patch)
tree66e1b483568149a8aa510fdf6e550489c06f6a7c /src
parent0b1dd757be7191fee1b575f6604b39b81b38d91e (diff)
downloadbox64-9beb745765e9c99bad6410094a97bf0bf9ebc1eb.tar.gz
box64-9beb745765e9c99bad6410094a97bf0bf9ebc1eb.zip
[RV64_DYNAREC] Fix some typos in docs and dynarec/rv64 (#1758)
* [Typo] fix some typo in docs and dynarec/rv64

* [Typo] fix some typo in docs and dynarec/rv64

* [Typo] fix some typo in dynarec/rv64

* [Typo] fix some typo in dynarec/rv64

* [Typo] fix some typo in docs
Diffstat (limited to 'src')
-rw-r--r--src/dynarec/rv64/dynarec_rv64_00_3.c2
-rw-r--r--src/dynarec/rv64/dynarec_rv64_0f.c2
-rw-r--r--src/dynarec/rv64/dynarec_rv64_660f.c2
-rw-r--r--src/dynarec/rv64/dynarec_rv64_f0.c2
-rw-r--r--src/dynarec/rv64/dynarec_rv64_helper.c8
-rw-r--r--src/dynarec/rv64/dynarec_rv64_helper.h2
-rw-r--r--src/dynarec/rv64/dynarec_rv64_private.h14
-rw-r--r--src/dynarec/rv64/rv64_emitter.h14
-rw-r--r--src/dynarec/rv64/rv64_prolog.S2
9 files changed, 24 insertions, 24 deletions
diff --git a/src/dynarec/rv64/dynarec_rv64_00_3.c b/src/dynarec/rv64/dynarec_rv64_00_3.c
index 68a6f5f3..3686e357 100644
--- a/src/dynarec/rv64/dynarec_rv64_00_3.c
+++ b/src/dynarec/rv64/dynarec_rv64_00_3.c
@@ -1181,7 +1181,7 @@ uintptr_t dynarec64_00_3(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int
                            && dyn->insts[ninst-1].x64.addr
                            && *(uint8_t*)(dyn->insts[ninst-1].x64.addr)==0xB8
                            && *(uint32_t*)(dyn->insts[ninst-1].x64.addr+1)==0) {
-                            // hack for some protection that check a divide by zero actualy trigger a divide by zero exception
+                            // hack for some protection that checks a divide by zero actually triggers a divide by zero exception
                             MESSAGE(LOG_INFO, "Divide by 0 hack\n");
                             GETIP(ip);
                             STORE_XEMU_CALL(x3);
diff --git a/src/dynarec/rv64/dynarec_rv64_0f.c b/src/dynarec/rv64/dynarec_rv64_0f.c
index 0c691c05..809b4ac1 100644
--- a/src/dynarec/rv64/dynarec_rv64_0f.c
+++ b/src/dynarec/rv64/dynarec_rv64_0f.c
@@ -1718,7 +1718,7 @@ uintptr_t dynarec64_0F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
             NOTEST(x1);
             MV(A1, xRAX);
             CALL_(my_cpuid, -1, 0);
-            // BX and DX are not synchronized durring the call, so need to force the update
+            // BX and DX are not synchronized during the call, so need to force the update
             LD(xRDX, xEmu, offsetof(x64emu_t, regs[_DX]));
             LD(xRBX, xEmu, offsetof(x64emu_t, regs[_BX]));
             break;
diff --git a/src/dynarec/rv64/dynarec_rv64_660f.c b/src/dynarec/rv64/dynarec_rv64_660f.c
index 8fb7734d..0236012f 100644
--- a/src/dynarec/rv64/dynarec_rv64_660f.c
+++ b/src/dynarec/rv64/dynarec_rv64_660f.c
@@ -269,7 +269,7 @@ uintptr_t dynarec64_660F(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int
 
                     ADDI(x5, xEmu, offsetof(x64emu_t, scratch));
 
-                    // perserve gd
+                    // preserve gd
                     LD(x3, gback, gdoffset + 0);
                     LD(x4, gback, gdoffset + 8);
                     SD(x3, x5, 0);
diff --git a/src/dynarec/rv64/dynarec_rv64_f0.c b/src/dynarec/rv64/dynarec_rv64_f0.c
index 86f28453..3e504ca6 100644
--- a/src/dynarec/rv64/dynarec_rv64_f0.c
+++ b/src/dynarec/rv64/dynarec_rv64_f0.c
@@ -49,7 +49,7 @@ uintptr_t dynarec64_F0(dynarec_rv64_t* dyn, uintptr_t addr, uintptr_t ip, int ni
 
     GETREX();
 
-    // TODO: Add support for unligned memory access for all the LOCK ones.
+    // TODO: Add support for unaligned memory access for all the LOCK ones.
     // TODO: Add support for BOX4_DYNAREC_ALIGNED_ATOMICS.
 
     switch(opcode) {
diff --git a/src/dynarec/rv64/dynarec_rv64_helper.c b/src/dynarec/rv64/dynarec_rv64_helper.c
index 6c86d94a..4cc6e024 100644
--- a/src/dynarec/rv64/dynarec_rv64_helper.c
+++ b/src/dynarec/rv64/dynarec_rv64_helper.c
@@ -888,7 +888,7 @@ int extcache_st_coherency(dynarec_rv64_t* dyn, int ninst, int a, int b)
     return i1;
 }
 
-// On step 1, Float/Double for ST is actualy computed and back-propagated
+// On step 1, Float/Double for ST is actually computed and back-propagated
 // On step 2-3, the value is just read for inst[...].n.neocache[..]
 // the reg returned is *2 for FLOAT
 int x87_do_push(dynarec_rv64_t* dyn, int ninst, int s1, int t)
@@ -2229,7 +2229,7 @@ static void fpuCacheTransform(dynarec_rv64_t* dyn, int ninst, int s1, int s2, in
     extcache_t cache = dyn->e;
     int s1_val = 0;
     int s2_val = 0;
-    // unload every uneeded cache
+    // unload every unneeded cache
     // check SSE first, then MMX, in order, for optimisation issue
     for (int i = 0; i < 16; ++i) {
         int j = findCacheSlot(dyn, ninst, EXT_CACHE_SS, i, &cache);
@@ -2400,7 +2400,7 @@ void CacheTransform(dynarec_rv64_t* dyn, int ninst, int cacheupd, int s1, int s2
 
 void rv64_move32(dynarec_rv64_t* dyn, int ninst, int reg, int32_t val, int zeroup)
 {
-    // Depending on val, the following insns are emitted.
+    // Depending on val, the following insts are emitted.
     // val == 0               -> ADDI
     // lo12 != 0 && hi20 == 0 -> ADDI
     // lo12 == 0 && hi20 != 0 -> LUI
@@ -2449,7 +2449,7 @@ void fpu_reflectcache(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3)
 
 void fpu_unreflectcache(dynarec_rv64_t* dyn, int ninst, int s1, int s2, int s3)
 {
-    // need to undo the top and stack tracking that must not be reflected permenatly yet
+    // need to undo the top and stack tracking that must not be reflected permanently yet
     x87_unreflectcache(dyn, ninst, s1, s2, s3);
 }
 
diff --git a/src/dynarec/rv64/dynarec_rv64_helper.h b/src/dynarec/rv64/dynarec_rv64_helper.h
index db71985d..e1082c1f 100644
--- a/src/dynarec/rv64/dynarec_rv64_helper.h
+++ b/src/dynarec/rv64/dynarec_rv64_helper.h
@@ -1399,7 +1399,7 @@ void emit_shld16c(dynarec_rv64_t* dyn, int ninst, rex_t rex, int s1, int s2, uin
 void emit_pf(dynarec_rv64_t* dyn, int ninst, int s1, int s3, int s4);
 
 // x87 helper
-// cache of the local stack counter, to avoid upadte at every call
+// cache of the local stack counter, to avoid updating at every call
 int x87_stackcount(dynarec_rv64_t* dyn, int ninst, int scratch);
 // restore local stack counter
 void x87_unstackcount(dynarec_rv64_t* dyn, int ninst, int scratch, int count);
diff --git a/src/dynarec/rv64/dynarec_rv64_private.h b/src/dynarec/rv64/dynarec_rv64_private.h
index 61737deb..06312105 100644
--- a/src/dynarec/rv64/dynarec_rv64_private.h
+++ b/src/dynarec/rv64/dynarec_rv64_private.h
@@ -91,10 +91,10 @@ typedef struct flagcache_s {
 
 typedef struct instruction_rv64_s {
     instruction_x64_t   x64;
-    uintptr_t           address;    // (start) address of the arm emitted instruction
+    uintptr_t           address;    // (start) address of the riscv emitted instruction
     uintptr_t           epilog;     // epilog of current instruction (can be start of next, or barrier stuff)
-    int                 size;       // size of the arm emitted instruction
-    int                 size2;      // size of the arm emitted instrucion after pass2
+    int                 size;       // size of the riscv emitted instruction
+    int                 size2;      // size of the riscv emitted instruction after pass2
     int                 pred_sz;    // size of predecessor list
     int                 *pred;      // predecessor array
     uintptr_t           mark[3];
@@ -111,8 +111,8 @@ typedef struct instruction_rv64_s {
     uint16_t            ymm0_out;   // the ymm0 at the end of the opcode
     uint16_t            ymm0_pass2, ymm0_pass3;
     int                 barrier_maybe;
-    flagcache_t         f_exit;     // flags status at end of intruction
-    extcache_t          e;          // extcache at end of intruction (but before poping)
+    flagcache_t         f_exit;     // flags status at end of instruction
+    extcache_t          e;          // extcache at end of instruction (but before popping)
     flagcache_t         f_entry;    // flags status before the instruction begin
     uint8_t             vector_sew;
 } instruction_rv64_t;
@@ -124,8 +124,8 @@ typedef struct dynarec_rv64_s {
     uintptr_t           start;      // start of the block
     uint32_t            isize;      // size in byte of x64 instructions included
     void*               block;      // memory pointer where next instruction is emitted
-    uintptr_t           native_start;  // start of the arm code
-    size_t              native_size;   // size of emitted arm code
+    uintptr_t           native_start;  // start of the riscv code
+    size_t              native_size;   // size of emitted riscv code
     uintptr_t           last_ip;    // last set IP in RIP (or NULL if unclean state) TODO: move to a cache something
     uint64_t*           table64;   // table of 64bits value
     int                 table64size;// size of table (will be appended at end of executable code)
diff --git a/src/dynarec/rv64/rv64_emitter.h b/src/dynarec/rv64/rv64_emitter.h
index fa27dd8b..2d1c6385 100644
--- a/src/dynarec/rv64/rv64_emitter.h
+++ b/src/dynarec/rv64/rv64_emitter.h
@@ -257,7 +257,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 #define BLTU(rs1, rs2, imm13) EMIT(B_type(imm13, rs2, rs1, 0b110, 0b1100011))
 #define BGEU(rs1, rs2, imm13) EMIT(B_type(imm13, rs2, rs1, 0b111, 0b1100011))
 
-// TODO: Find a better way to have conditionnal jumps? Imm is a relative jump address, so the the 2nd jump needs to be addapted
+// TODO: Find a better way to have conditional jumps? Imm is a relative jump address, so the 2nd jump needs to be adapted
 #define BEQ_safe(rs1, rs2, imm)              \
     if ((imm) > -0x1000 && (imm) < 0x1000) { \
         BEQ(rs1, rs2, imm);                  \
@@ -605,7 +605,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 #define FSGNJS(rd, rs1, rs2) EMIT(R_type(0b0010000, rs2, rs1, 0b000, rd, 0b1010011))
 // move rs1 to rd
 #define FMVS(rd, rs1) FSGNJS(rd, rs1, rs1)
-// store rs1 with oposite rs2 sign bit to rd
+// store rs1 with opposite rs2 sign bit to rd
 #define FSGNJNS(rd, rs1, rs2) EMIT(R_type(0b0010000, rs2, rs1, 0b001, rd, 0b1010011))
 // -rs1 => rd
 #define FNEGS(rd, rs1) FSGNJNS(rd, rs1, rs1)
@@ -619,7 +619,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 #define FMVWX(frd, rs1) EMIT(R_type(0b1111000, 0b00000, rs1, 0b000, frd, 0b1010011))
 // Convert from signed 32bits to Single
 #define FCVTSW(frd, rs1, rm) EMIT(R_type(0b1101000, 0b00000, rs1, rm, frd, 0b1010011))
-// Convert from Single to signed 32bits (trucated)
+// Convert from Single to signed 32bits (truncated)
 #define FCVTWS(rd, frs1, rm) EMIT(R_type(0b1100000, 0b00000, frs1, rm, rd, 0b1010011))
 
 #define FADDS(frd, frs1, frs2) EMIT(R_type(0b0000000, frs2, frs1, 0b000, frd, 0b1010011))
@@ -644,7 +644,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 #define FCVTLS(rd, frs1, rm) EMIT(R_type(0b1100000, 0b00010, frs1, rm, rd, 0b1010011))
 // Convert from Single to unsigned 64bits
 #define FCVTLUS(rd, frs1, rm) EMIT(R_type(0b1100000, 0b00011, frs1, rm, rd, 0b1010011))
-// onvert from Single to signed 32/64bits (trucated)
+// Convert from Single to signed 32/64bits (truncated)
 #define FCVTSxw(rd, frs1, rm) EMIT(R_type(0b1100000, rex.w ? 0b00010 : 0b00000, frs1, rm, rd, 0b1010011))
 
 // RV32D
@@ -664,7 +664,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 #define FSGNJD(rd, rs1, rs2) EMIT(R_type(0b0010001, rs2, rs1, 0b000, rd, 0b1010011))
 // move rs1 to rd
 #define FMVD(rd, rs1) FSGNJD(rd, rs1, rs1)
-// store rs1 with oposite rs2 sign bit to rd
+// store rs1 with opposite rs2 sign bit to rd
 #define FSGNJND(rd, rs1, rs2) EMIT(R_type(0b0010001, rs2, rs1, 0b001, rd, 0b1010011))
 // -rs1 => rd
 #define FNEGD(rd, rs1) FSGNJND(rd, rs1, rs1)
@@ -939,7 +939,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 
 
 // Zbc
-//  Carry-less multily (low-part)
+//  Carry-less multiply (low-part)
 #define CLMUL(rd, rs1, rs2) EMIT(R_type(0b0000101, rs2, rs1, 0b001, rd, 0b0110011))
 // Carry-less multiply (high-part)
 #define CLMULH(rd, rs1, rs2) EMIT(R_type(0b0000101, rs2, rs1, 0b011, rd, 0b0110011))
@@ -947,7 +947,7 @@ f28–31  ft8–11  FP temporaries                  Caller
 #define CLMULR(rd, rs1, rs2) EMIT(R_type(0b0000101, rs2, rs1, 0b010, rd, 0b0110011))
 
 // Zbs
-//  encoding of the "imm" on RV64 use a slight different mask, but it will work using R_type with high bit of imm ovewriting low bit op func
+//  encoding of the "imm" on RV64 uses a slightly different mask, but it will work using R_type with high bit of imm overwriting low bit op func
 //  Single-bit Clear (Register)
 #define BCLR(rd, rs1, rs2) EMIT(R_type(0b0100100, rs2, rs1, 0b001, rd, 0b0110011))
 // Single-bit Clear (Immediate)
diff --git a/src/dynarec/rv64/rv64_prolog.S b/src/dynarec/rv64/rv64_prolog.S
index b209db9a..1ca425fc 100644
--- a/src/dynarec/rv64/rv64_prolog.S
+++ b/src/dynarec/rv64/rv64_prolog.S
@@ -1,4 +1,4 @@
-//arm prologue for dynarec
+//riscv prologue for dynarec
 //Save stuff, prepare stack and register
 //called with pointer to emu as 1st parameter
 //and address to jump to as 2nd parameter