diff options
Diffstat (limited to 'src/dynarec/dynarec_native_functions.c')
| -rw-r--r-- | src/dynarec/dynarec_native_functions.c | 49 |
1 file changed, 36 insertions, 13 deletions
diff --git a/src/dynarec/dynarec_native_functions.c b/src/dynarec/dynarec_native_functions.c index 7f4ec40b..434fbd8a 100644 --- a/src/dynarec/dynarec_native_functions.c +++ b/src/dynarec/dynarec_native_functions.c @@ -66,12 +66,24 @@ void native_fxtract(x64emu_t* emu) } void native_fprem(x64emu_t* emu) { - int32_t tmp32s = ST0.d / ST1.d; - ST0.d -= ST1.d * tmp32s; - emu->sw.f.F87_C2 = 0; - emu->sw.f.F87_C1 = (tmp32s&1); - emu->sw.f.F87_C3 = ((tmp32s>>1)&1); - emu->sw.f.F87_C0 = ((tmp32s>>2)&1); + int e0, e1; + int64_t ll; + frexp(ST0.d, &e0); + frexp(ST1.d, &e1); + int32_t tmp32s = e0 - e1; + if(tmp32s<64) + { + ll = (int64_t)floor(ST0.d/ST1.d); + ST0.d = ST0.d - (ST1.d*ll); + emu->sw.f.F87_C2 = 0; + emu->sw.f.F87_C1 = (ll&1)?1:0; + emu->sw.f.F87_C3 = (ll&2)?1:0; + emu->sw.f.F87_C0 = (ll&4)?1:0; + } else { + ll = (int64_t)(floor((ST0.d/ST1.d))/exp2(tmp32s - 32)); + ST0.d = ST0.d - ST1.d*ll*exp2(tmp32s - 32); + emu->sw.f.F87_C2 = 1; + } } void native_fyl2xp1(x64emu_t* emu) { @@ -200,13 +212,24 @@ void native_frstor(x64emu_t* emu, uint8_t* ed) void native_fprem1(x64emu_t* emu) { - // simplified version - int32_t tmp32s = round(ST0.d / ST1.d); - ST0.d -= ST1.d*tmp32s; - emu->sw.f.F87_C2 = 0; - emu->sw.f.F87_C1 = (tmp32s&1); - emu->sw.f.F87_C3 = ((tmp32s>>1)&1); - emu->sw.f.F87_C0 = ((tmp32s>>2)&1); + int e0, e1; + int64_t ll; + frexp(ST0.d, &e0); + frexp(ST1.d, &e1); + int32_t tmp32s = e0 - e1; + if(tmp32s<64) + { + ll = (int64_t)round(ST0.d/ST1.d); + ST0.d = ST0.d - (ST1.d*ll); + emu->sw.f.F87_C2 = 0; + emu->sw.f.F87_C1 = (ll&1)?1:0; + emu->sw.f.F87_C3 = (ll&2)?1:0; + emu->sw.f.F87_C0 = (ll&4)?1:0; + } else { + ll = (int64_t)(trunc((ST0.d/ST1.d))/exp2(tmp32s - 32)); + ST0.d = ST0.d - ST1.d*ll*exp2(tmp32s - 32); + emu->sw.f.F87_C2 = 1; + } } static uint8_t ff_mult(uint8_t a, uint8_t b) |