/*
* If the record set is too big, don't use the cache
*/
-// if (nrecords > 100)
-// return;
+ if (nrecords > 50)
+ return;
if (!RVM_STATUS_GETBIT(cpu, RVM_STATUS_N) && !RVM_STATUS_GETBIT(cpu, RVM_STATUS_Z)) {
prec = (rparecord_t *)r_array_slot(stat->records, rec);
+// r_printf("Set the cache for: %s (%ld)\n", prec->rule, nrecords);
R_ASSERT(nrecords);
rpa_cache_set(stat->cache, prec->top, ruleid, r0, prec, nrecords);
}
rlong r0 = 0;
entry = rpa_cache_lookup(stat->cache, top, ruleid);
if (entry) {
+// rparecord_t *prec = (rparecord_t *)r_array_slot(entry->records, 0);
+// r_printf("Hit the cache for: %s (%ld), r0 = %ld\n", prec->rule, r_array_length(entry->records), entry->ret);
for (i = 0; i < r_array_length(entry->records); i++) {
r_array_add(stat->records, r_array_slot(entry->records, i));
}
RVM_ABORT(cpu, RVM_CPUREG_GETU(cpu, ins->op1));
}
-#if 0
-static void rvm_op_elds(rvmcpu_t *cpu, rvm_asmins_t *ins)
-{
- ruint index = RVM_CPUREG_GETU(cpu, ins->op2) + RVM_CPUREG_GETU(cpu, ins->op3);
- RVM_CPUREG_SET(cpu, ins->op1, *RVM_STACK_ADDR(cpu, index));
-}
-
-
-static void rvm_op_ests(rvmcpu_t *cpu, rvm_asmins_t *ins)
-{
- ruint index = RVM_CPUREG_GETU(cpu, ins->op2) + RVM_CPUREG_GETU(cpu, ins->op3);
- *RVM_STACK_ADDR(cpu, index) = RVM_CPUREG_GET(cpu, ins->op1);
-}
-#endif
static rvm_cpu_op ops[] = {
rvm_op_exit, // RVM_EXT
if (pi->da) {
*regda = pi->data;
}
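+ /*
+  * Conditional execution: if the instruction carries a condition code,
+  * check it against the status flags and skip the dispatch when it fails.
+  */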
+ if (pi->cond) {
+ switch (pi->cond) {
+ case RVM_CEXEC_GRE:
+ if (!((cpu->status & RVM_STATUS_N) == 0 && (cpu->status & RVM_STATUS_Z) == 0))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_GEQ:
+ if (!((cpu->status & RVM_STATUS_N) == 0 || (cpu->status & RVM_STATUS_Z)))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_EQ:
+ if (!((cpu->status & RVM_STATUS_Z)))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_NEQ:
+ if (!((cpu->status & RVM_STATUS_Z) == 0))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_LEQ:
+ if (!((cpu->status & RVM_STATUS_N) || (cpu->status & RVM_STATUS_Z)))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_LES:
+ if (!((cpu->status & RVM_STATUS_N)))
+ goto skipexec;
+ break;
+ default:
+ goto skipexec;
+ }
+ }
ops[pi->opcode](cpu, pi);
+skipexec:
RVM_REG_INCIP(regpc, 1);
} while (!cpu->abort);
if (cpu->error)
if (pi->da) {
*regda = pi->data;
}
+ if (pi->cond) {
+ switch (pi->cond) {
+ case RVM_CEXEC_GRE:
+ if (!((cpu->status & RVM_STATUS_N) == 0 && (cpu->status & RVM_STATUS_Z) == 0))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_GEQ:
+ if (!((cpu->status & RVM_STATUS_N) == 0 || (cpu->status & RVM_STATUS_Z)))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_EQ:
+ if (!((cpu->status & RVM_STATUS_Z)))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_NEQ:
+ if (!((cpu->status & RVM_STATUS_Z) == 0))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_LEQ:
+ if (!((cpu->status & RVM_STATUS_N) || (cpu->status & RVM_STATUS_Z)))
+ goto skipexec;
+ break;
+ case RVM_CEXEC_LES:
+ if (!((cpu->status & RVM_STATUS_N)))
+ goto skipexec;
+ break;
+ default:
+ goto skipexec;
+ }
+ }
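+ /* Note: a failed predicate also skips the debug dump below, not just the instruction. */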
ops[pi->opcode](cpu, pi);
r_printf("%7ld :", ++line);
rvm_cpu_dumpregs(pi, cpu);
+skipexec:
RVM_REG_INCIP(regpc, 1);
} while (!cpu->abort);
if (cpu->error)
}
-rvm_asmins_t rvm_asmx(rword opcode, rword op1, rword op2, rword op3, rpointer pReloc)
-{
- rvm_asmins_t a;
-
- r_memset(&a, 0, sizeof(a));
- a.opcode = (ruint32) RVM_ASMINS_OPCODE(opcode);
- a.swi = (ruint32) RVM_ASMINS_SWI(opcode);
- a.op1 = (ruint8)op1;
- a.op2 = (ruint8)op2;
- a.op3 = (ruint8)op3;
- RVM_REG_SETP(&a.data, pReloc);
- a.flags = RVM_ASMINS_RELOC;
- a.da = 1;
-
- return a;
-}
-
-
rvmreg_t *rvm_cpu_alloc_global(rvmcpu_t *cpu)
{
rvmreg_t *global;
rvmcpu_swi op;
} rvm_switable_t;
-
-#define RVM_ASMINS_RELOC (1 << 0)
+#define RVM_CEXEC_NAN 0		/* no condition: always execute */
+#define RVM_CEXEC_GRE 1		/* execute if N == 0 && Z == 0 (greater) */
+#define RVM_CEXEC_GEQ 2		/* execute if N == 0 || Z != 0 (greater or equal) */
+#define RVM_CEXEC_EQ 3		/* execute if Z != 0 (equal) */
+#define RVM_CEXEC_NEQ 4		/* execute if Z == 0 (not equal) */
+#define RVM_CEXEC_LEQ 5		/* execute if N != 0 || Z != 0 (less or equal) */
+#define RVM_CEXEC_LES 6		/* execute if N != 0 (less) */
struct rvm_asmins_s {
rvmreg_t data;
ruint16 op3:RVM_OPERAND_BITS;
ruint16 da:1;
ruint16 swi;
- ruint8 flags;
+ ruint8 cond;
ruint8 opcode;
};
rvm_asmins_t rvm_asms(rword opcode, rword op1, rword op2, rword op3, rword data);
rvm_asmins_t rvm_asmf(rword opcode, rword op1, rword op2, rword op3, rword data);
rvm_asmins_t rvm_asm2(rword opcode, rword op1, rword op2, rword op3, ruint32 p1, ruint32 p2);
-rvm_asmins_t rvm_asmr(rword opcode, rword op1, rword op2, rword op3, rpointer pReloc);
-rvm_asmins_t rvm_asmx(rword opcode, rword op1, rword op2, rword op3, rpointer pReloc);
void rvm_asm_dump(rvm_asmins_t *pi, ruint count);
vmcode[off++] = rvm_asm(RVM_MOV, R0, DA, XX, 1);
vmcode[off++] = rvm_asm(RVM_MOV, R1, DA, XX, 2);
rvm_relocmap_add(relocmap, RVM_RELOC_JUMP, off, rvm_codemap_lookup_s(codemap, "l_add2"));
- vmcode[off++] = rvm_asmx(RVM_BXL, DA, XX, XX, 0);
+ vmcode[off++] = rvm_asm(RVM_BXL, DA, XX, XX, 0);
VMTEST_REG(vmcode, off, 0, 3, "BL/RET");
vmcode[off++] = rvm_asm(RVM_MOV, R0, DA, XX, 1);
vmcode[off++] = rvm_asm(RVM_MOV, R1, DA, XX, 2);
vmcode[off++] = rvm_asm(RVM_MOV, R2, DA, XX, 4);
rvm_relocmap_add(relocmap, RVM_RELOC_JUMP, off, rvm_codemap_lookup_s(codemap, "l_add3"));
- vmcode[off++] = rvm_asmx(RVM_BXL, DA, XX, XX, 0);
+ vmcode[off++] = rvm_asm(RVM_BXL, DA, XX, XX, 0);
VMTEST_REG(vmcode, off, 0, 7, "BL/RET");
vmcode[off++] = rvm_asm(RVM_EXT, R0, XX, XX, 0);
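
/*
 * Sketch only (assumption, not part of this change): the new 'cond' field can be
 * set on an assembled instruction to predicate it on the status flags, e.g.:
 *
 *   vmcode[off] = rvm_asm(RVM_MOV, R0, DA, XX, 5);
 *   vmcode[off++].cond = RVM_CEXEC_NEQ;  // executed only while the Z flag is clear
 *
 * How the flags are set beforehand depends on the surrounding test code.
 */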