Commit a8c611e1 authored by Peter Maydell

Merge remote-tracking branch 'remotes/stsquad/tags/pull-tcg-common-tlb-reset-20170113-r1' into staging

This is the same as the v3 posted, except for a rebase and a few extra sign-offs.

# gpg: Signature made Fri 13 Jan 2017 14:26:46 GMT
# gpg:                using RSA key 0xFBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>"
# Primary key fingerprint: 6685 AE99 E751 67BC AFC8  DF35 FBD0 DB09 5A9E 2A44

* remotes/stsquad/tags/pull-tcg-common-tlb-reset-20170113-r1:
  cputlb: drop flush_global flag from tlb_flush
  cpu_common_reset: wrap TCG specific code in tcg_enabled()
  qom/cpu: move tlb_flush to cpu_common_reset
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 2ccede18 d10eb08f
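
The change is mechanical at the call sites: the flush_global argument, which QEMU ignored in practice, is simply dropped. A minimal sketch of what a caller looks like before and after (the helper name below is invented purely for illustration; only the tlb_flush() signature change comes from this series, and the snippet assumes it is built inside the QEMU tree, which provides these headers):

/* Illustrative sketch only: example_mmu_reg_write is a made-up helper,
 * not part of this series; it exists to show the call-site change.
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"

static void example_mmu_reg_write(CPUState *cs)
{
    /* Before this series, callers passed an (ignored) flag:
     *     tlb_flush(cs, 1);
     * After it, the whole softmmu TLB for this vCPU is flushed with:
     */
    tlb_flush(cs);
}

Most of the hunks below are exactly this substitution; the remainder move the reset-time flush into cpu_common_reset() behind tcg_enabled() and introduce the end_reset_fields markers in the per-target CPU state structs.
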
......@@ -60,24 +60,15 @@
/* statistics */
int tlb_flush_count;
/* NOTE:
* If flush_global is true (the usual case), flush all tlb entries.
* If flush_global is false, flush (at least) all tlb entries not
* marked global.
*
* Since QEMU doesn't currently implement a global/not-global flag
* for tlb entries, at the moment tlb_flush() will also flush all
* tlb entries in the flush_global == false case. This is OK because
* CPU architectures generally permit an implementation to drop
* entries from the TLB at any time, so flushing more entries than
* required is only an efficiency issue, not a correctness issue.
/* This is OK because CPU architectures generally permit an
* implementation to drop entries from the TLB at any time, so
* flushing more entries than required is only an efficiency issue,
* not a correctness issue.
*/
void tlb_flush(CPUState *cpu, int flush_global)
void tlb_flush(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
tlb_debug("(%d)\n", flush_global);
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
......@@ -144,7 +135,7 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
env->tlb_flush_addr, env->tlb_flush_mask);
tlb_flush(cpu, 1);
tlb_flush(cpu);
return;
}
......
......@@ -544,7 +544,7 @@ static int cpu_common_post_load(void *opaque, int version_id)
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
version_id is increased. */
cpu->interrupt_request &= ~0x01;
tlb_flush(cpu, 1);
tlb_flush(cpu);
return 0;
}
......@@ -2426,7 +2426,7 @@ static void tcg_commit(MemoryListener *listener)
*/
d = atomic_rcu_read(&cpuas->as->dispatch);
atomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu, 1);
tlb_flush(cpuas->cpu);
}
void address_space_init_dispatch(AddressSpace *as)
......
......@@ -417,7 +417,7 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr,
case SH7750_PTEH_A7:
/* If asid changes, clear all registered tlb entries. */
if ((s->cpu->env.pteh & 0xff) != (mem_value & 0xff)) {
tlb_flush(CPU(s->cpu), 1);
tlb_flush(CPU(s->cpu));
}
s->cpu->env.pteh = mem_value;
return;
......
......@@ -95,15 +95,13 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
* @flush_global: ignored
*
* Flush the entire TLB for the specified CPU.
* The flush_global flag is in theory an indicator of whether the whole
* TLB should be flushed, or only those entries not marked global.
* In practice QEMU does not implement any global/not global flag for
* TLB entries, and the argument is ignored.
* Flush the entire TLB for the specified CPU. Most CPU architectures
* allow the implementation to drop entries from the TLB at any time
* so this is generally safe. If more selective flushing is required
* use one of the other functions for efficiency.
*/
void tlb_flush(CPUState *cpu, int flush_global);
void tlb_flush(CPUState *cpu);
/**
* tlb_flush_page_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
......@@ -165,7 +163,7 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu, int flush_global)
static inline void tlb_flush(CPUState *cpu)
{
}
......
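
The updated documentation above distinguishes the whole-TLB flush from the more selective helpers. A hedged sketch of that distinction (the wrapper function is invented for illustration and assumes the QEMU headers that declare tlb_flush() and tlb_flush_page()):

/* Illustrative only: example_invalidate is not part of this series.
 * It contrasts the full flush with the cheaper per-page flush that the
 * comment above recommends when only one mapping has changed.
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"

static void example_invalidate(CPUState *cs, target_ulong addr, bool everything)
{
    if (everything) {
        tlb_flush(cs);            /* drop all softmmu TLB entries for this vCPU */
    } else {
        tlb_flush_page(cs, addr); /* drop only the entry covering addr */
    }
}
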
......@@ -270,8 +270,14 @@ static void cpu_common_reset(CPUState *cpu)
cpu->exception_index = -1;
cpu->crash_occurred = false;
for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
atomic_set(&cpu->tb_jmp_cache[i], NULL);
if (tcg_enabled()) {
for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
atomic_set(&cpu->tb_jmp_cache[i], NULL);
}
#ifdef CONFIG_SOFTMMU
tlb_flush(cpu, 0);
#endif
}
}
......
......@@ -273,7 +273,7 @@ static void alpha_cpu_initfn(Object *obj)
CPUAlphaState *env = &cpu->env;
cs->env_ptr = env;
tlb_flush(cs, 1);
tlb_flush(cs);
alpha_translate_init();
......
......@@ -44,7 +44,7 @@ uint64_t helper_load_pcc(CPUAlphaState *env)
#ifndef CONFIG_USER_ONLY
void helper_tbia(CPUAlphaState *env)
{
tlb_flush(CPU(alpha_env_get_cpu(env)), 1);
tlb_flush(CPU(alpha_env_get_cpu(env)));
}
void helper_tbis(CPUAlphaState *env, uint64_t p)
......
......@@ -122,7 +122,8 @@ static void arm_cpu_reset(CPUState *s)
acc->parent_reset(s);
memset(env, 0, offsetof(CPUARMState, features));
memset(env, 0, offsetof(CPUARMState, end_reset_fields));
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
......@@ -226,8 +227,6 @@ static void arm_cpu_reset(CPUState *s)
&env->vfp.fp_status);
set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.standard_fp_status);
tlb_flush(s, 1);
#ifndef CONFIG_USER_ONLY
if (kvm_enabled()) {
kvm_arm_reset_vcpu(cpu);
......
......@@ -491,9 +491,12 @@ typedef struct CPUARMState {
struct CPUBreakpoint *cpu_breakpoint[16];
struct CPUWatchpoint *cpu_watchpoint[16];
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* These fields after the common ones so they are preserved on reset. */
/* Fields after CPU_COMMON are preserved across CPU reset. */
/* Internal CPU feature flags. */
uint64_t features;
......
......@@ -464,7 +464,7 @@ static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
ARMCPU *cpu = arm_env_get_cpu(env);
raw_write(env, ri, value);
tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
......@@ -475,7 +475,7 @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Unlike real hardware the qemu TLB uses virtual addresses,
* not modified virtual addresses, so this causes a TLB flush.
*/
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
raw_write(env, ri, value);
}
}
......@@ -491,7 +491,7 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
* format) this register includes the ASID, so do a TLB flush.
* For PMSA it is purely a process ID and no action is needed.
*/
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
}
......@@ -502,7 +502,7 @@ static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate all (TLBIALL) */
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
......@@ -520,7 +520,7 @@ static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate by ASID (TLBIASID) */
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu), value == 0);
tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
......@@ -539,7 +539,7 @@ static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *other_cs;
CPU_FOREACH(other_cs) {
tlb_flush(other_cs, 1);
tlb_flush(other_cs);
}
}
......@@ -549,7 +549,7 @@ static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *other_cs;
CPU_FOREACH(other_cs) {
tlb_flush(other_cs, value == 0);
tlb_flush(other_cs);
}
}
......@@ -2304,7 +2304,7 @@ static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
u32p += env->cp15.c6_rgnr;
tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
*u32p = value;
}
......@@ -2449,7 +2449,7 @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush.
*/
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
vmsa_ttbcr_raw_write(env, ri, value);
}
......@@ -2473,7 +2473,7 @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
TCR *tcr = raw_ptr(env, ri);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
tcr->raw_tcr = value;
}
......@@ -2486,7 +2486,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (cpreg_field_is_64bit(ri)) {
ARMCPU *cpu = arm_env_get_cpu(env);
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
}
......@@ -3154,7 +3154,7 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value);
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
......@@ -3622,7 +3622,7 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
* HCR_DC Disables stage1 and enables stage2 translation
*/
if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
}
......
......@@ -52,9 +52,8 @@ static void cris_cpu_reset(CPUState *s)
ccc->parent_reset(s);
vr = env->pregs[PR_VR];
memset(env, 0, offsetof(CPUCRISState, load_info));
memset(env, 0, offsetof(CPUCRISState, end_reset_fields));
env->pregs[PR_VR] = vr;
tlb_flush(s, 1);
#if defined(CONFIG_USER_ONLY)
/* start in user mode with interrupts enabled. */
......
......@@ -167,10 +167,13 @@ typedef struct CPUCRISState {
*/
TLBSet tlbsets[2][4][16];
CPU_COMMON
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
/* Members from load_info on are preserved across resets. */
void *load_info;
CPU_COMMON
/* Members from load_info on are preserved across resets. */
void *load_info;
} CPUCRISState;
/**
......
......@@ -2820,8 +2820,6 @@ static void x86_cpu_reset(CPUState *s)
memset(env, 0, offsetof(CPUX86State, end_reset_fields));
tlb_flush(s, 1);
env->old_exception = -1;
/* init to reset state */
......
......@@ -1123,10 +1123,12 @@ typedef struct CPUX86State {
uint8_t nmi_injected;
uint8_t nmi_pending;
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
struct {} end_reset_fields;
/* Fields after CPU_COMMON are preserved across CPU reset. */
/* processor features (e.g. for CPUID insn) */
/* Minimum level/xlevel/xlevel2, based on CPU model + features */
......
......@@ -1465,7 +1465,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
}
if (env->pkru != old_pkru) {
CPUState *cs = CPU(x86_env_get_cpu(env));
tlb_flush(cs, 1);
tlb_flush(cs);
}
}
}
......
......@@ -586,7 +586,7 @@ void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
/* when a20 is changed, all the MMU mappings are invalid, so
we must flush everything */
tlb_flush(cs, 1);
tlb_flush(cs);
env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
}
......@@ -599,7 +599,7 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
(env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
#ifdef TARGET_X86_64
......@@ -641,7 +641,7 @@ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
if (env->cr[0] & CR0_PG_MASK) {
qemu_log_mask(CPU_LOG_MMU,
"CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
tlb_flush(CPU(cpu), 0);
tlb_flush(CPU(cpu));
}
}
......@@ -656,7 +656,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
if ((new_cr4 ^ env->cr[4]) &
(CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
/* Clear bits we're going to recompute. */
......
......@@ -387,7 +387,7 @@ static int cpu_post_load(void *opaque, int version_id)
env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
cpu_x86_update_dr7(env, dr7);
}
tlb_flush(cs, 1);
tlb_flush(cs);
if (tcg_enabled()) {
cpu_smm_update(cpu);
......
......@@ -635,5 +635,5 @@ void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
}
env->pkru = val;
tlb_flush(cs, 1);
tlb_flush(cs);
}
......@@ -289,7 +289,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
break;
case TLB_CONTROL_FLUSH_ALL_ASID:
/* FIXME: this is not 100% correct but should work for now */
tlb_flush(cs, 1);
tlb_flush(cs);
break;
}
......
......@@ -128,10 +128,9 @@ static void lm32_cpu_reset(CPUState *s)
lcc->parent_reset(s);
/* reset cpu state */
memset(env, 0, offsetof(CPULM32State, eba));
memset(env, 0, offsetof(CPULM32State, end_reset_fields));
lm32_cpu_init_cfg_reg(cpu);
tlb_flush(s, 1);
}
static void lm32_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
......
......@@ -165,6 +165,9 @@ struct CPULM32State {
struct CPUBreakpoint *cpu_breakpoint[4];
struct CPUWatchpoint *cpu_watchpoint[4];
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
......
......@@ -52,7 +52,7 @@ static void m68k_cpu_reset(CPUState *s)
mcc->parent_reset(s);
memset(env, 0, offsetof(CPUM68KState, features));
memset(env, 0, offsetof(CPUM68KState, end_reset_fields));
#if !defined(CONFIG_USER_ONLY)
env->sr = 0x2700;
#endif
......@@ -61,7 +61,6 @@ static void m68k_cpu_reset(CPUState *s)
cpu_m68k_set_ccr(env, 0);
/* TODO: We should set PC from the interrupt vector. */
env->pc = 0;
tlb_flush(s, 1);
}
static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info)
......
......@@ -112,6 +112,9 @@ typedef struct CPUM68KState {
uint32_t qregs[MAX_QREGS];
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
......
......@@ -103,9 +103,8 @@ static void mb_cpu_reset(CPUState *s)
mcc->parent_reset(s);
memset(env, 0, offsetof(CPUMBState, pvr));
memset(env, 0, offsetof(CPUMBState, end_reset_fields));
env->res_addr = RES_ADDR_NONE;
tlb_flush(s, 1);
/* Disable stack protector. */
env->shr = ~0;
......
......@@ -267,6 +267,9 @@ struct CPUMBState {
struct microblaze_mmu mmu;
#endif
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* These fields are preserved on reset. */
......
......@@ -255,7 +255,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
/* Changes to the zone protection reg flush the QEMU TLB.
Fortunately, these are very uncommon. */
if (v != env->mmu.regs[rn]) {
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
env->mmu.regs[rn] = v;
break;
......
......@@ -100,8 +100,7 @@ static void mips_cpu_reset(CPUState *s)
mcc->parent_reset(s);
memset(env, 0, offsetof(CPUMIPSState, mvp));
tlb_flush(s, 1);
memset(env, 0, offsetof(CPUMIPSState, end_reset_fields));
cpu_state_reset(env);
......
......@@ -607,6 +607,9 @@ struct CPUMIPSState {
uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
int insn_flags; /* Supported instruction set */
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
......@@ -1051,7 +1054,7 @@ static inline void compute_hflags(CPUMIPSState *env)
}
}
void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global);
void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
......
......@@ -223,12 +223,12 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
return ret;
}
void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global)
void cpu_mips_tlb_flush(CPUMIPSState *env)
{
MIPSCPU *cpu = mips_env_get_cpu(env);
/* Flush qemu's TLB and discard all shadowed entries. */
tlb_flush(CPU(cpu), flush_global);
tlb_flush(CPU(cpu));
env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
......@@ -290,7 +290,7 @@ void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
#if defined(TARGET_MIPS64)
if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
/* Access to at least one of the 64-bit segments has been disabled */
cpu_mips_tlb_flush(env, 1);
cpu_mips_tlb_flush(env);
}
#endif
if (env->CP0_Config3 & (1 << CP0C3_MT)) {
......
......@@ -1409,7 +1409,7 @@ void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
/* If the ASID changes, flush qemu's TLB. */
if ((old & env->CP0_EntryHi_ASID_mask) !=
(val & env->CP0_EntryHi_ASID_mask)) {
cpu_mips_tlb_flush(env, 1);
cpu_mips_tlb_flush(env);
}
}
......@@ -1999,7 +1999,7 @@ void r4k_helper_tlbinv(CPUMIPSState *env)
tlb->EHINV = 1;
}
}
cpu_mips_tlb_flush(env, 1);
cpu_mips_tlb_flush(env);
}
void r4k_helper_tlbinvf(CPUMIPSState *env)
......@@ -2009,7 +2009,7 @@ void r4k_helper_tlbinvf(CPUMIPSState *env)
for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
}
cpu_mips_tlb_flush(env, 1);
cpu_mips_tlb_flush(env);
}
void r4k_helper_tlbwi(CPUMIPSState *env)
......@@ -2123,7 +2123,7 @@ void r4k_helper_tlbr(CPUMIPSState *env)
/* If this will change the current ASID, flush qemu's TLB. */
if (ASID != tlb->ASID)
cpu_mips_tlb_flush (env, 1);
cpu_mips_tlb_flush(env);
r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
......
......@@ -45,10 +45,8 @@ static void moxie_cpu_reset(CPUState *s)
mcc->parent_reset(s);
memset(env, 0, sizeof(CPUMoxieState));
memset(env, 0, offsetof(CPUMoxieState, end_reset_fields));
env->pc = 0x1000;
tlb_flush(s, 1);
}
static void moxie_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
......
......@@ -56,6 +56,9 @@ typedef struct CPUMoxieState {
void *irq[8];
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
} CPUMoxieState;
......
......@@ -44,14 +44,7 @@ static void openrisc_cpu_reset(CPUState *s)
occ->parent_reset(s);
#ifndef CONFIG_USER_ONLY
memset(&cpu->env, 0, offsetof(CPUOpenRISCState, tlb));
#else
memset(&cpu->env, 0, offsetof(CPUOpenRISCState, irq));
#endif
tlb_flush(s, 1);
/*tb_flush(&cpu->env); FIXME: Do we need it? */
memset(&cpu->env, 0, offsetof(CPUOpenRISCState, end_reset_fields));
cpu->env.pc = 0x100;
cpu->env.sr = SR_FO | SR_SM;
......
......@@ -300,6 +300,9 @@ typedef struct CPUOpenRISCState {
in solt so far. */
uint32_t btaken; /* the SR_F bit */
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
......
......@@ -45,7 +45,7 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
/* For machine-state changed between user-mode and supervisor mode,
we need flush TLB when we enter&exit EXCP. */
tlb_flush(cs, 1);
tlb_flush(cs);
env->esr = env->sr;
env->sr &= ~SR_DME;
......
......@@ -53,7 +53,7 @@ void HELPER(rfe)(CPUOpenRISCState *env)
}
if (need_flush_tlb) {
tlb_flush(cs, 1);
tlb_flush(cs);
}
#endif
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
......
......@@ -47,7 +47,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(0, 17): /* SR */
if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^
(rb & (SR_IME | SR_DME | SR_SM))) {
tlb_flush(cs, 1);
tlb_flush(cs);
}
env->sr = rb;
env->sr |= SR_FO; /* FO is const equal to 1 */
......
......@@ -161,7 +161,7 @@ static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
tlb_flush(cs, 1);
tlb_flush(cs);
env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
}
......@@ -176,7 +176,7 @@ static inline void check_tlb_flush(CPUPPCState *env, bool global)
CPUPPCState *other_env = &cpu->env;
other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
tlb_flush(other_cs, 1);
tlb_flush(other_cs);
}
}
env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
......
......@@ -85,7 +85,7 @@ void helper_store_sdr1(CPUPPCState *env, target_ulong val)
if (!env->external_htab) {
if (env->spr[SPR_SDR1] != val) {
ppc_store_sdr1(env, val);
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
}
}
......@@ -114,7 +114,7 @@ void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value)
if (likely(env->pb[num] != value)) {
env->pb[num] = value;
/* Should be optimized */
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
}
......
......@@ -248,7 +248,7 @@ static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
tlb = &env->tlb.tlb6[nr];
pte_invalidate(&tlb->pte0);
}
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
......@@ -661,7 +661,7 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
tlb = &env->tlb.tlbe[i];
tlb->prot &= ~PAGE_VALID;
}
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
......@@ -863,7 +863,7 @@ static void booke206_flush_tlb(CPUPPCState *env, int flags,
tlb += booke206_tlb_size(env, i);
}
tlb_flush(CPU(cpu), 1);
tlb_flush(CPU(cpu));
}
static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
......@@ -1769,7 +1769,7 @@ void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
tlb_flush(CPU(cpu), 1);