diff --git a/src/hv.c b/src/hv.c
index 8c858bdb9..0388293c2 100644
--- a/src/hv.c
+++ b/src/hv.c
@@ -307,7 +307,12 @@ void hv_rearm(void)
     msr(CNTP_CTL_EL0, CNTx_CTL_ENABLE);
 }
 
-void hv_check_rendezvous(struct exc_info *ctx)
+bool hv_want_rendezvous(void)
+{
+    return hv_want_cpu != -1;
+}
+
+void hv_do_rendezvous(struct exc_info *ctx)
 {
     if (hv_want_cpu == smp_id()) {
         hv_want_cpu = -1;
@@ -323,12 +328,15 @@ void hv_check_rendezvous(struct exc_info *ctx)
     }
 }
 
-void hv_tick(struct exc_info *ctx)
+void hv_maybe_exit(void)
 {
     if (hv_should_exit) {
-        spin_unlock(&bhl);
         hv_exit_guest();
     }
+}
+
+void hv_tick(struct exc_info *ctx)
+{
     hv_wdt_pet();
     iodev_handle_events(uartproxy_iodev);
     if (iodev_can_read(uartproxy_iodev)) {
diff --git a/src/hv.h b/src/hv.h
index f8af835dc..764192746 100644
--- a/src/hv.h
+++ b/src/hv.h
@@ -97,7 +97,9 @@ void hv_rendezvous(void);
 void hv_switch_cpu(int cpu);
 void hv_arm_tick(void);
 void hv_rearm(void);
-void hv_check_rendezvous(struct exc_info *ctx);
+bool hv_want_rendezvous(void);
+void hv_do_rendezvous(struct exc_info *ctx);
+void hv_maybe_exit(void);
 void hv_tick(struct exc_info *ctx);
 
 #endif
diff --git a/src/hv_exc.c b/src/hv_exc.c
index 48526ed27..32ad79307 100644
--- a/src/hv_exc.c
+++ b/src/hv_exc.c
@@ -19,15 +19,17 @@ extern spinlock_t bhl;
      ((op2) << ESR_ISS_MSR_OP2_SHIFT))
 #define SYSREG_ISS(...) _SYSREG_ISS(__VA_ARGS__)
 
-#define D_PERCPU(t, x) t x[MAX_CPUS]
-#define PERCPU(x)      x[mrs(TPIDR_EL2)]
+#define PERCPU(x) pcpu[mrs(TPIDR_EL2)].x
 
-D_PERCPU(static bool, ipi_queued);
-D_PERCPU(static bool, ipi_pending);
-D_PERCPU(static bool, pmc_pending);
-D_PERCPU(static u64, pmc_irq_mode);
+struct hv_pcpu_data {
+    u32 ipi_queued;
+    u32 ipi_pending;
+    u32 pmc_pending;
+    u64 pmc_irq_mode;
+    u64 exc_entry_pmcr0_cnt;
+} ALIGNED(64);
 
-D_PERCPU(static u64, exc_entry_pmcr0_cnt);
+struct hv_pcpu_data pcpu[MAX_CPUS];
 
 void hv_exit_guest(void) __attribute__((noreturn));
@@ -216,7 +218,7 @@ static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
             msr(SYS_IMP_APL_IPI_RR_LOCAL_EL1, regs[rt]);
             for (int i = 0; i < MAX_CPUS; i++)
                 if (mpidr == smp_get_mpidr(i))
-                    ipi_queued[i] = true;
+                    pcpu[i].ipi_queued = true;
             return true;
         }
         case SYSREG_ISS(SYS_IMP_APL_IPI_RR_GLOBAL_EL1):
@@ -225,7 +227,7 @@ static bool hv_handle_msr(struct exc_info *ctx, u64 iss)
             msr(SYS_IMP_APL_IPI_RR_GLOBAL_EL1, regs[rt]);
             for (int i = 0; i < MAX_CPUS; i++) {
                 if (mpidr == (smp_get_mpidr(i) & 0xffff))
-                    ipi_queued[i] = true;
+                    pcpu[i].ipi_queued = true;
             }
             return true;
         case SYSREG_ISS(SYS_IMP_APL_IPI_SR_EL1):
@@ -366,10 +368,27 @@ void hv_exc_irq(struct exc_info *ctx)
 
 void hv_exc_fiq(struct exc_info *ctx)
 {
-    hv_wdt_breadcrumb('F');
-    hv_exc_entry(ctx);
+    bool tick = false;
+
+    hv_maybe_exit();
+
     if (mrs(CNTP_CTL_EL0) == (CNTx_CTL_ISTATUS | CNTx_CTL_ENABLE)) {
         msr(CNTP_CTL_EL0, CNTx_CTL_ISTATUS | CNTx_CTL_IMASK | CNTx_CTL_ENABLE);
+        tick = true;
+    }
+
+    if (mrs(TPIDR_EL2) != 0 && !(mrs(ISR_EL1) & 0x40)) {
+        // Secondary CPU and it was just a timer tick (or spurious), so just update FIQs
+        hv_update_fiq();
+        return;
+    }
+
+    // Slow (single threaded) path
+    hv_wdt_breadcrumb('F');
+    hv_exc_entry(ctx);
+
+    // Only poll for HV events in CPU 0
+    if (tick && mrs(TPIDR_EL2) == 0) {
         hv_tick(ctx);
         hv_arm_tick();
     }
@@ -403,7 +422,7 @@ void hv_exc_fiq(struct exc_info *ctx)
         msr(SYS_IMP_APL_IPI_SR_EL1, IPI_SR_PENDING);
         sysop("isb");
     }
-    hv_check_rendezvous(ctx);
+    hv_do_rendezvous(ctx);
 
     // Handles guest timers
     hv_exc_exit(ctx);
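
Note: below is a minimal standalone sketch of the per-CPU storage pattern this patch introduces in hv_exc.c, assuming the patch's layout. The mrs_tpidr_el2() stub, the MAX_CPUS value, and the main() harness are illustrative assumptions rather than m1n1 code; in the patch itself, PERCPU() reads the CPU index from the TPIDR_EL2 system register and ALIGNED(64) is m1n1's alignment attribute macro.

    /* Sketch only: stand-ins for m1n1's types and mrs() macro. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CPUS 8 /* assumed value for the sketch */

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* In m1n1, mrs(TPIDR_EL2) reads the current CPU index from a
     * system register; a fixed value stands in for it here. */
    static inline u64 mrs_tpidr_el2(void)
    {
        return 0; /* pretend we are running on CPU 0 */
    }

    /* One 64-byte-aligned slot per CPU: each CPU's flags live on
     * their own cache line, so CPUs updating their own state do not
     * contend on a shared line the way the old parallel bool arrays
     * (one array per flag, indexed by CPU) could. */
    struct hv_pcpu_data {
        u32 ipi_queued;
        u32 ipi_pending;
        u32 pmc_pending;
        u64 pmc_irq_mode;
        u64 exc_entry_pmcr0_cnt;
    } __attribute__((aligned(64)));

    static struct hv_pcpu_data pcpu[MAX_CPUS];

    /* The reworked accessor: a field of this CPU's slot instead of
     * an index into a standalone array. */
    #define PERCPU(x) pcpu[mrs_tpidr_el2()].x

    int main(void)
    {
        PERCPU(ipi_pending) = 1;                   /* this CPU's flag...   */
        printf("cpu0: %u\n", pcpu[0].ipi_pending); /* ...is slot 0's field */
        return 0;
    }

This also explains why other call sites change shape: code addressing another CPU's state now writes pcpu[i].ipi_queued instead of ipi_queued[i], exactly as in the two IPI_RR hunks above.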