int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
+extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
+static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
!(vcpu->arch.shregs.msr & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3);
- /* H_CEDE has to be handled now, not later */
+ /* H_CEDE has to be handled now */
if (req == H_CEDE) {
kvmppc_cede(vcpu);
- kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+ if (!kvmppc_xive_rearm_escalation(vcpu)) {
+ /*
+ * Pending escalation, so abort
+ * the cede.
+ */
+ vcpu->arch.ceded = 0;
+ }
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
-void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+ bool ret = true; /* false if a pending escalation requires the cede to be aborted */
if (!esc_vaddr)
- return;
+ return ret;
/* we are using XIVE with single escalation */
* we also don't want to set xive_esc_on to 1 here in
* case we race with xive_esc_irq().
*/
- vcpu->arch.ceded = 0;
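+ /* Report the pending escalation so the caller aborts the cede */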
+ ret = false;
/*
* The escalation interrupts are special as we don't EOI them.
* There is no need to use the load-after-store ordering offset
__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
}
mb();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);