int was_rmapped = 0;
int ret = RET_PF_FIXED;
bool flush = false;
- int make_spte_ret;
+ bool wrprot;
u64 spte;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
was_rmapped = 1;
}
- make_spte_ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
- true, host_writable, sp_ad_disabled(sp), &spte);
+ wrprot = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
+ true, host_writable, sp_ad_disabled(sp), &spte);
if (*sptep == spte) {
	ret = RET_PF_SPURIOUS;
} else {
	trace_kvm_mmu_set_spte(level, gfn, sptep);
	flush |= mmu_spte_update(sptep, spte);
}
- if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
+ if (wrprot) {
if (write_fault)
ret = RET_PF_EMULATE;
}
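Read as a whole, this hunk collapses the old tri-state bitmask into signals the caller already has on hand. A short summary of where each former bit went (my reading of the change, assuming mmu_spte_update() still returns whether a remote TLB flush is needed):

/*
 * SET_SPTE_WRITE_PROTECTED_PT    -> make_spte()'s new bool return (wrprot)
 * SET_SPTE_SPURIOUS              -> the caller's *sptep == spte comparison
 * SET_SPTE_NEED_REMOTE_TLB_FLUSH -> the caller's flush |= mmu_spte_update()
 */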
RET_PF_SPURIOUS,
};
-/* Bits which may be returned by set_spte() */
-#define SET_SPTE_WRITE_PROTECTED_PT BIT(0)
-#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
-#define SET_SPTE_SPURIOUS BIT(2)
-
int kvm_mmu_max_mapping_level(struct kvm *kvm,
const struct kvm_memory_slot *slot, gfn_t gfn,
kvm_pfn_t pfn, int max_level);
E820_TYPE_RAM);
}
-int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
bool can_unsync, bool host_writable, bool ad_disabled,
u64 *new_spte)
{
u64 spte = SPTE_MMU_PRESENT_MASK;
- int ret = 0;
+ bool wrprot = false;
if (ad_disabled)
spte |= SPTE_TDP_AD_DISABLED_MASK;
if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync, speculative)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
- ret |= SET_SPTE_WRITE_PROTECTED_PT;
+ wrprot = true;
pte_access &= ~ACC_WRITE_MASK;
spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
}
kvm_vcpu_mark_page_dirty(vcpu, gfn);
*new_spte = spte;
- return ret;
+ return wrprot;
}
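As I read the new contract, wrprot is true only when mmu_try_to_unsync_pages() forced the mapping read-only, in which case the writable bits have already been stripped from the computed SPTE. A hypothetical self-check expressing that invariant, which could sit just before the function's return (not part of the patch):

/* Hypothetical invariant: a write-protected result must not be writable. */
WARN_ON_ONCE(wrprot && (spte & (PT_WRITABLE_MASK | shadow_mmu_writable_mask)));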
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
return gen;
}
-int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
+bool make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
bool can_unsync, bool host_writable, bool ad_disabled,
u64 *new_spte);
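For reference, a minimal sketch of a caller of the new interface (hypothetical values; assumes gfn and pfn were already resolved by the fault path):

u64 new_spte;
bool wrprot;

wrprot = make_spte(vcpu, ACC_ALL, PG_LEVEL_4K, gfn, pfn,
		   0 /* old_spte */, false /* speculative */,
		   true /* can_unsync */, true /* host_writable */,
		   false /* ad_disabled */, &new_spte);
if (wrprot)
	pr_debug("gfn %llx mapped read-only to keep shadow pages in sync\n", gfn);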
{
u64 new_spte;
int ret = RET_PF_FIXED;
- int make_spte_ret = 0;
+ bool wrprot = false;
if (unlikely(!fault->slot))
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
else
- make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
+ wrprot = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
fault->pfn, iter->old_spte, fault->prefault, true,
fault->map_writable, !shadow_accessed_mask,
&new_spte);
* protected, emulation is needed. If the emulation was skipped,
* the vCPU would have the same fault again.
*/
- if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
+ if (wrprot) {
if (fault->write)
ret = RET_PF_EMULATE;
}
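The TDP MMU path follows the same two-signal contract; summarizing the possible outcomes of this hunk (my annotation, not patch text):

/*
 * wrprot && fault->write  -> RET_PF_EMULATE: the write hit a gfn that must
 *                            stay write-protected, so emulate the access.
 * wrprot && !fault->write -> RET_PF_FIXED: read/exec faults can still be
 *                            satisfied by the read-only mapping.
 */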