         return sp->role.access;
 }

-static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, gfn_t gfn, u32 access)
+static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
+                                         gfn_t gfn, unsigned int access)
 {
         if (sp_has_gptes(sp)) {
                 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
@@ ... @@
                    sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
 }

-static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index, u32 access)
+static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index,
+                                    unsigned int access)
 {
         gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
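To make the hunks above easier to follow outside the KVM tree, here is a minimal standalone sketch (not the kernel code itself) of the packing that kvm_mmu_page_set_translation() relies on: each shadowed_translation entry keeps the gfn in the upper bits and the access mask in the low PAGE_SHIFT bits, which is why a plain unsigned int is wide enough for access. The ACC_* values mirror KVM's masks, but pack_translation(), unpack_gfn() and unpack_access() are made-up names used only for illustration.

    /*
     * Standalone sketch, not KVM code: how a gfn and an access mask can
     * share a single 64-bit slot, as sp->shadowed_translation does above.
     */
    #include <stdint.h>

    #define PAGE_SHIFT      12
    #define ACC_EXEC_MASK   1u
    #define ACC_WRITE_MASK  2u
    #define ACC_USER_MASK   4u
    #define ACC_ALL         (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

    typedef uint64_t gfn_t;

    /* The access bits sit below bit PAGE_SHIFT, so they can reuse the
     * bit positions a page offset would otherwise occupy. */
    static uint64_t pack_translation(gfn_t gfn, unsigned int access)
    {
            return (gfn << PAGE_SHIFT) | access;
    }

    static gfn_t unpack_gfn(uint64_t entry)
    {
            return entry >> PAGE_SHIFT;
    }

    static unsigned int unpack_access(uint64_t entry)
    {
            return entry & ACC_ALL;
    }

Unpacking with "& ACC_ALL" mirrors how kvm_mmu_page_get_access(), whose tail is the first context line above, recovers the access bits for pages that have guest PTEs.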
@@ ... @@
 static void __rmap_add(struct kvm *kvm,
                        struct kvm_mmu_memory_cache *cache,
                        const struct kvm_memory_slot *slot,
-                       u64 *spte, gfn_t gfn, u32 access)
+                       u64 *spte, gfn_t gfn, unsigned int access)
 {
         struct kvm_mmu_page *sp;
         struct kvm_rmap_head *rmap_head;
@@ ... @@
 }

 static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
-                     u64 *spte, gfn_t gfn, u32 access)
+                     u64 *spte, gfn_t gfn, unsigned int access)
 {
         struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
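As a rough usage sketch with hypothetical inputs (the helper example_gpte_access() and its arguments are invented, not actual KVM call sites): shadow-MMU callers assemble access from the ACC_* permission bits, taking them from the guest PTE or using ACC_ALL for direct maps, before handing the mask down through rmap_add() and __rmap_add(), which is why unsigned int is the natural width along the whole path.

    /*
     * Illustrative only: build an access mask the way a caller of
     * rmap_add() conceptually does. Real callers derive the bits from
     * the guest PTE, or pass ACC_ALL for direct maps.
     */
    #define ACC_EXEC_MASK   1u
    #define ACC_WRITE_MASK  2u
    #define ACC_USER_MASK   4u
    #define ACC_ALL         (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

    static unsigned int example_gpte_access(int writable, int user, int exec)
    {
            unsigned int access = 0;

            if (writable)
                    access |= ACC_WRITE_MASK;
            if (user)
                    access |= ACC_USER_MASK;
            if (exec)
                    access |= ACC_EXEC_MASK;

            return access;  /* 7 == ACC_ALL when all three are set */
    }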