KVM: arm64: Add guard pages for pKVM (protected nVHE) hypervisor stack
author    Kalesh Singh <kaleshsingh@google.com>
Wed, 20 Apr 2022 21:42:55 +0000 (14:42 -0700)
committer Marc Zyngier <maz@kernel.org>
Thu, 28 Apr 2022 19:53:13 +0000 (20:53 +0100)
Map the stack pages in the flexible private VA range and allocate
guard pages below the stack as unbacked VA space. The stack is aligned
so that any valid stack address has the PAGE_SHIFT bit set to 1; this is
used for overflow detection (implemented in a subsequent patch in the
series).
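To make the detection idea concrete: because the two-page VA range is
aligned to its own size, the guard page spans [hyp_addr, hyp_addr +
PAGE_SIZE), where bit PAGE_SHIFT is 0, while the stack page spans
[hyp_addr + PAGE_SIZE, hyp_addr + 2 * PAGE_SIZE), where bit PAGE_SHIFT
is 1. A hypothetical C helper for the check (illustrative only; the
actual detection added later in the series may be implemented
differently, e.g. in assembly) could look like:

	/*
	 * Illustrative sketch, not part of this patch: BIT(PAGE_SHIFT) is
	 * PAGE_SIZE, so a valid stack address has this bit set while a
	 * guard-page address has it clear.
	 */
	static inline bool hyp_stack_overflow(unsigned long sp)
	{
		return !(sp & PAGE_SIZE);
	}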

Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220420214317.3303360-5-kaleshsingh@google.com
arch/arm64/kvm/hyp/nvhe/setup.c

index 27af337f9fea5769c3bd4a01dce94d2837bb906e..e8d4ea2fcfa09039e98b966aefeecabb7d586e2a 100644
@@ -99,17 +99,42 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                return ret;
 
        for (i = 0; i < hyp_nr_cpus; i++) {
+               struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
+               unsigned long hyp_addr;
+
                start = (void *)kern_hyp_va(per_cpu_base[i]);
                end = start + PAGE_ALIGN(hyp_percpu_size);
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;
 
-               end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
-               start = end - PAGE_SIZE;
-               ret = pkvm_create_mappings(start, end, PAGE_HYP);
+               /*
+                * Allocate a contiguous HYP private VA range for the stack
+                * and guard page. The allocation is also aligned based on
+                * the order of its size.
+                */
+               ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
+               if (ret)
+                       return ret;
+
+               /*
+                * Since the stack grows downwards, map the stack to the page
+                * at the higher address and leave the lower guard page
+                * unbacked.
+                *
+                * Any valid stack address now has the PAGE_SHIFT bit as 1
+                * and addresses corresponding to the guard page have the
+                * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+                */
+               hyp_spin_lock(&pkvm_pgd_lock);
+               ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
+                                       PAGE_SIZE, params->stack_pa, PAGE_HYP);
+               hyp_spin_unlock(&pkvm_pgd_lock);
                if (ret)
                        return ret;
+
+               /* Update stack_hyp_va to end of the stack's private VA range */
+               params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
        }
 
        /*
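For reference, the per-CPU layout that the hunk above establishes in the
hyp private VA range (offsets relative to the order-aligned hyp_addr;
illustrative summary, not part of the patch itself):

	hyp_addr + 2 * PAGE_SIZE   params->stack_hyp_va (initial SP; stack grows down)
	hyp_addr + 1 * PAGE_SIZE   stack page, mapped PAGE_HYP  (bit PAGE_SHIFT == 1)
	hyp_addr                   guard page, left unbacked    (bit PAGE_SHIFT == 0)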