return 8;
}
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+ gpa_t gpa, void *data, unsigned long len)
+{
+ int srcu_idx = srcu_read_lock(&kvm->srcu);
+ int ret = kvm_read_guest(kvm, gpa, data, len);
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ return ret;
+}
+
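For callers, this wrapper is a drop-in replacement for kvm_read_guest() on paths that run outside the vcpu run loop and therefore outside the kvm->srcu critical section. A minimal usage sketch follows; the helper name and the 8-byte table-entry layout are illustrative assumptions, not part of the patch:

    /*
     * Illustrative only: fetch one 8-byte entry of a guest-physical
     * table from a context (e.g. a device ioctl handler) that does not
     * already hold the kvm->srcu read lock.
     */
    static int read_guest_table_entry(struct kvm *kvm, gpa_t base,
                                      int index, u64 *entry)
    {
        gpa_t gpa = base + index * sizeof(*entry);

        /* Takes and drops kvm->srcu around the copy, as defined above. */
        return kvm_read_guest_lock(kvm, gpa, entry, sizeof(*entry));
    }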
static inline void *kvm_get_hyp_vector(void)
{
- return kvm_ksym_ref(__kvm_hyp_vector);
+ switch (read_cpuid_part()) {
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ case ARM_CPU_PART_CORTEX_A12:
+ case ARM_CPU_PART_CORTEX_A17:
+ {
+ extern char __kvm_hyp_vector_bp_inv[];
+ return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+ }
+
+ case ARM_CPU_PART_BRAHMA_B15:
+ case ARM_CPU_PART_CORTEX_A15:
+ {
+ extern char __kvm_hyp_vector_ic_inv[];
+ return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+ }
+ #endif
+ default:
+ {
+ extern char __kvm_hyp_vector[];
+ return kvm_ksym_ref(__kvm_hyp_vector);
+ }
+ }
}

static inline int kvm_map_vectors(void)
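kvm_get_hyp_vector() now returns a per-CPU-type HYP exception vector: Cortex-A12/A17 get a variant that invalidates the branch predictor (BTB) on guest exit, while Brahma-B15 and Cortex-A15, where BPIALL is not effective, get a variant that invalidates the instruction cache to the same effect; all other parts keep the plain __kvm_hyp_vector. The chosen vector is installed when each CPU is brought into HYP mode. A sketch of that consumer, assuming the virt/kvm/arm init path of this kernel series (simplified, error handling omitted):

    /*
     * Sketch of the caller side: each CPU programs its HYP vector base
     * (HVBAR) with whichever variant kvm_get_hyp_vector() selected.
     */
    static void cpu_init_hyp_mode(void *dummy)
    {
        phys_addr_t pgd_ptr = kvm_mmu_get_httbr();
        unsigned long stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
        unsigned long vector_ptr = (unsigned long)kvm_get_hyp_vector();

        __cpu_init_hyp_mode(pgd_ptr, stack_ptr, vector_ptr);
    }

The remaining hunk is unrelated to KVM and applies to the AMBA bus driver (drivers/amba/bus.c).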
.dev_groups = amba_dev_groups,
.match = amba_match,
.uevent = amba_uevent,
+ .dma_configure = platform_dma_configure,
.pm = &amba_pm,
- .force_dma = true,
};
+ EXPORT_SYMBOL_GPL(amba_bustype);
static int __init amba_init(void)
{
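The amba_bustype change replaces the bus-wide force_dma flag with the generic dma_configure() callback: at probe time the driver core now asks the bus how to set up DMA for a device, and AMBA reuses the platform bus implementation, which forces DMA setup for firmware-described devices much as force_dma did. A sketch of the pattern a bus type now follows; the foo names are hypothetical and platform_dma_configure()'s exact body may differ from this:

    /*
     * Hypothetical bus illustrating the new shape: supply a
     * dma_configure callback instead of setting .force_dma.
     */
    static int foo_dma_configure(struct device *dev)
    {
        /*
         * DT-described devices get firmware-driven DMA setup; passing
         * true preserves the old force_dma behaviour for this bus.
         */
        if (dev->of_node)
            return of_dma_configure(dev, dev->of_node, true);

        return 0;
    }

    struct bus_type foo_bustype = {
        .name = "foo",
        .dma_configure = foo_dma_configure,
    };

The added EXPORT_SYMBOL_GPL() simply makes amba_bustype visible to modular code that needs to reference the AMBA bus type.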