        /* nvif_vmm_vX ... */
};
+struct gp100_vmm_v0 {
+        /* nvif_vmm_vX ... */
+        __u8  version;
+        __u8  fault_replay;
+};
+
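For illustration only, a client opts in to replayable faults by passing the v0 form of these class arguments when constructing the VMM; the plumbing that carries argv/argc down to the constructor is outside this patch. A minimal sketch of the argument block (the variable name is hypothetical):

        struct gp100_vmm_v0 args = {
                .version = 0,           /* select the v0 layout */
                .fault_replay = 1,      /* request replayable TEX/GCC faults */
        };
        /* handed to the GP100+ VMM constructor as argv/argc */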
struct gp100_vmm_map_vn {
        /* nvif_vmm_map_vX ... */
};
        dma_addr_t null;
        void *nullp;
+
+        bool replay;
};
int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
        .dma_bits = 47,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-       .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
+       .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp100_vmm_new },
        .kind = gm200_mmu_kind,
        .kind_sys = true,
};
        .dma_bits = 47,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, -1, NVIF_CLASS_MEM_GF100}, .umap = gf100_mem_map },
-       .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
+       .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gp10b_vmm_new },
        .kind = gm200_mmu_kind,
        .kind_sys = true,
};
        .dma_bits = 47,
        .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
        .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
-       .vmm = {{ -1, -1, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
+       .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, gv100_vmm_new },
        .kind = gm200_mmu_kind,
        .kind_sys = true,
};
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+int gp100_vmm_new_(const struct nvkm_vmm_func *,
+                   struct nvkm_mmu *, bool, u64, u64, void *, u32,
+                   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
int
gp100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
-       const u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11); /* 64KiB */
+       u64 base = BIT_ULL(10) /* VER2 */ | BIT_ULL(11) /* 64KiB */;
+       if (vmm->replay) {
+               base |= BIT_ULL(4); /* FAULT_REPLAY_TEX */
+               base |= BIT_ULL(5); /* FAULT_REPLAY_GCC */
+       }
        return gf100_vmm_join_(vmm, inst, base);
}
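For reference, with replay enabled the value handed to gf100_vmm_join_() works out, from the BIT_ULL() terms above, to:

        /* BIT_ULL(4) | BIT_ULL(5) | BIT_ULL(10) | BIT_ULL(11) == 0x0c30 */

i.e. the fault-replay enables sit alongside the existing VER2 and 64KiB flag bits in the same base word.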
}
};
+int
+gp100_vmm_new_(const struct nvkm_vmm_func *func,
+               struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
+               void *argv, u32 argc, struct lock_class_key *key,
+               const char *name, struct nvkm_vmm **pvmm)
+{
+       union {
+               struct gp100_vmm_vn vn;
+               struct gp100_vmm_v0 v0;
+       } *args = argv;
+       int ret = -ENOSYS;
+       bool replay;
+
+       if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
+               replay = args->v0.fault_replay != 0;
+       } else
+       if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
+               replay = false;
+       } else
+               return ret;
+
+       ret = nvkm_vmm_new_(func, mmu, 0, managed, addr, size, key, name, pvmm);
+       if (ret)
+               return ret;
+
+       (*pvmm)->replay = replay;
+       return 0;
+}
+
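The argument parsing above follows the usual nvif unpack idiom: nvif_unpack() accepts the versioned gp100_vmm_v0 layout, which lets the client set fault_replay, while nvif_unvers() accepts the empty gp100_vmm_vn form and leaves replay disabled; anything else is rejected with -ENOSYS before the VMM is created. The per-chipset constructors below are then switched from nv04_vmm_new_() to this helper so that every GP100-and-later VMM class parses the same arguments.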
int
gp100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
-       return nv04_vmm_new_(&gp100_vmm, mmu, 0, managed, addr, size,
-                            argv, argc, key, name, pvmm);
+       return gp100_vmm_new_(&gp100_vmm, mmu, managed, addr, size,
+                             argv, argc, key, name, pvmm);
}
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
-       return nv04_vmm_new_(&gp10b_vmm, mmu, 0, managed, addr, size,
-                            argv, argc, key, name, pvmm);
+       return gp100_vmm_new_(&gp10b_vmm, mmu, managed, addr, size,
+                             argv, argc, key, name, pvmm);
}
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
-       return nv04_vmm_new_(&gv100_vmm, mmu, 0, managed, addr, size,
-                            argv, argc, key, name, pvmm);
+       return gp100_vmm_new_(&gv100_vmm, mmu, managed, addr, size,
+                             argv, argc, key, name, pvmm);
}
              void *argv, u32 argc, struct lock_class_key *key,
              const char *name, struct nvkm_vmm **pvmm)
{
-       return nv04_vmm_new_(&tu102_vmm, mmu, 0, managed, addr, size,
-                            argv, argc, key, name, pvmm);
+       return gp100_vmm_new_(&tu102_vmm, mmu, managed, addr, size,
+                             argv, argc, key, name, pvmm);
}