/* destroy channel object, all children will be killed too */
if (chan->chan) {
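+ /* The CE workaround object (Volta/Turing only) is a child of the
+ * channel; drop it explicitly first. nvif_object_dtor() is a no-op
+ * if the object was never created.
+ */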
+ nvif_object_dtor(&chan->ce);
nouveau_channel_idle(chan->chan);
nouveau_channel_del(&chan->chan);
}
init->nr_subchan = 2;
}
+ /* Workaround "nvc0" gallium driver using classes it doesn't allocate on
+ * Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
+ * channel init, now that we know what that stuff actually is.
+ *
+ * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
+ *
+ * Userspace was fixed prior to adding Ampere support.
+ */
+ switch (device->info.family) {
+ case NV_DEVICE_INFO_V0_VOLTA:
+ ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
+ NULL, 0, &chan->ce);
+ if (ret)
+ goto done;
+ break;
+ case NV_DEVICE_INFO_V0_TURING:
+ ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
+ NULL, 0, &chan->ce);
+ if (ret)
+ goto done;
+ break;
+ default:
+ break;
+ }
+
/* Named memory object area */
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
struct nouveau_abi16_chan {
struct list_head head;
struct nouveau_channel *chan;
+ struct nvif_object ce; /* "abi16CeWar" dma-copy object, Volta/Turing only */
struct list_head notifiers;
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
*/
#include "priv.h"
+#include <core/gpuobj.h>
+#include <core/object.h>
+
#include <nvif/class.h>

+static int
+gv100_ce_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, int align,
+ struct nvkm_gpuobj **pgpuobj)
+{
+ struct nvkm_device *device = object->engine->subdev.device;
+ u32 size;
+
+ /* Allocate fault method buffer (magics come from nvgpu). */
+ size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+ size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+ size = roundup(size, PAGE_SIZE);
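+ /* With two PCEs mapped, for example, this works out to
+ * 27 * 5 * ((13 * 2) + 2) = 3780 bytes, i.e. one page after rounding.
+ */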
+
+ return nvkm_gpuobj_new(device, size, align, true, parent, pgpuobj);
+}
+
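+/* Context class for the GV100 copy engine: binding a channel's CE context
+ * allocates the per-channel fault method buffer, which the FIFO code then
+ * points RAMFC at during channel engine init.
+ */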
+const struct nvkm_object_func
+gv100_ce_cclass = {
+ .bind = gv100_ce_cclass_bind,
+};
+
static const struct nvkm_engine_func
gv100_ce = {
.intr = gp100_ce_intr,
+ .cclass = &gv100_ce_cclass,
.sclass = {
{ -1, -1, VOLTA_DMA_COPY_A },
{}
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
void gp100_ce_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func gv100_ce_cclass;
#endif
static const struct nvkm_engine_func
tu102_ce = {
.intr = gp100_ce_intr,
+ .cclass = &gv100_ce_cclass, /* same per-channel fault method buffer as GV100 */
.sclass = {
{ -1, -1, TURING_DMA_COPY_A },
{}
struct list_head head;
bool killed;

- struct nvkm_memory *mthd;
-
#define GK104_FIFO_ENGN_SW 15
struct gk104_fifo_engn {
struct nvkm_gpuobj *inst;
struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
int ret;
- if (!gk104_fifo_gpfifo_engine_addr(engine))
- return 0;
+ if (!gk104_fifo_gpfifo_engine_addr(engine)) {
+ if (engine->subdev.type != NVKM_ENGINE_CE ||
+ engine->subdev.device->card_type < GV100)
+ return 0;
+ }
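+ /* CE on GV100 and later lands here: it has no engine context pointer,
+ * but the context object must still be bound so gv100_ce_cclass can
+ * allocate the per-channel fault method buffer.
+ */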
ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
if (ret)
return ret;
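+ /* No engine context address to map for CE; engine_init() will point
+ * RAMFC at the method buffer's BAR2 address instead.
+ */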
+ if (!gk104_fifo_gpfifo_engine_addr(engine))
+ return 0;
+
ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
if (ret)
return ret;
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_memory_unref(&chan->mthd);
kfree(chan->cgrp);
return chan;
}
struct nvkm_gpuobj *inst = chan->base.inst;
int ret;
- if (engine->subdev.type == NVKM_ENGINE_CE)
- return gk104_fifo_gpfifo_kick(chan);
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
+ if (ret && suspend)
+ return ret;
+
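+ /* Scrub the method buffer address from RAMFC now that the CE
+ * context has been marked invalid.
+ */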
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, 0x220, 0x00000000);
+ nvkm_wo32(inst, 0x224, 0x00000000);
+ nvkm_done(inst);
+ return ret;
+ }
ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
if (ret && suspend)
struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
struct nvkm_gpuobj *inst = chan->base.inst;
- if (engine->subdev.type == NVKM_ENGINE_CE)
- return 0;
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);
+
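+ /* Point RAMFC at the fault method buffer allocated when the CE
+ * context was bound, then mark the context valid.
+ */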
+ nvkm_kmap(inst);
+ nvkm_wo32(inst, 0x220, lower_32_bits(bar2));
+ nvkm_wo32(inst, 0x224, upper_32_bits(bar2));
+ nvkm_done(inst);
+
+ return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+ }
nvkm_kmap(inst);
nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
u32 *token, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
- struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
- u64 usermem, mthd;
- u32 size;
+ u64 usermem;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
- /* Allocate fault method buffer (magics come from nvgpu). */
- size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
- size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
- size = roundup(size, PAGE_SIZE);
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
- &chan->mthd);
- if (ret)
- return ret;
-
- mthd = nvkm_memory_bar2(chan->mthd);
- if (mthd == ~0ULL)
- return -EFAULT;
-
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
- nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
- nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
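+ /* 0x220/0x224 (fault method buffer address) are now programmed by
+ * the engine init path once the CE context has been bound.
+ */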
nvkm_done(chan->base.inst);
- return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+ return 0;
}
int