git.baikalelectronics.ru Git - kernel.git/commitdiff
drm/nouveau/ce/gv100-: move method buffer to ce ctx
author Ben Skeggs <bskeggs@redhat.com>
Wed, 1 Jun 2022 10:46:04 +0000 (20:46 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 13 Jul 2022 03:56:50 +0000 (13:56 +1000)
Didn't really know what this buffer was when initially implemented,
but these days we do, so move it somewhere more appropriate.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_abi16.h
drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c

index 4107b70065398ff6803e2ef83315b8dcef595ec8..2278b9243efe7eb2774e9846a73132acdbec9405 100644 (file)
@@ -147,6 +147,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 
        /* destroy channel object, all children will be killed too */
        if (chan->chan) {
+               nvif_object_dtor(&chan->ce);
                nouveau_channel_idle(chan->chan);
                nouveau_channel_del(&chan->chan);
        }
@@ -325,6 +326,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
                init->nr_subchan = 2;
        }
 
+       /* Workaround "nvc0" gallium driver using classes it doesn't allocate on
+        * Kepler and above.  NVKM no longer always sets CE_CTX_VALID as part of
+        * channel init, now we know what that stuff actually is.
+        *
+        * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
+        *
+        * Userspace was fixed prior to adding Ampere support.
+        */
+       switch (device->info.family) {
+       case NV_DEVICE_INFO_V0_VOLTA:
+               ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
+                                      NULL, 0, &chan->ce);
+               if (ret)
+                       goto done;
+               break;
+       case NV_DEVICE_INFO_V0_TURING:
+               ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
+                                      NULL, 0, &chan->ce);
+               if (ret)
+                       goto done;
+               break;
+       default:
+               break;
+       }
+
        /* Named memory object area */
        ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
                              0, 0, &chan->ntfy);
index 70f6aa5c9dd162ffe1e4df6eff241c2793770ac7..27eae85f33e619ff12a51cc000e5979705707b53 100644 (file)
@@ -21,6 +21,7 @@ struct nouveau_abi16_ntfy {
 struct nouveau_abi16_chan {
        struct list_head head;
        struct nouveau_channel *chan;
+       struct nvif_object ce;
        struct list_head notifiers;
        struct nouveau_bo *ntfy;
        struct nouveau_vma *ntfy_vma;
index cd5e9cdca1cf9609f3d88fcfc4d2de06ce93a527..44021d1395d39106de702349eb19af96bcc9d177 100644 (file)
  */
 #include "priv.h"
 
+#include <core/gpuobj.h>
+#include <core/object.h>
+
 #include <nvif/class.h>
 
+static int
+gv100_ce_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, int align,
+                    struct nvkm_gpuobj **pgpuobj)
+{
+       struct nvkm_device *device = object->engine->subdev.device;
+       u32 size;
+
+       /* Allocate fault method buffer (magics come from nvgpu). */
+       size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+       size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+       size = roundup(size, PAGE_SIZE);
+
+       return nvkm_gpuobj_new(device, size, align, true, parent, pgpuobj);
+}
+
+const struct nvkm_object_func
+gv100_ce_cclass = {
+       .bind = gv100_ce_cclass_bind,
+};
+
 static const struct nvkm_engine_func
 gv100_ce = {
        .intr = gp100_ce_intr,
+       .cclass = &gv100_ce_cclass,
        .sclass = {
                { -1, -1, VOLTA_DMA_COPY_A },
                {}
index b0c8342db15fd3c000432d0a1d5f4ba5eede5e58..cd53b93664d64a776e40d0532b00192eda2278b9 100644 (file)
@@ -6,4 +6,6 @@
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
 void gp100_ce_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func gv100_ce_cclass;
 #endif
index e5ff92d9364cb0e8ebabe7b4dead56cfb02b96af..9563c0175142f6651dadc49d9eade581bfe173ff 100644 (file)
@@ -26,6 +26,7 @@
 static const struct nvkm_engine_func
 tu102_ce = {
        .intr = gp100_ce_intr,
+       .cclass = &gv100_ce_cclass,
        .sclass = {
                { -1, -1, TURING_DMA_COPY_A },
                {}
index cfbe096e604f5777cf1310582cd417d74365e259..9713daee6c766a537c2472641446cdce6f42de55 100644 (file)
@@ -14,8 +14,6 @@ struct gk104_fifo_chan {
        struct list_head head;
        bool killed;
 
-       struct nvkm_memory *mthd;
-
 #define GK104_FIFO_ENGN_SW 15
        struct gk104_fifo_engn {
                struct nvkm_gpuobj *inst;
index ae6c4d846eb59949f7f69484fa8a533b57e92aaf..80456ec70e8a041d5a61f142b2e027876378c012 100644 (file)
@@ -175,13 +175,19 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
        struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
        int ret;
 
-       if (!gk104_fifo_gpfifo_engine_addr(engine))
-               return 0;
+       if (!gk104_fifo_gpfifo_engine_addr(engine)) {
+               if (engine->subdev.type != NVKM_ENGINE_CE ||
+                   engine->subdev.device->card_type < GV100)
+                       return 0;
+       }
 
        ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
        if (ret)
                return ret;
 
+       if (!gk104_fifo_gpfifo_engine_addr(engine))
+               return 0;
+
        ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
        if (ret)
                return ret;
@@ -231,7 +237,6 @@ void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
        struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-       nvkm_memory_unref(&chan->mthd);
        kfree(chan->cgrp);
        return chan;
 }
index 743791c514fef4a7f2ce6d548ac342560c34d7f3..428f9b41165c953d65d30ed01fb6dfc51e6db3fc 100644 (file)
@@ -70,8 +70,17 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
        struct nvkm_gpuobj *inst = chan->base.inst;
        int ret;
 
-       if (engine->subdev.type == NVKM_ENGINE_CE)
-               return gk104_fifo_gpfifo_kick(chan);
+       if (engine->subdev.type == NVKM_ENGINE_CE) {
+               ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
+               if (ret && suspend)
+                       return ret;
+
+               nvkm_kmap(inst);
+               nvkm_wo32(chan->base.inst, 0x220, 0x00000000);
+               nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+               nvkm_done(inst);
+               return ret;
+       }
 
        ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
        if (ret && suspend)
@@ -92,8 +101,16 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
        struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
        struct nvkm_gpuobj *inst = chan->base.inst;
 
-       if (engine->subdev.type == NVKM_ENGINE_CE)
-               return 0;
+       if (engine->subdev.type == NVKM_ENGINE_CE) {
+               const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);
+
+               nvkm_kmap(inst);
+               nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(bar2));
+               nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(bar2));
+               nvkm_done(inst);
+
+               return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+       }
 
        nvkm_kmap(inst);
        nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
@@ -123,11 +140,9 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
                       u32 *token, const struct nvkm_oclass *oclass,
                       struct nvkm_object **pobject)
 {
-       struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct gk104_fifo_chan *chan;
        int runlist = ffs(*runlists) -1, ret, i;
-       u64 usermem, mthd;
-       u32 size;
+       u64 usermem;
 
        if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
                return -EINVAL;
@@ -173,20 +188,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
        nvkm_done(fifo->user.mem);
        usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
-       /* Allocate fault method buffer (magics come from nvgpu). */
-       size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
-       size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
-       size = roundup(size, PAGE_SIZE);
-
-       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
-                             &chan->mthd);
-       if (ret)
-               return ret;
-
-       mthd = nvkm_memory_bar2(chan->mthd);
-       if (mthd == ~0ULL)
-               return -EFAULT;
-
        /* RAMFC */
        nvkm_kmap(chan->base.inst);
        nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -203,10 +204,8 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
        nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
        nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
        nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
-       nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
-       nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
        nvkm_done(chan->base.inst);
-       return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+       return 0;
 }
 
 int