path: root/drivers/gpu/drm/nouveau/nvkm/engine/fifo
author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
commit     5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree       cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/gpu/drm/nouveau/nvkm/engine/fifo
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis.
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/engine/fifo')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild      |  30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c      | 389
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c      | 252
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h      |  75
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c      | 480
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h      |  78
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.c      | 111
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.h      |  25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c       | 231
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/g98.c       |  70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c     | 550
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c     |  45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c     | 969
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c     | 833
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c     | 132
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c     |  77
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c     |  52
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c     | 156
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c     |  66
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c     | 139
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c     | 492
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c      | 543
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c      | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c      | 142
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c      | 252
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c      | 398
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h      | 209
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h  | 133
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c      | 430
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h      | 125
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.c      |  45
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.h      |  31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c     | 286
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c     | 125
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c     | 409
35 files changed, 8493 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
new file mode 100644
index 000000000..5a074b997
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/fifo/base.o
+nvkm-y += nvkm/engine/fifo/cgrp.o
+nvkm-y += nvkm/engine/fifo/chan.o
+nvkm-y += nvkm/engine/fifo/chid.o
+nvkm-y += nvkm/engine/fifo/runl.o
+nvkm-y += nvkm/engine/fifo/runq.o
+
+nvkm-y += nvkm/engine/fifo/nv04.o
+nvkm-y += nvkm/engine/fifo/nv10.o
+nvkm-y += nvkm/engine/fifo/nv17.o
+nvkm-y += nvkm/engine/fifo/nv40.o
+nvkm-y += nvkm/engine/fifo/nv50.o
+nvkm-y += nvkm/engine/fifo/g84.o
+nvkm-y += nvkm/engine/fifo/g98.o
+nvkm-y += nvkm/engine/fifo/gf100.o
+nvkm-y += nvkm/engine/fifo/gk104.o
+nvkm-y += nvkm/engine/fifo/gk110.o
+nvkm-y += nvkm/engine/fifo/gk208.o
+nvkm-y += nvkm/engine/fifo/gk20a.o
+nvkm-y += nvkm/engine/fifo/gm107.o
+nvkm-y += nvkm/engine/fifo/gm200.o
+nvkm-y += nvkm/engine/fifo/gp100.o
+nvkm-y += nvkm/engine/fifo/gv100.o
+nvkm-y += nvkm/engine/fifo/tu102.o
+nvkm-y += nvkm/engine/fifo/ga100.o
+nvkm-y += nvkm/engine/fifo/ga102.o
+
+nvkm-y += nvkm/engine/fifo/ucgrp.o
+nvkm-y += nvkm/engine/fifo/uchan.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
new file mode 100644
index 000000000..5ea9a2ff0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+#include "runq.h"
+
+#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+
+#include <nvif/cl0080.h>
+#include <nvif/unpack.h>
+
+bool
+nvkm_fifo_ctxsw_in_progress(struct nvkm_engine *engine)
+{
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ nvkm_runl_foreach(runl, engine->subdev.device->fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engn->engine == engine)
+ return engn->func->chsw ? engn->func->chsw(engn) : false;
+ }
+ }
+
+ return false;
+}
+
+void
+nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+ return fifo->func->pause(fifo, flags);
+}
+
+void
+nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags)
+{
+ return fifo->func->start(fifo, flags);
+}
+
+void
+nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
+{
+ return fifo->func->mmu_fault->recover(fifo, info);
+}
+
+static int
+nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+
+ if (oclass->engn == &fifo->func->cgrp.user)
+ return nvkm_ucgrp_new(fifo, oclass, argv, argc, pobject);
+
+ if (oclass->engn == &fifo->func->chan.user)
+ return nvkm_uchan_new(fifo, NULL, oclass, argv, argc, pobject);
+
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
+static const struct nvkm_device_oclass
+nvkm_fifo_class = {
+ .ctor = nvkm_fifo_class_new,
+};
+
+static int
+nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(oclass->engine);
+ const struct nvkm_fifo_func_cgrp *cgrp = &fifo->func->cgrp;
+ const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
+ int c = 0;
+
+ /* *_CHANNEL_GROUP_* */
+ if (cgrp->user.oclass) {
+ if (c++ == index) {
+ oclass->base = cgrp->user;
+ oclass->engn = &fifo->func->cgrp.user;
+ *class = &nvkm_fifo_class;
+ return 0;
+ }
+ }
+
+ /* *_CHANNEL_DMA, *_CHANNEL_GPFIFO_* */
+ if (chan->user.oclass) {
+ if (c++ == index) {
+ oclass->base = chan->user;
+ oclass->engn = &fifo->func->chan.user;
+ *class = &nvkm_fifo_class;
+ return 0;
+ }
+ }
+
+ return c;
+}
+
+static int
+nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ struct nvkm_runl *runl;
+
+ nvkm_inth_block(&fifo->engine.subdev.inth);
+
+ nvkm_runl_foreach(runl, fifo)
+ nvkm_runl_fini(runl);
+
+ return 0;
+}
+
+static int
+nvkm_fifo_init(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ struct nvkm_runq *runq;
+ struct nvkm_runl *runl;
+ u32 mask = 0;
+
+ if (fifo->func->init_pbdmas) {
+ nvkm_runq_foreach(runq, fifo)
+ mask |= BIT(runq->id);
+
+ fifo->func->init_pbdmas(fifo, mask);
+
+ nvkm_runq_foreach(runq, fifo)
+ runq->func->init(runq);
+ }
+
+ nvkm_runl_foreach(runl, fifo) {
+ if (runl->func->init)
+ runl->func->init(runl);
+ }
+
+ if (fifo->func->init)
+ fifo->func->init(fifo);
+
+ nvkm_inth_allow(&fifo->engine.subdev.inth);
+ return 0;
+}
+
+static int
+nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ int ret;
+
+ ret = nvkm_subdev_oneinit(&fifo->engine.subdev);
+ if (ret)
+ return ret;
+
+ switch (mthd) {
+ case NV_DEVICE_HOST_CHANNELS: *data = fifo->chid ? fifo->chid->nr : 0; return 0;
+ case NV_DEVICE_HOST_RUNLISTS:
+ *data = 0;
+ nvkm_runl_foreach(runl, fifo)
+ *data |= BIT(runl->id);
+ return 0;
+ case NV_DEVICE_HOST_RUNLIST_ENGINES:
+ runl = nvkm_runl_get(fifo, *data, 0);
+ if (runl) {
+ *data = 0;
+ nvkm_runl_foreach_engn(engn, runl) {
+#define CASE(n) case NVKM_ENGINE_##n: *data |= NV_DEVICE_HOST_RUNLIST_ENGINES_##n; break
+ switch (engn->engine->subdev.type) {
+ case NVKM_ENGINE_DMAOBJ:
+ break;
+ CASE(SW );
+ CASE(GR );
+ CASE(MPEG );
+ CASE(ME );
+ CASE(CIPHER);
+ CASE(BSP );
+ CASE(VP );
+ CASE(CE );
+ CASE(SEC );
+ CASE(MSVLD );
+ CASE(MSPDEC);
+ CASE(MSPPP );
+ CASE(MSENC );
+ CASE(VIC );
+ CASE(SEC2 );
+ CASE(NVDEC );
+ CASE(NVENC );
+ default:
+ WARN_ON(1);
+ break;
+ }
+#undef CASE
+ }
+ return 0;
+ }
+ return -EINVAL;
+ case NV_DEVICE_HOST_RUNLIST_CHANNELS:
+ if (!fifo->chid) {
+ runl = nvkm_runl_get(fifo, *data, 0);
+ if (runl) {
+ *data = runl->chid->nr;
+ return 0;
+ }
+ }
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+static int
+nvkm_fifo_oneinit(struct nvkm_engine *engine)
+{
+ struct nvkm_subdev *subdev = &engine->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ int ret, nr, i;
+
+ /* Initialise CHID/CGID allocator(s) on GPUs where they aren't per-runlist. */
+ if (fifo->func->chid_nr) {
+ ret = fifo->func->chid_ctor(fifo, fifo->func->chid_nr(fifo));
+ if (ret)
+ return ret;
+ }
+
+ /* Create runqueues for each PBDMA. */
+ if (fifo->func->runq_nr) {
+ for (nr = fifo->func->runq_nr(fifo), i = 0; i < nr; i++) {
+ if (!nvkm_runq_new(fifo, i))
+ return -ENOMEM;
+ }
+ }
+
+ /* Create runlists. */
+ ret = fifo->func->runl_ctor(fifo);
+ if (ret)
+ return ret;
+
+ nvkm_runl_foreach(runl, fifo) {
+ RUNL_DEBUG(runl, "chan:%06x", runl->chan);
+ nvkm_runl_foreach_engn(engn, runl) {
+ ENGN_DEBUG(engn, "");
+ }
+ }
+
+ /* Register interrupt handler. */
+ if (fifo->func->intr) {
+ ret = nvkm_inth_add(&device->mc->intr, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
+ subdev, fifo->func->intr, &subdev->inth);
+ if (ret) {
+ nvkm_error(subdev, "intr %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Initialise non-stall intr handling. */
+ if (fifo->func->nonstall_ctor) {
+ ret = fifo->func->nonstall_ctor(fifo);
+ if (ret) {
+ nvkm_error(subdev, "nonstall %d\n", ret);
+ }
+ }
+
+ /* Allocate USERD + BAR1 polling area. */
+ if (fifo->func->chan.func->userd->bar == 1) {
+ struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *
+ fifo->func->chan.func->userd->size, 0, true,
+ &fifo->userd.mem);
+ if (ret)
+ return ret;
+
+ ret = nvkm_vmm_get(bar1, 12, nvkm_memory_size(fifo->userd.mem), &fifo->userd.bar1);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_map(fifo->userd.mem, 0, bar1, fifo->userd.bar1, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+nvkm_fifo_preinit(struct nvkm_engine *engine)
+{
+ nvkm_mc_reset(engine->subdev.device, NVKM_ENGINE_FIFO, 0);
+}
+
+static void *
+nvkm_fifo_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_fifo *fifo = nvkm_fifo(engine);
+ struct nvkm_runl *runl, *runt;
+ struct nvkm_runq *runq, *rtmp;
+
+ if (fifo->userd.bar1)
+ nvkm_vmm_put(nvkm_bar_bar1_vmm(engine->subdev.device), &fifo->userd.bar1);
+ nvkm_memory_unref(&fifo->userd.mem);
+
+ list_for_each_entry_safe(runl, runt, &fifo->runls, head)
+ nvkm_runl_del(runl);
+ list_for_each_entry_safe(runq, rtmp, &fifo->runqs, head)
+ nvkm_runq_del(runq);
+
+ nvkm_chid_unref(&fifo->cgid);
+ nvkm_chid_unref(&fifo->chid);
+
+ nvkm_event_fini(&fifo->nonstall.event);
+ mutex_destroy(&fifo->mutex);
+ return fifo;
+}
+
+static const struct nvkm_engine_func
+nvkm_fifo = {
+ .dtor = nvkm_fifo_dtor,
+ .preinit = nvkm_fifo_preinit,
+ .oneinit = nvkm_fifo_oneinit,
+ .info = nvkm_fifo_info,
+ .init = nvkm_fifo_init,
+ .fini = nvkm_fifo_fini,
+ .base.sclass = nvkm_fifo_class_get,
+};
+
+int
+nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+ enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+ struct nvkm_fifo *fifo;
+ int ret;
+
+ if (!(fifo = *pfifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
+ return -ENOMEM;
+
+ fifo->func = func;
+ INIT_LIST_HEAD(&fifo->runqs);
+ INIT_LIST_HEAD(&fifo->runls);
+ /*TODO: Needs to be >CTXSW_TIMEOUT, so RC can recover before this is hit.
+ * CTXSW_TIMEOUT HW default seems to differ between GPUs, so just a
+ * large number for now until we support changing it.
+ */
+ fifo->timeout.chan_msec = 10000;
+ spin_lock_init(&fifo->lock);
+ mutex_init(&fifo->mutex);
+
+ ret = nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
+ if (ret)
+ return ret;
+
+ if (func->nonstall) {
+ ret = nvkm_event_init(func->nonstall, &fifo->engine.subdev, 1, 1,
+ &fifo->nonstall.event);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
new file mode 100644
index 000000000..ea53fb3d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+#include "priv.h"
+
+#include <core/gpuobj.h>
+#include <subdev/mmu.h>
+
+static void
+nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
+{
+ struct nvkm_ectx *ectx = *pectx;
+
+ if (ectx) {
+ struct nvkm_engn *engn = ectx->engn;
+
+ if (refcount_dec_and_test(&ectx->refs)) {
+ CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
+ nvkm_object_del(&ectx->object);
+ list_del(&ectx->head);
+ kfree(ectx);
+ }
+
+ *pectx = NULL;
+ }
+}
+
+static int
+nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
+ struct nvkm_chan *chan, struct nvkm_client *client)
+{
+ struct nvkm_engine *engine = engn->engine;
+ struct nvkm_oclass cclass = {
+ .client = client,
+ .engine = engine,
+ };
+ struct nvkm_ectx *ectx;
+ int ret = 0;
+
+ /* Look for an existing context for this engine in the channel group. */
+ ectx = nvkm_list_find(ectx, &cgrp->ectxs, head, ectx->engn == engn);
+ if (ectx) {
+ refcount_inc(&ectx->refs);
+ *pectx = ectx;
+ return 0;
+ }
+
+ /* Nope - create a fresh one. */
+ CGRP_TRACE(cgrp, "ctor ectx %d[%s]", engn->id, engn->engine->subdev.name);
+ if (!(ectx = *pectx = kzalloc(sizeof(*ectx), GFP_KERNEL)))
+ return -ENOMEM;
+
+ ectx->engn = engn;
+ refcount_set(&ectx->refs, 1);
+ refcount_set(&ectx->uses, 0);
+ list_add_tail(&ectx->head, &cgrp->ectxs);
+
+ /* Allocate the HW structures. */
+ if (engine->func->fifo.cclass)
+ ret = engine->func->fifo.cclass(chan, &cclass, &ectx->object);
+ else if (engine->func->cclass)
+ ret = nvkm_object_new_(engine->func->cclass, &cclass, NULL, 0, &ectx->object);
+
+ if (ret)
+ nvkm_cgrp_ectx_put(cgrp, pectx);
+
+ return ret;
+}
+
+void
+nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
+{
+ struct nvkm_vctx *vctx = *pvctx;
+
+ if (vctx) {
+ struct nvkm_engn *engn = vctx->ectx->engn;
+
+ if (refcount_dec_and_test(&vctx->refs)) {
+ CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
+ nvkm_vmm_put(vctx->vmm, &vctx->vma);
+ nvkm_gpuobj_del(&vctx->inst);
+
+ nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
+ if (vctx->vmm) {
+ atomic_dec(&vctx->vmm->engref[engn->engine->subdev.type]);
+ nvkm_vmm_unref(&vctx->vmm);
+ }
+ list_del(&vctx->head);
+ kfree(vctx);
+ }
+
+ *pvctx = NULL;
+ }
+}
+
+int
+nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_chan *chan,
+ struct nvkm_vctx **pvctx, struct nvkm_client *client)
+{
+ struct nvkm_ectx *ectx;
+ struct nvkm_vctx *vctx;
+ int ret;
+
+ /* Look for an existing sub-context for this engine+VEID in the channel group. */
+ vctx = nvkm_list_find(vctx, &cgrp->vctxs, head,
+ vctx->ectx->engn == engn && vctx->vmm == chan->vmm);
+ if (vctx) {
+ refcount_inc(&vctx->refs);
+ *pvctx = vctx;
+ return 0;
+ }
+
+ /* Nope - create a fresh one. But, context first. */
+ ret = nvkm_cgrp_ectx_get(cgrp, engn, &ectx, chan, client);
+ if (ret) {
+ CGRP_ERROR(cgrp, "ectx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
+ return ret;
+ }
+
+ /* Now, create the sub-context. */
+ CGRP_TRACE(cgrp, "ctor vctx %d[%s]", engn->id, engn->engine->subdev.name);
+ if (!(vctx = *pvctx = kzalloc(sizeof(*vctx), GFP_KERNEL))) {
+ nvkm_cgrp_ectx_put(cgrp, &ectx);
+ return -ENOMEM;
+ }
+
+ vctx->ectx = ectx;
+ vctx->vmm = nvkm_vmm_ref(chan->vmm);
+ refcount_set(&vctx->refs, 1);
+ list_add_tail(&vctx->head, &cgrp->vctxs);
+
+ /* MMU on some GPUs needs to know engine usage for TLB invalidation. */
+ if (vctx->vmm)
+ atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);
+
+ /* Allocate the HW structures. */
+ if (engn->func->bind) {
+ ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
+ if (ret == 0 && engn->func->ctor)
+ ret = engn->func->ctor(engn, vctx);
+ }
+
+ if (ret)
+ nvkm_cgrp_vctx_put(cgrp, pvctx);
+
+ return ret;
+}
+
+static void
+nvkm_cgrp_del(struct kref *kref)
+{
+ struct nvkm_cgrp *cgrp = container_of(kref, typeof(*cgrp), kref);
+ struct nvkm_runl *runl = cgrp->runl;
+
+ if (runl->cgid)
+ nvkm_chid_put(runl->cgid, cgrp->id, &cgrp->lock);
+
+ mutex_destroy(&cgrp->mutex);
+ nvkm_vmm_unref(&cgrp->vmm);
+ kfree(cgrp);
+}
+
+void
+nvkm_cgrp_unref(struct nvkm_cgrp **pcgrp)
+{
+ struct nvkm_cgrp *cgrp = *pcgrp;
+
+ if (!cgrp)
+ return;
+
+ kref_put(&cgrp->kref, nvkm_cgrp_del);
+ *pcgrp = NULL;
+}
+
+struct nvkm_cgrp *
+nvkm_cgrp_ref(struct nvkm_cgrp *cgrp)
+{
+ if (cgrp)
+ kref_get(&cgrp->kref);
+
+ return cgrp;
+}
+
+void
+nvkm_cgrp_put(struct nvkm_cgrp **pcgrp, unsigned long irqflags)
+{
+ struct nvkm_cgrp *cgrp = *pcgrp;
+
+ if (!cgrp)
+ return;
+
+ *pcgrp = NULL;
+ spin_unlock_irqrestore(&cgrp->lock, irqflags);
+}
+
+int
+nvkm_cgrp_new(struct nvkm_runl *runl, const char *name, struct nvkm_vmm *vmm, bool hw,
+ struct nvkm_cgrp **pcgrp)
+{
+ struct nvkm_cgrp *cgrp;
+
+ if (!(cgrp = *pcgrp = kmalloc(sizeof(*cgrp), GFP_KERNEL)))
+ return -ENOMEM;
+
+ cgrp->func = runl->fifo->func->cgrp.func;
+ strscpy(cgrp->name, name, sizeof(cgrp->name));
+ cgrp->runl = runl;
+ cgrp->vmm = nvkm_vmm_ref(vmm);
+ cgrp->hw = hw;
+ cgrp->id = -1;
+ kref_init(&cgrp->kref);
+ INIT_LIST_HEAD(&cgrp->chans);
+ cgrp->chan_nr = 0;
+ spin_lock_init(&cgrp->lock);
+ INIT_LIST_HEAD(&cgrp->ectxs);
+ INIT_LIST_HEAD(&cgrp->vctxs);
+ mutex_init(&cgrp->mutex);
+ atomic_set(&cgrp->rc, NVKM_CGRP_RC_NONE);
+
+ if (runl->cgid) {
+ cgrp->id = nvkm_chid_get(runl->cgid, cgrp);
+ if (cgrp->id < 0) {
+ RUNL_ERROR(runl, "!cgids");
+ nvkm_cgrp_unref(pcgrp);
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
new file mode 100644
index 000000000..5f6abd59a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_CGRP_H__
+#define __NVKM_CGRP_H__
+#include <core/os.h>
+struct nvkm_chan;
+struct nvkm_client;
+
+struct nvkm_vctx {
+ struct nvkm_ectx *ectx;
+ struct nvkm_vmm *vmm;
+ refcount_t refs;
+
+ struct nvkm_gpuobj *inst;
+ struct nvkm_vma *vma;
+
+ struct list_head head;
+};
+
+struct nvkm_ectx {
+ struct nvkm_engn *engn;
+ refcount_t refs;
+ refcount_t uses;
+
+ struct nvkm_object *object;
+
+ struct list_head head;
+};
+
+struct nvkm_cgrp {
+ const struct nvkm_cgrp_func {
+ void (*preempt)(struct nvkm_cgrp *);
+ } *func;
+ char name[64];
+ struct nvkm_runl *runl;
+ struct nvkm_vmm *vmm;
+ bool hw;
+ int id;
+ struct kref kref;
+
+ struct list_head chans;
+ int chan_nr;
+
+ spinlock_t lock; /* protects irq handler channel (group) lookup */
+
+ struct list_head ectxs;
+ struct list_head vctxs;
+ struct mutex mutex;
+
+#define NVKM_CGRP_RC_NONE 0
+#define NVKM_CGRP_RC_PENDING 1
+#define NVKM_CGRP_RC_RUNNING 2
+ atomic_t rc;
+
+ struct list_head head;
+};
+
+int nvkm_cgrp_new(struct nvkm_runl *, const char *name, struct nvkm_vmm *, bool hw,
+ struct nvkm_cgrp **);
+struct nvkm_cgrp *nvkm_cgrp_ref(struct nvkm_cgrp *);
+void nvkm_cgrp_unref(struct nvkm_cgrp **);
+int nvkm_cgrp_vctx_get(struct nvkm_cgrp *, struct nvkm_engn *, struct nvkm_chan *,
+ struct nvkm_vctx **, struct nvkm_client *);
+void nvkm_cgrp_vctx_put(struct nvkm_cgrp *, struct nvkm_vctx **);
+
+void nvkm_cgrp_put(struct nvkm_cgrp **, unsigned long irqflags);
+
+#define nvkm_cgrp_foreach_chan(chan,cgrp) list_for_each_entry((chan), &(cgrp)->chans, head)
+#define nvkm_cgrp_foreach_chan_safe(chan,ctmp,cgrp) \
+ list_for_each_entry_safe((chan), (ctmp), &(cgrp)->chans, head)
+
+#define CGRP_PRCLI(c,l,p,f,a...) RUNL_PRINT((c)->runl, l, p, "%04x:[%s]"f, (c)->id, (c)->name, ##a)
+#define CGRP_PRINT(c,l,p,f,a...) RUNL_PRINT((c)->runl, l, p, "%04x:"f, (c)->id, ##a)
+#define CGRP_ERROR(c,f,a...) CGRP_PRCLI((c), ERROR, err, " "f"\n", ##a)
+#define CGRP_TRACE(c,f,a...) CGRP_PRINT((c), TRACE, info, " "f"\n", ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
new file mode 100644
index 000000000..b7c9d6115
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "chan.h"
+#include "chid.h"
+#include "cgrp.h"
+#include "chid.h"
+#include "runl.h"
+#include "priv.h"
+
+#include <core/ramht.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+#include <nvif/if0020.h>
+
+const struct nvkm_event_func
+nvkm_chan_event = {
+};
+
+void
+nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
+{
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+ struct nvkm_runl *runl = cgrp->runl;
+ struct nvkm_engine *engine = engn->engine;
+
+ if (!engn->func->bind)
+ return;
+
+ CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);
+
+ /* Prevent any channel in channel group from being rescheduled, kick them
+ * off host and any engine(s) they're loaded on.
+ */
+ if (cgrp->hw)
+ nvkm_runl_block(runl);
+ else
+ nvkm_chan_block(chan);
+ nvkm_chan_preempt(chan, true);
+
+ /* Update context pointer. */
+ engn->func->bind(engn, cctx, chan);
+
+ /* Resume normal operation. */
+ if (cgrp->hw)
+ nvkm_runl_allow(runl);
+ else
+ nvkm_chan_allow(chan);
+}
+
+void
+nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
+{
+ struct nvkm_cctx *cctx = *pcctx;
+
+ if (cctx) {
+ struct nvkm_engn *engn = cctx->vctx->ectx->engn;
+
+ if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
+ CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
+ nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
+ list_del(&cctx->head);
+ kfree(cctx);
+ mutex_unlock(&chan->cgrp->mutex);
+ }
+
+ *pcctx = NULL;
+ }
+}
+
+int
+nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
+ struct nvkm_client *client)
+{
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+ struct nvkm_vctx *vctx;
+ struct nvkm_cctx *cctx;
+ int ret;
+
+ /* Look for an existing channel context for this engine+VEID. */
+ mutex_lock(&cgrp->mutex);
+ cctx = nvkm_list_find(cctx, &chan->cctxs, head,
+ cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
+ if (cctx) {
+ refcount_inc(&cctx->refs);
+ *pcctx = cctx;
+ mutex_unlock(&chan->cgrp->mutex);
+ return 0;
+ }
+
+ /* Nope - create a fresh one. But, sub-context first. */
+ ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
+ if (ret) {
+ CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
+ goto done;
+ }
+
+ /* Now, create the channel context - to track engine binding. */
+ CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
+ if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
+ nvkm_cgrp_vctx_put(cgrp, &vctx);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ cctx->vctx = vctx;
+ refcount_set(&cctx->refs, 1);
+ refcount_set(&cctx->uses, 0);
+ list_add_tail(&cctx->head, &chan->cctxs);
+done:
+ mutex_unlock(&cgrp->mutex);
+ return ret;
+}
+
+int
+nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+
+ CHAN_TRACE(chan, "preempt");
+ chan->func->preempt(chan);
+ if (!wait)
+ return 0;
+
+ return nvkm_runl_preempt_wait(runl);
+}
+
+int
+nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
+{
+ int ret;
+
+ if (!chan->func->preempt)
+ return 0;
+
+ mutex_lock(&chan->cgrp->runl->mutex);
+ ret = nvkm_chan_preempt_locked(chan, wait);
+ mutex_unlock(&chan->cgrp->runl->mutex);
+ return ret;
+}
+
+void
+nvkm_chan_remove_locked(struct nvkm_chan *chan)
+{
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+ struct nvkm_runl *runl = cgrp->runl;
+
+ if (list_empty(&chan->head))
+ return;
+
+ CHAN_TRACE(chan, "remove");
+ if (!--cgrp->chan_nr) {
+ runl->cgrp_nr--;
+ list_del(&cgrp->head);
+ }
+ runl->chan_nr--;
+ list_del_init(&chan->head);
+ atomic_set(&runl->changed, 1);
+}
+
+void
+nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+
+ mutex_lock(&runl->mutex);
+ if (preempt && chan->func->preempt)
+ nvkm_chan_preempt_locked(chan, true);
+ nvkm_chan_remove_locked(chan);
+ nvkm_runl_update_locked(runl, true);
+ mutex_unlock(&runl->mutex);
+}
+
+void
+nvkm_chan_insert(struct nvkm_chan *chan)
+{
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+ struct nvkm_runl *runl = cgrp->runl;
+
+ mutex_lock(&runl->mutex);
+ if (WARN_ON(!list_empty(&chan->head))) {
+ mutex_unlock(&runl->mutex);
+ return;
+ }
+
+ CHAN_TRACE(chan, "insert");
+ list_add_tail(&chan->head, &cgrp->chans);
+ runl->chan_nr++;
+ if (!cgrp->chan_nr++) {
+ list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
+ runl->cgrp_nr++;
+ }
+ atomic_set(&runl->changed, 1);
+ nvkm_runl_update_locked(runl, true);
+ mutex_unlock(&runl->mutex);
+}
+
+static void
+nvkm_chan_block_locked(struct nvkm_chan *chan)
+{
+ CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
+ if (atomic_inc_return(&chan->blocked) == 1)
+ chan->func->stop(chan);
+}
+
+void
+nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (atomic_inc_return(&chan->errored) == 1) {
+ CHAN_ERROR(chan, "errored - disabling channel");
+ nvkm_chan_block_locked(chan);
+ if (preempt)
+ chan->func->preempt(chan);
+ nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+void
+nvkm_chan_block(struct nvkm_chan *chan)
+{
+ spin_lock_irq(&chan->lock);
+ nvkm_chan_block_locked(chan);
+ spin_unlock_irq(&chan->lock);
+}
+
+void
+nvkm_chan_allow(struct nvkm_chan *chan)
+{
+ spin_lock_irq(&chan->lock);
+ CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
+ if (atomic_dec_and_test(&chan->blocked))
+ chan->func->start(chan);
+ spin_unlock_irq(&chan->lock);
+}
+
+void
+nvkm_chan_del(struct nvkm_chan **pchan)
+{
+ struct nvkm_chan *chan = *pchan;
+
+ if (!chan)
+ return;
+
+ if (chan->func->ramfc->clear)
+ chan->func->ramfc->clear(chan);
+
+ nvkm_ramht_del(&chan->ramht);
+ nvkm_gpuobj_del(&chan->pgd);
+ nvkm_gpuobj_del(&chan->eng);
+ nvkm_gpuobj_del(&chan->cache);
+ nvkm_gpuobj_del(&chan->ramfc);
+
+ nvkm_memory_unref(&chan->userd.mem);
+
+ if (chan->cgrp) {
+ nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+ nvkm_cgrp_unref(&chan->cgrp);
+ }
+
+ if (chan->vmm) {
+ nvkm_vmm_part(chan->vmm, chan->inst->memory);
+ nvkm_vmm_unref(&chan->vmm);
+ }
+
+ nvkm_gpuobj_del(&chan->push);
+ nvkm_gpuobj_del(&chan->inst);
+ kfree(chan);
+}
+
+void
+nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
+{
+ struct nvkm_chan *chan = *pchan;
+
+ if (!chan)
+ return;
+
+ *pchan = NULL;
+ spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
+}
+
+struct nvkm_chan *
+nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
+{
+ struct nvkm_fifo *fifo = engine->subdev.device->fifo;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ struct nvkm_chan *chan;
+
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (engine == &fifo->engine || engn->engine == engine) {
+ chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
+ if (chan || engn->engine == engine)
+ return chan;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+struct nvkm_chan *
+nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
+{
+ struct nvkm_fifo *fifo = engine->subdev.device->fifo;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn(engn, runl) {
+ if (fifo->chid || engn->engine == engine)
+ return nvkm_runl_chan_get_chid(runl, id, pirqflags);
+ }
+ }
+
+ return NULL;
+}
+
+int
+nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int runq,
+ struct nvkm_cgrp *cgrp, const char *name, bool priv, u32 devm, struct nvkm_vmm *vmm,
+ struct nvkm_dmaobj *dmaobj, u64 offset, u64 length,
+ struct nvkm_memory *userd, u64 ouserd, struct nvkm_chan **pchan)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_chan *chan;
+ int ret;
+
+ /* Validate arguments against class requirements. */
+ if ((runq && runq >= runl->func->runqs) ||
+ (!func->inst->vmm != !vmm) ||
+ ((func->userd->bar < 0) == !userd) ||
+ (!func->ramfc->ctxdma != !dmaobj) ||
+ ((func->ramfc->devm < devm) && devm != BIT(0)) ||
+ (!func->ramfc->priv && priv)) {
+ RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
+ "push:%d:%p devm:%08x:%08x priv:%d:%d",
+ runl->func->runqs, runq, func->inst->vmm, vmm,
+ func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+ func->ramfc->devm, devm, func->ramfc->priv, priv);
+ return -EINVAL;
+ }
+
+ if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
+ return -ENOMEM;
+
+ chan->func = func;
+ strscpy(chan->name, name, sizeof(chan->name));
+ chan->runq = runq;
+ chan->id = -1;
+ spin_lock_init(&chan->lock);
+ atomic_set(&chan->blocked, 1);
+ atomic_set(&chan->errored, 0);
+ INIT_LIST_HEAD(&chan->cctxs);
+ INIT_LIST_HEAD(&chan->head);
+
+ /* Join channel group.
+ *
+ * GK110 and newer support channel groups (aka TSGs), where individual channels
+ * share a timeslice, and, engine context(s).
+ *
+ * As such, engine contexts are tracked in nvkm_cgrp and we need them even when
+ * channels aren't in an API channel group, and on HW that doesn't support TSGs.
+ */
+ if (!cgrp) {
+ ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
+ if (ret) {
+ RUNL_DEBUG(runl, "cgrp %d", ret);
+ return ret;
+ }
+
+ cgrp = chan->cgrp;
+ } else {
+ if (cgrp->runl != runl || cgrp->vmm != vmm) {
+ RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
+ return -EINVAL;
+ }
+
+ chan->cgrp = nvkm_cgrp_ref(cgrp);
+ }
+
+ /* Allocate instance block. */
+ ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
+ &chan->inst);
+ if (ret) {
+ RUNL_DEBUG(runl, "inst %d", ret);
+ return ret;
+ }
+
+ /* Initialise virtual address-space. */
+ if (func->inst->vmm) {
+ if (WARN_ON(vmm->mmu != device->mmu))
+ return -EINVAL;
+
+ ret = nvkm_vmm_join(vmm, chan->inst->memory);
+ if (ret) {
+ RUNL_DEBUG(runl, "vmm %d", ret);
+ return ret;
+ }
+
+ chan->vmm = nvkm_vmm_ref(vmm);
+ }
+
+ /* Allocate HW ctxdma for push buffer. */
+ if (func->ramfc->ctxdma) {
+ ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
+ if (ret) {
+ RUNL_DEBUG(runl, "bind %d", ret);
+ return ret;
+ }
+ }
+
+ /* Allocate channel ID. */
+ chan->id = nvkm_chid_get(runl->chid, chan);
+ if (chan->id < 0) {
+ RUNL_ERROR(runl, "!chids");
+ return -ENOSPC;
+ }
+
+ if (cgrp->id < 0)
+ cgrp->id = chan->id;
+
+ /* Initialise USERD. */
+ if (func->userd->bar < 0) {
+ if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) {
+ RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+ return -EINVAL;
+ }
+
+ ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+ if (ret) {
+ RUNL_DEBUG(runl, "userd %d", ret);
+ return ret;
+ }
+
+ chan->userd.base = ouserd;
+ } else {
+ chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+ chan->userd.base = chan->id * chan->func->userd->size;
+ }
+
+ if (chan->func->userd->clear)
+ chan->func->userd->clear(chan);
+
+ /* Initialise RAMFC. */
+ ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
+ if (ret) {
+ RUNL_DEBUG(runl, "ramfc %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
new file mode 100644
index 000000000..85b94f699
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_CHAN_H__
+#define __NVKM_CHAN_H__
+#include <engine/fifo.h>
+struct nvkm_dmaobj;
+struct nvkm_engn;
+struct nvkm_runl;
+
+extern const struct nvkm_event_func nvkm_chan_event;
+
+struct nvkm_cctx {
+ struct nvkm_vctx *vctx;
+ refcount_t refs;
+ refcount_t uses;
+
+ struct list_head head;
+};
+
+struct nvkm_chan_func {
+ const struct nvkm_chan_func_inst {
+ u32 size;
+ bool zero;
+ bool vmm;
+ } *inst;
+
+ const struct nvkm_chan_func_userd {
+ int bar;
+ u32 base;
+ u32 size;
+ void (*clear)(struct nvkm_chan *);
+ } *userd;
+
+ const struct nvkm_chan_func_ramfc {
+ const struct nvkm_ramfc_layout {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+ } *layout;
+ int (*write)(struct nvkm_chan *, u64 offset, u64 length, u32 devm, bool priv);
+ void (*clear)(struct nvkm_chan *);
+ bool ctxdma;
+ u32 devm;
+ bool priv;
+ } *ramfc;
+
+ void (*bind)(struct nvkm_chan *);
+ void (*unbind)(struct nvkm_chan *);
+ void (*start)(struct nvkm_chan *);
+ void (*stop)(struct nvkm_chan *);
+ void (*preempt)(struct nvkm_chan *);
+ u32 (*doorbell_handle)(struct nvkm_chan *);
+};
+
+int nvkm_chan_new_(const struct nvkm_chan_func *, struct nvkm_runl *, int runq, struct nvkm_cgrp *,
+ const char *name, bool priv, u32 devm, struct nvkm_vmm *, struct nvkm_dmaobj *,
+ u64 offset, u64 length, struct nvkm_memory *userd, u64 userd_bar1,
+ struct nvkm_chan **);
+void nvkm_chan_del(struct nvkm_chan **);
+void nvkm_chan_allow(struct nvkm_chan *);
+void nvkm_chan_block(struct nvkm_chan *);
+void nvkm_chan_error(struct nvkm_chan *, bool preempt);
+void nvkm_chan_insert(struct nvkm_chan *);
+void nvkm_chan_remove(struct nvkm_chan *, bool preempt);
+void nvkm_chan_remove_locked(struct nvkm_chan *);
+int nvkm_chan_preempt(struct nvkm_chan *, bool wait);
+int nvkm_chan_preempt_locked(struct nvkm_chan *, bool wait);
+int nvkm_chan_cctx_get(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx **,
+ struct nvkm_client * /*TODO: remove need for this */);
+void nvkm_chan_cctx_put(struct nvkm_chan *, struct nvkm_cctx **);
+void nvkm_chan_cctx_bind(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx *);
+
+#define CHAN_PRCLI(c,l,p,f,a...) CGRP_PRINT((c)->cgrp, l, p, "%04x:[%s]"f, (c)->id, (c)->name, ##a)
+#define CHAN_PRINT(c,l,p,f,a...) CGRP_PRINT((c)->cgrp, l, p, "%04x:"f, (c)->id, ##a)
+#define CHAN_ERROR(c,f,a...) CHAN_PRCLI((c), ERROR, err, " "f"\n", ##a)
+#define CHAN_TRACE(c,f,a...) CHAN_PRINT((c), TRACE, info, " "f"\n", ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.c
new file mode 100644
index 000000000..23944d95e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "chid.h"
+
+void
+nvkm_chid_put(struct nvkm_chid *chid, int id, spinlock_t *data_lock)
+{
+ if (id >= 0) {
+ spin_lock_irq(&chid->lock);
+ spin_lock(data_lock);
+ chid->data[id] = NULL;
+ spin_unlock(data_lock);
+ clear_bit(id, chid->used);
+ spin_unlock_irq(&chid->lock);
+ }
+}
+
+int
+nvkm_chid_get(struct nvkm_chid *chid, void *data)
+{
+ int id = -1, cid;
+
+ spin_lock_irq(&chid->lock);
+ cid = find_first_zero_bit(chid->used, chid->nr);
+ if (cid < chid->nr) {
+ set_bit(cid, chid->used);
+ chid->data[cid] = data;
+ id = cid;
+ }
+ spin_unlock_irq(&chid->lock);
+ return id;
+}
+
+static void
+nvkm_chid_del(struct kref *kref)
+{
+ struct nvkm_chid *chid = container_of(kref, typeof(*chid), kref);
+
+ nvkm_event_fini(&chid->event);
+
+ kvfree(chid->data);
+ kfree(chid);
+}
+
+void
+nvkm_chid_unref(struct nvkm_chid **pchid)
+{
+ struct nvkm_chid *chid = *pchid;
+
+ if (!chid)
+ return;
+
+ kref_put(&chid->kref, nvkm_chid_del);
+ *pchid = NULL;
+}
+
+struct nvkm_chid *
+nvkm_chid_ref(struct nvkm_chid *chid)
+{
+ if (chid)
+ kref_get(&chid->kref);
+
+ return chid;
+}
+
+int
+nvkm_chid_new(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
+ int nr, int first, int count, struct nvkm_chid **pchid)
+{
+ struct nvkm_chid *chid;
+ int id;
+
+ if (!(chid = *pchid = kzalloc(struct_size(chid, used, nr), GFP_KERNEL)))
+ return -ENOMEM;
+
+ kref_init(&chid->kref);
+ chid->nr = nr;
+ chid->mask = chid->nr - 1;
+ spin_lock_init(&chid->lock);
+
+ if (!(chid->data = kvzalloc(sizeof(*chid->data) * nr, GFP_KERNEL))) {
+ nvkm_chid_unref(pchid);
+ return -ENOMEM;
+ }
+
+ for (id = 0; id < first; id++)
+ __set_bit(id, chid->used);
+ for (id = first + count; id < nr; id++)
+ __set_bit(id, chid->used);
+
+ return nvkm_event_init(func, subdev, 1, nr, &chid->event);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.h
new file mode 100644
index 000000000..2a42efb18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chid.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_CHID_H__
+#define __NVKM_CHID_H__
+#include <core/event.h>
+
+struct nvkm_chid {
+ struct kref kref;
+ int nr;
+ u32 mask;
+
+ struct nvkm_event event;
+
+ void **data;
+
+ spinlock_t lock;
+ unsigned long used[];
+};
+
+int nvkm_chid_new(const struct nvkm_event_func *, struct nvkm_subdev *,
+ int nr, int first, int count, struct nvkm_chid **pchid);
+struct nvkm_chid *nvkm_chid_ref(struct nvkm_chid *);
+void nvkm_chid_unref(struct nvkm_chid **);
+int nvkm_chid_get(struct nvkm_chid *, void *data);
+void nvkm_chid_put(struct nvkm_chid *, int id, spinlock_t *data_lock);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
new file mode 100644
index 000000000..6b229a3fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static void
+g84_chan_bind(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x002600 + (chan->id * 4), chan->ramfc->addr >> 8);
+}
+
+static int
+g84_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+ const u32 limit2 = ilog2(length / 8);
+ int ret;
+
+ ret = nvkm_gpuobj_new(device, 0x0200, 0, true, chan->inst, &chan->eng);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->inst, &chan->pgd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, chan->inst, &chan->cache);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, chan->inst, &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nvkm_ramht_new(device, 0x8000, 16, chan->inst, &chan->ramht);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(offset));
+ nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(offset) | (limit2 << 16));
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000000 | devm);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10);
+ nvkm_wo32(chan->ramfc, 0x98, chan->inst->addr >> 12);
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+g84_chan_ramfc = {
+ .write = g84_chan_ramfc_write,
+ .ctxdma = true,
+ .devm = 0xfff,
+};
+
+const struct nvkm_chan_func
+g84_chan = {
+ .inst = &nv50_chan_inst,
+ .userd = &nv50_chan_userd,
+ .ramfc = &g84_chan_ramfc,
+ .bind = g84_chan_bind,
+ .unbind = nv50_chan_unbind,
+ .start = nv50_chan_start,
+ .stop = nv50_chan_stop,
+};
+
+static void
+g84_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ struct nvkm_subdev *subdev = &chan->cgrp->runl->fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u64 start = 0, limit = 0;
+ u32 flags = 0, ptr0, save;
+
+ switch (engn->engine->subdev.type) {
+ case NVKM_ENGINE_GR : ptr0 = 0x0020; break;
+ case NVKM_ENGINE_VP :
+ case NVKM_ENGINE_MSPDEC: ptr0 = 0x0040; break;
+ case NVKM_ENGINE_MPEG :
+ case NVKM_ENGINE_MSPPP : ptr0 = 0x0060; break;
+ case NVKM_ENGINE_BSP :
+ case NVKM_ENGINE_MSVLD : ptr0 = 0x0080; break;
+ case NVKM_ENGINE_CIPHER:
+ case NVKM_ENGINE_SEC : ptr0 = 0x00a0; break;
+ case NVKM_ENGINE_CE : ptr0 = 0x00c0; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (!cctx) {
+ save = nvkm_mask(device, 0x002520, 0x0000003f, BIT(engn->id - 1));
+ nvkm_wr32(device, 0x0032fc, chan->inst->addr >> 12);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ );
+ nvkm_wr32(device, 0x002520, save);
+ } else {
+ flags = 0x00190000;
+ start = cctx->vctx->inst->addr;
+ limit = start + cctx->vctx->inst->size - 1;
+ }
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, ptr0 + 0x00, flags);
+ nvkm_wo32(chan->eng, ptr0 + 0x04, lower_32_bits(limit));
+ nvkm_wo32(chan->eng, ptr0 + 0x08, lower_32_bits(start));
+ nvkm_wo32(chan->eng, ptr0 + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nvkm_wo32(chan->eng, ptr0 + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, ptr0 + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+}
+
+const struct nvkm_engn_func
+g84_engn = {
+ .bind = g84_ectx_bind,
+ .ramht_add = nv50_eobj_ramht_add,
+ .ramht_del = nv50_eobj_ramht_del,
+};
+
+static void
+g84_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x40000000, 0x00000000);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+static void
+g84_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x40000000, 0x40000000);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+const struct nvkm_event_func
+g84_fifo_nonstall = {
+ .init = g84_fifo_nonstall_allow,
+ .fini = g84_fifo_nonstall_block,
+};
+
+static int
+g84_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+
+ runl = nvkm_runl_new(fifo, 0, 0, 0);
+ if (IS_ERR(runl))
+ return PTR_ERR(runl);
+
+ nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
+ nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
+ nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_GR, 0);
+ nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MPEG, 0);
+ nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_ME, 0);
+ nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_VP, 0);
+ nvkm_runl_add(runl, 5, fifo->func->engn, NVKM_ENGINE_CIPHER, 0);
+ nvkm_runl_add(runl, 6, fifo->func->engn, NVKM_ENGINE_BSP, 0);
+ return 0;
+}
+
+static const struct nvkm_fifo_func
+g84_fifo = {
+ .chid_nr = nv50_fifo_chid_nr,
+ .chid_ctor = nv50_fifo_chid_ctor,
+ .runl_ctor = g84_fifo_runl_ctor,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .nonstall = &g84_fifo_nonstall,
+ .runl = &nv50_runl,
+ .engn = &g84_engn,
+ .engn_sw = &nv50_engn_sw,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, G82_CHANNEL_GPFIFO }, &g84_chan },
+};
+
+int
+g84_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&g84_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g98.c
new file mode 100644
index 000000000..c6ca050c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g98.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+#include "runl.h"
+
+#include <nvif/class.h>
+
+static int
+g98_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+
+ runl = nvkm_runl_new(fifo, 0, 0, 0);
+ if (IS_ERR(runl))
+ return PTR_ERR(runl);
+
+ nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
+ nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
+ nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_GR, 0);
+ nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MSPPP, 0);
+ nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_CE, 0);
+ nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_runl_add(runl, 5, fifo->func->engn, NVKM_ENGINE_SEC, 0);
+ nvkm_runl_add(runl, 6, fifo->func->engn, NVKM_ENGINE_MSVLD, 0);
+ return 0;
+}
+
+static const struct nvkm_fifo_func
+g98_fifo = {
+ .chid_nr = nv50_fifo_chid_nr,
+ .chid_ctor = nv50_fifo_chid_ctor,
+ .runl_ctor = g98_fifo_runl_ctor,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .nonstall = &g84_fifo_nonstall,
+ .runl = &nv50_runl,
+ .engn = &g84_engn,
+ .engn_sw = &nv50_engn_sw,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, G82_CHANNEL_GPFIFO }, &g84_chan },
+};
+
+int
+g98_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&g98_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
new file mode 100644
index 000000000..12a5d99d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+#include "runq.h"
+
+#include <core/gpuobj.h>
+#include <subdev/top.h>
+#include <subdev/vfn.h>
+
+#include <nvif/class.h>
+
+/*TODO: allocate? */
+#define GA100_FIFO_NONSTALL_VECTOR 0
+
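+/* USERD doorbell token: runlist doorbell index in the upper 16 bits, channel
+ * ID in the lower bits.
+ */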
+static u32
+ga100_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return (chan->cgrp->runl->doorbell << 16) | chan->id;
+}
+
+static void
+ga100_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+
+ nvkm_wr32(runl->fifo->engine.subdev.device, runl->chan + (chan->id * 4), 0x00000003);
+}
+
+static void
+ga100_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ const int gfid = 0;
+
+ nvkm_wr32(device, runl->chan + (chan->id * 4), 0x00000002);
+ nvkm_wr32(device, runl->addr + 0x0090, (gfid << 16) | chan->id); /* INTERNAL_DOORBELL. */
+}
+
+static void
+ga100_chan_unbind(struct nvkm_chan *chan)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+
+ nvkm_wr32(runl->fifo->engine.subdev.device, runl->chan + (chan->id * 4), 0xffffffff);
+}
+
+static int
+ga100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ const u32 limit2 = ilog2(length / 8);
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x010, 0x0000face);
+ nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+ nvkm_wo32(chan->inst, 0x048, lower_32_bits(offset));
+ nvkm_wo32(chan->inst, 0x04c, upper_32_bits(offset) | (limit2 << 16));
+ nvkm_wo32(chan->inst, 0x084, 0x20400000);
+ nvkm_wo32(chan->inst, 0x094, 0x30000000 | devm);
+ nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
+ nvkm_wo32(chan->inst, 0x0e8, chan->id);
+ nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
+ nvkm_wo32(chan->inst, 0x0f8, 0x80000000 | GA100_FIFO_NONSTALL_VECTOR);
+ nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+ nvkm_done(chan->inst);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+ga100_chan_ramfc = {
+ .write = ga100_chan_ramfc_write,
+ .devm = 0xfff,
+ .priv = true,
+};
+
+const struct nvkm_chan_func
+ga100_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gv100_chan_userd,
+ .ramfc = &ga100_chan_ramfc,
+ .unbind = ga100_chan_unbind,
+ .start = ga100_chan_start,
+ .stop = ga100_chan_stop,
+ .preempt = gk110_chan_preempt,
+ .doorbell_handle = ga100_chan_doorbell_handle,
+};
+
+static void
+ga100_cgrp_preempt(struct nvkm_cgrp *cgrp)
+{
+ struct nvkm_runl *runl = cgrp->runl;
+
+ nvkm_wr32(runl->fifo->engine.subdev.device, runl->addr + 0x098, 0x01000000 | cgrp->id);
+}
+
+const struct nvkm_cgrp_func
+ga100_cgrp = {
+ .preempt = ga100_cgrp_preempt,
+};
+
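+/* Decode the engine's context-switch status to determine which channel-group
+ * context is resident, picking between the outgoing and incoming IDs when a
+ * save, load or switch is in flight.
+ */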
+static int
+ga100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
+{
+ struct nvkm_runl *runl = engn->runl;
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ u32 stat = nvkm_rd32(device, runl->addr + 0x200 + engn->id * 0x40);
+
+ ENGN_DEBUG(engn, "status %08x", stat);
+ *cgid = true;
+
+ switch ((stat & 0x0000e000) >> 13) {
+ case 0 /* INVALID */: return -ENODEV;
+ case 1 /* VALID */:
+ case 5 /* SAVE */: return (stat & 0x00000fff);
+ case 6 /* LOAD */: return (stat & 0x0fff0000) >> 16;
+ case 7 /* SWITCH */:
+ if (nvkm_engine_chsw_load(engn->engine))
+ return (stat & 0x0fff0000) >> 16;
+ return (stat & 0x00000fff);
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -ENODEV;
+}
+
+const struct nvkm_engn_func
+ga100_engn = {
+ .cxid = ga100_engn_cxid,
+ .ctor = gk104_ectx_ctor,
+ .bind = gv100_ectx_bind,
+};
+
+const struct nvkm_engn_func
+ga100_engn_ce = {
+ .cxid = ga100_engn_cxid,
+ .ctor = gv100_ectx_ce_ctor,
+ .bind = gv100_ectx_ce_bind,
+};
+
+static bool
+ga100_runq_idle(struct nvkm_runq *runq)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+
+ return !(nvkm_rd32(device, 0x04015c + (runq->id * 0x800)) & 0x0000e000);
+}
+
+static bool
+ga100_runq_intr_1(struct nvkm_runq *runq, struct nvkm_runl *runl)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+ u32 inte = nvkm_rd32(device, 0x040180 + (runq->id * 0x800));
+ u32 intr = nvkm_rd32(device, 0x040148 + (runq->id * 0x800));
+ u32 stat = intr & inte;
+
+ if (!stat) {
+ RUNQ_DEBUG(runq, "inte1 %08x %08x", intr, inte);
+ return false;
+ }
+
+ if (stat & 0x80000000) {
+ u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x0800)) & runl->chid->mask;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ RUNQ_ERROR(runq, "CTXNOTVALID chid:%d", chid);
+ chan = nvkm_runl_chan_get_chid(runl, chid, &flags);
+ if (chan) {
+ nvkm_chan_error(chan, true);
+ nvkm_chan_put(&chan, flags);
+ }
+
+ nvkm_mask(device, 0x0400ac + (runq->id * 0x800), 0x00030000, 0x00030000);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ RUNQ_ERROR(runq, "intr1 %08x", stat);
+ nvkm_wr32(device, 0x0401a0 + (runq->id * 0x800), stat);
+ }
+
+ nvkm_wr32(device, 0x040148 + (runq->id * 0x800), intr);
+ return true;
+}
+
+static bool
+ga100_runq_intr_0(struct nvkm_runq *runq, struct nvkm_runl *runl)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+ u32 inte = nvkm_rd32(device, 0x040170 + (runq->id * 0x800));
+ u32 intr = nvkm_rd32(device, 0x040108 + (runq->id * 0x800));
+ u32 stat = intr & inte;
+
+ if (!stat) {
+ RUNQ_DEBUG(runq, "inte0 %08x %08x", intr, inte);
+ return false;
+ }
+
+ /*TODO: expand on this when fixing up gf100's version. */
+ if (stat & 0xc6afe000) {
+ u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x0800)) & runl->chid->mask;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ RUNQ_ERROR(runq, "intr0 %08x", stat);
+ chan = nvkm_runl_chan_get_chid(runl, chid, &flags);
+ if (chan) {
+ nvkm_chan_error(chan, true);
+ nvkm_chan_put(&chan, flags);
+ }
+
+ stat &= ~0xc6afe000;
+ }
+
+ if (stat) {
+ RUNQ_ERROR(runq, "intr0 %08x", stat);
+ nvkm_wr32(device, 0x040190 + (runq->id * 0x800), stat);
+ }
+
+ nvkm_wr32(device, 0x040108 + (runq->id * 0x800), intr);
+ return true;
+}
+
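+/* Service both PBDMA interrupt trees; report handled if either had anything
+ * pending.
+ */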
+static bool
+ga100_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *runl)
+{
+ bool intr0 = ga100_runq_intr_0(runq, runl);
+ bool intr1 = ga100_runq_intr_1(runq, runl);
+
+ return intr0 || intr1;
+}
+
+static void
+ga100_runq_init(struct nvkm_runq *runq)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x040108 + (runq->id * 0x800), 0xffffffff); /* INTR_0 */
+ nvkm_wr32(device, 0x040148 + (runq->id * 0x800), 0xffffffff); /* INTR_1 */
+ nvkm_wr32(device, 0x040170 + (runq->id * 0x800), 0xffffffff); /* INTR_0_EN_SET_TREE */
+ nvkm_wr32(device, 0x040180 + (runq->id * 0x800), 0xffffffff); /* INTR_1_EN_SET_TREE */
+}
+
+const struct nvkm_runq_func
+ga100_runq = {
+ .init = ga100_runq_init,
+ .intr = ga100_runq_intr,
+ .idle = ga100_runq_idle,
+};
+
+static bool
+ga100_runl_preempt_pending(struct nvkm_runl *runl)
+{
+ return nvkm_rd32(runl->fifo->engine.subdev.device, runl->addr + 0x098) & 0x00100000;
+}
+
+static void
+ga100_runl_preempt(struct nvkm_runl *runl)
+{
+ nvkm_wr32(runl->fifo->engine.subdev.device, runl->addr + 0x098, 0x00000000);
+}
+
+static void
+ga100_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, runl->addr + 0x094, 0x00000001, 0x00000000);
+}
+
+static void
+ga100_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, runl->addr + 0x094, 0x00000001, 0x00000001);
+}
+
+static bool
+ga100_runl_pending(struct nvkm_runl *runl)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+
+ return nvkm_rd32(device, runl->addr + 0x08c) & 0x00008000;
+}
+
+static void
+ga100_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ u64 addr = nvkm_memory_addr(memory) + start;
+
+ nvkm_wr32(device, runl->addr + 0x080, lower_32_bits(addr));
+ nvkm_wr32(device, runl->addr + 0x084, upper_32_bits(addr));
+ nvkm_wr32(device, runl->addr + 0x088, count);
+}
+
+static irqreturn_t
+ga100_runl_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_runl *runl = container_of(inth, typeof(*runl), inth);
+ struct nvkm_engn *engn;
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ u32 inte = nvkm_rd32(device, runl->addr + 0x120);
+ u32 intr = nvkm_rd32(device, runl->addr + 0x100);
+ u32 stat = intr & inte;
+ u32 info;
+
+ if (!stat) {
+ RUNL_DEBUG(runl, "inte %08x %08x", intr, inte);
+ return IRQ_NONE;
+ }
+
+ if (stat & 0x00000007) {
+ nvkm_runl_foreach_engn_cond(engn, runl, stat & BIT(engn->id)) {
+ info = nvkm_rd32(device, runl->addr + 0x224 + (engn->id * 0x40));
+
+ tu102_fifo_intr_ctxsw_timeout_info(engn, info);
+
+ nvkm_wr32(device, runl->addr + 0x100, BIT(engn->id));
+ stat &= ~BIT(engn->id);
+ }
+ }
+
+ if (stat & 0x00000300) {
+ nvkm_wr32(device, runl->addr + 0x100, stat & 0x00000300);
+ stat &= ~0x00000300;
+ }
+
+ if (stat & 0x00010000) {
+ if (runl->runq[0]) {
+ if (runl->runq[0]->func->intr(runl->runq[0], runl))
+ stat &= ~0x00010000;
+ }
+ }
+
+ if (stat & 0x00020000) {
+ if (runl->runq[1]) {
+ if (runl->runq[1]->func->intr(runl->runq[1], runl))
+ stat &= ~0x00020000;
+ }
+ }
+
+ if (stat) {
+ RUNL_ERROR(runl, "intr %08x", stat);
+ nvkm_wr32(device, runl->addr + 0x140, stat);
+ }
+
+ nvkm_wr32(device, runl->addr + 0x180, 0x00000001);
+ return IRQ_HANDLED;
+}
+
+static void
+ga100_runl_fini(struct nvkm_runl *runl)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, runl->addr + 0x300, 0x80000000, 0x00000000);
+ nvkm_inth_block(&runl->inth);
+}
+
+static void
+ga100_runl_init(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_runq *runq;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ int i;
+
+ /* Submit NULL runlist and preempt. */
+ nvkm_wr32(device, runl->addr + 0x088, 0x00000000);
+ runl->func->preempt(runl);
+
+ /* Enable doorbell. */
+ nvkm_mask(device, runl->addr + 0x300, 0x80000000, 0x80000000);
+
+ nvkm_wr32(device, runl->addr + 0x100, 0xffffffff); /* INTR_0 */
+ nvkm_wr32(device, runl->addr + 0x140, 0xffffffff); /* INTR_0_EN_CLEAR_TREE(0) */
+ nvkm_wr32(device, runl->addr + 0x120, 0x000f1307); /* INTR_0_EN_SET_TREE(0) */
+ nvkm_wr32(device, runl->addr + 0x148, 0xffffffff); /* INTR_0_EN_CLEAR_TREE(1) */
+ nvkm_wr32(device, runl->addr + 0x128, 0x00000000); /* INTR_0_EN_SET_TREE(1) */
+
+ /* Init PBDMA(s). */
+ for (i = 0; i < runl->runq_nr; i++) {
+ runq = runl->runq[i];
+ runq->func->init(runq);
+ }
+
+ nvkm_inth_allow(&runl->inth);
+}
+
+const struct nvkm_runl_func
+ga100_runl = {
+ .init = ga100_runl_init,
+ .fini = ga100_runl_fini,
+ .size = 16,
+ .update = nv50_runl_update,
+ .insert_cgrp = gv100_runl_insert_cgrp,
+ .insert_chan = gv100_runl_insert_chan,
+ .commit = ga100_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = ga100_runl_pending,
+ .block = ga100_runl_block,
+ .allow = ga100_runl_allow,
+ .preempt = ga100_runl_preempt,
+ .preempt_pending = ga100_runl_preempt_pending,
+};
+
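+/* Read the runlist's channel-control RAM configuration, doorbell token and
+ * interrupt vector from its register block, create runqueues for any PBDMAs
+ * assigned to it, and install its interrupt handler.
+ */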
+static int
+ga100_runl_new(struct nvkm_fifo *fifo, int id, u32 addr, struct nvkm_runl **prunl)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_runl *runl;
+ u32 chcfg = nvkm_rd32(device, addr + 0x004);
+ u32 chnum = 1 << (chcfg & 0x0000000f);
+ u32 chaddr = (chcfg & 0xfffffff0);
+ u32 dbcfg = nvkm_rd32(device, addr + 0x008);
+ u32 vector = nvkm_rd32(device, addr + 0x160);
+ int i, ret;
+
+ runl = *prunl = nvkm_runl_new(fifo, id, addr, chnum);
+ if (IS_ERR(runl))
+ return PTR_ERR(runl);
+
+ for (i = 0; i < 2; i++) {
+ u32 pbcfg = nvkm_rd32(device, addr + 0x010 + (i * 0x04));
+ if (pbcfg & 0x80000000) {
+ runl->runq[runl->runq_nr] =
+ nvkm_runq_new(fifo, ((pbcfg & 0x03fffc00) - 0x040000) / 0x800);
+ if (!runl->runq[runl->runq_nr])
+ return -ENOMEM;
+
+ runl->runq_nr++;
+ }
+ }
+
+ ret = nvkm_inth_add(&device->vfn->intr, vector & 0x00000fff, NVKM_INTR_PRIO_NORMAL,
+ &fifo->engine.subdev, ga100_runl_intr, &runl->inth);
+ if (ret)
+ return ret;
+
+ runl->chan = chaddr;
+ runl->doorbell = dbcfg >> 16;
+ return 0;
+}
+
+static irqreturn_t
+ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), nonstall.intr);
+
+ nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+ return IRQ_HANDLED;
+}
+
+static void
+ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+
+ nvkm_inth_block(&fifo->nonstall.intr);
+}
+
+static void
+ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+
+ nvkm_inth_allow(&fifo->nonstall.intr);
+}
+
+const struct nvkm_event_func
+ga100_fifo_nonstall = {
+ .init = ga100_fifo_nonstall_allow,
+ .fini = ga100_fifo_nonstall_block,
+};
+
+int
+ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
+{
+ return nvkm_inth_add(&fifo->engine.subdev.device->vfn->intr, GA100_FIFO_NONSTALL_VECTOR,
+ NVKM_INTR_PRIO_NORMAL, &fifo->engine.subdev, ga100_fifo_nonstall_intr,
+ &fifo->nonstall.intr);
+}
+
+int
+ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_top_device *tdev;
+ struct nvkm_runl *runl;
+ int id = 0, ret;
+
+ nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
+ runl = nvkm_runl_get(fifo, -1, tdev->runlist);
+ if (!runl) {
+ ret = ga100_runl_new(fifo, id++, tdev->runlist, &runl);
+ if (ret)
+ return ret;
+ }
+
+ if (tdev->engine < 0)
+ continue;
+
+ nvkm_runl_add(runl, tdev->engine, (tdev->type == NVKM_ENGINE_CE) ?
+ fifo->func->engn_ce : fifo->func->engn, tdev->type, tdev->inst);
+ }
+
+ return 0;
+}
+
+static const struct nvkm_fifo_func
+ga100_fifo = {
+ .runl_ctor = ga100_fifo_runl_ctor,
+ .mmu_fault = &tu102_fifo_mmu_fault,
+ .nonstall_ctor = ga100_fifo_nonstall_ctor,
+ .nonstall = &ga100_fifo_nonstall,
+ .runl = &ga100_runl,
+ .runq = &ga100_runq,
+ .engn = &ga100_engn,
+ .engn_ce = &ga100_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &ga100_cgrp, .force = true },
+ .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_A }, &ga100_chan },
+};
+
+int
+ga100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&ga100_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
new file mode 100644
index 000000000..2cdf5da33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_fifo_func
+ga102_fifo = {
+ .runl_ctor = ga100_fifo_runl_ctor,
+ .mmu_fault = &tu102_fifo_mmu_fault,
+ .nonstall_ctor = ga100_fifo_nonstall_ctor,
+ .nonstall = &ga100_fifo_nonstall,
+ .runl = &ga100_runl,
+ .runq = &ga100_runq,
+ .engn = &ga100_engn,
+ .engn_ce = &ga100_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &ga100_cgrp, .force = true },
+ .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_B }, &ga100_chan },
+};
+
+int
+ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&ga102_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
new file mode 100644
index 000000000..5bb65258c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+#include "runq.h"
+
+#include <core/gpuobj.h>
+#include <subdev/bar.h>
+#include <subdev/fault.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <engine/sw.h>
+
+#include <nvif/class.h>
+
+void
+gf100_chan_preempt(struct nvkm_chan *chan)
+{
+ nvkm_wr32(chan->cgrp->runl->fifo->engine.subdev.device, 0x002634, chan->id);
+}
+
+static void
+gf100_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+}
+
+static void
+gf100_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x003004 + (chan->id * 8), 0x001f0001);
+}
+
+static void gf100_fifo_intr_engine(struct nvkm_fifo *);
+
+static void
+gf100_chan_unbind(struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+
+ /*TODO: Is this cargo-culted, or necessary? RM does *something* here... Why? */
+ gf100_fifo_intr_engine(fifo);
+
+ nvkm_wr32(device, 0x003000 + (chan->id * 8), 0x00000000);
+}
+
+static void
+gf100_chan_bind(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x003000 + (chan->id * 8), 0xc0000000 | chan->inst->addr >> 12);
+}
+
+static int
+gf100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ const u32 limit2 = ilog2(length / 8);
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
+ nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
+ nvkm_wo32(chan->inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
+ nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
+ nvkm_wo32(chan->inst, 0x54, 0x00000002);
+ nvkm_wo32(chan->inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
+ nvkm_wo32(chan->inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->inst, 0xa4, 0x1f1f1f1f);
+ nvkm_wo32(chan->inst, 0xa8, 0x1f1f1f1f);
+ nvkm_wo32(chan->inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->inst);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+gf100_chan_ramfc = {
+ .write = gf100_chan_ramfc_write,
+ .devm = 0xfff,
+};
+
+void
+gf100_chan_userd_clear(struct nvkm_chan *chan)
+{
+ nvkm_kmap(chan->userd.mem);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x040, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x044, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x048, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x04c, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x050, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x058, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x05c, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x060, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x088, 0x00000000);
+ nvkm_wo32(chan->userd.mem, chan->userd.base + 0x08c, 0x00000000);
+ nvkm_done(chan->userd.mem);
+}
+
+static const struct nvkm_chan_func_userd
+gf100_chan_userd = {
+ .bar = 1,
+ .size = 0x1000,
+ .clear = gf100_chan_userd_clear,
+};
+
+const struct nvkm_chan_func_inst
+gf100_chan_inst = {
+ .size = 0x1000,
+ .zero = true,
+ .vmm = true,
+};
+
+static const struct nvkm_chan_func
+gf100_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gf100_chan_userd,
+ .ramfc = &gf100_chan_ramfc,
+ .bind = gf100_chan_bind,
+ .unbind = gf100_chan_unbind,
+ .start = gf100_chan_start,
+ .stop = gf100_chan_stop,
+ .preempt = gf100_chan_preempt,
+};
+
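+/* Write the engine context's VMM address into the engine's fixed slot in the
+ * channel's instance block; a NULL cctx clears the slot on unbind.
+ */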
+static void
+gf100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ u64 addr = 0ULL;
+ u32 ptr0;
+
+ switch (engn->engine->subdev.type) {
+ case NVKM_ENGINE_SW : return;
+ case NVKM_ENGINE_GR : ptr0 = 0x0210; break;
+ case NVKM_ENGINE_CE : ptr0 = 0x0230 + (engn->engine->subdev.inst * 0x10); break;
+ case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
+ case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
+ case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (cctx) {
+ addr = cctx->vctx->vma->addr;
+ addr |= 4ULL;
+ }
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
+ nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
+ nvkm_done(chan->inst);
+}
+
+static int
+gf100_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
+{
+ int ret;
+
+ ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
+}
+
+bool
+gf100_engn_mmu_fault_triggered(struct nvkm_engn *engn)
+{
+ struct nvkm_runl *runl = engn->runl;
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 data = nvkm_rd32(device, 0x002a30 + (engn->id * 4));
+
+ ENGN_DEBUG(engn, "%08x: mmu fault triggered", data);
+ if (!(data & 0x00000100))
+ return false;
+
+ spin_lock(&fifo->lock);
+ nvkm_mask(device, 0x002a30 + (engn->id * 4), 0x00000100, 0x00000000);
+ if (atomic_dec_and_test(&runl->rc_triggered))
+ nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
+ spin_unlock(&fifo->lock);
+ return true;
+}
+
+void
+gf100_engn_mmu_fault_trigger(struct nvkm_engn *engn)
+{
+ struct nvkm_runl *runl = engn->runl;
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+
+ ENGN_DEBUG(engn, "triggering mmu fault on 0x%02x", engn->fault);
+ spin_lock(&fifo->lock);
+ if (atomic_inc_return(&runl->rc_triggered) == 1)
+ nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ nvkm_wr32(device, 0x002a30 + (engn->id * 4), 0x00000100 | engn->fault);
+ spin_unlock(&fifo->lock);
+}
+
+/*TODO: clean all this up. */
+struct gf100_engn_status {
+ bool busy;
+ bool save;
+ bool unk0;
+ bool unk1;
+ u8 chid;
+};
+
+static void
+gf100_engn_status(struct nvkm_engn *engn, struct gf100_engn_status *status)
+{
+ u32 stat = nvkm_rd32(engn->engine->subdev.device, 0x002640 + (engn->id * 4));
+
+ status->busy = (stat & 0x10000000);
+ status->save = (stat & 0x00100000);
+ status->unk0 = (stat & 0x00004000);
+ status->unk1 = (stat & 0x00001000);
+ status->chid = (stat & 0x0000007f);
+
+ ENGN_DEBUG(engn, "%08x: busy %d save %d unk0 %d unk1 %d chid %d",
+ stat, status->busy, status->save, status->unk0, status->unk1, status->chid);
+}
+
+static int
+gf100_engn_cxid(struct nvkm_engn *engn, bool *cgid)
+{
+ struct gf100_engn_status status;
+
+ gf100_engn_status(engn, &status);
+ if (status.busy) {
+ *cgid = false;
+ return status.chid;
+ }
+
+ return -ENODEV;
+}
+
+static bool
+gf100_engn_chsw(struct nvkm_engn *engn)
+{
+ struct gf100_engn_status status;
+
+ gf100_engn_status(engn, &status);
+ if (status.busy && (status.unk0 || status.unk1))
+ return true;
+
+ return false;
+}
+
+static const struct nvkm_engn_func
+gf100_engn = {
+ .chsw = gf100_engn_chsw,
+ .cxid = gf100_engn_cxid,
+ .mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
+ .mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
+ .ctor = gf100_ectx_ctor,
+ .bind = gf100_ectx_bind,
+};
+
+const struct nvkm_engn_func
+gf100_engn_sw = {
+};
+
+static const struct nvkm_bitfield
+gf100_runq_intr_0_names[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+bool
+gf100_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
+{
+ struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x04010c + (runq->id * 0x2000));
+ u32 stat = nvkm_rd32(device, 0x040108 + (runq->id * 0x2000)) & mask;
+ u32 addr = nvkm_rd32(device, 0x0400c0 + (runq->id * 0x2000));
+ u32 data = nvkm_rd32(device, 0x0400c4 + (runq->id * 0x2000));
+ u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & runq->fifo->chid->mask;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+ char msg[128];
+
+ if (stat & 0x00800000) {
+ if (device->sw) {
+ if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
+ show &= ~0x00800000;
+ }
+ }
+
+ if (show) {
+ nvkm_snprintbf(msg, sizeof(msg), runq->func->intr_0_names, show);
+ chan = nvkm_chan_get_chid(&runq->fifo->engine, chid, &flags);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
+ "subc %d mthd %04x data %08x\n",
+ runq->id, show, msg, chid, chan ? chan->inst->addr : 0,
+ chan ? chan->name : "unknown", subc, mthd, data);
+
+ /*TODO: use proper procedure for clearing each exception / debug output */
+ if ((stat & 0xc67fe000) && chan)
+ nvkm_chan_error(chan, true);
+ nvkm_chan_put(&chan, flags);
+ }
+
+ nvkm_wr32(device, 0x0400c0 + (runq->id * 0x2000), 0x80600008);
+ nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), stat);
+ return true;
+}
+
+void
+gf100_runq_init(struct nvkm_runq *runq)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x04013c + (runq->id * 0x2000), 0x10000100, 0x00000000);
+ nvkm_wr32(device, 0x040108 + (runq->id * 0x2000), 0xffffffff); /* INTR */
+ nvkm_wr32(device, 0x04010c + (runq->id * 0x2000), 0xfffffeff); /* INTREN */
+}
+
+static const struct nvkm_runq_func
+gf100_runq = {
+ .init = gf100_runq_init,
+ .intr = gf100_runq_intr,
+ .intr_0_names = gf100_runq_intr_0_names,
+};
+
+bool
+gf100_runl_preempt_pending(struct nvkm_runl *runl)
+{
+ return nvkm_rd32(runl->fifo->engine.subdev.device, 0x002634) & 0x00100000;
+}
+
+static void
+gf100_runl_fault_clear(struct nvkm_runl *runl)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x00262c, 0x00000000, 0x00000000);
+}
+
+static void
+gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000);
+}
+
+static void
+gf100_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm);
+}
+
+static bool
+gf100_runl_pending(struct nvkm_runl *runl)
+{
+ return nvkm_rd32(runl->fifo->engine.subdev.device, 0x00227c) & 0x00100000;
+}
+
+static void
+gf100_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ u64 addr = nvkm_memory_addr(memory) + start;
+ int target;
+
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
+ nvkm_wr32(device, 0x002274, 0x01f00000 | count);
+}
+
+static void
+gf100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset + 0, chan->id);
+ nvkm_wo32(memory, offset + 4, 0x00000004);
+}
+
+static const struct nvkm_runl_func
+gf100_runl = {
+ .size = 8,
+ .update = nv50_runl_update,
+ .insert_chan = gf100_runl_insert_chan,
+ .commit = gf100_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = gf100_runl_pending,
+ .block = gf100_runl_block,
+ .allow = gf100_runl_allow,
+ .fault_clear = gf100_runl_fault_clear,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+static void
+gf100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x80000000);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+gf100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
+{
+ struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_mask(fifo->engine.subdev.device, 0x002140, 0x80000000, 0x00000000);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+const struct nvkm_event_func
+gf100_fifo_nonstall = {
+ .init = gf100_fifo_nonstall_allow,
+ .fini = gf100_fifo_nonstall_block,
+};
+
+static const struct nvkm_enum
+gf100_fifo_mmu_fault_engine[] = {
+ { 0x00, "PGRAPH", NULL, NVKM_ENGINE_GR },
+ { 0x03, "PEEPHOLE", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x07, "PFIFO" },
+ { 0x10, "PMSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "PMSPPP", NULL, NVKM_ENGINE_MSPPP },
+ { 0x13, "PCOUNTER" },
+ { 0x14, "PMSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "PCE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "PCE1", NULL, NVKM_ENGINE_CE, 1 },
+ { 0x17, "PMU" },
+ {}
+};
+
+static const struct nvkm_enum
+gf100_fifo_mmu_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+static const struct nvkm_enum
+gf100_fifo_mmu_fault_hubclient[] = {
+ { 0x01, "PCOPY0" },
+ { 0x02, "PCOPY1" },
+ { 0x04, "DISPATCH" },
+ { 0x05, "CTXCTL" },
+ { 0x06, "PFIFO" },
+ { 0x07, "BAR_READ" },
+ { 0x08, "BAR_WRITE" },
+ { 0x0b, "PVP" },
+ { 0x0c, "PMSPPP" },
+ { 0x0d, "PMSVLD" },
+ { 0x11, "PCOUNTER" },
+ { 0x12, "PMU" },
+ { 0x14, "CCACHE" },
+ { 0x15, "CCACHE_POST" },
+ {}
+};
+
+static const struct nvkm_enum
+gf100_fifo_mmu_fault_gpcclient[] = {
+ { 0x01, "TEX" },
+ { 0x0c, "ESETUP" },
+ { 0x0e, "CTXCTL" },
+ { 0x0f, "PROP" },
+ {}
+};
+
+const struct nvkm_enum
+gf100_fifo_mmu_fault_access[] = {
+ { 0x00, "READ" },
+ { 0x01, "WRITE" },
+ {}
+};
+
+void
+gf100_fifo_mmu_fault_recover(struct nvkm_fifo *fifo, struct nvkm_fault_data *info)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ const struct nvkm_enum *er, *ee, *ec, *ea;
+ struct nvkm_engine *engine = NULL;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+ char ct[8] = "HUB/";
+
+ /* Lookup engine by MMU fault ID. */
+ nvkm_runl_foreach(runl, fifo) {
+ engn = nvkm_runl_find_engn(engn, runl, engn->fault == info->engine);
+ if (engn) {
+ /* Fault triggered by CTXSW_TIMEOUT recovery procedure. */
+ if (engn->func->mmu_fault_triggered &&
+ engn->func->mmu_fault_triggered(engn)) {
+ nvkm_runl_rc_engn(runl, engn);
+ return;
+ }
+
+ engine = engn->engine;
+ break;
+ }
+ }
+
+ er = nvkm_enum_find(fifo->func->mmu_fault->reason, info->reason);
+ ee = nvkm_enum_find(fifo->func->mmu_fault->engine, info->engine);
+ if (info->hub) {
+ ec = nvkm_enum_find(fifo->func->mmu_fault->hubclient, info->client);
+ } else {
+ ec = nvkm_enum_find(fifo->func->mmu_fault->gpcclient, info->client);
+ snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
+ }
+ ea = nvkm_enum_find(fifo->func->mmu_fault->access, info->access);
+
+ /* Handle BAR faults. */
+ if (ee && ee->data2) {
+ switch (ee->data2) {
+ case NVKM_SUBDEV_BAR:
+ nvkm_bar_bar1_reset(device);
+ break;
+ case NVKM_SUBDEV_INSTMEM:
+ nvkm_bar_bar2_reset(device);
+ break;
+ case NVKM_ENGINE_IFB:
+ nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
+ break;
+ default:
+ break;
+ }
+ }
+
+ chan = nvkm_chan_get_inst(&fifo->engine, info->inst, &flags);
+
+ nvkm_error(subdev,
+ "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
+ "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
+ info->access, ea ? ea->name : "", info->addr,
+ info->engine, ee ? ee->name : engine ? engine->subdev.name : "",
+ info->client, ct, ec ? ec->name : "",
+ info->reason, er ? er->name : "",
+ chan ? chan->id : -1, info->inst, chan ? chan->name : "unknown");
+
+ /* Handle host/engine faults. */
+ if (chan)
+ nvkm_runl_rc_cgrp(chan->cgrp);
+
+ nvkm_chan_put(&chan, flags);
+}
+
+static const struct nvkm_fifo_func_mmu_fault
+gf100_fifo_mmu_fault = {
+ .recover = gf100_fifo_mmu_fault_recover,
+ .access = gf100_fifo_mmu_fault_access,
+ .engine = gf100_fifo_mmu_fault_engine,
+ .reason = gf100_fifo_mmu_fault_reason,
+ .hubclient = gf100_fifo_mmu_fault_hubclient,
+ .gpcclient = gf100_fifo_mmu_fault_gpcclient,
+};
+
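+/* CTXSW_TIMEOUT recovery: for every engine flagged in engm, determine which
+ * channel (group) is stuck on it and trigger an MMU fault on all engines
+ * running that same context, so recovery proceeds through the fault path.
+ */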
+void
+gf100_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo, u32 engm)
+{
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn, *engn2;
+ bool cgid, cgid2;
+ int id, id2;
+
+ nvkm_runl_foreach(runl, fifo) {
+ /* Stop the runlist, and go through all engines serving it. */
+ nvkm_runl_block(runl);
+ nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id)) {
+ /* Determine what channel (group) the engine is on. */
+ id = engn->func->cxid(engn, &cgid);
+ if (id >= 0) {
+ /* Trigger MMU fault on any engine(s) on that channel (group). */
+ nvkm_runl_foreach_engn_cond(engn2, runl, engn2->func->cxid) {
+ id2 = engn2->func->cxid(engn2, &cgid2);
+ if (cgid2 == cgid && id2 == id)
+ engn2->func->mmu_fault_trigger(engn2);
+ }
+ }
+ }
+ nvkm_runl_allow(runl); /* HW will keep runlist blocked via ERROR_SCHED_DISABLE. */
+ }
+}
+
+static void
+gf100_fifo_intr_sched_ctxsw(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ u32 engm = 0;
+
+ /* Look for any engines that are busy, and awaiting chsw ack. */
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn_cond(engn, runl, engn->func->chsw) {
+ if (WARN_ON(engn->fault < 0) || !engn->func->chsw(engn))
+ continue;
+
+ engm |= BIT(engn->id);
+ }
+ }
+
+ if (!engm)
+ return;
+
+ fifo->func->intr_ctxsw_timeout(fifo, engm);
+}
+
+static const struct nvkm_enum
+gf100_fifo_intr_sched_names[] = {
+ { 0x0a, "CTXSW_TIMEOUT" },
+ {}
+};
+
+void
+gf100_fifo_intr_sched(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en;
+
+ en = nvkm_enum_find(gf100_fifo_intr_sched_names, code);
+
+ nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");
+
+ switch (code) {
+ case 0x0a:
+ gf100_fifo_intr_sched_ctxsw(fifo);
+ break;
+ default:
+ break;
+ }
+}
+
+void
+gf100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00001f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
+void
+gf100_fifo_intr_mmu_fault(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ unsigned long mask = nvkm_rd32(device, 0x00259c);
+ int unit;
+
+ for_each_set_bit(unit, &mask, 32) {
+ fifo->func->intr_mmu_fault_unit(fifo, unit);
+ nvkm_wr32(device, 0x00259c, BIT(unit));
+ }
+}
+
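+/* Dispatch pending PBDMA interrupts to each runqueue's handler and ack them
+ * in the top-level pending mask.
+ */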
+bool
+gf100_fifo_intr_pbdma(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_runq *runq;
+ u32 mask = nvkm_rd32(device, 0x0025a0);
+ bool handled = false;
+
+ nvkm_runq_foreach_cond(runq, fifo, mask & BIT(runq->id)) {
+ if (runq->func->intr(runq, NULL))
+ handled = true;
+
+ nvkm_wr32(device, 0x0025a0, BIT(runq->id));
+ }
+
+ return handled;
+}
+
+static void
+gf100_fifo_intr_runlist(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x002a00);
+
+ if (intr & 0x10000000) {
+ nvkm_wr32(device, 0x002a00, 0x10000000);
+ intr &= ~0x10000000;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "RUNLIST %08x\n", intr);
+ nvkm_wr32(device, 0x002a00, intr);
+ }
+}
+
+static void
+gf100_fifo_intr_engine_unit(struct nvkm_fifo *fifo, int engn)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 intr = nvkm_rd32(device, 0x0025a8 + (engn * 0x04));
+ u32 inte = nvkm_rd32(device, 0x002628);
+ u32 unkn;
+
+ nvkm_wr32(device, 0x0025a8 + (engn * 0x04), intr);
+
+ for (unkn = 0; unkn < 8; unkn++) {
+ u32 ints = (intr >> (unkn * 0x04)) & inte;
+ if (ints & 0x1) {
+ nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+ ints &= ~1;
+ }
+ if (ints) {
+ nvkm_error(subdev, "ENGINE %d %d %01x", engn, unkn, ints);
+ nvkm_mask(device, 0x002628, ints, 0);
+ }
+ }
+}
+
+static void
+gf100_fifo_intr_engine(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 mask = nvkm_rd32(device, 0x0025a4);
+
+ while (mask) {
+ u32 unit = __ffs(mask);
+ gf100_fifo_intr_engine_unit(fifo, unit);
+ mask &= ~(1 << unit);
+ }
+}
+
+static irqreturn_t
+gf100_fifo_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ u32 intr = nvkm_rd32(device, 0x00252c);
+ nvkm_warn(subdev, "INTR 00000001: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000100) {
+ gf100_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ u32 intr = nvkm_rd32(device, 0x00256c);
+ nvkm_warn(subdev, "INTR 00010000: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x01000000) {
+ u32 intr = nvkm_rd32(device, 0x00258c);
+ nvkm_warn(subdev, "INTR 01000000: %08x\n", intr);
+ nvkm_wr32(device, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
+ if (stat & 0x10000000) {
+ gf100_fifo_intr_mmu_fault(fifo);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ if (gf100_fifo_intr_pbdma(fifo))
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gf100_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ gf100_fifo_intr_engine(fifo);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ spin_lock(&fifo->lock);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ spin_unlock(&fifo->lock);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+gf100_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+
+ /* Enable PBDMAs. */
+ nvkm_wr32(device, 0x000204, mask);
+ nvkm_wr32(device, 0x002204, mask);
+
+ /* Assign engines to PBDMAs. */
+ if ((mask & 7) == 7) {
+ nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
+ nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
+ nvkm_wr32(device, 0x002214, ~(1 << 1)); /* PMSVLD */
+ nvkm_wr32(device, 0x002218, ~(1 << 2)); /* PCE0 */
+ nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
+}
+
+static void
+gf100_fifo_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x002200, 0x00000001, 0x00000001);
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
+
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0x7fffffff);
+ nvkm_wr32(device, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
+}
+
+static int
+gf100_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+
+ runl = nvkm_runl_new(fifo, 0, 0, 0);
+ if (IS_ERR(runl))
+ return PTR_ERR(runl);
+
+ nvkm_runl_add(runl, 0, fifo->func->engn, NVKM_ENGINE_GR, 0);
+ nvkm_runl_add(runl, 1, fifo->func->engn, NVKM_ENGINE_MSPDEC, 0);
+ nvkm_runl_add(runl, 2, fifo->func->engn, NVKM_ENGINE_MSPPP, 0);
+ nvkm_runl_add(runl, 3, fifo->func->engn, NVKM_ENGINE_MSVLD, 0);
+ nvkm_runl_add(runl, 4, fifo->func->engn, NVKM_ENGINE_CE, 0);
+ nvkm_runl_add(runl, 5, fifo->func->engn, NVKM_ENGINE_CE, 1);
+ nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
+ return 0;
+}
+
+int
+gf100_fifo_runq_nr(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 save;
+
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ save = nvkm_mask(device, 0x000204, 0xffffffff, 0xffffffff);
+ save = nvkm_mask(device, 0x000204, 0xffffffff, save);
+ return hweight32(save);
+}
+
+int
+gf100_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
+{
+ return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr, &fifo->chid);
+}
+
+static const struct nvkm_fifo_func
+gf100_fifo = {
+ .chid_nr = nv50_fifo_chid_nr,
+ .chid_ctor = gf100_fifo_chid_ctor,
+ .runq_nr = gf100_fifo_runq_nr,
+ .runl_ctor = gf100_fifo_runl_ctor,
+ .init = gf100_fifo_init,
+ .init_pbdmas = gf100_fifo_init_pbdmas,
+ .intr = gf100_fifo_intr,
+ .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gf100_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gf100_runl,
+ .runq = &gf100_runq,
+ .engn = &gf100_engn,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, FERMI_CHANNEL_GPFIFO }, &gf100_chan },
+};
+
+int
+gf100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gf100_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
new file mode 100644
index 000000000..d8a4d773a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -0,0 +1,833 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+#include "runq.h"
+
+#include <core/gpuobj.h>
+#include <subdev/mc.h>
+#include <subdev/mmu.h>
+#include <subdev/top.h>
+
+#include <nvif/class.h>
+#include <nvif/if900d.h>
+
+void
+gk104_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+}
+
+void
+gk104_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+}
+
+void
+gk104_chan_unbind(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x00000000);
+}
+
+void
+gk104_chan_bind_inst(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x800000 + (chan->id * 8), 0x80000000 | chan->inst->addr >> 12);
+}
+
+void
+gk104_chan_bind(struct nvkm_chan *chan)
+{
+ struct nvkm_runl *runl = chan->cgrp->runl;
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x800004 + (chan->id * 8), 0x000f0000, runl->id << 16);
+ gk104_chan_bind_inst(chan);
+}
+
+static int
+gk104_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ const u32 limit2 = ilog2(length / 8);
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x08, lower_32_bits(userd));
+ nvkm_wo32(chan->inst, 0x0c, upper_32_bits(userd));
+ nvkm_wo32(chan->inst, 0x10, 0x0000face);
+ nvkm_wo32(chan->inst, 0x30, 0xfffff902);
+ nvkm_wo32(chan->inst, 0x48, lower_32_bits(offset));
+ nvkm_wo32(chan->inst, 0x4c, upper_32_bits(offset) | (limit2 << 16));
+ nvkm_wo32(chan->inst, 0x84, 0x20400000);
+ nvkm_wo32(chan->inst, 0x94, 0x30000000 | devm);
+ nvkm_wo32(chan->inst, 0x9c, 0x00000100);
+ nvkm_wo32(chan->inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->inst, 0xe4, priv ? 0x00000020 : 0x00000000);
+ nvkm_wo32(chan->inst, 0xe8, chan->id);
+ nvkm_wo32(chan->inst, 0xb8, 0xf8000000);
+ nvkm_wo32(chan->inst, 0xf8, 0x10003080); /* 0x002310 */
+ nvkm_wo32(chan->inst, 0xfc, 0x10000010); /* 0x002350 */
+ nvkm_done(chan->inst);
+ return 0;
+}
+
+const struct nvkm_chan_func_ramfc
+gk104_chan_ramfc = {
+ .write = gk104_chan_ramfc_write,
+ .devm = 0xfff,
+ .priv = true,
+};
+
+const struct nvkm_chan_func_userd
+gk104_chan_userd = {
+ .bar = 1,
+ .size = 0x200,
+ .clear = gf100_chan_userd_clear,
+};
+
+static const struct nvkm_chan_func
+gk104_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gk104_chan_userd,
+ .ramfc = &gk104_chan_ramfc,
+ .bind = gk104_chan_bind,
+ .unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
+ .preempt = gf100_chan_preempt,
+};
+
+static void
+gk104_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ u32 ptr0, ptr1 = 0;
+ u64 addr = 0ULL;
+
+ switch (engn->engine->subdev.type) {
+ case NVKM_ENGINE_SW : return;
+ case NVKM_ENGINE_GR : ptr0 = 0x0210; break;
+ case NVKM_ENGINE_SEC : ptr0 = 0x0220; break;
+ case NVKM_ENGINE_MSPDEC: ptr0 = 0x0250; break;
+ case NVKM_ENGINE_MSPPP : ptr0 = 0x0260; break;
+ case NVKM_ENGINE_MSVLD : ptr0 = 0x0270; break;
+ case NVKM_ENGINE_VIC : ptr0 = 0x0280; break;
+ case NVKM_ENGINE_MSENC : ptr0 = 0x0290; break;
+ case NVKM_ENGINE_NVDEC :
+ ptr1 = 0x0270;
+ ptr0 = 0x0210;
+ break;
+ case NVKM_ENGINE_NVENC :
+ if (!engn->engine->subdev.inst)
+ ptr1 = 0x0290;
+ ptr0 = 0x0210;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (cctx) {
+ addr = cctx->vctx->vma->addr;
+ addr |= 4ULL;
+ }
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, ptr0 + 0, lower_32_bits(addr));
+ nvkm_wo32(chan->inst, ptr0 + 4, upper_32_bits(addr));
+ if (ptr1) {
+ nvkm_wo32(chan->inst, ptr1 + 0, lower_32_bits(addr));
+ nvkm_wo32(chan->inst, ptr1 + 4, upper_32_bits(addr));
+ }
+ nvkm_done(chan->inst);
+}
+
+int
+gk104_ectx_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
+{
+ struct gf100_vmm_map_v0 args = { .priv = 1 };
+ int ret;
+
+ ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, &args, sizeof(args));
+}
+
+/*TODO: clean this up */
+struct gk104_engn_status {
+ bool busy;
+ bool faulted;
+ bool chsw;
+ bool save;
+ bool load;
+ struct {
+ bool tsg;
+ u32 id;
+ } prev, next, *chan;
+};
+
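+/* Decode ENGINE_STATUS: which channel/TSG is resident on the engine (prev or
+ * next), and whether a context switch, save or load is in flight.
+ */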
+static void
+gk104_engn_status(struct nvkm_engn *engn, struct gk104_engn_status *status)
+{
+ u32 stat = nvkm_rd32(engn->runl->fifo->engine.subdev.device, 0x002640 + (engn->id * 0x08));
+
+ status->busy = !!(stat & 0x80000000);
+ status->faulted = !!(stat & 0x40000000);
+ status->next.tsg = !!(stat & 0x10000000);
+ status->next.id = (stat & 0x0fff0000) >> 16;
+ status->chsw = !!(stat & 0x00008000);
+ status->save = !!(stat & 0x00004000);
+ status->load = !!(stat & 0x00002000);
+ status->prev.tsg = !!(stat & 0x00001000);
+ status->prev.id = (stat & 0x00000fff);
+ status->chan = NULL;
+
+ if (status->busy && status->chsw) {
+ if (status->load && status->save) {
+ if (nvkm_engine_chsw_load(engn->engine))
+ status->chan = &status->next;
+ else
+ status->chan = &status->prev;
+ } else
+ if (status->load) {
+ status->chan = &status->next;
+ } else {
+ status->chan = &status->prev;
+ }
+ } else
+ if (status->load) {
+ status->chan = &status->prev;
+ }
+
+ ENGN_DEBUG(engn, "%08x: busy %d faulted %d chsw %d save %d load %d %sid %d%s-> %sid %d%s",
+ stat, status->busy, status->faulted, status->chsw, status->save, status->load,
+ status->prev.tsg ? "tsg" : "ch", status->prev.id,
+ status->chan == &status->prev ? "*" : " ",
+ status->next.tsg ? "tsg" : "ch", status->next.id,
+ status->chan == &status->next ? "*" : " ");
+}
+
+int
+gk104_engn_cxid(struct nvkm_engn *engn, bool *cgid)
+{
+ struct gk104_engn_status status;
+
+ gk104_engn_status(engn, &status);
+ if (status.chan) {
+ *cgid = status.chan->tsg;
+ return status.chan->id;
+ }
+
+ return -ENODEV;
+}
+
+bool
+gk104_engn_chsw(struct nvkm_engn *engn)
+{
+ struct gk104_engn_status status;
+
+ gk104_engn_status(engn, &status);
+ if (status.busy && status.chsw)
+ return true;
+
+ return false;
+}
+
+const struct nvkm_engn_func
+gk104_engn = {
+ .chsw = gk104_engn_chsw,
+ .cxid = gk104_engn_cxid,
+ .mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
+ .mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
+ .ctor = gk104_ectx_ctor,
+ .bind = gk104_ectx_bind,
+};
+
+const struct nvkm_engn_func
+gk104_engn_ce = {
+ .chsw = gk104_engn_chsw,
+ .cxid = gk104_engn_cxid,
+ .mmu_fault_trigger = gf100_engn_mmu_fault_trigger,
+ .mmu_fault_triggered = gf100_engn_mmu_fault_triggered,
+};
+
+bool
+gk104_runq_idle(struct nvkm_runq *runq)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+
+ return !(nvkm_rd32(device, 0x003080 + (runq->id * 4)) & 0x0000e000);
+}
+
+static const struct nvkm_bitfield
+gk104_runq_intr_1_names[] = {
+ { 0x00000001, "HCE_RE_ILLEGAL_OP" },
+ { 0x00000002, "HCE_RE_ALIGNB" },
+ { 0x00000004, "HCE_PRIV" },
+ { 0x00000008, "HCE_ILLEGAL_MTHD" },
+ { 0x00000010, "HCE_ILLEGAL_CLASS" },
+ {}
+};
+
+static bool
+gk104_runq_intr_1(struct nvkm_runq *runq)
+{
+ struct nvkm_subdev *subdev = &runq->fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x04014c + (runq->id * 0x2000));
+ u32 stat = nvkm_rd32(device, 0x040148 + (runq->id * 0x2000)) & mask;
+ u32 chid = nvkm_rd32(device, 0x040120 + (runq->id * 0x2000)) & 0xfff;
+ char msg[128];
+
+ if (stat & 0x80000000) {
+ if (runq->func->intr_1_ctxnotvalid &&
+ runq->func->intr_1_ctxnotvalid(runq, chid))
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_snprintbf(msg, sizeof(msg), gk104_runq_intr_1_names, stat);
+ nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
+ runq->id, stat, msg, chid,
+ nvkm_rd32(device, 0x040150 + (runq->id * 0x2000)),
+ nvkm_rd32(device, 0x040154 + (runq->id * 0x2000)));
+ }
+
+ nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), stat);
+ return true;
+}
+
+const struct nvkm_bitfield
+gk104_runq_intr_0_names[] = {
+ { 0x00000001, "MEMREQ" },
+ { 0x00000002, "MEMACK_TIMEOUT" },
+ { 0x00000004, "MEMACK_EXTRA" },
+ { 0x00000008, "MEMDAT_TIMEOUT" },
+ { 0x00000010, "MEMDAT_EXTRA" },
+ { 0x00000020, "MEMFLUSH" },
+ { 0x00000040, "MEMOP" },
+ { 0x00000080, "LBCONNECT" },
+ { 0x00000100, "LBREQ" },
+ { 0x00000200, "LBACK_TIMEOUT" },
+ { 0x00000400, "LBACK_EXTRA" },
+ { 0x00000800, "LBDAT_TIMEOUT" },
+ { 0x00001000, "LBDAT_EXTRA" },
+ { 0x00002000, "GPFIFO" },
+ { 0x00004000, "GPPTR" },
+ { 0x00008000, "GPENTRY" },
+ { 0x00010000, "GPCRC" },
+ { 0x00020000, "PBPTR" },
+ { 0x00040000, "PBENTRY" },
+ { 0x00080000, "PBCRC" },
+ { 0x00100000, "XBARCONNECT" },
+ { 0x00200000, "METHOD" },
+ { 0x00400000, "METHODCRC" },
+ { 0x00800000, "DEVICE" },
+ { 0x02000000, "SEMAPHORE" },
+ { 0x04000000, "ACQUIRE" },
+ { 0x08000000, "PRI" },
+ { 0x20000000, "NO_CTXSW_SEG" },
+ { 0x40000000, "PBSEG" },
+ { 0x80000000, "SIGNATURE" },
+ {}
+};
+
+bool
+gk104_runq_intr(struct nvkm_runq *runq, struct nvkm_runl *null)
+{
+ bool intr0 = gf100_runq_intr(runq, NULL);
+ bool intr1 = gk104_runq_intr_1(runq);
+
+ return intr0 || intr1;
+}
+
+void
+gk104_runq_init(struct nvkm_runq *runq)
+{
+ struct nvkm_device *device = runq->fifo->engine.subdev.device;
+
+ gf100_runq_init(runq);
+
+ nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0xffffffff); /* HCE.INTR */
+ nvkm_wr32(device, 0x04014c + (runq->id * 0x2000), 0xffffffff); /* HCE.INTREN */
+}
+
+static u32
+gk104_runq_runm(struct nvkm_runq *runq)
+{
+ return nvkm_rd32(runq->fifo->engine.subdev.device, 0x002390 + (runq->id * 0x04));
+}
+
+const struct nvkm_runq_func
+gk104_runq = {
+ .init = gk104_runq_init,
+ .intr = gk104_runq_intr,
+ .intr_0_names = gk104_runq_intr_0_names,
+ .idle = gk104_runq_idle,
+};
+
+void
+gk104_runl_fault_clear(struct nvkm_runl *runl)
+{
+ nvkm_wr32(runl->fifo->engine.subdev.device, 0x00262c, BIT(runl->id));
+}
+
+void
+gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
+}
+
+void
+gk104_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
+}
+
+bool
+gk104_runl_pending(struct nvkm_runl *runl)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+
+ return nvkm_rd32(device, 0x002284 + (runl->id * 0x08)) & 0x00100000;
+}
+
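+/* Submit an updated runlist to the scheduler: its address/aperture in one
+ * register, its runlist ID and entry count in the other. */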
+void
+gk104_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u64 addr = nvkm_memory_addr(memory) + start;
+ int target;
+
+ switch (nvkm_memory_target(memory)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ spin_lock_irq(&fifo->lock);
+ nvkm_wr32(device, 0x002270, (target << 28) | (addr >> 12));
+ nvkm_wr32(device, 0x002274, (runl->id << 20) | count);
+ spin_unlock_irq(&fifo->lock);
+}
+
+void
+gk104_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset + 0, chan->id);
+ nvkm_wo32(memory, offset + 4, 0x00000000);
+}
+
+static const struct nvkm_runl_func
+gk104_runl = {
+ .size = 8,
+ .update = nv50_runl_update,
+ .insert_chan = gk104_runl_insert_chan,
+ .commit = gk104_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
+ .fault_clear = gk104_runl_fault_clear,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+static const struct nvkm_enum
+gk104_fifo_mmu_fault_engine[] = {
+ { 0x00, "GR", NULL, NVKM_ENGINE_GR },
+ { 0x01, "DISPLAY" },
+ { 0x02, "CAPTURE" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "SCHED" },
+ { 0x07, "HOST0" },
+ { 0x08, "HOST1" },
+ { 0x09, "HOST2" },
+ { 0x0a, "HOST3" },
+ { 0x0b, "HOST4" },
+ { 0x0c, "HOST5" },
+ { 0x0d, "HOST6" },
+ { 0x0e, "HOST7" },
+ { 0x0f, "HOSTSR" },
+ { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
+ { 0x13, "PERF" },
+ { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "CE0", NULL, NVKM_ENGINE_CE, 0 },
+ { 0x16, "CE1", NULL, NVKM_ENGINE_CE, 1 },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+ { 0x1b, "CE2", NULL, NVKM_ENGINE_CE, 2 },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_mmu_fault_reason[] = {
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "BOTH_PTES_VALID" },
+ { 0x0f, "INFO_TYPE_POISONED" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_mmu_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "MSPDEC" },
+ { 0x0c, "MSPPP" },
+ { 0x0d, "MSVLD" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "GR_CE" },
+ { 0x19, "CE2" },
+ { 0x1a, "XV" },
+ { 0x1b, "MMU_NB" },
+ { 0x1c, "MSENC" },
+ { 0x1d, "DFALCON" },
+ { 0x1e, "SKED" },
+ { 0x1f, "AFALCON" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_mmu_fault_gpcclient[] = {
+ { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+ { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+ { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+ { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+ { 0x0c, "RAST" },
+ { 0x0d, "GCC" },
+ { 0x0e, "GPCCS" },
+ { 0x0f, "PROP_0" },
+ { 0x10, "PROP_1" },
+ { 0x11, "PROP_2" },
+ { 0x12, "PROP_3" },
+ { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+ { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+ { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+ { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+ { 0x1f, "GPM" },
+ { 0x20, "LTP_UTLB_0" },
+ { 0x21, "LTP_UTLB_1" },
+ { 0x22, "LTP_UTLB_2" },
+ { 0x23, "LTP_UTLB_3" },
+ { 0x24, "GPC_RGG_UTLB" },
+ {}
+};
+
+const struct nvkm_fifo_func_mmu_fault
+gk104_fifo_mmu_fault = {
+ .recover = gf100_fifo_mmu_fault_recover,
+ .access = gf100_fifo_mmu_fault_access,
+ .engine = gk104_fifo_mmu_fault_engine,
+ .reason = gk104_fifo_mmu_fault_reason,
+ .hubclient = gk104_fifo_mmu_fault_hubclient,
+ .gpcclient = gk104_fifo_mmu_fault_gpcclient,
+};
+
+static const struct nvkm_enum
+gk104_fifo_intr_bind_reason[] = {
+ { 0x01, "BIND_NOT_UNBOUND" },
+ { 0x02, "SNOOP_WITHOUT_BAR1" },
+ { 0x03, "UNBIND_WHILE_RUNNING" },
+ { 0x05, "INVALID_RUNLIST" },
+ { 0x06, "INVALID_CTX_TGT" },
+ { 0x0b, "UNBIND_WHILE_PARKED" },
+ {}
+};
+
+void
+gk104_fifo_intr_bind(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ u32 intr = nvkm_rd32(subdev->device, 0x00252c);
+ u32 code = intr & 0x000000ff;
+ const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code);
+
+ nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
+}
+
+void
+gk104_fifo_intr_chsw(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x00256c);
+
+ nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
+ nvkm_wr32(device, 0x00256c, stat);
+}
+
+static void
+gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ u32 stat = nvkm_rd32(subdev->device, 0x00259c);
+
+ nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
+}
+
+void
+gk104_fifo_intr_runlist(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_runl *runl;
+ u32 mask = nvkm_rd32(device, 0x002a00);
+
+ nvkm_runl_foreach_cond(runl, fifo, mask & BIT(runl->id)) {
+ nvkm_wr32(device, 0x002a00, BIT(runl->id));
+ }
+}
+
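+/* Top-level PFIFO interrupt handler; each serviced status bit is cleared,
+ * and anything unrecognised is masked off to avoid an interrupt storm. */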
+irqreturn_t
+gk104_fifo_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000010) {
+ nvkm_error(subdev, "PIO_ERROR\n");
+ nvkm_wr32(device, 0x002100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000100) {
+ gf100_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x00800000) {
+ nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
+ nvkm_wr32(device, 0x002100, 0x00800000);
+ stat &= ~0x00800000;
+ }
+
+ if (stat & 0x01000000) {
+ nvkm_error(subdev, "LB_ERROR\n");
+ nvkm_wr32(device, 0x002100, 0x01000000);
+ stat &= ~0x01000000;
+ }
+
+ if (stat & 0x08000000) {
+ gk104_fifo_intr_dropped_fault(fifo);
+ nvkm_wr32(device, 0x002100, 0x08000000);
+ stat &= ~0x08000000;
+ }
+
+ if (stat & 0x10000000) {
+ gf100_fifo_intr_mmu_fault(fifo);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ if (gf100_fifo_intr_pbdma(fifo))
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ spin_lock(&fifo->lock);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ spin_unlock(&fifo->lock);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+
+ return IRQ_HANDLED;
+}
+
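+/* Enable the PBDMA units named in the mask. */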
+void
+gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x000204, mask);
+ nvkm_mask(device, 0x002a04, 0xbfffffff, 0xbfffffff);
+}
+
+void
+gk104_fifo_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+
+ if (fifo->func->chan.func->userd->bar == 1)
+ nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
+
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0x7fffffff);
+}
+
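+/* Build runlists from PTOP device info, attaching each engine and the
+ * PBDMAs (runqs) that serve it to the corresponding runlist. */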
+int
+gk104_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_top_device *tdev;
+ struct nvkm_runl *runl;
+ struct nvkm_runq *runq;
+ const struct nvkm_engn_func *func;
+
+ nvkm_list_foreach(tdev, &device->top->device, head, tdev->runlist >= 0) {
+ runl = nvkm_runl_get(fifo, tdev->runlist, tdev->runlist);
+ if (!runl) {
+ runl = nvkm_runl_new(fifo, tdev->runlist, tdev->runlist, 0);
+ if (IS_ERR(runl))
+ return PTR_ERR(runl);
+
+ nvkm_runq_foreach_cond(runq, fifo, gk104_runq_runm(runq) & BIT(runl->id)) {
+ if (WARN_ON(runl->runq_nr == ARRAY_SIZE(runl->runq)))
+ return -ENOMEM;
+
+ runl->runq[runl->runq_nr++] = runq;
+ }
+
+ }
+
+ if (tdev->engine < 0)
+ continue;
+
+ switch (tdev->type) {
+ case NVKM_ENGINE_CE:
+ func = fifo->func->engn_ce;
+ break;
+ case NVKM_ENGINE_GR:
+ nvkm_runl_add(runl, 15, &gf100_engn_sw, NVKM_ENGINE_SW, 0);
+ fallthrough;
+ default:
+ func = fifo->func->engn;
+ break;
+ }
+
+ nvkm_runl_add(runl, tdev->engine, func, tdev->type, tdev->inst);
+ }
+
+ return 0;
+}
+
+int
+gk104_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return 4096;
+}
+
+static const struct nvkm_fifo_func
+gk104_fifo = {
+ .chid_nr = gk104_fifo_chid_nr,
+ .chid_ctor = gf100_fifo_chid_ctor,
+ .runq_nr = gf100_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gk104_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gk104_runl,
+ .runq = &gk104_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk104_chan },
+};
+
+int
+gk104_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gk104_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
new file mode 100644
index 000000000..a8ff21cf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+void
+gk110_chan_preempt(struct nvkm_chan *chan)
+{
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+
+ if (cgrp->hw) {
+ cgrp->func->preempt(cgrp);
+ return;
+ }
+
+ gf100_chan_preempt(chan);
+}
+
+const struct nvkm_chan_func
+gk110_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gk104_chan_userd,
+ .ramfc = &gk104_chan_ramfc,
+ .bind = gk104_chan_bind,
+ .unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
+ .preempt = gk110_chan_preempt,
+};
+
+static void
+gk110_cgrp_preempt(struct nvkm_cgrp *cgrp)
+{
+ nvkm_wr32(cgrp->runl->fifo->engine.subdev.device, 0x002634, 0x01000000 | cgrp->id);
+}
+
+const struct nvkm_cgrp_func
+gk110_cgrp = {
+ .preempt = gk110_cgrp_preempt,
+};
+
+void
+gk110_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset + 0, (cgrp->chan_nr << 26) | (128 << 18) |
+ (3 << 14) | 0x00002000 | cgrp->id);
+ nvkm_wo32(memory, offset + 4, 0x00000000);
+}
+
+const struct nvkm_runl_func
+gk110_runl = {
+ .size = 8,
+ .update = nv50_runl_update,
+ .insert_cgrp = gk110_runl_insert_cgrp,
+ .insert_chan = gk104_runl_insert_chan,
+ .commit = gk104_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
+ .fault_clear = gk104_runl_fault_clear,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+int
+gk110_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
+{
+ int ret;
+
+ ret = nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr, &fifo->cgid);
+ if (ret)
+ return ret;
+
+ return gf100_fifo_chid_ctor(fifo, nr);
+}
+
+static const struct nvkm_fifo_func
+gk110_fifo = {
+ .chid_nr = gk104_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gf100_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gk104_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gk110_runl,
+ .runq = &gk104_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
+ .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_B }, &gk110_chan },
+};
+
+int
+gk110_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gk110_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
new file mode 100644
index 000000000..8fa2b0be1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "runq.h"
+
+#include <nvif/class.h>
+
+void
+gk208_runq_init(struct nvkm_runq *runq)
+{
+ gk104_runq_init(runq);
+
+ nvkm_wr32(runq->fifo->engine.subdev.device, 0x04012c + (runq->id * 0x2000), 0x000f4240);
+}
+
+const struct nvkm_runq_func
+gk208_runq = {
+ .init = gk208_runq_init,
+ .intr = gk104_runq_intr,
+ .intr_0_names = gk104_runq_intr_0_names,
+ .idle = gk104_runq_idle,
+};
+
+static int
+gk208_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return 1024;
+}
+
+static const struct nvkm_fifo_func
+gk208_fifo = {
+ .chid_nr = gk208_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gf100_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gk104_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gk110_runl,
+ .runq = &gk208_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
+ .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk110_chan },
+};
+
+int
+gk208_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gk208_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
new file mode 100644
index 000000000..b63ca8361
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_fifo_func
+gk20a_fifo = {
+ .chid_nr = nv50_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gf100_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gk104_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gk110_runl,
+ .runq = &gk208_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ }, &gk110_cgrp },
+ .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_A }, &gk110_chan },
+};
+
+int
+gk20a_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gk20a_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
new file mode 100644
index 000000000..5ba60021b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "chan.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fault.h>
+
+#include <nvif/class.h>
+
+const struct nvkm_chan_func
+gm107_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gk104_chan_userd,
+ .ramfc = &gk104_chan_ramfc,
+ .bind = gk104_chan_bind_inst,
+ .unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
+ .preempt = gk110_chan_preempt,
+};
+
+static void
+gm107_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset + 0, chan->id);
+ nvkm_wo32(memory, offset + 4, chan->inst->addr >> 12);
+}
+
+const struct nvkm_runl_func
+gm107_runl = {
+ .size = 8,
+ .update = nv50_runl_update,
+ .insert_cgrp = gk110_runl_insert_cgrp,
+ .insert_chan = gm107_runl_insert_chan,
+ .commit = gk104_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
+ .fault_clear = gk104_runl_fault_clear,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+static const struct nvkm_enum
+gm107_fifo_mmu_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x02, "CAPTURE" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "SCHED" },
+ { 0x07, "HOST0" },
+ { 0x08, "HOST1" },
+ { 0x09, "HOST2" },
+ { 0x0a, "HOST3" },
+ { 0x0b, "HOST4" },
+ { 0x0c, "HOST5" },
+ { 0x0d, "HOST6" },
+ { 0x0e, "HOST7" },
+ { 0x0f, "HOSTSR" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ {}
+};
+
+const struct nvkm_fifo_func_mmu_fault
+gm107_fifo_mmu_fault = {
+ .recover = gf100_fifo_mmu_fault_recover,
+ .access = gf100_fifo_mmu_fault_access,
+ .engine = gm107_fifo_mmu_fault_engine,
+ .reason = gk104_fifo_mmu_fault_reason,
+ .hubclient = gk104_fifo_mmu_fault_hubclient,
+ .gpcclient = gk104_fifo_mmu_fault_gpcclient,
+};
+
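+/* Read and decode the raw MMU fault information for a unit, then hand it
+ * to the common fault handling code. */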
+void
+gm107_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.client = (type & 0x00003f00) >> 8;
+ info.access = (type & 0x00000080) >> 7;
+ info.hub = (type & 0x00000040) >> 6;
+ info.reason = (type & 0x0000000f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
+static int
+gm107_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return 2048;
+}
+
+static const struct nvkm_fifo_func
+gm107_fifo = {
+ .chid_nr = gm107_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gf100_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gm107_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gm107_runl,
+ .runq = &gk208_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
+ .chan = {{ 0, 0, KEPLER_CHANNEL_GPFIFO_B }, &gm107_chan },
+};
+
+int
+gm107_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gm107_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
new file mode 100644
index 000000000..d92d1ac39
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+int
+gm200_fifo_runq_nr(struct nvkm_fifo *fifo)
+{
+ return nvkm_rd32(fifo->engine.subdev.device, 0x002004) & 0x000000ff;
+}
+
+int
+gm200_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return nvkm_rd32(fifo->engine.subdev.device, 0x002008);
+}
+
+static const struct nvkm_fifo_func
+gm200_fifo = {
+ .chid_nr = gm200_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gm200_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gm107_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gm107_runl,
+ .runq = &gk208_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp },
+ .chan = {{ 0, 0, MAXWELL_CHANNEL_GPFIFO_A }, &gm107_chan },
+};
+
+int
+gm200_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gm200_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 000000000..65bdb6a7d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <subdev/fault.h>
+
+#include <nvif/class.h>
+
+static void
+gp100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset + 0, chan->id | chan->runq << 14);
+ nvkm_wo32(memory, offset + 4, chan->inst->addr >> 12);
+}
+
+static const struct nvkm_runl_func
+gp100_runl = {
+ .runqs = 2,
+ .size = 8,
+ .update = nv50_runl_update,
+ .insert_cgrp = gk110_runl_insert_cgrp,
+ .insert_chan = gp100_runl_insert_chan,
+ .commit = gk104_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
+ .fault_clear = gk104_runl_fault_clear,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+static const struct nvkm_enum
+gp100_fifo_mmu_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "HOST0" },
+ { 0x07, "HOST1" },
+ { 0x08, "HOST2" },
+ { 0x09, "HOST3" },
+ { 0x0a, "HOST4" },
+ { 0x0b, "HOST5" },
+ { 0x0c, "HOST6" },
+ { 0x0d, "HOST7" },
+ { 0x0e, "HOST8" },
+ { 0x0f, "HOST9" },
+ { 0x10, "HOST10" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x1f, "PHYSICAL" },
+ {}
+};
+
+static const struct nvkm_fifo_func_mmu_fault
+gp100_fifo_mmu_fault = {
+ .recover = gf100_fifo_mmu_fault_recover,
+ .access = gf100_fifo_mmu_fault_access,
+ .engine = gp100_fifo_mmu_fault_engine,
+ .reason = gk104_fifo_mmu_fault_reason,
+ .hubclient = gk104_fifo_mmu_fault_hubclient,
+ .gpcclient = gk104_fifo_mmu_fault_gpcclient,
+};
+
+static void
+gp100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *fifo, int unit)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
+ u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
+ u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
+ u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
+ struct nvkm_fault_data info;
+
+ info.inst = (u64)inst << 12;
+ info.addr = ((u64)vahi << 32) | valo;
+ info.time = 0;
+ info.engine = unit;
+ info.valid = 1;
+ info.gpc = (type & 0x1f000000) >> 24;
+ info.hub = (type & 0x00100000) >> 20;
+ info.access = (type & 0x00070000) >> 16;
+ info.client = (type & 0x00007f00) >> 8;
+ info.reason = (type & 0x0000001f);
+
+ nvkm_fifo_fault(fifo, &info);
+}
+
+static const struct nvkm_fifo_func
+gp100_fifo = {
+ .chid_nr = gm200_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gm200_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_mmu_fault_unit = gp100_fifo_intr_mmu_fault_unit,
+ .intr_ctxsw_timeout = gf100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gp100_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gp100_runl,
+ .runq = &gk208_runq,
+ .engn = &gk104_engn,
+ .engn_ce = &gk104_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp, .force = true },
+ .chan = {{ 0, 0, PASCAL_CHANNEL_GPFIFO_A }, &gm107_chan },
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gp100_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
new file mode 100644
index 000000000..33066c8cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+#include "chid.h"
+#include "cgrp.h"
+#include "runl.h"
+#include "runq.h"
+
+#include <core/gpuobj.h>
+#include <subdev/mmu.h>
+
+#include <nvif/class.h>
+
+static u32
+gv100_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return chan->id;
+}
+
+static int
+gv100_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ const u64 userd = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ const u32 limit2 = ilog2(length / 8);
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x008, lower_32_bits(userd));
+ nvkm_wo32(chan->inst, 0x00c, upper_32_bits(userd));
+ nvkm_wo32(chan->inst, 0x010, 0x0000face);
+ nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
+ nvkm_wo32(chan->inst, 0x048, lower_32_bits(offset));
+ nvkm_wo32(chan->inst, 0x04c, upper_32_bits(offset) | (limit2 << 16));
+ nvkm_wo32(chan->inst, 0x084, 0x20400000);
+ nvkm_wo32(chan->inst, 0x094, 0x30000000 | devm);
+ nvkm_wo32(chan->inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
+ nvkm_wo32(chan->inst, 0x0e8, chan->id);
+ nvkm_wo32(chan->inst, 0x0f4, 0x00001000 | (priv ? 0x00000100 : 0x00000000));
+ nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
+ nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
+ nvkm_done(chan->inst);
+ return 0;
+}
+
+const struct nvkm_chan_func_ramfc
+gv100_chan_ramfc = {
+ .write = gv100_chan_ramfc_write,
+ .devm = 0xfff,
+ .priv = true,
+};
+
+const struct nvkm_chan_func_userd
+gv100_chan_userd = {
+ .bar = -1,
+ .size = 0x200,
+ .clear = gf100_chan_userd_clear,
+};
+
+static const struct nvkm_chan_func
+gv100_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gv100_chan_userd,
+ .ramfc = &gv100_chan_ramfc,
+ .bind = gk104_chan_bind_inst,
+ .unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
+ .preempt = gk110_chan_preempt,
+ .doorbell_handle = gv100_chan_doorbell_handle,
+};
+
+void
+gv100_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ u64 addr = 0ULL;
+
+ if (cctx) {
+ addr = cctx->vctx->vma->addr;
+ addr |= 4ULL;
+ }
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x210, lower_32_bits(addr));
+ nvkm_wo32(chan->inst, 0x214, upper_32_bits(addr));
+ nvkm_mo32(chan->inst, 0x0ac, 0x00010000, cctx ? 0x00010000 : 0x00000000);
+ nvkm_done(chan->inst);
+}
+
+const struct nvkm_engn_func
+gv100_engn = {
+ .chsw = gk104_engn_chsw,
+ .cxid = gk104_engn_cxid,
+ .ctor = gk104_ectx_ctor,
+ .bind = gv100_ectx_bind,
+};
+
+void
+gv100_ectx_ce_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ const u64 bar2 = cctx ? nvkm_memory_bar2(cctx->vctx->inst->memory) : 0ULL;
+
+ nvkm_kmap(chan->inst);
+ nvkm_wo32(chan->inst, 0x220, lower_32_bits(bar2));
+ nvkm_wo32(chan->inst, 0x224, upper_32_bits(bar2));
+ nvkm_mo32(chan->inst, 0x0ac, 0x00020000, cctx ? 0x00020000 : 0x00000000);
+ nvkm_done(chan->inst);
+}
+
+int
+gv100_ectx_ce_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx)
+{
+ if (nvkm_memory_bar2(vctx->inst->memory) == ~0ULL)
+ return -EFAULT;
+
+ return 0;
+}
+
+const struct nvkm_engn_func
+gv100_engn_ce = {
+ .chsw = gk104_engn_chsw,
+ .cxid = gk104_engn_cxid,
+ .ctor = gv100_ectx_ce_ctor,
+ .bind = gv100_ectx_ce_bind,
+};
+
+static bool
+gv100_runq_intr_1_ctxnotvalid(struct nvkm_runq *runq, int chid)
+{
+ struct nvkm_fifo *fifo = runq->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ RUNQ_ERROR(runq, "CTXNOTVALID chid:%d", chid);
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ if (WARN_ON_ONCE(!chan))
+ return false;
+
+ nvkm_chan_error(chan, true);
+ nvkm_chan_put(&chan, flags);
+
+ nvkm_mask(device, 0x0400ac + (runq->id * 0x2000), 0x00030000, 0x00030000);
+ nvkm_wr32(device, 0x040148 + (runq->id * 0x2000), 0x80000000);
+ return true;
+}
+
+const struct nvkm_runq_func
+gv100_runq = {
+ .init = gk208_runq_init,
+ .intr = gk104_runq_intr,
+ .intr_0_names = gk104_runq_intr_0_names,
+ .intr_1_ctxnotvalid = gv100_runq_intr_1_ctxnotvalid,
+ .idle = gk104_runq_idle,
+};
+
+void
+gv100_runl_preempt(struct nvkm_runl *runl)
+{
+ nvkm_wr32(runl->fifo->engine.subdev.device, 0x002638, BIT(runl->id));
+}
+
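+/* Volta runlist entries are 16 bytes, carrying the channel's USERD address
+ * alongside its instance block address. */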
+void
+gv100_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
+{
+ const u64 user = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+ const u64 inst = chan->inst->addr;
+
+ nvkm_wo32(memory, offset + 0x0, lower_32_bits(user) | chan->runq << 1);
+ nvkm_wo32(memory, offset + 0x4, upper_32_bits(user));
+ nvkm_wo32(memory, offset + 0x8, lower_32_bits(inst) | chan->id);
+ nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
+}
+
+void
+gv100_runl_insert_cgrp(struct nvkm_cgrp *cgrp, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset + 0x0, (128 << 24) | (3 << 16) | 0x00000001);
+ nvkm_wo32(memory, offset + 0x4, cgrp->chan_nr);
+ nvkm_wo32(memory, offset + 0x8, cgrp->id);
+ nvkm_wo32(memory, offset + 0xc, 0x00000000);
+}
+
+static const struct nvkm_runl_func
+gv100_runl = {
+ .runqs = 2,
+ .size = 16,
+ .update = nv50_runl_update,
+ .insert_cgrp = gv100_runl_insert_cgrp,
+ .insert_chan = gv100_runl_insert_chan,
+ .commit = gk104_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
+ .preempt = gv100_runl_preempt,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+const struct nvkm_enum
+gv100_fifo_mmu_fault_gpcclient[] = {
+ { 0x00, "T1_0" },
+ { 0x01, "T1_1" },
+ { 0x02, "T1_2" },
+ { 0x03, "T1_3" },
+ { 0x04, "T1_4" },
+ { 0x05, "T1_5" },
+ { 0x06, "T1_6" },
+ { 0x07, "T1_7" },
+ { 0x08, "PE_0" },
+ { 0x09, "PE_1" },
+ { 0x0a, "PE_2" },
+ { 0x0b, "PE_3" },
+ { 0x0c, "PE_4" },
+ { 0x0d, "PE_5" },
+ { 0x0e, "PE_6" },
+ { 0x0f, "PE_7" },
+ { 0x10, "RAST" },
+ { 0x11, "GCC" },
+ { 0x12, "GPCCS" },
+ { 0x13, "PROP_0" },
+ { 0x14, "PROP_1" },
+ { 0x15, "PROP_2" },
+ { 0x16, "PROP_3" },
+ { 0x17, "GPM" },
+ { 0x18, "LTP_UTLB_0" },
+ { 0x19, "LTP_UTLB_1" },
+ { 0x1a, "LTP_UTLB_2" },
+ { 0x1b, "LTP_UTLB_3" },
+ { 0x1c, "LTP_UTLB_4" },
+ { 0x1d, "LTP_UTLB_5" },
+ { 0x1e, "LTP_UTLB_6" },
+ { 0x1f, "LTP_UTLB_7" },
+ { 0x20, "RGG_UTLB" },
+ { 0x21, "T1_8" },
+ { 0x22, "T1_9" },
+ { 0x23, "T1_10" },
+ { 0x24, "T1_11" },
+ { 0x25, "T1_12" },
+ { 0x26, "T1_13" },
+ { 0x27, "T1_14" },
+ { 0x28, "T1_15" },
+ { 0x29, "TPCCS_0" },
+ { 0x2a, "TPCCS_1" },
+ { 0x2b, "TPCCS_2" },
+ { 0x2c, "TPCCS_3" },
+ { 0x2d, "TPCCS_4" },
+ { 0x2e, "TPCCS_5" },
+ { 0x2f, "TPCCS_6" },
+ { 0x30, "TPCCS_7" },
+ { 0x31, "PE_8" },
+ { 0x32, "PE_9" },
+ { 0x33, "TPCCS_8" },
+ { 0x34, "TPCCS_9" },
+ { 0x35, "T1_16" },
+ { 0x36, "T1_17" },
+ { 0x37, "T1_18" },
+ { 0x38, "T1_19" },
+ { 0x39, "PE_10" },
+ { 0x3a, "PE_11" },
+ { 0x3b, "TPCCS_10" },
+ { 0x3c, "TPCCS_11" },
+ { 0x3d, "T1_20" },
+ { 0x3e, "T1_21" },
+ { 0x3f, "T1_22" },
+ { 0x40, "T1_23" },
+ { 0x41, "PE_12" },
+ { 0x42, "PE_13" },
+ { 0x43, "TPCCS_12" },
+ { 0x44, "TPCCS_13" },
+ { 0x45, "T1_24" },
+ { 0x46, "T1_25" },
+ { 0x47, "T1_26" },
+ { 0x48, "T1_27" },
+ { 0x49, "PE_14" },
+ { 0x4a, "PE_15" },
+ { 0x4b, "TPCCS_14" },
+ { 0x4c, "TPCCS_15" },
+ { 0x4d, "T1_28" },
+ { 0x4e, "T1_29" },
+ { 0x4f, "T1_30" },
+ { 0x50, "T1_31" },
+ { 0x51, "PE_16" },
+ { 0x52, "PE_17" },
+ { 0x53, "TPCCS_16" },
+ { 0x54, "TPCCS_17" },
+ { 0x55, "T1_32" },
+ { 0x56, "T1_33" },
+ { 0x57, "T1_34" },
+ { 0x58, "T1_35" },
+ { 0x59, "PE_18" },
+ { 0x5a, "PE_19" },
+ { 0x5b, "TPCCS_18" },
+ { 0x5c, "TPCCS_19" },
+ { 0x5d, "T1_36" },
+ { 0x5e, "T1_37" },
+ { 0x5f, "T1_38" },
+ { 0x60, "T1_39" },
+ {}
+};
+
+const struct nvkm_enum
+gv100_fifo_mmu_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "NVDEC" },
+ { 0x0d, "NVENC1" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "CE2" },
+ { 0x19, "XV" },
+ { 0x1a, "MMU_NB" },
+ { 0x1b, "NVENC0" },
+ { 0x1c, "DFALCON" },
+ { 0x1d, "SKED" },
+ { 0x1e, "AFALCON" },
+ { 0x1f, "DONT_CARE" },
+ { 0x20, "HSCE0" },
+ { 0x21, "HSCE1" },
+ { 0x22, "HSCE2" },
+ { 0x23, "HSCE3" },
+ { 0x24, "HSCE4" },
+ { 0x25, "HSCE5" },
+ { 0x26, "HSCE6" },
+ { 0x27, "HSCE7" },
+ { 0x28, "HSCE8" },
+ { 0x29, "HSCE9" },
+ { 0x2a, "HSHUB" },
+ { 0x2b, "PTP_X0" },
+ { 0x2c, "PTP_X1" },
+ { 0x2d, "PTP_X2" },
+ { 0x2e, "PTP_X3" },
+ { 0x2f, "PTP_X4" },
+ { 0x30, "PTP_X5" },
+ { 0x31, "PTP_X6" },
+ { 0x32, "PTP_X7" },
+ { 0x33, "NVENC2" },
+ { 0x34, "VPR_SCRUBBER0" },
+ { 0x35, "VPR_SCRUBBER1" },
+ { 0x36, "DWBIF" },
+ { 0x37, "FBFALCON" },
+ { 0x38, "CE_SHIM" },
+ { 0x39, "GSP" },
+ {}
+};
+
+const struct nvkm_enum
+gv100_fifo_mmu_fault_reason[] = {
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "POISONED" },
+ { 0x0f, "ATOMIC_VIOLATION" },
+ {}
+};
+
+static const struct nvkm_enum
+gv100_fifo_mmu_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "PTP" },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "PWR_PMU" },
+ { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x09, "PERF" },
+ { 0x1f, "PHYSICAL" },
+ { 0x20, "HOST0" },
+ { 0x21, "HOST1" },
+ { 0x22, "HOST2" },
+ { 0x23, "HOST3" },
+ { 0x24, "HOST4" },
+ { 0x25, "HOST5" },
+ { 0x26, "HOST6" },
+ { 0x27, "HOST7" },
+ { 0x28, "HOST8" },
+ { 0x29, "HOST9" },
+ { 0x2a, "HOST10" },
+ { 0x2b, "HOST11" },
+ { 0x2c, "HOST12" },
+ { 0x2d, "HOST13" },
+ {}
+};
+
+const struct nvkm_enum
+gv100_fifo_mmu_fault_access[] = {
+ { 0x0, "VIRT_READ" },
+ { 0x1, "VIRT_WRITE" },
+ { 0x2, "VIRT_ATOMIC" },
+ { 0x3, "VIRT_PREFETCH" },
+ { 0x4, "VIRT_ATOMIC_WEAK" },
+ { 0x8, "PHYS_READ" },
+ { 0x9, "PHYS_WRITE" },
+ { 0xa, "PHYS_ATOMIC" },
+ { 0xb, "PHYS_PREFETCH" },
+ {}
+};
+
+static const struct nvkm_fifo_func_mmu_fault
+gv100_fifo_mmu_fault = {
+ .recover = gf100_fifo_mmu_fault_recover,
+ .access = gv100_fifo_mmu_fault_access,
+ .engine = gv100_fifo_mmu_fault_engine,
+ .reason = gv100_fifo_mmu_fault_reason,
+ .hubclient = gv100_fifo_mmu_fault_hubclient,
+ .gpcclient = gv100_fifo_mmu_fault_gpcclient,
+};
+
+static void
+gv100_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo, u32 engm)
+{
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id))
+ nvkm_runl_rc_engn(runl, engn);
+ }
+}
+
+static const struct nvkm_fifo_func
+gv100_fifo = {
+ .chid_nr = gm200_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gm200_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = gk104_fifo_init_pbdmas,
+ .intr = gk104_fifo_intr,
+ .intr_ctxsw_timeout = gv100_fifo_intr_ctxsw_timeout,
+ .mmu_fault = &gv100_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &gv100_runl,
+ .runq = &gv100_runq,
+ .engn = &gv100_engn,
+ .engn_ce = &gv100_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp, .force = true },
+ .chan = {{ 0, 0, VOLTA_CHANNEL_GPFIFO_A }, &gv100_chan },
+};
+
+int
+gv100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&gv100_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
new file mode 100644
index 000000000..674faf002
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include "regsnv04.h"
+
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <engine/sw.h>
+
+#include <nvif/class.h>
+
+void
+nv04_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_memory *fctx = device->imem->ramfc;
+ const struct nvkm_ramfc_layout *c;
+ unsigned long flags;
+ u32 data = chan->ramfc_offset;
+ u32 chid;
+
+ /* prevent fifo context switches */
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
+ if (chid == chan->id) {
+ nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ c = chan->func->ramfc->layout;
+ nvkm_kmap(fctx);
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
+ u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
+ nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+ nvkm_done(fctx);
+
+ c = chan->func->ramfc->layout;
+ do {
+ nvkm_wr32(device, c->regp, 0x00000000);
+ } while ((++c)->bits);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ /* restore normal operation, after disabling dma mode */
+ nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nv04_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_mask(fifo->engine.subdev.device, NV04_PFIFO_MODE, BIT(chan->id), BIT(chan->id));
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nv04_chan_ramfc_clear(struct nvkm_chan *chan)
+{
+ struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
+ const struct nvkm_ramfc_layout *c = chan->func->ramfc->layout;
+
+ nvkm_kmap(ramfc);
+ do {
+ nvkm_wo32(ramfc, chan->ramfc_offset + c->ctxp, 0x00000000);
+ } while ((++c)->bits);
+ nvkm_done(ramfc);
+}
+
+static int
+nv04_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
+ const u32 base = chan->id * 32;
+
+ chan->ramfc_offset = base;
+
+ nvkm_kmap(ramfc);
+ nvkm_wo32(ramfc, base + 0x00, offset);
+ nvkm_wo32(ramfc, base + 0x04, offset);
+ nvkm_wo32(ramfc, base + 0x08, chan->push->addr >> 4);
+ nvkm_wo32(ramfc, base + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(ramfc);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+nv04_chan_ramfc = {
+ .layout = (const struct nvkm_ramfc_layout[]) {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+ },
+ .write = nv04_chan_ramfc_write,
+ .clear = nv04_chan_ramfc_clear,
+ .ctxdma = true,
+};
+
+const struct nvkm_chan_func_userd
+nv04_chan_userd = {
+ .bar = 0,
+ .base = 0x800000,
+ .size = 0x010000,
+};
+
+const struct nvkm_chan_func_inst
+nv04_chan_inst = {
+ .size = 0x1000,
+};
+
+static const struct nvkm_chan_func
+nv04_chan = {
+ .inst = &nv04_chan_inst,
+ .userd = &nv04_chan_userd,
+ .ramfc = &nv04_chan_ramfc,
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
+};
+
+const struct nvkm_cgrp_func
+nv04_cgrp = {
+};
+
+void
+nv04_eobj_ramht_del(struct nvkm_chan *chan, int hash)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
+
+ mutex_lock(&fifo->mutex);
+ nvkm_ramht_remove(imem->ramht, hash);
+ mutex_unlock(&fifo->mutex);
+}
+
+static int
+nv04_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
+ u32 context = 0x80000000 | chan->id << 24 | engn->id << 16;
+ int hash;
+
+ mutex_lock(&fifo->mutex);
+ hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context);
+ mutex_unlock(&fifo->mutex);
+ return hash;
+}
+
+const struct nvkm_engn_func
+nv04_engn = {
+ .ramht_add = nv04_eobj_ramht_add,
+ .ramht_del = nv04_eobj_ramht_del,
+};
+
+void
+nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags)
+__acquires(fifo->lock)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ *pflags = flags;
+
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+ /* In some cases the puller may be left in an inconsistent state
+ * if you try to stop it while it's busy translating handles.
+ * Sometimes you get a CACHE_ERROR, sometimes it just fails
+ * silently, sending incorrect instance offsets to PGRAPH after
+ * it's started up again.
+ *
+ * To avoid this, we invalidate the most recently calculated
+ * instance.
+ */
+ nvkm_msec(device, 2000,
+ u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
+ if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
+ break;
+ );
+
+ if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
+
+void
+nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags)
+__releases(fifo->lock)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ unsigned long flags = *pflags;
+
+ nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
+
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+const struct nvkm_runl_func
+nv04_runl = {
+};
+
+static const char *
+nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
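+ /* Try to execute a method on the CPU via the in-kernel software class;
+ * returns true if the method was handled there.
+ */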
+static bool
+nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
+{
+ struct nvkm_sw *sw = device->sw;
+ const int subc = (addr & 0x0000e000) >> 13;
+ const int mthd = (addr & 0x00001ffc);
+ const u32 mask = 0x0000000f << (subc * 4);
+ u32 engine = nvkm_rd32(device, 0x003280);
+ bool handled = false;
+
+ switch (mthd) {
+ case 0x0000 ... 0x0000: /* subchannel's engine -> software */
+ nvkm_wr32(device, 0x003280, (engine &= ~mask));
+ fallthrough;
+ case 0x0180 ... 0x01fc: /* handle -> instance */
+ data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+ fallthrough;
+ case 0x0100 ... 0x017c:
+ case 0x0200 ... 0x1ffc: /* pass method down to sw */
+ if (!(engine & mask) && sw)
+ handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
+ break;
+ default:
+ break;
+ }
+
+ return handled;
+}
+
+static void
+nv04_fifo_intr_cache_error(struct nvkm_fifo *fifo, u32 chid, u32 get)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+ u32 pull0 = nvkm_rd32(device, 0x003250);
+ u32 mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on
+ * G80 chips, but CACHE1 isn't big enough for this much data.  Tests
+ * show that it wraps around to the start at GET=0x800, though the
+ * reason is unknown.
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (device->card_type < NV_40) {
+ mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!(pull0 & 0x00000100) ||
+ !nv04_fifo_swmthd(device, chid, mthd, data)) {
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ nvkm_error(subdev, "CACHE_ERROR - "
+ "ch %d [%s] subc %d mthd %04x data %08x\n",
+ chid, chan ? chan->name : "unknown",
+ (mthd >> 13) & 7, mthd & 0x1ffc, data);
+ nvkm_chan_put(&chan, flags);
+ }
+
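+ /* Ack the interrupt, advance GET past the offending method, and
+ * restart CACHE1 push/pull.
+ */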
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+ nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
+ nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+}
+
+static void
+nv04_fifo_intr_dma_pusher(struct nvkm_fifo *fifo, u32 chid)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 dma_get = nvkm_rd32(device, 0x003244);
+ u32 dma_put = nvkm_rd32(device, 0x003240);
+ u32 push = nvkm_rd32(device, 0x003220);
+ u32 state = nvkm_rd32(device, 0x003228);
+ struct nvkm_chan *chan;
+ unsigned long flags;
+ const char *name;
+
+ chan = nvkm_chan_get_chid(&fifo->engine, chid, &flags);
+ name = chan ? chan->name : "unknown";
+ if (device->card_type == NV_50) {
+ u32 ho_get = nvkm_rd32(device, 0x003328);
+ u32 ho_put = nvkm_rd32(device, 0x003320);
+ u32 ib_get = nvkm_rd32(device, 0x003334);
+ u32 ib_put = nvkm_rd32(device, 0x003330);
+
+ nvkm_error(subdev, "DMA_PUSHER - "
+ "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
+ "ib_put %08x state %08x (err: %s) push %08x\n",
+ chid, name, ho_get, dma_get, ho_put, dma_put,
+ ib_get, ib_put, state, nv_dma_state_err(state),
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nvkm_wr32(device, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nvkm_wr32(device, 0x003244, dma_put);
+ nvkm_wr32(device, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put)
+ nvkm_wr32(device, 0x003334, ib_put);
+ } else {
+ nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
+ "state %08x (err: %s) push %08x\n",
+ chid, name, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
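+ /* Recover by jumping GET to PUT, dropping whatever remains between
+ * them in the pushbuffer.
+ */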
+ if (dma_get != dma_put)
+ nvkm_wr32(device, 0x003244, dma_put);
+ }
+ nvkm_chan_put(&chan, flags);
+
+ nvkm_wr32(device, 0x003228, 0x00000000);
+ nvkm_wr32(device, 0x003220, 0x00000001);
+ nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+}
+
+irqreturn_t
+nv04_fifo_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
+ u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
+ u32 reassign, chid, get, sem;
+
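+ /* Prevent PFIFO from switching channels while the interrupt is
+ * handled; the previous reassign state is restored at the end.
+ */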
+ reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 0);
+
+ chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
+ get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);
+
+ if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
+ nv04_fifo_intr_cache_error(fifo, chid, get);
+ stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
+ nv04_fifo_intr_dma_pusher(fifo, chid);
+ stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (stat & NV_PFIFO_INTR_SEMAPHORE) {
+ stat &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (device->card_type == NV_50) {
+ if (stat & 0x00000010) {
+ stat &= ~0x00000010;
+ nvkm_wr32(device, 0x002100, 0x00000010);
+ }
+
+ if (stat & 0x40000000) {
+ nvkm_wr32(device, 0x002100, 0x40000000);
+ nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+ stat &= ~0x40000000;
+ }
+ }
+
+ if (stat) {
+ nvkm_warn(subdev, "intr %08x\n", stat);
+ nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
+ }
+
+ nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
+ return IRQ_HANDLED;
+}
+
+void
+nv04_fifo_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
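+ /* Point PFIFO at the RAMHT, RAMRO and RAMFC areas in instance memory. */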
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+ nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+}
+
+int
+nv04_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+
+ runl = nvkm_runl_new(fifo, 0, 0, 0);
+ if (IS_ERR(runl))
+ return PTR_ERR(runl);
+
+ nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_SW, 0);
+ nvkm_runl_add(runl, 0, fifo->func->engn_sw, NVKM_ENGINE_DMAOBJ, 0);
+ nvkm_runl_add(runl, 1, fifo->func->engn , NVKM_ENGINE_GR, 0);
+ nvkm_runl_add(runl, 2, fifo->func->engn , NVKM_ENGINE_MPEG, 0); /* NV31- */
+ return 0;
+}
+
+int
+nv04_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
+{
+ /* The last CHID is reserved by HW as a "channel invalid" marker. */
+ return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 0, nr - 1, &fifo->chid);
+}
+
+static int
+nv04_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return 16;
+}
+
+static const struct nvkm_fifo_func
+nv04_fifo = {
+ .chid_nr = nv04_fifo_chid_nr,
+ .chid_ctor = nv04_fifo_chid_ctor,
+ .runl_ctor = nv04_fifo_runl_ctor,
+ .init = nv04_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .runl = &nv04_runl,
+ .engn = &nv04_engn,
+ .engn_sw = &nv04_engn,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, NV03_CHANNEL_DMA }, &nv04_chan },
+};
+
+int
+nv04_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&nv04_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
new file mode 100644
index 000000000..a4bcf6b0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <subdev/instmem.h>
+
+#include "regsnv04.h"
+
+#include <nvif/class.h>
+
+static int
+nv10_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
+ const u32 base = chan->id * 32;
+
+ chan->ramfc_offset = base;
+
+ nvkm_kmap(ramfc);
+ nvkm_wo32(ramfc, base + 0x00, offset);
+ nvkm_wo32(ramfc, base + 0x04, offset);
+ nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
+ nvkm_wo32(ramfc, base + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(ramfc);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+nv10_chan_ramfc = {
+ .layout = (const struct nvkm_ramfc_layout[]) {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+ },
+ .write = nv10_chan_ramfc_write,
+ .clear = nv04_chan_ramfc_clear,
+ .ctxdma = true,
+};
+
+static const struct nvkm_chan_func
+nv10_chan = {
+ .inst = &nv04_chan_inst,
+ .userd = &nv04_chan_userd,
+ .ramfc = &nv10_chan_ramfc,
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
+};
+
+int
+nv10_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return 32;
+}
+
+static const struct nvkm_fifo_func
+nv10_fifo = {
+ .chid_nr = nv10_fifo_chid_nr,
+ .chid_ctor = nv04_fifo_chid_ctor,
+ .runl_ctor = nv04_fifo_runl_ctor,
+ .init = nv04_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .runl = &nv04_runl,
+ .engn = &nv04_engn,
+ .engn_sw = &nv04_engn,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, NV10_CHANNEL_DMA }, &nv10_chan },
+};
+
+int
+nv10_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&nv10_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
new file mode 100644
index 000000000..c70f44fd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include "regsnv04.h"
+
+#include <core/ramht.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+
+static int
+nv17_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
+ const u32 base = chan->id * 64;
+
+ chan->ramfc_offset = base;
+
+ nvkm_kmap(ramfc);
+ nvkm_wo32(ramfc, base + 0x00, offset);
+ nvkm_wo32(ramfc, base + 0x04, offset);
+ nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
+ nvkm_wo32(ramfc, base + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_done(ramfc);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+nv17_chan_ramfc = {
+ .layout = (const struct nvkm_ramfc_layout[]) {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+ },
+ .write = nv17_chan_ramfc_write,
+ .clear = nv04_chan_ramfc_clear,
+ .ctxdma = true,
+};
+
+static const struct nvkm_chan_func
+nv17_chan = {
+ .inst = &nv04_chan_inst,
+ .userd = &nv04_chan_userd,
+ .ramfc = &nv17_chan_ramfc,
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
+};
+
+static void
+nv17_fifo_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+ nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+ 0x00010000);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+}
+
+static const struct nvkm_fifo_func
+nv17_fifo = {
+ .chid_nr = nv10_fifo_chid_nr,
+ .chid_ctor = nv04_fifo_chid_ctor,
+ .runl_ctor = nv04_fifo_runl_ctor,
+ .init = nv17_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .runl = &nv04_runl,
+ .engn = &nv04_engn,
+ .engn_sw = &nv04_engn,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, NV17_CHANNEL_DMA }, &nv17_chan },
+};
+
+int
+nv17_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&nv17_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
new file mode 100644
index 000000000..e50a94b6d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include "regsnv04.h"
+
+#include <core/ramht.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+
+#include <nvif/class.h>
+
+static int
+nv40_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_memory *ramfc = chan->cgrp->runl->fifo->engine.subdev.device->imem->ramfc;
+ const u32 base = chan->id * 128;
+
+ chan->ramfc_offset = base;
+
+ nvkm_kmap(ramfc);
+ nvkm_wo32(ramfc, base + 0x00, offset);
+ nvkm_wo32(ramfc, base + 0x04, offset);
+ nvkm_wo32(ramfc, base + 0x0c, chan->push->addr >> 4);
+ nvkm_wo32(ramfc, base + 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nvkm_wo32(ramfc, base + 0x3c, 0x0001ffff);
+ nvkm_done(ramfc);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+nv40_chan_ramfc = {
+ .layout = (const struct nvkm_ramfc_layout[]) {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+ },
+ .write = nv40_chan_ramfc_write,
+ .clear = nv04_chan_ramfc_clear,
+ .ctxdma = true,
+};
+
+static const struct nvkm_chan_func_userd
+nv40_chan_userd = {
+ .bar = 0,
+ .base = 0xc00000,
+ .size = 0x001000,
+};
+
+static const struct nvkm_chan_func
+nv40_chan = {
+ .inst = &nv04_chan_inst,
+ .userd = &nv40_chan_userd,
+ .ramfc = &nv40_chan_ramfc,
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
+};
+
+static int
+nv40_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_instmem *imem = fifo->engine.subdev.device->imem;
+ u32 context = chan->id << 23 | engn->id << 20;
+ int hash;
+
+ mutex_lock(&fifo->mutex);
+ hash = nvkm_ramht_insert(imem->ramht, eobj, chan->id, 4, eobj->handle, context);
+ mutex_unlock(&fifo->mutex);
+ return hash;
+}
+
+static void
+nv40_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_memory *ramfc = device->imem->ramfc;
+ u32 inst = 0x00000000, reg, ctx;
+ int chid;
+
+ switch (engn->engine->subdev.type) {
+ case NVKM_ENGINE_GR:
+ reg = 0x0032e0;
+ ctx = 0x38;
+ break;
+ case NVKM_ENGINE_MPEG:
+ if (WARN_ON(device->chipset < 0x44))
+ return;
+ reg = 0x00330c;
+ ctx = 0x54;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (cctx)
+ inst = cctx->vctx->inst->addr >> 4;
+
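+ /* The engine context pointer lives both in a live PFIFO register (for
+ * the channel currently in CACHE1) and in each channel's RAMFC; update
+ * both so the new pointer also survives a channel switch.
+ */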
+ spin_lock_irq(&fifo->lock);
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000000);
+
+ chid = nvkm_rd32(device, 0x003204) & (fifo->chid->nr - 1);
+ if (chid == chan->id)
+ nvkm_wr32(device, reg, inst);
+
+ nvkm_kmap(ramfc);
+ nvkm_wo32(ramfc, chan->ramfc_offset + ctx, inst);
+ nvkm_done(ramfc);
+
+ nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irq(&fifo->lock);
+}
+
+static const struct nvkm_engn_func
+nv40_engn = {
+ .bind = nv40_ectx_bind,
+ .ramht_add = nv40_eobj_ramht_add,
+ .ramht_del = nv04_eobj_ramht_del,
+};
+
+static const struct nvkm_engn_func
+nv40_engn_sw = {
+ .ramht_add = nv40_eobj_ramht_add,
+ .ramht_del = nv04_eobj_ramht_del,
+};
+
+static void
+nv40_fifo_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
+ struct nvkm_instmem *imem = device->imem;
+ struct nvkm_ramht *ramht = imem->ramht;
+ struct nvkm_memory *ramro = imem->ramro;
+ struct nvkm_memory *ramfc = imem->ramfc;
+
+ nvkm_wr32(device, 0x002040, 0x000000ff);
+ nvkm_wr32(device, 0x002044, 0x2101ffff);
+ nvkm_wr32(device, 0x002058, 0x00000001);
+
+ nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((ramht->bits - 9) << 16) |
+ (ramht->gpuobj->addr >> 8));
+ nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+
+ switch (device->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nvkm_wr32(device, 0x002230, 0x00000001);
+ fallthrough;
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x48:
+ nvkm_wr32(device, 0x002220, 0x00030002);
+ break;
+ default:
+ nvkm_wr32(device, 0x002230, 0x00000000);
+ nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
+ nvkm_memory_addr(ramfc)) >> 16) |
+ 0x00030000);
+ break;
+ }
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->chid->mask);
+
+ nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
+ nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
+ nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
+}
+
+static const struct nvkm_fifo_func
+nv40_fifo = {
+ .chid_nr = nv10_fifo_chid_nr,
+ .chid_ctor = nv04_fifo_chid_ctor,
+ .runl_ctor = nv04_fifo_runl_ctor,
+ .init = nv40_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .runl = &nv04_runl,
+ .engn = &nv40_engn,
+ .engn_sw = &nv40_engn_sw,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, NV40_CHANNEL_DMA }, &nv40_chan },
+};
+
+int
+nv40_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&nv40_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
new file mode 100644
index 000000000..954b5f3a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include <core/ramht.h>
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+void
+nv50_eobj_ramht_del(struct nvkm_chan *chan, int hash)
+{
+ nvkm_ramht_remove(chan->ramht, hash);
+}
+
+int
+nv50_eobj_ramht_add(struct nvkm_engn *engn, struct nvkm_object *eobj, struct nvkm_chan *chan)
+{
+ return nvkm_ramht_insert(chan->ramht, eobj, 0, 4, eobj->handle, engn->id << 20);
+}
+
+void
+nv50_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+}
+
+void
+nv50_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x80000000);
+}
+
+void
+nv50_chan_unbind(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x002600 + (chan->id * 4), 0x00000000);
+}
+
+static void
+nv50_chan_bind(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x002600 + (chan->id * 4), chan->ramfc->addr >> 12);
+}
+
+static int
+nv50_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+ const u32 limit2 = ilog2(length / 8);
+ int ret;
+
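+ /* RAMFC, engine context table, page directory and object hash table
+ * are all allocated within the channel's own instance block.
+ */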
+ ret = nvkm_gpuobj_new(device, 0x0200, 0x1000, true, chan->inst, &chan->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x1200, 0, true, chan->inst, &chan->eng);
+ if (ret)
+ return ret;
+
+ ret = nvkm_gpuobj_new(device, 0x4000, 0, false, chan->inst, &chan->pgd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_ramht_new(device, 0x8000, 16, chan->inst, &chan->ramht);
+ if (ret)
+ return ret;
+
+ nvkm_kmap(chan->ramfc);
+ nvkm_wo32(chan->ramfc, 0x3c, 0x403f6078);
+ nvkm_wo32(chan->ramfc, 0x44, 0x01003fff);
+ nvkm_wo32(chan->ramfc, 0x48, chan->push->node->offset >> 4);
+ nvkm_wo32(chan->ramfc, 0x50, lower_32_bits(offset));
+ nvkm_wo32(chan->ramfc, 0x54, upper_32_bits(offset) | (limit2 << 16));
+ nvkm_wo32(chan->ramfc, 0x60, 0x7fffffff);
+ nvkm_wo32(chan->ramfc, 0x78, 0x00000000);
+ nvkm_wo32(chan->ramfc, 0x7c, 0x30000000 | devm);
+ nvkm_wo32(chan->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->node->offset >> 4));
+ nvkm_done(chan->ramfc);
+ return 0;
+}
+
+static const struct nvkm_chan_func_ramfc
+nv50_chan_ramfc = {
+ .write = nv50_chan_ramfc_write,
+ .ctxdma = true,
+ .devm = 0xfff,
+};
+
+const struct nvkm_chan_func_userd
+nv50_chan_userd = {
+ .bar = 0,
+ .base = 0xc00000,
+ .size = 0x002000,
+};
+
+const struct nvkm_chan_func_inst
+nv50_chan_inst = {
+ .size = 0x10000,
+ .vmm = true,
+};
+
+static const struct nvkm_chan_func
+nv50_chan = {
+ .inst = &nv50_chan_inst,
+ .userd = &nv50_chan_userd,
+ .ramfc = &nv50_chan_ramfc,
+ .bind = nv50_chan_bind,
+ .unbind = nv50_chan_unbind,
+ .start = nv50_chan_start,
+ .stop = nv50_chan_stop,
+};
+
+static void
+nv50_ectx_bind(struct nvkm_engn *engn, struct nvkm_cctx *cctx, struct nvkm_chan *chan)
+{
+ struct nvkm_subdev *subdev = &chan->cgrp->runl->fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u64 start = 0, limit = 0;
+ u32 flags = 0, ptr0, save;
+
+ switch (engn->engine->subdev.type) {
+ case NVKM_ENGINE_GR : ptr0 = 0x0000; break;
+ case NVKM_ENGINE_MPEG : ptr0 = 0x0060; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ if (!cctx) {
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+ * Newer chipsets don't seem to suffer from this issue, and they
+ * also provide an "ignore these engines" bitmask register we could
+ * use if the problem does appear there.
+ */
+ save = nvkm_mask(device, 0x00b860, 0x00000001, 0x00000001);
+
+ /* Tell engines to save out contexts. */
+ nvkm_wr32(device, 0x0032fc, chan->inst->addr >> 12);
+ nvkm_msec(device, 2000,
+ if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
+ break;
+ );
+ nvkm_wr32(device, 0x00b860, save);
+ } else {
+ flags = 0x00190000;
+ start = cctx->vctx->inst->addr;
+ limit = start + cctx->vctx->inst->size - 1;
+ }
+
+ nvkm_kmap(chan->eng);
+ nvkm_wo32(chan->eng, ptr0 + 0x00, flags);
+ nvkm_wo32(chan->eng, ptr0 + 0x04, lower_32_bits(limit));
+ nvkm_wo32(chan->eng, ptr0 + 0x08, lower_32_bits(start));
+ nvkm_wo32(chan->eng, ptr0 + 0x0c, upper_32_bits(limit) << 24 |
+ lower_32_bits(start));
+ nvkm_wo32(chan->eng, ptr0 + 0x10, 0x00000000);
+ nvkm_wo32(chan->eng, ptr0 + 0x14, 0x00000000);
+ nvkm_done(chan->eng);
+}
+
+static const struct nvkm_engn_func
+nv50_engn = {
+ .bind = nv50_ectx_bind,
+ .ramht_add = nv50_eobj_ramht_add,
+ .ramht_del = nv50_eobj_ramht_del,
+};
+
+const struct nvkm_engn_func
+nv50_engn_sw = {
+ .ramht_add = nv50_eobj_ramht_add,
+ .ramht_del = nv50_eobj_ramht_del,
+};
+
+static bool
+nv50_runl_pending(struct nvkm_runl *runl)
+{
+ return nvkm_rd32(runl->fifo->engine.subdev.device, 0x0032ec) & 0x00000100;
+}
+
+int
+nv50_runl_wait(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+
+ nvkm_msec(fifo->engine.subdev.device, fifo->timeout.chan_msec,
+ if (!nvkm_runl_update_pending(runl))
+ return 0;
+ usleep_range(1, 2);
+ );
+
+ return -ETIMEDOUT;
+}
+
+static void
+nv50_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ u64 addr = nvkm_memory_addr(memory) + start;
+
+ nvkm_wr32(device, 0x0032f4, addr >> 12);
+ nvkm_wr32(device, 0x0032ec, count);
+}
+
+static void
+nv50_runl_insert_chan(struct nvkm_chan *chan, struct nvkm_memory *memory, u64 offset)
+{
+ nvkm_wo32(memory, offset, chan->id);
+}
+
+static struct nvkm_memory *
+nv50_runl_alloc(struct nvkm_runl *runl, u32 *offset)
+{
+ const u32 segment = ALIGN((runl->cgrp_nr + runl->chan_nr) * runl->func->size, 0x1000);
+ const u32 maxsize = (runl->cgid ? runl->cgid->nr : 0) + runl->chid->nr;
+ int ret;
+
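+ /* The buffer is sized for two full runlists; when there's no room for
+ * another copy, wait for any pending update to finish before rewinding
+ * to the start.
+ */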
+ if (unlikely(!runl->mem)) {
+ ret = nvkm_memory_new(runl->fifo->engine.subdev.device, NVKM_MEM_TARGET_INST,
+ maxsize * 2 * runl->func->size, 0, false, &runl->mem);
+ if (ret) {
+ RUNL_ERROR(runl, "alloc %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ } else {
+ if (runl->offset + segment >= nvkm_memory_size(runl->mem)) {
+ ret = runl->func->wait(runl);
+ if (ret) {
+ RUNL_DEBUG(runl, "rewind timeout");
+ return ERR_PTR(ret);
+ }
+
+ runl->offset = 0;
+ }
+ }
+
+ *offset = runl->offset;
+ runl->offset += segment;
+ return runl->mem;
+}
+
+int
+nv50_runl_update(struct nvkm_runl *runl)
+{
+ struct nvkm_memory *memory;
+ struct nvkm_cgrp *cgrp;
+ struct nvkm_chan *chan;
+ u32 start, offset, count;
+
+ /*TODO: prio, interleaving. */
+
+ RUNL_TRACE(runl, "RAMRL: update cgrps:%d chans:%d", runl->cgrp_nr, runl->chan_nr);
+ memory = nv50_runl_alloc(runl, &start);
+ if (IS_ERR(memory))
+ return PTR_ERR(memory);
+
+ RUNL_TRACE(runl, "RAMRL: update start:%08x", start);
+ offset = start;
+
+ nvkm_kmap(memory);
+ nvkm_runl_foreach_cgrp(cgrp, runl) {
+ if (cgrp->hw) {
+ CGRP_TRACE(cgrp, " RAMRL+%08x: chans:%d", offset, cgrp->chan_nr);
+ runl->func->insert_cgrp(cgrp, memory, offset);
+ offset += runl->func->size;
+ }
+
+ nvkm_cgrp_foreach_chan(chan, cgrp) {
+ CHAN_TRACE(chan, "RAMRL+%08x: [%s]", offset, chan->name);
+ runl->func->insert_chan(chan, memory, offset);
+ offset += runl->func->size;
+ }
+ }
+ nvkm_done(memory);
+
+ /*TODO: look into using features on newer HW to guarantee forward progress. */
+ list_rotate_left(&runl->cgrps);
+
+ count = (offset - start) / runl->func->size;
+ RUNL_TRACE(runl, "RAMRL: commit start:%08x count:%d", start, count);
+
+ runl->func->commit(runl, memory, start, count);
+ return 0;
+}
+
+const struct nvkm_runl_func
+nv50_runl = {
+ .size = 4,
+ .update = nv50_runl_update,
+ .insert_chan = nv50_runl_insert_chan,
+ .commit = nv50_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = nv50_runl_pending,
+};
+
+void
+nv50_fifo_init(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl = nvkm_runl_first(fifo);
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ int i;
+
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000000);
+ nvkm_mask(device, 0x000200, 0x00000100, 0x00000100);
+ nvkm_wr32(device, 0x00250c, 0x6f3cfc34);
+ nvkm_wr32(device, 0x002044, 0x01003fff);
+
+ nvkm_wr32(device, 0x002100, 0xffffffff);
+ nvkm_wr32(device, 0x002140, 0xbfffffff);
+
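+ /* Clear all 128 channel context-table entries (NV50_PFIFO_CTX_TABLE). */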
+ for (i = 0; i < 128; i++)
+ nvkm_wr32(device, 0x002600 + (i * 4), 0x00000000);
+
+ atomic_set(&runl->changed, 1);
+ runl->func->update(runl);
+
+ nvkm_wr32(device, 0x003200, 0x00000001);
+ nvkm_wr32(device, 0x003250, 0x00000001);
+ nvkm_wr32(device, 0x002500, 0x00000001);
+}
+
+int
+nv50_fifo_chid_ctor(struct nvkm_fifo *fifo, int nr)
+{
+ /* CHID 0 is unusable (some kind of PIO channel?), 127 is "channel invalid". */
+ return nvkm_chid_new(&nvkm_chan_event, &fifo->engine.subdev, nr, 1, nr - 2, &fifo->chid);
+}
+
+int
+nv50_fifo_chid_nr(struct nvkm_fifo *fifo)
+{
+ return 128;
+}
+
+static const struct nvkm_fifo_func
+nv50_fifo = {
+ .chid_nr = nv50_fifo_chid_nr,
+ .chid_ctor = nv50_fifo_chid_ctor,
+ .runl_ctor = nv04_fifo_runl_ctor,
+ .init = nv50_fifo_init,
+ .intr = nv04_fifo_intr,
+ .pause = nv04_fifo_pause,
+ .start = nv04_fifo_start,
+ .runl = &nv50_runl,
+ .engn = &nv50_engn,
+ .engn_sw = &nv50_engn_sw,
+ .cgrp = {{ }, &nv04_cgrp },
+ .chan = {{ 0, 0, NV50_CHANNEL_GPFIFO }, &nv50_chan },
+};
+
+int
+nv50_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&nv50_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
new file mode 100644
index 000000000..4d448be19
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_FIFO_PRIV_H__
+#define __NVKM_FIFO_PRIV_H__
+#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
+#include <engine/fifo.h>
+#include <core/enum.h>
+struct nvkm_cctx;
+struct nvkm_cgrp;
+struct nvkm_engn;
+struct nvkm_memory;
+struct nvkm_runl;
+struct nvkm_runq;
+struct nvkm_vctx;
+
+struct nvkm_fifo_func {
+ int (*chid_nr)(struct nvkm_fifo *);
+ int (*chid_ctor)(struct nvkm_fifo *, int nr);
+ int (*runq_nr)(struct nvkm_fifo *);
+ int (*runl_ctor)(struct nvkm_fifo *);
+
+ void (*init)(struct nvkm_fifo *);
+ void (*init_pbdmas)(struct nvkm_fifo *, u32 mask);
+
+ irqreturn_t (*intr)(struct nvkm_inth *);
+ void (*intr_mmu_fault_unit)(struct nvkm_fifo *, int unit);
+ void (*intr_ctxsw_timeout)(struct nvkm_fifo *, u32 engm);
+
+ const struct nvkm_fifo_func_mmu_fault {
+ void (*recover)(struct nvkm_fifo *, struct nvkm_fault_data *);
+ const struct nvkm_enum *access;
+ const struct nvkm_enum *engine;
+ const struct nvkm_enum *reason;
+ const struct nvkm_enum *hubclient;
+ const struct nvkm_enum *gpcclient;
+ } *mmu_fault;
+
+ void (*pause)(struct nvkm_fifo *, unsigned long *);
+ void (*start)(struct nvkm_fifo *, unsigned long *);
+
+ int (*nonstall_ctor)(struct nvkm_fifo *);
+ const struct nvkm_event_func *nonstall;
+
+ const struct nvkm_runl_func *runl;
+ const struct nvkm_runq_func *runq;
+ const struct nvkm_engn_func *engn;
+ const struct nvkm_engn_func *engn_sw;
+ const struct nvkm_engn_func *engn_ce;
+
+ struct nvkm_fifo_func_cgrp {
+ struct nvkm_sclass user;
+ const struct nvkm_cgrp_func *func;
+ bool force;
+ } cgrp;
+
+ struct nvkm_fifo_func_chan {
+ struct nvkm_sclass user;
+ const struct nvkm_chan_func *func;
+ } chan;
+};
+
+int nvkm_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+ struct nvkm_fifo **);
+
+int nv04_fifo_chid_ctor(struct nvkm_fifo *, int);
+int nv04_fifo_runl_ctor(struct nvkm_fifo *);
+void nv04_fifo_init(struct nvkm_fifo *);
+irqreturn_t nv04_fifo_intr(struct nvkm_inth *);
+void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
+void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);
+extern const struct nvkm_runl_func nv04_runl;
+extern const struct nvkm_engn_func nv04_engn;
+extern const struct nvkm_cgrp_func nv04_cgrp;
+extern const struct nvkm_chan_func_inst nv04_chan_inst;
+extern const struct nvkm_chan_func_userd nv04_chan_userd;
+void nv04_chan_ramfc_clear(struct nvkm_chan *);
+void nv04_chan_start(struct nvkm_chan *);
+void nv04_chan_stop(struct nvkm_chan *);
+void nv04_eobj_ramht_del(struct nvkm_chan *, int);
+
+int nv10_fifo_chid_nr(struct nvkm_fifo *);
+
+int nv50_fifo_chid_nr(struct nvkm_fifo *);
+int nv50_fifo_chid_ctor(struct nvkm_fifo *, int);
+void nv50_fifo_init(struct nvkm_fifo *);
+extern const struct nvkm_runl_func nv50_runl;
+int nv50_runl_update(struct nvkm_runl *);
+int nv50_runl_wait(struct nvkm_runl *);
+extern const struct nvkm_engn_func nv50_engn_sw;
+extern const struct nvkm_chan_func_inst nv50_chan_inst;
+extern const struct nvkm_chan_func_userd nv50_chan_userd;
+void nv50_chan_unbind(struct nvkm_chan *);
+void nv50_chan_start(struct nvkm_chan *);
+void nv50_chan_stop(struct nvkm_chan *);
+void nv50_chan_preempt(struct nvkm_chan *);
+int nv50_eobj_ramht_add(struct nvkm_engn *, struct nvkm_object *, struct nvkm_chan *);
+void nv50_eobj_ramht_del(struct nvkm_chan *, int);
+
+extern const struct nvkm_event_func g84_fifo_nonstall;
+extern const struct nvkm_engn_func g84_engn;
+extern const struct nvkm_chan_func g84_chan;
+
+int gf100_fifo_chid_ctor(struct nvkm_fifo *, int);
+int gf100_fifo_runq_nr(struct nvkm_fifo *);
+bool gf100_fifo_intr_pbdma(struct nvkm_fifo *);
+void gf100_fifo_intr_mmu_fault(struct nvkm_fifo *);
+void gf100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *, int);
+void gf100_fifo_intr_sched(struct nvkm_fifo *);
+void gf100_fifo_intr_ctxsw_timeout(struct nvkm_fifo *, u32);
+void gf100_fifo_mmu_fault_recover(struct nvkm_fifo *, struct nvkm_fault_data *);
+extern const struct nvkm_enum gf100_fifo_mmu_fault_access[];
+extern const struct nvkm_event_func gf100_fifo_nonstall;
+bool gf100_runl_preempt_pending(struct nvkm_runl *);
+void gf100_runq_init(struct nvkm_runq *);
+bool gf100_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
+void gf100_engn_mmu_fault_trigger(struct nvkm_engn *);
+bool gf100_engn_mmu_fault_triggered(struct nvkm_engn *);
+extern const struct nvkm_engn_func gf100_engn_sw;
+extern const struct nvkm_chan_func_inst gf100_chan_inst;
+void gf100_chan_userd_clear(struct nvkm_chan *);
+void gf100_chan_preempt(struct nvkm_chan *);
+
+int gk104_fifo_chid_nr(struct nvkm_fifo *);
+int gk104_fifo_runl_ctor(struct nvkm_fifo *);
+void gk104_fifo_init(struct nvkm_fifo *);
+void gk104_fifo_init_pbdmas(struct nvkm_fifo *, u32);
+irqreturn_t gk104_fifo_intr(struct nvkm_inth *);
+void gk104_fifo_intr_runlist(struct nvkm_fifo *);
+void gk104_fifo_intr_chsw(struct nvkm_fifo *);
+void gk104_fifo_intr_bind(struct nvkm_fifo *);
+extern const struct nvkm_fifo_func_mmu_fault gk104_fifo_mmu_fault;
+extern const struct nvkm_enum gk104_fifo_mmu_fault_reason[];
+extern const struct nvkm_enum gk104_fifo_mmu_fault_hubclient[];
+extern const struct nvkm_enum gk104_fifo_mmu_fault_gpcclient[];
+void gk104_runl_insert_chan(struct nvkm_chan *, struct nvkm_memory *, u64);
+void gk104_runl_commit(struct nvkm_runl *, struct nvkm_memory *, u32, int);
+bool gk104_runl_pending(struct nvkm_runl *);
+void gk104_runl_block(struct nvkm_runl *, u32);
+void gk104_runl_allow(struct nvkm_runl *, u32);
+void gk104_runl_fault_clear(struct nvkm_runl *);
+extern const struct nvkm_runq_func gk104_runq;
+void gk104_runq_init(struct nvkm_runq *);
+bool gk104_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
+extern const struct nvkm_bitfield gk104_runq_intr_0_names[];
+bool gk104_runq_idle(struct nvkm_runq *);
+extern const struct nvkm_engn_func gk104_engn;
+bool gk104_engn_chsw(struct nvkm_engn *);
+int gk104_engn_cxid(struct nvkm_engn *, bool *cgid);
+int gk104_ectx_ctor(struct nvkm_engn *, struct nvkm_vctx *);
+extern const struct nvkm_engn_func gk104_engn_ce;
+extern const struct nvkm_chan_func_userd gk104_chan_userd;
+extern const struct nvkm_chan_func_ramfc gk104_chan_ramfc;
+void gk104_chan_bind(struct nvkm_chan *);
+void gk104_chan_bind_inst(struct nvkm_chan *);
+void gk104_chan_unbind(struct nvkm_chan *);
+void gk104_chan_start(struct nvkm_chan *);
+void gk104_chan_stop(struct nvkm_chan *);
+
+int gk110_fifo_chid_ctor(struct nvkm_fifo *, int);
+extern const struct nvkm_runl_func gk110_runl;
+extern const struct nvkm_cgrp_func gk110_cgrp;
+void gk110_runl_insert_cgrp(struct nvkm_cgrp *, struct nvkm_memory *, u64);
+extern const struct nvkm_chan_func gk110_chan;
+void gk110_chan_preempt(struct nvkm_chan *);
+
+extern const struct nvkm_runq_func gk208_runq;
+void gk208_runq_init(struct nvkm_runq *);
+
+void gm107_fifo_intr_mmu_fault_unit(struct nvkm_fifo *, int);
+extern const struct nvkm_fifo_func_mmu_fault gm107_fifo_mmu_fault;
+extern const struct nvkm_runl_func gm107_runl;
+extern const struct nvkm_chan_func gm107_chan;
+
+int gm200_fifo_chid_nr(struct nvkm_fifo *);
+int gm200_fifo_runq_nr(struct nvkm_fifo *);
+
+extern const struct nvkm_enum gv100_fifo_mmu_fault_access[];
+extern const struct nvkm_enum gv100_fifo_mmu_fault_reason[];
+extern const struct nvkm_enum gv100_fifo_mmu_fault_hubclient[];
+extern const struct nvkm_enum gv100_fifo_mmu_fault_gpcclient[];
+void gv100_runl_insert_cgrp(struct nvkm_cgrp *, struct nvkm_memory *, u64);
+void gv100_runl_insert_chan(struct nvkm_chan *, struct nvkm_memory *, u64);
+void gv100_runl_preempt(struct nvkm_runl *);
+extern const struct nvkm_runq_func gv100_runq;
+extern const struct nvkm_engn_func gv100_engn;
+void gv100_ectx_bind(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
+extern const struct nvkm_engn_func gv100_engn_ce;
+int gv100_ectx_ce_ctor(struct nvkm_engn *, struct nvkm_vctx *);
+void gv100_ectx_ce_bind(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
+extern const struct nvkm_chan_func_userd gv100_chan_userd;
+extern const struct nvkm_chan_func_ramfc gv100_chan_ramfc;
+
+void tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *, u32 info);
+extern const struct nvkm_fifo_func_mmu_fault tu102_fifo_mmu_fault;
+
+int ga100_fifo_runl_ctor(struct nvkm_fifo *);
+int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
+extern const struct nvkm_event_func ga100_fifo_nonstall;
+extern const struct nvkm_runl_func ga100_runl;
+extern const struct nvkm_runq_func ga100_runq;
+extern const struct nvkm_engn_func ga100_engn;
+extern const struct nvkm_engn_func ga100_engn_ce;
+extern const struct nvkm_cgrp_func ga100_cgrp;
+extern const struct nvkm_chan_func ga100_chan;
+
+int nvkm_uchan_new(struct nvkm_fifo *, struct nvkm_cgrp *, const struct nvkm_oclass *,
+ void *argv, u32 argc, struct nvkm_object **);
+int nvkm_ucgrp_new(struct nvkm_fifo *, const struct nvkm_oclass *, void *argv, u32 argc,
+ struct nvkm_object **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
new file mode 100644
index 000000000..4445a12b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NV04_FIFO_REGS_H__
+#define __NV04_FIFO_REGS_H__
+
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
new file mode 100644
index 000000000..b5836cbc2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "runl.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "priv.h"
+#include "runq.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
+
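+ /* Look up the channel group currently active on an engine, based on the
+ * engine's cxid() query; the caller must release it with nvkm_cgrp_put().
+ */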
+struct nvkm_cgrp *
+nvkm_engn_cgrp_get(struct nvkm_engn *engn, unsigned long *pirqflags)
+{
+ struct nvkm_cgrp *cgrp = NULL;
+ struct nvkm_chan *chan;
+ bool cgid;
+ int id;
+
+ id = engn->func->cxid(engn, &cgid);
+ if (id < 0)
+ return NULL;
+
+ if (!cgid) {
+ chan = nvkm_runl_chan_get_chid(engn->runl, id, pirqflags);
+ if (chan)
+ cgrp = chan->cgrp;
+ } else {
+ cgrp = nvkm_runl_cgrp_get_cgid(engn->runl, id, pirqflags);
+ }
+
+ WARN_ON(!cgrp);
+ return cgrp;
+}
+
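+/* Per-runlist recovery: kill channels in flagged groups, wait for PBDMAs to
+ * idle, reset engines still bound to those groups, then resubmit the runlist.
+ */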
+static void
+nvkm_runl_rc(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_cgrp *cgrp, *gtmp;
+ struct nvkm_chan *chan, *ctmp;
+ struct nvkm_engn *engn;
+ unsigned long flags;
+ int rc, state, i;
+ bool reset;
+
+ /* Runlist is blocked before scheduling recovery - fetch count. */
+ BUG_ON(!mutex_is_locked(&runl->mutex));
+ rc = atomic_xchg(&runl->rc_pending, 0);
+ if (!rc)
+ return;
+
+ /* Look for channel groups flagged for RC. */
+ nvkm_runl_foreach_cgrp_safe(cgrp, gtmp, runl) {
+ state = atomic_cmpxchg(&cgrp->rc, NVKM_CGRP_RC_PENDING, NVKM_CGRP_RC_RUNNING);
+ if (state == NVKM_CGRP_RC_PENDING) {
+ /* Disable all channels in them, and remove from runlist. */
+ nvkm_cgrp_foreach_chan_safe(chan, ctmp, cgrp) {
+ nvkm_chan_error(chan, false);
+ nvkm_chan_remove_locked(chan);
+ }
+ }
+ }
+
+ /* On GPUs with runlist preempt, wait for PBDMA(s) servicing runlist to go idle. */
+ if (runl->func->preempt) {
+ for (i = 0; i < runl->runq_nr; i++) {
+ struct nvkm_runq *runq = runl->runq[i];
+
+ if (runq) {
+ nvkm_msec(fifo->engine.subdev.device, 2000,
+ if (runq->func->idle(runq))
+ break;
+ );
+ }
+ }
+ }
+
+ /* Look for engines that are still on flagged channel groups - reset them. */
+ nvkm_runl_foreach_engn_cond(engn, runl, engn->func->cxid) {
+ cgrp = nvkm_engn_cgrp_get(engn, &flags);
+ if (!cgrp) {
+ ENGN_DEBUG(engn, "cxid not valid");
+ continue;
+ }
+
+ reset = atomic_read(&cgrp->rc) == NVKM_CGRP_RC_RUNNING;
+ nvkm_cgrp_put(&cgrp, flags);
+ if (!reset) {
+ ENGN_DEBUG(engn, "cxid not in recovery");
+ continue;
+ }
+
+ ENGN_DEBUG(engn, "resetting...");
+ /*TODO: can we do something less of a potential catastrophe on failure? */
+ WARN_ON(nvkm_engine_reset(engn->engine));
+ }
+
+ /* Submit runlist update, and clear any remaining exception state. */
+ runl->func->update(runl);
+ if (runl->func->fault_clear)
+ runl->func->fault_clear(runl);
+
+ /* Unblock runlist processing. */
+ while (rc--)
+ nvkm_runl_allow(runl);
+ runl->func->wait(runl);
+}
+
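+/* Block the runlist, request preemption if supported, and kick the recovery worker. */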
+static void
+nvkm_runl_rc_runl(struct nvkm_runl *runl)
+{
+ RUNL_ERROR(runl, "rc scheduled");
+
+ nvkm_runl_block(runl);
+ if (runl->func->preempt)
+ runl->func->preempt(runl);
+
+ atomic_inc(&runl->rc_pending);
+ schedule_work(&runl->work);
+}
+
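+/* Flag a channel group for recovery, and schedule it on the group's runlist. */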
+void
+nvkm_runl_rc_cgrp(struct nvkm_cgrp *cgrp)
+{
+ if (atomic_cmpxchg(&cgrp->rc, NVKM_CGRP_RC_NONE, NVKM_CGRP_RC_PENDING) != NVKM_CGRP_RC_NONE)
+ return;
+
+ CGRP_ERROR(cgrp, "rc scheduled");
+ nvkm_runl_rc_runl(cgrp->runl);
+}
+
+void
+nvkm_runl_rc_engn(struct nvkm_runl *runl, struct nvkm_engn *engn)
+{
+ struct nvkm_cgrp *cgrp;
+ unsigned long flags;
+
+ /* Lookup channel group currently on engine. */
+ cgrp = nvkm_engn_cgrp_get(engn, &flags);
+ if (!cgrp) {
+ ENGN_DEBUG(engn, "rc skipped, not on channel");
+ return;
+ }
+
+ nvkm_runl_rc_cgrp(cgrp);
+ nvkm_cgrp_put(&cgrp, flags);
+}
+
+static void
+nvkm_runl_work(struct work_struct *work)
+{
+ struct nvkm_runl *runl = container_of(work, typeof(*runl), work);
+
+ mutex_lock(&runl->mutex);
+ nvkm_runl_rc(runl);
+ mutex_unlock(&runl->mutex);
+}
+
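+/* Lookup a channel by instance address, returning it with its channel group's
+ * lock held and the saved IRQ flags in *pirqflags.
+ */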
+struct nvkm_chan *
+nvkm_runl_chan_get_inst(struct nvkm_runl *runl, u64 inst, unsigned long *pirqflags)
+{
+ struct nvkm_chid *chid = runl->chid;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&chid->lock, flags);
+ for_each_set_bit(id, chid->used, chid->nr) {
+ chan = chid->data[id];
+ if (likely(chan)) {
+ if (chan->inst->addr == inst) {
+ spin_lock(&chan->cgrp->lock);
+ *pirqflags = flags;
+ spin_unlock(&chid->lock);
+ return chan;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&chid->lock, flags);
+ return NULL;
+}
+
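+/* Lookup a channel by channel ID, returning it with its channel group's lock held. */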
+struct nvkm_chan *
+nvkm_runl_chan_get_chid(struct nvkm_runl *runl, int id, unsigned long *pirqflags)
+{
+ struct nvkm_chid *chid = runl->chid;
+ struct nvkm_chan *chan;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chid->lock, flags);
+ if (!WARN_ON(id >= chid->nr)) {
+ chan = chid->data[id];
+ if (likely(chan)) {
+ spin_lock(&chan->cgrp->lock);
+ *pirqflags = flags;
+ spin_unlock(&chid->lock);
+ return chan;
+ }
+ }
+ spin_unlock_irqrestore(&chid->lock, flags);
+ return NULL;
+}
+
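+/* Lookup a channel group by group ID, returning it with its lock held. */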
+struct nvkm_cgrp *
+nvkm_runl_cgrp_get_cgid(struct nvkm_runl *runl, int id, unsigned long *pirqflags)
+{
+ struct nvkm_chid *cgid = runl->cgid;
+ struct nvkm_cgrp *cgrp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cgid->lock, flags);
+ if (!WARN_ON(id >= cgid->nr)) {
+ cgrp = cgid->data[id];
+ if (likely(cgrp)) {
+ spin_lock(&cgrp->lock);
+ *pirqflags = flags;
+ spin_unlock(&cgid->lock);
+ return cgrp;
+ }
+ }
+ spin_unlock_irqrestore(&cgid->lock, flags);
+ return NULL;
+}
+
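+/* Poll until a pending runlist preempt completes, running scheduled recovery
+ * in the meantime.  Returns -ETIMEDOUT if the channel timeout expires.
+ */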
+int
+nvkm_runl_preempt_wait(struct nvkm_runl *runl)
+{
+ return nvkm_msec(runl->fifo->engine.subdev.device, runl->fifo->timeout.chan_msec,
+ if (!runl->func->preempt_pending(runl))
+ break;
+
+ nvkm_runl_rc(runl);
+ usleep_range(1, 2);
+ ) < 0 ? -ETIMEDOUT : 0;
+}
+
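+/* Returns true if a runlist update is still pending in HW, running any
+ * scheduled recovery in the meantime.
+ */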
+bool
+nvkm_runl_update_pending(struct nvkm_runl *runl)
+{
+ if (!runl->func->pending(runl))
+ return false;
+
+ nvkm_runl_rc(runl);
+ return true;
+}
+
+void
+nvkm_runl_update_locked(struct nvkm_runl *runl, bool wait)
+{
+ if (atomic_xchg(&runl->changed, 0) && runl->func->update) {
+ runl->func->update(runl);
+ if (wait)
+ runl->func->wait(runl);
+ }
+}
+
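+/* Drop a block reference, re-enabling runlist scheduling when it reaches zero. */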
+void
+nvkm_runl_allow(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (!--runl->blocked) {
+ RUNL_TRACE(runl, "running");
+ runl->func->allow(runl, ~0);
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
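+/* Take a block reference, disabling runlist scheduling on the first one. */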
+void
+nvkm_runl_block(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (!runl->blocked++) {
+ RUNL_TRACE(runl, "stopped");
+ runl->func->block(runl, ~0);
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nvkm_runl_fini(struct nvkm_runl *runl)
+{
+ if (runl->func->fini)
+ runl->func->fini(runl);
+
+ flush_work(&runl->work);
+}
+
+void
+nvkm_runl_del(struct nvkm_runl *runl)
+{
+ struct nvkm_engn *engn, *engt;
+
+ nvkm_memory_unref(&runl->mem);
+
+ list_for_each_entry_safe(engn, engt, &runl->engns, head) {
+ list_del(&engn->head);
+ kfree(engn);
+ }
+
+ nvkm_chid_unref(&runl->chid);
+ nvkm_chid_unref(&runl->cgid);
+
+ list_del(&runl->head);
+ mutex_destroy(&runl->mutex);
+ kfree(runl);
+}
+
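+/* Register an engine on a runlist, resolving its MMU fault ID from either the
+ * TOP subdev or the FIFO's MMU fault engine table.
+ */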
+struct nvkm_engn *
+nvkm_runl_add(struct nvkm_runl *runl, int engi, const struct nvkm_engn_func *func,
+ enum nvkm_subdev_type type, int inst)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_engine *engine;
+ struct nvkm_engn *engn;
+
+ engine = nvkm_device_engine(device, type, inst);
+ if (!engine) {
+ RUNL_DEBUG(runl, "engn %d.%d[%s] not found", engi, inst, nvkm_subdev_type[type]);
+ return NULL;
+ }
+
+ if (!(engn = kzalloc(sizeof(*engn), GFP_KERNEL)))
+ return NULL;
+
+ engn->func = func;
+ engn->runl = runl;
+ engn->id = engi;
+ engn->engine = engine;
+ engn->fault = -1;
+ list_add_tail(&engn->head, &runl->engns);
+
+ /* Lookup MMU engine ID for fault handling. */
+ if (device->top)
+ engn->fault = nvkm_top_fault_id(device, engine->subdev.type, engine->subdev.inst);
+
+ if (engn->fault < 0 && fifo->func->mmu_fault) {
+ const struct nvkm_enum *map = fifo->func->mmu_fault->engine;
+
+ while (map->name) {
+ if (map->data2 == engine->subdev.type && map->inst == engine->subdev.inst) {
+ engn->fault = map->value;
+ break;
+ }
+ map++;
+ }
+ }
+
+ return engn;
+}
+
+struct nvkm_runl *
+nvkm_runl_get(struct nvkm_fifo *fifo, int runi, u32 addr)
+{
+ struct nvkm_runl *runl;
+
+ nvkm_runl_foreach(runl, fifo) {
+ if ((runi >= 0 && runl->id == runi) || (runi < 0 && runl->addr == addr))
+ return runl;
+ }
+
+ return NULL;
+}
+
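+/* Allocate a runlist, creating per-runlist channel/group ID allocators unless
+ * the FIFO provides global ones.
+ */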
+struct nvkm_runl *
+nvkm_runl_new(struct nvkm_fifo *fifo, int runi, u32 addr, int id_nr)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_runl *runl;
+ int ret;
+
+ if (!(runl = kzalloc(sizeof(*runl), GFP_KERNEL)))
+ return NULL;
+
+ runl->func = fifo->func->runl;
+ runl->fifo = fifo;
+ runl->id = runi;
+ runl->addr = addr;
+ INIT_LIST_HEAD(&runl->engns);
+ INIT_LIST_HEAD(&runl->cgrps);
+ atomic_set(&runl->changed, 0);
+ mutex_init(&runl->mutex);
+ INIT_WORK(&runl->work, nvkm_runl_work);
+ atomic_set(&runl->rc_triggered, 0);
+ atomic_set(&runl->rc_pending, 0);
+ list_add_tail(&runl->head, &fifo->runls);
+
+ if (!fifo->chid) {
+ if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->cgid)) ||
+ (ret = nvkm_chid_new(&nvkm_chan_event, subdev, id_nr, 0, id_nr, &runl->chid))) {
+ RUNL_ERROR(runl, "cgid/chid: %d", ret);
+ nvkm_runl_del(runl);
+ return NULL;
+ }
+ } else {
+ runl->cgid = nvkm_chid_ref(fifo->cgid);
+ runl->chid = nvkm_chid_ref(fifo->chid);
+ }
+
+ return runl;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
new file mode 100644
index 000000000..c93d21bb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
@@ -0,0 +1,125 @@
+#ifndef __NVKM_RUNL_H__
+#define __NVKM_RUNL_H__
+#include <core/intr.h>
+struct nvkm_cctx;
+struct nvkm_cgrp;
+struct nvkm_chan;
+struct nvkm_memory;
+struct nvkm_object;
+struct nvkm_vctx;
+enum nvkm_subdev_type;
+
+struct nvkm_engn {
+ const struct nvkm_engn_func {
+ bool (*chsw)(struct nvkm_engn *);
+ int (*cxid)(struct nvkm_engn *, bool *cgid);
+ void (*mmu_fault_trigger)(struct nvkm_engn *);
+ bool (*mmu_fault_triggered)(struct nvkm_engn *);
+ int (*ctor)(struct nvkm_engn *, struct nvkm_vctx *);
+ void (*bind)(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
+ int (*ramht_add)(struct nvkm_engn *, struct nvkm_object *, struct nvkm_chan *);
+ void (*ramht_del)(struct nvkm_chan *, int hash);
+ } *func;
+ struct nvkm_runl *runl;
+ int id;
+
+ struct nvkm_engine *engine;
+
+ int fault;
+
+ struct list_head head;
+};
+
+#define ENGN_PRINT(e,l,p,f,a...) \
+ RUNL_PRINT((e)->runl, l, p, "%02d[%8s]:"f, (e)->id, (e)->engine->subdev.name, ##a)
+#define ENGN_DEBUG(e,f,a...) ENGN_PRINT((e), DEBUG, info, " "f"\n", ##a)
+
+struct nvkm_runl {
+ const struct nvkm_runl_func {
+ void (*init)(struct nvkm_runl *);
+ void (*fini)(struct nvkm_runl *);
+ int runqs;
+ u8 size;
+ int (*update)(struct nvkm_runl *);
+ void (*insert_cgrp)(struct nvkm_cgrp *, struct nvkm_memory *, u64 offset);
+ void (*insert_chan)(struct nvkm_chan *, struct nvkm_memory *, u64 offset);
+ void (*commit)(struct nvkm_runl *, struct nvkm_memory *, u32 start, int count);
+ int (*wait)(struct nvkm_runl *);
+ bool (*pending)(struct nvkm_runl *);
+ void (*block)(struct nvkm_runl *, u32 engm);
+ void (*allow)(struct nvkm_runl *, u32 engm);
+ void (*fault_clear)(struct nvkm_runl *);
+ void (*preempt)(struct nvkm_runl *);
+ bool (*preempt_pending)(struct nvkm_runl *);
+ } *func;
+ struct nvkm_fifo *fifo;
+ int id;
+ u32 addr;
+ u32 chan;
+ u16 doorbell;
+
+ struct nvkm_chid *cgid;
+#define NVKM_CHAN_EVENT_ERRORED BIT(0)
+ struct nvkm_chid *chid;
+
+ struct list_head engns;
+
+ struct nvkm_runq *runq[2];
+ int runq_nr;
+
+ struct nvkm_inth inth;
+
+ struct list_head cgrps;
+ int cgrp_nr;
+ int chan_nr;
+ atomic_t changed;
+ struct nvkm_memory *mem;
+ u32 offset;
+ struct mutex mutex;
+
+ int blocked;
+
+ struct work_struct work;
+ atomic_t rc_triggered;
+ atomic_t rc_pending;
+
+ struct list_head head;
+};
+
+struct nvkm_runl *nvkm_runl_new(struct nvkm_fifo *, int runi, u32 addr, int id_nr);
+struct nvkm_runl *nvkm_runl_get(struct nvkm_fifo *, int runi, u32 addr);
+struct nvkm_engn *nvkm_runl_add(struct nvkm_runl *, int engi, const struct nvkm_engn_func *,
+ enum nvkm_subdev_type, int inst);
+void nvkm_runl_del(struct nvkm_runl *);
+void nvkm_runl_fini(struct nvkm_runl *);
+void nvkm_runl_block(struct nvkm_runl *);
+void nvkm_runl_allow(struct nvkm_runl *);
+void nvkm_runl_update_locked(struct nvkm_runl *, bool wait);
+bool nvkm_runl_update_pending(struct nvkm_runl *);
+int nvkm_runl_preempt_wait(struct nvkm_runl *);
+
+void nvkm_runl_rc_engn(struct nvkm_runl *, struct nvkm_engn *);
+void nvkm_runl_rc_cgrp(struct nvkm_cgrp *);
+
+struct nvkm_cgrp *nvkm_runl_cgrp_get_cgid(struct nvkm_runl *, int cgid, unsigned long *irqflags);
+struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
+struct nvkm_chan *nvkm_runl_chan_get_inst(struct nvkm_runl *, u64 inst, unsigned long *irqflags);
+
+#define nvkm_runl_find_engn(engn,runl,cond) nvkm_list_find(engn, &(runl)->engns, head, (cond))
+
+#define nvkm_runl_first(fifo) list_first_entry(&(fifo)->runls, struct nvkm_runl, head)
+#define nvkm_runl_foreach(runl,fifo) list_for_each_entry((runl), &(fifo)->runls, head)
+#define nvkm_runl_foreach_cond(runl,fifo,cond) nvkm_list_foreach(runl, &(fifo)->runls, head, (cond))
+#define nvkm_runl_foreach_engn(engn,runl) list_for_each_entry((engn), &(runl)->engns, head)
+#define nvkm_runl_foreach_engn_cond(engn,runl,cond) \
+ nvkm_list_foreach(engn, &(runl)->engns, head, (cond))
+#define nvkm_runl_foreach_cgrp(cgrp,runl) list_for_each_entry((cgrp), &(runl)->cgrps, head)
+#define nvkm_runl_foreach_cgrp_safe(cgrp,gtmp,runl) \
+ list_for_each_entry_safe((cgrp), (gtmp), &(runl)->cgrps, head)
+
+#define RUNL_PRINT(r,l,p,f,a...) \
+ nvkm_printk__(&(r)->fifo->engine.subdev, NV_DBG_##l, p, "%06x:"f, (r)->addr, ##a)
+#define RUNL_ERROR(r,f,a...) RUNL_PRINT((r), ERROR, err, " "f"\n", ##a)
+#define RUNL_DEBUG(r,f,a...) RUNL_PRINT((r), DEBUG, info, " "f"\n", ##a)
+#define RUNL_TRACE(r,f,a...) RUNL_PRINT((r), TRACE, info, " "f"\n", ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.c
new file mode 100644
index 000000000..33bcf5fb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "runq.h"
+#include "priv.h"
+
+void
+nvkm_runq_del(struct nvkm_runq *runq)
+{
+ list_del(&runq->head);
+ kfree(runq);
+}
+
+struct nvkm_runq *
+nvkm_runq_new(struct nvkm_fifo *fifo, int pbid)
+{
+ struct nvkm_runq *runq;
+
+ if (!(runq = kzalloc(sizeof(*runq), GFP_KERNEL)))
+ return NULL;
+
+ runq->func = fifo->func->runq;
+ runq->fifo = fifo;
+ runq->id = pbid;
+ list_add_tail(&runq->head, &fifo->runqs);
+ return runq;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.h
new file mode 100644
index 000000000..2cb4836e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/runq.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_RUNQ_H__
+#define __NVKM_RUNQ_H__
+#include <core/os.h>
+struct nvkm_runl;
+
+struct nvkm_runq {
+ const struct nvkm_runq_func {
+ void (*init)(struct nvkm_runq *);
+ bool (*intr)(struct nvkm_runq *, struct nvkm_runl *);
+ const struct nvkm_bitfield *intr_0_names;
+ bool (*intr_1_ctxnotvalid)(struct nvkm_runq *, int chid);
+ bool (*idle)(struct nvkm_runq *);
+ } *func;
+ struct nvkm_fifo *fifo;
+ int id;
+
+ struct list_head head;
+};
+
+struct nvkm_runq *nvkm_runq_new(struct nvkm_fifo *, int pbid);
+void nvkm_runq_del(struct nvkm_runq *);
+
+#define nvkm_runq_foreach(runq,fifo) list_for_each_entry((runq), &(fifo)->runqs, head)
+#define nvkm_runq_foreach_cond(runq,fifo,cond) nvkm_list_foreach(runq, &(fifo)->runqs, head, (cond))
+
+#define RUNQ_PRINT(r,l,p,f,a...) \
+ nvkm_printk__(&(r)->fifo->engine.subdev, NV_DBG_##l, p, "PBDMA%d:"f, (r)->id, ##a)
+#define RUNQ_ERROR(r,f,a...) RUNQ_PRINT((r), ERROR, err, " "f"\n", ##a)
+#define RUNQ_DEBUG(r,f,a...) RUNQ_PRINT((r), DEBUG, info, " "f"\n", ##a)
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
new file mode 100644
index 000000000..ea9e151db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "runl.h"
+
+#include <core/memory.h>
+#include <subdev/mc.h>
+#include <subdev/vfn.h>
+
+#include <nvif/class.h>
+
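+/* Doorbell token: runlist ID in the upper 16 bits, channel ID in the lower. */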
+static u32
+tu102_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return (chan->cgrp->runl->id << 16) | chan->id;
+}
+
+static void
+tu102_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ gk104_chan_start(chan);
+ nvkm_wr32(device, device->vfn->addr.user + 0x0090, chan->func->doorbell_handle(chan));
+}
+
+static const struct nvkm_chan_func
+tu102_chan = {
+ .inst = &gf100_chan_inst,
+ .userd = &gv100_chan_userd,
+ .ramfc = &gv100_chan_ramfc,
+ .bind = gk104_chan_bind_inst,
+ .unbind = gk104_chan_unbind,
+ .start = tu102_chan_start,
+ .stop = gk104_chan_stop,
+ .preempt = gk110_chan_preempt,
+ .doorbell_handle = tu102_chan_doorbell_handle,
+};
+
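+/* Runlist submission still pending in HW? */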
+static bool
+tu102_runl_pending(struct nvkm_runl *runl)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+
+ return nvkm_rd32(device, 0x002b0c + (runl->id * 0x10)) & 0x00008000;
+}
+
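+/* Point HW at the new runlist: base address (lo/hi) and entry count. */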
+static void
+tu102_runl_commit(struct nvkm_runl *runl, struct nvkm_memory *memory, u32 start, int count)
+{
+ struct nvkm_device *device = runl->fifo->engine.subdev.device;
+ u64 addr = nvkm_memory_addr(memory) + start;
+ /*XXX: target? */
+
+ nvkm_wr32(device, 0x002b00 + (runl->id * 0x10), lower_32_bits(addr));
+ nvkm_wr32(device, 0x002b04 + (runl->id * 0x10), upper_32_bits(addr));
+ nvkm_wr32(device, 0x002b08 + (runl->id * 0x10), count);
+}
+
+static const struct nvkm_runl_func
+tu102_runl = {
+ .runqs = 2,
+ .size = 16,
+ .update = nv50_runl_update,
+ .insert_cgrp = gv100_runl_insert_cgrp,
+ .insert_chan = gv100_runl_insert_chan,
+ .commit = tu102_runl_commit,
+ .wait = nv50_runl_wait,
+ .pending = tu102_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
+ .preempt = gv100_runl_preempt,
+ .preempt_pending = gf100_runl_preempt_pending,
+};
+
+static const struct nvkm_enum
+tu102_fifo_mmu_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "PTP" },
+ { 0x06, "PWR_PMU" },
+ { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x09, "PERF" },
+ { 0x1f, "PHYSICAL" },
+ { 0x20, "HOST0" },
+ { 0x21, "HOST1" },
+ { 0x22, "HOST2" },
+ { 0x23, "HOST3" },
+ { 0x24, "HOST4" },
+ { 0x25, "HOST5" },
+ { 0x26, "HOST6" },
+ { 0x27, "HOST7" },
+ { 0x28, "HOST8" },
+ { 0x29, "HOST9" },
+ { 0x2a, "HOST10" },
+ { 0x2b, "HOST11" },
+ { 0x2c, "HOST12" },
+ { 0x2d, "HOST13" },
+ { 0x2e, "HOST14" },
+ { 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ {}
+};
+
+const struct nvkm_fifo_func_mmu_fault
+tu102_fifo_mmu_fault = {
+ .recover = gf100_fifo_mmu_fault_recover,
+ .access = gv100_fifo_mmu_fault_access,
+ .engine = tu102_fifo_mmu_fault_engine,
+ .reason = gv100_fifo_mmu_fault_reason,
+ .hubclient = gv100_fifo_mmu_fault_hubclient,
+ .gpcclient = gv100_fifo_mmu_fault_gpcclient,
+};
+
+void
+tu102_fifo_intr_ctxsw_timeout_info(struct nvkm_engn *engn, u32 info)
+{
+ struct nvkm_runl *runl = engn->runl;
+ struct nvkm_cgrp *cgrp;
+ unsigned long flags;
+
+	/* Check that the engine hasn't become unstuck since the timeout was raised. */
+ ENGN_DEBUG(engn, "CTXSW_TIMEOUT %08x", info);
+ if (info & 0xc0000000)
+ return;
+
+ /* Determine channel group the engine is stuck on, and schedule recovery. */
+ switch (info & 0x0000c000) {
+ case 0x00004000: /* LOAD */
+ cgrp = nvkm_runl_cgrp_get_cgid(runl, info & 0x3fff0000, &flags);
+ break;
+ case 0x00008000: /* SAVE */
+ case 0x0000c000: /* SWITCH */
+ cgrp = nvkm_runl_cgrp_get_cgid(runl, info & 0x00003fff, &flags);
+ break;
+ default:
+ cgrp = NULL;
+ break;
+ }
+
+ if (!WARN_ON(!cgrp)) {
+ nvkm_runl_rc_cgrp(cgrp);
+ nvkm_cgrp_put(&cgrp, flags);
+ }
+}
+
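+/* Handle context-switch timeout interrupts for each engine flagged in 0x002a30. */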
+static void
+tu102_fifo_intr_ctxsw_timeout(struct nvkm_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->engine.subdev.device;
+ struct nvkm_runl *runl;
+ struct nvkm_engn *engn;
+ u32 engm = nvkm_rd32(device, 0x002a30);
+ u32 info;
+
+ nvkm_runl_foreach(runl, fifo) {
+ nvkm_runl_foreach_engn_cond(engn, runl, engm & BIT(engn->id)) {
+ info = nvkm_rd32(device, 0x003200 + (engn->id * 4));
+ tu102_fifo_intr_ctxsw_timeout_info(engn, info);
+ }
+ }
+
+ nvkm_wr32(device, 0x002a30, engm);
+}
+
+static void
+tu102_fifo_intr_sched(struct nvkm_fifo *fifo)
+{
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ u32 intr = nvkm_rd32(subdev->device, 0x00254c);
+ u32 code = intr & 0x000000ff;
+
+ nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
+}
+
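+/* Top-level FIFO interrupt handler: dispatch each pending, unmasked status bit,
+ * masking off any that aren't recognised.
+ */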
+static irqreturn_t
+tu102_fifo_intr(struct nvkm_inth *inth)
+{
+ struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+ struct nvkm_subdev *subdev = &fifo->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x002140);
+ u32 stat = nvkm_rd32(device, 0x002100) & mask;
+
+ if (stat & 0x00000001) {
+ gk104_fifo_intr_bind(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000001);
+ stat &= ~0x00000001;
+ }
+
+ if (stat & 0x00000002) {
+ tu102_fifo_intr_ctxsw_timeout(fifo);
+ stat &= ~0x00000002;
+ }
+
+ if (stat & 0x00000100) {
+ tu102_fifo_intr_sched(fifo);
+ nvkm_wr32(device, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x00010000) {
+ gk104_fifo_intr_chsw(fifo);
+ nvkm_wr32(device, 0x002100, 0x00010000);
+ stat &= ~0x00010000;
+ }
+
+ if (stat & 0x20000000) {
+ if (gf100_fifo_intr_pbdma(fifo))
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ gk104_fifo_intr_runlist(fifo);
+ stat &= ~0x40000000;
+ }
+
+ if (stat & 0x80000000) {
+ nvkm_wr32(device, 0x002100, 0x80000000);
+ nvkm_event_ntfy(&fifo->nonstall.event, 0, NVKM_FIFO_NONSTALL_EVENT);
+ stat &= ~0x80000000;
+ }
+
+ if (stat) {
+ nvkm_error(subdev, "INTR %08x\n", stat);
+ spin_lock(&fifo->lock);
+ nvkm_mask(device, 0x002140, stat, 0x00000000);
+ spin_unlock(&fifo->lock);
+ nvkm_wr32(device, 0x002100, stat);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+tu102_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
+{
+	/* Not directly related to PBDMAs, but enables the doorbell to function. */
+ nvkm_mask(fifo->engine.subdev.device, 0xb65000, 0x80000000, 0x80000000);
+}
+
+static const struct nvkm_fifo_func
+tu102_fifo = {
+ .chid_nr = gm200_fifo_chid_nr,
+ .chid_ctor = gk110_fifo_chid_ctor,
+ .runq_nr = gm200_fifo_runq_nr,
+ .runl_ctor = gk104_fifo_runl_ctor,
+ .init = gk104_fifo_init,
+ .init_pbdmas = tu102_fifo_init_pbdmas,
+ .intr = tu102_fifo_intr,
+ .mmu_fault = &tu102_fifo_mmu_fault,
+ .nonstall = &gf100_fifo_nonstall,
+ .runl = &tu102_runl,
+ .runq = &gv100_runq,
+ .engn = &gv100_engn,
+ .engn_ce = &gv100_engn_ce,
+ .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A }, &gk110_cgrp, .force = true },
+ .chan = {{ 0, 0, TURING_CHANNEL_GPFIFO_A }, &tu102_chan },
+};
+
+int
+tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_fifo **pfifo)
+{
+ return nvkm_fifo_new_(&tu102_fifo, device, type, inst, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c
new file mode 100644
index 000000000..52c594dfb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ucgrp.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define nvkm_ucgrp(p) container_of((p), struct nvkm_ucgrp, object)
+#include "priv.h"
+#include "cgrp.h"
+#include "runl.h"
+
+#include <subdev/mmu.h>
+
+#include <nvif/if0021.h>
+
+struct nvkm_ucgrp {
+ struct nvkm_object object;
+ struct nvkm_cgrp *cgrp;
+};
+
+static int
+nvkm_ucgrp_chan_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_cgrp *cgrp = nvkm_ucgrp(oclass->parent)->cgrp;
+
+ return nvkm_uchan_new(cgrp->runl->fifo, cgrp, oclass, argv, argc, pobject);
+}
+
+static int
+nvkm_ucgrp_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+ struct nvkm_cgrp *cgrp = nvkm_ucgrp(object)->cgrp;
+ struct nvkm_fifo *fifo = cgrp->runl->fifo;
+ const struct nvkm_fifo_func_chan *chan = &fifo->func->chan;
+ int c = 0;
+
+ /* *_CHANNEL_GPFIFO_* */
+ if (chan->user.oclass) {
+ if (c++ == index) {
+ oclass->base = chan->user;
+ oclass->ctor = nvkm_ucgrp_chan_new;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void *
+nvkm_ucgrp_dtor(struct nvkm_object *object)
+{
+ struct nvkm_ucgrp *ucgrp = nvkm_ucgrp(object);
+
+ nvkm_cgrp_unref(&ucgrp->cgrp);
+ return ucgrp;
+}
+
+static const struct nvkm_object_func
+nvkm_ucgrp = {
+ .dtor = nvkm_ucgrp_dtor,
+ .sclass = nvkm_ucgrp_sclass,
+};
+
+int
+nvkm_ucgrp_new(struct nvkm_fifo *fifo, const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ union nvif_cgrp_args *args = argv;
+ struct nvkm_runl *runl;
+ struct nvkm_vmm *vmm;
+ struct nvkm_ucgrp *ucgrp;
+ int ret;
+
+ if (argc < sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ argc -= sizeof(args->v0);
+
+ if (args->v0.namelen != argc)
+ return -EINVAL;
+
+ /* Lookup objects referenced in args. */
+ runl = nvkm_runl_get(fifo, args->v0.runlist, 0);
+ if (!runl)
+ return -EINVAL;
+
+ vmm = nvkm_uvmm_search(oclass->client, args->v0.vmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+
+ /* Allocate channel group. */
+ if (!(ucgrp = kzalloc(sizeof(*ucgrp), GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ nvkm_object_ctor(&nvkm_ucgrp, oclass, &ucgrp->object);
+ *pobject = &ucgrp->object;
+
+ ret = nvkm_cgrp_new(runl, args->v0.name, vmm, true, &ucgrp->cgrp);
+ if (ret)
+ goto done;
+
+ /* Return channel group info to caller. */
+ args->v0.cgid = ucgrp->cgrp->id;
+
+done:
+ nvkm_vmm_unref(&vmm);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
new file mode 100644
index 000000000..1dac95ae7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
@@ -0,0 +1,409 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define nvkm_uchan(p) container_of((p), struct nvkm_uchan, object)
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <core/oproxy.h>
+#include <subdev/mmu.h>
+#include <engine/dma.h>
+
+#include <nvif/if0020.h>
+
+struct nvkm_uchan {
+ struct nvkm_object object;
+ struct nvkm_chan *chan;
+};
+
+static int
+nvkm_uchan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
+{
+ struct nvkm_chan *chan = nvkm_uchan(object)->chan;
+ struct nvkm_runl *runl = chan->cgrp->runl;
+ union nvif_chan_event_args *args = argv;
+
+ if (!uevent)
+ return 0;
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+
+ switch (args->v0.type) {
+ case NVIF_CHAN_EVENT_V0_NON_STALL_INTR:
+ return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, 0,
+ NVKM_FIFO_NONSTALL_EVENT, NULL);
+ case NVIF_CHAN_EVENT_V0_KILLED:
+ return nvkm_uevent_add(uevent, &runl->chid->event, chan->id,
+ NVKM_CHAN_EVENT_ERRORED, NULL);
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+struct nvkm_uobj {
+ struct nvkm_oproxy oproxy;
+ struct nvkm_chan *chan;
+ struct nvkm_cctx *cctx;
+ int hash;
+};
+
+static int
+nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
+{
+ struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
+ struct nvkm_chan *chan = uobj->chan;
+ struct nvkm_cctx *cctx = uobj->cctx;
+ struct nvkm_ectx *ectx = cctx->vctx->ectx;
+
+ if (!ectx->object)
+ return 0;
+
+ /* Unbind engine context from channel, if no longer required. */
+ if (refcount_dec_and_mutex_lock(&cctx->uses, &chan->cgrp->mutex)) {
+ nvkm_chan_cctx_bind(chan, ectx->engn, NULL);
+
+ if (refcount_dec_and_test(&ectx->uses))
+ nvkm_object_fini(ectx->object, false);
+ mutex_unlock(&chan->cgrp->mutex);
+ }
+
+ return 0;
+}
+
+static int
+nvkm_uchan_object_init_0(struct nvkm_oproxy *oproxy)
+{
+ struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
+ struct nvkm_chan *chan = uobj->chan;
+ struct nvkm_cctx *cctx = uobj->cctx;
+ struct nvkm_ectx *ectx = cctx->vctx->ectx;
+ int ret = 0;
+
+ if (!ectx->object)
+ return 0;
+
+ /* Bind engine context to channel, if it hasn't been already. */
+ if (!refcount_inc_not_zero(&cctx->uses)) {
+ mutex_lock(&chan->cgrp->mutex);
+ if (!refcount_inc_not_zero(&cctx->uses)) {
+ if (!refcount_inc_not_zero(&ectx->uses)) {
+ ret = nvkm_object_init(ectx->object);
+ if (ret == 0)
+ refcount_set(&ectx->uses, 1);
+ }
+
+ if (ret == 0) {
+ nvkm_chan_cctx_bind(chan, ectx->engn, cctx);
+ refcount_set(&cctx->uses, 1);
+ }
+ }
+ mutex_unlock(&chan->cgrp->mutex);
+ }
+
+ return ret;
+}
+
+static void
+nvkm_uchan_object_dtor(struct nvkm_oproxy *oproxy)
+{
+ struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
+ struct nvkm_engn *engn;
+
+ if (!uobj->cctx)
+ return;
+
+ engn = uobj->cctx->vctx->ectx->engn;
+ if (engn->func->ramht_del)
+ engn->func->ramht_del(uobj->chan, uobj->hash);
+
+ nvkm_chan_cctx_put(uobj->chan, &uobj->cctx);
+}
+
+static const struct nvkm_oproxy_func
+nvkm_uchan_object = {
+ .dtor[1] = nvkm_uchan_object_dtor,
+ .init[0] = nvkm_uchan_object_init_0,
+ .fini[1] = nvkm_uchan_object_fini_1,
+};
+
+static int
+nvkm_uchan_object_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ struct nvkm_chan *chan = nvkm_uchan(oclass->parent)->chan;
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+ struct nvkm_engn *engn;
+ struct nvkm_uobj *uobj;
+ int ret;
+
+ /* Lookup host engine state for target engine. */
+ engn = nvkm_runl_find_engn(engn, cgrp->runl, engn->engine == oclass->engine);
+ if (WARN_ON(!engn))
+ return -EINVAL;
+
+ /* Allocate SW object. */
+ if (!(uobj = kzalloc(sizeof(*uobj), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_oproxy_ctor(&nvkm_uchan_object, oclass, &uobj->oproxy);
+ uobj->chan = chan;
+ *pobject = &uobj->oproxy.base;
+
+	/* Ref. channel context for target engine. */
+ ret = nvkm_chan_cctx_get(chan, engn, &uobj->cctx, oclass->client);
+ if (ret)
+ return ret;
+
+ /* Allocate HW object. */
+ ret = oclass->base.ctor(&(const struct nvkm_oclass) {
+ .base = oclass->base,
+ .engn = oclass->engn,
+ .handle = oclass->handle,
+ .object = oclass->object,
+ .client = oclass->client,
+ .parent = uobj->cctx->vctx->ectx->object ?: oclass->parent,
+ .engine = engn->engine,
+ }, argv, argc, &uobj->oproxy.object);
+ if (ret)
+ return ret;
+
+ if (engn->func->ramht_add) {
+ uobj->hash = engn->func->ramht_add(engn, uobj->oproxy.object, uobj->chan);
+ if (uobj->hash < 0)
+ return uobj->hash;
+ }
+
+ return 0;
+}
+
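+/* Enumerate the object classes available to this channel by walking the
+ * engines on its runlist.
+ */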
+static int
+nvkm_uchan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
+{
+ struct nvkm_chan *chan = nvkm_uchan(object)->chan;
+ struct nvkm_engn *engn;
+ int ret, runq = 0;
+
+ nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+ struct nvkm_engine *engine = engn->engine;
+ int c = 0;
+
+		/* On runlists with multiple runqueues, each runqueue has its own LCE. */
+ if (engn->runl->func->runqs) {
+ if (engine->subdev.type == NVKM_ENGINE_CE) {
+ if (chan->runq != runq++)
+ continue;
+ }
+ }
+
+ oclass->engine = engine;
+ oclass->base.oclass = 0;
+
+ if (engine->func->fifo.sclass) {
+ ret = engine->func->fifo.sclass(oclass, index);
+ if (oclass->base.oclass) {
+ if (!oclass->base.ctor)
+ oclass->base.ctor = nvkm_object_new;
+ oclass->ctor = nvkm_uchan_object_new;
+ return 0;
+ }
+
+ index -= ret;
+ continue;
+ }
+
+ while (engine->func->sclass[c].oclass) {
+ if (c++ == index) {
+ oclass->base = engine->func->sclass[index];
+ if (!oclass->base.ctor)
+ oclass->base.ctor = nvkm_object_new;
+ oclass->ctor = nvkm_uchan_object_new;
+ return 0;
+ }
+ }
+
+ index -= c;
+ }
+
+ return -EINVAL;
+}
+
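+/* Map the channel's USERD area into the client. */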
+static int
+nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_chan *chan = nvkm_uchan(object)->chan;
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ if (chan->func->userd->bar < 0)
+ return -ENOSYS;
+
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = device->func->resource_addr(device, chan->func->userd->bar) +
+ chan->func->userd->base + chan->userd.base;
+ *size = chan->func->userd->size;
+ return 0;
+}
+
+static int
+nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
+{
+ struct nvkm_chan *chan = nvkm_uchan(object)->chan;
+
+ nvkm_chan_block(chan);
+ nvkm_chan_remove(chan, true);
+
+ if (chan->func->unbind)
+ chan->func->unbind(chan);
+
+ return 0;
+}
+
+static int
+nvkm_uchan_init(struct nvkm_object *object)
+{
+ struct nvkm_chan *chan = nvkm_uchan(object)->chan;
+
+ if (atomic_read(&chan->errored))
+ return 0;
+
+ if (chan->func->bind)
+ chan->func->bind(chan);
+
+ nvkm_chan_allow(chan);
+ nvkm_chan_insert(chan);
+ return 0;
+}
+
+static void *
+nvkm_uchan_dtor(struct nvkm_object *object)
+{
+ struct nvkm_uchan *uchan = nvkm_uchan(object);
+
+ nvkm_chan_del(&uchan->chan);
+ return uchan;
+}
+
+static const struct nvkm_object_func
+nvkm_uchan = {
+ .dtor = nvkm_uchan_dtor,
+ .init = nvkm_uchan_init,
+ .fini = nvkm_uchan_fini,
+ .map = nvkm_uchan_map,
+ .sclass = nvkm_uchan_sclass,
+ .uevent = nvkm_uchan_uevent,
+};
+
+int
+nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
+ void *argv, u32 argc, struct nvkm_object **pobject)
+{
+ union nvif_chan_args *args = argv;
+ struct nvkm_runl *runl;
+ struct nvkm_vmm *vmm = NULL;
+ struct nvkm_dmaobj *ctxdma = NULL;
+ struct nvkm_memory *userd = NULL;
+ struct nvkm_uchan *uchan;
+ struct nvkm_chan *chan;
+ int ret;
+
+ if (argc < sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ argc -= sizeof(args->v0);
+
+ if (args->v0.namelen != argc)
+ return -EINVAL;
+
+ /* Lookup objects referenced in args. */
+ runl = nvkm_runl_get(fifo, args->v0.runlist, 0);
+ if (!runl)
+ return -EINVAL;
+
+ if (args->v0.vmm) {
+ vmm = nvkm_uvmm_search(oclass->client, args->v0.vmm);
+ if (IS_ERR(vmm))
+ return PTR_ERR(vmm);
+ }
+
+ if (args->v0.ctxdma) {
+ ctxdma = nvkm_dmaobj_search(oclass->client, args->v0.ctxdma);
+ if (IS_ERR(ctxdma)) {
+ ret = PTR_ERR(ctxdma);
+ goto done;
+ }
+ }
+
+ if (args->v0.huserd) {
+ userd = nvkm_umem_search(oclass->client, args->v0.huserd);
+ if (IS_ERR(userd)) {
+ ret = PTR_ERR(userd);
+ userd = NULL;
+ goto done;
+ }
+ }
+
+ /* Allocate channel. */
+ if (!(uchan = kzalloc(sizeof(*uchan), GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ nvkm_object_ctor(&nvkm_uchan, oclass, &uchan->object);
+ *pobject = &uchan->object;
+
+ ret = nvkm_chan_new_(fifo->func->chan.func, runl, args->v0.runq, cgrp, args->v0.name,
+ args->v0.priv != 0, args->v0.devm, vmm, ctxdma, args->v0.offset,
+ args->v0.length, userd, args->v0.ouserd, &uchan->chan);
+ if (ret)
+ goto done;
+
+ chan = uchan->chan;
+
+ /* Return channel info to caller. */
+ if (chan->func->doorbell_handle)
+ args->v0.token = chan->func->doorbell_handle(chan);
+ else
+ args->v0.token = ~0;
+
+ args->v0.chid = chan->id;
+
+ switch (nvkm_memory_target(chan->inst->memory)) {
+ case NVKM_MEM_TARGET_INST: args->v0.aper = NVIF_CHAN_V0_INST_APER_INST; break;
+ case NVKM_MEM_TARGET_VRAM: args->v0.aper = NVIF_CHAN_V0_INST_APER_VRAM; break;
+ case NVKM_MEM_TARGET_HOST: args->v0.aper = NVIF_CHAN_V0_INST_APER_HOST; break;
+ case NVKM_MEM_TARGET_NCOH: args->v0.aper = NVIF_CHAN_V0_INST_APER_NCOH; break;
+ default:
+ WARN_ON(1);
+ ret = -EFAULT;
+ break;
+ }
+
+ args->v0.inst = nvkm_memory_addr(chan->inst->memory);
+done:
+ nvkm_memory_unref(&userd);
+ nvkm_vmm_unref(&vmm);
+ return ret;
+}