author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit	5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree	cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/gpu/drm/i915/pxp
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on a socket by socket basis (a usage sketch follows the shortlog below).
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure" precedent set by the recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata mode.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if the old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using the nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux

   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)

   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.

   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support

   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)

   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy

   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.

   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support

   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator

   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence

   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
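
A minimal usage sketch for the IP_LOCAL_PORT_RANGE option mentioned above. This is an illustration, not part of the patch: the option value and the lo/hi bit packing are assumed from the 6.3 uapi headers, and set_local_port_range() is a hypothetical helper:

	#include <stdint.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IP_LOCAL_PORT_RANGE
	#define IP_LOCAL_PORT_RANGE 51	/* assumed value, from <linux/in.h> in v6.3+ */
	#endif

	/* constrain this socket's local port selection to [40000, 40100] */
	static int set_local_port_range(int fd)
	{
		/* low 16 bits: lower bound, high 16 bits: upper bound */
		uint32_t range = 40000u | (40100u << 16);

		return setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
				  &range, sizeof(range));
	}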
Diffstat (limited to 'drivers/gpu/drm/i915/pxp')
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c                   329
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.h                    35
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c               141
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h                15
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h   28
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h   26
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h  35
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c            81
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h            21
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_huc.c                70
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_huc.h                13
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.c               102
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.h                40
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.c                 61
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_pm.h                 37
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c           181
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.h            20
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.c               311
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_tee.h                22
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h              87
20 files changed, 1655 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
new file mode 100644
index 000000000..5efe61f67
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
+ * It allows execution and flip to display of protected (i.e. encrypted)
+ * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
+ *
+ * Objects can opt-in to PXP encryption at creation time via the
+ * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
+ * correctly protected they must be used in conjunction with a context created
+ * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
+ * of those two uapi flags for details and restrictions. (A userspace sketch follows this file's diff.)
+ *
+ * Protected objects are tied to a pxp session; currently we only support one
+ * session, which i915 manages and whose index is available in the uapi
+ * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
+ * protected objects.
+ * The session is invalidated by the HW when certain events occur (e.g.
+ * suspend/resume). When this happens, all the objects that were used with the
+ * session are marked as invalid and all contexts marked as using protected
+ * content are banned. Any further attempt at using them in an execbuf call is
+ * rejected, while flips are converted to black frames.
+ *
+ * Some of the PXP setup operations are performed by the Management Engine,
+ * which is handled by the mei driver; communication between i915 and mei is
+ * performed via the mei_pxp component module.
+ */
+
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp)
+{
+ return container_of(pxp, struct intel_gt, pxp);
+}
+
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return pxp->ce;
+}
+
+bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return pxp->arb_is_valid;
+}
+
+/* KCR register definitions */
+#define KCR_INIT _MMIO(0x320f0)
+/* Setting KCR Init bit is required after system boot */
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+static void kcr_pxp_enable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static void kcr_pxp_disable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static int create_vcs_context(struct intel_pxp *pxp)
+{
+ static struct lock_class_key pxp_lock;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ int i;
+
+ /*
+ * Find the first VCS engine present. We're guaranteed there is one
+ * if we're in this function due to the check in has_pxp
+ */
+ for (i = 0, engine = NULL; !engine; i++)
+ engine = gt->engine_class[VIDEO_DECODE_CLASS][i];
+
+ GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_PXP_ADDR,
+ &pxp_lock, "pxp_context");
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
+ return PTR_ERR(ce);
+ }
+
+ pxp->ce = ce;
+
+ return 0;
+}
+
+static void destroy_vcs_context(struct intel_pxp *pxp)
+{
+ if (pxp->ce)
+ intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
+}
+
+static void pxp_init_full(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ /*
+ * we'll use the completion to check if there is a termination pending,
+ * so we start it as completed and we reinit it when a termination
+ * is triggered.
+ */
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+
+ intel_pxp_session_management_init(pxp);
+
+ ret = create_vcs_context(pxp);
+ if (ret)
+ return;
+
+ ret = intel_pxp_tee_component_init(pxp);
+ if (ret)
+ goto out_context;
+
+ drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
+
+ return;
+
+out_context:
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_init(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /* we rely on the mei PXP module */
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP))
+ return;
+
+ /*
+ * If HuC is loaded by GSC but PXP is disabled, we can skip the init of
+ * the full PXP session/object management and just init the tee channel.
+ */
+ if (HAS_PXP(gt->i915))
+ pxp_init_full(pxp);
+ else if (intel_huc_is_loaded_by_gsc(&gt->uc.huc) && intel_uc_uses_huc(&gt->uc))
+ intel_pxp_tee_component_init(pxp);
+}
+
+void intel_pxp_fini(struct intel_pxp *pxp)
+{
+ pxp->arb_is_valid = false;
+
+ intel_pxp_tee_component_fini(pxp);
+
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
+{
+ pxp->arb_is_valid = false;
+ reinit_completion(&pxp->termination);
+}
+
+static void pxp_queue_termination(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We want to get the same effect as if we received a termination
+ * interrupt, so just pretend that we did.
+ */
+ spin_lock_irq(gt->irq_lock);
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST;
+ queue_work(system_unbound_wq, &pxp->session_work);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static bool pxp_component_bound(struct intel_pxp *pxp)
+{
+ bool bound = false;
+
+ mutex_lock(&pxp->tee_mutex);
+ if (pxp->pxp_component)
+ bound = true;
+ mutex_unlock(&pxp->tee_mutex);
+
+ return bound;
+}
+
+/*
+ * the arb session is restarted from the irq work when we receive the
+ * termination completion interrupt
+ */
+int intel_pxp_start(struct intel_pxp *pxp)
+{
+ int ret = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return -ENXIO;
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (pxp->arb_is_valid)
+ goto unlock;
+
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(250))) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* make sure the compiler doesn't optimize the double access */
+ barrier();
+
+ if (!pxp->arb_is_valid)
+ ret = -EIO;
+
+unlock:
+ mutex_unlock(&pxp->arb_mutex);
+ return ret;
+}
+
+void intel_pxp_init_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_enable(pxp_to_gt(pxp));
+ intel_pxp_irq_enable(pxp);
+}
+
+void intel_pxp_fini_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_disable(pxp_to_gt(pxp));
+
+ intel_pxp_irq_disable(pxp);
+}
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ if (!i915_gem_object_is_protected(obj))
+ return -EINVAL;
+
+ GEM_BUG_ON(!pxp->key_instance);
+
+ /*
+ * If this is the first time we're using this object, it's not
+ * encrypted yet; it will be encrypted with the current key, so mark it
+ * as such. If the object is already encrypted, check instead if the
+ * used key is still valid.
+ */
+ if (!obj->pxp_key_instance && assign)
+ obj->pxp_key_instance = pxp->key_instance;
+
+ if (obj->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
+
+void intel_pxp_invalidate(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_gem_context *ctx, *cn;
+
+ /* ban all contexts marked as protected */
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ if (likely(!i915_gem_context_uses_protected_content(ctx))) {
+ i915_gem_context_put(ctx);
+ continue;
+ }
+
+ spin_unlock_irq(&i915->gem.contexts.lock);
+
+ /*
+ * By the time we get here we are either going to suspend with
+ * quiesced execution or the HW keys are already long gone and
+ * in this case it is worthless to attempt to close the context
+ * and wait for its execution. It will hang the GPU if it has
+ * not already. So, as a fast mitigation, we can ban the
+ * context as quickly as we can. That might race with the
+ * execbuffer, but currently this is the best that can be done.
+ */
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ intel_context_ban(ce, NULL);
+ i915_gem_context_unlock_engines(ctx);
+
+ /*
+ * The context has been banned, no need to keep the wakeref.
+ * This is safe from races because the only other place this
+ * is touched is context_release and we're holding a ctx ref
+ */
+ if (ctx->pxp_wakeref) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ ctx->pxp_wakeref);
+ ctx->pxp_wakeref = 0;
+ }
+
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock_irq(&i915->gem.contexts.lock);
+}
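
As promised in the DOC comment above, a userspace sketch of the PXP opt-in flow. This is illustrative only: the extension and parameter names come from the i915 uapi, error handling is elided, and the chained setparam for I915_CONTEXT_PARAM_RECOVERABLE follows the uapi documentation's requirement that protected contexts be non-recoverable:

	#include <stdint.h>
	#include <drm/i915_drm.h>

	/* 1) create a GEM object that opts in to PXP encryption */
	struct drm_i915_gem_create_ext_protected_content protected_ext = {
		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
	};
	struct drm_i915_gem_create_ext create = {
		.size = 4096,
		.extensions = (uintptr_t)&protected_ext,
	};
	/* ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create); */

	/* 2) create a context marked as using protected content */
	struct drm_i915_gem_context_create_ext_setparam p_recoverable = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0, /* protected contexts must not be recoverable */
		},
	};
	struct drm_i915_gem_context_create_ext_setparam p_protected = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = (uintptr_t)&p_recoverable,
		},
		.param = {
			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext ctx_create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&p_protected,
	};
	/* ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &ctx_create); */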
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
new file mode 100644
index 000000000..2da309088
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_H__
+#define __INTEL_PXP_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct intel_pxp;
+struct drm_i915_gem_object;
+
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp);
+bool intel_pxp_is_enabled(const struct intel_pxp *pxp);
+bool intel_pxp_is_active(const struct intel_pxp *pxp);
+
+void intel_pxp_init(struct intel_pxp *pxp);
+void intel_pxp_fini(struct intel_pxp *pxp);
+
+void intel_pxp_init_hw(struct intel_pxp *pxp);
+void intel_pxp_fini_hw(struct intel_pxp *pxp);
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+
+int intel_pxp_start(struct intel_pxp *pxp);
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign);
+
+void intel_pxp_invalidate(struct intel_pxp *pxp);
+
+#endif /* __INTEL_PXP_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
new file mode 100644
index 000000000..f41e45763
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_trace.h"
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+
+static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
+{
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp off */
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* select session */
+ *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);
+
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp on */
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+
+ *cs++ = MFX_WAIT_PXP;
+
+ return cs;
+}
+
+static u32 *pxp_emit_inline_termination(u32 *cs)
+{
+ /* session inline termination */
+ *cs++ = CRYPTO_KEY_EXCHANGE;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
+{
+ cs = pxp_emit_session_selection(cs, idx);
+ cs = pxp_emit_inline_termination(cs);
+
+ return cs;
+}
+
+static u32 *pxp_emit_wait(u32 *cs)
+{
+ /* wait for cmds to go through */
+ *cs++ = MFX_WAIT_PXP;
+ *cs++ = 0;
+
+ return cs;
+}
+
+/*
+ * if we ever need to terminate more than one session, we can submit multiple
+ * selections and terminations back-to-back with a single wait at the end (a sizing sketch follows this file's diff)
+ */
+#define SELECTION_LEN 10
+#define TERMINATION_LEN 2
+#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
+#define WAIT_LEN 2
+
+static void pxp_request_commit(struct i915_request *rq)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+ mutex_unlock(&tl->mutex);
+}
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
+{
+ struct i915_request *rq;
+ struct intel_context *ce = pxp->ce;
+ u32 *cs;
+ int err = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ cs = pxp_emit_session_termination(cs, id);
+ cs = pxp_emit_wait(cs);
+
+ intel_ring_advance(rq, cs);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ pxp_request_commit(rq);
+
+ if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
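
The comment above SELECTION_LEN anticipates terminating several sessions in one submission; a hedged sketch of how the emitters and sizing macros would compose (n and ids[] are hypothetical, everything else is from this file):

	/* reserve space for n back-to-back terminations plus one final wait */
	cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(n) + WAIT_LEN);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < n; i++)
		cs = pxp_emit_session_termination(cs, ids[i]);
	cs = pxp_emit_wait(cs);

	intel_ring_advance(rq, cs);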
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
new file mode 100644
index 000000000..6d6299543
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_CMD_H__
+#define __INTEL_PXP_CMD_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 idx);
+
+#endif /* __INTEL_PXP_CMD_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
new file mode 100644
index 000000000..739f9072f
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_FW_INTERFACE_42_H__
+#define __INTEL_PXP_FW_INTERFACE_42_H__
+
+#include <linux/types.h>
+#include "intel_pxp_cmd_interface_cmn.h"
+
+/* PXP-Opcode for Init Session */
+#define PXP42_CMDID_INIT_SESSION 0x1e
+
+/* PXP-Input-Packet: Init Session (Arb-Session) */
+struct pxp42_create_arb_in {
+ struct pxp_cmd_header header;
+ u32 protection_mode;
+#define PXP42_ARB_SESSION_MODE_HEAVY 0x2
+ u32 session_id;
+} __packed;
+
+/* PXP-Output-Packet: Init Session */
+struct pxp42_create_arb_out {
+ struct pxp_cmd_header header;
+} __packed;
+
+#endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
new file mode 100644
index 000000000..ad67e3f49
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_FW_INTERFACE_43_H__
+#define __INTEL_PXP_FW_INTERFACE_43_H__
+
+#include <linux/types.h>
+#include "intel_pxp_cmd_interface_cmn.h"
+
+/* PXP-Cmd-Op definitions */
+#define PXP43_CMDID_START_HUC_AUTH 0x0000003A
+
+/* PXP-Input-Packet: HUC-Authentication */
+struct pxp43_start_huc_auth_in {
+ struct pxp_cmd_header header;
+ __le64 huc_base_address;
+} __packed;
+
+/* PXP-Output-Packet: HUC-Authentication */
+struct pxp43_start_huc_auth_out {
+ struct pxp_cmd_header header;
+} __packed;
+
+#endif /* __INTEL_PXP_FW_INTERFACE_43_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
new file mode 100644
index 000000000..c2f23394f
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_FW_INTERFACE_CMN_H__
+#define __INTEL_PXP_FW_INTERFACE_CMN_H__
+
+#include <linux/types.h>
+
+#define PXP_APIVER(x, y) (((x) & 0xFFFF) << 16 | ((y) & 0xFFFF))
+
+/*
+ * there are a lot of status codes for PXP, but we only define the cross-API
+ * common ones that we actually can handle in the kernel driver. Other failure
+ * codes should be printed to error msg for debug.
+ */
+enum pxp_status {
+ PXP_STATUS_SUCCESS = 0x0,
+ PXP_STATUS_OP_NOT_PERMITTED = 0x4013
+};
+
+/* Common PXP FW message header */
+struct pxp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ union {
+ u32 status; /* out */
+ u32 stream_id; /* in */
+ };
+ /* Length of the message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+#endif /* __INTEL_PXP_FW_INTERFACE_CMN_H__ */
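
For reference, PXP_APIVER(4, 2) packs to 0x00040002, and buffer_len covers only the payload that follows the header; intel_pxp_tee.c below fills it as sizeof(msg_in) - sizeof(msg_in.header).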
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
new file mode 100644
index 000000000..4359e8be4
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/string_helpers.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_debugfs.h"
+#include "intel_pxp_irq.h"
+
+static int pxp_info_show(struct seq_file *m, void *data)
+{
+ struct intel_pxp *pxp = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ bool enabled = intel_pxp_is_enabled(pxp);
+
+ if (!enabled) {
+ drm_printf(&p, "pxp disabled\n");
+ return 0;
+ }
+
+ drm_printf(&p, "active: %s\n", str_yes_no(intel_pxp_is_active(pxp)));
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(pxp_info);
+
+static int pxp_terminate_get(void *data, u64 *val)
+{
+ /* nothing to read */
+ return -EPERM;
+}
+
+static int pxp_terminate_set(void *data, u64 val)
+{
+ struct intel_pxp *pxp = data;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(gt->irq_lock);
+ intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(gt->irq_lock);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n");
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *gt_root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "info", &pxp_info_fops, NULL },
+ { "terminate_state", &pxp_terminate_fops, NULL },
+ };
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ if (!HAS_PXP((pxp_to_gt(pxp)->i915)))
+ return;
+
+ root = debugfs_create_dir("pxp", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), pxp);
+}
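
Once registered, these files land under the per-GT debugfs tree, typically /sys/kernel/debug/dri/<card>/gt/pxp/ (exact path assumed, it depends on the kernel's debugfs layout): reading "info" prints the state shown in pxp_info_show(), and writing any value to "terminate_state" simulates a termination interrupt.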
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
new file mode 100644
index 000000000..7e0c3d2f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PXP_DEBUGFS_H__
+#define __INTEL_PXP_DEBUGFS_H__
+
+struct intel_pxp;
+struct dentry;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root);
+#else
+static inline void
+intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
new file mode 100644
index 000000000..2e1165522
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2021-2022, Intel Corporation. All rights reserved.
+ */
+
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+
+#include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_huc.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+#include "intel_pxp_cmd_interface_43.h"
+
+int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_huc *huc = &gt->uc.huc;
+ struct pxp43_start_huc_auth_in huc_in = {0};
+ struct pxp43_start_huc_auth_out huc_out = {0};
+ dma_addr_t huc_phys_addr;
+ u8 client_id = 0;
+ u8 fence_id = 0;
+ int err;
+
+ if (!pxp->pxp_component)
+ return -ENODEV;
+
+ huc_phys_addr = i915_gem_object_get_dma_address(huc->fw.obj, 0);
+
+ /* write the PXP message into the lmem (the sg list) */
+ huc_in.header.api_version = PXP_APIVER(4, 3);
+ huc_in.header.command_id = PXP43_CMDID_START_HUC_AUTH;
+ huc_in.header.status = 0;
+ huc_in.header.buffer_len = sizeof(huc_in.huc_base_address);
+ huc_in.huc_base_address = huc_phys_addr;
+
+ err = intel_pxp_tee_stream_message(pxp, client_id, fence_id,
+ &huc_in, sizeof(huc_in),
+ &huc_out, sizeof(huc_out));
+ if (err < 0) {
+ drm_err(&gt->i915->drm,
+ "Failed to send HuC load and auth command to GSC [%d]!\n",
+ err);
+ return err;
+ }
+
+ /*
+ * HuC does sometimes survive suspend/resume (it depends on how "deep"
+ * a sleep state the device reaches) so we can end up here on resume
+ * with HuC already loaded, in which case the GSC will return
+ * PXP_STATUS_OP_NOT_PERMITTED. We can therefore consider the HuC
+ * correctly transferred in this scenario; if the same error is ever
+ * returned with HuC not loaded we'll still catch it when we check the
+ * authentication bit later.
+ */
+ if (huc_out.header.status != PXP_STATUS_SUCCESS &&
+ huc_out.header.status != PXP_STATUS_OP_NOT_PERMITTED) {
+ drm_err(&gt->i915->drm,
+ "HuC load failed with GSC error = 0x%x\n",
+ huc_out.header.status);
+ return -EPROTO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.h b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.h
new file mode 100644
index 000000000..e40847a91
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2021-2022, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_HUC_H__
+#define __INTEL_PXP_HUC_H__
+
+struct intel_pxp;
+
+int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp);
+
+#endif /* __INTEL_PXP_HUC_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
new file mode 100644
index 000000000..c28be4307
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_gt_types.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "intel_runtime_pm.h"
+
+/**
+ * intel_pxp_irq_handler - Handles PXP interrupts.
+ * @pxp: pointer to pxp struct
+ * @iir: interrupt vector
+ */
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
+ return;
+
+ lockdep_assert_held(gt->irq_lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT |
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
+ /* immediately mark PXP as inactive on termination */
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ }
+
+ if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->session_events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->session_events)
+ queue_work(system_unbound_wq, &pxp->session_work);
+}
+
+static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 mask = interrupts << 16;
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask);
+}
+
+static inline void pxp_irq_reset(struct intel_gt *gt)
+{
+ spin_lock_irq(gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ spin_lock_irq(gt->irq_lock);
+
+ if (!pxp->irq_enabled)
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
+
+ __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
+ pxp->irq_enabled = true;
+
+ spin_unlock_irq(gt->irq_lock);
+}
+
+void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We always need to submit a global termination when we re-enable the
+ * interrupts, so there is no need to make sure that the session state
+ * makes sense at the end of this function. Just make sure this is not
+ * called in a path where the driver considers the session valid and
+ * doesn't call a termination on restart.
+ */
+ GEM_WARN_ON(intel_pxp_is_active(pxp));
+
+ spin_lock_irq(gt->irq_lock);
+
+ pxp->irq_enabled = false;
+ __pxp_set_interrupts(gt, 0);
+
+ spin_unlock_irq(gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ pxp_irq_reset(gt);
+
+ flush_work(&pxp->session_work);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
new file mode 100644
index 000000000..8c292dc86
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_IRQ_H__
+#define __INTEL_PXP_IRQ_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT BIT(1)
+#define GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT BIT(2)
+#define GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT BIT(3)
+
+#define GEN12_PXP_INTERRUPTS \
+ (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | \
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT | \
+ GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_irq_enable(struct intel_pxp *pxp);
+void intel_pxp_irq_disable(struct intel_pxp *pxp);
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir);
+#else
+static inline void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+}
+
+static inline void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
new file mode 100644
index 000000000..6a7d4e2ee
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_pm.h"
+#include "intel_pxp_session.h"
+#include "i915_drv.h"
+
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_invalidate(pxp);
+}
+
+void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
+ intel_pxp_fini_hw(pxp);
+ pxp->hw_state_invalidated = false;
+ }
+}
+
+void intel_pxp_resume(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ /*
+ * The PXP component gets automatically unbound when we go into S3 and
+ * re-bound after we come out, so in that scenario we can defer the
+ * hw init to the bind call.
+ */
+ if (!pxp->pxp_component)
+ return;
+
+ intel_pxp_init_hw(pxp);
+}
+
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
new file mode 100644
index 000000000..586be7691
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_PM_H__
+#define __INTEL_PXP_PM_H__
+
+struct intel_pxp;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp);
+void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_resume(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+}
+#endif
+static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
+{
+ intel_pxp_resume(pxp);
+}
+#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
new file mode 100644
index 000000000..85572360c
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+
+#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */
+
+#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */
+
+/* PXP global terminate register for session termination */
+#define PXP_GLOBAL_TERMINATE _MMIO(0x320f8)
+
+static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 sip = 0;
+
+ /* if we're suspended the session is considered off */
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
+ sip = intel_uncore_read(uncore, GEN12_KCR_SIP);
+
+ return sip & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 mask = BIT(id);
+ int ret;
+
+ /* if we're suspended the session is considered off */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref)
+ return in_play ? -ENODEV : 0;
+
+ ret = intel_wait_for_register(uncore,
+ GEN12_KCR_SIP,
+ mask,
+ in_play ? mask : 0,
+ 100);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return ret;
+}
+
+static int pxp_create_arb_session(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ pxp->arb_is_valid = false;
+
+ if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ drm_err(&gt->i915->drm, "arb session already in play at creation time\n");
+ return -EEXIST;
+ }
+
+ ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+ drm_err(&gt->i915->drm, "arb session failed to go in play\n");
+ return ret;
+ }
+ drm_dbg(&gt->i915->drm, "PXP ARB session is alive\n");
+
+ if (!++pxp->key_instance)
+ ++pxp->key_instance;
+
+ pxp->arb_is_valid = true;
+
+ return 0;
+}
+
+static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /* must mark termination in progress before calling this function */
+ GEM_WARN_ON(pxp->arb_is_valid);
+
+ /* terminate the hw sessions */
+ ret = intel_pxp_terminate_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to submit session termination\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Session state did not clear\n");
+ return ret;
+ }
+
+ intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+
+ return ret;
+}
+
+static void pxp_terminate(struct intel_pxp *pxp)
+{
+ int ret;
+
+ pxp->hw_state_invalidated = true;
+
+ /*
+ * if we fail to submit the termination there is no point in waiting for
+ * it to complete. PXP will be marked as non-active until the next
+ * termination is issued.
+ */
+ ret = pxp_terminate_arb_session_and_global(pxp);
+ if (ret)
+ complete_all(&pxp->termination);
+}
+
+static void pxp_terminate_complete(struct intel_pxp *pxp)
+{
+ /* Re-create the arb session after teardown handle complete */
+ if (fetch_and_zero(&pxp->hw_state_invalidated))
+ pxp_create_arb_session(pxp);
+
+ complete_all(&pxp->termination);
+}
+
+static void pxp_session_work(struct work_struct *work)
+{
+ struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ intel_wakeref_t wakeref;
+ u32 events = 0;
+
+ spin_lock_irq(gt->irq_lock);
+ events = fetch_and_zero(&pxp->session_events);
+ spin_unlock_irq(gt->irq_lock);
+
+ if (!events)
+ return;
+
+ if (events & PXP_INVAL_REQUIRED)
+ intel_pxp_invalidate(pxp);
+
+ /*
+ * If we're processing an event while suspending then don't bother,
+ * we're going to re-init everything on resume anyway.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
+ if (!wakeref)
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
+ events &= ~PXP_TERMINATION_COMPLETE;
+ pxp_terminate(pxp);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
+
+void intel_pxp_session_management_init(struct intel_pxp *pxp)
+{
+ mutex_init(&pxp->arb_mutex);
+ INIT_WORK(&pxp->session_work, pxp_session_work);
+}
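
Note the "if (!++pxp->key_instance) ++pxp->key_instance;" idiom in pxp_create_arb_session(): it advances the key counter while skipping zero on u32 wraparound, so intel_pxp_key_check() can always treat a zero obj->pxp_key_instance as "never encrypted".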
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
new file mode 100644
index 000000000..903ac52cf
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_SESSION_H__
+#define __INTEL_PXP_SESSION_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_session_management_init(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_session_management_init(struct intel_pxp *pxp)
+{
+}
+#endif
+#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
new file mode 100644
index 000000000..b0c9170b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <linux/component.h>
+
+#include <drm/i915_pxp_tee_interface.h>
+#include <drm/i915_component.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_cmd_interface_42.h"
+#include "intel_pxp_huc.h"
+
+static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+
+ return &to_gt(i915)->pxp;
+}
+
+static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
+ void *msg_in, u32 msg_in_size,
+ void *msg_out, u32 msg_out_max_size,
+ u32 *msg_out_rcv_size)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ int ret = 0;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ /*
+ * The binding of the component is asynchronous from i915 probe, so we
+ * can't be sure it has happened.
+ */
+ if (!pxp_component) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send PXP TEE message\n");
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
+ goto unlock;
+ }
+
+ if (ret > msg_out_max_size) {
+ drm_err(&i915->drm,
+ "Failed to receive PXP TEE message due to unexpected output size\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (msg_out_rcv_size)
+ *msg_out_rcv_size = ret;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
+ u8 client_id, u32 fence_id,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
+{
+ /* TODO: for bigger objects we need to use a sg of 4k pages */
+ const size_t max_msg_size = PAGE_SIZE;
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ unsigned int offset = 0;
+ struct scatterlist *sg;
+ int ret;
+
+ if (msg_in_len > max_msg_size || msg_out_len > max_msg_size)
+ return -ENOSPC;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ if (unlikely(!pxp_component || !pxp_component->ops->gsc_command)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ GEM_BUG_ON(!pxp->stream_cmd.obj);
+
+ sg = i915_gem_object_get_sg_dma(pxp->stream_cmd.obj, 0, &offset);
+
+ memcpy(pxp->stream_cmd.vaddr, msg_in, msg_in_len);
+
+ ret = pxp_component->ops->gsc_command(pxp_component->tee_dev, client_id,
+ fence_id, sg, msg_in_len, sg);
+ if (ret < 0)
+ drm_err(&i915->drm, "Failed to send PXP TEE gsc command\n");
+ else
+ memcpy(msg_out, pxp->stream_cmd.vaddr, msg_out_len);
+
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+/**
+ * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
+ * @i915_kdev: pointer to i915 kernel device
+ * @tee_kdev: pointer to tee kernel device
+ * @data: pointer to pxp_tee_master containing the function pointers
+ *
+ * This bind function is called during the system boot or resume from system sleep.
+ *
+ * Return: 0 if successful.
+ */
+static int i915_pxp_tee_component_bind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ struct intel_uc *uc = &pxp_to_gt(pxp)->uc;
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = data;
+ pxp->pxp_component->tee_dev = tee_kdev;
+ mutex_unlock(&pxp->tee_mutex);
+
+ if (intel_uc_uses_huc(uc) && intel_huc_is_loaded_by_gsc(&uc->huc)) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ /* load huc via pxp */
+ ret = intel_huc_fw_load_and_auth_via_gsc(&uc->huc);
+ if (ret < 0)
+ drm_err(&i915->drm, "failed to load huc via gsc %d\n", ret);
+ }
+ }
+
+ /* if we are suspended, the HW will be re-initialized on resume */
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
+ if (!wakeref)
+ return 0;
+
+ /* the component is required to fully start the PXP HW */
+ if (intel_pxp_is_enabled(pxp))
+ intel_pxp_init_hw(pxp);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return ret;
+}
+
+static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
+
+ if (intel_pxp_is_enabled(pxp))
+ with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = NULL;
+ mutex_unlock(&pxp->tee_mutex);
+}
+
+static const struct component_ops i915_pxp_tee_component_ops = {
+ .bind = i915_pxp_tee_component_bind,
+ .unbind = i915_pxp_tee_component_unbind,
+};
+
+static int alloc_streaming_command(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_gem_object *obj = NULL;
+ void *cmd;
+ int err;
+
+ pxp->stream_cmd.obj = NULL;
+ pxp->stream_cmd.vaddr = NULL;
+
+ if (!IS_DGFX(i915))
+ return 0;
+
+ /* allocate lmem object of one page for PXP command memory and store it */
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj)) {
+ drm_err(&i915->drm, "Failed to allocate pxp streaming command!\n");
+ return PTR_ERR(obj);
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ drm_err(&i915->drm, "Failed to pin gsc message page!\n");
+ goto out_put;
+ }
+
+ /* map the lmem into the virtual memory pointer */
+ cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(cmd)) {
+ drm_err(&i915->drm, "Failed to map gsc message page!\n");
+ err = PTR_ERR(cmd);
+ goto out_unpin;
+ }
+
+ memset(cmd, 0, obj->base.size);
+
+ pxp->stream_cmd.obj = obj;
+ pxp->stream_cmd.vaddr = cmd;
+
+ return 0;
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void free_streaming_command(struct intel_pxp *pxp)
+{
+ struct drm_i915_gem_object *obj = fetch_and_zero(&pxp->stream_cmd.obj);
+
+ if (!obj)
+ return;
+
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+}
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct drm_i915_private *i915 = gt->i915;
+
+ mutex_init(&pxp->tee_mutex);
+
+ ret = alloc_streaming_command(pxp);
+ if (ret)
+ return ret;
+
+ ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
+ I915_COMPONENT_PXP);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
+ goto out_free;
+ }
+
+ pxp->pxp_component_added = true;
+
+ return 0;
+
+out_free:
+ free_streaming_command(pxp);
+ return ret;
+}
+
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+
+ if (!pxp->pxp_component_added)
+ return;
+
+ component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
+ pxp->pxp_component_added = false;
+
+ free_streaming_command(pxp);
+}
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct pxp42_create_arb_in msg_in = {0};
+ struct pxp42_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_APIVER(4, 2);
+ msg_in.header.command_id = PXP42_CMDID_INIT_SESSION;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP42_ARB_SESSION_MODE_HEAVY;
+ msg_in.session_id = arb_session_id;
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
+ else if (msg_out.header.status != 0x0)
+ drm_warn(&i915->drm, "PXP firmware failed arb session init request ret=[0x%08x]\n",
+ msg_out.header.status);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
new file mode 100644
index 000000000..aeb3dfe7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_H__
+#define __INTEL_PXP_TEE_H__
+
+#include "intel_pxp.h"
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp);
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp);
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id);
+
+int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
+ u8 client_id, u32 fence_id,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+
+#endif /* __INTEL_PXP_TEE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
new file mode 100644
index 000000000..f74b1e11a
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TYPES_H__
+#define __INTEL_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_context;
+struct i915_pxp_component;
+
+/**
+ * struct intel_pxp - pxp state
+ */
+struct intel_pxp {
+ /**
+ * @pxp_component: i915_pxp_component struct of the bound mei_pxp
+ * module. Only set and cleared inside component bind/unbind functions,
+ * which are protected by &tee_mutex.
+ */
+ struct i915_pxp_component *pxp_component;
+ /**
+ * @pxp_component_added: track if the pxp component has been added.
+ * Set and cleared in tee init and fini functions respectively.
+ */
+ bool pxp_component_added;
+
+ /** @ce: kernel-owned context used for PXP operations */
+ struct intel_context *ce;
+
+ /** @arb_mutex: protects arb session start */
+ struct mutex arb_mutex;
+ /**
+ * @arb_is_valid: tracks arb session status.
+ * After a teardown, the arb session can still be in play on the HW
+ * even if the keys are gone, so we can't rely on the HW state of the
+ * session to know if it's valid and need to track the status in SW.
+ */
+ bool arb_is_valid;
+
+ /**
+ * @key_instance: tracks which key instance we're on, so we can use it
+ * to determine if an object was created using the current key or a
+ * previous one.
+ */
+ u32 key_instance;
+
+ /** @tee_mutex: protects the tee channel binding and messaging. */
+ struct mutex tee_mutex;
+
+ /** @stream_cmd: LMEM obj used to send stream PXP commands to the GSC */
+ struct {
+ struct drm_i915_gem_object *obj; /* contains PXP command memory */
+ void *vaddr; /* virtual memory for PXP command */
+ } stream_cmd;
+
+ /**
+ * @hw_state_invalidated: if the HW perceives an attack on the integrity
+ * of the encryption it will invalidate the keys and expect SW to
+ * re-initialize the session. We keep track of this state to make sure
+ * we only re-start the arb session when required.
+ */
+ bool hw_state_invalidated;
+
+ /** @irq_enabled: tracks the status of the kcr irqs */
+ bool irq_enabled;
+ /**
+ * @termination: tracks the status of a pending termination. Only
+ * re-initialized under gt->irq_lock and completed in &session_work.
+ */
+ struct completion termination;
+
+ /** @session_work: worker that manages session events. */
+ struct work_struct session_work;
+ /** @session_events: pending session events, protected with gt->irq_lock. */
+ u32 session_events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+#define PXP_INVAL_REQUIRED BIT(2)
+};
+
+#endif /* __INTEL_PXP_TYPES_H__ */
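
Taken together with the files above: intel_pxp_irq_handler() only records PXP_TERMINATION_REQUEST / PXP_TERMINATION_COMPLETE / PXP_INVAL_REQUIRED under gt->irq_lock, and pxp_session_work() later consumes those bits outside the lock, invalidating objects and terminating and then re-creating the arb session once the hardware signals completion.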