path: root/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
commit    5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree      cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
download  linux-5b7c4cabbb65f5c469464da6c5f614cbd7f730f2.tar.gz
          linux-5b7c4cabbb65f5c469464da6c5f614cbd7f730f2.zip
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull networking updates from Jakub Kicinski:

 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis.
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99). Allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
Diffstat (limited to 'drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c')
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c  1049
1 file changed, 1049 insertions, 0 deletions
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
new file mode 100644
index 000000000..bd2c4ac45
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
+
+#include "mdp5_kms.h"
+
+struct mdp5_plane {
+ struct drm_plane base;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+};
+#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest);
+
+static struct mdp5_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static bool plane_enabled(struct drm_plane_state *state)
+{
+ return state->visible;
+}
+
+static void mdp5_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+ drm_plane_cleanup(plane);
+
+ kfree(mdp5_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void mdp5_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ unsigned int zpos;
+
+ drm_plane_create_rotation_property(plane,
+ DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_ROTATE_180 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ zpos = STAGE_BASE;
+ else
+ zpos = STAGE0 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 255);
+}
+
+static void
+mdp5_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ struct mdp5_kms *mdp5_kms = get_kms(state->plane);
+
+ drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
+ pstate->hwpipe->name : "(null)");
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright-hwpipe=%s\n",
+ pstate->r_hwpipe ? pstate->r_hwpipe->name :
+ "(null)");
+ drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
+ drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
+ drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
+ drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
+ drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
+}
+
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
+ mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
+ __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *mdp5_state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+ sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
+
+ return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ kfree(pstate);
+}
+
+static const struct drm_plane_funcs mdp5_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = mdp5_plane_destroy,
+ .reset = mdp5_plane_reset,
+ .atomic_duplicate_state = mdp5_plane_duplicate_state,
+ .atomic_destroy_state = mdp5_plane_destroy_state,
+ .atomic_print_state = mdp5_plane_atomic_print_state,
+};
+
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;
+
+ if (!new_state->fb)
+ return 0;
+
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
+
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
+}
+
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct msm_kms *kms = &mdp5_kms->base.base;
+ struct drm_framebuffer *fb = old_state->fb;
+ bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;
+
+ if (!fb)
+ return;
+
+ DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
+}
+
+static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *state)
+{
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_plane *plane = state->plane;
+ struct drm_plane_state *old_state = plane->state;
+ struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
+ bool new_hwpipe = false;
+ bool need_right_hwpipe = false;
+ uint32_t max_width, max_height;
+ bool out_of_bounds = false;
+ uint32_t caps = 0;
+ int min_scale, max_scale;
+ int ret;
+
+ DBG("%s: check (%d -> %d)", plane->name,
+ plane_enabled(old_state), plane_enabled(state));
+
+ max_width = config->hw->lm.max_width << 16;
+ max_height = config->hw->lm.max_height << 16;
+
+ /* Make sure source dimensions are within bounds. */
+ if (state->src_h > max_height)
+ out_of_bounds = true;
+
+ if (state->src_w > max_width) {
+ /* If source split is supported, we can go up to 2x
+ * the max LM width, but we'd need to stage another
+ * hwpipe to the right LM. So, the drm_plane would
+ * consist of 2 hwpipes.
+ */
+ if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+ (state->src_w <= 2 * max_width))
+ need_right_hwpipe = true;
+ else
+ out_of_bounds = true;
+ }
+
+ if (out_of_bounds) {
+ struct drm_rect src = drm_plane_state_src(state);
+ DBG("Invalid source size "DRM_RECT_FP_FMT,
+ DRM_RECT_FP_ARG(&src));
+ return -ERANGE;
+ }
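+
+ /*
+ * Worked example (illustrative, assuming a Layer Mixer limited to
+ * 2048 pixels): max_width = 2048 << 16. A 1920-wide source
+ * (src_w = 1920 << 16) fits in one hwpipe; a 3840-wide source
+ * exceeds max_width but not 2 * max_width, so it is only accepted
+ * when MDP_CAP_SRC_SPLIT is set and a right hwpipe is staged,
+ * while a 4200-wide source is rejected with -ERANGE either way.
+ */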
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (plane_enabled(state)) {
+ unsigned int rotation;
+ const struct mdp_format *format;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ uint32_t blkcfg = 0;
+
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format))
+ caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;
+
+ if (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))
+ caps |= MDP_PIPE_CAP_SCALE;
+
+ rotation = drm_rotation_simplify(state->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+
+ if (rotation & DRM_MODE_REFLECT_X)
+ caps |= MDP_PIPE_CAP_HFLIP;
+
+ if (rotation & DRM_MODE_REFLECT_Y)
+ caps |= MDP_PIPE_CAP_VFLIP;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ caps |= MDP_PIPE_CAP_CURSOR;
+
+ /* (re)allocate hw pipe if we don't have one or caps-mismatch: */
+ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
+ new_hwpipe = true;
+
+ /*
+ * (re)allocate hw pipe if we're either requesting 2 hw pipes
+ * or we're switching from 2 hw pipes to 1 hw pipe because the
+ * new src_w can be supported by 1 hw pipe itself.
+ */
+ if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+ (!need_right_hwpipe && mdp5_state->r_hwpipe))
+ new_hwpipe = true;
+
+ if (mdp5_kms->smp) {
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(state->fb));
+
+ blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
+ state->src_w >> 16, false);
+
+ if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
+ new_hwpipe = true;
+ }
+
+ /* (re)assign hwpipe if needed, otherwise keep old one: */
+ if (new_hwpipe) {
+ /* TODO maybe we want to re-assign the hwpipe sometimes,
+ * in cases when we no longer need some caps, to make
+ * it available for other planes?
+ */
+ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ struct mdp5_hw_pipe *old_right_hwpipe =
+ mdp5_state->r_hwpipe;
+ struct mdp5_hw_pipe *new_hwpipe = NULL;
+ struct mdp5_hw_pipe *new_right_hwpipe = NULL;
+
+ ret = mdp5_pipe_assign(state->state, plane, caps,
+ blkcfg, &new_hwpipe,
+ need_right_hwpipe ?
+ &new_right_hwpipe : NULL);
+ if (ret) {
+ DBG("%s: failed to assign hwpipe(s)!",
+ plane->name);
+ return ret;
+ }
+
+ mdp5_state->hwpipe = new_hwpipe;
+ if (need_right_hwpipe)
+ mdp5_state->r_hwpipe = new_right_hwpipe;
+ else
+ /*
+ * set it to NULL so that the driver knows we
+ * don't have a right hwpipe when committing a
+ * new state
+ */
+ mdp5_state->r_hwpipe = NULL;
+
+
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
+ }
+ } else {
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
+ mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
+ }
+
+ return 0;
+}
+
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
+}
+
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+
+ DBG("%s: update", plane->name);
+
+ if (plane_enabled(new_state)) {
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane,
+ new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
+ struct drm_crtc_state *crtc_state;
+ int min_scale, max_scale;
+ int ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ if (!crtc_state->active)
+ return -EINVAL;
+
+ /* don't use fast path if we don't have a hwpipe allocated yet */
+ if (!mdp5_state->hwpipe)
+ return -EINVAL;
+
+ /* only allow changing of position (crtc x/y or src x/y) in the fast path */
+ if (plane->state->crtc != new_plane_state->crtc ||
+ plane->state->src_w != new_plane_state->src_w ||
+ plane->state->src_h != new_plane_state->src_h ||
+ plane->state->crtc_w != new_plane_state->crtc_w ||
+ plane->state->crtc_h != new_plane_state->crtc_h ||
+ !plane->state->fb ||
+ plane->state->fb != new_plane_state->fb)
+ return -EINVAL;
+
+ min_scale = FRAC_16_16(1, 8);
+ max_scale = FRAC_16_16(8, 1);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+ if (ret)
+ return ret;
+
+ /*
+ * If the visibility of the plane changes (i.e. if the cursor is
+ * clipped out completely), we can't take the async path: we would
+ * need to stage/unstage the plane from the Layer Mixer(s) and
+ * assign/unassign the hwpipe(s) tied to the plane. We avoid the
+ * fast path for both these reasons.
+ */
+ if (new_plane_state->visible != plane->state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+
+ if (plane_enabled(new_state)) {
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline =
+ mdp5_crtc_get_pipeline(new_state->crtc);
+ int ret;
+
+ ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
+ WARN_ON(ret < 0);
+
+ ctl = mdp5_crtc_get_ctl(new_state->crtc);
+
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
+ }
+
+ *to_mdp5_plane_state(plane->state) =
+ *to_mdp5_plane_state(new_state);
+
+ new_state->fb = old_fb;
+}
+
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+ .prepare_fb = mdp5_plane_prepare_fb,
+ .cleanup_fb = mdp5_plane_cleanup_fb,
+ .atomic_check = mdp5_plane_atomic_check,
+ .atomic_update = mdp5_plane_atomic_update,
+ .atomic_async_check = mdp5_plane_atomic_async_check,
+ .atomic_async_update = mdp5_plane_atomic_async_update,
+};
+
+static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
+ enum mdp5_pipe pipe,
+ struct drm_framebuffer *fb)
+{
+ struct msm_kms *kms = &mdp5_kms->base.base;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+ MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+ MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 0));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 1));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 2));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+ msm_framebuffer_iova(fb, kms->aspace, 3));
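+
+ /*
+ * Illustrative note: STRIDE_A packs the pitches of planes 0 and 1,
+ * STRIDE_B packs planes 2 and 3, and SRC0..SRC3_ADDR take the
+ * per-plane iovas; e.g. a two-plane NV12 framebuffer populates only
+ * the first pair of each, the pitches of the unused planes being 0.
+ */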
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
+{
+ uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
+ ~MDP5_PIPE_OP_MODE_CSC_1_EN;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
+}
+
+/* Note: mdp5_plane->pipe_lock must be locked */
+static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ struct csc_cfg *csc)
+{
+ uint32_t i, mode = 0; /* RGB, no CSC */
+ uint32_t *matrix;
+
+ if (unlikely(!csc))
+ return;
+
+ if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
+ mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
+ if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
+ mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
+ mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);
+
+ matrix = csc->matrix;
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
+ MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));
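+
+ /*
+ * Layout note (illustrative): csc->matrix[0..8] holds the 3x3
+ * colour-space matrix in row-major order; the hardware packs two
+ * coefficients per COEFF register (c11|c12, c13|c21, c22|c23,
+ * c31|c32) with c33 alone in COEFF_4, and the loop below programs
+ * a low/high clamp pair plus pre/post bias values per component.
+ */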
+
+ for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
+ uint32_t *pre_clamp = csc->pre_clamp;
+ uint32_t *post_clamp = csc->post_clamp;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
+ MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
+ MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
+ }
+}
+
+#define PHASE_STEP_SHIFT 21
+#define DOWN_SCALE_RATIO_MAX 32 /* 2^(26-21) */
+
+static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
+{
+ uint32_t unit;
+
+ if (src == 0 || dst == 0)
+ return -EINVAL;
+
+ /*
+ * PHASE_STEP_X/Y is coded on 26 bits (25:0),
+ * where 2^21 represents the unity "1" in fixed-point hardware design.
+ * This leaves 5 bits for the integer part (downscale case):
+ * -> maximum downscale ratio = 0b1_1111 = 31
+ */
+ if (src > (dst * DOWN_SCALE_RATIO_MAX))
+ return -EOVERFLOW;
+
+ unit = 1 << PHASE_STEP_SHIFT;
+ *out_phase = mult_frac(unit, src, dst);
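+
+ /*
+ * Worked example (illustrative): downscaling src = 1920 to dst = 960
+ * gives *out_phase = (1 << 21) * 1920 / 960 = 0x400000, i.e. the
+ * scaler advances two source pixels per output pixel; src = 1920,
+ * dst = 30 (ratio 64) would already have been rejected above with
+ * -EOVERFLOW since it exceeds DOWN_SCALE_RATIO_MAX.
+ */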
+
+ return 0;
+}
+
+static int calc_scalex_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasex_steps[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
+ uint32_t phasex_step;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasex_step);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ return ret;
+ }
+
+ phasex_steps[COMP_0] = phasex_step;
+ phasex_steps[COMP_3] = phasex_step;
+ phasex_steps[COMP_1_2] = phasex_step / info->hsub;
+
+ return 0;
+}
+
+static int calc_scaley_steps(struct drm_plane *plane,
+ uint32_t pixel_format, uint32_t src, uint32_t dest,
+ uint32_t phasey_steps[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(pixel_format);
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ struct device *dev = mdp5_kms->dev->dev;
+ uint32_t phasey_step;
+ int ret;
+
+ ret = calc_phase_step(src, dest, &phasey_step);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ return ret;
+ }
+
+ phasey_steps[COMP_0] = phasey_step;
+ phasey_steps[COMP_3] = phasey_step;
+ phasey_steps[COMP_1_2] = phasey_step / info->vsub;
+
+ return 0;
+}
+
+static uint32_t get_scale_config(const struct mdp_format *format,
+ uint32_t src, uint32_t dst, bool horz)
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ bool scaling = format->is_yuv ? true : (src != dst);
+ uint32_t sub;
+ uint32_t ya_filter, uv_filter;
+ bool yuv = format->is_yuv;
+
+ if (!scaling)
+ return 0;
+
+ if (yuv) {
+ sub = horz ? info->hsub : info->vsub;
+ uv_filter = ((src / sub) <= dst) ?
+ SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
+ }
+ ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
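+
+ /*
+ * Example (illustrative): upscaling 1280 -> 1920 picks the bilinear
+ * filter (src <= dst) while downscaling 3840 -> 1920 picks PCMN; for
+ * a 4:2:0 YUV format the chroma comparison uses src / sub, so even a
+ * 1:1 NV12 blit keeps the scaler enabled with bilinear filtering to
+ * up-sample chroma.
+ */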
+
+ if (horz)
+ return MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
+ COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
+ else
+ return MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
+ MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
+ COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
+}
+
+static void calc_pixel_ext(const struct mdp_format *format,
+ uint32_t src, uint32_t dst, uint32_t phase_step[2],
+ int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
+ bool horz)
+{
+ bool scaling = format->is_yuv ? true : (src != dst);
+ int i;
+
+ /*
+ * Note:
+ * We assume here that:
+ * 1. PCMN filter is used for downscale
+ * 2. bilinear filter is used for upscale
+ * 3. we are in a single pipe configuration
+ */
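+ /*
+ * Illustrative result: with scaling active every component gets a
+ * one-pixel repeat extension on the right/bottom edge (edge2 = 1)
+ * and none on the left/top (edge1 = 0), so a 1920x1080 ROI ends up
+ * requesting 1921x1081 pixels from the fetch logic; without scaling
+ * both extensions stay 0.
+ */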
+
+ for (i = 0; i < COMP_MAX; i++) {
+ pix_ext_edge1[i] = 0;
+ pix_ext_edge2[i] = scaling ? 1 : 0;
+ }
+}
+
+static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
+ const struct mdp_format *format,
+ uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
+ uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
+{
+ const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
+ uint32_t lr, tb, req;
+ int i;
+
+ for (i = 0; i < COMP_MAX; i++) {
+ uint32_t roi_w = src_w;
+ uint32_t roi_h = src_h;
+
+ if (format->is_yuv && i == COMP_1_2) {
+ roi_w /= info->hsub;
+ roi_h /= info->vsub;
+ }
+
+ lr = (pe_left[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
+ MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);
+
+ lr |= (pe_right[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
+ MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);
+
+ tb = (pe_top[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
+ MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);
+
+ tb |= (pe_bottom[i] >= 0) ?
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
+ MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);
+
+ req = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
+ pe_left[i] + pe_right[i]);
+
+ req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
+ pe_top[i] + pe_bottom[i]);
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);
+
+ DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
+ FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
+ FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));
+
+ DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
+ FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
+ FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
+ }
+}
+
+struct pixel_ext {
+ int left[COMP_MAX];
+ int right[COMP_MAX];
+ int top[COMP_MAX];
+ int bottom[COMP_MAX];
+};
+
+struct phase_step {
+ u32 x[COMP_MAX];
+ u32 y[COMP_MAX];
+};
+
+static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
+ struct mdp5_hw_pipe *hwpipe,
+ struct drm_framebuffer *fb,
+ struct phase_step *step,
+ struct pixel_ext *pe,
+ u32 scale_config, u32 hdecm, u32 vdecm,
+ bool hflip, bool vflip,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_img_w, u32 src_img_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
+ COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ mdp5_write_pixel_ext(mdp5_kms, pipe, format,
+ src_w, pe->left, pe->right,
+ src_h, pe->top, pe->bottom);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ step->x[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ step->y[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ step->x[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ step->y[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ scale_config);
+ }
+
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
+
+ set_scanout_locked(mdp5_kms, pipe, fb);
+}
+
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct drm_rect *src, struct drm_rect *dest)
+{
+ struct drm_plane_state *pstate = plane->state;
+ struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
+ struct mdp5_kms *mdp5_kms = get_kms(plane);
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ struct mdp5_hw_pipe *right_hwpipe;
+ const struct mdp_format *format;
+ uint32_t nplanes, config = 0;
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
+ uint32_t hdecm = 0, vdecm = 0;
+ uint32_t pix_format;
+ unsigned int rotation;
+ bool vflip, hflip;
+ int crtc_x, crtc_y;
+ unsigned int crtc_w, crtc_h;
+ uint32_t src_x, src_y;
+ uint32_t src_w, src_h;
+ uint32_t src_img_w, src_img_h;
+ int ret;
+
+ nplanes = fb->format->num_planes;
+
+ /* bad formats should already be rejected: */
+ if (WARN_ON(nplanes > pipe2nclients(pipe)))
+ return -EINVAL;
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+ pix_format = format->base.pixel_format;
+
+ src_x = src->x1;
+ src_y = src->y1;
+ src_w = drm_rect_width(src);
+ src_h = drm_rect_height(src);
+
+ crtc_x = dest->x1;
+ crtc_y = dest->y1;
+ crtc_w = drm_rect_width(dest);
+ crtc_h = drm_rect_height(dest);
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ src_img_w = min(fb->width, src_w);
+ src_img_h = min(fb->height, src_h);
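+
+ /*
+ * Illustrative values: a plane scanning out a full 1920x1080 fb has
+ * drm_rect_width(src) = 1920 << 16 = 0x07800000 in Q16, so after the
+ * shift src_w = 1920 and src_img_w = min(fb->width, 1920) = 1920.
+ */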
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
+ if (right_hwpipe) {
+ /*
+ * If the plane consists of 2 hw pipes, assume that the width is
+ * split equally across them. The only parameters that vary
+ * between the 2 pipes are src_x and crtc_x.
+ */
+ crtc_w /= 2;
+ src_w /= 2;
+ src_img_w /= 2;
+ }
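+
+ /*
+ * Illustrative split: a 3840-wide source on a 3840-wide CRTC with
+ * MDP_CAP_SRC_SPLIT becomes two 1920-wide halves; the left hwpipe is
+ * programmed below with (src_x, crtc_x) and the right one with
+ * (src_x + src_w, crtc_x + crtc_w) in the second
+ * mdp5_hwpipe_mode_set() call.
+ */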
+
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
+ if (ret)
+ return ret;
+
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
+ if (ret)
+ return ret;
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
+ calc_pixel_ext(format, src_w, crtc_w, step.x,
+ pe.left, pe.right, true);
+ calc_pixel_ext(format, src_h, crtc_h, step.y,
+ pe.top, pe.bottom, false);
+ }
+
+ /* TODO calc hdecm, vdecm */
+
+ /* SCALE is used to both scale and up-sample chroma components */
+ config |= get_scale_config(format, src_w, crtc_w, true);
+ config |= get_scale_config(format, src_h, crtc_h, false);
+ DBG("scale config = %x", config);
+
+ rotation = drm_rotation_simplify(pstate->rotation,
+ DRM_MODE_ROTATE_0 |
+ DRM_MODE_REFLECT_X |
+ DRM_MODE_REFLECT_Y);
+ hflip = !!(rotation & DRM_MODE_REFLECT_X);
+ vflip = !!(rotation & DRM_MODE_REFLECT_Y);
+
+ mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x, src_y, src_w, src_h);
+ if (right_hwpipe)
+ mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x + src_w, src_y, src_w, src_h);
+
+ return ret;
+}
+
+/*
+ * Use this func and the one below only after the atomic state has been
+ * successfully swapped
+ */
+enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (WARN_ON(!pstate->hwpipe))
+ return SSPP_NONE;
+
+ return pstate->hwpipe->pipe;
+}
+
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (!pstate->r_hwpipe)
+ return SSPP_NONE;
+
+ return pstate->r_hwpipe->pipe;
+}
+
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+ u32 mask;
+
+ if (WARN_ON(!pstate->hwpipe))
+ return 0;
+
+ mask = pstate->hwpipe->flush_mask;
+
+ if (pstate->r_hwpipe)
+ mask |= pstate->r_hwpipe->flush_mask;
+
+ return mask;
+}
+
+/* initialize plane */
+struct drm_plane *mdp5_plane_init(struct drm_device *dev,
+ enum drm_plane_type type)
+{
+ struct drm_plane *plane = NULL;
+ struct mdp5_plane *mdp5_plane;
+ int ret;
+
+ mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
+ if (!mdp5_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp5_plane->base;
+
+ mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
+ ARRAY_SIZE(mdp5_plane->formats), false);
+
+ ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+ mdp5_plane->formats, mdp5_plane->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
+
+ mdp5_plane_install_properties(plane, &plane->base);
+
+ drm_plane_enable_fb_damage_clips(plane);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp5_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}