path: root/drivers/clk/rockchip/clk-pll.c
author		Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit		5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree		cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/clk/rockchip/clk-pll.c
download	linux-5b7c4cabbb65f5c469464da6c5f614cbd7f730f2.tar.gz
		linux-5b7c4cabbb65f5c469464da6c5f614cbd7f730f2.zip
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next (grafted)

Pull networking updates from Jakub Kicinski:

 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having
     to access struct page at kfree time, and improve memory use.

   - Introduce sysctl to set default RPS configuration for new netdevs.

   - Define Netlink protocol specification format which can be used to
     describe messages used by each family and auto-generate parsers.
     Add tools for generating kernel data structures and uAPI headers.

   - Expose all net/core sysctls inside netns.

   - Remove 4s sleep in netpoll if carrier is instantly detected on
     boot.

   - Add configurable limit of MDB entries per port, and port-vlan.

   - Continue populating drop reasons throughout the stack.

   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).

   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range
     on socket by socket basis.

   - Track and report in procfs number of MPTCP sockets used.

   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path
     manager.

   - IPv6: don't check net.ipv6.route.max_size and rely on garbage
     collection to free memory (similarly to IPv4).

   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).

   - ICMP: add per-rate limit counters.

   - Add support for user scanning requests in ieee802154.

   - Remove static WEP support.

   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate
     reporting.

   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure"
     precedent set by recently added linked list, that is, by using
     kfunc + kptr instead of adding a new BPF map type.

   - Expose XDP hints via kfuncs with initial support for RX hash and
     timestamp metadata.

   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to
     better support decap on GRE tunnel devices not operating in collect
     metadata.

   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.

   - Remove the need for trace_printk_lock for bpf_trace_printk and
     bpf_trace_vprintk helpers.

   - Extend libbpf's bpf_tracing.h support for tracing arguments of
     kprobes/uprobes and syscall as a special case.

   - Significantly reduce the search time for module symbols by
     livepatch and BPF.

   - Enable cpumasks to be used as kptrs, which is useful for tracing
     programs tracking which tasks end up running on which CPUs in
     different time intervals.

   - Add support for BPF trampoline on s390x and riscv64.

   - Add capability to export the XDP features supported by the NIC.

   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.

   - Add cgroup.memory=nobpf kernel parameter option to disable BPF
     memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for
     years, and we still have WARN splats wrt races of the out-of-band
     /proc interface installed by this target.

   - Add 'destroy' commands to nf_tables. They are identical to the
     existing 'delete' commands, but do not return an error if the
     referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right
     IRQ affinity on AMD platforms.

   - Separate C22 and C45 MDIO bus transactions more clearly.

   - Introduce new DCB table to control DSCP rewrite on egress.

   - Support configuration of Physical Layer Collision Avoidance (PLCA)
     Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of
     shared medium Ethernet.

   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99). Allowing
     preemption of low priority frames by high priority frames.

   - Add support for controlling MACSec offload using netlink SET.

   - Rework devlink instance refcounts to allow registration and
     de-registration under the instance lock. Split the code into
     multiple files, drop some of the unnecessarily granular locks and
     factor out common parts of netlink operation handling.

   - Add TX frame aggregation parameters (for USB drivers).

   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning
     messages with notifications for debug.

   - Allow offloading of UDP NEW connections via act_ct.

   - Add support for per action HW stats in TC.

   - Support hardware miss to TC action (continue processing in SW from
     a specific point in the action chain).

   - Warn if old Wireless Extension user space interface is used with
     modern cfg80211/mac80211 drivers. Do not support Wireless
     Extensions for Wi-Fi 7 devices at all. Everyone should switch to
     using nl80211 interface instead.

   - Improve the CAN bit timing configuration. Use extack to return
     error messages directly to user space, update the SJW handling,
     including the definition of a new default value that will benefit
     CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux

   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)

   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.

   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support

   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make
           the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q,
           8.6.5.1)

   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy

   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio
        on platforms with integrated wifi) related parameters from the
        BIOS to the firmware.

   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support

   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator

   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence

   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
Diffstat (limited to '')
-rw-r--r--	drivers/clk/rockchip/clk-pll.c	1206
1 file changed, 1206 insertions, 0 deletions
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
new file mode 100644
index 000000000..2d42eb628
--- /dev/null
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -0,0 +1,1206 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * Copyright (c) 2015 Rockchip Electronics Co. Ltd.
+ * Author: Xing Zheng <zhengxing@rock-chips.com>
+ */
+
+#include <asm/div64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include "clk.h"
+
+#define PLL_MODE_MASK 0x3
+#define PLL_MODE_SLOW 0x0
+#define PLL_MODE_NORM 0x1
+#define PLL_MODE_DEEP 0x2
+#define PLL_RK3328_MODE_MASK 0x1
+
+struct rockchip_clk_pll {
+ struct clk_hw hw;
+
+ struct clk_mux pll_mux;
+ const struct clk_ops *pll_mux_ops;
+
+ struct notifier_block clk_nb;
+
+ void __iomem *reg_base;
+ int lock_offset;
+ unsigned int lock_shift;
+ enum rockchip_pll_type type;
+ u8 flags;
+ const struct rockchip_pll_rate_table *rate_table;
+ unsigned int rate_count;
+ spinlock_t *lock;
+
+ struct rockchip_clk_provider *ctx;
+};
+
+#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
+#define to_rockchip_clk_pll_nb(nb) \
+ container_of(nb, struct rockchip_clk_pll, clk_nb)
+
+static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
+ struct rockchip_clk_pll *pll, unsigned long rate)
+{
+ const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++) {
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+ }
+
+ return NULL;
+}
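+
+/*
+ * Note: the lookup above only succeeds on an exact match, which is why
+ * rockchip_pll_round_rate() below snaps every request to an entry of the
+ * (descending) rate table before set_rate is ever called.
+ */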
+
+static long rockchip_pll_round_rate(struct clk_hw *hw,
+ unsigned long drate, unsigned long *prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ /* Assuming rate_table is in descending order */
+ for (i = 0; i < pll->rate_count; i++) {
+ if (drate >= rate_table[i].rate)
+ return rate_table[i].rate;
+ }
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
+
+/*
+ * Wait for the pll to reach the locked state.
+ * The calling set_rate function is responsible for making sure the
+ * grf regmap is available.
+ */
+static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+ struct regmap *grf = pll->ctx->grf;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(grf, pll->lock_offset, val,
+ val & BIT(pll->lock_shift), 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+ return ret;
+}
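+
+/*
+ * Within this file the GRF-based poll above is only used by the
+ * RK3066-style PLL, whose lock bit sits in the General Register Files;
+ * the RK3036, RK3399 and RK3588 variants instead poll a lock bit in
+ * their own PLLCON space via their *_wait_lock helpers below.
+ */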
+
+/*
+ * PLL used in RK3036
+ */
+
+#define RK3036_PLLCON(i) (i * 0x4)
+#define RK3036_PLLCON0_FBDIV_MASK 0xfff
+#define RK3036_PLLCON0_FBDIV_SHIFT 0
+#define RK3036_PLLCON0_POSTDIV1_MASK 0x7
+#define RK3036_PLLCON0_POSTDIV1_SHIFT 12
+#define RK3036_PLLCON1_REFDIV_MASK 0x3f
+#define RK3036_PLLCON1_REFDIV_SHIFT 0
+#define RK3036_PLLCON1_POSTDIV2_MASK 0x7
+#define RK3036_PLLCON1_POSTDIV2_SHIFT 6
+#define RK3036_PLLCON1_LOCK_STATUS BIT(10)
+#define RK3036_PLLCON1_DSMPD_MASK 0x1
+#define RK3036_PLLCON1_DSMPD_SHIFT 12
+#define RK3036_PLLCON1_PWRDOWN BIT(13)
+#define RK3036_PLLCON2_FRAC_MASK 0xffffff
+#define RK3036_PLLCON2_FRAC_SHIFT 0
+
+static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+ u32 pllcon;
+ int ret;
+
+ /*
+ * Lock time is typically 250, at most 500, input clock cycles @24MHz,
+ * so define a very safe maximum of 1000us, meaning 24000 cycles.
+ */
+ ret = readl_relaxed_poll_timeout(pll->reg_base + RK3036_PLLCON(1),
+ pllcon,
+ pllcon & RK3036_PLLCON1_LOCK_STATUS,
+ 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+ return ret;
+}
+
+static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll,
+ struct rockchip_pll_rate_table *rate)
+{
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(0));
+ rate->fbdiv = ((pllcon >> RK3036_PLLCON0_FBDIV_SHIFT)
+ & RK3036_PLLCON0_FBDIV_MASK);
+ rate->postdiv1 = ((pllcon >> RK3036_PLLCON0_POSTDIV1_SHIFT)
+ & RK3036_PLLCON0_POSTDIV1_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(1));
+ rate->refdiv = ((pllcon >> RK3036_PLLCON1_REFDIV_SHIFT)
+ & RK3036_PLLCON1_REFDIV_MASK);
+ rate->postdiv2 = ((pllcon >> RK3036_PLLCON1_POSTDIV2_SHIFT)
+ & RK3036_PLLCON1_POSTDIV2_MASK);
+ rate->dsmpd = ((pllcon >> RK3036_PLLCON1_DSMPD_SHIFT)
+ & RK3036_PLLCON1_DSMPD_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(2));
+ rate->frac = ((pllcon >> RK3036_PLLCON2_FRAC_SHIFT)
+ & RK3036_PLLCON2_FRAC_MASK);
+}
+
+static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ struct rockchip_pll_rate_table cur;
+ u64 rate64 = prate;
+
+ rockchip_rk3036_pll_get_params(pll, &cur);
+
+ rate64 *= cur.fbdiv;
+ do_div(rate64, cur.refdiv);
+
+ if (cur.dsmpd == 0) {
+ /* fractional mode */
+ u64 frac_rate64 = prate * cur.frac;
+
+ do_div(frac_rate64, cur.refdiv);
+ rate64 += frac_rate64 >> 24;
+ }
+
+ do_div(rate64, cur.postdiv1);
+ do_div(rate64, cur.postdiv2);
+
+ return (unsigned long)rate64;
+}
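+
+/*
+ * Worked example for the computation above (illustrative values only):
+ * with a 24 MHz parent, fbdiv = 50, refdiv = 1, postdiv1 = 2,
+ * postdiv2 = 1 and dsmpd = 1 (integer mode) the output is
+ * 24 MHz * 50 / 1 / 2 / 1 = 600 MHz.  In fractional mode (dsmpd = 0)
+ * the 24-bit frac field adds prate * frac / refdiv / 2^24 before the
+ * post-dividers are applied.
+ */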
+
+static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
+ const struct rockchip_pll_rate_table *rate)
+{
+ const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+ struct clk_mux *pll_mux = &pll->pll_mux;
+ struct rockchip_pll_rate_table cur;
+ u32 pllcon;
+ int rate_change_remuxed = 0;
+ int cur_parent;
+ int ret;
+
+ pr_debug("%s: rate settings for %lu fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ __func__, rate->rate, rate->fbdiv, rate->postdiv1, rate->refdiv,
+ rate->postdiv2, rate->dsmpd, rate->frac);
+
+ rockchip_rk3036_pll_get_params(pll, &cur);
+ cur.rate = 0;
+
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
+
+ /* update pll values */
+ writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3036_PLLCON0_FBDIV_MASK,
+ RK3036_PLLCON0_FBDIV_SHIFT) |
+ HIWORD_UPDATE(rate->postdiv1, RK3036_PLLCON0_POSTDIV1_MASK,
+ RK3036_PLLCON0_POSTDIV1_SHIFT),
+ pll->reg_base + RK3036_PLLCON(0));
+
+ writel_relaxed(HIWORD_UPDATE(rate->refdiv, RK3036_PLLCON1_REFDIV_MASK,
+ RK3036_PLLCON1_REFDIV_SHIFT) |
+ HIWORD_UPDATE(rate->postdiv2, RK3036_PLLCON1_POSTDIV2_MASK,
+ RK3036_PLLCON1_POSTDIV2_SHIFT) |
+ HIWORD_UPDATE(rate->dsmpd, RK3036_PLLCON1_DSMPD_MASK,
+ RK3036_PLLCON1_DSMPD_SHIFT),
+ pll->reg_base + RK3036_PLLCON(1));
+
+ /* GPLL CON2 is not HIWORD_MASK */
+ pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(2));
+ pllcon &= ~(RK3036_PLLCON2_FRAC_MASK << RK3036_PLLCON2_FRAC_SHIFT);
+ pllcon |= rate->frac << RK3036_PLLCON2_FRAC_SHIFT;
+ writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2));
+
+ /* wait for the pll to lock */
+ ret = rockchip_rk3036_pll_wait_lock(pll);
+ if (ret) {
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
+ __func__);
+ rockchip_rk3036_pll_set_params(pll, &cur);
+ }
+
+ if (rate_change_remuxed)
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+
+ return ret;
+}
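+
+/*
+ * Note on the register writes above: HIWORD_UPDATE() (from the local
+ * clk.h) packs the new field value into the low half-word and the
+ * matching write-enable mask into the high half-word, so fields can be
+ * updated without a read-modify-write; CON2 lacks that write-mask
+ * scheme and needs the explicit read/mask/write sequence.  The whole
+ * update runs with the mux forced to slow mode, and the old parameters
+ * are restored if the PLL fails to re-lock.
+ */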
+
+static int rockchip_rk3036_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, __clk_get_name(hw->clk), drate, prate);
+
+ /* Get required rate settings from table */
+ rate = rockchip_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ return rockchip_rk3036_pll_set_params(pll, rate);
+}
+
+static int rockchip_rk3036_pll_enable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(0, RK3036_PLLCON1_PWRDOWN, 0),
+ pll->reg_base + RK3036_PLLCON(1));
+ rockchip_rk3036_pll_wait_lock(pll);
+
+ return 0;
+}
+
+static void rockchip_rk3036_pll_disable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(RK3036_PLLCON1_PWRDOWN,
+ RK3036_PLLCON1_PWRDOWN, 0),
+ pll->reg_base + RK3036_PLLCON(1));
+}
+
+static int rockchip_rk3036_pll_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ u32 pllcon = readl(pll->reg_base + RK3036_PLLCON(1));
+
+ return !(pllcon & RK3036_PLLCON1_PWRDOWN);
+}
+
+static int rockchip_rk3036_pll_init(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+ struct rockchip_pll_rate_table cur;
+ unsigned long drate;
+
+ if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+ return 0;
+
+ drate = clk_hw_get_rate(hw);
+ rate = rockchip_get_pll_settings(pll, drate);
+
+ /* when there is no rate setting for the current rate, rely on clk_set_rate */
+ if (!rate)
+ return 0;
+
+ rockchip_rk3036_pll_get_params(pll, &cur);
+
+ pr_debug("%s: pll %s@%lu: Hz\n", __func__, __clk_get_name(hw->clk),
+ drate);
+ pr_debug("old - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2,
+ cur.dsmpd, cur.frac);
+ pr_debug("new - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ rate->fbdiv, rate->postdiv1, rate->refdiv, rate->postdiv2,
+ rate->dsmpd, rate->frac);
+
+ if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
+ rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
+ rate->dsmpd != cur.dsmpd ||
+ (!cur.dsmpd && (rate->frac != cur.frac))) {
+ struct clk *parent = clk_get_parent(hw->clk);
+
+ if (!parent) {
+ pr_warn("%s: parent of %s not available\n",
+ __func__, __clk_get_name(hw->clk));
+ return 0;
+ }
+
+ pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+ __func__, __clk_get_name(hw->clk));
+ rockchip_rk3036_pll_set_params(pll, rate);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
+ .recalc_rate = rockchip_rk3036_pll_recalc_rate,
+ .enable = rockchip_rk3036_pll_enable,
+ .disable = rockchip_rk3036_pll_disable,
+ .is_enabled = rockchip_rk3036_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
+ .recalc_rate = rockchip_rk3036_pll_recalc_rate,
+ .round_rate = rockchip_pll_round_rate,
+ .set_rate = rockchip_rk3036_pll_set_rate,
+ .enable = rockchip_rk3036_pll_enable,
+ .disable = rockchip_rk3036_pll_disable,
+ .is_enabled = rockchip_rk3036_pll_is_enabled,
+ .init = rockchip_rk3036_pll_init,
+};
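+
+/*
+ * The "norate" variant above is picked by rockchip_clk_register_pll()
+ * when no rate table is provided (and, for the rk3066 type, when the
+ * GRF regmap is unavailable), so such a PLL can still be gated and its
+ * rate read back, but not reprogrammed.
+ */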
+
+/*
+ * PLL used in RK3066, RK3188 and RK3288
+ */
+
+#define RK3066_PLL_RESET_DELAY(nr) ((nr * 500) / 24 + 1)
+
+#define RK3066_PLLCON(i) (i * 0x4)
+#define RK3066_PLLCON0_OD_MASK 0xf
+#define RK3066_PLLCON0_OD_SHIFT 0
+#define RK3066_PLLCON0_NR_MASK 0x3f
+#define RK3066_PLLCON0_NR_SHIFT 8
+#define RK3066_PLLCON1_NF_MASK 0x1fff
+#define RK3066_PLLCON1_NF_SHIFT 0
+#define RK3066_PLLCON2_NB_MASK 0xfff
+#define RK3066_PLLCON2_NB_SHIFT 0
+#define RK3066_PLLCON3_RESET (1 << 5)
+#define RK3066_PLLCON3_PWRDOWN (1 << 1)
+#define RK3066_PLLCON3_BYPASS (1 << 0)
+
+static void rockchip_rk3066_pll_get_params(struct rockchip_clk_pll *pll,
+ struct rockchip_pll_rate_table *rate)
+{
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+ rate->nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT)
+ & RK3066_PLLCON0_NR_MASK) + 1;
+ rate->no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT)
+ & RK3066_PLLCON0_OD_MASK) + 1;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+ rate->nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT)
+ & RK3066_PLLCON1_NF_MASK) + 1;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
+ rate->nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT)
+ & RK3066_PLLCON2_NB_MASK) + 1;
+}
+
+static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ struct rockchip_pll_rate_table cur;
+ u64 rate64 = prate;
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
+ if (pllcon & RK3066_PLLCON3_BYPASS) {
+ pr_debug("%s: pll %s is bypassed\n", __func__,
+ clk_hw_get_name(hw));
+ return prate;
+ }
+
+ rockchip_rk3066_pll_get_params(pll, &cur);
+
+ rate64 *= cur.nf;
+ do_div(rate64, cur.nr);
+ do_div(rate64, cur.no);
+
+ return (unsigned long)rate64;
+}
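+
+/*
+ * Worked example for the computation above (illustrative values only):
+ * with a 24 MHz parent, nf = 50, nr = 1 and no = 2 the output is
+ * 24 MHz * 50 / (1 * 2) = 600 MHz; nb is programmed by the set_params
+ * helper below but does not enter the rate calculation.
+ */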
+
+static int rockchip_rk3066_pll_set_params(struct rockchip_clk_pll *pll,
+ const struct rockchip_pll_rate_table *rate)
+{
+ const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+ struct clk_mux *pll_mux = &pll->pll_mux;
+ struct rockchip_pll_rate_table cur;
+ int rate_change_remuxed = 0;
+ int cur_parent;
+ int ret;
+
+ pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
+ __func__, rate->rate, rate->nr, rate->no, rate->nf);
+
+ rockchip_rk3066_pll_get_params(pll, &cur);
+ cur.rate = 0;
+
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
+
+ /* enter reset mode */
+ writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
+ pll->reg_base + RK3066_PLLCON(3));
+
+ /* update pll values */
+ writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
+ RK3066_PLLCON0_NR_SHIFT) |
+ HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
+ RK3066_PLLCON0_OD_SHIFT),
+ pll->reg_base + RK3066_PLLCON(0));
+
+ writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
+ RK3066_PLLCON1_NF_SHIFT),
+ pll->reg_base + RK3066_PLLCON(1));
+ writel_relaxed(HIWORD_UPDATE(rate->nb - 1, RK3066_PLLCON2_NB_MASK,
+ RK3066_PLLCON2_NB_SHIFT),
+ pll->reg_base + RK3066_PLLCON(2));
+
+ /* leave reset and wait for the reset delay */
+ writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
+ pll->reg_base + RK3066_PLLCON(3));
+ udelay(RK3066_PLL_RESET_DELAY(rate->nr));
+
+ /* wait for the pll to lock */
+ ret = rockchip_pll_wait_lock(pll);
+ if (ret) {
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
+ __func__);
+ rockchip_rk3066_pll_set_params(pll, &cur);
+ }
+
+ if (rate_change_remuxed)
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+
+ return ret;
+}
+
+static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, clk_hw_get_name(hw), drate, prate);
+
+ /* Get required rate settings from table */
+ rate = rockchip_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ return rockchip_rk3066_pll_set_params(pll, rate);
+}
+
+static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0),
+ pll->reg_base + RK3066_PLLCON(3));
+ rockchip_pll_wait_lock(pll);
+
+ return 0;
+}
+
+static void rockchip_rk3066_pll_disable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN,
+ RK3066_PLLCON3_PWRDOWN, 0),
+ pll->reg_base + RK3066_PLLCON(3));
+}
+
+static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3));
+
+ return !(pllcon & RK3066_PLLCON3_PWRDOWN);
+}
+
+static int rockchip_rk3066_pll_init(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+ struct rockchip_pll_rate_table cur;
+ unsigned long drate;
+
+ if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+ return 0;
+
+ drate = clk_hw_get_rate(hw);
+ rate = rockchip_get_pll_settings(pll, drate);
+
+ /* when there is no rate setting for the current rate, rely on clk_set_rate */
+ if (!rate)
+ return 0;
+
+ rockchip_rk3066_pll_get_params(pll, &cur);
+
+ pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), nb(%d:%d)\n",
+ __func__, clk_hw_get_name(hw), drate, rate->nr, cur.nr,
+ rate->no, cur.no, rate->nf, cur.nf, rate->nb, cur.nb);
+ if (rate->nr != cur.nr || rate->no != cur.no || rate->nf != cur.nf
+ || rate->nb != cur.nb) {
+ pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+ __func__, clk_hw_get_name(hw));
+ rockchip_rk3066_pll_set_params(pll, rate);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
+ .recalc_rate = rockchip_rk3066_pll_recalc_rate,
+ .enable = rockchip_rk3066_pll_enable,
+ .disable = rockchip_rk3066_pll_disable,
+ .is_enabled = rockchip_rk3066_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
+ .recalc_rate = rockchip_rk3066_pll_recalc_rate,
+ .round_rate = rockchip_pll_round_rate,
+ .set_rate = rockchip_rk3066_pll_set_rate,
+ .enable = rockchip_rk3066_pll_enable,
+ .disable = rockchip_rk3066_pll_disable,
+ .is_enabled = rockchip_rk3066_pll_is_enabled,
+ .init = rockchip_rk3066_pll_init,
+};
+
+/*
+ * PLL used in RK3399
+ */
+
+#define RK3399_PLLCON(i) (i * 0x4)
+#define RK3399_PLLCON0_FBDIV_MASK 0xfff
+#define RK3399_PLLCON0_FBDIV_SHIFT 0
+#define RK3399_PLLCON1_REFDIV_MASK 0x3f
+#define RK3399_PLLCON1_REFDIV_SHIFT 0
+#define RK3399_PLLCON1_POSTDIV1_MASK 0x7
+#define RK3399_PLLCON1_POSTDIV1_SHIFT 8
+#define RK3399_PLLCON1_POSTDIV2_MASK 0x7
+#define RK3399_PLLCON1_POSTDIV2_SHIFT 12
+#define RK3399_PLLCON2_FRAC_MASK 0xffffff
+#define RK3399_PLLCON2_FRAC_SHIFT 0
+#define RK3399_PLLCON2_LOCK_STATUS BIT(31)
+#define RK3399_PLLCON3_PWRDOWN BIT(0)
+#define RK3399_PLLCON3_DSMPD_MASK 0x1
+#define RK3399_PLLCON3_DSMPD_SHIFT 3
+
+static int rockchip_rk3399_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+ u32 pllcon;
+ int ret;
+
+ /*
+ * Lock time is typically 250, at most 500, input clock cycles @24MHz,
+ * so define a very safe maximum of 1000us, meaning 24000 cycles.
+ */
+ ret = readl_relaxed_poll_timeout(pll->reg_base + RK3399_PLLCON(2),
+ pllcon,
+ pllcon & RK3399_PLLCON2_LOCK_STATUS,
+ 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+ return ret;
+}
+
+static void rockchip_rk3399_pll_get_params(struct rockchip_clk_pll *pll,
+ struct rockchip_pll_rate_table *rate)
+{
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(0));
+ rate->fbdiv = ((pllcon >> RK3399_PLLCON0_FBDIV_SHIFT)
+ & RK3399_PLLCON0_FBDIV_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(1));
+ rate->refdiv = ((pllcon >> RK3399_PLLCON1_REFDIV_SHIFT)
+ & RK3399_PLLCON1_REFDIV_MASK);
+ rate->postdiv1 = ((pllcon >> RK3399_PLLCON1_POSTDIV1_SHIFT)
+ & RK3399_PLLCON1_POSTDIV1_MASK);
+ rate->postdiv2 = ((pllcon >> RK3399_PLLCON1_POSTDIV2_SHIFT)
+ & RK3399_PLLCON1_POSTDIV2_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
+ rate->frac = ((pllcon >> RK3399_PLLCON2_FRAC_SHIFT)
+ & RK3399_PLLCON2_FRAC_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(3));
+ rate->dsmpd = ((pllcon >> RK3399_PLLCON3_DSMPD_SHIFT)
+ & RK3399_PLLCON3_DSMPD_MASK);
+}
+
+static unsigned long rockchip_rk3399_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ struct rockchip_pll_rate_table cur;
+ u64 rate64 = prate;
+
+ rockchip_rk3399_pll_get_params(pll, &cur);
+
+ rate64 *= cur.fbdiv;
+ do_div(rate64, cur.refdiv);
+
+ if (cur.dsmpd == 0) {
+ /* fractional mode */
+ u64 frac_rate64 = prate * cur.frac;
+
+ do_div(frac_rate64, cur.refdiv);
+ rate64 += frac_rate64 >> 24;
+ }
+
+ do_div(rate64, cur.postdiv1);
+ do_div(rate64, cur.postdiv2);
+
+ return (unsigned long)rate64;
+}
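+
+/*
+ * Fractional-mode example for the computation above (illustrative
+ * values only): with a 24 MHz parent, fbdiv = 64, refdiv = 1,
+ * postdiv1 = 2, postdiv2 = 2, dsmpd = 0 and frac = 6291456
+ * (6291456 / 2^24 = 0.375), the rate before the post-dividers is
+ * 24 MHz * 64 + 24 MHz * 0.375 = 1545 MHz, giving
+ * 1545 MHz / (2 * 2) = 386.25 MHz at the output.
+ */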
+
+static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
+ const struct rockchip_pll_rate_table *rate)
+{
+ const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+ struct clk_mux *pll_mux = &pll->pll_mux;
+ struct rockchip_pll_rate_table cur;
+ u32 pllcon;
+ int rate_change_remuxed = 0;
+ int cur_parent;
+ int ret;
+
+ pr_debug("%s: rate settings for %lu fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ __func__, rate->rate, rate->fbdiv, rate->postdiv1, rate->refdiv,
+ rate->postdiv2, rate->dsmpd, rate->frac);
+
+ rockchip_rk3399_pll_get_params(pll, &cur);
+ cur.rate = 0;
+
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
+
+ /* update pll values */
+ writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3399_PLLCON0_FBDIV_MASK,
+ RK3399_PLLCON0_FBDIV_SHIFT),
+ pll->reg_base + RK3399_PLLCON(0));
+
+ writel_relaxed(HIWORD_UPDATE(rate->refdiv, RK3399_PLLCON1_REFDIV_MASK,
+ RK3399_PLLCON1_REFDIV_SHIFT) |
+ HIWORD_UPDATE(rate->postdiv1, RK3399_PLLCON1_POSTDIV1_MASK,
+ RK3399_PLLCON1_POSTDIV1_SHIFT) |
+ HIWORD_UPDATE(rate->postdiv2, RK3399_PLLCON1_POSTDIV2_MASK,
+ RK3399_PLLCON1_POSTDIV2_SHIFT),
+ pll->reg_base + RK3399_PLLCON(1));
+
+ /* xPLL CON2 is not HIWORD_MASK */
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
+ pllcon &= ~(RK3399_PLLCON2_FRAC_MASK << RK3399_PLLCON2_FRAC_SHIFT);
+ pllcon |= rate->frac << RK3399_PLLCON2_FRAC_SHIFT;
+ writel_relaxed(pllcon, pll->reg_base + RK3399_PLLCON(2));
+
+ writel_relaxed(HIWORD_UPDATE(rate->dsmpd, RK3399_PLLCON3_DSMPD_MASK,
+ RK3399_PLLCON3_DSMPD_SHIFT),
+ pll->reg_base + RK3399_PLLCON(3));
+
+ /* wait for the pll to lock */
+ ret = rockchip_rk3399_pll_wait_lock(pll);
+ if (ret) {
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
+ __func__);
+ rockchip_rk3399_pll_set_params(pll, &cur);
+ }
+
+ if (rate_change_remuxed)
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+
+ return ret;
+}
+
+static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, __clk_get_name(hw->clk), drate, prate);
+
+ /* Get required rate settings from table */
+ rate = rockchip_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ return rockchip_rk3399_pll_set_params(pll, rate);
+}
+
+static int rockchip_rk3399_pll_enable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(0, RK3399_PLLCON3_PWRDOWN, 0),
+ pll->reg_base + RK3399_PLLCON(3));
+ rockchip_rk3399_pll_wait_lock(pll);
+
+ return 0;
+}
+
+static void rockchip_rk3399_pll_disable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(RK3399_PLLCON3_PWRDOWN,
+ RK3399_PLLCON3_PWRDOWN, 0),
+ pll->reg_base + RK3399_PLLCON(3));
+}
+
+static int rockchip_rk3399_pll_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ u32 pllcon = readl(pll->reg_base + RK3399_PLLCON(3));
+
+ return !(pllcon & RK3399_PLLCON3_PWRDOWN);
+}
+
+static int rockchip_rk3399_pll_init(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+ struct rockchip_pll_rate_table cur;
+ unsigned long drate;
+
+ if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+ return 0;
+
+ drate = clk_hw_get_rate(hw);
+ rate = rockchip_get_pll_settings(pll, drate);
+
+ /* when there is no rate setting for the current rate, rely on clk_set_rate */
+ if (!rate)
+ return 0;
+
+ rockchip_rk3399_pll_get_params(pll, &cur);
+
+ pr_debug("%s: pll %s@%lu: Hz\n", __func__, __clk_get_name(hw->clk),
+ drate);
+ pr_debug("old - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2,
+ cur.dsmpd, cur.frac);
+ pr_debug("new - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ rate->fbdiv, rate->postdiv1, rate->refdiv, rate->postdiv2,
+ rate->dsmpd, rate->frac);
+
+ if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
+ rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
+ rate->dsmpd != cur.dsmpd ||
+ (!cur.dsmpd && (rate->frac != cur.frac))) {
+ struct clk *parent = clk_get_parent(hw->clk);
+
+ if (!parent) {
+ pr_warn("%s: parent of %s not available\n",
+ __func__, __clk_get_name(hw->clk));
+ return 0;
+ }
+
+ pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+ __func__, __clk_get_name(hw->clk));
+ rockchip_rk3399_pll_set_params(pll, rate);
+ }
+
+ return 0;
+}
+
+static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
+ .recalc_rate = rockchip_rk3399_pll_recalc_rate,
+ .enable = rockchip_rk3399_pll_enable,
+ .disable = rockchip_rk3399_pll_disable,
+ .is_enabled = rockchip_rk3399_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
+ .recalc_rate = rockchip_rk3399_pll_recalc_rate,
+ .round_rate = rockchip_pll_round_rate,
+ .set_rate = rockchip_rk3399_pll_set_rate,
+ .enable = rockchip_rk3399_pll_enable,
+ .disable = rockchip_rk3399_pll_disable,
+ .is_enabled = rockchip_rk3399_pll_is_enabled,
+ .init = rockchip_rk3399_pll_init,
+};
+
+/*
+ * PLL used in RK3588
+ */
+
+#define RK3588_PLLCON(i) (i * 0x4)
+#define RK3588_PLLCON0_M_MASK 0x3ff
+#define RK3588_PLLCON0_M_SHIFT 0
+#define RK3588_PLLCON1_P_MASK 0x3f
+#define RK3588_PLLCON1_P_SHIFT 0
+#define RK3588_PLLCON1_S_MASK 0x7
+#define RK3588_PLLCON1_S_SHIFT 6
+#define RK3588_PLLCON2_K_MASK 0xffff
+#define RK3588_PLLCON2_K_SHIFT 0
+#define RK3588_PLLCON1_PWRDOWN BIT(13)
+#define RK3588_PLLCON6_LOCK_STATUS BIT(15)
+
+static int rockchip_rk3588_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+ u32 pllcon;
+ int ret;
+
+ /*
+ * Lock time is typically 250, at most 500, input clock cycles @24MHz,
+ * so define a very safe maximum of 1000us, meaning 24000 cycles.
+ */
+ ret = readl_relaxed_poll_timeout(pll->reg_base + RK3588_PLLCON(6),
+ pllcon,
+ pllcon & RK3588_PLLCON6_LOCK_STATUS,
+ 0, 1000);
+ if (ret)
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
+
+ return ret;
+}
+
+static void rockchip_rk3588_pll_get_params(struct rockchip_clk_pll *pll,
+ struct rockchip_pll_rate_table *rate)
+{
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(0));
+ rate->m = ((pllcon >> RK3588_PLLCON0_M_SHIFT) & RK3588_PLLCON0_M_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(1));
+ rate->p = ((pllcon >> RK3588_PLLCON1_P_SHIFT) & RK3588_PLLCON1_P_MASK);
+ rate->s = ((pllcon >> RK3588_PLLCON1_S_SHIFT) & RK3588_PLLCON1_S_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3588_PLLCON(2));
+ rate->k = ((pllcon >> RK3588_PLLCON2_K_SHIFT) & RK3588_PLLCON2_K_MASK);
+}
+
+static unsigned long rockchip_rk3588_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ struct rockchip_pll_rate_table cur;
+ u64 rate64 = prate, postdiv;
+
+ rockchip_rk3588_pll_get_params(pll, &cur);
+
+ rate64 *= cur.m;
+ do_div(rate64, cur.p);
+
+ if (cur.k) {
+ /* fractional mode */
+ u64 frac_rate64 = prate * cur.k;
+
+ postdiv = cur.p * 65535;
+ do_div(frac_rate64, postdiv);
+ rate64 += frac_rate64;
+ }
+ rate64 = rate64 >> cur.s;
+
+ return (unsigned long)rate64;
+}
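+
+/*
+ * Worked example for the computation above (illustrative values only):
+ * with a 24 MHz parent, m = 250, p = 3, s = 1 and k = 0 the rate before
+ * the final shift is 24 MHz * 250 / 3 = 2000 MHz, and the output is
+ * 2000 MHz >> 1 = 1000 MHz.  A non-zero k adds prate * k / (p * 65535)
+ * before the right shift by s.
+ */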
+
+static int rockchip_rk3588_pll_set_params(struct rockchip_clk_pll *pll,
+ const struct rockchip_pll_rate_table *rate)
+{
+ const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+ struct clk_mux *pll_mux = &pll->pll_mux;
+ struct rockchip_pll_rate_table cur;
+ int rate_change_remuxed = 0;
+ int cur_parent;
+ int ret;
+
+ pr_debug("%s: rate settings for %lu p: %d, m: %d, s: %d, k: %d\n",
+ __func__, rate->rate, rate->p, rate->m, rate->s, rate->k);
+
+ rockchip_rk3588_pll_get_params(pll, &cur);
+ cur.rate = 0;
+
+ if (pll->type == pll_rk3588) {
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
+ }
+
+ /* set pll power down */
+ writel(HIWORD_UPDATE(RK3588_PLLCON1_PWRDOWN,
+ RK3588_PLLCON1_PWRDOWN, 0),
+ pll->reg_base + RK3588_PLLCON(1));
+
+ /* update pll values */
+ writel_relaxed(HIWORD_UPDATE(rate->m, RK3588_PLLCON0_M_MASK, RK3588_PLLCON0_M_SHIFT),
+ pll->reg_base + RK3588_PLLCON(0));
+
+ writel_relaxed(HIWORD_UPDATE(rate->p, RK3588_PLLCON1_P_MASK, RK3588_PLLCON1_P_SHIFT) |
+ HIWORD_UPDATE(rate->s, RK3588_PLLCON1_S_MASK, RK3588_PLLCON1_S_SHIFT),
+ pll->reg_base + RK3588_PLLCON(1));
+
+ writel_relaxed(HIWORD_UPDATE(rate->k, RK3588_PLLCON2_K_MASK, RK3588_PLLCON2_K_SHIFT),
+ pll->reg_base + RK3588_PLLCON(2));
+
+ /* set pll power up */
+ writel(HIWORD_UPDATE(0, RK3588_PLLCON1_PWRDOWN, 0),
+ pll->reg_base + RK3588_PLLCON(1));
+
+ /* wait for the pll to lock */
+ ret = rockchip_rk3588_pll_wait_lock(pll);
+ if (ret) {
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
+ __func__);
+ rockchip_rk3588_pll_set_params(pll, &cur);
+ }
+
+ if ((pll->type == pll_rk3588) && rate_change_remuxed)
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+
+ return ret;
+}
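+
+/*
+ * Note: the slow-mode remux around the update above is only done for
+ * the pll_rk3588 type; pll_rk3588_core PLLs share these ops (see
+ * rockchip_clk_register_pll()) but are reprogrammed relying solely on
+ * the power-down/power-up sequence, without touching the mode mux.
+ */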
+
+static int rockchip_rk3588_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, __clk_get_name(hw->clk), drate, prate);
+
+ /* Get required rate settings from table */
+ rate = rockchip_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ return rockchip_rk3588_pll_set_params(pll, rate);
+}
+
+static int rockchip_rk3588_pll_enable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(0, RK3588_PLLCON1_PWRDOWN, 0),
+ pll->reg_base + RK3588_PLLCON(1));
+ rockchip_rk3588_pll_wait_lock(pll);
+
+ return 0;
+}
+
+static void rockchip_rk3588_pll_disable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(RK3588_PLLCON1_PWRDOWN, RK3588_PLLCON1_PWRDOWN, 0),
+ pll->reg_base + RK3588_PLLCON(1));
+}
+
+static int rockchip_rk3588_pll_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ u32 pllcon = readl(pll->reg_base + RK3588_PLLCON(1));
+
+ return !(pllcon & RK3588_PLLCON1_PWRDOWN);
+}
+
+static int rockchip_rk3588_pll_init(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+ return 0;
+
+ return 0;
+}
+
+static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
+ .recalc_rate = rockchip_rk3588_pll_recalc_rate,
+ .enable = rockchip_rk3588_pll_enable,
+ .disable = rockchip_rk3588_pll_disable,
+ .is_enabled = rockchip_rk3588_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
+ .recalc_rate = rockchip_rk3588_pll_recalc_rate,
+ .round_rate = rockchip_pll_round_rate,
+ .set_rate = rockchip_rk3588_pll_set_rate,
+ .enable = rockchip_rk3588_pll_enable,
+ .disable = rockchip_rk3588_pll_disable,
+ .is_enabled = rockchip_rk3588_pll_is_enabled,
+ .init = rockchip_rk3588_pll_init,
+};
+
+/*
+ * Common registering of pll clocks
+ */
+
+struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
+ enum rockchip_pll_type pll_type,
+ const char *name, const char *const *parent_names,
+ u8 num_parents, int con_offset, int grf_lock_offset,
+ int lock_shift, int mode_offset, int mode_shift,
+ struct rockchip_pll_rate_table *rate_table,
+ unsigned long flags, u8 clk_pll_flags)
+{
+ const char *pll_parents[3];
+ struct clk_init_data init;
+ struct rockchip_clk_pll *pll;
+ struct clk_mux *pll_mux;
+ struct clk *pll_clk, *mux_clk;
+ char pll_name[20];
+
+ if ((pll_type != pll_rk3328 && num_parents != 2) ||
+ (pll_type == pll_rk3328 && num_parents != 1)) {
+ pr_err("%s: needs two parent clocks\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* name the actual pll */
+ snprintf(pll_name, sizeof(pll_name), "pll_%s", name);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ /* create the mux on top of the real pll */
+ pll->pll_mux_ops = &clk_mux_ops;
+ pll_mux = &pll->pll_mux;
+ pll_mux->reg = ctx->reg_base + mode_offset;
+ pll_mux->shift = mode_shift;
+ if (pll_type == pll_rk3328)
+ pll_mux->mask = PLL_RK3328_MODE_MASK;
+ else
+ pll_mux->mask = PLL_MODE_MASK;
+ pll_mux->flags = 0;
+ pll_mux->lock = &ctx->lock;
+ pll_mux->hw.init = &init;
+
+ if (pll_type == pll_rk3036 ||
+ pll_type == pll_rk3066 ||
+ pll_type == pll_rk3328 ||
+ pll_type == pll_rk3399 ||
+ pll_type == pll_rk3588)
+ pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
+ /* the actual muxing is xin24m, pll-output, xin32k */
+ pll_parents[0] = parent_names[0];
+ pll_parents[1] = pll_name;
+ pll_parents[2] = parent_names[1];
+
+ init.name = name;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.ops = pll->pll_mux_ops;
+ init.parent_names = pll_parents;
+ if (pll_type == pll_rk3328)
+ init.num_parents = 2;
+ else
+ init.num_parents = ARRAY_SIZE(pll_parents);
+
+ mux_clk = clk_register(NULL, &pll_mux->hw);
+ if (IS_ERR(mux_clk))
+ goto err_mux;
+
+ /* now create the actual pll */
+ init.name = pll_name;
+
+ /* keep all plls untouched for now */
+ init.flags = flags | CLK_IGNORE_UNUSED;
+
+ init.parent_names = &parent_names[0];
+ init.num_parents = 1;
+
+ if (rate_table) {
+ int len;
+
+ /* find count of rates in rate_table */
+ for (len = 0; rate_table[len].rate != 0; )
+ len++;
+
+ pll->rate_count = len;
+ pll->rate_table = kmemdup(rate_table,
+ pll->rate_count *
+ sizeof(struct rockchip_pll_rate_table),
+ GFP_KERNEL);
+ WARN(!pll->rate_table,
+ "%s: could not allocate rate table for %s\n",
+ __func__, name);
+ }
+
+ switch (pll_type) {
+ case pll_rk3036:
+ case pll_rk3328:
+ if (!pll->rate_table)
+ init.ops = &rockchip_rk3036_pll_clk_norate_ops;
+ else
+ init.ops = &rockchip_rk3036_pll_clk_ops;
+ break;
+ case pll_rk3066:
+ if (!pll->rate_table || IS_ERR(ctx->grf))
+ init.ops = &rockchip_rk3066_pll_clk_norate_ops;
+ else
+ init.ops = &rockchip_rk3066_pll_clk_ops;
+ break;
+ case pll_rk3399:
+ if (!pll->rate_table)
+ init.ops = &rockchip_rk3399_pll_clk_norate_ops;
+ else
+ init.ops = &rockchip_rk3399_pll_clk_ops;
+ break;
+ case pll_rk3588:
+ case pll_rk3588_core:
+ if (!pll->rate_table)
+ init.ops = &rockchip_rk3588_pll_clk_norate_ops;
+ else
+ init.ops = &rockchip_rk3588_pll_clk_ops;
+ init.flags = flags;
+ break;
+ default:
+ pr_warn("%s: Unknown pll type for pll clk %s\n",
+ __func__, name);
+ }
+
+ pll->hw.init = &init;
+ pll->type = pll_type;
+ pll->reg_base = ctx->reg_base + con_offset;
+ pll->lock_offset = grf_lock_offset;
+ pll->lock_shift = lock_shift;
+ pll->flags = clk_pll_flags;
+ pll->lock = &ctx->lock;
+ pll->ctx = ctx;
+
+ pll_clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(pll_clk)) {
+ pr_err("%s: failed to register pll clock %s : %ld\n",
+ __func__, name, PTR_ERR(pll_clk));
+ goto err_pll;
+ }
+
+ return mux_clk;
+
+err_pll:
+ kfree(pll->rate_table);
+ clk_unregister(mux_clk);
+ mux_clk = pll_clk;
+err_mux:
+ kfree(pll);
+ return mux_clk;
+}
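+
+/*
+ * Usage sketch (hypothetical, illustrative values only): SoC clock
+ * drivers normally describe their PLLs in a table and let the common
+ * Rockchip clock code register each entry, roughly along the lines of:
+ *
+ *	clk = rockchip_clk_register_pll(ctx, pll_rk3036, "apll",
+ *					apll_parents, 2, con_offset,
+ *					grf_lock_offset, lock_shift,
+ *					mode_offset, mode_shift,
+ *					apll_rates, 0,
+ *					ROCKCHIP_PLL_SYNC_RATE);
+ *
+ * where "apll", apll_parents and apll_rates are driver-provided names
+ * and tables.  The returned clock is the mode mux ("apll"); the real
+ * PLL is registered behind it as "pll_apll".
+ */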