path: root/drivers/firmware/arm_scmi/clock.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit	5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree	cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/firmware/arm_scmi/clock.c
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core:
   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:
   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis (see the sketch after the shortlog).
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:
   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:
   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:
   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACsec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using the nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:
   - Ethernet:
     - nVidia BlueField-3 support (control traffic driver)
     - Ethernet support for imx93 SoCs
     - Motorcomm yt8531 gigabit Ethernet PHY
     - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
     - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
     - Amlogic gxl MDIO mux
   - WiFi:
     - RealTek RTL8188EU (rtl8xxxu)
     - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
     - Renesas R-Car V4H

  Drivers:
   - Bluetooth:
     - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
     - Intel (1G, igc):
       - support TSN / Qbv / packet scheduling features of i226 model
     - Intel (100G, ice):
       - use GNSS subsystem instead of TTY
       - multi-buffer XDP support
       - extend support for GPIO pins to E823 devices
     - nVidia/Mellanox:
       - update the shared buffer configuration on PFC commands
       - implement PTP adjphase function for HW offset control
       - TC support for Geneve and GRE with VF tunnel offload
       - more efficient crypto key management method
       - multi-port eswitch support
     - Netronome/Corigine:
       - add DCB IEEE support
       - support IPsec offloading for NFP3800
     - Freescale/NXP (enetc):
       - support XDP_REDIRECT for XDP non-linear buffers
       - improve reconfig, avoid link flap and waiting for idle
       - support MAC Merge layer
     - Other NICs:
       - sfc/ef100: add basic devlink support for ef100
       - ionic: rx_push mode operation (writing descriptors via MMIO)
       - bnxt: use the auxiliary bus abstraction for RDMA
       - r8169: disable ASPM and reset bus in case of tx timeout
       - cpsw: support QSGMII mode for J721e CPSW9G
       - cpts: support pulse-per-second output
       - ngbe: add an mdio bus driver
       - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
       - r8152: handle devices with FW with NCM support
       - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
       - virtio-net: support multi buffer XDP
       - virtio/vsock: replace virtio_vsock_pkt with sk_buff
       - tsnep: XDP support
   - Ethernet high-speed switches:
     - nVidia/Mellanox (mlxsw):
       - add support for latency TLV (in FW control messages)
     - Microchip (sparx5):
       - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
       - add support for egress DSCP rewrite
       - IS0 VCAP support (Ingress Classification)
       - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
       - ES2 VCAP support (Egress Access Control)
       - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
     - Marvell (mv88e6xxx):
       - add MAB (port auth) offload support
       - enable PTP receive for mv88e6390
     - NXP (ocelot):
       - support MAC Merge layer
       - support for the vsc7512 internal copper phys
     - Microchip:
       - lan9303: convert to PHYLINK
       - lan966x: support TC flower filter statistics
       - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
       - lan937x: support Credit Based Shaper configuration
       - ksz9477: support Energy Efficient Ethernet
     - other:
       - qca8k: convert to regmap read/write API, use bulk operations
       - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
     - EHT (Wi-Fi 7) rate reporting
     - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
     - IPQ5018 support
     - Fine Timing Measurement (FTM) responder role support
     - channel 177 support
   - MediaTek WiFi (mt76):
     - per-PHY LED support
     - mt7996: EHT (Wi-Fi 7) support
     - Wireless Ethernet Dispatch (WED) reset support
     - switch to using page pool allocator
   - RealTek WiFi (rtw89):
     - support new version of Bluetooth co-existence
   - Mobile:
     - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
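
On the IP_LOCAL_PORT_RANGE item above: a minimal usage sketch, assuming the 6.3 uAPI where the option value is a u32 packing the lower bound into the low 16 bits and the upper bound into the high 16 bits (a bound of 0 means "use the netns default"); set_local_port_range is a hypothetical helper, not from the kernel tree:

  #include <sys/socket.h>
  #include <netinet/in.h>

  #ifndef IP_LOCAL_PORT_RANGE
  #define IP_LOCAL_PORT_RANGE 51	/* value as in the 6.3 uAPI headers */
  #endif

  /* hypothetical helper: clamp this socket's ephemeral ports to [lo, hi] */
  static int set_local_port_range(int fd, unsigned short lo, unsigned short hi)
  {
  	unsigned int range = ((unsigned int)hi << 16) | lo;

  	return setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE,
  			  &range, sizeof(range));
  }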
Diffstat (limited to 'drivers/firmware/arm_scmi/clock.c')
-rw-r--r--	drivers/firmware/arm_scmi/clock.c	619
1 file changed, 619 insertions, 0 deletions
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
new file mode 100644
index 000000000..96060bf90
--- /dev/null
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Clock Protocol
+ *
+ * Copyright (C) 2018-2022 ARM Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/limits.h>
+#include <linux/sort.h>
+
+#include "protocols.h"
+#include "notify.h"
+
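+/* Clock protocol command IDs, as defined by the SCMI specification */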
+enum scmi_clock_protocol_cmd {
+ CLOCK_ATTRIBUTES = 0x3,
+ CLOCK_DESCRIBE_RATES = 0x4,
+ CLOCK_RATE_SET = 0x5,
+ CLOCK_RATE_GET = 0x6,
+ CLOCK_CONFIG_SET = 0x7,
+ CLOCK_NAME_GET = 0x8,
+ CLOCK_RATE_NOTIFY = 0x9,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
+};
+
+struct scmi_msg_resp_clock_protocol_attributes {
+ __le16 num_clocks;
+ u8 max_async_req;
+ u8 reserved;
+};
+
+struct scmi_msg_resp_clock_attributes {
+ __le32 attributes;
+#define CLOCK_ENABLE BIT(0)
+#define SUPPORTS_RATE_CHANGED_NOTIF(x) ((x) & BIT(31))
+#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
+ u8 name[SCMI_SHORT_NAME_MAX_SIZE];
+ __le32 clock_enable_latency;
+};
+
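+/* CLOCK_CONFIG_SET request: attributes bit[0] (CLOCK_ENABLE) gates the clock */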
+struct scmi_clock_set_config {
+ __le32 id;
+ __le32 attributes;
+};
+
+struct scmi_msg_clock_describe_rates {
+ __le32 id;
+ __le32 rate_index;
+};
+
+struct scmi_msg_resp_clock_describe_rates {
+ __le32 num_rates_flags;
+#define NUM_RETURNED(x) ((x) & 0xfff)
+#define RATE_DISCRETE(x) !((x) & BIT(12))
+#define NUM_REMAINING(x) ((x) >> 16)
+ struct {
+ __le32 value_low;
+ __le32 value_high;
+ } rate[];
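+/* Merge the two little-endian 32-bit halves of a rate entry into a u64 */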
+#define RATE_TO_U64(X) \
+({ \
+ typeof(X) x = (X); \
+ le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
+})
+};
+
+struct scmi_clock_set_rate {
+ __le32 flags;
+#define CLOCK_SET_ASYNC BIT(0)
+#define CLOCK_SET_IGNORE_RESP BIT(1)
+#define CLOCK_SET_ROUND_UP BIT(2)
+#define CLOCK_SET_ROUND_AUTO BIT(3)
+ __le32 id;
+ __le32 value_low;
+ __le32 value_high;
+};
+
+struct scmi_msg_resp_set_rate_complete {
+ __le32 id;
+ __le32 rate_low;
+ __le32 rate_high;
+};
+
+struct scmi_msg_clock_rate_notify {
+ __le32 clk_id;
+ __le32 notify_enable;
+};
+
+struct scmi_clock_rate_notify_payld {
+ __le32 agent_id;
+ __le32 clock_id;
+ __le32 rate_low;
+ __le32 rate_high;
+};
+
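+/* Per-instance protocol state, stashed via ph->set_priv() at init time */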
+struct clock_info {
+ u32 version;
+ int num_clocks;
+ int max_async_req;
+ atomic_t cur_async_req;
+ struct scmi_clock_info *clk;
+};
+
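+/* Map a notification event index (SCMI_EVENT_*) to its notify command */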
+static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
+ CLOCK_RATE_NOTIFY,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
+};
+
+static int
+scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+ struct clock_info *ci)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_protocol_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+ 0, sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ ci->num_clocks = le16_to_cpu(attr->num_clocks);
+ ci->max_async_req = attr->max_async_req;
+ }
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, struct scmi_clock_info *clk,
+ u32 version)
+{
+ int ret;
+ u32 attributes;
+ struct scmi_xfer *t;
+ struct scmi_msg_resp_clock_attributes *attr;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
+ sizeof(clk_id), sizeof(*attr), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+ attr = t->rx.buf;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ u32 latency = 0;
+ attributes = le32_to_cpu(attr->attributes);
+ strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
+ /* clock_enable_latency field is present only since SCMI v3.1 */
+ if (PROTOCOL_REV_MAJOR(version) >= 0x2)
+ latency = le32_to_cpu(attr->clock_enable_latency);
+ clk->enable_latency = latency ? : U32_MAX;
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ /*
+ * If supported overwrite short name with the extended one;
+ * on error just carry on and use already provided short name.
+ */
+ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
+ if (SUPPORTS_EXTENDED_NAMES(attributes))
+ ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
+ clk->name,
+ SCMI_MAX_STR_SIZE);
+
+ if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
+ clk->rate_changed_notifications = true;
+ if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
+ clk->rate_change_requested_notifications = true;
+ }
+
+ return ret;
+}
+
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+ const u64 *r1 = _r1, *r2 = _r2;
+
+ if (*r1 < *r2)
+ return -1;
+ else if (*r1 == *r2)
+ return 0;
+ else
+ return 1;
+}
+
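+/* Private context threaded through the CLOCK_DESCRIBE_RATES iterator */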
+struct scmi_clk_ipriv {
+ struct device *dev;
+ u32 clk_id;
+ struct scmi_clock_info *clk;
+};
+
+static void iter_clk_describe_prepare_message(void *message,
+ const unsigned int desc_index,
+ const void *priv)
+{
+ struct scmi_msg_clock_describe_rates *msg = message;
+ const struct scmi_clk_ipriv *p = priv;
+
+ msg->id = cpu_to_le32(p->clk_id);
+ /* Set the number of rates to be skipped/already read */
+ msg->rate_index = cpu_to_le32(desc_index);
+}
+
+static int
+iter_clk_describe_update_state(struct scmi_iterator_state *st,
+ const void *response, void *priv)
+{
+ u32 flags;
+ struct scmi_clk_ipriv *p = priv;
+ const struct scmi_msg_resp_clock_describe_rates *r = response;
+
+ flags = le32_to_cpu(r->num_rates_flags);
+ st->num_remaining = NUM_REMAINING(flags);
+ st->num_returned = NUM_RETURNED(flags);
+ p->clk->rate_discrete = RATE_DISCRETE(flags);
+
+ /* Warn about out of spec replies ... */
+ if (!p->clk->rate_discrete &&
+ (st->num_returned != 3 || st->num_remaining != 0)) {
+ dev_warn(p->dev,
+ "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
+ p->clk->name, st->num_returned, st->num_remaining,
+ st->rx_len);
+
+ /*
+ * A known quirk: a triplet is returned but num_returned != 3
+ * Check for a safe payload size and fix.
+ */
+ if (st->num_returned != 3 && st->num_remaining == 0 &&
+ st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
+ st->num_returned = 3;
+ st->num_remaining = 0;
+ } else {
+ dev_err(p->dev,
+ "Cannot fix out-of-spec reply !\n");
+ return -EPROTO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
+ const void *response,
+ struct scmi_iterator_state *st, void *priv)
+{
+ int ret = 0;
+ struct scmi_clk_ipriv *p = priv;
+ const struct scmi_msg_resp_clock_describe_rates *r = response;
+
+ if (!p->clk->rate_discrete) {
+ switch (st->desc_index + st->loop_idx) {
+ case 0:
+ p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
+ break;
+ case 1:
+ p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
+ break;
+ case 2:
+ p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ } else {
+ u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
+
+ *rate = RATE_TO_U64(r->rate[st->loop_idx]);
+ p->clk->list.num_rates++;
+ }
+
+ return ret;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ int ret;
+ void *iter;
+ struct scmi_iterator_ops ops = {
+ .prepare_message = iter_clk_describe_prepare_message,
+ .update_state = iter_clk_describe_update_state,
+ .process_response = iter_clk_describe_process_response,
+ };
+ struct scmi_clk_ipriv cpriv = {
+ .clk_id = clk_id,
+ .clk = clk,
+ .dev = ph->dev,
+ };
+
+ iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
+ CLOCK_DESCRIBE_RATES,
+ sizeof(struct scmi_msg_clock_describe_rates),
+ &cpriv);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = ph->hops->iter_response_run(iter);
+ if (ret)
+ return ret;
+
+ if (!clk->rate_discrete) {
+ dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
+ clk->range.min_rate, clk->range.max_rate,
+ clk->range.step_size);
+ } else if (clk->list.num_rates) {
+ sort(clk->list.rates, clk->list.num_rates,
+ sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
+ }
+
+ return ret;
+}
+
+static int
+scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 *value)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
+ sizeof(__le32), sizeof(u64), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret)
+ *value = get_unaligned_le64(t->rx.buf);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
+ u32 clk_id, u64 rate)
+{
+ int ret;
+ u32 flags = 0;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_rate *cfg;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
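+	/* Use the async flavour only while below the platform's async limit */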
+ if (ci->max_async_req &&
+ atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
+ flags |= CLOCK_SET_ASYNC;
+
+ cfg = t->tx.buf;
+ cfg->flags = cpu_to_le32(flags);
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->value_low = cpu_to_le32(rate & 0xffffffff);
+ cfg->value_high = cpu_to_le32(rate >> 32);
+
+ if (flags & CLOCK_SET_ASYNC) {
+ ret = ph->xops->do_xfer_with_response(ph, t);
+ if (!ret) {
+ struct scmi_msg_resp_set_rate_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == clk_id)
+ dev_dbg(ph->dev,
+ "Clk ID %d set async to %llu\n", clk_id,
+ get_unaligned_le64(&resp->rate_low));
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ ret = ph->xops->do_xfer(ph, t);
+ }
+
+ if (ci->max_async_req)
+ atomic_dec(&ci->cur_async_req);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u32 config, bool atomic)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_clock_set_config *cfg;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
+ sizeof(*cfg), 0, &t);
+ if (ret)
+ return ret;
+
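+	/* Polled completion lets callers invoke this from atomic context */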
+ t->hdr.poll_completion = atomic;
+
+ cfg = t->tx.buf;
+ cfg->id = cpu_to_le32(clk_id);
+ cfg->attributes = cpu_to_le32(config);
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
+}
+
+static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0, false);
+}
+
+static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
+}
+
+static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
+ u32 clk_id)
+{
+ return scmi_clock_config_set(ph, clk_id, 0, true);
+}
+
+static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_clock_info *
+scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
+{
+ struct scmi_clock_info *clk;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (clk_id >= ci->num_clocks)
+ return NULL;
+
+ clk = ci->clk + clk_id;
+ if (!clk->name[0])
+ return NULL;
+
+ return clk;
+}
+
+static const struct scmi_clk_proto_ops clk_proto_ops = {
+ .count_get = scmi_clock_count_get,
+ .info_get = scmi_clock_info_get,
+ .rate_get = scmi_clock_rate_get,
+ .rate_set = scmi_clock_rate_set,
+ .enable = scmi_clock_enable,
+ .disable = scmi_clock_disable,
+ .enable_atomic = scmi_clock_enable_atomic,
+ .disable_atomic = scmi_clock_disable_atomic,
+};
+
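+/* Enable/disable rate-change(-requested) notifications for one clock */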
+static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
+ u32 clk_id, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_clock_rate_notify *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->clk_id = cpu_to_le32(clk_id);
+ notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+
+ return ret;
+}
+
+static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ const struct scmi_clock_rate_notify_payld *p = payld;
+ struct scmi_clock_rate_notif_report *r = report;
+
+ if (sizeof(*p) != payld_sz ||
+ (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
+ evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
+ return NULL;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->clock_id = le32_to_cpu(p->clock_id);
+ r->rate = get_unaligned_le64(&p->rate_low);
+ *src_id = r->clock_id;
+
+ return r;
+}
+
+static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (!ci)
+ return -EINVAL;
+
+ return ci->num_clocks;
+}
+
+static const struct scmi_event clk_events[] = {
+ {
+ .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
+ .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+ .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+ },
+ {
+ .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
+ .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+ .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+ },
+};
+
+static const struct scmi_event_ops clk_event_ops = {
+ .get_num_sources = scmi_clk_get_num_sources,
+ .set_notify_enabled = scmi_clk_set_notify_enabled,
+ .fill_custom_report = scmi_clk_fill_custom_report,
+};
+
+static const struct scmi_protocol_events clk_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &clk_event_ops,
+ .evts = clk_events,
+ .num_events = ARRAY_SIZE(clk_events),
+};
+
+static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ u32 version;
+ int clkid, ret;
+ struct clock_info *cinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Clock Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
+ if (!cinfo)
+ return -ENOMEM;
+
+ ret = scmi_clock_protocol_attributes_get(ph, cinfo);
+ if (ret)
+ return ret;
+
+ cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
+ sizeof(*cinfo->clk), GFP_KERNEL);
+ if (!cinfo->clk)
+ return -ENOMEM;
+
+ for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
+ struct scmi_clock_info *clk = cinfo->clk + clkid;
+
+ ret = scmi_clock_attributes_get(ph, clkid, clk, version);
+ if (!ret)
+ scmi_clock_describe_rates_get(ph, clkid, clk);
+ }
+
+ cinfo->version = version;
+ return ph->set_priv(ph, cinfo);
+}
+
+static const struct scmi_protocol scmi_clock = {
+ .id = SCMI_PROTOCOL_CLOCK,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_clock_protocol_init,
+ .ops = &clk_proto_ops,
+ .events = &clk_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
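
For context beyond the diff: a rough sketch of how a consumer driver obtains and uses these clock ops, modeled on the in-tree user drivers/clk/clk-scmi.c; example_probe is an illustrative name, not part of this patch:

  static int example_probe(struct scmi_device *sdev)
  {
  	const struct scmi_clk_proto_ops *ops;
  	struct scmi_protocol_handle *ph;
  	u64 rate;
  	int count, ret;

  	if (!sdev->handle)
  		return -ENODEV;

  	/* Grab the clock protocol; dropped automatically on device unbind */
  	ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
  	if (IS_ERR(ops))
  		return PTR_ERR(ops);

  	count = ops->count_get(ph);		/* from protocol attributes */
  	ret = ops->rate_get(ph, 0, &rate);	/* CLOCK_RATE_GET on clock 0 */
  	if (ret)
  		return ret;

  	return ops->enable(ph, 0);		/* CLOCK_CONFIG_SET, bit 0 set */
  }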