author    Linus Torvalds <torvalds@linux-foundation.org>    2023-02-21 18:24:12 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2023-02-21 18:24:12 -0800
commit    5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree      cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /sound/soc/mediatek/common/mtk-afe-fe-dai.c
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next (grafted)

Pull networking updates from Jakub Kicinski:

 "Core:
   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:
   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis.
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:
   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:
   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:
   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99). Allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:
   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

  Drivers:
   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
Diffstat (limited to 'sound/soc/mediatek/common/mtk-afe-fe-dai.c')
 -rw-r--r--  sound/soc/mediatek/common/mtk-afe-fe-dai.c | 612
 1 file changed, 612 insertions(+), 0 deletions(-)
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
new file mode 100644
index 000000000..395be97f1
--- /dev/null
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mtk-afe-fe-dai.c -- MediaTek AFE FE DAI operator
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "mtk-afe-platform-driver.h"
+#include "mtk-afe-fe-dai.h"
+#include "mtk-base-afe.h"
+
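+/*
+ * Register offset from a memif's base-address register to its end-address
+ * register, used when the memif data provides no dedicated reg_ofs_end.
+ */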
+#define AFE_BASE_END_OFFSET 8
+
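+/*
+ * regmap helpers that tolerate optional registers: a negative reg means the
+ * field does not exist on this SoC variant, so the access is silently skipped.
+ */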
+static int mtk_regmap_update_bits(struct regmap *map, int reg,
+ unsigned int mask,
+ unsigned int val, int shift)
+{
+ if (reg < 0 || WARN_ON_ONCE(shift < 0))
+ return 0;
+ return regmap_update_bits(map, reg, mask << shift, val << shift);
+}
+
+static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val)
+{
+ if (reg < 0)
+ return 0;
+ return regmap_write(map, reg, val);
+}
+
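+/*
+ * FE DAI startup: enable the memif agent, apply the platform hardware
+ * constraints and, unless the memif uses a fixed irq, claim a dynamic one.
+ */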
+int mtk_afe_fe_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int memif_num = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[memif_num];
+ const struct snd_pcm_hardware *mtk_afe_hardware = afe->mtk_afe_hardware;
+ int ret;
+
+ memif->substream = substream;
+
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
+ /* enable agent */
+ mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
+ 1, 0, memif->data->agent_disable_shift);
+
+ snd_soc_set_runtime_hwparams(substream, mtk_afe_hardware);
+
+ /*
+ * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
+ * smaller than period_size due to AFE's internal buffer.
+ * This easily leads to overrun when avail_min is period_size.
+ * One more period can hold the possible unread buffer.
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ int periods_max = mtk_afe_hardware->periods_max;
+
+ ret = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS,
+ 3, periods_max);
+ if (ret < 0) {
+ dev_err(afe->dev, "hw_constraint_minmax failed\n");
+ return ret;
+ }
+ }
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ dev_err(afe->dev, "snd_pcm_hw_constraint_integer failed\n");
+
+ /* dynamically allocate an irq for this memif */
+ if (memif->irq_usage < 0) {
+ int irq_id = mtk_dynamic_irq_acquire(afe);
+
+ if (irq_id != afe->irqs_size) {
+ /* link */
+ memif->irq_usage = irq_id;
+ } else {
+ dev_err(afe->dev, "%s() error: no more asys irq\n",
+ __func__);
+ ret = -EBUSY;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_startup);
+
+void mtk_afe_fe_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ struct mtk_base_afe_memif *memif = &afe->memif[asoc_rtd_to_cpu(rtd, 0)->id];
+ int irq_id;
+
+ irq_id = memif->irq_usage;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->agent_disable_reg,
+ 1, 1, memif->data->agent_disable_shift);
+
+ if (!memif->const_irq) {
+ mtk_dynamic_irq_release(afe, irq_id);
+ memif->irq_usage = -1;
+ memif->substream = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_shutdown);
+
+int mtk_afe_fe_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ int ret;
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+ snd_pcm_format_t format = params_format(params);
+
+ if (afe->request_dram_resource)
+ afe->request_dram_resource(afe->dev);
+
+ dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
+ __func__, memif->data->name,
+ channels, rate, format,
+ &substream->runtime->dma_addr,
+ substream->runtime->dma_area,
+ substream->runtime->dma_bytes);
+
+ memset_io((void __force __iomem *)substream->runtime->dma_area, 0,
+ substream->runtime->dma_bytes);
+
+ /* set addr */
+ ret = mtk_memif_set_addr(afe, id,
+ substream->runtime->dma_area,
+ substream->runtime->dma_addr,
+ substream->runtime->dma_bytes);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
+ __func__, id, ret);
+ return ret;
+ }
+
+ /* set channel */
+ ret = mtk_memif_set_channel(afe, id, channels);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
+ __func__, id, channels, ret);
+ return ret;
+ }
+
+ /* set rate */
+ ret = mtk_memif_set_rate_substream(substream, id, rate);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
+ __func__, id, rate, ret);
+ return ret;
+ }
+
+ /* set format */
+ ret = mtk_memif_set_format(afe, id, format);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
+ __func__, id, format, ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_params);
+
+int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+
+ if (afe->release_dram_resource)
+ afe->release_dram_resource(afe->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
+
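+/*
+ * START/RESUME: enable the memif, program the irq period counter and rate,
+ * then unmask the irq. STOP/SUSPEND: disable the memif, mask the irq and
+ * clear any pending irq status.
+ */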
+int mtk_afe_fe_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+ unsigned int counter = runtime->period_size;
+ int fs;
+ int ret;
+
+ dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = mtk_memif_set_enable(afe, id);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
+ __func__, id, ret);
+ return ret;
+ }
+
+ /* set irq counter */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+ irq_data->irq_cnt_maskbit, counter,
+ irq_data->irq_cnt_shift);
+
+ /* set irq fs */
+ fs = afe->irq_fs(substream, runtime->rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_fs_reg,
+ irq_data->irq_fs_maskbit, fs,
+ irq_data->irq_fs_shift);
+
+ /* enable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1, 1, irq_data->irq_en_shift);
+
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ ret = mtk_memif_set_disable(afe, id);
+ if (ret) {
+ dev_err(afe->dev, "%s(), error, id %d, memif disable, ret %d\n",
+ __func__, id, ret);
+ }
+
+ /* disable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1, 0, irq_data->irq_en_shift);
+ /* and clear pending IRQ */
+ mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
+ 1 << irq_data->irq_clr_shift);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_trigger);
+
+int mtk_afe_fe_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+ int id = asoc_rtd_to_cpu(rtd, 0)->id;
+ int pbuf_size;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (afe->get_memif_pbuf_size) {
+ pbuf_size = afe->get_memif_pbuf_size(substream);
+ mtk_memif_set_pbuf_size(afe, id, pbuf_size);
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
+
+const struct snd_soc_dai_ops mtk_afe_fe_ops = {
+ .startup = mtk_afe_fe_startup,
+ .shutdown = mtk_afe_fe_shutdown,
+ .hw_params = mtk_afe_fe_hw_params,
+ .hw_free = mtk_afe_fe_hw_free,
+ .prepare = mtk_afe_fe_prepare,
+ .trigger = mtk_afe_fe_trigger,
+};
+EXPORT_SYMBOL_GPL(mtk_afe_fe_ops);
+
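+/*
+ * Claim a free AFE irq under irq_alloc_lock. Returns the irq index on
+ * success, or afe->irqs_size when every irq is already occupied.
+ */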
+int mtk_dynamic_irq_acquire(struct mtk_base_afe *afe)
+{
+ int i;
+
+ mutex_lock(&afe->irq_alloc_lock);
+ for (i = 0; i < afe->irqs_size; ++i) {
+ if (afe->irqs[i].irq_occupyed == 0) {
+ afe->irqs[i].irq_occupyed = 1;
+ mutex_unlock(&afe->irq_alloc_lock);
+ return i;
+ }
+ }
+ mutex_unlock(&afe->irq_alloc_lock);
+ return afe->irqs_size;
+}
+EXPORT_SYMBOL_GPL(mtk_dynamic_irq_acquire);
+
+int mtk_dynamic_irq_release(struct mtk_base_afe *afe, int irq_id)
+{
+ mutex_lock(&afe->irq_alloc_lock);
+ if (irq_id >= 0 && irq_id < afe->irqs_size) {
+ afe->irqs[irq_id].irq_occupyed = 0;
+ mutex_unlock(&afe->irq_alloc_lock);
+ return 0;
+ }
+ mutex_unlock(&afe->irq_alloc_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mtk_dynamic_irq_release);
+
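+/*
+ * Back up the registers listed in reg_back_up_list before the AFE powers
+ * down; mtk_afe_resume() restores them after runtime resume.
+ */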
+int mtk_afe_suspend(struct snd_soc_component *component)
+{
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+ struct device *dev = afe->dev;
+ struct regmap *regmap = afe->regmap;
+ int i;
+
+ if (pm_runtime_status_suspended(dev) || afe->suspended)
+ return 0;
+
+ if (!afe->reg_back_up)
+ afe->reg_back_up =
+ devm_kcalloc(dev, afe->reg_back_up_list_num,
+ sizeof(unsigned int), GFP_KERNEL);
+
+ if (afe->reg_back_up) {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ regmap_read(regmap, afe->reg_back_up_list[i],
+ &afe->reg_back_up[i]);
+ }
+
+ afe->suspended = true;
+ afe->runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_suspend);
+
+int mtk_afe_resume(struct snd_soc_component *component)
+{
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+ struct device *dev = afe->dev;
+ struct regmap *regmap = afe->regmap;
+ int i;
+
+ if (pm_runtime_status_suspended(dev) || !afe->suspended)
+ return 0;
+
+ afe->runtime_resume(dev);
+
+ if (!afe->reg_back_up) {
+ dev_dbg(dev, "%s no reg_backup\n", __func__);
+ } else {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+ afe->reg_back_up[i]);
+ }
+
+ afe->suspended = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_afe_resume);
+
+int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
+{
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+
+ if (memif->data->enable_shift < 0) {
+ dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
+ __func__, id);
+ return 0;
+ }
+ return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
+ 1, 1, memif->data->enable_shift);
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_enable);
+
+int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
+{
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+
+ if (memif->data->enable_shift < 0) {
+ dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
+ __func__, id);
+ return 0;
+ }
+ return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
+ 1, 0, memif->data->enable_shift);
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_disable);
+
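+/*
+ * Program the DMA buffer start and end addresses of a memif; the upper
+ * address bits are written separately where the hardware provides MSB
+ * registers.
+ */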
+int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
+ unsigned char *dma_area,
+ dma_addr_t dma_addr,
+ size_t dma_bytes)
+{
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
+ unsigned int phys_buf_addr = lower_32_bits(dma_addr);
+ unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);
+
+ memif->dma_area = dma_area;
+ memif->dma_addr = dma_addr;
+ memif->dma_bytes = dma_bytes;
+
+ /* start */
+ mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
+ phys_buf_addr);
+ /* end */
+ if (memif->data->reg_ofs_end)
+ mtk_regmap_write(afe->regmap,
+ memif->data->reg_ofs_end,
+ phys_buf_addr + dma_bytes - 1);
+ else
+ mtk_regmap_write(afe->regmap,
+ memif->data->reg_ofs_base +
+ AFE_BASE_END_OFFSET,
+ phys_buf_addr + dma_bytes - 1);
+
+ /* set start, end, upper 32 bits */
+ if (memif->data->reg_ofs_base_msb) {
+ mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
+ phys_buf_addr_upper_32);
+ mtk_regmap_write(afe->regmap,
+ memif->data->reg_ofs_end_msb,
+ phys_buf_addr_upper_32);
+ }
+
+ /*
+ * Set address bit 33 for the memif. When a dedicated msb_end_reg exists,
+ * msb_reg covers only the base address; the end-address MSB is programmed
+ * separately below.
+ */
+ if (memif->data->msb_reg)
+ mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
+ 1, msb_at_bit33, memif->data->msb_shift);
+
+ /* set MSB to 33-bit, for memif end address */
+ if (memif->data->msb_end_reg)
+ mtk_regmap_update_bits(afe->regmap, memif->data->msb_end_reg,
+ 1, msb_at_bit33,
+ memif->data->msb_end_shift);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_addr);
+
+int mtk_memif_set_channel(struct mtk_base_afe *afe,
+ int id, unsigned int channel)
+{
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ unsigned int mono;
+
+ if (memif->data->mono_shift < 0)
+ return 0;
+
+ if (memif->data->quad_ch_mask) {
+ unsigned int quad_ch = (channel == 4) ? 1 : 0;
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
+ memif->data->quad_ch_mask,
+ quad_ch, memif->data->quad_ch_shift);
+ }
+
+ if (memif->data->mono_invert)
+ mono = (channel == 1) ? 0 : 1;
+ else
+ mono = (channel == 1) ? 1 : 0;
+
+ /* for specific configuration of memif mono mode */
+ if (memif->data->int_odd_flag_reg)
+ mtk_regmap_update_bits(afe->regmap,
+ memif->data->int_odd_flag_reg,
+ 1, mono,
+ memif->data->int_odd_flag_shift);
+
+ return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
+ 1, mono, memif->data->mono_shift);
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_channel);
+
+static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
+ int id, int fs)
+{
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+
+ if (memif->data->fs_shift >= 0)
+ mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
+ memif->data->fs_maskbit,
+ fs, memif->data->fs_shift);
+
+ return 0;
+}
+
+int mtk_memif_set_rate(struct mtk_base_afe *afe,
+ int id, unsigned int rate)
+{
+ int fs = 0;
+
+ if (!afe->get_dai_fs) {
+ dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ fs = afe->get_dai_fs(afe, id, rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ return mtk_memif_set_rate_fs(afe, id, fs);
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_rate);
+
+int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
+ int id, unsigned int rate)
+{
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_soc_component *component =
+ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+
+ int fs = 0;
+
+ if (!afe->memif_fs) {
+ dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ fs = afe->memif_fs(substream, rate);
+
+ if (fs < 0)
+ return -EINVAL;
+
+ return mtk_memif_set_rate_fs(afe, id, fs);
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_rate_substream);
+
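+/*
+ * Map the PCM sample format to the memif HD mode bits: 16-bit formats use
+ * the normal mode, 24-bit formats use HD mode, and 32-bit formats pick one
+ * of two HD configurations depending on memif_32bit_supported.
+ */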
+int mtk_memif_set_format(struct mtk_base_afe *afe,
+ int id, snd_pcm_format_t format)
+{
+ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ int hd_audio = 0;
+ int hd_align = 0;
+
+ /* set hd mode */
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ case SNDRV_PCM_FORMAT_U16_LE:
+ hd_audio = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ case SNDRV_PCM_FORMAT_U32_LE:
+ if (afe->memif_32bit_supported) {
+ hd_audio = 2;
+ hd_align = 0;
+ } else {
+ hd_audio = 1;
+ hd_align = 1;
+ }
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_U24_LE:
+ hd_audio = 1;
+ break;
+ default:
+ dev_err(afe->dev, "%s() error: unsupported format %d\n",
+ __func__, format);
+ break;
+ }
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
+ 0x3, hd_audio, memif->data->hd_shift);
+
+ mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
+ 0x1, hd_align, memif->data->hd_align_mshift);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_format);
+
+int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
+ int id, int pbuf_size)
+{
+ const struct mtk_base_memif_data *memif_data = afe->memif[id].data;
+
+ if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
+ return 0;
+
+ mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
+ memif_data->pbuf_mask,
+ pbuf_size, memif_data->pbuf_shift);
+
+ mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
+ memif_data->minlen_mask,
+ pbuf_size, memif_data->minlen_shift);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mtk_memif_set_pbuf_size);
+
+MODULE_DESCRIPTION("MediaTek simple FE DAI operator");
+MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+MODULE_LICENSE("GPL v2");