From 5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Tue, 21 Feb 2023 18:24:12 -0800
Subject: Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull networking updates from Jakub Kicinski:
 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis.
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99). Allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
---
 arch/arm/mach-omap2/prm3xxx.c | 727 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 727 insertions(+)
 create mode 100644 arch/arm/mach-omap2/prm3xxx.c

(limited to 'arch/arm/mach-omap2/prm3xxx.c')

diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
new file mode 100644
index 000000000..1b5d08f59
--- /dev/null
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * OMAP3xxx PRM module functions
+ *
+ * Copyright (C) 2010-2012 Texas Instruments, Inc.
+ * Copyright (C) 2010 Nokia Corporation
+ * Benoît Cousson
+ * Paul Walmsley
+ * Rajendra Nayak
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+
+#include "soc.h"
+#include "common.h"
+#include "vp.h"
+#include "powerdomain.h"
+#include "prm3xxx.h"
+#include "prm2xxx_3xxx.h"
+#include "cm2xxx_3xxx.h"
+#include "prm-regbits-34xx.h"
+#include "cm3xxx.h"
+#include "cm-regbits-34xx.h"
+#include "clock.h"
+
+static void omap3xxx_prm_read_pending_irqs(unsigned long *events);
+static void omap3xxx_prm_ocp_barrier(void);
+static void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask);
+static void omap3xxx_prm_restore_irqen(u32 *saved_mask);
+static void omap3xxx_prm_iva_idle(void);
+
+static const struct omap_prcm_irq omap3_prcm_irqs[] = {
+	OMAP_PRCM_IRQ("wkup", 0, 0),
+	OMAP_PRCM_IRQ("io", 9, 1),
+};
+
+static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
+	.ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
+	.mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET,
+	.nr_regs = 1,
+	.irqs = omap3_prcm_irqs,
+	.nr_irqs = ARRAY_SIZE(omap3_prcm_irqs),
+	.irq = 11 + OMAP_INTC_START,
+	.read_pending_irqs = &omap3xxx_prm_read_pending_irqs,
+	.ocp_barrier = &omap3xxx_prm_ocp_barrier,
+	.save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen,
+	.restore_irqen = &omap3xxx_prm_restore_irqen,
+	.reconfigure_io_chain = NULL,
+};
+
+/*
+ * omap3_prm_reset_src_map - map from bits in the PRM_RSTST hardware
+ * register (which are specific to OMAP3xxx SoCs) to reset source ID
+ * bit shifts (which is an OMAP SoC-independent enumeration)
+ */
+static struct prm_reset_src_map omap3xxx_prm_reset_src_map[] = {
+	{ OMAP3430_GLOBAL_COLD_RST_SHIFT, OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT },
+	{ OMAP3430_GLOBAL_SW_RST_SHIFT, OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT },
+	{ OMAP3430_SECURITY_VIOL_RST_SHIFT, OMAP_SECU_VIOL_RST_SRC_ID_SHIFT },
+	{ OMAP3430_MPU_WD_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT },
+	{ OMAP3430_SECURE_WD_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT },
+	{ OMAP3430_EXTERNAL_WARM_RST_SHIFT, OMAP_EXTWARM_RST_SRC_ID_SHIFT },
+	{ OMAP3430_VDD1_VOLTAGE_MANAGER_RST_SHIFT,
+	  OMAP_VDD_MPU_VM_RST_SRC_ID_SHIFT },
+	{ OMAP3430_VDD2_VOLTAGE_MANAGER_RST_SHIFT,
+	  OMAP_VDD_CORE_VM_RST_SRC_ID_SHIFT },
+	{ OMAP3430_ICEPICK_RST_SHIFT, OMAP_ICEPICK_RST_SRC_ID_SHIFT },
+	{ OMAP3430_ICECRUSHER_RST_SHIFT, OMAP_ICECRUSHER_RST_SRC_ID_SHIFT },
+	{ -1, -1 },
+};
+
+/* PRM VP */
+
+/*
+ * struct omap3_vp - OMAP3 VP register access description.
+ * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg
+ */
+struct omap3_vp {
+	u32 tranxdone_status;
+};
+
+static struct omap3_vp omap3_vp[] = {
+	[OMAP3_VP_VDD_MPU_ID] = {
+		.tranxdone_status = OMAP3430_VP1_TRANXDONE_ST_MASK,
+	},
+	[OMAP3_VP_VDD_CORE_ID] = {
+		.tranxdone_status = OMAP3430_VP2_TRANXDONE_ST_MASK,
+	},
+};
+
+#define MAX_VP_ID ARRAY_SIZE(omap3_vp);
+
+static u32 omap3_prm_vp_check_txdone(u8 vp_id)
+{
+	struct omap3_vp *vp = &omap3_vp[vp_id];
+	u32 irqstatus;
+
+	irqstatus = omap2_prm_read_mod_reg(OCP_MOD,
+					   OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+	return irqstatus & vp->tranxdone_status;
+}
+
+static void omap3_prm_vp_clear_txdone(u8 vp_id)
+{
+	struct omap3_vp *vp = &omap3_vp[vp_id];
+
+	omap2_prm_write_mod_reg(vp->tranxdone_status,
+				OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+}
+
+u32 omap3_prm_vcvp_read(u8 offset)
+{
+	return omap2_prm_read_mod_reg(OMAP3430_GR_MOD, offset);
+}
+
+void omap3_prm_vcvp_write(u32 val, u8 offset)
+{
+	omap2_prm_write_mod_reg(val, OMAP3430_GR_MOD, offset);
+}
+
+u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
+{
+	return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset);
+}
+
+/**
+ * omap3xxx_prm_dpll3_reset - use DPLL3 reset to reboot the OMAP SoC
+ *
+ * Set the DPLL3 reset bit, which should reboot the SoC. This is the
+ * recommended way to restart the SoC, considering Errata i520. No
+ * return value.
+ */
+static void omap3xxx_prm_dpll3_reset(void)
+{
+	omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, OMAP3430_GR_MOD,
+				   OMAP2_RM_RSTCTRL);
+	/* OCP barrier */
+	omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP2_RM_RSTCTRL);
+}
+
+/**
+ * omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
+ * @events: ptr to a u32, preallocated by caller
+ *
+ * Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM
+ * MPU IRQs, and store the result into the u32 pointed to by @events.
+ * No return value.
+ */
+static void omap3xxx_prm_read_pending_irqs(unsigned long *events)
+{
+	u32 mask, st;
+
+	/* XXX Can the mask read be avoided (e.g., can it come from RAM?) */
+	mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+	st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
+	events[0] = mask & st;
+}
+
+/**
+ * omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
+ *
+ * Force any buffered writes to the PRM IP block to complete. Needed
+ * by the PRM IRQ handler, which reads and writes directly to the IP
+ * block, to avoid race conditions after acknowledging or clearing IRQ
+ * bits. No return value.
+ */
+static void omap3xxx_prm_ocp_barrier(void)
+{
+	omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
+}
+
+/**
+ * omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg
+ * @saved_mask: ptr to a u32 array to save IRQENABLE bits
+ *
+ * Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask
+ * must be allocated by the caller. Intended to be used in the PRM
+ * interrupt handler suspend callback. The OCP barrier is needed to
+ * ensure the write to disable PRM interrupts reaches the PRM before
+ * returning; otherwise, spurious interrupts might occur. No return
+ * value.
+ */
+static void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask)
+{
+	saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD,
+					       OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+
+	/* OCP barrier */
+	omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
+}
+
+/**
+ * omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args
+ * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
+ *
+ * Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended
+ * to be used in the PRM interrupt handler resume callback to restore
+ * values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP
+ * barrier should be needed here; any pending PRM interrupts will fire
+ * once the writes reach the PRM. No return value.
+ */
+static void omap3xxx_prm_restore_irqen(u32 *saved_mask)
+{
+	omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD,
+				OMAP3_PRM_IRQENABLE_MPU_OFFSET);
+}
+
+/**
+ * omap3xxx_prm_clear_mod_irqs - clear wake-up events from PRCM interrupt
+ * @module: PRM module to clear wakeups from
+ * @regs: register set to clear, 1 or 3
+ * @wkst_mask: wkst bits to clear
+ *
+ * The purpose of this function is to clear any wake-up events latched
+ * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
+ * may occur whilst attempting to clear a PM_WKST_x register and thus
+ * set another bit in this register. A while loop is used to ensure
+ * that any peripheral wake-up events occurring while attempting to
+ * clear the PM_WKST_x are detected and cleared.
+ */
+static int omap3xxx_prm_clear_mod_irqs(s16 module, u8 regs, u32 wkst_mask)
+{
+	u32 wkst, fclk, iclk, clken;
+	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
+	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
+	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
+	u16 grpsel_off = (regs == 3) ?
+		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
+	int c = 0;
+
+	wkst = omap2_prm_read_mod_reg(module, wkst_off);
+	wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
+	wkst &= wkst_mask;
+	if (wkst) {
+		iclk = omap2_cm_read_mod_reg(module, iclk_off);
+		fclk = omap2_cm_read_mod_reg(module, fclk_off);
+		while (wkst) {
+			clken = wkst;
+			omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
+			/*
+			 * For USBHOST, we don't know whether HOST1 or
+			 * HOST2 woke us up, so enable both f-clocks
+			 */
+			if (module == OMAP3430ES2_USBHOST_MOD)
+				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
+			omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
+			omap2_prm_write_mod_reg(wkst, module, wkst_off);
+			wkst = omap2_prm_read_mod_reg(module, wkst_off);
+			wkst &= wkst_mask;
+			c++;
+		}
+		omap2_cm_write_mod_reg(iclk, module, iclk_off);
+		omap2_cm_write_mod_reg(fclk, module, fclk_off);
+	}
+
+	return c;
+}
+
+/**
+ * omap3_prm_reset_modem - toggle reset signal for modem
+ *
+ * Toggles the reset signal to modem IP block. Required to allow
+ * OMAP3430 without stacked modem to idle properly.
+ */
+static void __init omap3_prm_reset_modem(void)
+{
+	omap2_prm_write_mod_reg(
+		OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
+		OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
+		CORE_MOD, OMAP2_RM_RSTCTRL);
+	omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
+}
+
+/**
+ * omap3_prm_init_pm - initialize PM related registers for PRM
+ * @has_uart4: SoC has UART4
+ * @has_iva: SoC has IVA
+ *
+ * Initializes PRM registers for PM use. Called from PM init.
+ */
+void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
+{
+	u32 en_uart4_mask;
+	u32 grpsel_uart4_mask;
+
+	/*
+	 * Enable control of external oscillator through
+	 * sys_clkreq. In the long run clock framework should
+	 * take care of this.
+	 */
+	omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
+				   1 << OMAP_AUTOEXTCLKMODE_SHIFT,
+				   OMAP3430_GR_MOD,
+				   OMAP3_PRM_CLKSRC_CTRL_OFFSET);
+
+	/* setup wakeup source */
+	omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
+				OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
+				WKUP_MOD, PM_WKEN);
+	/* No need to write EN_IO, that is always enabled */
+	omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
+				OMAP3430_GRPSEL_GPT1_MASK |
+				OMAP3430_GRPSEL_GPT12_MASK,
+				WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
+
+	/* Enable PM_WKEN to support DSS LPR */
+	omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
+				OMAP3430_DSS_MOD, PM_WKEN);
+
+	if (has_uart4) {
+		en_uart4_mask = OMAP3630_EN_UART4_MASK;
+		grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
+	} else {
+		en_uart4_mask = 0;
+		grpsel_uart4_mask = 0;
+	}
+
+	/* Enable wakeups in PER */
+	omap2_prm_write_mod_reg(en_uart4_mask |
+				OMAP3430_EN_GPIO2_MASK |
+				OMAP3430_EN_GPIO3_MASK |
+				OMAP3430_EN_GPIO4_MASK |
+				OMAP3430_EN_GPIO5_MASK |
+				OMAP3430_EN_GPIO6_MASK |
+				OMAP3430_EN_UART3_MASK |
+				OMAP3430_EN_MCBSP2_MASK |
+				OMAP3430_EN_MCBSP3_MASK |
+				OMAP3430_EN_MCBSP4_MASK,
+				OMAP3430_PER_MOD, PM_WKEN);
+
+	/* and allow them to wake up MPU */
+	omap2_prm_write_mod_reg(grpsel_uart4_mask |
+				OMAP3430_GRPSEL_GPIO2_MASK |
+				OMAP3430_GRPSEL_GPIO3_MASK |
+				OMAP3430_GRPSEL_GPIO4_MASK |
+				OMAP3430_GRPSEL_GPIO5_MASK |
+				OMAP3430_GRPSEL_GPIO6_MASK |
+				OMAP3430_GRPSEL_UART3_MASK |
+				OMAP3430_GRPSEL_MCBSP2_MASK |
+				OMAP3430_GRPSEL_MCBSP3_MASK |
+				OMAP3430_GRPSEL_MCBSP4_MASK,
+				OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
+
+	/* Don't attach IVA interrupts */
+	if (has_iva) {
+		omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
+		omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
+		omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
+		omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD,
+					OMAP3430_PM_IVAGRPSEL);
+	}
+
+	/* Clear any pending 'reset' flags */
+	omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
+	omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD,
+				OMAP2_RM_RSTST);
+
+	/* Clear any pending PRCM interrupts */
+	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
+	/* We need to idle iva2_pwrdm even on am3703 with no iva2. */
+	omap3xxx_prm_iva_idle();
+
+	omap3_prm_reset_modem();
+}
+
+/**
+ * omap3430_pre_es3_1_reconfigure_io_chain - restart wake-up daisy chain
+ *
+ * The ST_IO_CHAIN bit does not exist in 3430 before es3.1. The only
+ * thing we can do is toggle EN_IO bit for earlier omaps.
+ */
+static void omap3430_pre_es3_1_reconfigure_io_chain(void)
+{
+	omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+				     PM_WKEN);
+	omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+				   PM_WKEN);
+	omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
+}
+
+/**
+ * omap3_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
+ *
+ * Clear any previously-latched I/O wakeup events and ensure that the
+ * I/O wakeup gates are aligned with the current mux settings. Works
+ * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then
+ * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No
+ * return value. These registers are only available in 3430 es3.1 and later.
+ */
+static void omap3_prm_reconfigure_io_chain(void)
+{
+	int i = 0;
+
+	omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+				   PM_WKEN);
+
+	omap_test_timeout(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST) &
+			  OMAP3430_ST_IO_CHAIN_MASK,
+			  MAX_IOPAD_LATCH_TIME, i);
+	if (i == MAX_IOPAD_LATCH_TIME)
+		pr_warn("PRM: I/O chain clock line assertion timed out\n");
+
+	omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+				     PM_WKEN);
+
+	omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK, WKUP_MOD,
+				   PM_WKST);
+
+	omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST);
+}
+
+/**
+ * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
+ *
+ * Activates the I/O wakeup event latches and allows events logged by
+ * those latches to signal a wakeup event to the PRCM. For I/O
+ * wakeups to occur, WAKEUPENABLE bits must be set in the pad mux
+ * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
+ * No return value.
+ */
+static void omap3xxx_prm_enable_io_wakeup(void)
+{
+	if (prm_features & PRM_HAS_IO_WAKEUP)
+		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+					   PM_WKEN);
+}
+
+/**
+ * omap3xxx_prm_read_reset_sources - return the last SoC reset source
+ *
+ * Return a u32 representing the last reset sources of the SoC. The
+ * returned reset source bits are standardized across OMAP SoCs.
+ */
+static u32 omap3xxx_prm_read_reset_sources(void)
+{
+	struct prm_reset_src_map *p;
+	u32 r = 0;
+	u32 v;
+
+	v = omap2_prm_read_mod_reg(WKUP_MOD, OMAP2_RM_RSTST);
+
+	p = omap3xxx_prm_reset_src_map;
+	while (p->reg_shift >= 0 && p->std_shift >= 0) {
+		if (v & (1 << p->reg_shift))
+			r |= 1 << p->std_shift;
+		p++;
+	}
+
+	return r;
+}
+
+/**
+ * omap3xxx_prm_iva_idle - ensure IVA is in idle so it can be put into retention
+ *
+ * In cases where IVA2 is activated by bootcode, it may prevent
+ * full-chip retention or off-mode because it is not idle. This
+ * function forces the IVA2 into idle state so it can go
+ * into retention/off and thus allow full-chip retention/off.
+ */
+static void omap3xxx_prm_iva_idle(void)
+{
+	/* ensure IVA2 clock is disabled */
+	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
+
+	/* if no clock activity, nothing else to do */
+	if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
+	      OMAP3430_CLKACTIVITY_IVA2_MASK))
+		return;
+
+	/* Reset IVA2 */
+	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+				OMAP3430_RST2_IVA2_MASK |
+				OMAP3430_RST3_IVA2_MASK,
+				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+	/* Enable IVA2 clock */
+	omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
+			       OMAP3430_IVA2_MOD, CM_FCLKEN);
+
+	/* Un-reset IVA2 */
+	omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+
+	/* Disable IVA2 clock */
+	omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
+
+	/* Reset IVA2 */
+	omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
+				OMAP3430_RST2_IVA2_MASK |
+				OMAP3430_RST3_IVA2_MASK,
+				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+}
+
+/**
+ * omap3xxx_prm_clear_global_cold_reset - checks the global cold reset status
+ *                                        and clears it if asserted
+ *
+ * Checks if cold-reset has occurred and clears the status bit if yes. Returns
+ * 1 if cold-reset has occurred, 0 otherwise.
+ */
+int omap3xxx_prm_clear_global_cold_reset(void)
+{
+	if (omap2_prm_read_mod_reg(OMAP3430_GR_MOD, OMAP3_PRM_RSTST_OFFSET) &
+	    OMAP3430_GLOBAL_COLD_RST_MASK) {
+		omap2_prm_set_mod_reg_bits(OMAP3430_GLOBAL_COLD_RST_MASK,
+					   OMAP3430_GR_MOD,
+					   OMAP3_PRM_RSTST_OFFSET);
+		return 1;
+	}
+
+	return 0;
+}
+
+void omap3_prm_save_scratchpad_contents(u32 *ptr)
+{
+	*ptr++ = omap2_prm_read_mod_reg(OMAP3430_GR_MOD,
+					OMAP3_PRM_CLKSRC_CTRL_OFFSET);
+
+	*ptr++ = omap2_prm_read_mod_reg(OMAP3430_GR_MOD,
+					OMAP3_PRM_CLKSEL_OFFSET);
+}
+
+/* Powerdomain low-level functions */
+
+static int omap3_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+{
+	omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK,
+				   (pwrst << OMAP_POWERSTATE_SHIFT),
+				   pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);
+	return 0;
+}
+
+static int omap3_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTCTRL,
+					     OMAP_POWERSTATE_MASK);
+}
+
+static int omap3_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTST,
+					     OMAP_POWERSTATEST_MASK);
+}
+
+/* Applicable only for OMAP3. Not supported on OMAP2 */
+static int omap3_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP3430_PM_PREPWSTST,
+					     OMAP3430_LASTPOWERSTATEENTERED_MASK);
+}
+
+static int omap3_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTST,
+					     OMAP3430_LOGICSTATEST_MASK);
+}
+
+static int omap3_pwrdm_read_logic_retst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP2_PM_PWSTCTRL,
+					     OMAP3430_LOGICSTATEST_MASK);
+}
+
+static int omap3_pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP3430_PM_PREPWSTST,
+					     OMAP3430_LASTLOGICSTATEENTERED_MASK);
+}
+
+static int omap3_get_mem_bank_lastmemst_mask(u8 bank)
+{
+	switch (bank) {
+	case 0:
+		return OMAP3430_LASTMEM1STATEENTERED_MASK;
+	case 1:
+		return OMAP3430_LASTMEM2STATEENTERED_MASK;
+	case 2:
+		return OMAP3430_LASTSHAREDL2CACHEFLATSTATEENTERED_MASK;
+	case 3:
+		return OMAP3430_LASTL2FLATMEMSTATEENTERED_MASK;
+	default:
+		WARN_ON(1); /* should never happen */
+		return -EEXIST;
+	}
+	return 0;
+}
+
+static int omap3_pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
+{
+	u32 m;
+
+	m = omap3_get_mem_bank_lastmemst_mask(bank);
+
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     OMAP3430_PM_PREPWSTST, m);
+}
+
+static int omap3_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
+{
+	omap2_prm_write_mod_reg(0, pwrdm->prcm_offs, OMAP3430_PM_PREPWSTST);
+	return 0;
+}
+
+static int omap3_pwrdm_enable_hdwr_sar(struct powerdomain *pwrdm)
+{
+	return omap2_prm_rmw_mod_reg_bits(0,
+					  1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT,
+					  pwrdm->prcm_offs, OMAP2_PM_PWSTCTRL);
+}
+
+static int omap3_pwrdm_disable_hdwr_sar(struct powerdomain *pwrdm)
+{
+	return omap2_prm_rmw_mod_reg_bits(1 << OMAP3430ES2_SAVEANDRESTORE_SHIFT,
+					  0, pwrdm->prcm_offs,
+					  OMAP2_PM_PWSTCTRL);
+}
+
+struct pwrdm_ops omap3_pwrdm_operations = {
+	.pwrdm_set_next_pwrst = omap3_pwrdm_set_next_pwrst,
+	.pwrdm_read_next_pwrst = omap3_pwrdm_read_next_pwrst,
+	.pwrdm_read_pwrst = omap3_pwrdm_read_pwrst,
+	.pwrdm_read_prev_pwrst = omap3_pwrdm_read_prev_pwrst,
+	.pwrdm_set_logic_retst = omap2_pwrdm_set_logic_retst,
+	.pwrdm_read_logic_pwrst = omap3_pwrdm_read_logic_pwrst,
+	.pwrdm_read_logic_retst = omap3_pwrdm_read_logic_retst,
+	.pwrdm_read_prev_logic_pwrst = omap3_pwrdm_read_prev_logic_pwrst,
+	.pwrdm_set_mem_onst = omap2_pwrdm_set_mem_onst,
+	.pwrdm_set_mem_retst = omap2_pwrdm_set_mem_retst,
+	.pwrdm_read_mem_pwrst = omap2_pwrdm_read_mem_pwrst,
+	.pwrdm_read_mem_retst = omap2_pwrdm_read_mem_retst,
+	.pwrdm_read_prev_mem_pwrst = omap3_pwrdm_read_prev_mem_pwrst,
+	.pwrdm_clear_all_prev_pwrst = omap3_pwrdm_clear_all_prev_pwrst,
+	.pwrdm_enable_hdwr_sar = omap3_pwrdm_enable_hdwr_sar,
+	.pwrdm_disable_hdwr_sar = omap3_pwrdm_disable_hdwr_sar,
+	.pwrdm_wait_transition = omap2_pwrdm_wait_transition,
+};
+
+/*
+ *
+ */
+
+static int omap3xxx_prm_late_init(void);
+
+static struct prm_ll_data omap3xxx_prm_ll_data = {
+	.read_reset_sources = &omap3xxx_prm_read_reset_sources,
+	.late_init = &omap3xxx_prm_late_init,
+	.assert_hardreset = &omap2_prm_assert_hardreset,
+	.deassert_hardreset = &omap2_prm_deassert_hardreset,
+	.is_hardreset_asserted = &omap2_prm_is_hardreset_asserted,
+	.reset_system = &omap3xxx_prm_dpll3_reset,
+	.clear_mod_irqs = &omap3xxx_prm_clear_mod_irqs,
+	.vp_check_txdone = &omap3_prm_vp_check_txdone,
+	.vp_clear_txdone = &omap3_prm_vp_clear_txdone,
+};
+
+int __init omap3xxx_prm_init(const struct omap_prcm_init_data *data)
+{
+	omap2_clk_legacy_provider_init(TI_CLKM_PRM,
+				       prm_base.va + OMAP3430_IVA2_MOD);
+	if (omap3_has_io_wakeup())
+		prm_features |= PRM_HAS_IO_WAKEUP;
+
+	return prm_register(&omap3xxx_prm_ll_data);
+}
+
+static const struct of_device_id omap3_prm_dt_match_table[] = {
+	{ .compatible = "ti,omap3-prm" },
+	{ }
+};
+
+static int omap3xxx_prm_late_init(void)
+{
+	struct device_node *np;
+	int irq_num;
+
+	if (!(prm_features & PRM_HAS_IO_WAKEUP))
+		return 0;
+
+	if (omap3_has_io_chain_ctrl())
+		omap3_prcm_irq_setup.reconfigure_io_chain =
+			omap3_prm_reconfigure_io_chain;
+	else
+		omap3_prcm_irq_setup.reconfigure_io_chain =
+			omap3430_pre_es3_1_reconfigure_io_chain;
+
+	np = of_find_matching_node(NULL, omap3_prm_dt_match_table);
+	if (!np) {
+		pr_err("PRM: no device tree node for interrupt?\n");
+
+		return -ENODEV;
+	}
+
+	irq_num = of_irq_get(np, 0);
+	of_node_put(np);
+	if (irq_num == -EPROBE_DEFER)
+		return irq_num;
+
+	omap3_prcm_irq_setup.irq = irq_num;
+
+	omap3xxx_prm_enable_io_wakeup();
+
+	return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
+}
+
+static void __exit omap3xxx_prm_exit(void)
+{
+	prm_unregister(&omap3xxx_prm_ll_data);
+}
+__exitcall(omap3xxx_prm_exit);
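
The changelog above mentions the new IP_LOCAL_PORT_RANGE socket option
("control local port range on socket by socket basis"). The following is a
minimal user-space sketch only -- it is not part of this merge or of the OMAP
diff above -- and it assumes the linux/in.h encoding of the range as a u32
with the lower bound in the low 16 bits and the upper bound in the high 16
bits, plus the assumed option value 51 for libc headers that do not yet
define the constant.

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	#ifndef IP_LOCAL_PORT_RANGE
	#define IP_LOCAL_PORT_RANGE 51	/* assumed value; see linux/in.h in 6.3+ */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		/* restrict ephemeral ports for this socket to 40000..40100 */
		uint32_t range = (40100u << 16) | 40000u;
		struct sockaddr_in addr;

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		if (setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
			       &range, sizeof(range)) < 0)
			perror("setsockopt(IP_LOCAL_PORT_RANGE)");

		/* bind to port 0 so the kernel picks a port from the range */
		memset(&addr, 0, sizeof(addr));
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		addr.sin_port = 0;
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			perror("bind");

		return 0;
	}

On kernels without the option, the setsockopt() call is expected to fail
(typically with ENOPROTOOPT) and port selection falls back to the global
net.ipv4.ip_local_port_range sysctl.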