author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit	5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree	cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /arch/arm/mm/ioremap.c
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next (grafted)
Pull networking updates from Jakub Kicinski:
 "Core:
   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

 Protocols:
   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control the local port range on a socket-by-socket basis (see the sketch after the shortlog below).
   - Track and report in procfs the number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - Wi-Fi 7 EHT channel puncturing support (client & AP).

 BPF:
   - Add an rbtree data structure following the "next-gen data structure" precedent set by the recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect-metadata mode.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for the bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

 Netfilter:
   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

 Driver API:
   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of shared-medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low-priority frames by high-priority frames.
   - Add support for controlling MACsec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per-action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if the old Wireless Extensions user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using the nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

 New hardware / drivers:
   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

 Drivers:
   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
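To make the IP_LOCAL_PORT_RANGE item above concrete, here is a minimal
userspace sketch (an illustrative example, not part of this commit). It
assumes the 6.3 uAPI headers; the u32 option value packs the upper port
bound into the high 16 bits and the lower bound into the low 16 bits, and
the fallback define mirrors include/uapi/linux/in.h.

	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef IP_LOCAL_PORT_RANGE
	#define IP_LOCAL_PORT_RANGE 51	/* include/uapi/linux/in.h, v6.3 */
	#endif

	/* Restrict the ports this socket may autobind to (lo..hi). */
	static int set_local_port_range(int fd, unsigned short lo,
					unsigned short hi)
	{
		unsigned int range = ((unsigned int)hi << 16) | lo;

		return setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
				  &range, sizeof(range));
	}

After a successful call, binding the socket to port 0 autobinds within
[lo, hi] instead of the netns-wide ip_local_port_range.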
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r--	arch/arm/mm/ioremap.c	| 496
1 file changed, 496 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
new file mode 100644
index 000000000..212907006
--- /dev/null
+++ b/arch/arm/mm/ioremap.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/arch/arm/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ *
+ * Hacked for ARM by Phil Blundell <philb@gnu.org>
+ * Hacked to allow all architectures to build, and various cleanups
+ * by Russell King
+ *
+ * This allows a driver to remap an arbitrary region of bus memory into
+ * virtual space. One should *only* use readl, writel, memcpy_toio and
+ * so on with such remapped areas.
+ *
+ * Because the ARM only has a 32-bit address space we can't address the
+ * whole of the (physical) PCI space at once. PCI huge-mode addressing
+ * allows us to circumvent this restriction by splitting PCI space into
+ * two 2GB chunks and mapping only one at a time into processor memory.
+ * We use MMU protection domains to trap any attempt to access the bank
+ * that is not currently mapped. (This isn't fully implemented yet.)
+ */
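+/*
+ * Illustrative driver-side sketch (an example, not part of the original
+ * file; the base address and register offset below are hypothetical):
+ *
+ *	void __iomem *regs = ioremap(0x10000000, SZ_4K);
+ *	if (regs) {
+ *		u32 v = readl(regs + 0x10);
+ *		writel(v | 0x1, regs + 0x10);	// always via the accessors
+ *		iounmap(regs);
+ *	}
+ */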
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/sizes.h>
+#include <linux/memblock.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/cacheflush.h>
+#include <asm/early_ioremap.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/set_memory.h>
+#include <asm/system_info.h>
+
+#include <asm/mach/map.h>
+#include <asm/mach/pci.h>
+#include "mm.h"
+
+
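+/* static mappings, kept sorted by ascending virtual address */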
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+ size_t size, unsigned int mtype)
+{
+ struct static_vm *svm;
+ struct vm_struct *vm;
+
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
+ if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ continue;
+ if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+ continue;
+
+ if (vm->phys_addr > paddr ||
+ paddr + size - 1 > vm->phys_addr + vm->size - 1)
+ continue;
+
+ return svm;
+ }
+
+ return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+ struct static_vm *svm;
+ struct vm_struct *vm;
+
+ list_for_each_entry(svm, &static_vmlist, list) {
+ vm = &svm->vm;
+
+ /* static_vmlist is in ascending order */
+ if (vm->addr > vaddr)
+ break;
+
+ if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+ return svm;
+ }
+
+ return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+ struct static_vm *curr_svm;
+ struct vm_struct *vm;
+ void *vaddr;
+
+ vm = &svm->vm;
+ vm_area_add_early(vm);
+ vaddr = vm->addr;
+
+ list_for_each_entry(curr_svm, &static_vmlist, list) {
+ vm = &curr_svm->vm;
+
+ if (vm->addr > vaddr)
+ break;
+ }
+ list_add_tail(&svm->list, &curr_svm->list);
+}
+
+int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype)
+{
+ return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+ __pgprot(mtype->prot_pte));
+}
+EXPORT_SYMBOL(ioremap_page);
+
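+/*
+ * Bring @mm's view of the vmalloc region up to date: copy init_mm's page
+ * directory entries covering VMALLOC_START..VMALLOC_END, and retry if
+ * init_mm's vmalloc_seq changed mid-copy so that a stale snapshot is
+ * never published.
+ */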
+void __check_vmalloc_seq(struct mm_struct *mm)
+{
+ int seq;
+
+ do {
+ seq = atomic_read(&init_mm.context.vmalloc_seq);
+ memcpy(pgd_offset(mm, VMALLOC_START),
+ pgd_offset_k(VMALLOC_START),
+ sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+ pgd_index(VMALLOC_START)));
+ /*
+ * Use a store-release so that other CPUs that observe the
+ * counter's new value are guaranteed to see the results of the
+ * memcpy as well.
+ */
+ atomic_set_release(&mm->context.vmalloc_seq, seq);
+ } while (seq != atomic_read(&init_mm.context.vmalloc_seq));
+}
+
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+/*
+ * Section support is unsafe on SMP - if you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+ unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
+ pmd_t *pmdp = pmd_off_k(addr);
+
+ do {
+ pmd_t pmd = *pmdp;
+
+ if (!pmd_none(pmd)) {
+ /*
+ * Clear the PMD from the page table, and
+ * increment the vmalloc sequence so others
+ * notice this change.
+ *
+ * Note: this is still racy on SMP machines.
+ */
+ pmd_clear(pmdp);
+ atomic_inc_return_release(&init_mm.context.vmalloc_seq);
+
+ /*
+ * Free the page table, if there was one.
+ */
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
+ }
+
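+ /* one PMD step covers 2MB: a pair of 1MB hardware section entries */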
+ addr += PMD_SIZE;
+ pmdp += 2;
+ } while (addr < end);
+
+ /*
+ * Ensure that the active_mm is up to date - we want to
+ * catch any use-after-iounmap cases.
+ */
+ check_vmalloc_seq(current->active_mm);
+
+ flush_tlb_kernel_range(virt, end);
+}
+
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+ size_t size, const struct mem_type *type)
+{
+ unsigned long addr = virt, end = virt + size;
+ pmd_t *pmd = pmd_off_k(addr);
+
+ /*
+ * Remove and free any PTE-based mapping, and
+ * sync the current kernel mapping.
+ */
+ unmap_area_sections(virt, size);
+
+ do {
+ pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
+ pfn += SZ_1M >> PAGE_SHIFT;
+ pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
+ pfn += SZ_1M >> PAGE_SHIFT;
+ flush_pmd_entry(pmd);
+
+ addr += PMD_SIZE;
+ pmd += 2;
+ } while (addr < end);
+
+ return 0;
+}
+
+static int
+remap_area_supersections(unsigned long virt, unsigned long pfn,
+ size_t size, const struct mem_type *type)
+{
+ unsigned long addr = virt, end = virt + size;
+ pmd_t *pmd = pmd_off_k(addr);
+
+ /*
+ * Remove and free any PTE-based mapping, and
+ * sync the current kernel mapping.
+ */
+ unmap_area_sections(virt, size);
+ do {
+ unsigned long super_pmd_val, i;
+
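+ /* supersection descriptors carry PA[35:32] in bits [23:20] */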
+ super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
+ PMD_SECT_SUPER;
+ super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
+
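+ /* a 16MB supersection is replicated in all 16 slots (8 PMDs x 2) */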
+ for (i = 0; i < 8; i++) {
+ pmd[0] = __pmd(super_pmd_val);
+ pmd[1] = __pmd(super_pmd_val);
+ flush_pmd_entry(pmd);
+
+ addr += PMD_SIZE;
+ pmd += 2;
+ }
+
+ pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
+ } while (addr < end);
+
+ return 0;
+}
+#endif
+
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
+{
+ const struct mem_type *type;
+ int err;
+ unsigned long addr;
+ struct vm_struct *area;
+ phys_addr_t paddr = __pfn_to_phys(pfn);
+
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * High mappings (pfn >= 0x100000, i.e. physical addresses at or above
+ * the 4GiB boundary with 4K pages) must be supersection aligned.
+ */
+ if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
+ return NULL;
+#endif
+
+ type = get_mem_type(mtype);
+ if (!type)
+ return NULL;
+
+ /*
+ * Page align the mapping size, taking account of any offset.
+ */
+ size = PAGE_ALIGN(offset + size);
+
+ /*
+ * Try to reuse one of the static mappings whenever possible.
+ */
+ if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+ struct static_vm *svm;
+
+ svm = find_static_vm_paddr(paddr, size, mtype);
+ if (svm) {
+ addr = (unsigned long)svm->vm.addr;
+ addr += paddr - svm->vm.phys_addr;
+ return (void __iomem *) (offset + addr);
+ }
+ }
+
+ /*
+ * Don't allow RAM to be mapped with mismatched attributes - this
+ * causes problems with ARMv6+
+ */
+ if (WARN_ON(memblock_is_map_memory(PFN_PHYS(pfn)) &&
+ mtype != MT_MEMORY_RW))
+ return NULL;
+
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+ area->phys_addr = paddr;
+
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+ if (DOMAIN_IO == 0 &&
+ (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
+ cpu_is_xsc3()) && pfn >= 0x100000 &&
+ !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_supersections(addr, pfn, size, type);
+ } else if (!((paddr | size | addr) & ~PMD_MASK)) {
+ area->flags |= VM_ARM_SECTION_MAPPING;
+ err = remap_area_sections(addr, pfn, size, type);
+ } else
+#endif
+ err = ioremap_page_range(addr, addr + size, paddr,
+ __pgprot(type->prot_pte));
+
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ flush_cache_vmap(addr, addr + size);
+ return (void __iomem *) (offset + addr);
+}
+
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ phys_addr_t last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys_addr);
+
+ /*
+ * Don't allow wraparound or zero size
+ */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
+ unsigned int, void *) =
+ __arm_ioremap_caller;
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+ * code in external memory, for example when reprogramming source
+ * clocks that would affect normal memory. Please see
+ * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
+ */
+void __iomem *
+__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
+{
+ unsigned int mtype;
+
+ if (cached)
+ mtype = MT_MEMORY_RWX;
+ else
+ mtype = MT_MEMORY_RWX_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+}
+
+void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
+{
+ set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
+}
+
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+ return (__force void *)arch_ioremap_caller(phys_addr, size,
+ MT_MEMORY_RW,
+ __builtin_return_address(0));
+}
+
+void iounmap(volatile void __iomem *io_addr)
+{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+ struct static_vm *svm;
+
+ /* If this is a static mapping, we must leave it alone */
+ svm = find_static_vm_vaddr(addr);
+ if (svm)
+ return;
+
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+ {
+ struct vm_struct *vm;
+
+ vm = find_vm_area(addr);
+
+ /*
+ * If this is a section based mapping we need to handle it
+ * specially as the VM subsystem does not know how to handle
+ * such a beast.
+ */
+ if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
+ unmap_area_sections((unsigned long)vm->addr, vm->size);
+ }
+#endif
+
+ vunmap(addr);
+}
+EXPORT_SYMBOL(iounmap);
+
+#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
+static int pci_ioremap_mem_type = MT_DEVICE;
+
+void pci_ioremap_set_mem_type(int mem_type)
+{
+ pci_ioremap_mem_type = mem_type;
+}
+
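+/*
+ * Map a PCI I/O resource at its fixed offset within the PCI_IOBASE
+ * window, using the memory type selected via pci_ioremap_set_mem_type()
+ * (MT_DEVICE by default).
+ */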
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
+{
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+ if (!(res->flags & IORESOURCE_IO))
+ return -EINVAL;
+
+ if (res->end > IO_SPACE_LIMIT)
+ return -EINVAL;
+
+ return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
+}
+EXPORT_SYMBOL(pci_remap_iospace);
+
+void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
+#endif
+
+/*
+ * Must be called after early_fixmap_init
+ */
+void __init early_ioremap_init(void)
+{
+ early_ioremap_setup();
+}
+
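+/*
+ * Tell memremap() whether it may reuse the kernel's linear mapping for
+ * this range: only regions that memblock maps as ordinary RAM qualify.
+ */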
+bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long pfn = PHYS_PFN(offset);
+
+ return memblock_is_map_memory(pfn);
+}