path: root/drivers/gpu/drm/ttm/ttm_pool.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:24:12 -0800
commit    5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree      cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /drivers/gpu/drm/ttm/ttm_pool.c
download  linux-5b7c4cabbb65f5c469464da6c5f614cbd7f730f2.tar.gz
          linux-5b7c4cabbb65f5c469464da6c5f614cbd7f730f2.zip
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next (grafted)
Pull networking updates from Jakub Kicinski:
 "Core:
   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:
   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control the local port range on a socket-by-socket basis (a usage sketch follows the commit log below).
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:
   - Add a rbtree data structure following the "next-gen data structure" precedent set by the recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:
   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:
   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of shared-medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACsec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if the old Wireless Extensions user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using the nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:
   - Ethernet:
     - nVidia BlueField-3 support (control traffic driver)
     - Ethernet support for imx93 SoCs
     - Motorcomm yt8531 gigabit Ethernet PHY
     - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
     - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
     - Amlogic gxl MDIO mux
   - WiFi:
     - RealTek RTL8188EU (rtl8xxxu)
     - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
     - Renesas R-Car V4H

  Drivers:
   - Bluetooth:
     - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
     - Intel (1G, igc):
       - support TSN / Qbv / packet scheduling features of i226 model
     - Intel (100G, ice):
       - use GNSS subsystem instead of TTY
       - multi-buffer XDP support
       - extend support for GPIO pins to E823 devices
     - nVidia/Mellanox:
       - update the shared buffer configuration on PFC commands
       - implement PTP adjphase function for HW offset control
       - TC support for Geneve and GRE with VF tunnel offload
       - more efficient crypto key management method
       - multi-port eswitch support
     - Netronome/Corigine:
       - add DCB IEEE support
       - support IPsec offloading for NFP3800
     - Freescale/NXP (enetc):
       - support XDP_REDIRECT for XDP non-linear buffers
       - improve reconfig, avoid link flap and waiting for idle
       - support MAC Merge layer
     - Other NICs:
       - sfc/ef100: add basic devlink support for ef100
       - ionic: rx_push mode operation (writing descriptors via MMIO)
       - bnxt: use the auxiliary bus abstraction for RDMA
       - r8169: disable ASPM and reset bus in case of tx timeout
       - cpsw: support QSGMII mode for J721e CPSW9G
       - cpts: support pulse-per-second output
       - ngbe: add an mdio bus driver
       - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
       - r8152: handle devices with FW with NCM support
       - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
       - virtio-net: support multi buffer XDP
       - virtio/vsock: replace virtio_vsock_pkt with sk_buff
       - tsnep: XDP support
   - Ethernet high-speed switches:
     - nVidia/Mellanox (mlxsw):
       - add support for latency TLV (in FW control messages)
     - Microchip (sparx5):
       - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
       - add support for egress DSCP rewrite
       - IS0 VCAP support (Ingress Classification)
       - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
       - ES2 VCAP support (Egress Access Control)
       - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
     - Marvell (mv88e6xxx):
       - add MAB (port auth) offload support
       - enable PTP receive for mv88e6390
     - NXP (ocelot):
       - support MAC Merge layer
       - support for the vsc7512 internal copper phys
     - Microchip:
       - lan9303: convert to PHYLINK
       - lan966x: support TC flower filter statistics
       - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
       - lan937x: support Credit Based Shaper configuration
       - ksz9477: support Energy Efficient Ethernet
     - other:
       - qca8k: convert to regmap read/write API, use bulk operations
       - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
     - EHT (Wi-Fi 7) rate reporting
     - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
     - IPQ5018 support
     - Fine Timing Measurement (FTM) responder role support
     - channel 177 support
   - MediaTek WiFi (mt76):
     - per-PHY LED support
     - mt7996: EHT (Wi-Fi 7) support
     - Wireless Ethernet Dispatch (WED) reset support
     - switch to using page pool allocator
   - RealTek WiFi (rtw89):
     - support new version of Bluetooth co-existence
   - Mobile:
     - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
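As an aside on the IP_LOCAL_PORT_RANGE item above: a minimal userspace sketch of the new socket option, assuming uapi headers from Linux 6.3 or later. The option value encoding (a u32 with the upper bound in the high 16 bits and the lower bound in the low 16 bits), the fallback define and the concrete port numbers are illustrative assumptions, not taken from this page.

/* Minimal sketch: restrict this socket's local ports to 40000-40099.
 * Requires Linux 6.3+; the fallback define below mirrors the 6.3 uapi
 * value and should be treated as an assumption here.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	uint32_t range = 40000u | (40099u << 16); /* low | (high << 16) */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	if (setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
		       &range, sizeof(range)) < 0)
		perror("setsockopt(IP_LOCAL_PORT_RANGE)");

	return 0;
}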
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_pool.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_pool.c | 781
1 file changed, 781 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
new file mode 100644
index 000000000..9f6764bf3
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+/* Pooling of allocated pages is necessary because changing the caching
+ * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
+ * invalidate for those addresses.
+ *
+ * In addition to that, allocations from the DMA coherent API are pooled as
+ * well because they are rather slow compared to alloc_pages+map.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/sched/mm.h>
+
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
+#include <drm/ttm/ttm_pool.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "ttm_module.h"
+
+/**
+ * struct ttm_pool_dma - Helper object for coherent DMA mappings
+ *
+ * @addr: original DMA address returned for the mapping
+ * @vaddr: original vaddr returned for the mapping, with the allocation order in the lower bits
+ */
+struct ttm_pool_dma {
+ dma_addr_t addr;
+ unsigned long vaddr;
+};
+
+static unsigned long page_pool_size;
+
+MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
+module_param(page_pool_size, ulong, 0644);
+
+static atomic_long_t allocated_pages;
+
+static struct ttm_pool_type global_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_uncached[MAX_ORDER];
+
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+
+static spinlock_t shrinker_lock;
+static struct list_head shrinker_list;
+static struct shrinker mm_shrinker;
+
+/* Allocate pages of size 1 << order with the given gfp_flags */
+static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
+ unsigned int order)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ struct page *p;
+ void *vaddr;
+
+ /* Don't set the __GFP_COMP flag for higher order allocations.
+ * Mapping pages directly into a userspace process and calling
+ * put_page() on a TTM allocated page is illegal.
+ */
+ if (order)
+ gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_KSWAPD_RECLAIM;
+
+ if (!pool->use_dma_alloc) {
+ p = alloc_pages(gfp_flags, order);
+ if (p)
+ p->private = order;
+ return p;
+ }
+
+ dma = kmalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
+ &dma->addr, gfp_flags, attr);
+ if (!vaddr)
+ goto error_free;
+
+ /* TODO: This is an illegal abuse of the DMA API, but we need to rework
+ * TTM page fault handling and extend the DMA API to clean this up.
+ */
+ if (is_vmalloc_addr(vaddr))
+ p = vmalloc_to_page(vaddr);
+ else
+ p = virt_to_page(vaddr);
+
+ dma->vaddr = (unsigned long)vaddr | order;
+ p->private = (unsigned long)dma;
+ return p;
+
+error_free:
+ kfree(dma);
+ return NULL;
+}
+
+/* Reset the caching of pages of size 1 << order and free them */
+static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
+ unsigned int order, struct page *p)
+{
+ unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
+ struct ttm_pool_dma *dma;
+ void *vaddr;
+
+#ifdef CONFIG_X86
+ /* We don't care that set_pages_wb is inefficient here. This is only
+ * used when we have to shrink and CPU overhead is irrelevant then.
+ */
+ if (caching != ttm_cached && !PageHighMem(p))
+ set_pages_wb(p, 1 << order);
+#endif
+
+ if (!pool || !pool->use_dma_alloc) {
+ __free_pages(p, order);
+ return;
+ }
+
+ if (order)
+ attr |= DMA_ATTR_NO_WARN;
+
+ dma = (void *)p->private;
+ vaddr = (void *)(dma->vaddr & PAGE_MASK);
+ dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
+ attr);
+ kfree(dma);
+}
+
+/* Apply a new caching to an array of pages */
+static int ttm_pool_apply_caching(struct page **first, struct page **last,
+ enum ttm_caching caching)
+{
+#ifdef CONFIG_X86
+ unsigned int num_pages = last - first;
+
+ if (!num_pages)
+ return 0;
+
+ switch (caching) {
+ case ttm_cached:
+ break;
+ case ttm_write_combined:
+ return set_pages_array_wc(first, num_pages);
+ case ttm_uncached:
+ return set_pages_array_uc(first, num_pages);
+ }
+#endif
+ return 0;
+}
+
+/* Map pages of 1 << order size and fill the DMA address array */
+static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr)
+{
+ dma_addr_t addr;
+ unsigned int i;
+
+ if (pool->use_dma_alloc) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ addr = dma->addr;
+ } else {
+ size_t size = (1ULL << order) * PAGE_SIZE;
+
+ addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(pool->dev, addr))
+ return -EFAULT;
+ }
+
+ for (i = 1 << order; i ; --i) {
+ *(*dma_addr)++ = addr;
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/* Unmap pages of 1 << order size */
+static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
+ unsigned int num_pages)
+{
+ /* Unmapped while freeing the page */
+ if (pool->use_dma_alloc)
+ return;
+
+ dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
+ DMA_BIDIRECTIONAL);
+}
+
+/* Give pages back to a specific pool_type */
+static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
+{
+ unsigned int i, num_pages = 1 << pt->order;
+
+ for (i = 0; i < num_pages; ++i) {
+ if (PageHighMem(p))
+ clear_highpage(p + i);
+ else
+ clear_page(page_address(p + i));
+ }
+
+ spin_lock(&pt->lock);
+ list_add(&p->lru, &pt->pages);
+ spin_unlock(&pt->lock);
+ atomic_long_add(1 << pt->order, &allocated_pages);
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+{
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
+ if (p) {
+ atomic_long_sub(1 << pt->order, &allocated_pages);
+ list_del(&p->lru);
+ }
+ spin_unlock(&pt->lock);
+
+ return p;
+}
+
+/* Initialize and add a pool type to the global shrinker list */
+static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
+ enum ttm_caching caching, unsigned int order)
+{
+ pt->pool = pool;
+ pt->caching = caching;
+ pt->order = order;
+ spin_lock_init(&pt->lock);
+ INIT_LIST_HEAD(&pt->pages);
+
+ spin_lock(&shrinker_lock);
+ list_add_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+}
+
+/* Remove a pool_type from the global shrinker list and free all pages */
+static void ttm_pool_type_fini(struct ttm_pool_type *pt)
+{
+ struct page *p;
+
+ spin_lock(&shrinker_lock);
+ list_del(&pt->shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ while ((p = ttm_pool_type_take(pt)))
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+}
+
+/* Return the pool_type to use for the given caching and order */
+static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
+ enum ttm_caching caching,
+ unsigned int order)
+{
+ if (pool->use_dma_alloc)
+ return &pool->caching[caching].orders[order];
+
+#ifdef CONFIG_X86
+ switch (caching) {
+ case ttm_write_combined:
+ if (pool->use_dma32)
+ return &global_dma32_write_combined[order];
+
+ return &global_write_combined[order];
+ case ttm_uncached:
+ if (pool->use_dma32)
+ return &global_dma32_uncached[order];
+
+ return &global_uncached[order];
+ default:
+ break;
+ }
+#endif
+
+ return NULL;
+}
+
+/* Free pages using the global shrinker list */
+static unsigned int ttm_pool_shrink(void)
+{
+ struct ttm_pool_type *pt;
+ unsigned int num_pages;
+ struct page *p;
+
+ spin_lock(&shrinker_lock);
+ pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
+
+ p = ttm_pool_type_take(pt);
+ if (p) {
+ ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
+ num_pages = 1 << pt->order;
+ } else {
+ num_pages = 0;
+ }
+
+ return num_pages;
+}
+
+/* Return the allocation order for a page */
+static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
+{
+ if (pool->use_dma_alloc) {
+ struct ttm_pool_dma *dma = (void *)p->private;
+
+ return dma->vaddr & ~PAGE_MASK;
+ }
+
+ return p->private;
+}
+
+/* Called when we got a page, either from a pool or newly allocated */
+static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ struct page *p, dma_addr_t **dma_addr,
+ unsigned long *num_pages,
+ struct page ***pages)
+{
+ unsigned int i;
+ int r;
+
+ if (*dma_addr) {
+ r = ttm_pool_map(pool, order, p, dma_addr);
+ if (r)
+ return r;
+ }
+
+ *num_pages -= 1 << order;
+ for (i = 1 << order; i; --i, ++(*pages), ++p)
+ **pages = p;
+
+ return 0;
+}
+
+/**
+ * ttm_pool_alloc - Fill a ttm_tt object
+ *
+ * @pool: ttm_pool to use
+ * @tt: ttm_tt object to fill
+ * @ctx: operation context
+ *
+ * Fill the ttm_tt object with pages and also make sure to DMA map them when
+ * necessary.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ struct ttm_operation_ctx *ctx)
+{
+ unsigned long num_pages = tt->num_pages;
+ dma_addr_t *dma_addr = tt->dma_address;
+ struct page **caching = tt->pages;
+ struct page **pages = tt->pages;
+ gfp_t gfp_flags = GFP_USER;
+ unsigned int i, order;
+ struct page *p;
+ int r;
+
+ WARN_ON(!num_pages || ttm_tt_is_populated(tt));
+ WARN_ON(dma_addr && !pool->dev);
+
+ if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ if (ctx->gfp_retry_mayfail)
+ gfp_flags |= __GFP_RETRY_MAYFAIL;
+
+ if (pool->use_dma32)
+ gfp_flags |= GFP_DMA32;
+ else
+ gfp_flags |= GFP_HIGHUSER;
+
+ for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+ num_pages;
+ order = min_t(unsigned int, order, __fls(num_pages))) {
+ struct ttm_pool_type *pt;
+
+ pt = ttm_pool_select_type(pool, tt->caching, order);
+ p = pt ? ttm_pool_type_take(pt) : NULL;
+ if (p) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+
+ do {
+ r = ttm_pool_page_allocated(pool, order, p,
+ &dma_addr,
+ &num_pages,
+ &pages);
+ if (r)
+ goto error_free_page;
+
+ if (num_pages < (1 << order))
+ break;
+
+ p = ttm_pool_type_take(pt);
+ } while (p);
+ caching = pages;
+ }
+
+ while (num_pages >= (1 << order) &&
+ (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
+
+ if (PageHighMem(p)) {
+ r = ttm_pool_apply_caching(caching, pages,
+ tt->caching);
+ if (r)
+ goto error_free_page;
+ }
+ r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
+ &num_pages, &pages);
+ if (r)
+ goto error_free_page;
+ if (PageHighMem(p))
+ caching = pages;
+ }
+
+ if (!p) {
+ if (order) {
+ --order;
+ continue;
+ }
+ r = -ENOMEM;
+ goto error_free_all;
+ }
+ }
+
+ r = ttm_pool_apply_caching(caching, pages, tt->caching);
+ if (r)
+ goto error_free_all;
+
+ return 0;
+
+error_free_page:
+ ttm_pool_free_page(pool, tt->caching, order, p);
+
+error_free_all:
+ num_pages = tt->num_pages - num_pages;
+ for (i = 0; i < num_pages; ) {
+ order = ttm_pool_page_order(pool, tt->pages[i]);
+ ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+ i += 1 << order;
+ }
+
+ return r;
+}
+EXPORT_SYMBOL(ttm_pool_alloc);
+
+/**
+ * ttm_pool_free - Free the backing pages from a ttm_tt object
+ *
+ * @pool: Pool to give pages back to.
+ * @tt: ttm_tt object to unpopulate
+ *
+ * Give the backing pages back to a pool or free them
+ */
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+{
+ unsigned int i;
+
+ for (i = 0; i < tt->num_pages; ) {
+ struct page *p = tt->pages[i];
+ unsigned int order, num_pages;
+ struct ttm_pool_type *pt;
+
+ order = ttm_pool_page_order(pool, p);
+ num_pages = 1ULL << order;
+ if (tt->dma_address)
+ ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+
+ pt = ttm_pool_select_type(pool, tt->caching, order);
+ if (pt)
+ ttm_pool_type_give(pt, tt->pages[i]);
+ else
+ ttm_pool_free_page(pool, tt->caching, order,
+ tt->pages[i]);
+
+ i += num_pages;
+ }
+
+ while (atomic_long_read(&allocated_pages) > page_pool_size)
+ ttm_pool_shrink();
+}
+EXPORT_SYMBOL(ttm_pool_free);
+
+/**
+ * ttm_pool_init - Initialize a pool
+ *
+ * @pool: the pool to initialize
+ * @dev: device for DMA allocations and mappings
+ * @use_dma_alloc: true if coherent DMA alloc should be used
+ * @use_dma32: true if GFP_DMA32 should be used
+ *
+ * Initialize the pool and its pool types.
+ */
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+ bool use_dma_alloc, bool use_dma32)
+{
+ unsigned int i, j;
+
+ WARN_ON(!dev && use_dma_alloc);
+
+ pool->dev = dev;
+ pool->use_dma_alloc = use_dma_alloc;
+ pool->use_dma32 = use_dma32;
+
+ if (use_dma_alloc) {
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_init(&pool->caching[i].orders[j],
+ pool, i, j);
+ }
+}
+
+/**
+ * ttm_pool_fini - Cleanup a pool
+ *
+ * @pool: the pool to clean up
+ *
+ * Free all pages in the pool and unregister the types from the global
+ * shrinker.
+ */
+void ttm_pool_fini(struct ttm_pool *pool)
+{
+ unsigned int i, j;
+
+ if (pool->use_dma_alloc) {
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ }
+
+ /* We removed the pool types from the LRU, but we need to also make sure
+ * that no shrinker is concurrently freeing pages from the pool.
+ */
+ synchronize_shrinkers();
+}
+
+/* As long as pages are available make sure to release at least one */
+static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_freed = 0;
+
+ do
+ num_freed += ttm_pool_shrink();
+ while (!num_freed && atomic_long_read(&allocated_pages));
+
+ return num_freed;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_pages = atomic_long_read(&allocated_pages);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+ unsigned int count = 0;
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ /* Only used for debugfs, the overhead doesn't matter */
+ list_for_each_entry(p, &pt->pages, lru)
+ ++count;
+ spin_unlock(&pt->lock);
+
+ return count;
+}
+
+/* Print a nice header for the order */
+static void ttm_pool_debugfs_header(struct seq_file *m)
+{
+ unsigned int i;
+
+ seq_puts(m, "\t ");
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " ---%2u---", i);
+ seq_puts(m, "\n");
+}
+
+/* Dump information about the different pool types */
+static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
+ struct seq_file *m)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
+ seq_puts(m, "\n");
+}
+
+/* Dump the total amount of allocated pages */
+static void ttm_pool_debugfs_footer(struct seq_file *m)
+{
+ seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
+ atomic_long_read(&allocated_pages), page_pool_size);
+}
+
+/* Dump the information for the global pools */
+static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
+{
+ ttm_pool_debugfs_header(m);
+
+ spin_lock(&shrinker_lock);
+ seq_puts(m, "wc\t:");
+ ttm_pool_debugfs_orders(global_write_combined, m);
+ seq_puts(m, "uc\t:");
+ ttm_pool_debugfs_orders(global_uncached, m);
+ seq_puts(m, "wc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_write_combined, m);
+ seq_puts(m, "uc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_uncached, m);
+ spin_unlock(&shrinker_lock);
+
+ ttm_pool_debugfs_footer(m);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
+
+/**
+ * ttm_pool_debugfs - Debugfs dump function for a pool
+ *
+ * @pool: the pool to dump the information for
+ * @m: seq_file to dump to
+ *
+ * Make a debugfs dump with the per pool and global information.
+ */
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
+{
+ unsigned int i;
+
+ if (!pool->use_dma_alloc) {
+ seq_puts(m, "unused\n");
+ return 0;
+ }
+
+ ttm_pool_debugfs_header(m);
+
+ spin_lock(&shrinker_lock);
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
+ seq_puts(m, "DMA ");
+ switch (i) {
+ case ttm_cached:
+ seq_puts(m, "\t:");
+ break;
+ case ttm_write_combined:
+ seq_puts(m, "wc\t:");
+ break;
+ case ttm_uncached:
+ seq_puts(m, "uc\t:");
+ break;
+ }
+ ttm_pool_debugfs_orders(pool->caching[i].orders, m);
+ }
+ spin_unlock(&shrinker_lock);
+
+ ttm_pool_debugfs_footer(m);
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_debugfs);
+
+/* Test the shrinker functions and dump the result */
+static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
+{
+ struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
+ ttm_pool_shrinker_scan(&mm_shrinker, &sc));
+ fs_reclaim_release(GFP_KERNEL);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
+
+#endif
+
+/**
+ * ttm_pool_mgr_init - Initialize globals
+ *
+ * @num_pages: default number of pages
+ *
+ * Initialize the global locks and lists for the MM shrinker.
+ */
+int ttm_pool_mgr_init(unsigned long num_pages)
+{
+ unsigned int i;
+
+ if (!page_pool_size)
+ page_pool_size = num_pages;
+
+ spin_lock_init(&shrinker_lock);
+ INIT_LIST_HEAD(&shrinker_list);
+
+ for (i = 0; i < MAX_ORDER; ++i) {
+ ttm_pool_type_init(&global_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+
+ ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_dma32_uncached[i], NULL,
+ ttm_uncached, i);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
+ &ttm_pool_debugfs_globals_fops);
+ debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_pool_debugfs_shrink_fops);
+#endif
+
+ mm_shrinker.count_objects = ttm_pool_shrinker_count;
+ mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker.seeks = 1;
+ return register_shrinker(&mm_shrinker, "drm-ttm_pool");
+}
+
+/**
+ * ttm_pool_mgr_fini - Finalize globals
+ *
+ * Cleanup the global pools and unregister the MM shrinker.
+ */
+void ttm_pool_mgr_fini(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_ORDER; ++i) {
+ ttm_pool_type_fini(&global_write_combined[i]);
+ ttm_pool_type_fini(&global_uncached[i]);
+
+ ttm_pool_type_fini(&global_dma32_write_combined[i]);
+ ttm_pool_type_fini(&global_dma32_uncached[i]);
+ }
+
+ unregister_shrinker(&mm_shrinker);
+ WARN_ON(!list_empty(&shrinker_list));
+}