path: root/drivers/infiniband/hw/hns/hns_roce_hem.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit	5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree	cc5c2d0a898769fd59549594fedb3ee6f84e59a0	/drivers/infiniband/hw/hns/hns_roce_hem.c
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull networking updates from Jakub Kicinski:

 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis.
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using the nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_hem.c')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hem.c	1485
1 file changed, 1485 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
new file mode 100644
index 000000000..aa8a08d1c
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -0,0 +1,1485 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "hns_roce_device.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_common.h"
+
+#define HEM_INDEX_BUF BIT(0)
+#define HEM_INDEX_L0 BIT(1)
+#define HEM_INDEX_L1 BIT(2)
+struct hns_roce_hem_index {
+ u64 buf;
+ u64 l0;
+ u64 l1;
+ u32 inited; /* indicates which indexes are valid */
+};
+
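+/*
+ * A table type uses multi-hop addressing when a non-zero hop number is
+ * configured for it; a hop number of 0 means the table is addressed
+ * directly.
+ */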
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
+{
+ int hop_num = 0;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_SCCC:
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
+ case HEM_TYPE_GMV:
+ hop_num = hr_dev->caps.gmv_hop_num;
+ break;
+ default:
+ return false;
+ }
+
+ return hop_num;
+}
+
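+/*
+ * Check whether every other HEM slot covered by the same BT chunk is
+ * empty, i.e. whether hem_idx is the last user of its parent BT entry.
+ */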
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
+ u32 bt_chunk_num, u64 hem_max_num)
+{
+ u64 start_idx = round_down(hem_idx, bt_chunk_num);
+ u64 check_max_num = start_idx + bt_chunk_num;
+ u64 i;
+
+ for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
+ if (i != hem_idx && hem[i])
+ return false;
+
+ return true;
+}
+
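+/* The same emptiness check, but for the L1 BT entries under one L0 slot */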
+static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
+{
+ u64 start_idx = round_down(ba_idx, bt_chunk_num);
+ int i;
+
+ for (i = 0; i < bt_chunk_num; i++)
+ if (start_idx + i != ba_idx && bt[start_idx + i])
+ return false;
+
+ return true;
+}
+
+static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
+{
+ if (check_whether_bt_num_3(table_type, hop_num))
+ return 3;
+ else if (check_whether_bt_num_2(table_type, hop_num))
+ return 2;
+ else if (check_whether_bt_num_1(table_type, hop_num))
+ return 1;
+ else
+ return 0;
+}
+
+static int get_hem_table_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_mhop *mhop,
+ u32 type)
+{
+ struct device *dev = hr_dev->dev;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
+ mhop->hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SCCC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
+ mhop->hop_num = hr_dev->caps.sccc_hop_num;
+ break;
+ case HEM_TYPE_QPC_TIMER:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
+ break;
+ case HEM_TYPE_CQC_TIMER:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
+ mhop->hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_GMV:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
+ PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
+ PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
+ mhop->hop_num = hr_dev->caps.gmv_hop_num;
+ break;
+ default:
+ dev_err(dev, "table type %u does not support multi-hop addressing!\n",
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop)
+{
+ struct device *dev = hr_dev->dev;
+ u32 chunk_ba_num;
+ u32 chunk_size;
+ u32 table_idx;
+ u32 bt_num;
+
+ if (get_hem_table_config(hr_dev, mhop, table->type))
+ return -EINVAL;
+
+ if (!obj)
+ return 0;
+
+ /*
+ * QPC/MTPT/CQC/SRQC/SCCC tables allocate HEM for buffer pages.
+ * MTT/CQE tables allocate HEM for BT (base address table) pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
+ mhop->bt_chunk_size;
+ table_idx = *obj / (chunk_size / table->obj_size);
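+ /*
+ * Decompose table_idx into the per-level BT indexes: each BT entry
+ * fans out to chunk_ba_num children, so table_idx is read as a base
+ * chunk_ba_num number with the lowest digit indexing the bottom level
+ * and l0_idx the top.
+ */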
+ switch (bt_num) {
+ case 3:
+ mhop->l2_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
+ mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
+ break;
+ case 2:
+ mhop->l1_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l0_idx = table_idx / chunk_ba_num;
+ break;
+ case 1:
+ mhop->l0_idx = table_idx;
+ break;
+ default:
+ dev_err(dev, "table type %u does not support hop_num = %u!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+ if (mhop->l0_idx >= mhop->ba_l0_num)
+ mhop->l0_idx %= mhop->ba_l0_num;
+
+ return 0;
+}
+
+static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+ int npages,
+ unsigned long hem_alloc_size,
+ gfp_t gfp_mask)
+{
+ struct hns_roce_hem_chunk *chunk = NULL;
+ struct hns_roce_hem *hem;
+ struct scatterlist *mem;
+ int order;
+ void *buf;
+
+ WARN_ON(gfp_mask & __GFP_HIGHMEM);
+
+ hem = kmalloc(sizeof(*hem),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!hem)
+ return NULL;
+
+ INIT_LIST_HEAD(&hem->chunk_list);
+
+ order = get_order(hem_alloc_size);
+
+ while (npages > 0) {
+ if (!chunk) {
+ chunk = kmalloc(sizeof(*chunk),
+ gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ if (!chunk)
+ goto fail;
+
+ sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
+ chunk->npages = 0;
+ chunk->nsg = 0;
+ memset(chunk->buf, 0, sizeof(chunk->buf));
+ list_add_tail(&chunk->list, &hem->chunk_list);
+ }
+
+ while (1 << order > npages)
+ --order;
+
+ /*
+ * Allocate the whole block in one go. If that fails, do not fall
+ * back to smaller allocations; just return failure.
+ */
+ mem = &chunk->mem[chunk->npages];
+ buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
+ &sg_dma_address(mem), gfp_mask);
+ if (!buf)
+ goto fail;
+
+ chunk->buf[chunk->npages] = buf;
+ sg_dma_len(mem) = PAGE_SIZE << order;
+
+ ++chunk->npages;
+ ++chunk->nsg;
+ npages -= 1 << order;
+ }
+
+ return hem;
+
+fail:
+ hns_roce_free_hem(hr_dev, hem);
+ return NULL;
+}
+
+void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
+{
+ struct hns_roce_hem_chunk *chunk, *tmp;
+ int i;
+
+ if (!hem)
+ return;
+
+ list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i)
+ dma_free_coherent(hr_dev->dev,
+ sg_dma_len(&chunk->mem[i]),
+ chunk->buf[i],
+ sg_dma_address(&chunk->mem[i]));
+ kfree(chunk);
+ }
+
+ kfree(hem);
+}
+
+static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long mhop_obj = obj;
+ u32 l0_idx, l1_idx, l2_idx;
+ u32 chunk_ba_num;
+ u32 bt_num;
+ int ret;
+
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
+ if (ret)
+ return ret;
+
+ l0_idx = mhop->l0_idx;
+ l1_idx = mhop->l1_idx;
+ l2_idx = mhop->l2_idx;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+ switch (bt_num) {
+ case 3:
+ index->l1 = l0_idx * chunk_ba_num + l1_idx;
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
+ l1_idx * chunk_ba_num + l2_idx;
+ break;
+ case 2:
+ index->l0 = l0_idx;
+ index->buf = l0_idx * chunk_ba_num + l1_idx;
+ break;
+ case 1:
+ index->buf = l0_idx;
+ break;
+ default:
+ ibdev_err(ibdev, "table %u does not support mhop.hop_num = %u!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+
+ if (unlikely(index->buf >= table->num_hem)) {
+ ibdev_err(ibdev, "table %u exceeds HEM limit, idx %llu, max %lu!\n",
+ table->type, index->buf, table->num_hem);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void free_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+
+ if (index->inited & HEM_INDEX_BUF) {
+ hns_roce_free_hem(hr_dev, table->hem[index->buf]);
+ table->hem[index->buf] = NULL;
+ }
+
+ if (index->inited & HEM_INDEX_L1) {
+ dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
+ table->bt_l1_dma_addr[index->l1]);
+ table->bt_l1[index->l1] = NULL;
+ }
+
+ if (index->inited & HEM_INDEX_L0) {
+ dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
+ table->bt_l0_dma_addr[index->l0]);
+ table->bt_l0[index->l0] = NULL;
+ }
+}
+
+static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ u32 bt_size = mhop->bt_chunk_size;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_iter iter;
+ gfp_t flag;
+ u64 bt_ba;
+ u32 size;
+ int ret;
+
+ /* alloc the L0 BT chunk, which holds L1 base addresses */
+ if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
+ check_whether_bt_num_2(table->type, mhop->hop_num)) &&
+ !table->bt_l0[index->l0]) {
+ table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l0_dma_addr[index->l0],
+ GFP_KERNEL);
+ if (!table->bt_l0[index->l0]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ index->inited |= HEM_INDEX_L0;
+ }
+
+ /* alloc the L1 BT chunk, which holds L2 base addresses */
+ if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
+ !table->bt_l1[index->l1]) {
+ table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
+ &table->bt_l1_dma_addr[index->l1],
+ GFP_KERNEL);
+ if (!table->bt_l1[index->l1]) {
+ ret = -ENOMEM;
+ goto err_alloc_hem;
+ }
+ index->inited |= HEM_INDEX_L1;
+ *(table->bt_l0[index->l0] + mhop->l1_idx) =
+ table->bt_l1_dma_addr[index->l1];
+ }
+
+ /*
+ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
+ flag = GFP_KERNEL | __GFP_NOWARN;
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
+ size, flag);
+ if (!table->hem[index->buf]) {
+ ret = -ENOMEM;
+ goto err_alloc_hem;
+ }
+
+ index->inited |= HEM_INDEX_BUF;
+ hns_roce_hem_first(table->hem[index->buf], &iter);
+ bt_ba = hns_roce_hem_addr(&iter);
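+ /* record the new chunk's base address in its parent BT entry */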
+ if (table->type < HEM_TYPE_MTT) {
+ if (mhop->hop_num == 2)
+ *(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
+ else if (mhop->hop_num == 1)
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ } else if (mhop->hop_num == 2) {
+ *(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
+ }
+
+ return 0;
+err_alloc_hem:
+ free_mhop_hem(hr_dev, table, mhop, index);
+out:
+ return ret;
+}
+
+static int set_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 step_idx;
+ int ret = 0;
+
+ if (index->inited & HEM_INDEX_L0) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM step 0 failed!\n");
+ goto out;
+ }
+ }
+
+ if (index->inited & HEM_INDEX_L1) {
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM step 1 failed!\n");
+ goto out;
+ }
+ }
+
+ if (index->inited & HEM_INDEX_BUF) {
+ if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = mhop->hop_num;
+ ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
+ if (ret)
+ ibdev_err(ibdev, "set HEM last step failed!\n");
+ }
+out:
+ return ret;
+}
+
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
+ int ret;
+
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "calc hem config failed!\n");
+ return ret;
+ }
+
+ mutex_lock(&table->mutex);
+ if (table->hem[index.buf]) {
+ refcount_inc(&table->hem[index.buf]->refcount);
+ goto out;
+ }
+
+ ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "alloc mhop hem failed!\n");
+ goto out;
+ }
+
+ /* set HEM base address to hardware */
+ if (table->type < HEM_TYPE_MTT) {
+ ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "set HEM address to HW failed!\n");
+ goto err_alloc;
+ }
+ }
+
+ refcount_set(&table->hem[index.buf]->refcount, 1);
+ goto out;
+
+err_alloc:
+ free_mhop_hem(hr_dev, table, &mhop, &index);
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
+int hns_roce_table_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long i;
+ int ret = 0;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type))
+ return hns_roce_table_mhop_get(hr_dev, table, obj);
+
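+ /* one HEM chunk holds table_chunk_size / obj_size objects */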
+ i = obj / (table->table_chunk_size / table->obj_size);
+
+ mutex_lock(&table->mutex);
+
+ if (table->hem[i]) {
+ refcount_inc(&table->hem[i]->refcount);
+ goto out;
+ }
+
+ table->hem[i] = hns_roce_alloc_hem(hr_dev,
+ table->table_chunk_size >> PAGE_SHIFT,
+ table->table_chunk_size,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!table->hem[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Set the HEM base address (physical address) in hardware */
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) {
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed.\n");
+ goto out;
+ }
+
+ refcount_set(&table->hem[i]->refcount, 1);
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
+static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
+ struct hns_roce_hem_mhop *mhop,
+ struct hns_roce_hem_index *index)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 hop_num = mhop->hop_num;
+ u32 chunk_ba_num;
+ u32 step_idx;
+
+ index->inited = HEM_INDEX_BUF;
+ chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem))
+ index->inited |= HEM_INDEX_L0;
+ } else if (check_whether_bt_num_3(table->type, hop_num)) {
+ if (hns_roce_check_hem_null(table->hem, index->buf,
+ chunk_ba_num, table->num_hem)) {
+ index->inited |= HEM_INDEX_L1;
+ if (hns_roce_check_bt_null(table->bt_l1, index->l1,
+ chunk_ba_num))
+ index->inited |= HEM_INDEX_L0;
+ }
+ }
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (hop_num == HNS_ROCE_HOP_NUM_0)
+ step_idx = 0;
+ else
+ step_idx = hop_num;
+
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
+ ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
+
+ if (index->inited & HEM_INDEX_L1)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
+
+ if (index->inited & HEM_INDEX_L0)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
+ }
+}
+
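+/*
+ * When check_refcount is set, drop one reference and free the HEM only
+ * when the refcount reaches zero; table cleanup passes 0 to free it
+ * unconditionally.
+ */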
+static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj,
+ int check_refcount)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_hem_index index = {};
+ struct hns_roce_hem_mhop mhop = {};
+ int ret;
+
+ ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
+ if (ret) {
+ ibdev_err(ibdev, "calc hem config failed!\n");
+ return;
+ }
+
+ if (!check_refcount)
+ mutex_lock(&table->mutex);
+ else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
+ &table->mutex))
+ return;
+
+ clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
+ free_mhop_hem(hr_dev, table, &mhop, &index);
+
+ mutex_unlock(&table->mutex);
+}
+
+void hns_roce_table_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long i;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_table_mhop_put(hr_dev, table, obj, 1);
+ return;
+ }
+
+ i = obj / (table->table_chunk_size / table->obj_size);
+
+ if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
+ &table->mutex))
+ return;
+
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT))
+ dev_warn(dev, "failed to clear HEM base address.\n");
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
+
+ mutex_unlock(&table->mutex);
+}
+
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj, dma_addr_t *dma_handle)
+{
+ struct hns_roce_hem_chunk *chunk;
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem *hem;
+ unsigned long mhop_obj = obj;
+ unsigned long obj_per_chunk;
+ unsigned long idx_offset;
+ int offset, dma_offset;
+ void *addr = NULL;
+ u32 hem_idx = 0;
+ int length;
+ int i, j;
+
+ mutex_lock(&table->mutex);
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ obj_per_chunk = table->table_chunk_size / table->obj_size;
+ hem = table->hem[obj / obj_per_chunk];
+ idx_offset = obj % obj_per_chunk;
+ dma_offset = offset = idx_offset * table->obj_size;
+ } else {
+ u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
+
+ if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
+ goto out;
+ /* multi-hop: locate the bottom-level HEM by its l0/l1 index */
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ if (mhop.hop_num == 2)
+ hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
+ else if (mhop.hop_num == 1 ||
+ mhop.hop_num == HNS_ROCE_HOP_NUM_0)
+ hem_idx = i;
+
+ hem = table->hem[hem_idx];
+ dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
+ if (mhop.hop_num == 2)
+ dma_offset = offset = 0;
+ }
+
+ if (!hem)
+ goto out;
+
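+ /* walk the chunk pages until the offsets fall inside one sg entry */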
+ list_for_each_entry(chunk, &hem->chunk_list, list) {
+ for (i = 0; i < chunk->npages; ++i) {
+ length = sg_dma_len(&chunk->mem[i]);
+ if (dma_handle && dma_offset >= 0) {
+ if (length > (u32)dma_offset)
+ *dma_handle = sg_dma_address(
+ &chunk->mem[i]) + dma_offset;
+ dma_offset -= length;
+ }
+
+ if (length > (u32)offset) {
+ addr = chunk->buf[i] + offset;
+ goto out;
+ }
+ offset -= length;
+ }
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return addr;
+}
+
+int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, u32 type,
+ unsigned long obj_size, unsigned long nobj)
+{
+ unsigned long obj_per_chunk;
+ unsigned long num_hem;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, type)) {
+ table->table_chunk_size = hr_dev->caps.chunk_sz;
+ obj_per_chunk = table->table_chunk_size / obj_size;
+ num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+ if (!table->hem)
+ return -ENOMEM;
+ } else {
+ struct hns_roce_hem_mhop mhop = {};
+ unsigned long buf_chunk_size;
+ unsigned long bt_chunk_size;
+ unsigned long bt_chunk_num;
+ unsigned long num_bt_l0;
+ u32 hop_num;
+
+ if (get_hem_table_config(hr_dev, &mhop, type))
+ return -EINVAL;
+
+ buf_chunk_size = mhop.buf_chunk_size;
+ bt_chunk_size = mhop.bt_chunk_size;
+ num_bt_l0 = mhop.ba_l0_num;
+ hop_num = mhop.hop_num;
+
+ obj_per_chunk = buf_chunk_size / obj_size;
+ num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
+ bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
+
+ if (type >= HEM_TYPE_MTT)
+ num_bt_l0 = bt_chunk_num;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem),
+ GFP_KERNEL);
+ if (!table->hem)
+ goto err_kcalloc_hem_buf;
+
+ if (check_whether_bt_num_3(type, hop_num)) {
+ unsigned long num_bt_l1;
+
+ num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
+ table->bt_l1 = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1),
+ GFP_KERNEL);
+ if (!table->bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ table->bt_l1_dma_addr = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1_dma_addr),
+ GFP_KERNEL);
+
+ if (!table->bt_l1_dma_addr)
+ goto err_kcalloc_l1_dma;
+ }
+
+ if (check_whether_bt_num_2(type, hop_num) ||
+ check_whether_bt_num_3(type, hop_num)) {
+ table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
+ GFP_KERNEL);
+ if (!table->bt_l0)
+ goto err_kcalloc_bt_l0;
+
+ table->bt_l0_dma_addr = kcalloc(num_bt_l0,
+ sizeof(*table->bt_l0_dma_addr),
+ GFP_KERNEL);
+ if (!table->bt_l0_dma_addr)
+ goto err_kcalloc_l0_dma;
+ }
+ }
+
+ table->type = type;
+ table->num_hem = num_hem;
+ table->obj_size = obj_size;
+ mutex_init(&table->mutex);
+
+ return 0;
+
+err_kcalloc_l0_dma:
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+
+err_kcalloc_bt_l0:
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(table->hem);
+ table->hem = NULL;
+
+err_kcalloc_hem_buf:
+ return -ENOMEM;
+}
+
+static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct hns_roce_hem_mhop mhop;
+ u32 buf_chunk_size;
+ u64 obj;
+ int i;
+
+ if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
+ return;
+ buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
+ mhop.bt_chunk_size;
+
+ for (i = 0; i < table->num_hem; ++i) {
+ obj = i * buf_chunk_size / table->obj_size;
+ if (table->hem[i])
+ hns_roce_table_mhop_put(hr_dev, table, obj, 0);
+ }
+
+ kfree(table->hem);
+ table->hem = NULL;
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+ kfree(table->bt_l0_dma_addr);
+ table->bt_l0_dma_addr = NULL;
+}
+
+void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long i;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_cleanup_mhop_hem_table(hr_dev, table);
+ return;
+ }
+
+ for (i = 0; i < table->num_hem; ++i)
+ if (table->hem[i]) {
+ if (hr_dev->hw->clear_hem(hr_dev, table,
+ i * table->table_chunk_size / table->obj_size, 0))
+ dev_err(dev, "clear HEM base address failed.\n");
+
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ }
+
+ kfree(table->hem);
+}
+
+void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
+{
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
+ if (hr_dev->caps.qpc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qpc_timer_table);
+ if (hr_dev->caps.cqc_timer_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->cqc_timer_table);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+
+ if (hr_dev->caps.gmv_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
+
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
+}
+
+struct hns_roce_hem_item {
+ struct list_head list; /* link all hems in the same bt level */
+ struct list_head sibling; /* link all hems in last hop for mtt */
+ void *addr;
+ dma_addr_t dma_addr;
+ size_t count; /* max number of BA entries */
+ int start; /* start buf offset in this hem */
+ int end; /* end buf offset in this hem */
+};
+
+/* All HEM items are linked in a tree structure */
+struct hns_roce_hem_head {
+ struct list_head branch[HNS_ROCE_MAX_BT_REGION];
+ struct list_head root;
+ struct list_head leaf;
+};
+
+static struct hns_roce_hem_item *
+hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ bool exist_bt)
+{
+ struct hns_roce_hem_item *hem;
+
+ hem = kzalloc(sizeof(*hem), GFP_KERNEL);
+ if (!hem)
+ return NULL;
+
+ if (exist_bt) {
+ hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
+ &hem->dma_addr, GFP_KERNEL);
+ if (!hem->addr) {
+ kfree(hem);
+ return NULL;
+ }
+ }
+
+ hem->count = count;
+ hem->start = start;
+ hem->end = end;
+ INIT_LIST_HEAD(&hem->list);
+ INIT_LIST_HEAD(&hem->sibling);
+
+ return hem;
+}
+
+static void hem_list_free_item(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_item *hem, bool exist_bt)
+{
+ if (exist_bt)
+ dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
+ hem->addr, hem->dma_addr);
+ kfree(hem);
+}
+
+static void hem_list_free_all(struct hns_roce_dev *hr_dev,
+ struct list_head *head, bool exist_bt)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+
+ list_for_each_entry_safe(hem, temp_hem, head, list) {
+ list_del(&hem->list);
+ hem_list_free_item(hr_dev, hem, exist_bt);
+ }
+}
+
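+/* write one 64-bit base address into a BT slot */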
+static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
+ u64 table_addr)
+{
+ *(u64 *)(base_addr) = table_addr;
+}
+
+/* assign L0 table address to hem from root bt */
+static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_item *hem, void *cpu_addr,
+ u64 phy_addr)
+{
+ hem->addr = cpu_addr;
+ hem->dma_addr = (dma_addr_t)phy_addr;
+}
+
+static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
+ int offset)
+{
+ return (hem->start <= offset && offset <= hem->end);
+}
+
+static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
+ int page_offset)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+ struct hns_roce_hem_item *found = NULL;
+
+ list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
+ if (hem_list_page_is_in_range(hem, page_offset)) {
+ found = hem;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
+{
+ /*
+ * hopnum base address table levels
+ * 0 L0(buf)
+ * 1 L0 -> buf
+ * 2 L0 -> L1 -> buf
+ * 3 L0 -> L1 -> L2 -> buf
+ */
+ return bt_level >= (hopnum ? hopnum - 1 : hopnum);
+}
+
+/*
+ * calc base address entries num
+ * @hopnum: hop num of the multi-hop addressing
+ * @bt_level: base address table level
+ * @unit: ba entries per bt page
+ */
+static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
+{
+ u32 step;
+ int max;
+ int i;
+
+ if (hopnum <= bt_level)
+ return 0;
+ /*
+ * hopnum bt_level range
+ * 1 0 unit
+ * ------------
+ * 2 0 unit * unit
+ * 2 1 unit
+ * ------------
+ * 3 0 unit * unit * unit
+ * 3 1 unit * unit
+ * 3 2 unit
+ */
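+ /* i.e. range = unit ^ (hopnum - bt_level) */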
+ step = 1;
+ max = hopnum - bt_level;
+ for (i = 0; i < max; i++)
+ step = step * unit;
+
+ return step;
+}
+
+/*
+ * calc the root ba entries which could cover all regions
+ * @regions: buf region array
+ * @region_cnt: array size of @regions
+ * @unit: ba entries per bt page
+ */
+int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+ int region_cnt, int unit)
+{
+ struct hns_roce_buf_region *r;
+ int total = 0;
+ int step;
+ int i;
+
+ for (i = 0; i < region_cnt; i++) {
+ r = (struct hns_roce_buf_region *)&regions[i];
+ if (r->hopnum > 1) {
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step > 0)
+ total += (r->count + step - 1) / step;
+ } else {
+ total += r->count;
+ }
+ }
+
+ return total;
+}
+
+static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ const struct hns_roce_buf_region *r, int unit,
+ int offset, struct list_head *mid_bt,
+ struct list_head *btm_bt)
+{
+ struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
+ struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
+ struct hns_roce_hem_item *cur, *pre;
+ const int hopnum = r->hopnum;
+ int start_aligned;
+ int distance;
+ int ret = 0;
+ int max_ofs;
+ int level;
+ u32 step;
+ int end;
+
+ if (hopnum <= 1)
+ return 0;
+
+ if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
+ dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
+ return -EINVAL;
+ }
+
+ if (offset < r->offset) {
+ dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
+ offset, r->offset);
+ return -EINVAL;
+ }
+
+ distance = offset - r->offset;
+ max_ofs = r->offset + r->count - 1;
+ for (level = 0; level < hopnum; level++)
+ INIT_LIST_HEAD(&temp_list[level]);
+
+ /* config L1 bt to last bt and link them to corresponding parent */
+ for (level = 1; level < hopnum; level++) {
+ cur = hem_list_search_item(&mid_bt[level], offset);
+ if (cur) {
+ hem_ptrs[level] = cur;
+ continue;
+ }
+
+ step = hem_list_calc_ba_range(hopnum, level, unit);
+ if (step < 1) {
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ start_aligned = (distance / step) * step + r->offset;
+ end = min_t(int, start_aligned + step - 1, max_ofs);
+ cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
+ true);
+ if (!cur) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ hem_ptrs[level] = cur;
+ list_add(&cur->list, &temp_list[level]);
+ if (hem_list_is_bottom_bt(hopnum, level))
+ list_add(&cur->sibling, &temp_list[0]);
+
+ /* link bt to parent bt */
+ if (level > 1) {
+ pre = hem_ptrs[level - 1];
+ step = (cur->start - pre->start) / step * BA_BYTE_LEN;
+ hem_list_link_bt(hr_dev, pre->addr + step,
+ cur->dma_addr);
+ }
+ }
+
+ list_splice(&temp_list[0], btm_bt);
+ for (level = 1; level < hopnum; level++)
+ list_splice(&temp_list[level], &mid_bt[level]);
+
+ return 0;
+
+err_exit:
+ for (level = 1; level < hopnum; level++)
+ hem_list_free_all(hr_dev, &temp_list[level], true);
+
+ return ret;
+}
+
+static struct hns_roce_hem_item *
+alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
+ const struct hns_roce_buf_region *regions, int region_cnt)
+{
+ const struct hns_roce_buf_region *r;
+ struct hns_roce_hem_item *hem;
+ int ba_num;
+ int offset;
+
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return ERR_PTR(-ENOMEM);
+
+ if (ba_num > unit)
+ return ERR_PTR(-ENOBUFS);
+
+ offset = regions[0].offset;
+ /* point to the last region */
+ r = &regions[region_cnt - 1];
+ hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true);
+ if (!hem)
+ return ERR_PTR(-ENOMEM);
+
+ *max_ba_num = ba_num;
+
+ return hem;
+}
+
+static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ u64 phy_base, const struct hns_roce_buf_region *r,
+ struct list_head *branch_head,
+ struct list_head *leaf_head)
+{
+ struct hns_roce_hem_item *hem;
+
+ hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
+ r->count, false);
+ if (!hem)
+ return -ENOMEM;
+
+ hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
+ list_add(&hem->list, branch_head);
+ list_add(&hem->sibling, leaf_head);
+
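+ /* the fake hem occupies r->count BA slots in the root BT */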
+ return r->count;
+}
+
+static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ int unit, const struct hns_roce_buf_region *r,
+ const struct list_head *branch_head)
+{
+ struct hns_roce_hem_item *hem, *temp_hem;
+ int total = 0;
+ int offset;
+ int step;
+
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step < 1)
+ return -EINVAL;
+
+ /* if mid BTs exist, link each L1 table into its L0 slot */
+ list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
+ offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
+ hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
+ total++;
+ }
+
+ return total;
+}
+
+static int
+setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
+ int unit, int max_ba_num, struct hns_roce_hem_head *head,
+ const struct hns_roce_buf_region *regions, int region_cnt)
+{
+ const struct hns_roce_buf_region *r;
+ struct hns_roce_hem_item *root_hem;
+ void *cpu_base;
+ u64 phy_base;
+ int i, total;
+ int ret;
+
+ root_hem = list_first_entry(&head->root,
+ struct hns_roce_hem_item, list);
+ if (!root_hem)
+ return -ENOMEM;
+
+ total = 0;
+ for (i = 0; i < region_cnt && total < max_ba_num; i++) {
+ r = &regions[i];
+ if (!r->count)
+ continue;
+
+ /* all regions' mid[x][0] tables share the root BT's trunk */
+ cpu_base = root_hem->addr + total * BA_BYTE_LEN;
+ phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
+
+ /* if hopnum is 0 or 1, carve a fake hem out of the root bt,
+ * whose address is shared with the region.
+ */
+ if (hem_list_is_bottom_bt(r->hopnum, 0))
+ ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
+ &head->branch[i], &head->leaf);
+ else
+ ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
+ &hem_list->mid_bt[i][1]);
+
+ if (ret < 0)
+ return ret;
+
+ total += ret;
+ }
+
+ list_splice(&head->leaf, &hem_list->btm_bt);
+ list_splice(&head->root, &hem_list->root_bt);
+ for (i = 0; i < region_cnt; i++)
+ list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
+
+ return 0;
+}
+
+static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list, int unit,
+ const struct hns_roce_buf_region *regions,
+ int region_cnt)
+{
+ struct hns_roce_hem_item *root_hem;
+ struct hns_roce_hem_head head;
+ int max_ba_num;
+ int ret;
+ int i;
+
+ root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
+ if (root_hem)
+ return 0;
+
+ max_ba_num = 0;
+ root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
+ region_cnt);
+ if (IS_ERR(root_hem))
+ return PTR_ERR(root_hem);
+
+ /* List head for storing all allocated HEM items */
+ INIT_LIST_HEAD(&head.root);
+ INIT_LIST_HEAD(&head.leaf);
+ for (i = 0; i < region_cnt; i++)
+ INIT_LIST_HEAD(&head.branch[i]);
+
+ hem_list->root_ba = root_hem->dma_addr;
+ list_add(&root_hem->list, &head.root);
+ ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
+ region_cnt);
+ if (ret) {
+ for (i = 0; i < region_cnt; i++)
+ hem_list_free_all(hr_dev, &head.branch[i], false);
+
+ hem_list_free_all(hr_dev, &head.root, true);
+ }
+
+ return ret;
+}
+
+/* construct the base address table and link them by address hop config */
+int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list,
+ const struct hns_roce_buf_region *regions,
+ int region_cnt, unsigned int bt_pg_shift)
+{
+ const struct hns_roce_buf_region *r;
+ int ofs, end;
+ int unit;
+ int ret;
+ int i;
+
+ if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
+ dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
+ region_cnt);
+ return -EINVAL;
+ }
+
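+ /* BA entries per BT page */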
+ unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
+ for (i = 0; i < region_cnt; i++) {
+ r = &regions[i];
+ if (!r->count)
+ continue;
+
+ end = r->offset + r->count;
+ for (ofs = r->offset; ofs < end; ofs += unit) {
+ ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
+ hem_list->mid_bt[i],
+ &hem_list->btm_bt);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to alloc HEM trunk, ret = %d!\n", ret);
+ goto err_alloc;
+ }
+ }
+ }
+
+ ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
+ region_cnt);
+ if (ret)
+ dev_err(hr_dev->dev, "failed to alloc HEM root, ret = %d!\n", ret);
+ else
+ return 0;
+
+err_alloc:
+ hns_roce_hem_list_release(hr_dev, hem_list);
+
+ return ret;
+}
+
+void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list)
+{
+ int i, j;
+
+ for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+ hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
+ j != 0);
+
+ hem_list_free_all(hr_dev, &hem_list->root_bt, true);
+ INIT_LIST_HEAD(&hem_list->btm_bt);
+ hem_list->root_ba = 0;
+}
+
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
+{
+ int i, j;
+
+ INIT_LIST_HEAD(&hem_list->root_bt);
+ INIT_LIST_HEAD(&hem_list->btm_bt);
+ for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+ INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
+}
+
+void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_list *hem_list,
+ int offset, int *mtt_cnt)
+{
+ struct list_head *head = &hem_list->btm_bt;
+ struct hns_roce_hem_item *hem, *temp_hem;
+ void *cpu_base = NULL;
+ int nr = 0;
+
+ list_for_each_entry_safe(hem, temp_hem, head, sibling) {
+ if (hem_list_page_is_in_range(hem, offset)) {
+ nr = offset - hem->start;
+ cpu_base = hem->addr + nr * BA_BYTE_LEN;
+ nr = hem->end + 1 - offset;
+ break;
+ }
+ }
+
+ if (mtt_cnt)
+ *mtt_cnt = nr;
+
+ return cpu_base;
+}