From 5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Tue, 21 Feb 2023 18:24:12 -0800
Subject: Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next

Pull networking updates from Jakub Kicinski:
 "Core:

   - Add dedicated kmem_cache for typical/small skb->head, avoid having
     to access struct page at kfree time, and improve memory use.

   - Introduce sysctl to set default RPS configuration for new netdevs.

   - Define Netlink protocol specification format which can be used to
     describe messages used by each family and auto-generate parsers.
     Add tools for generating kernel data structures and uAPI headers.

   - Expose all net/core sysctls inside netns.

   - Remove 4s sleep in netpoll if carrier is instantly detected on
     boot.

   - Add configurable limit of MDB entries per port, and port-vlan.

   - Continue populating drop reasons throughout the stack.

   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:

   - Support IPv4 big TCP (TSO frames larger than 64kB).

   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range
     on a socket-by-socket basis.

   - Track and report in procfs number of MPTCP sockets used.

   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path
     manager.

   - IPv6: don't check net.ipv6.route.max_size and rely on garbage
     collection to free memory (similarly to IPv4).

   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).

   - ICMP: add per-rate limit counters.

   - Add support for user scanning requests in ieee802154.

   - Remove static WEP support.

   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate
     reporting.

   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:

   - Add a rbtree data structure following the "next-gen data structure"
     precedent set by recently added linked list, that is, by using
     kfunc + kptr instead of adding a new BPF map type.

   - Expose XDP hints via kfuncs with initial support for RX hash and
     timestamp metadata.

   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to
     better support decap on GRE tunnel devices not operating in
     collect-metadata mode.

   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.

   - Remove the need for trace_printk_lock for bpf_trace_printk and
     bpf_trace_vprintk helpers.

   - Extend libbpf's bpf_tracing.h support for tracing arguments of
     kprobes/uprobes and syscall as a special case.

   - Significantly reduce the search time for module symbols by
     livepatch and BPF.

   - Enable cpumasks to be used as kptrs, which is useful for tracing
     programs tracking which tasks end up running on which CPUs in
     different time intervals.

   - Add support for BPF trampoline on s390x and riscv64.

   - Add capability to export the XDP features supported by the NIC.

   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.

   - Add cgroup.memory=nobpf kernel parameter option to disable BPF
     memory accounting for container environments.

  Netfilter:

   - Remove the CLUSTERIP target. It has been marked as obsolete for
     years, and we still have WARN splats wrt races of the out-of-band
     /proc interface installed by this target.

   - Add 'destroy' commands to nf_tables. They are identical to the
     existing 'delete' commands, but do not return an error if the
     referenced object (set, chain, rule...) did not exist.

  Driver API:

   - Improve cpumask_local_spread() locality to help NICs set the right
     IRQ affinity on AMD platforms.

   - Separate C22 and C45 MDIO bus transactions more clearly.

   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA)
     Reconciliation Sublayer (RS) (802.3cg-2019), a modern version of
     shared-medium Ethernet.

   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99), allowing
     preemption of low-priority frames by high-priority frames.

   - Add support for controlling MACsec offload using netlink SET.

   - Rework devlink instance refcounts to allow registration and
     de-registration under the instance lock. Split the code into
     multiple files, drop some of the unnecessarily granular locks and
     factor out common parts of netlink operation handling.

   - Add TX frame aggregation parameters (for USB drivers).

   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning
     messages with notifications for debug.

   - Allow offloading of UDP NEW connections via act_ct.

   - Add support for per action HW stats in TC.

   - Support hardware miss to TC action (continue processing in SW from
     a specific point in the action chain).

   - Warn if old Wireless Extension user space interface is used with
     modern cfg80211/mac80211 drivers. Do not support Wireless
     Extensions for Wi-Fi 7 devices at all. Everyone should switch to
     using the nl80211 interface instead.

   - Improve the CAN bit timing configuration. Use extack to return
     error messages directly to user space, update the SJW handling,
     including the definition of a new default value that will benefit
     CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:

   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux

   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)

   - CAN:
      - Renesas R-Car V4H

  Drivers:

   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support

   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make
           the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q,
           8.6.5.1)

   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy

   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio
        on platforms with integrated wifi) related parameters from the
        BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support

   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator

   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence

   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...

---
 tools/bpf/bpftool/cfg.c | 485 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 485 insertions(+)
 create mode 100644 tools/bpf/bpftool/cfg.c
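The new tools/bpf/bpftool/cfg.c below builds a control-flow graph (CFG)
for a translated (xlated) BPF program: it splits the instruction stream
into functions at BPF-to-BPF pseudo calls, splits each function into
basic blocks at jump targets, links the blocks with fall-through and
jump edges, and prints the result as a Graphviz DOT graph. A minimal
usage sketch (the program id is only a placeholder for any loaded
program; the "visual" keyword of "bpftool prog dump xlated" selects the
DOT output implemented by this file, which Graphviz can then render):

    $ bpftool prog dump xlated id 3 visual > cfg.dot
    $ dot -Tsvg cfg.dot -o cfg.svg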
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
new file mode 100644
index 000000000..1951219a9
--- /dev/null
+++ b/tools/bpf/bpftool/cfg.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/list.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cfg.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+struct cfg {
+	struct list_head funcs;
+	int func_num;
+};
+
+struct func_node {
+	struct list_head l;
+	struct list_head bbs;
+	struct bpf_insn *start;
+	struct bpf_insn *end;
+	int idx;
+	int bb_num;
+};
+
+struct bb_node {
+	struct list_head l;
+	struct list_head e_prevs;
+	struct list_head e_succs;
+	struct bpf_insn *head;
+	struct bpf_insn *tail;
+	int idx;
+};
+
+#define EDGE_FLAG_EMPTY		0x0
+#define EDGE_FLAG_FALLTHROUGH	0x1
+#define EDGE_FLAG_JUMP		0x2
+struct edge_node {
+	struct list_head l;
+	struct bb_node *src;
+	struct bb_node *dst;
+	int flags;
+};
+
+#define ENTRY_BLOCK_INDEX	0
+#define EXIT_BLOCK_INDEX	1
+#define NUM_FIXED_BLOCKS	2
+#define func_prev(func)		list_prev_entry(func, l)
+#define func_next(func)		list_next_entry(func, l)
+#define bb_prev(bb)		list_prev_entry(bb, l)
+#define bb_next(bb)		list_next_entry(bb, l)
+#define entry_bb(func)		func_first_bb(func)
+#define exit_bb(func)		func_last_bb(func)
+#define cfg_first_func(cfg)	\
+	list_first_entry(&cfg->funcs, struct func_node, l)
+#define cfg_last_func(cfg)	\
+	list_last_entry(&cfg->funcs, struct func_node, l)
+#define func_first_bb(func)	\
+	list_first_entry(&func->bbs, struct bb_node, l)
+#define func_last_bb(func)	\
+	list_last_entry(&func->bbs, struct bb_node, l)
+
+static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
+{
+	struct func_node *new_func, *func;
+
+	list_for_each_entry(func, &cfg->funcs, l) {
+		if (func->start == insn)
+			return func;
+		else if (func->start > insn)
+			break;
+	}
+
+	func = func_prev(func);
+	new_func = calloc(1, sizeof(*new_func));
+	if (!new_func) {
+		p_err("OOM when allocating FUNC node");
+		return NULL;
+	}
+	new_func->start = insn;
+	new_func->idx = cfg->func_num;
+	list_add(&new_func->l, &func->l);
+	cfg->func_num++;
+
+	return new_func;
+}
+
+static struct bb_node *func_append_bb(struct func_node *func,
+				      struct bpf_insn *insn)
+{
+	struct bb_node *new_bb, *bb;
+
+	list_for_each_entry(bb, &func->bbs, l) {
+		if (bb->head == insn)
+			return bb;
+		else if (bb->head > insn)
+			break;
+	}
+
+	bb = bb_prev(bb);
+	new_bb = calloc(1, sizeof(*new_bb));
+	if (!new_bb) {
+		p_err("OOM when allocating BB node");
+		return NULL;
+	}
+	new_bb->head = insn;
+	INIT_LIST_HEAD(&new_bb->e_prevs);
+	INIT_LIST_HEAD(&new_bb->e_succs);
+	list_add(&new_bb->l, &bb->l);
+
+	return new_bb;
+}
+
+static struct bb_node *func_insert_dummy_bb(struct list_head *after)
+{
+	struct bb_node *bb;
+
+	bb = calloc(1, sizeof(*bb));
+	if (!bb) {
+		p_err("OOM when allocating BB node");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&bb->e_prevs);
+	INIT_LIST_HEAD(&bb->e_succs);
+	list_add(&bb->l, after);
+
+	return bb;
+}
+
+static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
+				struct bpf_insn *end)
+{
+	struct func_node *func, *last_func;
+
+	func = cfg_append_func(cfg, cur);
+	if (!func)
+		return true;
+
+	for (; cur < end; cur++) {
+		if (cur->code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (cur->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		func = cfg_append_func(cfg, cur + cur->off + 1);
+		if (!func)
+			return true;
+	}
+
+	last_func = cfg_last_func(cfg);
+	last_func->end = end - 1;
+	func = cfg_first_func(cfg);
+	list_for_each_entry_from(func, &last_func->l, l) {
+		func->end = func_next(func)->start - 1;
+	}
+
+	return false;
+}
+
+static bool is_jmp_insn(__u8 code)
+{
+	return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
+}
+
+static bool func_partition_bb_head(struct func_node *func)
+{
+	struct bpf_insn *cur, *end;
+	struct bb_node *bb;
+
+	cur = func->start;
+	end = func->end;
+	INIT_LIST_HEAD(&func->bbs);
+	bb = func_append_bb(func, cur);
+	if (!bb)
+		return true;
+
+	for (; cur <= end; cur++) {
+		if (is_jmp_insn(cur->code)) {
+			__u8 opcode = BPF_OP(cur->code);
+
+			if (opcode == BPF_EXIT || opcode == BPF_CALL)
+				continue;
+
+			bb = func_append_bb(func, cur + cur->off + 1);
+			if (!bb)
+				return true;
+
+			if (opcode != BPF_JA) {
+				bb = func_append_bb(func, cur + 1);
+				if (!bb)
+					return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void func_partition_bb_tail(struct func_node *func)
+{
+	unsigned int bb_idx = NUM_FIXED_BLOCKS;
+	struct bb_node *bb, *last;
+
+	last = func_last_bb(func);
+	last->tail = func->end;
+	bb = func_first_bb(func);
+	list_for_each_entry_from(bb, &last->l, l) {
+		bb->tail = bb_next(bb)->head - 1;
+		bb->idx = bb_idx++;
+	}
+
+	last->idx = bb_idx++;
+	func->bb_num = bb_idx;
+}
+
+static bool func_add_special_bb(struct func_node *func)
+{
+	struct bb_node *bb;
+
+	bb = func_insert_dummy_bb(&func->bbs);
+	if (!bb)
+		return true;
+	bb->idx = ENTRY_BLOCK_INDEX;
+
+	bb = func_insert_dummy_bb(&func_last_bb(func)->l);
+	if (!bb)
+		return true;
+	bb->idx = EXIT_BLOCK_INDEX;
+
+	return false;
+}
+
+static bool func_partition_bb(struct func_node *func)
+{
+	if (func_partition_bb_head(func))
+		return true;
+
+	func_partition_bb_tail(func);
+
+	return false;
+}
+
+static struct bb_node *func_search_bb_with_head(struct func_node *func,
+						struct bpf_insn *insn)
+{
+	struct bb_node *bb;
+
+	list_for_each_entry(bb, &func->bbs, l) {
+		if (bb->head == insn)
+			return bb;
+	}
+
+	return NULL;
+}
+
+static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
+				  int flags)
+{
+	struct edge_node *e;
+
+	e = calloc(1, sizeof(*e));
+	if (!e) {
+		p_err("OOM when allocating edge node");
+		return NULL;
+	}
+
+	if (src)
+		e->src = src;
+	if (dst)
+		e->dst = dst;
+
+	e->flags |= flags;
+
+	return e;
+}
+
+static bool func_add_bb_edges(struct func_node *func)
+{
+	struct bpf_insn *insn;
+	struct edge_node *e;
+	struct bb_node *bb;
+
+	bb = entry_bb(func);
+	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
+	if (!e)
+		return true;
+	list_add_tail(&e->l, &bb->e_succs);
+
+	bb = exit_bb(func);
+	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
+	if (!e)
+		return true;
+	list_add_tail(&e->l, &bb->e_prevs);
+
+	bb = entry_bb(func);
+	bb = bb_next(bb);
+	list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
+		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
+		if (!e)
+			return true;
+		e->src = bb;
+
+		insn = bb->tail;
+		if (!is_jmp_insn(insn->code) ||
+		    BPF_OP(insn->code) == BPF_EXIT) {
+			e->dst = bb_next(bb);
+			e->flags |= EDGE_FLAG_FALLTHROUGH;
+			list_add_tail(&e->l, &bb->e_succs);
+			continue;
+		} else if (BPF_OP(insn->code) == BPF_JA) {
+			e->dst = func_search_bb_with_head(func,
+							  insn + insn->off + 1);
+			e->flags |= EDGE_FLAG_JUMP;
+			list_add_tail(&e->l, &bb->e_succs);
+			continue;
+		}
+
+		e->dst = bb_next(bb);
+		e->flags |= EDGE_FLAG_FALLTHROUGH;
+		list_add_tail(&e->l, &bb->e_succs);
+
+		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
+		if (!e)
+			return true;
+		e->src = bb;
+		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
+		list_add_tail(&e->l, &bb->e_succs);
+	}
+
+	return false;
+}
+
+static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
+{
+	int cnt = len / sizeof(*insn);
+	struct func_node *func;
+
+	INIT_LIST_HEAD(&cfg->funcs);
+
+	if (cfg_partition_funcs(cfg, insn, insn + cnt))
+		return true;
+
+	list_for_each_entry(func, &cfg->funcs, l) {
+		if (func_partition_bb(func) || func_add_special_bb(func))
+			return true;
+
+		if (func_add_bb_edges(func))
+			return true;
+	}
+
+	return false;
+}
+
+static void cfg_destroy(struct cfg *cfg)
+{
+	struct func_node *func, *func2;
+
+	list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
+		struct bb_node *bb, *bb2;
+
+		list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
+			struct edge_node *e, *e2;
+
+			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
+				list_del(&e->l);
+				free(e);
+			}
+
+			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
+				list_del(&e->l);
+				free(e);
+			}
+
+			list_del(&bb->l);
+			free(bb);
+		}
+
+		list_del(&func->l);
+		free(func);
+	}
+}
+
+static void draw_bb_node(struct func_node *func, struct bb_node *bb)
+{
+	const char *shape;
+
+	if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
+		shape = "Mdiamond";
+	else
+		shape = "record";
+
+	printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
+	       func->idx, bb->idx, shape);
+
+	if (bb->idx == ENTRY_BLOCK_INDEX) {
+		printf("ENTRY");
+	} else if (bb->idx == EXIT_BLOCK_INDEX) {
+		printf("EXIT");
+	} else {
+		unsigned int start_idx;
+		struct dump_data dd = {};
+
+		printf("{");
+		kernel_syms_load(&dd);
+		start_idx = bb->head - func->start;
+		dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
+		kernel_syms_destroy(&dd);
+		printf("}");
+	}
+
+	printf("\"];\n\n");
+}
+
+static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
+{
+	const char *style = "\"solid,bold\"";
+	const char *color = "black";
+	int func_idx = func->idx;
+	struct edge_node *e;
+	int weight = 10;
+
+	if (list_empty(&bb->e_succs))
+		return;
+
+	list_for_each_entry(e, &bb->e_succs, l) {
+		printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
+		       func_idx, e->src->idx, func_idx, e->dst->idx,
+		       style, color, weight);
+		printf("];\n");
+	}
+}
+
+static void func_output_bb_def(struct func_node *func)
+{
+	struct bb_node *bb;
+
+	list_for_each_entry(bb, &func->bbs, l) {
+		draw_bb_node(func, bb);
+	}
+}
+
+static void func_output_edges(struct func_node *func)
+{
+	int func_idx = func->idx;
+	struct bb_node *bb;
+
+	list_for_each_entry(bb, &func->bbs, l) {
+		draw_bb_succ_edges(func, bb);
+	}
+
+	/* Add an invisible edge from ENTRY to EXIT, this is to
+	 * improve the graph layout.
+	 */
+	printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
+	       func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
+}
+
+static void cfg_dump(struct cfg *cfg)
+{
+	struct func_node *func;
+
+	printf("digraph \"DOT graph for eBPF program\" {\n");
+	list_for_each_entry(func, &cfg->funcs, l) {
+		printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
+		       func->idx, func->idx);
+		func_output_bb_def(func);
+		func_output_edges(func);
+		printf("}\n");
+	}
+	printf("}\n");
+}
+
+void dump_xlated_cfg(void *buf, unsigned int len)
+{
+	struct bpf_insn *insn = buf;
+	struct cfg cfg;
+
+	memset(&cfg, 0, sizeof(cfg));
+	if (cfg_build(&cfg, insn, len))
+		return;
+
+	cfg_dump(&cfg);
+
+	cfg_destroy(&cfg);
+}
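For illustration (not part of the commit itself): given a trivial program
with a single function and no branches, the DOT text emitted by
cfg_dump() above would look roughly as follows. The instruction listing
inside the record label, produced by dump_xlated_for_graph(), is elided
as "..." here. Note the two dummy blocks: ENTRY is always index 0 and
EXIT index 1, while real basic blocks are numbered from 2.

    digraph "DOT graph for eBPF program" {
    subgraph "cluster_0" {
    	style="dashed";
    	color="black";
    	label="func_0 ()";
    	fn_0_bb_0 [shape=Mdiamond,style=filled,label="ENTRY"];
    	fn_0_bb_2 [shape=record,style=filled,label="{...}"];
    	fn_0_bb_1 [shape=Mdiamond,style=filled,label="EXIT"];
    	fn_0_bb_0:s -> fn_0_bb_2:n [style="solid,bold", color=black, weight=10, constraint=true];
    	fn_0_bb_2:s -> fn_0_bb_1:n [style="solid,bold", color=black, weight=10, constraint=true];
    	fn_0_bb_0:s -> fn_0_bb_1:n [style="invis", constraint=true];
    }
    }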