author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-21 18:24:12 -0800
commit	5b7c4cabbb65f5c469464da6c5f614cbd7f730f2 (patch)
tree	cc5c2d0a898769fd59549594fedb3ee6f84e59a0 /fs/cifs/dfs_cache.c
Merge tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:

 "Core:
   - Add dedicated kmem_cache for typical/small skb->head, avoid having to access struct page at kfree time, and improve memory use.
   - Introduce sysctl to set default RPS configuration for new netdevs.
   - Define Netlink protocol specification format which can be used to describe messages used by each family and auto-generate parsers. Add tools for generating kernel data structures and uAPI headers.
   - Expose all net/core sysctls inside netns.
   - Remove 4s sleep in netpoll if carrier is instantly detected on boot.
   - Add configurable limit of MDB entries per port, and port-vlan.
   - Continue populating drop reasons throughout the stack.
   - Retire a handful of legacy Qdiscs and classifiers.

  Protocols:
   - Support IPv4 big TCP (TSO frames larger than 64kB).
   - Add IP_LOCAL_PORT_RANGE socket option, to control local port range on socket by socket basis.
   - Track and report in procfs number of MPTCP sockets used.
   - Support mixing IPv4 and IPv6 flows in the in-kernel MPTCP path manager.
   - IPv6: don't check net.ipv6.route.max_size and rely on garbage collection to free memory (similarly to IPv4).
   - Support Penultimate Segment Pop (PSP) flavor in SRv6 (RFC8986).
   - ICMP: add per-rate limit counters.
   - Add support for user scanning requests in ieee802154.
   - Remove static WEP support.
   - Support minimal Wi-Fi 7 Extremely High Throughput (EHT) rate reporting.
   - WiFi 7 EHT channel puncturing support (client & AP).

  BPF:
   - Add a rbtree data structure following the "next-gen data structure" precedent set by recently added linked list, that is, by using kfunc + kptr instead of adding a new BPF map type.
   - Expose XDP hints via kfuncs with initial support for RX hash and timestamp metadata.
   - Add BPF_F_NO_TUNNEL_KEY extension to bpf_skb_set_tunnel_key to better support decap on GRE tunnel devices not operating in collect metadata.
   - Improve x86 JIT's codegen for PROBE_MEM runtime error checks.
   - Remove the need for trace_printk_lock for bpf_trace_printk and bpf_trace_vprintk helpers.
   - Extend libbpf's bpf_tracing.h support for tracing arguments of kprobes/uprobes and syscall as a special case.
   - Significantly reduce the search time for module symbols by livepatch and BPF.
   - Enable cpumasks to be used as kptrs, which is useful for tracing programs tracking which tasks end up running on which CPUs in different time intervals.
   - Add support for BPF trampoline on s390x and riscv64.
   - Add capability to export the XDP features supported by the NIC.
   - Add __bpf_kfunc tag for marking kernel functions as kfuncs.
   - Add cgroup.memory=nobpf kernel parameter option to disable BPF memory accounting for container environments.

  Netfilter:
   - Remove the CLUSTERIP target. It has been marked as obsolete for years, and we still have WARN splats wrt races of the out-of-band /proc interface installed by this target.
   - Add 'destroy' commands to nf_tables. They are identical to the existing 'delete' commands, but do not return an error if the referenced object (set, chain, rule...) did not exist.

  Driver API:
   - Improve cpumask_local_spread() locality to help NICs set the right IRQ affinity on AMD platforms.
   - Separate C22 and C45 MDIO bus transactions more clearly.
   - Introduce new DCB table to control DSCP rewrite on egress.
   - Support configuration of Physical Layer Collision Avoidance (PLCA) Reconciliation Sublayer (RS) (802.3cg-2019). Modern version of shared medium Ethernet.
   - Support for MAC Merge layer (IEEE 802.3-2018 clause 99). Allowing preemption of low priority frames by high priority frames.
   - Add support for controlling MACSec offload using netlink SET.
   - Rework devlink instance refcounts to allow registration and de-registration under the instance lock. Split the code into multiple files, drop some of the unnecessarily granular locks and factor out common parts of netlink operation handling.
   - Add TX frame aggregation parameters (for USB drivers).
   - Add a new attr TCA_EXT_WARN_MSG to report TC (offload) warning messages with notifications for debug.
   - Allow offloading of UDP NEW connections via act_ct.
   - Add support for per action HW stats in TC.
   - Support hardware miss to TC action (continue processing in SW from a specific point in the action chain).
   - Warn if old Wireless Extension user space interface is used with modern cfg80211/mac80211 drivers. Do not support Wireless Extensions for Wi-Fi 7 devices at all. Everyone should switch to using nl80211 interface instead.
   - Improve the CAN bit timing configuration. Use extack to return error messages directly to user space, update the SJW handling, including the definition of a new default value that will benefit CAN-FD controllers, by increasing their oscillator tolerance.

  New hardware / drivers:
   - Ethernet:
      - nVidia BlueField-3 support (control traffic driver)
      - Ethernet support for imx93 SoCs
      - Motorcomm yt8531 gigabit Ethernet PHY
      - onsemi NCN26000 10BASE-T1S PHY (with support for PLCA)
      - Microchip LAN8841 PHY (incl. cable diagnostics and PTP)
      - Amlogic gxl MDIO mux
   - WiFi:
      - RealTek RTL8188EU (rtl8xxxu)
      - Qualcomm Wi-Fi 7 devices (ath12k)
   - CAN:
      - Renesas R-Car V4H

  Drivers:
   - Bluetooth:
      - Set Per Platform Antenna Gain (PPAG) for Intel controllers.
   - Ethernet NICs:
      - Intel (1G, igc):
         - support TSN / Qbv / packet scheduling features of i226 model
      - Intel (100G, ice):
         - use GNSS subsystem instead of TTY
         - multi-buffer XDP support
         - extend support for GPIO pins to E823 devices
      - nVidia/Mellanox:
         - update the shared buffer configuration on PFC commands
         - implement PTP adjphase function for HW offset control
         - TC support for Geneve and GRE with VF tunnel offload
         - more efficient crypto key management method
         - multi-port eswitch support
      - Netronome/Corigine:
         - add DCB IEEE support
         - support IPsec offloading for NFP3800
      - Freescale/NXP (enetc):
         - support XDP_REDIRECT for XDP non-linear buffers
         - improve reconfig, avoid link flap and waiting for idle
         - support MAC Merge layer
      - Other NICs:
         - sfc/ef100: add basic devlink support for ef100
         - ionic: rx_push mode operation (writing descriptors via MMIO)
         - bnxt: use the auxiliary bus abstraction for RDMA
         - r8169: disable ASPM and reset bus in case of tx timeout
         - cpsw: support QSGMII mode for J721e CPSW9G
         - cpts: support pulse-per-second output
         - ngbe: add an mdio bus driver
         - usbnet: optimize usbnet_bh() by avoiding unnecessary queuing
         - r8152: handle devices with FW with NCM support
         - amd-xgbe: support 10Mbps, 2.5GbE speeds and rx-adaptation
         - virtio-net: support multi buffer XDP
         - virtio/vsock: replace virtio_vsock_pkt with sk_buff
         - tsnep: XDP support
   - Ethernet high-speed switches:
      - nVidia/Mellanox (mlxsw):
         - add support for latency TLV (in FW control messages)
      - Microchip (sparx5):
         - separate explicit and implicit traffic forwarding rules, make the implicit rules always active
         - add support for egress DSCP rewrite
         - IS0 VCAP support (Ingress Classification)
         - IS2 VCAP filters (protos, L3 addrs, L4 ports, flags, ToS etc.)
         - ES2 VCAP support (Egress Access Control)
         - support for Per-Stream Filtering and Policing (802.1Q, 8.6.5.1)
   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - add MAB (port auth) offload support
         - enable PTP receive for mv88e6390
      - NXP (ocelot):
         - support MAC Merge layer
         - support for the vsc7512 internal copper phys
      - Microchip:
         - lan9303: convert to PHYLINK
         - lan966x: support TC flower filter statistics
         - lan937x: PTP support for KSZ9563/KSZ8563 and LAN937x
         - lan937x: support Credit Based Shaper configuration
         - ksz9477: support Energy Efficient Ethernet
      - other:
         - qca8k: convert to regmap read/write API, use bulk operations
         - rswitch: Improve TX timestamp accuracy
   - Intel WiFi (iwlwifi):
      - EHT (Wi-Fi 7) rate reporting
      - STEP equalizer support: transfer some STEP (connection to radio on platforms with integrated wifi) related parameters from the BIOS to the firmware.
   - Qualcomm 802.11ax WiFi (ath11k):
      - IPQ5018 support
      - Fine Timing Measurement (FTM) responder role support
      - channel 177 support
   - MediaTek WiFi (mt76):
      - per-PHY LED support
      - mt7996: EHT (Wi-Fi 7) support
      - Wireless Ethernet Dispatch (WED) reset support
      - switch to using page pool allocator
   - RealTek WiFi (rtw89):
      - support new version of Bluetooth co-existence
   - Mobile:
      - rmnet: support TX aggregation"

* tag 'net-next-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1872 commits)
  page_pool: add a comment explaining the fragment counter usage
  net: ethtool: fix __ethtool_dev_mm_supported() implementation
  ethtool: pse-pd: Fix double word in comments
  xsk: add linux/vmalloc.h to xsk.c
  sefltests: netdevsim: wait for devlink instance after netns removal
  selftest: fib_tests: Always cleanup before exit
  net/mlx5e: Align IPsec ASO result memory to be as required by hardware
  net/mlx5e: TC, Set CT miss to the specific ct action instance
  net/mlx5e: Rename CHAIN_TO_REG to MAPPED_OBJ_TO_REG
  net/mlx5: Refactor tc miss handling to a single function
  net/mlx5: Kconfig: Make tc offload depend on tc skb extension
  net/sched: flower: Support hardware miss to tc action
  net/sched: flower: Move filter handle initialization earlier
  net/sched: cls_api: Support hardware miss to tc action
  net/sched: Rename user cookie and act cookie
  sfc: fix builds without CONFIG_RTC_LIB
  sfc: clean up some inconsistent indentings
  net/mlx4_en: Introduce flexible array to silence overflow warning
  net: lan966x: Fix possible deadlock inside PTP
  net/ulp: Remove redundant ->clone() test in inet_clone_ulp().
  ...
Diffstat
-rw-r--r--	fs/cifs/dfs_cache.c	1458
1 file changed, 1458 insertions, 0 deletions
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
new file mode 100644
index 000000000..ac86bd0eb
--- /dev/null
+++ b/fs/cifs/dfs_cache.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DFS referral cache routines
+ *
+ * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
+ */
+
+#include <linux/jhash.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/nls.h>
+#include <linux/workqueue.h>
+#include <linux/uuid.h>
+#include "cifsglob.h"
+#include "smb2pdu.h"
+#include "smb2proto.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifs_unicode.h"
+#include "smb2glob.h"
+#include "dns_resolve.h"
+
+#include "dfs_cache.h"
+
+#define CACHE_HTABLE_SIZE 32
+#define CACHE_MAX_ENTRIES 64
+#define CACHE_MIN_TTL 120 /* 2 minutes */
+
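+/*
+ * A referral is an "interlink" when the server that issued it is not the
+ * storage server itself (DFSREF_REFERRAL_SERVER set, DFSREF_STORAGE_SERVER
+ * clear), i.e. it points into another DFS namespace that still has to be
+ * resolved.
+ */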
+#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
+
+struct cache_dfs_tgt {
+ char *name;
+ int path_consumed;
+ struct list_head list;
+};
+
+struct cache_entry {
+ struct hlist_node hlist;
+ const char *path;
+ int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
+ int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
+ int srvtype; /* DFS_REREFERRAL_V3.ServerType */
+ int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
+ struct timespec64 etime;
+ int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
+ int numtgts;
+ struct list_head tlist;
+ struct cache_dfs_tgt *tgthint;
+};
+
+/* List of referral server sessions per dfs mount */
+struct mount_group {
+ struct list_head list;
+ uuid_t id;
+ struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
+ int num_sessions;
+ spinlock_t lock;
+ struct list_head refresh_list;
+ struct kref refcount;
+};
+
+static struct kmem_cache *cache_slab __read_mostly;
+static struct workqueue_struct *dfscache_wq __read_mostly;
+
+static int cache_ttl;
+static DEFINE_SPINLOCK(cache_ttl_lock);
+
+static struct nls_table *cache_cp;
+
+/*
+ * Number of entries in the cache
+ */
+static atomic_t cache_count;
+
+static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+static DECLARE_RWSEM(htable_rw_lock);
+
+static LIST_HEAD(mount_group_list);
+static DEFINE_MUTEX(mount_group_list_lock);
+
+static void refresh_cache_worker(struct work_struct *work);
+
+static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
+
+static void __mount_group_release(struct mount_group *mg)
+{
+ int i;
+
+ for (i = 0; i < mg->num_sessions; i++)
+ cifs_put_smb_ses(mg->sessions[i]);
+ kfree(mg);
+}
+
+static void mount_group_release(struct kref *kref)
+{
+ struct mount_group *mg = container_of(kref, struct mount_group, refcount);
+
+ mutex_lock(&mount_group_list_lock);
+ list_del(&mg->list);
+ mutex_unlock(&mount_group_list_lock);
+ __mount_group_release(mg);
+}
+
+static struct mount_group *find_mount_group_locked(const uuid_t *id)
+{
+ struct mount_group *mg;
+
+ list_for_each_entry(mg, &mount_group_list, list) {
+ if (uuid_equal(&mg->id, id))
+ return mg;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static struct mount_group *__get_mount_group_locked(const uuid_t *id)
+{
+ struct mount_group *mg;
+
+ mg = find_mount_group_locked(id);
+ if (!IS_ERR(mg))
+ return mg;
+
+ mg = kmalloc(sizeof(*mg), GFP_KERNEL);
+ if (!mg)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&mg->refcount);
+ uuid_copy(&mg->id, id);
+ mg->num_sessions = 0;
+ spin_lock_init(&mg->lock);
+ list_add(&mg->list, &mount_group_list);
+ return mg;
+}
+
+static struct mount_group *get_mount_group(const uuid_t *id)
+{
+ struct mount_group *mg;
+
+ mutex_lock(&mount_group_list_lock);
+ mg = __get_mount_group_locked(id);
+ if (!IS_ERR(mg))
+ kref_get(&mg->refcount);
+ mutex_unlock(&mount_group_list_lock);
+
+ return mg;
+}
+
+static void free_mount_group_list(void)
+{
+ struct mount_group *mg, *tmp_mg;
+
+ list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
+ list_del_init(&mg->list);
+ __mount_group_release(mg);
+ }
+}
+
+/**
+ * dfs_cache_canonical_path - get a canonical DFS path
+ *
+ * @path: DFS path
+ * @cp: codepage
+ * @remap: mapping type
+ *
+ * Return the canonical path on success, otherwise an ERR_PTR.
+ */
+char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
+{
+ char *tmp;
+ int plen = 0;
+ char *npath;
+
+ if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
+ tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
+ if (!tmp) {
+ cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
+ kfree(tmp);
+
+ if (!npath) {
+ cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ npath = kstrdup(path, GFP_KERNEL);
+ if (!npath)
+ return ERR_PTR(-ENOMEM);
+ }
+ convert_delimiter(npath, '\\');
+ return npath;
+}
+
+static inline bool cache_entry_expired(const struct cache_entry *ce)
+{
+ struct timespec64 ts;
+
+ ktime_get_coarse_real_ts64(&ts);
+ return timespec64_compare(&ts, &ce->etime) >= 0;
+}
+
+static inline void free_tgts(struct cache_entry *ce)
+{
+ struct cache_dfs_tgt *t, *n;
+
+ list_for_each_entry_safe(t, n, &ce->tlist, list) {
+ list_del(&t->list);
+ kfree(t->name);
+ kfree(t);
+ }
+}
+
+static inline void flush_cache_ent(struct cache_entry *ce)
+{
+ hlist_del_init(&ce->hlist);
+ kfree(ce->path);
+ free_tgts(ce);
+ atomic_dec(&cache_count);
+ kmem_cache_free(cache_slab, ce);
+}
+
+static void flush_cache_ents(void)
+{
+ int i;
+
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+ struct hlist_node *n;
+ struct cache_entry *ce;
+
+ hlist_for_each_entry_safe(ce, n, l, hlist) {
+ if (!hlist_unhashed(&ce->hlist))
+ flush_cache_ent(ce);
+ }
+ }
+}
+
+/*
+ * dfs cache /proc file
+ */
+static int dfscache_proc_show(struct seq_file *m, void *v)
+{
+ int i;
+ struct cache_entry *ce;
+ struct cache_dfs_tgt *t;
+
+ seq_puts(m, "DFS cache\n---------\n");
+
+ down_read(&htable_rw_lock);
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+
+ hlist_for_each_entry(ce, l, hlist) {
+ if (hlist_unhashed(&ce->hlist))
+ continue;
+
+ seq_printf(m,
+ "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
+ ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
+ IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+ ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
+
+ list_for_each_entry(t, &ce->tlist, list) {
+ seq_printf(m, " %s%s\n",
+ t->name,
+ READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
+ }
+ }
+ }
+ up_read(&htable_rw_lock);
+
+ return 0;
+}
+
+static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char c;
+ int rc;
+
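+ /* only "0" is accepted, e.g. "echo 0 > /proc/fs/cifs/dfscache" clears the cache */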
+ rc = get_user(c, buffer);
+ if (rc)
+ return rc;
+
+ if (c != '0')
+ return -EINVAL;
+
+ cifs_dbg(FYI, "clearing dfs cache\n");
+
+ down_write(&htable_rw_lock);
+ flush_cache_ents();
+ up_write(&htable_rw_lock);
+
+ return count;
+}
+
+static int dfscache_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dfscache_proc_show, NULL);
+}
+
+const struct proc_ops dfscache_proc_ops = {
+ .proc_open = dfscache_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = dfscache_proc_write,
+};
+
+#ifdef CONFIG_CIFS_DEBUG2
+static inline void dump_tgts(const struct cache_entry *ce)
+{
+ struct cache_dfs_tgt *t;
+
+ cifs_dbg(FYI, "target list:\n");
+ list_for_each_entry(t, &ce->tlist, list) {
+ cifs_dbg(FYI, " %s%s\n", t->name,
+ READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
+ }
+}
+
+static inline void dump_ce(const struct cache_entry *ce)
+{
+ cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path,
+ ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
+ ce->etime.tv_nsec,
+ ce->hdr_flags, ce->ref_flags,
+ IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+ ce->path_consumed,
+ cache_entry_expired(ce) ? "yes" : "no");
+ dump_tgts(ce);
+}
+
+static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
+{
+ int i;
+
+ cifs_dbg(FYI, "DFS referrals returned by the server:\n");
+ for (i = 0; i < numrefs; i++) {
+ const struct dfs_info3_param *ref = &refs[i];
+
+ cifs_dbg(FYI,
+ "\n"
+ "flags: 0x%x\n"
+ "path_consumed: %d\n"
+ "server_type: 0x%x\n"
+ "ref_flag: 0x%x\n"
+ "path_name: %s\n"
+ "node_name: %s\n"
+ "ttl: %d (%dm)\n",
+ ref->flags, ref->path_consumed, ref->server_type,
+ ref->ref_flag, ref->path_name, ref->node_name,
+ ref->ttl, ref->ttl / 60);
+ }
+}
+#else
+#define dump_tgts(e)
+#define dump_ce(e)
+#define dump_refs(r, n)
+#endif
+
+/**
+ * dfs_cache_init - Initialize DFS referral cache.
+ *
+ * Return zero if initialized successfully, otherwise non-zero.
+ */
+int dfs_cache_init(void)
+{
+ int rc;
+ int i;
+
+ dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
+ if (!dfscache_wq)
+ return -ENOMEM;
+
+ cache_slab = kmem_cache_create("cifs_dfs_cache",
+ sizeof(struct cache_entry), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cache_slab) {
+ rc = -ENOMEM;
+ goto out_destroy_wq;
+ }
+
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++)
+ INIT_HLIST_HEAD(&cache_htable[i]);
+
+ atomic_set(&cache_count, 0);
+ cache_cp = load_nls("utf8");
+ if (!cache_cp)
+ cache_cp = load_nls_default();
+
+ cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
+ return 0;
+
+out_destroy_wq:
+ destroy_workqueue(dfscache_wq);
+ return rc;
+}
+
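+/*
+ * Hash a DFS path case-insensitively: decode each character with the cache
+ * codepage and upper-case it before feeding it to jhash, so that lookups
+ * follow Windows' case-insensitive path matching.
+ */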
+static int cache_entry_hash(const void *data, int size, unsigned int *hash)
+{
+ int i, clen;
+ const unsigned char *s = data;
+ wchar_t c;
+ unsigned int h = 0;
+
+ for (i = 0; i < size; i += clen) {
+ clen = cache_cp->char2uni(&s[i], size - i, &c);
+ if (unlikely(clen < 0)) {
+ cifs_dbg(VFS, "%s: can't convert char\n", __func__);
+ return clen;
+ }
+ c = cifs_toupper(c);
+ h = jhash(&c, sizeof(c), h);
+ }
+ *hash = h % CACHE_HTABLE_SIZE;
+ return 0;
+}
+
+/* Return target hint of a DFS cache entry */
+static inline char *get_tgt_name(const struct cache_entry *ce)
+{
+ struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
+
+ return t ? t->name : ERR_PTR(-ENOENT);
+}
+
+/* Return expiry time computed from a new entry's TTL */
+static inline struct timespec64 get_expire_time(int ttl)
+{
+ struct timespec64 ts = {
+ .tv_sec = ttl,
+ .tv_nsec = 0,
+ };
+ struct timespec64 now;
+
+ ktime_get_coarse_real_ts64(&now);
+ return timespec64_add(now, ts);
+}
+
+/* Allocate a new DFS target */
+static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
+{
+ struct cache_dfs_tgt *t;
+
+ t = kmalloc(sizeof(*t), GFP_ATOMIC);
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+ t->name = kstrdup(name, GFP_ATOMIC);
+ if (!t->name) {
+ kfree(t);
+ return ERR_PTR(-ENOMEM);
+ }
+ t->path_consumed = path_consumed;
+ INIT_LIST_HEAD(&t->list);
+ return t;
+}
+
+/*
+ * Copy DFS referral information to a cache entry and conditionally update
+ * target hint.
+ */
+static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
+ struct cache_entry *ce, const char *tgthint)
+{
+ struct cache_dfs_tgt *target;
+ int i;
+
+ ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
+ ce->etime = get_expire_time(ce->ttl);
+ ce->srvtype = refs[0].server_type;
+ ce->hdr_flags = refs[0].flags;
+ ce->ref_flags = refs[0].ref_flag;
+ ce->path_consumed = refs[0].path_consumed;
+
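+ /*
+ * Keep a previous target hint, if given, at the head of the list so it
+ * is picked as the new hint below; all other targets preserve the order
+ * returned by the server.
+ */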
+ for (i = 0; i < numrefs; i++) {
+ struct cache_dfs_tgt *t;
+
+ t = alloc_target(refs[i].node_name, refs[i].path_consumed);
+ if (IS_ERR(t)) {
+ free_tgts(ce);
+ return PTR_ERR(t);
+ }
+ if (tgthint && !strcasecmp(t->name, tgthint)) {
+ list_add(&t->list, &ce->tlist);
+ tgthint = NULL;
+ } else {
+ list_add_tail(&t->list, &ce->tlist);
+ }
+ ce->numtgts++;
+ }
+
+ target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
+ list);
+ WRITE_ONCE(ce->tgthint, target);
+
+ return 0;
+}
+
+/* Allocate a new cache entry */
+static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
+{
+ struct cache_entry *ce;
+ int rc;
+
+ ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ ce->path = refs[0].path_name;
+ refs[0].path_name = NULL;
+
+ INIT_HLIST_NODE(&ce->hlist);
+ INIT_LIST_HEAD(&ce->tlist);
+
+ rc = copy_ref_data(refs, numrefs, ce, NULL);
+ if (rc) {
+ kfree(ce->path);
+ kmem_cache_free(cache_slab, ce);
+ ce = ERR_PTR(rc);
+ }
+ return ce;
+}
+
+static void remove_oldest_entry_locked(void)
+{
+ int i;
+ struct cache_entry *ce;
+ struct cache_entry *to_del = NULL;
+
+ WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+
+ for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
+ struct hlist_head *l = &cache_htable[i];
+
+ hlist_for_each_entry(ce, l, hlist) {
+ if (hlist_unhashed(&ce->hlist))
+ continue;
+ if (!to_del || timespec64_compare(&ce->etime,
+ &to_del->etime) < 0)
+ to_del = ce;
+ }
+ }
+
+ if (!to_del) {
+ cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
+ return;
+ }
+
+ cifs_dbg(FYI, "%s: removing entry\n", __func__);
+ dump_ce(to_del);
+ flush_cache_ent(to_del);
+}
+
+/* Add a new DFS cache entry */
+static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+ int numrefs)
+{
+ int rc;
+ struct cache_entry *ce;
+ unsigned int hash;
+
+ WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+
+ if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
+ cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
+ remove_oldest_entry_locked();
+ }
+
+ rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
+ if (rc)
+ return ERR_PTR(rc);
+
+ ce = alloc_cache_entry(refs, numrefs);
+ if (IS_ERR(ce))
+ return ce;
+
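+ /*
+ * Keep the refresh worker scheduled at the lowest TTL seen so far, so
+ * that entries are refreshed before the shortest-lived one expires.
+ */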
+ spin_lock(&cache_ttl_lock);
+ if (!cache_ttl) {
+ cache_ttl = ce->ttl;
+ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+ } else {
+ cache_ttl = min_t(int, cache_ttl, ce->ttl);
+ mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+ }
+ spin_unlock(&cache_ttl_lock);
+
+ hlist_add_head(&ce->hlist, &cache_htable[hash]);
+ dump_ce(ce);
+
+ atomic_inc(&cache_count);
+
+ return ce;
+}
+
+/* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
+static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
+{
+ int i, l1, l2;
+ wchar_t c1, c2;
+
+ if (len1 != len2)
+ return false;
+
+ for (i = 0; i < len1; i += l1) {
+ l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
+ l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
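+ /* neither byte sequence decodes in @cache_cp: compare raw bytes instead */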
+ if (unlikely(l1 < 0 && l2 < 0)) {
+ if (s1[i] != s2[i])
+ return false;
+ l1 = 1;
+ continue;
+ }
+ if (l1 != l2)
+ return false;
+ if (cifs_toupper(c1) != cifs_toupper(c2))
+ return false;
+ }
+ return true;
+}
+
+static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
+{
+ struct cache_entry *ce;
+
+ hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
+ if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
+ dump_ce(ce);
+ return ce;
+ }
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+/*
+ * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
+ *
+ * Use whole path components in the match. Must be called with htable_rw_lock held.
+ *
+ * Return cached entry if successful.
+ * Return ERR_PTR(-ENOENT) if the entry is not found.
+ * Return error ptr otherwise.
+ */
+static struct cache_entry *lookup_cache_entry(const char *path)
+{
+ struct cache_entry *ce;
+ int cnt = 0;
+ const char *s = path, *e;
+ char sep = *s;
+ unsigned int hash;
+ int rc;
+
+ while ((s = strchr(s, sep)) && ++cnt < 3)
+ s++;
+
+ if (cnt < 3) {
+ rc = cache_entry_hash(path, strlen(path), &hash);
+ if (rc)
+ return ERR_PTR(rc);
+ return __lookup_cache_entry(path, hash, strlen(path));
+ }
+ /*
+ * Handle paths that have more than two path components and are a complete prefix of the DFS
+ * referral request path (@path).
+ *
+ * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
+ */
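+ /*
+ * E.g. for "\dom\root\link\dir", try the full path first, then walk
+ * backwards over whole components ("\dom\root\link", ...).
+ */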
+ e = path + strlen(path) - 1;
+ while (e > s) {
+ int len;
+
+ /* skip separators */
+ while (e > s && *e == sep)
+ e--;
+ if (e == s)
+ break;
+
+ len = e + 1 - path;
+ rc = cache_entry_hash(path, len, &hash);
+ if (rc)
+ return ERR_PTR(rc);
+ ce = __lookup_cache_entry(path, hash, len);
+ if (!IS_ERR(ce))
+ return ce;
+
+ /* backward until separator */
+ while (e > s && *e != sep)
+ e--;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+/**
+ * dfs_cache_destroy - destroy DFS referral cache
+ */
+void dfs_cache_destroy(void)
+{
+ cancel_delayed_work_sync(&refresh_task);
+ unload_nls(cache_cp);
+ free_mount_group_list();
+ flush_cache_ents();
+ kmem_cache_destroy(cache_slab);
+ destroy_workqueue(dfscache_wq);
+
+ cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
+}
+
+/* Update a cache entry with the new referral in @refs */
+static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
+ int numrefs)
+{
+ struct cache_dfs_tgt *target;
+ char *th = NULL;
+ int rc;
+
+ WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+
+ target = READ_ONCE(ce->tgthint);
+ if (target) {
+ th = kstrdup(target->name, GFP_ATOMIC);
+ if (!th)
+ return -ENOMEM;
+ }
+
+ free_tgts(ce);
+ ce->numtgts = 0;
+
+ rc = copy_ref_data(refs, numrefs, ce, th);
+
+ kfree(th);
+
+ return rc;
+}
+
+static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
+ struct dfs_info3_param **refs, int *numrefs)
+{
+ int rc;
+ int i;
+
+ *refs = NULL;
+ *numrefs = 0;
+
+ if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
+ return -EOPNOTSUPP;
+ if (unlikely(!cache_cp))
+ return -EINVAL;
+
+ cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
+ rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
+ NO_MAP_UNI_RSVD);
+ if (!rc) {
+ struct dfs_info3_param *ref = *refs;
+
+ for (i = 0; i < *numrefs; i++)
+ convert_delimiter(ref[i].path_name, '\\');
+ }
+ return rc;
+}
+
+/*
+ * Find, create or update a DFS cache entry.
+ *
+ * If the entry wasn't found, then a new one is created. If it was found but
+ * expired, then it is updated with a fresh referral.
+ *
+ * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
+ * handle them properly.
+ *
+ * On success, return entry with acquired lock for reading, otherwise error ptr.
+ */
+static struct cache_entry *cache_refresh_path(const unsigned int xid,
+ struct cifs_ses *ses,
+ const char *path,
+ bool force_refresh)
+{
+ struct dfs_info3_param *refs = NULL;
+ struct cache_entry *ce;
+ int numrefs = 0;
+ int rc;
+
+ cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
+
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(path);
+ if (!IS_ERR(ce)) {
+ if (!force_refresh && !cache_entry_expired(ce))
+ return ce;
+ } else if (PTR_ERR(ce) != -ENOENT) {
+ up_read(&htable_rw_lock);
+ return ce;
+ }
+
+ /*
+ * Unlock shared access as we don't want to hold any locks while getting
+ * a new referral. The @ses used for performing the I/O could be
+ * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
+ * in order to failover -- if necessary.
+ */
+ up_read(&htable_rw_lock);
+
+ /*
+ * Either the entry was not found, or it is expired, or it is a forced
+ * refresh.
+ * Request a new DFS referral in order to create or update a cache entry.
+ */
+ rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+ if (rc) {
+ ce = ERR_PTR(rc);
+ goto out;
+ }
+
+ dump_refs(refs, numrefs);
+
+ down_write(&htable_rw_lock);
+ /* Re-check as another task might have it added or refreshed already */
+ ce = lookup_cache_entry(path);
+ if (!IS_ERR(ce)) {
+ if (force_refresh || cache_entry_expired(ce)) {
+ rc = update_cache_entry_locked(ce, refs, numrefs);
+ if (rc)
+ ce = ERR_PTR(rc);
+ }
+ } else if (PTR_ERR(ce) == -ENOENT) {
+ ce = add_cache_entry_locked(refs, numrefs);
+ }
+
+ if (IS_ERR(ce)) {
+ up_write(&htable_rw_lock);
+ goto out;
+ }
+
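+ /* demote to shared access; callers expect the read lock to be held */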
+ downgrade_write(&htable_rw_lock);
+out:
+ free_dfs_info_array(refs, numrefs);
+ return ce;
+}
+
+/*
+ * Set up a DFS referral from a given cache entry.
+ *
+ * Must be called with htable_rw_lock held.
+ */
+static int setup_referral(const char *path, struct cache_entry *ce,
+ struct dfs_info3_param *ref, const char *target)
+{
+ int rc;
+
+ cifs_dbg(FYI, "%s: set up new ref\n", __func__);
+
+ memset(ref, 0, sizeof(*ref));
+
+ ref->path_name = kstrdup(path, GFP_ATOMIC);
+ if (!ref->path_name)
+ return -ENOMEM;
+
+ ref->node_name = kstrdup(target, GFP_ATOMIC);
+ if (!ref->node_name) {
+ rc = -ENOMEM;
+ goto err_free_path;
+ }
+
+ ref->path_consumed = ce->path_consumed;
+ ref->ttl = ce->ttl;
+ ref->server_type = ce->srvtype;
+ ref->ref_flag = ce->ref_flags;
+ ref->flags = ce->hdr_flags;
+
+ return 0;
+
+err_free_path:
+ kfree(ref->path_name);
+ ref->path_name = NULL;
+ return rc;
+}
+
+/* Return target list of a DFS cache entry */
+static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
+{
+ int rc;
+ struct list_head *head = &tl->tl_list;
+ struct cache_dfs_tgt *t;
+ struct dfs_cache_tgt_iterator *it, *nit;
+
+ memset(tl, 0, sizeof(*tl));
+ INIT_LIST_HEAD(head);
+
+ list_for_each_entry(t, &ce->tlist, list) {
+ it = kzalloc(sizeof(*it), GFP_ATOMIC);
+ if (!it) {
+ rc = -ENOMEM;
+ goto err_free_it;
+ }
+
+ it->it_name = kstrdup(t->name, GFP_ATOMIC);
+ if (!it->it_name) {
+ kfree(it);
+ rc = -ENOMEM;
+ goto err_free_it;
+ }
+ it->it_path_consumed = t->path_consumed;
+
+ if (READ_ONCE(ce->tgthint) == t)
+ list_add(&it->it_list, head);
+ else
+ list_add_tail(&it->it_list, head);
+ }
+
+ tl->tl_numtgts = ce->numtgts;
+
+ return 0;
+
+err_free_it:
+ list_for_each_entry_safe(it, nit, head, it_list) {
+ list_del(&it->it_list);
+ kfree(it->it_name);
+ kfree(it);
+ }
+ return rc;
+}
+
+/**
+ * dfs_cache_find - find a DFS cache entry
+ *
+ * If it doesn't find the cache entry, then it will get a DFS referral
+ * for @path and create a new entry.
+ *
+ * In case the cache entry exists but expired, it will get a DFS referral
+ * for @path and then update the respective cache entry.
+ *
+ * These parameters are passed down to the get_dfs_refer() call if it
+ * needs to be issued:
+ * @xid: syscall xid
+ * @ses: smb session to issue the request on
+ * @cp: codepage
+ * @remap: path character remapping type
+ * @path: path to lookup in DFS referral cache.
+ *
+ * @ref: when non-NULL, store single DFS referral result in it.
+ * @tgt_list: when non-NULL, store complete DFS target list in it.
+ *
+ * Return zero if the target was found, otherwise non-zero.
+ */
+int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
+ int remap, const char *path, struct dfs_info3_param *ref,
+ struct dfs_cache_tgt_list *tgt_list)
+{
+ int rc;
+ const char *npath;
+ struct cache_entry *ce;
+
+ npath = dfs_cache_canonical_path(path, cp, remap);
+ if (IS_ERR(npath))
+ return PTR_ERR(npath);
+
+ ce = cache_refresh_path(xid, ses, npath, false);
+ if (IS_ERR(ce)) {
+ rc = PTR_ERR(ce);
+ goto out_free_path;
+ }
+
+ if (ref)
+ rc = setup_referral(path, ce, ref, get_tgt_name(ce));
+ else
+ rc = 0;
+ if (!rc && tgt_list)
+ rc = get_targets(ce, tgt_list);
+
+ up_read(&htable_rw_lock);
+
+out_free_path:
+ kfree(npath);
+ return rc;
+}
+
+/**
+ * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
+ * the currently connected server.
+ *
+ * NOTE: This function will neither update a cache entry in case it was
+ * expired, nor create a new cache entry if @path hasn't been found. It heavily
+ * relies on an existing cache entry.
+ *
+ * @path: canonical DFS path to lookup in the DFS referral cache.
+ * @ref: when non-NULL, store single DFS referral result in it.
+ * @tgt_list: when non-NULL, store complete DFS target list in it.
+ *
+ * Return 0 if successful.
+ * Return -ENOENT if the entry was not found.
+ * Return non-zero for other errors.
+ */
+int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
+ struct dfs_cache_tgt_list *tgt_list)
+{
+ int rc;
+ struct cache_entry *ce;
+
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(path);
+ if (IS_ERR(ce)) {
+ rc = PTR_ERR(ce);
+ goto out_unlock;
+ }
+
+ if (ref)
+ rc = setup_referral(path, ce, ref, get_tgt_name(ce));
+ else
+ rc = 0;
+ if (!rc && tgt_list)
+ rc = get_targets(ce, tgt_list);
+
+out_unlock:
+ up_read(&htable_rw_lock);
+ return rc;
+}
+
+/**
+ * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
+ * without sending any requests to the currently connected server.
+ *
+ * NOTE: This function will neither update a cache entry in case it was
+ * expired, nor create a new cache entry if @path hasn't been found. It heavily
+ * relies on an existing cache entry.
+ *
+ * @path: canonical DFS path to lookup in DFS referral cache.
+ * @it: target iterator which contains the target hint to update the cache
+ * entry with.
+ */
+void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
+{
+ struct cache_dfs_tgt *t;
+ struct cache_entry *ce;
+
+ if (!path || !it)
+ return;
+
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(path);
+ if (IS_ERR(ce))
+ goto out_unlock;
+
+ t = READ_ONCE(ce->tgthint);
+
+ if (unlikely(!strcasecmp(it->it_name, t->name)))
+ goto out_unlock;
+
+ list_for_each_entry(t, &ce->tlist, list) {
+ if (!strcasecmp(t->name, it->it_name)) {
+ WRITE_ONCE(ce->tgthint, t);
+ cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
+ it->it_name);
+ break;
+ }
+ }
+
+out_unlock:
+ up_read(&htable_rw_lock);
+}
+
+/**
+ * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
+ * target iterator (@it).
+ *
+ * @path: canonical DFS path to lookup in DFS referral cache.
+ * @it: DFS target iterator.
+ * @ref: DFS referral pointer to set up the gathered information.
+ *
+ * Return zero if the DFS referral was set up correctly, otherwise non-zero.
+ */
+int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
+ struct dfs_info3_param *ref)
+{
+ int rc;
+ struct cache_entry *ce;
+
+ if (!it || !ref)
+ return -EINVAL;
+
+ cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
+
+ down_read(&htable_rw_lock);
+
+ ce = lookup_cache_entry(path);
+ if (IS_ERR(ce)) {
+ rc = PTR_ERR(ce);
+ goto out_unlock;
+ }
+
+ cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
+
+ rc = setup_referral(path, ce, ref, it->it_name);
+
+out_unlock:
+ up_read(&htable_rw_lock);
+ return rc;
+}
+
+/**
+ * dfs_cache_add_refsrv_session - add SMB session of referral server
+ *
+ * @mount_id: mount group uuid to lookup.
+ * @ses: reference counted SMB session of referral server.
+ */
+void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
+{
+ struct mount_group *mg;
+
+ if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
+ return;
+
+ mg = get_mount_group(mount_id);
+ if (WARN_ON_ONCE(IS_ERR(mg)))
+ return;
+
+ spin_lock(&mg->lock);
+ if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
+ mg->sessions[mg->num_sessions++] = ses;
+ spin_unlock(&mg->lock);
+ kref_put(&mg->refcount, mount_group_release);
+}
+
+/**
+ * dfs_cache_put_refsrv_sessions - put all referral server sessions
+ *
+ * Put all SMB sessions from the given mount group id.
+ *
+ * @mount_id: mount group uuid to lookup.
+ */
+void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
+{
+ struct mount_group *mg;
+
+ if (!mount_id || uuid_is_null(mount_id))
+ return;
+
+ mutex_lock(&mount_group_list_lock);
+ mg = find_mount_group_locked(mount_id);
+ if (IS_ERR(mg)) {
+ mutex_unlock(&mount_group_list_lock);
+ return;
+ }
+ mutex_unlock(&mount_group_list_lock);
+ kref_put(&mg->refcount, mount_group_release);
+}
+
+/*
+ * Extract the share component from a DFS target and return a pointer to the
+ * prefix path that follows it, or an ERR_PTR on error.
+ */
+static const char *parse_target_share(const char *target, char **share)
+{
+ const char *s, *seps = "/\\";
+ size_t len;
+
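+ /*
+ * E.g. for target "\server\share\dir", *share is set to "\server\share"
+ * and the returned pointer points at "dir".
+ */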
+ s = strpbrk(target + 1, seps);
+ if (!s)
+ return ERR_PTR(-EINVAL);
+
+ len = strcspn(s + 1, seps);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+ s += len;
+
+ len = s - target + 1;
+ *share = kstrndup(target, len, GFP_KERNEL);
+ if (!*share)
+ return ERR_PTR(-ENOMEM);
+
+ s = target + len;
+ return s + strspn(s, seps);
+}
+
+/**
+ * dfs_cache_get_tgt_share - parse a DFS target
+ *
+ * @path: DFS full path
+ * @it: DFS target iterator.
+ * @share: tree name.
+ * @prefix: prefix path.
+ *
+ * Return zero if target was parsed correctly, otherwise non-zero.
+ */
+int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+ char **prefix)
+{
+ char sep;
+ char *target_share;
+ char *ppath = NULL;
+ const char *target_ppath, *dfsref_ppath;
+ size_t target_pplen, dfsref_pplen;
+ size_t len, c;
+
+ if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
+ return -EINVAL;
+
+ sep = it->it_name[0];
+ if (sep != '\\' && sep != '/')
+ return -EINVAL;
+
+ target_ppath = parse_target_share(it->it_name, &target_share);
+ if (IS_ERR(target_ppath))
+ return PTR_ERR(target_ppath);
+
+ /* point to prefix in DFS referral path */
+ dfsref_ppath = path + it->it_path_consumed;
+ dfsref_ppath += strspn(dfsref_ppath, "/\\");
+
+ target_pplen = strlen(target_ppath);
+ dfsref_pplen = strlen(dfsref_ppath);
+
+ /* merge prefix paths from DFS referral path and target node */
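+ /* e.g. "dir1" (target) + "dir2" (referral leftover) -> "dir1\dir2" */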
+ if (target_pplen || dfsref_pplen) {
+ len = target_pplen + dfsref_pplen + 2;
+ ppath = kzalloc(len, GFP_KERNEL);
+ if (!ppath) {
+ kfree(target_share);
+ return -ENOMEM;
+ }
+ c = strscpy(ppath, target_ppath, len);
+ if (c && dfsref_pplen)
+ ppath[c] = sep;
+ strlcat(ppath, dfsref_ppath, len);
+ }
+ *share = target_share;
+ *prefix = ppath;
+ return 0;
+}
+
+static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+{
+ char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
+ const char *host;
+ size_t hostlen;
+ struct sockaddr_storage ss;
+ bool match;
+ int rc;
+
+ if (strcasecmp(s1, s2))
+ return false;
+
+ /*
+ * Resolve share's hostname and check if server address matches. Otherwise just ignore it
+ * as we could not have upcall to resolve hostname or failed to convert ip address.
+ */
+ extract_unc_hostname(s1, &host, &hostlen);
+ scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
+
+ rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
+ __func__, (int)hostlen, host);
+ return true;
+ }
+
+ cifs_server_lock(server);
+ match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
+ cifs_server_unlock(server);
+
+ return match;
+}
+
+/*
+ * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
+ * target shares in @new_tl.
+ */
+static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+ struct dfs_cache_tgt_list *old_tl,
+ struct dfs_cache_tgt_list *new_tl)
+{
+ struct dfs_cache_tgt_iterator *oit, *nit;
+
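+ /* any share still present in both lists keeps the current connection */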
+ for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
+ oit = dfs_cache_get_next_tgt(old_tl, oit)) {
+ for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
+ nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+ if (target_share_equal(server,
+ dfs_cache_get_tgt_name(oit),
+ dfs_cache_get_tgt_name(nit)))
+ return;
+ }
+ }
+
+ cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
+ cifs_signal_cifsd_for_reconnect(server, true);
+}
+
+/* Refresh dfs referral of tcon and mark it for reconnect if needed */
+static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
+{
+ struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+ struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
+ struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
+ struct cifs_tcon *ipc = ses->tcon_ipc;
+ bool needs_refresh = false;
+ struct cache_entry *ce;
+ unsigned int xid;
+ int rc = 0;
+
+ xid = get_xid();
+
+ down_read(&htable_rw_lock);
+ ce = lookup_cache_entry(path);
+ needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+ if (!IS_ERR(ce)) {
+ rc = get_targets(ce, &old_tl);
+ cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+ }
+ up_read(&htable_rw_lock);
+
+ if (!needs_refresh) {
+ rc = 0;
+ goto out;
+ }
+
+ spin_lock(&ipc->tc_lock);
+ if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
+ spin_unlock(&ipc->tc_lock);
+ cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
+ goto out;
+ }
+ spin_unlock(&ipc->tc_lock);
+
+ ce = cache_refresh_path(xid, ses, path, true);
+ if (!IS_ERR(ce)) {
+ rc = get_targets(ce, &new_tl);
+ up_read(&htable_rw_lock);
+ cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+ mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
+ }
+
+out:
+ free_xid(xid);
+ dfs_cache_free_tgts(&old_tl);
+ dfs_cache_free_tgts(&new_tl);
+ return rc;
+}
+
+static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ mutex_lock(&server->refpath_lock);
+ if (server->leaf_fullpath)
+ __refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
+ mutex_unlock(&server->refpath_lock);
+ return 0;
+}
+
+/**
+ * dfs_cache_remount_fs - remount a DFS share
+ *
+ * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
+ * match any of the new targets, mark it for reconnect.
+ *
+ * @cifs_sb: cifs superblock.
+ *
+ * Return zero if remounted, otherwise non-zero.
+ */
+int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+{
+ struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
+
+ if (!cifs_sb || !cifs_sb->master_tlink)
+ return -EINVAL;
+
+ tcon = cifs_sb_master_tcon(cifs_sb);
+ server = tcon->ses->server;
+
+ if (!server->origin_fullpath) {
+ cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
+ return 0;
+ }
+
+ if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+ cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
+ return -EINVAL;
+ }
+ /*
+ * After reconnecting to a different server, unique ids won't match anymore, so we disable
+ * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE).
+ */
+ cifs_autodisable_serverino(cifs_sb);
+ /*
+ * Force the use of prefix path to support failover on DFS paths that resolve to targets
+ * that have different prefix paths.
+ */
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+
+ return refresh_tcon(tcon, true);
+}
+
+/*
+ * Worker that refreshes the DFS cache of all active mounts; it is scheduled
+ * based on the lowest TTL value found among the cached DFS referrals.
+ */
+static void refresh_cache_worker(struct work_struct *work)
+{
+ struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon, *ntcon;
+ struct list_head tcons;
+ struct cifs_ses *ses;
+
+ INIT_LIST_HEAD(&tcons);
+
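+ /*
+ * Collect all DFS-related tcons (and the IPC tcons of their sessions)
+ * under cifs_tcp_ses_lock, pinning them with extra references so they
+ * can be refreshed safely after the lock is dropped.
+ */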
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ if (!server->leaf_fullpath)
+ continue;
+
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (ses->tcon_ipc) {
+ ses->ses_count++;
+ list_add_tail(&ses->tcon_ipc->ulist, &tcons);
+ }
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (!tcon->ipc) {
+ tcon->tc_count++;
+ list_add_tail(&tcon->ulist, &tcons);
+ }
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ list_del_init(&tcon->ulist);
+
+ mutex_lock(&server->refpath_lock);
+ if (server->leaf_fullpath)
+ __refresh_tcon(server->leaf_fullpath + 1, tcon, false);
+ mutex_unlock(&server->refpath_lock);
+
+ if (tcon->ipc)
+ cifs_put_smb_ses(tcon->ses);
+ else
+ cifs_put_tcon(tcon);
+ }
+
+ spin_lock(&cache_ttl_lock);
+ queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+ spin_unlock(&cache_ttl_lock);
+}