Merge tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / fs / cifs / dfs_cache.c
CommitLineData
54be1f6c
PA
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DFS referral cache routines
4 *
5072010c 5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
54be1f6c
PA
6 */
7
54be1f6c
PA
8#include <linux/jhash.h>
9#include <linux/ktime.h>
10#include <linux/slab.h>
97a32539 11#include <linux/proc_fs.h>
54be1f6c
PA
12#include <linux/nls.h>
13#include <linux/workqueue.h>
c9f71103 14#include <linux/uuid.h>
54be1f6c
PA
15#include "cifsglob.h"
16#include "smb2pdu.h"
17#include "smb2proto.h"
18#include "cifsproto.h"
19#include "cifs_debug.h"
20#include "cifs_unicode.h"
21#include "smb2glob.h"
b6236618 22#include "dns_resolve.h"
54be1f6c
PA
23
24#include "dfs_cache.h"
25
185352ae
PAS
26#define CACHE_HTABLE_SIZE 32
27#define CACHE_MAX_ENTRIES 64
c950fc7a 28#define CACHE_MIN_TTL 120 /* 2 minutes */
54be1f6c 29
889c2a70 30#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
54be1f6c 31
185352ae
PAS
32struct cache_dfs_tgt {
33 char *name;
7548e1da 34 int path_consumed;
185352ae 35 struct list_head list;
54be1f6c
PA
36};
37
185352ae
PAS
38struct cache_entry {
39 struct hlist_node hlist;
40 const char *path;
5ff2836e
PA
41 int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
42 int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
43 int srvtype; /* DFS_REREFERRAL_V3.ServerType */
44 int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
185352ae 45 struct timespec64 etime;
5ff2836e 46 int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
185352ae
PAS
47 int numtgts;
48 struct list_head tlist;
49 struct cache_dfs_tgt *tgthint;
54be1f6c
PA
50};
51
185352ae
PAS
52static struct kmem_cache *cache_slab __read_mostly;
53static struct workqueue_struct *dfscache_wq __read_mostly;
54be1f6c 54
185352ae 55static int cache_ttl;
06d57378
PAS
56static DEFINE_SPINLOCK(cache_ttl_lock);
57
c870a8e7 58static struct nls_table *cache_cp;
54be1f6c
PA
59
60/*
61 * Number of entries in the cache
62 */
742d8de0 63static atomic_t cache_count;
185352ae
PAS
64
65static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
742d8de0 66static DECLARE_RWSEM(htable_rw_lock);
54be1f6c 67
54be1f6c
PA
68static void refresh_cache_worker(struct work_struct *work);
69
185352ae
PAS
70static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
71
c870a8e7
PA
/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return canonical path if success, otherwise error.
 */
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	/* A DFS path must be at least "\\x" long and start with a separator */
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		/*
		 * Caller's codepage differs from the cache's (@cache_cp):
		 * round-trip the path through utf16 so the copy stored in the
		 * cache is encoded in @cache_cp.
		 */
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);

		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	/* Cache keys always use backslash separators */
	convert_delimiter(npath, '\\');
	return npath;
}
112
185352ae 113static inline bool cache_entry_expired(const struct cache_entry *ce)
54be1f6c
PA
114{
115 struct timespec64 ts;
116
54e4f73c 117 ktime_get_coarse_real_ts64(&ts);
185352ae 118 return timespec64_compare(&ts, &ce->etime) >= 0;
54be1f6c
PA
119}
120
185352ae 121static inline void free_tgts(struct cache_entry *ce)
54be1f6c 122{
185352ae 123 struct cache_dfs_tgt *t, *n;
54be1f6c 124
185352ae
PAS
125 list_for_each_entry_safe(t, n, &ce->tlist, list) {
126 list_del(&t->list);
127 kfree(t->name);
54be1f6c
PA
128 kfree(t);
129 }
130}
131
185352ae 132static inline void flush_cache_ent(struct cache_entry *ce)
54be1f6c 133{
742d8de0 134 hlist_del_init(&ce->hlist);
199c6bdf 135 kfree(ce->path);
54be1f6c 136 free_tgts(ce);
742d8de0
PAS
137 atomic_dec(&cache_count);
138 kmem_cache_free(cache_slab, ce);
54be1f6c
PA
139}
140
141static void flush_cache_ents(void)
142{
143 int i;
144
185352ae
PAS
145 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
146 struct hlist_head *l = &cache_htable[i];
742d8de0 147 struct hlist_node *n;
185352ae 148 struct cache_entry *ce;
54be1f6c 149
742d8de0
PAS
150 hlist_for_each_entry_safe(ce, n, l, hlist) {
151 if (!hlist_unhashed(&ce->hlist))
152 flush_cache_ent(ce);
153 }
54be1f6c 154 }
54be1f6c
PA
155}
156
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	/* Readers only: shared lock while walking every bucket */
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			/* One line per target; the hint is annotated */
			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
194
195static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
196 size_t count, loff_t *ppos)
197{
198 char c;
199 int rc;
200
201 rc = get_user(c, buffer);
202 if (rc)
203 return rc;
204
205 if (c != '0')
206 return -EINVAL;
207
a0a3036b 208 cifs_dbg(FYI, "clearing dfs cache\n");
742d8de0
PAS
209
210 down_write(&htable_rw_lock);
54be1f6c 211 flush_cache_ents();
742d8de0 212 up_write(&htable_rw_lock);
54be1f6c
PA
213
214 return count;
215}
216
/* Bind the seq_file show routine to the /proc file. */
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}
221
97a32539
AD
/* /proc file operations for the DFS cache control file */
const struct proc_ops dfscache_proc_ops = {
	.proc_open = dfscache_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = dfscache_proc_write,
};
229
230#ifdef CONFIG_CIFS_DEBUG2
185352ae 231static inline void dump_tgts(const struct cache_entry *ce)
54be1f6c 232{
185352ae 233 struct cache_dfs_tgt *t;
54be1f6c
PA
234
235 cifs_dbg(FYI, "target list:\n");
185352ae
PAS
236 list_for_each_entry(t, &ce->tlist, list) {
237 cifs_dbg(FYI, " %s%s\n", t->name,
11c8b3f8 238 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
54be1f6c
PA
239 }
240}
241
185352ae 242static inline void dump_ce(const struct cache_entry *ce)
54be1f6c 243{
5ff2836e 244 cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
a0a3036b 245 ce->path,
185352ae
PAS
246 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
247 ce->etime.tv_nsec,
5ff2836e 248 ce->hdr_flags, ce->ref_flags,
889c2a70 249 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
185352ae 250 ce->path_consumed,
54be1f6c
PA
251 cache_entry_expired(ce) ? "yes" : "no");
252 dump_tgts(ce);
253}
254
255static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
256{
257 int i;
258
259 cifs_dbg(FYI, "DFS referrals returned by the server:\n");
260 for (i = 0; i < numrefs; i++) {
261 const struct dfs_info3_param *ref = &refs[i];
262
263 cifs_dbg(FYI,
264 "\n"
265 "flags: 0x%x\n"
266 "path_consumed: %d\n"
267 "server_type: 0x%x\n"
268 "ref_flag: 0x%x\n"
269 "path_name: %s\n"
270 "node_name: %s\n"
271 "ttl: %d (%dm)\n",
272 ref->flags, ref->path_consumed, ref->server_type,
273 ref->ref_flag, ref->path_name, ref->node_name,
274 ref->ttl, ref->ttl / 60);
275 }
276}
277#else
278#define dump_tgts(e)
279#define dump_ce(e)
280#define dump_refs(r, n)
281#endif
282
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	/* Single-threaded, freezable workqueue used by the refresh worker */
	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	/* Cached paths are kept in utf8; fall back to the default nls table */
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
320
/*
 * Case-insensitive hash of a path encoded in @cache_cp's charset.
 * Returns 0 and stores the bucket index in @hash, or a negative value if a
 * character cannot be decoded.
 */
static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	/* Walk the string one (possibly multibyte) character at a time */
	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return clen;
		}
		/* Upper-case before hashing so lookups are case-insensitive */
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}
340
54be1f6c 341/* Return target hint of a DFS cache entry */
185352ae 342static inline char *get_tgt_name(const struct cache_entry *ce)
54be1f6c 343{
11c8b3f8 344 struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
54be1f6c 345
185352ae 346 return t ? t->name : ERR_PTR(-ENOENT);
54be1f6c
PA
347}
348
349/* Return expire time out of a new entry's TTL */
350static inline struct timespec64 get_expire_time(int ttl)
351{
352 struct timespec64 ts = {
353 .tv_sec = ttl,
354 .tv_nsec = 0,
355 };
54e4f73c 356 struct timespec64 now;
54be1f6c 357
54e4f73c
SR
358 ktime_get_coarse_real_ts64(&now);
359 return timespec64_add(now, ts);
54be1f6c
PA
360}
361
362/* Allocate a new DFS target */
7548e1da 363static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
54be1f6c 364{
185352ae 365 struct cache_dfs_tgt *t;
54be1f6c 366
742d8de0 367 t = kmalloc(sizeof(*t), GFP_ATOMIC);
54be1f6c
PA
368 if (!t)
369 return ERR_PTR(-ENOMEM);
8d767223 370 t->name = kstrdup(name, GFP_ATOMIC);
185352ae 371 if (!t->name) {
54be1f6c
PA
372 kfree(t);
373 return ERR_PTR(-ENOMEM);
374 }
7548e1da 375 t->path_consumed = path_consumed;
185352ae 376 INIT_LIST_HEAD(&t->list);
54be1f6c
PA
377 return t;
378}
379
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	/* Entry-wide fields come from the first referral */
	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			/* Drop any targets added so far */
			free_tgts(ce);
			return PTR_ERR(t);
		}
		/*
		 * Put the previous hint (if the caller supplied one) at the
		 * head so it is re-selected as tgthint below; everything else
		 * is appended in referral order.
		 */
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	/* First list entry becomes the hint; NULL when @numrefs was 0 */
	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}
420
/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	/*
	 * Take ownership of the first referral's path instead of copying it;
	 * NULL-ing refs[0].path_name prevents a double free by the caller's
	 * free_dfs_info_array().
	 */
	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
445
/* Evict the entry with the earliest expiry time; caller holds htable_rw_lock */
static void remove_oldest_entry_locked(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	/* Full-table scan for the entry closest to (or past) expiry */
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}
475
/* Add a new DFS cache entry */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	/* Make room by evicting the oldest entry when the cache is full */
	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	/*
	 * Track the smallest TTL seen so far; it sets the period of the
	 * delayed refresh worker.
	 */
	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}
516
42caeba7
PA
/* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	/* Compare one multibyte character at a time, case-insensitively */
	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			/* Both undecodable: fall back to a raw byte compare */
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}
54be1f6c 542
42caeba7
PA
543static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
544{
545 struct cache_entry *ce;
54be1f6c 546
42caeba7
PA
547 hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
548 if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
185352ae 549 dump_ce(ce);
42caeba7 550 return ce;
54be1f6c
PA
551 }
552 }
337b8b0e 553 return ERR_PTR(-ENOENT);
2e5de424
PA
554}
555
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match.  Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	/* Count separators: "\\server\share" has exactly two components */
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		/* Two components or fewer: single direct lookup */
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		/* Try the longest remaining prefix, then drop one component */
		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
613
54be1f6c
PA
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	/* Stop the refresh worker before tearing down its data structures */
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
627
/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	struct cache_dfs_tgt *target;
	char *th = NULL;
	int rc;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	/* Save the current hint's name so copy_ref_data() can restore it */
	target = READ_ONCE(ce->tgthint);
	if (target) {
		th = kstrdup(target->name, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	/* Replace the entire target list with the new referral's targets */
	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}
654
c870a8e7
PA
/* Issue a DFS referral request for @path through @ses's IPC tcon */
static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		/* Normalize returned paths to backslash separators */
		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}
54be1f6c 680
54be1f6c
PA
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with acquired lock for reading, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		/* Fresh hit: return with the read lock still held */
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral. The @ses used for performing the I/O could be
	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	/* Success: convert the write lock to the read lock callers expect */
	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}
759
742d8de0
PAS
760/*
761 * Set up a DFS referral from a given cache entry.
762 *
763 * Must be called with htable_rw_lock held.
764 */
765static int setup_referral(const char *path, struct cache_entry *ce,
766 struct dfs_info3_param *ref, const char *target)
54be1f6c
PA
767{
768 int rc;
769
770 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
771
772 memset(ref, 0, sizeof(*ref));
773
8d767223 774 ref->path_name = kstrdup(path, GFP_ATOMIC);
54be1f6c
PA
775 if (!ref->path_name)
776 return -ENOMEM;
777
8d767223 778 ref->node_name = kstrdup(target, GFP_ATOMIC);
54be1f6c
PA
779 if (!ref->node_name) {
780 rc = -ENOMEM;
781 goto err_free_path;
782 }
783
742d8de0 784 ref->path_consumed = ce->path_consumed;
185352ae
PAS
785 ref->ttl = ce->ttl;
786 ref->server_type = ce->srvtype;
5ff2836e
PA
787 ref->ref_flag = ce->ref_flags;
788 ref->flags = ce->hdr_flags;
54be1f6c
PA
789
790 return 0;
791
792err_free_path:
793 kfree(ref->path_name);
794 ref->path_name = NULL;
795 return rc;
796}
797
/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		/* The target hint is always placed first in the result */
		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	/* Unwind the partially built list on allocation failure */
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
842
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	/* On success @ce comes back with htable_rw_lock held for reading */
	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
896
/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
940
54be1f6c
PA
941/**
942 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
943 * without sending any requests to the currently connected server.
944 *
945 * NOTE: This function will neither update a cache entry in case it was
946 * expired, nor create a new cache entry if @path hasn't been found. It heavily
947 * relies on an existing cache entry.
948 *
c870a8e7 949 * @path: canonical DFS path to lookup in DFS referral cache.
54be1f6c
PA
950 * @it: target iterator which contains the target hint to update the cache
951 * entry with.
952 *
953 * Return zero if the target hint was updated successfully, otherwise non-zero.
954 */
1d04a6fe 955void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
54be1f6c 956{
185352ae 957 struct cache_dfs_tgt *t;
1d04a6fe 958 struct cache_entry *ce;
54be1f6c 959
1d04a6fe
PA
960 if (!path || !it)
961 return;
54be1f6c 962
c870a8e7 963 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
54be1f6c 964
11c8b3f8 965 down_read(&htable_rw_lock);
54be1f6c 966
42caeba7 967 ce = lookup_cache_entry(path);
1d04a6fe 968 if (IS_ERR(ce))
742d8de0 969 goto out_unlock;
54be1f6c 970
11c8b3f8 971 t = READ_ONCE(ce->tgthint);
54be1f6c 972
185352ae 973 if (unlikely(!strcasecmp(it->it_name, t->name)))
742d8de0 974 goto out_unlock;
54be1f6c 975
185352ae
PAS
976 list_for_each_entry(t, &ce->tlist, list) {
977 if (!strcasecmp(t->name, it->it_name)) {
11c8b3f8 978 WRITE_ONCE(ce->tgthint, t);
54be1f6c
PA
979 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
980 it->it_name);
981 break;
982 }
983 }
984
742d8de0 985out_unlock:
11c8b3f8 986 up_read(&htable_rw_lock);
54be1f6c
PA
987}
988
/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}
1026
ef605e86
PA
/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	/* Find the separator between the server and share components */
	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	/* Share component must be non-empty */
	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	/* @s now points at the last share character; copy "\server\share" */
	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	/* Whatever follows the separators after the share is the prefix */
	s = target + len;
	return s + strspn(s, seps);
}
1050
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.  On success set to a newly allocated "\\server\share"
 *         string that the caller must free.
 * @prefix: prefix path.  On success set to a newly allocated merged prefix
 *          (or NULL when there is none) that the caller must free.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	/* it_path_consumed must not exceed @path or the pointer arithmetic
	 * below would walk past the end of the string.
	 */
	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	/* Target names are expected to start with a path separator. */
	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	/* Split target into "\\server\share" and its optional prefix path. */
	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		/* room for both prefixes, one separator and the NUL */
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		/* @len is always large enough, so strscpy() cannot truncate
		 * here; @c is the number of characters copied.
		 */
		c = strscpy(ppath, target_ppath, len);
		/* Overwrite the NUL with a separator only when joining two
		 * non-empty prefixes; kzalloc()'s zero-fill keeps the string
		 * terminated so strlcat() below still finds the end.
		 */
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	*share = target_share;
	*prefix = ppath;
	return 0;
}
1106
b6236618
PA
1107static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
1108{
1109 char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
1110 const char *host;
1111 size_t hostlen;
6d740164 1112 struct sockaddr_storage ss;
b6236618
PA
1113 bool match;
1114 int rc;
1115
1116 if (strcasecmp(s1, s2))
1117 return false;
1118
1119 /*
1120 * Resolve share's hostname and check if server address matches. Otherwise just ignore it
1121 * as we could not have upcall to resolve hostname or failed to convert ip address.
1122 */
b6236618
PA
1123 extract_unc_hostname(s1, &host, &hostlen);
1124 scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
1125
6d740164 1126 rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
b6236618
PA
1127 if (rc < 0) {
1128 cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
1129 __func__, (int)hostlen, host);
1130 return true;
1131 }
1132
6d740164
PA
1133 cifs_server_lock(server);
1134 match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1135 cifs_server_unlock(server);
b6236618 1136
b6236618
PA
1137 return match;
1138}
1139
1140/*
1141 * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
1142 * target shares in @refs.
1143 */
8064f711
PA
1144static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
1145 struct dfs_cache_tgt_list *old_tl,
1146 struct dfs_cache_tgt_list *new_tl)
b6236618 1147{
8064f711
PA
1148 struct dfs_cache_tgt_iterator *oit, *nit;
1149
1150 for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
1151 oit = dfs_cache_get_next_tgt(old_tl, oit)) {
1152 for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
1153 nit = dfs_cache_get_next_tgt(new_tl, nit)) {
1154 if (target_share_equal(server,
1155 dfs_cache_get_tgt_name(oit),
1156 dfs_cache_get_tgt_name(nit)))
b6236618
PA
1157 return;
1158 }
1159 }
1160
1161 cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
8064f711 1162 cifs_signal_cifsd_for_reconnect(server, true);
b6236618
PA
1163}
1164
/*
 * Refresh dfs referral of tcon and mark it for reconnect if needed.
 *
 * @path: canonical DFS path of the referral to refresh.
 * @tcon: tcon whose referral is refreshed; its server may be signalled for
 *        reconnect when none of the old targets match the new ones.
 * @force_refresh: refresh even if the cached entry has not expired.
 *
 * Returns 0 or the last get_targets() error.
 */
static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
{
	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
	struct cifs_tcon *ipc = ses->tcon_ipc;
	bool needs_refresh = false;
	struct cache_entry *ce;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/* Snapshot current targets (if any) and decide whether the entry
	 * needs refreshing, all under the cache read lock.
	 */
	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &old_tl);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
	}
	up_read(&htable_rw_lock);

	if (!needs_refresh) {
		rc = 0;
		goto out;
	}

	/* The refresh needs the IPC tcon to issue the referral request;
	 * bail out early if it is not connected.
	 */
	spin_lock(&ipc->tc_lock);
	if (ipc->status != TID_GOOD) {
		spin_unlock(&ipc->tc_lock);
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
		goto out;
	}
	spin_unlock(&ipc->tc_lock);

	/* NOTE(review): on success cache_refresh_path() appears to return
	 * with htable_rw_lock held for read (hence the lone up_read() below)
	 * — confirm against its definition, which is outside this view.
	 */
	ce = cache_refresh_path(xid, ses, path, true);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &new_tl);
		up_read(&htable_rw_lock);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
	}

out:
	free_xid(xid);
	dfs_cache_free_tgts(&old_tl);
	dfs_cache_free_tgts(&new_tl);
	return rc;
}
1215
6916881f 1216static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
c88f7dcd
PA
1217{
1218 struct TCP_Server_Info *server = tcon->ses->server;
1219
1220 mutex_lock(&server->refpath_lock);
6916881f
PA
1221 if (server->leaf_fullpath)
1222 __refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
c88f7dcd 1223 mutex_unlock(&server->refpath_lock);
c88f7dcd
PA
1224 return 0;
1225}
1226
b6236618
PA
1227/**
1228 * dfs_cache_remount_fs - remount a DFS share
1229 *
1230 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
1231 * match any of the new targets, mark it for reconnect.
1232 *
1233 * @cifs_sb: cifs superblock.
1234 *
1235 * Return zero if remounted, otherwise non-zero.
1236 */
1237int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
1238{
1239 struct cifs_tcon *tcon;
c88f7dcd 1240 struct TCP_Server_Info *server;
b6236618
PA
1241
1242 if (!cifs_sb || !cifs_sb->master_tlink)
1243 return -EINVAL;
1244
1245 tcon = cifs_sb_master_tcon(cifs_sb);
c88f7dcd
PA
1246 server = tcon->ses->server;
1247
1248 if (!server->origin_fullpath) {
1249 cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
b6236618
PA
1250 return 0;
1251 }
b6236618
PA
1252 /*
1253 * After reconnecting to a different server, unique ids won't match anymore, so we disable
1254 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
1255 */
1256 cifs_autodisable_serverino(cifs_sb);
1257 /*
1258 * Force the use of prefix path to support failover on DFS paths that resolve to targets
1259 * that have different prefix paths.
1260 */
1261 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
b6236618 1262
6916881f 1263 return refresh_tcon(tcon, true);
b6236618
PA
1264}
1265
/*
 * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
 * from a DFS referral.
 *
 * Two phases: first collect (and reference-pin) the IPC and regular tcons of
 * every DFS-mounted server under cifs_tcp_ses_lock, then refresh each one
 * without that spinlock held, dropping the references afterwards.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon, *ntcon;
	struct list_head tcons;
	struct cifs_ses *ses;

	INIT_LIST_HEAD(&tcons);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		/* only servers with a DFS leaf path are of interest */
		if (!server->leaf_fullpath)
			continue;

		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			/* pin the IPC tcon via its session refcount */
			if (ses->tcon_ipc) {
				ses->ses_count++;
				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
			}
			/* pin every non-IPC tcon via its own refcount */
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
				if (!tcon->ipc) {
					tcon->tc_count++;
					list_add_tail(&tcon->ulist, &tcons);
				}
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
		/* shadows the outer @server on purpose: per-tcon server */
		struct TCP_Server_Info *server = tcon->ses->server;

		list_del_init(&tcon->ulist);

		/* refpath_lock serializes against leaf_fullpath updates */
		mutex_lock(&server->refpath_lock);
		if (server->leaf_fullpath)
			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
		mutex_unlock(&server->refpath_lock);

		/* drop the reference taken above: session ref for IPC tcons,
		 * tcon ref otherwise
		 */
		if (tcon->ipc)
			cifs_put_smb_ses(tcon->ses);
		else
			cifs_put_tcon(tcon);
	}

	/* re-arm ourselves based on the current lowest referral TTL */
	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}