cifs: constify get_normalized_path() properly
[linux-2.6-block.git] / fs / cifs / dfs_cache.c
CommitLineData
54be1f6c
PA
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DFS referral cache routines
4 *
5072010c 5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
54be1f6c
PA
6 */
7
54be1f6c
PA
8#include <linux/jhash.h>
9#include <linux/ktime.h>
10#include <linux/slab.h>
97a32539 11#include <linux/proc_fs.h>
54be1f6c
PA
12#include <linux/nls.h>
13#include <linux/workqueue.h>
14#include "cifsglob.h"
15#include "smb2pdu.h"
16#include "smb2proto.h"
17#include "cifsproto.h"
18#include "cifs_debug.h"
19#include "cifs_unicode.h"
20#include "smb2glob.h"
24e0a1ef 21#include "fs_context.h"
54be1f6c
PA
22
23#include "dfs_cache.h"
24
185352ae
PAS
25#define CACHE_HTABLE_SIZE 32
26#define CACHE_MAX_ENTRIES 64
54be1f6c
PA
27
28#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
29 DFSREF_STORAGE_SERVER))
30
185352ae
PAS
31struct cache_dfs_tgt {
32 char *name;
7548e1da 33 int path_consumed;
185352ae 34 struct list_head list;
54be1f6c
PA
35};
36
185352ae
PAS
37struct cache_entry {
38 struct hlist_node hlist;
39 const char *path;
5ff2836e
PA
40 int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
41 int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
42 int srvtype; /* DFS_REREFERRAL_V3.ServerType */
43 int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
185352ae 44 struct timespec64 etime;
5ff2836e 45 int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
185352ae
PAS
46 int numtgts;
47 struct list_head tlist;
48 struct cache_dfs_tgt *tgthint;
54be1f6c
PA
49};
50
185352ae
PAS
51struct vol_info {
52 char *fullpath;
3fa1c6d1
RS
53 spinlock_t ctx_lock;
54 struct smb3_fs_context ctx;
185352ae
PAS
55 char *mntdata;
56 struct list_head list;
06d57378
PAS
57 struct list_head rlist;
58 struct kref refcnt;
54be1f6c
PA
59};
60
185352ae
PAS
61static struct kmem_cache *cache_slab __read_mostly;
62static struct workqueue_struct *dfscache_wq __read_mostly;
54be1f6c 63
185352ae 64static int cache_ttl;
06d57378
PAS
65static DEFINE_SPINLOCK(cache_ttl_lock);
66
185352ae 67static struct nls_table *cache_nlsc;
54be1f6c
PA
68
69/*
70 * Number of entries in the cache
71 */
742d8de0 72static atomic_t cache_count;
185352ae
PAS
73
74static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
742d8de0 75static DECLARE_RWSEM(htable_rw_lock);
54be1f6c 76
185352ae 77static LIST_HEAD(vol_list);
06d57378 78static DEFINE_SPINLOCK(vol_list_lock);
54be1f6c
PA
79
80static void refresh_cache_worker(struct work_struct *work);
81
185352ae
PAS
82static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
83
9cfdb1c1 84static int get_normalized_path(const char *path, const char **npath)
54be1f6c 85{
ff2f7fc0
PAS
86 if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
87 return -EINVAL;
54be1f6c 88
54be1f6c 89 if (*path == '\\') {
9cfdb1c1 90 *npath = path;
54be1f6c 91 } else {
9cfdb1c1
AV
92 char *s = kstrdup(path, GFP_KERNEL);
93 if (!s)
54be1f6c 94 return -ENOMEM;
9cfdb1c1
AV
95 convert_delimiter(s, '\\');
96 *npath = s;
54be1f6c
PA
97 }
98 return 0;
99}
100
/* Release @npath only if get_normalized_path() allocated a converted copy. */
static inline void free_normalized_path(const char *path, const char *npath)
{
	if (npath != path)
		kfree(npath);
}
106
185352ae 107static inline bool cache_entry_expired(const struct cache_entry *ce)
54be1f6c
PA
108{
109 struct timespec64 ts;
110
54e4f73c 111 ktime_get_coarse_real_ts64(&ts);
185352ae 112 return timespec64_compare(&ts, &ce->etime) >= 0;
54be1f6c
PA
113}
114
185352ae 115static inline void free_tgts(struct cache_entry *ce)
54be1f6c 116{
185352ae 117 struct cache_dfs_tgt *t, *n;
54be1f6c 118
185352ae
PAS
119 list_for_each_entry_safe(t, n, &ce->tlist, list) {
120 list_del(&t->list);
121 kfree(t->name);
54be1f6c
PA
122 kfree(t);
123 }
124}
125
185352ae 126static inline void flush_cache_ent(struct cache_entry *ce)
54be1f6c 127{
742d8de0 128 hlist_del_init(&ce->hlist);
199c6bdf 129 kfree(ce->path);
54be1f6c 130 free_tgts(ce);
742d8de0
PAS
131 atomic_dec(&cache_count);
132 kmem_cache_free(cache_slab, ce);
54be1f6c
PA
133}
134
135static void flush_cache_ents(void)
136{
137 int i;
138
185352ae
PAS
139 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
140 struct hlist_head *l = &cache_htable[i];
742d8de0 141 struct hlist_node *n;
185352ae 142 struct cache_entry *ce;
54be1f6c 143
742d8de0
PAS
144 hlist_for_each_entry_safe(ce, n, l, hlist) {
145 if (!hlist_unhashed(&ce->hlist))
146 flush_cache_ent(ce);
147 }
54be1f6c 148 }
54be1f6c
PA
149}
150
151/*
152 * dfs cache /proc file
153 */
154static int dfscache_proc_show(struct seq_file *m, void *v)
155{
742d8de0 156 int i;
185352ae
PAS
157 struct cache_entry *ce;
158 struct cache_dfs_tgt *t;
54be1f6c
PA
159
160 seq_puts(m, "DFS cache\n---------\n");
161
742d8de0
PAS
162 down_read(&htable_rw_lock);
163 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
164 struct hlist_head *l = &cache_htable[i];
54be1f6c 165
742d8de0
PAS
166 hlist_for_each_entry(ce, l, hlist) {
167 if (hlist_unhashed(&ce->hlist))
168 continue;
169
170 seq_printf(m,
5ff2836e
PA
171 "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
172 ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
173 ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
174 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
175 ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
742d8de0
PAS
176
177 list_for_each_entry(t, &ce->tlist, list) {
178 seq_printf(m, " %s%s\n",
179 t->name,
180 ce->tgthint == t ? " (target hint)" : "");
181 }
182 }
54be1f6c 183 }
742d8de0 184 up_read(&htable_rw_lock);
54be1f6c 185
54be1f6c
PA
186 return 0;
187}
188
189static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
190 size_t count, loff_t *ppos)
191{
192 char c;
193 int rc;
194
195 rc = get_user(c, buffer);
196 if (rc)
197 return rc;
198
199 if (c != '0')
200 return -EINVAL;
201
a0a3036b 202 cifs_dbg(FYI, "clearing dfs cache\n");
742d8de0
PAS
203
204 down_write(&htable_rw_lock);
54be1f6c 205 flush_cache_ents();
742d8de0 206 up_write(&htable_rw_lock);
54be1f6c
PA
207
208 return count;
209}
210
211static int dfscache_proc_open(struct inode *inode, struct file *file)
212{
213 return single_open(file, dfscache_proc_show, NULL);
214}
215
97a32539
AD
216const struct proc_ops dfscache_proc_ops = {
217 .proc_open = dfscache_proc_open,
218 .proc_read = seq_read,
219 .proc_lseek = seq_lseek,
220 .proc_release = single_release,
221 .proc_write = dfscache_proc_write,
54be1f6c
PA
222};
223
224#ifdef CONFIG_CIFS_DEBUG2
185352ae 225static inline void dump_tgts(const struct cache_entry *ce)
54be1f6c 226{
185352ae 227 struct cache_dfs_tgt *t;
54be1f6c
PA
228
229 cifs_dbg(FYI, "target list:\n");
185352ae
PAS
230 list_for_each_entry(t, &ce->tlist, list) {
231 cifs_dbg(FYI, " %s%s\n", t->name,
232 ce->tgthint == t ? " (target hint)" : "");
54be1f6c
PA
233 }
234}
235
185352ae 236static inline void dump_ce(const struct cache_entry *ce)
54be1f6c 237{
5ff2836e 238 cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
a0a3036b 239 ce->path,
185352ae
PAS
240 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
241 ce->etime.tv_nsec,
5ff2836e
PA
242 ce->hdr_flags, ce->ref_flags,
243 IS_INTERLINK_SET(ce->hdr_flags) ? "yes" : "no",
185352ae 244 ce->path_consumed,
54be1f6c
PA
245 cache_entry_expired(ce) ? "yes" : "no");
246 dump_tgts(ce);
247}
248
249static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
250{
251 int i;
252
253 cifs_dbg(FYI, "DFS referrals returned by the server:\n");
254 for (i = 0; i < numrefs; i++) {
255 const struct dfs_info3_param *ref = &refs[i];
256
257 cifs_dbg(FYI,
258 "\n"
259 "flags: 0x%x\n"
260 "path_consumed: %d\n"
261 "server_type: 0x%x\n"
262 "ref_flag: 0x%x\n"
263 "path_name: %s\n"
264 "node_name: %s\n"
265 "ttl: %d (%dm)\n",
266 ref->flags, ref->path_consumed, ref->server_type,
267 ref->ref_flag, ref->path_name, ref->node_name,
268 ref->ttl, ref->ttl / 60);
269 }
270}
271#else
272#define dump_tgts(e)
273#define dump_ce(e)
274#define dump_refs(r, n)
275#endif
276
277/**
278 * dfs_cache_init - Initialize DFS referral cache.
279 *
280 * Return zero if initialized successfully, otherwise non-zero.
281 */
282int dfs_cache_init(void)
283{
185352ae 284 int rc;
54be1f6c
PA
285 int i;
286
185352ae
PAS
287 dfscache_wq = alloc_workqueue("cifs-dfscache",
288 WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
289 if (!dfscache_wq)
54be1f6c
PA
290 return -ENOMEM;
291
185352ae
PAS
292 cache_slab = kmem_cache_create("cifs_dfs_cache",
293 sizeof(struct cache_entry), 0,
294 SLAB_HWCACHE_ALIGN, NULL);
295 if (!cache_slab) {
296 rc = -ENOMEM;
297 goto out_destroy_wq;
298 }
299
300 for (i = 0; i < CACHE_HTABLE_SIZE; i++)
301 INIT_HLIST_HEAD(&cache_htable[i]);
54be1f6c 302
742d8de0 303 atomic_set(&cache_count, 0);
185352ae 304 cache_nlsc = load_nls_default();
54be1f6c
PA
305
306 cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
307 return 0;
185352ae
PAS
308
309out_destroy_wq:
310 destroy_workqueue(dfscache_wq);
311 return rc;
54be1f6c
PA
312}
313
314static inline unsigned int cache_entry_hash(const void *data, int size)
315{
316 unsigned int h;
317
318 h = jhash(data, size, 0);
185352ae 319 return h & (CACHE_HTABLE_SIZE - 1);
54be1f6c
PA
320}
321
/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	/*
	 * Guard against a path with no second separator: strchr() would
	 * return NULL and the original unconditional "+ 1" was undefined
	 * behavior followed by a near-NULL dereference.
	 */
	s = strchr(path + 1, sep);
	if (!s)
		return false;
	s++;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}
332
333/* Return target hint of a DFS cache entry */
185352ae 334static inline char *get_tgt_name(const struct cache_entry *ce)
54be1f6c 335{
185352ae 336 struct cache_dfs_tgt *t = ce->tgthint;
54be1f6c 337
185352ae 338 return t ? t->name : ERR_PTR(-ENOENT);
54be1f6c
PA
339}
340
341/* Return expire time out of a new entry's TTL */
342static inline struct timespec64 get_expire_time(int ttl)
343{
344 struct timespec64 ts = {
345 .tv_sec = ttl,
346 .tv_nsec = 0,
347 };
54e4f73c 348 struct timespec64 now;
54be1f6c 349
54e4f73c
SR
350 ktime_get_coarse_real_ts64(&now);
351 return timespec64_add(now, ts);
54be1f6c
PA
352}
353
354/* Allocate a new DFS target */
7548e1da 355static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
54be1f6c 356{
185352ae 357 struct cache_dfs_tgt *t;
54be1f6c 358
742d8de0 359 t = kmalloc(sizeof(*t), GFP_ATOMIC);
54be1f6c
PA
360 if (!t)
361 return ERR_PTR(-ENOMEM);
8d767223 362 t->name = kstrdup(name, GFP_ATOMIC);
185352ae 363 if (!t->name) {
54be1f6c
PA
364 kfree(t);
365 return ERR_PTR(-ENOMEM);
366 }
7548e1da 367 t->path_consumed = path_consumed;
185352ae 368 INIT_LIST_HEAD(&t->list);
54be1f6c
PA
369 return t;
370}
371
372/*
373 * Copy DFS referral information to a cache entry and conditionally update
374 * target hint.
375 */
376static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
185352ae 377 struct cache_entry *ce, const char *tgthint)
54be1f6c
PA
378{
379 int i;
380
185352ae
PAS
381 ce->ttl = refs[0].ttl;
382 ce->etime = get_expire_time(ce->ttl);
383 ce->srvtype = refs[0].server_type;
5ff2836e
PA
384 ce->hdr_flags = refs[0].flags;
385 ce->ref_flags = refs[0].ref_flag;
185352ae 386 ce->path_consumed = refs[0].path_consumed;
54be1f6c
PA
387
388 for (i = 0; i < numrefs; i++) {
185352ae 389 struct cache_dfs_tgt *t;
54be1f6c 390
7548e1da 391 t = alloc_target(refs[i].node_name, refs[i].path_consumed);
54be1f6c
PA
392 if (IS_ERR(t)) {
393 free_tgts(ce);
394 return PTR_ERR(t);
395 }
185352ae
PAS
396 if (tgthint && !strcasecmp(t->name, tgthint)) {
397 list_add(&t->list, &ce->tlist);
54be1f6c
PA
398 tgthint = NULL;
399 } else {
185352ae 400 list_add_tail(&t->list, &ce->tlist);
54be1f6c 401 }
185352ae 402 ce->numtgts++;
54be1f6c
PA
403 }
404
185352ae
PAS
405 ce->tgthint = list_first_entry_or_null(&ce->tlist,
406 struct cache_dfs_tgt, list);
54be1f6c
PA
407
408 return 0;
409}
410
411/* Allocate a new cache entry */
185352ae
PAS
412static struct cache_entry *alloc_cache_entry(const char *path,
413 const struct dfs_info3_param *refs,
414 int numrefs)
54be1f6c 415{
185352ae 416 struct cache_entry *ce;
54be1f6c
PA
417 int rc;
418
185352ae 419 ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
54be1f6c
PA
420 if (!ce)
421 return ERR_PTR(-ENOMEM);
422
8d767223 423 ce->path = kstrdup(path, GFP_KERNEL);
185352ae
PAS
424 if (!ce->path) {
425 kmem_cache_free(cache_slab, ce);
54be1f6c
PA
426 return ERR_PTR(-ENOMEM);
427 }
185352ae
PAS
428 INIT_HLIST_NODE(&ce->hlist);
429 INIT_LIST_HEAD(&ce->tlist);
54be1f6c
PA
430
431 rc = copy_ref_data(refs, numrefs, ce, NULL);
432 if (rc) {
199c6bdf 433 kfree(ce->path);
185352ae 434 kmem_cache_free(cache_slab, ce);
54be1f6c
PA
435 ce = ERR_PTR(rc);
436 }
437 return ce;
438}
439
742d8de0 440/* Must be called with htable_rw_lock held */
54be1f6c
PA
441static void remove_oldest_entry(void)
442{
742d8de0 443 int i;
185352ae
PAS
444 struct cache_entry *ce;
445 struct cache_entry *to_del = NULL;
54be1f6c 446
742d8de0
PAS
447 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
448 struct hlist_head *l = &cache_htable[i];
449
450 hlist_for_each_entry(ce, l, hlist) {
451 if (hlist_unhashed(&ce->hlist))
452 continue;
453 if (!to_del || timespec64_compare(&ce->etime,
454 &to_del->etime) < 0)
455 to_del = ce;
456 }
54be1f6c 457 }
742d8de0 458
54be1f6c 459 if (!to_del) {
a0a3036b 460 cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
742d8de0 461 return;
54be1f6c 462 }
742d8de0 463
a0a3036b 464 cifs_dbg(FYI, "%s: removing entry\n", __func__);
54be1f6c
PA
465 dump_ce(to_del);
466 flush_cache_ent(to_del);
54be1f6c
PA
467}
468
469/* Add a new DFS cache entry */
742d8de0
PAS
470static int add_cache_entry(const char *path, unsigned int hash,
471 struct dfs_info3_param *refs, int numrefs)
54be1f6c 472{
185352ae 473 struct cache_entry *ce;
54be1f6c
PA
474
475 ce = alloc_cache_entry(path, refs, numrefs);
476 if (IS_ERR(ce))
742d8de0 477 return PTR_ERR(ce);
54be1f6c 478
06d57378
PAS
479 spin_lock(&cache_ttl_lock);
480 if (!cache_ttl) {
185352ae
PAS
481 cache_ttl = ce->ttl;
482 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
54be1f6c 483 } else {
185352ae
PAS
484 cache_ttl = min_t(int, cache_ttl, ce->ttl);
485 mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
54be1f6c 486 }
06d57378 487 spin_unlock(&cache_ttl_lock);
54be1f6c 488
742d8de0
PAS
489 down_write(&htable_rw_lock);
490 hlist_add_head(&ce->hlist, &cache_htable[hash]);
491 dump_ce(ce);
492 up_write(&htable_rw_lock);
493
494 return 0;
54be1f6c
PA
495}
496
2e5de424 497static struct cache_entry *__lookup_cache_entry(const char *path)
54be1f6c 498{
185352ae
PAS
499 struct cache_entry *ce;
500 unsigned int h;
54be1f6c
PA
501 bool found = false;
502
185352ae 503 h = cache_entry_hash(path, strlen(path));
54be1f6c 504
742d8de0 505 hlist_for_each_entry(ce, &cache_htable[h], hlist) {
185352ae 506 if (!strcasecmp(path, ce->path)) {
54be1f6c 507 found = true;
185352ae 508 dump_ce(ce);
54be1f6c
PA
509 break;
510 }
511 }
54be1f6c 512
185352ae
PAS
513 if (!found)
514 ce = ERR_PTR(-ENOENT);
2e5de424
PA
515 return ce;
516}
517
518/*
519 * Find a DFS cache entry in hash table and optionally check prefix path against
520 * @path.
521 * Use whole path components in the match.
522 * Must be called with htable_rw_lock held.
523 *
524 * Return ERR_PTR(-ENOENT) if the entry is not found.
525 */
526static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash)
527{
528 struct cache_entry *ce = ERR_PTR(-ENOENT);
529 unsigned int h;
530 int cnt = 0;
531 char *npath;
532 char *s, *e;
533 char sep;
534
8d767223 535 npath = kstrdup(path, GFP_KERNEL);
2e5de424
PA
536 if (!npath)
537 return ERR_PTR(-ENOMEM);
538
539 s = npath;
540 sep = *npath;
541 while ((s = strchr(s, sep)) && ++cnt < 3)
542 s++;
543
544 if (cnt < 3) {
545 h = cache_entry_hash(path, strlen(path));
546 ce = __lookup_cache_entry(path);
547 goto out;
548 }
549 /*
550 * Handle paths that have more than two path components and are a complete prefix of the DFS
551 * referral request path (@path).
552 *
553 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
554 */
555 h = cache_entry_hash(npath, strlen(npath));
556 e = npath + strlen(npath) - 1;
557 while (e > s) {
558 char tmp;
559
560 /* skip separators */
561 while (e > s && *e == sep)
562 e--;
563 if (e == s)
564 goto out;
565
566 tmp = *(e+1);
567 *(e+1) = 0;
568
569 ce = __lookup_cache_entry(npath);
570 if (!IS_ERR(ce)) {
571 h = cache_entry_hash(npath, strlen(npath));
572 break;
573 }
574
575 *(e+1) = tmp;
576 /* backward until separator */
577 while (e > s && *e != sep)
578 e--;
579 }
580out:
185352ae
PAS
581 if (hash)
582 *hash = h;
2e5de424 583 kfree(npath);
185352ae 584 return ce;
54be1f6c
PA
585}
586
06d57378 587static void __vol_release(struct vol_info *vi)
54be1f6c 588{
185352ae
PAS
589 kfree(vi->fullpath);
590 kfree(vi->mntdata);
c741cba2 591 smb3_cleanup_fs_context_contents(&vi->ctx);
54be1f6c
PA
592 kfree(vi);
593}
594
06d57378
PAS
595static void vol_release(struct kref *kref)
596{
597 struct vol_info *vi = container_of(kref, struct vol_info, refcnt);
598
599 spin_lock(&vol_list_lock);
600 list_del(&vi->list);
601 spin_unlock(&vol_list_lock);
602 __vol_release(vi);
603}
604
54be1f6c
PA
605static inline void free_vol_list(void)
606{
185352ae 607 struct vol_info *vi, *nvi;
54be1f6c 608
06d57378
PAS
609 list_for_each_entry_safe(vi, nvi, &vol_list, list) {
610 list_del_init(&vi->list);
611 __vol_release(vi);
612 }
54be1f6c
PA
613}
614
615/**
616 * dfs_cache_destroy - destroy DFS referral cache
617 */
618void dfs_cache_destroy(void)
619{
185352ae
PAS
620 cancel_delayed_work_sync(&refresh_task);
621 unload_nls(cache_nlsc);
54be1f6c 622 free_vol_list();
54be1f6c 623 flush_cache_ents();
742d8de0 624 kmem_cache_destroy(cache_slab);
185352ae 625 destroy_workqueue(dfscache_wq);
54be1f6c
PA
626
627 cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
628}
629
742d8de0
PAS
630/* Must be called with htable_rw_lock held */
631static int __update_cache_entry(const char *path,
632 const struct dfs_info3_param *refs,
633 int numrefs)
54be1f6c
PA
634{
635 int rc;
185352ae 636 struct cache_entry *ce;
54be1f6c
PA
637 char *s, *th = NULL;
638
742d8de0 639 ce = lookup_cache_entry(path, NULL);
54be1f6c 640 if (IS_ERR(ce))
742d8de0 641 return PTR_ERR(ce);
54be1f6c 642
185352ae
PAS
643 if (ce->tgthint) {
644 s = ce->tgthint->name;
8d767223 645 th = kstrdup(s, GFP_ATOMIC);
54be1f6c 646 if (!th)
742d8de0 647 return -ENOMEM;
54be1f6c
PA
648 }
649
650 free_tgts(ce);
185352ae 651 ce->numtgts = 0;
54be1f6c
PA
652
653 rc = copy_ref_data(refs, numrefs, ce, th);
54be1f6c 654
742d8de0 655 kfree(th);
54be1f6c 656
eecfc571 657 return rc;
54be1f6c
PA
658}
659
742d8de0
PAS
660static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
661 const struct nls_table *nls_codepage, int remap,
662 const char *path, struct dfs_info3_param **refs,
663 int *numrefs)
54be1f6c 664{
742d8de0 665 cifs_dbg(FYI, "%s: get an DFS referral for %s\n", __func__, path);
54be1f6c 666
54be1f6c 667 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
742d8de0 668 return -EOPNOTSUPP;
54be1f6c 669 if (unlikely(!nls_codepage))
742d8de0 670 return -EINVAL;
54be1f6c 671
742d8de0
PAS
672 *refs = NULL;
673 *numrefs = 0;
54be1f6c 674
742d8de0
PAS
675 return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
676 nls_codepage, remap);
677}
54be1f6c 678
742d8de0
PAS
679/* Update an expired cache entry by getting a new DFS referral from server */
680static int update_cache_entry(const char *path,
681 const struct dfs_info3_param *refs,
682 int numrefs)
683{
54be1f6c 684
742d8de0
PAS
685 int rc;
686
687 down_write(&htable_rw_lock);
688 rc = __update_cache_entry(path, refs, numrefs);
689 up_write(&htable_rw_lock);
690
691 return rc;
54be1f6c
PA
692}
693
694/*
695 * Find, create or update a DFS cache entry.
696 *
697 * If the entry wasn't found, it will create a new one. Or if it was found but
698 * expired, then it will update the entry accordingly.
699 *
700 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
701 * handle them properly.
702 */
742d8de0
PAS
703static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
704 const struct nls_table *nls_codepage, int remap,
705 const char *path, bool noreq)
54be1f6c
PA
706{
707 int rc;
742d8de0 708 unsigned int hash;
185352ae 709 struct cache_entry *ce;
742d8de0
PAS
710 struct dfs_info3_param *refs = NULL;
711 int numrefs = 0;
712 bool newent = false;
54be1f6c
PA
713
714 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
715
742d8de0 716 down_read(&htable_rw_lock);
54be1f6c 717
742d8de0 718 ce = lookup_cache_entry(path, &hash);
54be1f6c 719
742d8de0
PAS
720 /*
721 * If @noreq is set, no requests will be sent to the server. Just return
722 * the cache entry.
723 */
724 if (noreq) {
725 up_read(&htable_rw_lock);
050d2a8b 726 return PTR_ERR_OR_ZERO(ce);
742d8de0 727 }
54be1f6c 728
742d8de0
PAS
729 if (!IS_ERR(ce)) {
730 if (!cache_entry_expired(ce)) {
731 dump_ce(ce);
732 up_read(&htable_rw_lock);
733 return 0;
54be1f6c 734 }
742d8de0
PAS
735 } else {
736 newent = true;
737 }
54be1f6c 738
742d8de0 739 up_read(&htable_rw_lock);
54be1f6c 740
742d8de0
PAS
741 /*
742 * No entry was found.
743 *
744 * Request a new DFS referral in order to create a new cache entry, or
745 * updating an existing one.
746 */
747 rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
748 &refs, &numrefs);
749 if (rc)
750 return rc;
54be1f6c 751
742d8de0 752 dump_refs(refs, numrefs);
54be1f6c 753
742d8de0
PAS
754 if (!newent) {
755 rc = update_cache_entry(path, refs, numrefs);
756 goto out_free_refs;
757 }
54be1f6c 758
742d8de0 759 if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
a0a3036b
JP
760 cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
761 __func__, CACHE_MAX_ENTRIES);
742d8de0
PAS
762 down_write(&htable_rw_lock);
763 remove_oldest_entry();
764 up_write(&htable_rw_lock);
54be1f6c
PA
765 }
766
742d8de0
PAS
767 rc = add_cache_entry(path, hash, refs, numrefs);
768 if (!rc)
769 atomic_inc(&cache_count);
54be1f6c 770
742d8de0
PAS
771out_free_refs:
772 free_dfs_info_array(refs, numrefs);
773 return rc;
54be1f6c
PA
774}
775
742d8de0
PAS
776/*
777 * Set up a DFS referral from a given cache entry.
778 *
779 * Must be called with htable_rw_lock held.
780 */
781static int setup_referral(const char *path, struct cache_entry *ce,
782 struct dfs_info3_param *ref, const char *target)
54be1f6c
PA
783{
784 int rc;
785
786 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
787
788 memset(ref, 0, sizeof(*ref));
789
8d767223 790 ref->path_name = kstrdup(path, GFP_ATOMIC);
54be1f6c
PA
791 if (!ref->path_name)
792 return -ENOMEM;
793
8d767223 794 ref->node_name = kstrdup(target, GFP_ATOMIC);
54be1f6c
PA
795 if (!ref->node_name) {
796 rc = -ENOMEM;
797 goto err_free_path;
798 }
799
742d8de0 800 ref->path_consumed = ce->path_consumed;
185352ae
PAS
801 ref->ttl = ce->ttl;
802 ref->server_type = ce->srvtype;
5ff2836e
PA
803 ref->ref_flag = ce->ref_flags;
804 ref->flags = ce->hdr_flags;
54be1f6c
PA
805
806 return 0;
807
808err_free_path:
809 kfree(ref->path_name);
810 ref->path_name = NULL;
811 return rc;
812}
813
814/* Return target list of a DFS cache entry */
742d8de0 815static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
54be1f6c
PA
816{
817 int rc;
818 struct list_head *head = &tl->tl_list;
185352ae 819 struct cache_dfs_tgt *t;
54be1f6c
PA
820 struct dfs_cache_tgt_iterator *it, *nit;
821
822 memset(tl, 0, sizeof(*tl));
823 INIT_LIST_HEAD(head);
824
185352ae 825 list_for_each_entry(t, &ce->tlist, list) {
742d8de0 826 it = kzalloc(sizeof(*it), GFP_ATOMIC);
54be1f6c
PA
827 if (!it) {
828 rc = -ENOMEM;
829 goto err_free_it;
830 }
831
8d767223 832 it->it_name = kstrdup(t->name, GFP_ATOMIC);
54be1f6c 833 if (!it->it_name) {
c715f89c 834 kfree(it);
54be1f6c
PA
835 rc = -ENOMEM;
836 goto err_free_it;
837 }
7548e1da 838 it->it_path_consumed = t->path_consumed;
54be1f6c 839
185352ae 840 if (ce->tgthint == t)
54be1f6c
PA
841 list_add(&it->it_list, head);
842 else
843 list_add_tail(&it->it_list, head);
844 }
742d8de0 845
185352ae 846 tl->tl_numtgts = ce->numtgts;
54be1f6c
PA
847
848 return 0;
849
850err_free_it:
851 list_for_each_entry_safe(it, nit, head, it_list) {
852 kfree(it->it_name);
853 kfree(it);
854 }
855 return rc;
856}
857
858/**
859 * dfs_cache_find - find a DFS cache entry
860 *
861 * If it doesn't find the cache entry, then it will get a DFS referral
862 * for @path and create a new entry.
863 *
864 * In case the cache entry exists but expired, it will get a DFS referral
865 * for @path and then update the respective cache entry.
866 *
867 * These parameters are passed down to the get_dfs_refer() call if it
868 * needs to be issued:
869 * @xid: syscall xid
870 * @ses: smb session to issue the request on
871 * @nls_codepage: charset conversion
872 * @remap: path character remapping type
873 * @path: path to lookup in DFS referral cache.
874 *
875 * @ref: when non-NULL, store single DFS referral result in it.
876 * @tgt_list: when non-NULL, store complete DFS target list in it.
877 *
878 * Return zero if the target was found, otherwise non-zero.
879 */
880int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
881 const struct nls_table *nls_codepage, int remap,
882 const char *path, struct dfs_info3_param *ref,
883 struct dfs_cache_tgt_list *tgt_list)
884{
885 int rc;
9cfdb1c1 886 const char *npath;
185352ae 887 struct cache_entry *ce;
54be1f6c 888
54be1f6c
PA
889 rc = get_normalized_path(path, &npath);
890 if (rc)
891 return rc;
892
742d8de0
PAS
893 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
894 if (rc)
895 goto out_free_path;
896
897 down_read(&htable_rw_lock);
898
899 ce = lookup_cache_entry(npath, NULL);
900 if (IS_ERR(ce)) {
901 up_read(&htable_rw_lock);
54be1f6c 902 rc = PTR_ERR(ce);
742d8de0 903 goto out_free_path;
54be1f6c 904 }
742d8de0
PAS
905
906 if (ref)
907 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
908 else
909 rc = 0;
910 if (!rc && tgt_list)
911 rc = get_targets(ce, tgt_list);
912
913 up_read(&htable_rw_lock);
914
915out_free_path:
54be1f6c
PA
916 free_normalized_path(path, npath);
917 return rc;
918}
919
920/**
921 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
922 * the currently connected server.
923 *
924 * NOTE: This function will neither update a cache entry in case it was
925 * expired, nor create a new cache entry if @path hasn't been found. It heavily
926 * relies on an existing cache entry.
927 *
928 * @path: path to lookup in the DFS referral cache.
929 * @ref: when non-NULL, store single DFS referral result in it.
930 * @tgt_list: when non-NULL, store complete DFS target list in it.
931 *
932 * Return 0 if successful.
933 * Return -ENOENT if the entry was not found.
934 * Return non-zero for other errors.
935 */
936int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
937 struct dfs_cache_tgt_list *tgt_list)
938{
939 int rc;
9cfdb1c1 940 const char *npath;
185352ae 941 struct cache_entry *ce;
54be1f6c 942
54be1f6c
PA
943 rc = get_normalized_path(path, &npath);
944 if (rc)
945 return rc;
946
742d8de0
PAS
947 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
948
949 down_read(&htable_rw_lock);
950
951 ce = lookup_cache_entry(npath, NULL);
54be1f6c
PA
952 if (IS_ERR(ce)) {
953 rc = PTR_ERR(ce);
742d8de0 954 goto out_unlock;
54be1f6c
PA
955 }
956
957 if (ref)
742d8de0 958 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
54be1f6c
PA
959 else
960 rc = 0;
961 if (!rc && tgt_list)
742d8de0
PAS
962 rc = get_targets(ce, tgt_list);
963
964out_unlock:
965 up_read(&htable_rw_lock);
54be1f6c 966 free_normalized_path(path, npath);
742d8de0 967
54be1f6c
PA
968 return rc;
969}
970
971/**
972 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
973 *
974 * If it doesn't find the cache entry, then it will get a DFS referral for @path
975 * and create a new entry.
976 *
977 * In case the cache entry exists but expired, it will get a DFS referral
978 * for @path and then update the respective cache entry.
979 *
980 * @xid: syscall id
981 * @ses: smb session
982 * @nls_codepage: charset conversion
983 * @remap: type of character remapping for paths
984 * @path: path to lookup in DFS referral cache.
985 * @it: DFS target iterator
986 *
987 * Return zero if the target hint was updated successfully, otherwise non-zero.
988 */
989int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
990 const struct nls_table *nls_codepage, int remap,
991 const char *path,
992 const struct dfs_cache_tgt_iterator *it)
993{
994 int rc;
9cfdb1c1 995 const char *npath;
185352ae
PAS
996 struct cache_entry *ce;
997 struct cache_dfs_tgt *t;
54be1f6c 998
54be1f6c
PA
999 rc = get_normalized_path(path, &npath);
1000 if (rc)
1001 return rc;
1002
742d8de0
PAS
1003 cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
1004
1005 rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
1006 if (rc)
1007 goto out_free_path;
54be1f6c 1008
742d8de0
PAS
1009 down_write(&htable_rw_lock);
1010
1011 ce = lookup_cache_entry(npath, NULL);
54be1f6c
PA
1012 if (IS_ERR(ce)) {
1013 rc = PTR_ERR(ce);
742d8de0 1014 goto out_unlock;
54be1f6c
PA
1015 }
1016
185352ae 1017 t = ce->tgthint;
54be1f6c 1018
185352ae 1019 if (likely(!strcasecmp(it->it_name, t->name)))
742d8de0 1020 goto out_unlock;
54be1f6c 1021
185352ae
PAS
1022 list_for_each_entry(t, &ce->tlist, list) {
1023 if (!strcasecmp(t->name, it->it_name)) {
1024 ce->tgthint = t;
54be1f6c
PA
1025 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1026 it->it_name);
1027 break;
1028 }
1029 }
1030
742d8de0
PAS
1031out_unlock:
1032 up_write(&htable_rw_lock);
1033out_free_path:
54be1f6c 1034 free_normalized_path(path, npath);
742d8de0 1035
54be1f6c
PA
1036 return rc;
1037}
1038
1039/**
1040 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
1041 * without sending any requests to the currently connected server.
1042 *
1043 * NOTE: This function will neither update a cache entry in case it was
1044 * expired, nor create a new cache entry if @path hasn't been found. It heavily
1045 * relies on an existing cache entry.
1046 *
1047 * @path: path to lookup in DFS referral cache.
1048 * @it: target iterator which contains the target hint to update the cache
1049 * entry with.
1050 *
1051 * Return zero if the target hint was updated successfully, otherwise non-zero.
1052 */
1053int dfs_cache_noreq_update_tgthint(const char *path,
1054 const struct dfs_cache_tgt_iterator *it)
1055{
1056 int rc;
9cfdb1c1 1057 const char *npath;
185352ae
PAS
1058 struct cache_entry *ce;
1059 struct cache_dfs_tgt *t;
54be1f6c 1060
ff2f7fc0 1061 if (!it)
54be1f6c
PA
1062 return -EINVAL;
1063
1064 rc = get_normalized_path(path, &npath);
1065 if (rc)
1066 return rc;
1067
1068 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1069
742d8de0 1070 down_write(&htable_rw_lock);
54be1f6c 1071
742d8de0 1072 ce = lookup_cache_entry(npath, NULL);
54be1f6c
PA
1073 if (IS_ERR(ce)) {
1074 rc = PTR_ERR(ce);
742d8de0 1075 goto out_unlock;
54be1f6c
PA
1076 }
1077
1078 rc = 0;
185352ae 1079 t = ce->tgthint;
54be1f6c 1080
185352ae 1081 if (unlikely(!strcasecmp(it->it_name, t->name)))
742d8de0 1082 goto out_unlock;
54be1f6c 1083
185352ae
PAS
1084 list_for_each_entry(t, &ce->tlist, list) {
1085 if (!strcasecmp(t->name, it->it_name)) {
1086 ce->tgthint = t;
54be1f6c
PA
1087 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
1088 it->it_name);
1089 break;
1090 }
1091 }
1092
742d8de0
PAS
1093out_unlock:
1094 up_write(&htable_rw_lock);
54be1f6c 1095 free_normalized_path(path, npath);
742d8de0 1096
54be1f6c
PA
1097 return rc;
1098}
1099
1100/**
1101 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
1102 * target iterator (@it).
1103 *
1104 * @path: path to lookup in DFS referral cache.
1105 * @it: DFS target iterator.
1106 * @ref: DFS referral pointer to set up the gathered information.
1107 *
1108 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
1109 */
1110int dfs_cache_get_tgt_referral(const char *path,
1111 const struct dfs_cache_tgt_iterator *it,
1112 struct dfs_info3_param *ref)
1113{
1114 int rc;
9cfdb1c1 1115 const char *npath;
185352ae 1116 struct cache_entry *ce;
54be1f6c
PA
1117
1118 if (!it || !ref)
1119 return -EINVAL;
54be1f6c
PA
1120
1121 rc = get_normalized_path(path, &npath);
1122 if (rc)
1123 return rc;
1124
1125 cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);
1126
742d8de0 1127 down_read(&htable_rw_lock);
54be1f6c 1128
742d8de0 1129 ce = lookup_cache_entry(npath, NULL);
54be1f6c
PA
1130 if (IS_ERR(ce)) {
1131 rc = PTR_ERR(ce);
742d8de0 1132 goto out_unlock;
54be1f6c
PA
1133 }
1134
1135 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1136
742d8de0 1137 rc = setup_referral(path, ce, ref, it->it_name);
54be1f6c 1138
742d8de0
PAS
1139out_unlock:
1140 up_read(&htable_rw_lock);
54be1f6c 1141 free_normalized_path(path, npath);
742d8de0 1142
54be1f6c
PA
1143 return rc;
1144}
1145
54be1f6c 1146/**
24e0a1ef 1147 * dfs_cache_add_vol - add a cifs context during mount() that will be handled by
54be1f6c
PA
1148 * DFS cache refresh worker.
1149 *
5072010c 1150 * @mntdata: mount data.
3fa1c6d1 1151 * @ctx: cifs context.
54be1f6c
PA
1152 * @fullpath: origin full path.
1153 *
24e0a1ef 1154 * Return zero if context was set up correctly, otherwise non-zero.
54be1f6c 1155 */
3fa1c6d1 1156int dfs_cache_add_vol(char *mntdata, struct smb3_fs_context *ctx, const char *fullpath)
54be1f6c
PA
1157{
1158 int rc;
185352ae 1159 struct vol_info *vi;
54be1f6c 1160
3fa1c6d1 1161 if (!ctx || !fullpath || !mntdata)
54be1f6c
PA
1162 return -EINVAL;
1163
1164 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1165
1166 vi = kzalloc(sizeof(*vi), GFP_KERNEL);
1167 if (!vi)
1168 return -ENOMEM;
1169
8d767223 1170 vi->fullpath = kstrdup(fullpath, GFP_KERNEL);
185352ae 1171 if (!vi->fullpath) {
54be1f6c
PA
1172 rc = -ENOMEM;
1173 goto err_free_vi;
1174 }
1175
837e3a1b 1176 rc = smb3_fs_context_dup(&vi->ctx, ctx);
54be1f6c
PA
1177 if (rc)
1178 goto err_free_fullpath;
1179
185352ae 1180 vi->mntdata = mntdata;
3fa1c6d1 1181 spin_lock_init(&vi->ctx_lock);
06d57378 1182 kref_init(&vi->refcnt);
5072010c 1183
06d57378 1184 spin_lock(&vol_list_lock);
185352ae 1185 list_add_tail(&vi->list, &vol_list);
06d57378
PAS
1186 spin_unlock(&vol_list_lock);
1187
54be1f6c
PA
1188 return 0;
1189
1190err_free_fullpath:
185352ae 1191 kfree(vi->fullpath);
54be1f6c
PA
1192err_free_vi:
1193 kfree(vi);
1194 return rc;
1195}
1196
06d57378
PAS
1197/* Must be called with vol_list_lock held */
1198static struct vol_info *find_vol(const char *fullpath)
54be1f6c 1199{
185352ae 1200 struct vol_info *vi;
54be1f6c 1201
185352ae
PAS
1202 list_for_each_entry(vi, &vol_list, list) {
1203 cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
1204 if (!strcasecmp(vi->fullpath, fullpath))
54be1f6c
PA
1205 return vi;
1206 }
1207 return ERR_PTR(-ENOENT);
1208}
1209
1210/**
1211 * dfs_cache_update_vol - update vol info in DFS cache after failover
1212 *
1213 * @fullpath: fullpath to look up in volume list.
1214 * @server: TCP ses pointer.
1215 *
1216 * Return zero if volume was updated, otherwise non-zero.
1217 */
1218int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
1219{
185352ae 1220 struct vol_info *vi;
54be1f6c
PA
1221
1222 if (!fullpath || !server)
1223 return -EINVAL;
1224
1225 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1226
06d57378 1227 spin_lock(&vol_list_lock);
54be1f6c
PA
1228 vi = find_vol(fullpath);
1229 if (IS_ERR(vi)) {
06d57378
PAS
1230 spin_unlock(&vol_list_lock);
1231 return PTR_ERR(vi);
54be1f6c 1232 }
06d57378
PAS
1233 kref_get(&vi->refcnt);
1234 spin_unlock(&vol_list_lock);
54be1f6c
PA
1235
1236 cifs_dbg(FYI, "%s: updating volume info\n", __func__);
3fa1c6d1
RS
1237 spin_lock(&vi->ctx_lock);
1238 memcpy(&vi->ctx.dstaddr, &server->dstaddr,
1239 sizeof(vi->ctx.dstaddr));
1240 spin_unlock(&vi->ctx_lock);
54be1f6c 1241
06d57378
PAS
1242 kref_put(&vi->refcnt, vol_release);
1243
1244 return 0;
54be1f6c
PA
1245}
1246
1247/**
1248 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
1249 *
1250 * @fullpath: fullpath to look up in volume list.
1251 */
1252void dfs_cache_del_vol(const char *fullpath)
1253{
185352ae 1254 struct vol_info *vi;
54be1f6c
PA
1255
1256 if (!fullpath || !*fullpath)
1257 return;
1258
1259 cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);
1260
06d57378 1261 spin_lock(&vol_list_lock);
54be1f6c 1262 vi = find_vol(fullpath);
06d57378
PAS
1263 spin_unlock(&vol_list_lock);
1264
77b6ec01
TR
1265 if (!IS_ERR(vi))
1266 kref_put(&vi->refcnt, vol_release);
54be1f6c
PA
1267}
1268
bacd704a
PAS
1269/**
1270 * dfs_cache_get_tgt_share - parse a DFS target
1271 *
7548e1da 1272 * @path: DFS full path
bacd704a
PAS
1273 * @it: DFS target iterator.
1274 * @share: tree name.
bacd704a 1275 * @prefix: prefix path.
bacd704a
PAS
1276 *
1277 * Return zero if target was parsed correctly, otherwise non-zero.
1278 */
7548e1da
PA
1279int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
1280 char **share, char **prefix)
bacd704a 1281{
7548e1da
PA
1282 char *s, sep, *p;
1283 size_t len;
1284 size_t plen1, plen2;
bacd704a 1285
7548e1da 1286 if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
bacd704a
PAS
1287 return -EINVAL;
1288
7548e1da
PA
1289 *share = NULL;
1290 *prefix = NULL;
1291
bacd704a
PAS
1292 sep = it->it_name[0];
1293 if (sep != '\\' && sep != '/')
1294 return -EINVAL;
1295
1296 s = strchr(it->it_name + 1, sep);
1297 if (!s)
1298 return -EINVAL;
1299
7548e1da 1300 /* point to prefix in target node */
bacd704a
PAS
1301 s = strchrnul(s + 1, sep);
1302
7548e1da
PA
1303 /* extract target share */
1304 *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL);
1305 if (!*share)
1306 return -ENOMEM;
bacd704a 1307
7548e1da
PA
1308 /* skip separator */
1309 if (*s)
1310 s++;
1311 /* point to prefix in DFS path */
1312 p = path + it->it_path_consumed;
1313 if (*p == sep)
1314 p++;
1315
1316 /* merge prefix paths from DFS path and target node */
1317 plen1 = it->it_name + strlen(it->it_name) - s;
1318 plen2 = path + strlen(path) - p;
1319 if (plen1 || plen2) {
1320 len = plen1 + plen2 + 2;
1321 *prefix = kmalloc(len, GFP_KERNEL);
1322 if (!*prefix) {
1323 kfree(*share);
1324 *share = NULL;
1325 return -ENOMEM;
1326 }
1327 if (plen1)
1328 scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p);
1329 else
1330 strscpy(*prefix, p, len);
1331 }
bacd704a
PAS
1332 return 0;
1333}
1334
54be1f6c
PA
1335/* Get all tcons that are within a DFS namespace and can be refreshed */
1336static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
1337{
1338 struct cifs_ses *ses;
1339 struct cifs_tcon *tcon;
1340
1341 INIT_LIST_HEAD(head);
1342
1343 spin_lock(&cifs_tcp_ses_lock);
1344 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
1345 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
1346 if (!tcon->need_reconnect && !tcon->need_reopen_files &&
1347 tcon->dfs_path) {
1348 tcon->tc_count++;
1349 list_add_tail(&tcon->ulist, head);
1350 }
1351 }
1352 if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
1353 ses->tcon_ipc->dfs_path) {
1354 list_add_tail(&ses->tcon_ipc->ulist, head);
1355 }
1356 }
1357 spin_unlock(&cifs_tcp_ses_lock);
1358}
1359
185352ae 1360static bool is_dfs_link(const char *path)
5072010c
PAS
1361{
1362 char *s;
1363
1364 s = strchr(path + 1, '\\');
1365 if (!s)
1366 return false;
1367 return !!strchr(s + 1, '\\');
1368}
1369
185352ae 1370static char *get_dfs_root(const char *path)
5072010c
PAS
1371{
1372 char *s, *npath;
1373
1374 s = strchr(path + 1, '\\');
1375 if (!s)
1376 return ERR_PTR(-EINVAL);
1377
1378 s = strchr(s + 1, '\\');
1379 if (!s)
1380 return ERR_PTR(-EINVAL);
1381
1382 npath = kstrndup(path, s - path, GFP_KERNEL);
1383 if (!npath)
1384 return ERR_PTR(-ENOMEM);
1385
1386 return npath;
1387}
1388
/*
 * Release a tcp session reference obtained via get_tcp_server().
 * NOTE(review): the second argument 0 is assumed to mean "not called from
 * the reconnect path" - confirm against cifs_put_tcp_session().
 */
static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}
1393
3fa1c6d1 1394static struct TCP_Server_Info *get_tcp_server(struct smb3_fs_context *ctx)
345c1a4a
PAS
1395{
1396 struct TCP_Server_Info *server;
1397
3fa1c6d1 1398 server = cifs_find_tcp_session(ctx);
345c1a4a
PAS
1399 if (IS_ERR_OR_NULL(server))
1400 return NULL;
1401
1402 spin_lock(&GlobalMid_Lock);
1403 if (server->tcpStatus != CifsGood) {
1404 spin_unlock(&GlobalMid_Lock);
1405 put_tcp_server(server);
1406 return NULL;
1407 }
1408 spin_unlock(&GlobalMid_Lock);
1409
1410 return server;
1411}
1412
5072010c 1413/* Find root SMB session out of a DFS link path */
185352ae
PAS
1414static struct cifs_ses *find_root_ses(struct vol_info *vi,
1415 struct cifs_tcon *tcon,
1416 const char *path)
5072010c
PAS
1417{
1418 char *rpath;
1419 int rc;
742d8de0 1420 struct cache_entry *ce;
5072010c 1421 struct dfs_info3_param ref = {0};
0d4873f9 1422 char *mdata = NULL, *devname = NULL;
5072010c
PAS
1423 struct TCP_Server_Info *server;
1424 struct cifs_ses *ses;
3fa1c6d1 1425 struct smb3_fs_context ctx = {NULL};
5072010c
PAS
1426
1427 rpath = get_dfs_root(path);
1428 if (IS_ERR(rpath))
1429 return ERR_CAST(rpath);
1430
742d8de0
PAS
1431 down_read(&htable_rw_lock);
1432
1433 ce = lookup_cache_entry(rpath, NULL);
1434 if (IS_ERR(ce)) {
1435 up_read(&htable_rw_lock);
1436 ses = ERR_CAST(ce);
1437 goto out;
1438 }
5072010c 1439
742d8de0 1440 rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
5072010c 1441 if (rc) {
742d8de0 1442 up_read(&htable_rw_lock);
5072010c
PAS
1443 ses = ERR_PTR(rc);
1444 goto out;
1445 }
1446
742d8de0
PAS
1447 up_read(&htable_rw_lock);
1448
0d4873f9
RS
1449 mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
1450 &devname);
5072010c
PAS
1451 free_dfs_info_param(&ref);
1452
1453 if (IS_ERR(mdata)) {
1454 ses = ERR_CAST(mdata);
1455 mdata = NULL;
1456 goto out;
1457 }
1458
0d4873f9 1459 rc = cifs_setup_volume_info(&ctx, NULL, devname);
5072010c
PAS
1460
1461 if (rc) {
1462 ses = ERR_PTR(rc);
1463 goto out;
1464 }
1465
3fa1c6d1 1466 server = get_tcp_server(&ctx);
345c1a4a 1467 if (!server) {
5072010c
PAS
1468 ses = ERR_PTR(-EHOSTDOWN);
1469 goto out;
1470 }
1471
3fa1c6d1 1472 ses = cifs_get_smb_ses(server, &ctx);
5072010c
PAS
1473
1474out:
c741cba2 1475 smb3_cleanup_fs_context_contents(&ctx);
5072010c
PAS
1476 kfree(mdata);
1477 kfree(rpath);
0d4873f9 1478 kfree(devname);
5072010c
PAS
1479
1480 return ses;
1481}
1482
54be1f6c 1483/* Refresh DFS cache entry from a given tcon */
742d8de0 1484static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
54be1f6c
PA
1485{
1486 int rc = 0;
1487 unsigned int xid;
9cfdb1c1 1488 const char *path, *npath;
185352ae 1489 struct cache_entry *ce;
742d8de0 1490 struct cifs_ses *root_ses = NULL, *ses;
54be1f6c
PA
1491 struct dfs_info3_param *refs = NULL;
1492 int numrefs = 0;
1493
1494 xid = get_xid();
1495
1496 path = tcon->dfs_path + 1;
1497
1498 rc = get_normalized_path(path, &npath);
1499 if (rc)
742d8de0 1500 goto out_free_xid;
54be1f6c 1501
742d8de0 1502 down_read(&htable_rw_lock);
54be1f6c 1503
742d8de0 1504 ce = lookup_cache_entry(npath, NULL);
54be1f6c
PA
1505 if (IS_ERR(ce)) {
1506 rc = PTR_ERR(ce);
742d8de0
PAS
1507 up_read(&htable_rw_lock);
1508 goto out_free_path;
54be1f6c
PA
1509 }
1510
742d8de0
PAS
1511 if (!cache_entry_expired(ce)) {
1512 up_read(&htable_rw_lock);
1513 goto out_free_path;
1514 }
1515
1516 up_read(&htable_rw_lock);
54be1f6c 1517
5072010c
PAS
1518 /* If it's a DFS Link, then use root SMB session for refreshing it */
1519 if (is_dfs_link(npath)) {
1520 ses = root_ses = find_root_ses(vi, tcon, npath);
1521 if (IS_ERR(ses)) {
1522 rc = PTR_ERR(ses);
1523 root_ses = NULL;
742d8de0 1524 goto out_free_path;
5072010c
PAS
1525 }
1526 } else {
1527 ses = tcon->ses;
1528 }
1529
742d8de0
PAS
1530 rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
1531 &numrefs);
1532 if (!rc) {
1533 dump_refs(refs, numrefs);
1534 rc = update_cache_entry(npath, refs, numrefs);
1535 free_dfs_info_array(refs, numrefs);
54be1f6c 1536 }
5072010c 1537
5072010c
PAS
1538 if (root_ses)
1539 cifs_put_smb_ses(root_ses);
1540
742d8de0 1541out_free_path:
54be1f6c 1542 free_normalized_path(path, npath);
742d8de0
PAS
1543
1544out_free_xid:
1545 free_xid(xid);
1546 return rc;
54be1f6c
PA
1547}
1548
1549/*
1550 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
1551 * referral.
54be1f6c
PA
1552 */
1553static void refresh_cache_worker(struct work_struct *work)
1554{
06d57378 1555 struct vol_info *vi, *nvi;
54be1f6c 1556 struct TCP_Server_Info *server;
06d57378
PAS
1557 LIST_HEAD(vols);
1558 LIST_HEAD(tcons);
54be1f6c 1559 struct cifs_tcon *tcon, *ntcon;
742d8de0 1560 int rc;
54be1f6c 1561
06d57378
PAS
1562 /*
1563 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
1564 * for refreshing.
1565 */
1566 spin_lock(&vol_list_lock);
185352ae 1567 list_for_each_entry(vi, &vol_list, list) {
3fa1c6d1 1568 server = get_tcp_server(&vi->ctx);
345c1a4a 1569 if (!server)
54be1f6c 1570 continue;
345c1a4a 1571
06d57378
PAS
1572 kref_get(&vi->refcnt);
1573 list_add_tail(&vi->rlist, &vols);
1574 put_tcp_server(server);
1575 }
1576 spin_unlock(&vol_list_lock);
1577
1578 /* Walk through all TCONs and refresh any expired cache entry */
1579 list_for_each_entry_safe(vi, nvi, &vols, rlist) {
3fa1c6d1
RS
1580 spin_lock(&vi->ctx_lock);
1581 server = get_tcp_server(&vi->ctx);
1582 spin_unlock(&vi->ctx_lock);
06d57378
PAS
1583
1584 if (!server)
1585 goto next_vol;
1586
1587 get_tcons(server, &tcons);
742d8de0
PAS
1588 rc = 0;
1589
06d57378 1590 list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
742d8de0
PAS
1591 /*
1592 * Skip tcp server if any of its tcons failed to refresh
1593 * (possibily due to reconnects).
1594 */
1595 if (!rc)
1596 rc = refresh_tcon(vi, tcon);
1597
54be1f6c
PA
1598 list_del_init(&tcon->ulist);
1599 cifs_put_tcon(tcon);
1600 }
345c1a4a
PAS
1601
1602 put_tcp_server(server);
06d57378
PAS
1603
1604next_vol:
1605 list_del_init(&vi->rlist);
1606 kref_put(&vi->refcnt, vol_release);
54be1f6c 1607 }
06d57378
PAS
1608
1609 spin_lock(&cache_ttl_lock);
185352ae 1610 queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
06d57378 1611 spin_unlock(&cache_ttl_lock);
54be1f6c 1612}