diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index d14002d67b6ae4a9ce21eb36634100c68836d4b6..d180ef3f9d8917d41836af501291e982bb36cbe1 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -60,6 +60,7 @@ struct cifs_sb_info {
 	struct rb_root tlink_tree;
 	struct list_head tcon_sb_link;
 	spinlock_t tlink_tree_lock;
+	struct super_block *vfs_sb;
 	struct tcon_link *master_tlink;
 	struct nls_table *local_nls;
 	unsigned int bsize;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index dac20bbc2786e6d53af8b692d3b4fcb4c9e3a9a6..a86a1fb34e599dc0fdcae84b69654c02db387382 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -780,6 +780,7 @@ static int cifs_set_super(struct super_block *sb, void *data)
 {
 	struct cifs_mnt_data *mnt_data = data;
 	sb->s_fs_info = mnt_data->cifs_sb;
+	mnt_data->cifs_sb->vfs_sb = sb;
 	return set_anon_super(sb, NULL);
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 56afed6d9ef8f665e3f51b28e83e2f2ba0085144..23798ab5d5f1ad31cbf12128cfd90459dc1c3d5e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -407,12 +407,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
 #ifdef CONFIG_CIFS_DFS_UPCALL
 	spin_unlock(&GlobalMid_Lock);
 	sb = cifs_get_tcp_super(server);
-	if (IS_ERR(sb)) {
-		rc = PTR_ERR(sb);
-		cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
-			 __func__, rc);
-		sb = NULL;
-	} else {
+	if (sb) {
 		cifs_sb = CIFS_SB(sb);
 		rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list);
 		if (rc) {
@@ -424,6 +419,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
 		} else {
 			server->nr_targets = dfs_cache_get_nr_tgts(&tgt_list);
 		}
+	} else {
+		cifs_dbg(FYI, "%s: will not do DFS failover\n", __func__);
+		rc = -EINVAL;
 	}
 	cifs_dbg(FYI, "%s: will retry %d target(s)\n", __func__,
 		 server->nr_targets);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 12131a5d507303a5a0125d2c585cd59304476890..2b52750acc072f28e71a37895353c7d53da03f4e 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1036,62 +1036,48 @@ struct super_cb_data {
 	struct super_block *sb;
 };
 
-static void tcp_super_cb(struct super_block *sb, void *arg)
+struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
 {
-	struct super_cb_data *sd = arg;
-	struct TCP_Server_Info *server = sd->data;
-	struct cifs_sb_info *cifs_sb;
+	struct super_block *sb;
+	struct cifs_ses *ses;
 	struct cifs_tcon *tcon;
+	struct cifs_sb_info *cifs_sb;
 
-	if (sd->sb)
-		return;
-
-	cifs_sb = CIFS_SB(sb);
-	tcon = cifs_sb_master_tcon(cifs_sb);
-	if (tcon->ses->server == server)
-		sd->sb = sb;
-}
+	if (!server)
+		return NULL;
 
-static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
-					    void *data)
-{
-	struct super_cb_data sd = {
-		.data = data,
-		.sb = NULL,
-	};
-	struct file_system_type **fs_type = (struct file_system_type *[]) {
-		&cifs_fs_type, &smb3_fs_type, NULL,
-	};
-
-	for (; *fs_type; fs_type++) {
-		iterate_supers_type(*fs_type, f, &sd);
-		if (sd.sb) {
-			/*
-			 * Grab an active reference in order to prevent automounts (DFS links)
-			 * of expiring and then freeing up our cifs superblock pointer while
-			 * we're doing failover.
-			 */
-			cifs_sb_active(sd.sb);
-			return sd.sb;
+	spin_lock(&cifs_tcp_ses_lock);
+	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+			spin_lock(&tcon->sb_list_lock);
+			list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) {
+				sb = cifs_sb->vfs_sb;
+
+				/* Safely increment s_active only if it's not zero.
+				 *
+				 * When s_active == 0, the super block is being deactivated
+				 * and should not be used. This prevents UAF scenarios
+				 * where we might grab a reference to a super block that's
+				 * in the middle of destruction.
+				 */
+				if (!atomic_add_unless(&sb->s_active, 1, 0))
+					continue;
+				spin_unlock(&tcon->sb_list_lock);
+				spin_unlock(&cifs_tcp_ses_lock);
+				return sb;
+			}
+			spin_unlock(&tcon->sb_list_lock);
 		}
 	}
-	return ERR_PTR(-EINVAL);
-}
-
-static void __cifs_put_super(struct super_block *sb)
-{
-	if (!IS_ERR_OR_NULL(sb))
-		cifs_sb_deactive(sb);
-}
+	spin_unlock(&cifs_tcp_ses_lock);
 
-struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
-{
-	return __cifs_get_super(tcp_super_cb, server);
+	return NULL;
 }
 
 void cifs_put_tcp_super(struct super_block *sb)
 {
-	__cifs_put_super(sb);
+	if (!IS_ERR_OR_NULL(sb))
+		cifs_sb_deactive(sb);
 }
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
@@ -1140,29 +1126,42 @@ int match_target_ip(struct TCP_Server_Info *server,
 	return rc;
 }
 
-static void tcon_super_cb(struct super_block *sb, void *arg)
+static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
 {
-	struct super_cb_data *sd = arg;
-	struct cifs_tcon *tcon = sd->data;
 	struct cifs_sb_info *cifs_sb;
+	struct super_block *sb = ERR_PTR(-EINVAL);
+
+	if (!tcon || list_empty(&tcon->cifs_sb_list))
+		return sb;
+
+	spin_lock(&tcon->sb_list_lock);
+	list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) {
+		if (!tcon->dfs_path)
+			continue;
+		if (!cifs_sb->origin_fullpath)
+			continue;
+		if (strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
+			continue;
+		/*
+		 * Use atomic_add_unless to safely increment s_active.
+		 * This ensures we don't add a reference to a super block
+		 * that has s_active == 0 (being destroyed).
+		 */
+		if (!atomic_add_unless(&cifs_sb->vfs_sb->s_active, 1, 0))
+			continue;
+
+		sb = cifs_sb->vfs_sb;
+		break;
+	}
+	spin_unlock(&tcon->sb_list_lock);
 
-	if (sd->sb)
-		return;
-
-	cifs_sb = CIFS_SB(sb);
-	if (tcon->dfs_path && cifs_sb->origin_fullpath &&
-	    !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
-		sd->sb = sb;
-}
-
-static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
-{
-	return __cifs_get_super(tcon_super_cb, tcon);
+	return sb;
 }
 
 static inline void cifs_put_tcon_super(struct super_block *sb)
 {
-	__cifs_put_super(sb);
+	if (!IS_ERR_OR_NULL(sb))
+		cifs_sb_deactive(sb);
 }
 
 #else
 static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
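Note on the refcount pattern used above: both helpers pin a superblock with atomic_add_unless(&sb->s_active, 1, 0), i.e. the reference is taken only while s_active is still non-zero, and a superblock that is already being deactivated is skipped rather than resurrected. The standalone C sketch below (userspace, C11 atomics; struct object, object_try_get() and object_put() are illustrative names, not kernel APIs) shows the same conditional-grab idiom and why it avoids touching an object whose last reference is already gone.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for an object with an "active" reference count,
 * playing the role of super_block::s_active in the patch above. */
struct object {
	atomic_int active;
};

/* Take a reference only if the count has not already dropped to zero,
 * mirroring atomic_add_unless(&sb->s_active, 1, 0): a compare-exchange
 * loop that refuses to revive an object that is being destroyed. */
static bool object_try_get(struct object *obj)
{
	int old = atomic_load(&obj->active);

	while (old != 0) {
		/* On failure, old is reloaded with the current count. */
		if (atomic_compare_exchange_weak(&obj->active, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already dying; do not use it */
}

static void object_put(struct object *obj)
{
	/* Dropping the last reference: real code would deactivate/free here. */
	if (atomic_fetch_sub(&obj->active, 1) == 1)
		printf("last reference dropped, destroying object\n");
}

int main(void)
{
	struct object obj = { .active = 1 };

	if (object_try_get(&obj))	/* succeeds: count goes 1 -> 2 */
		object_put(&obj);	/* back to 1 */

	object_put(&obj);		/* drops the initial reference to 0 */

	if (!object_try_get(&obj))	/* fails: count is 0, object is dead */
		printf("refused to take a reference on a dying object\n");
	return 0;
}

The same reasoning explains the patch's comments: once the count is observed at zero, incrementing it again would hand out a pointer to an object in the middle of teardown, which is exactly the use-after-free the conditional increment prevents.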