diff --git a/[refs] b/[refs] index 23d083f6f062..97d474f618e3 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: ec073428281b401f1142cb84b277a5b00c7994c9 +refs/heads/master: 3359b54c8c07338f3a863d1109b42eebccdcf379 diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c index d2208f7c87db..a04a575ad433 100644 --- a/trunk/fs/exec.c +++ b/trunk/fs/exec.c @@ -126,7 +126,8 @@ asmlinkage long sys_uselib(const char __user * library) struct nameidata nd; int error; - error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ); + nd.intent.open.flags = FMODE_READ; + error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd); if (error) goto out; @@ -138,7 +139,7 @@ asmlinkage long sys_uselib(const char __user * library) if (error) goto exit; - file = nameidata_to_filp(&nd, O_RDONLY); + file = dentry_open(nd.dentry, nd.mnt, O_RDONLY); error = PTR_ERR(file); if (IS_ERR(file)) goto out; @@ -166,7 +167,6 @@ asmlinkage long sys_uselib(const char __user * library) out: return error; exit: - release_open_intent(&nd); path_release(&nd); goto out; } @@ -490,7 +490,8 @@ struct file *open_exec(const char *name) int err; struct file *file; - err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ); + nd.intent.open.flags = FMODE_READ; + err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd); file = ERR_PTR(err); if (!err) { @@ -503,7 +504,7 @@ struct file *open_exec(const char *name) err = -EACCES; file = ERR_PTR(err); if (!err) { - file = nameidata_to_filp(&nd, O_RDONLY); + file = dentry_open(nd.dentry, nd.mnt, O_RDONLY); if (!IS_ERR(file)) { err = deny_write_access(file); if (err) { @@ -515,7 +516,6 @@ struct file *open_exec(const char *name) return file; } } - release_open_intent(&nd); path_release(&nd); } goto out; diff --git a/trunk/fs/lockd/host.c b/trunk/fs/lockd/host.c index c4c8601096e0..82c77df81c5f 100644 --- a/trunk/fs/lockd/host.c +++ b/trunk/fs/lockd/host.c @@ -173,10 +173,11 @@ nlm_bind_host(struct nlm_host *host) /* If we've already created an RPC client, check whether * RPC rebind is required + * Note: why keep rebinding if we're on a tcp connection? */ if ((clnt = host->h_rpcclnt) != NULL) { xprt = clnt->cl_xprt; - if (time_after_eq(jiffies, host->h_nextrebind)) { + if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) { clnt->cl_port = 0; host->h_nextrebind = jiffies + NLM_HOST_REBIND; dprintk("lockd: next rebind in %ld jiffies\n", @@ -188,6 +189,7 @@ nlm_bind_host(struct nlm_host *host) goto forgetit; xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout); + xprt->nocong = 1; /* No congestion control for NLM */ xprt->resvport = 1; /* NLM requires a reserved port */ /* Existing NLM servers accept AUTH_UNIX only */ diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c index a1e8b2248014..f7daa5f48949 100644 --- a/trunk/fs/locks.c +++ b/trunk/fs/locks.c @@ -316,22 +316,21 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, /* POSIX-1996 leaves the case l->l_len < 0 undefined; POSIX-2001 defines it. 
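An aside on the l_len semantics this comment describes: POSIX-2001 says a negative l_len locks the byte range [l_start + l_len, l_start - 1], and a zero l_len locks from l_start through EOF and beyond (hence fl_end = OFFSET_MAX below). A minimal userspace sketch of the negative-l_len case (the path /tmp/lockdemo is made up for illustration):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 100,
		.l_len    = -10,	/* POSIX-2001: locks bytes 90..99 */
	};
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("lockdemo");
		return 1;
	}
	close(fd);	/* closing the descriptor drops the lock */
	return 0;
}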
*/ start += l->l_start; - if (start < 0) - return -EINVAL; - fl->fl_end = OFFSET_MAX; - if (l->l_len > 0) { - end = start + l->l_len - 1; - fl->fl_end = end; - } else if (l->l_len < 0) { + end = start + l->l_len - 1; + if (l->l_len < 0) { end = start - 1; - fl->fl_end = end; start += l->l_len; - if (start < 0) - return -EINVAL; } - fl->fl_start = start; /* we record the absolute position */ - if (fl->fl_end < fl->fl_start) + + if (start < 0) + return -EINVAL; + if (l->l_len > 0 && end < 0) return -EOVERFLOW; + + fl->fl_start = start; /* we record the absolute position */ + fl->fl_end = end; + if (l->l_len == 0) + fl->fl_end = OFFSET_MAX; fl->fl_owner = current->files; fl->fl_pid = current->tgid; @@ -363,21 +362,14 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, return -EINVAL; } - start += l->l_start; - if (start < 0) + if (((start += l->l_start) < 0) || (l->l_len < 0)) return -EINVAL; - fl->fl_end = OFFSET_MAX; - if (l->l_len > 0) { - fl->fl_end = start + l->l_len - 1; - } else if (l->l_len < 0) { - fl->fl_end = start - 1; - start += l->l_len; - if (start < 0) - return -EINVAL; - } - fl->fl_start = start; /* we record the absolute position */ - if (fl->fl_end < fl->fl_start) + fl->fl_end = start + l->l_len - 1; + if (l->l_len > 0 && fl->fl_end < 0) return -EOVERFLOW; + fl->fl_start = start; /* we record the absolute position */ + if (l->l_len == 0) + fl->fl_end = OFFSET_MAX; fl->fl_owner = current->files; fl->fl_pid = current->tgid; @@ -837,16 +829,12 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request) /* Detect adjacent or overlapping regions (if same lock type) */ if (request->fl_type == fl->fl_type) { - /* In all comparisons of start vs end, use - * "start - 1" rather than "end + 1". If end - * is OFFSET_MAX, end + 1 will become negative. - */ if (fl->fl_end < request->fl_start - 1) goto next_lock; /* If the next lock in the list has entirely bigger * addresses than the new one, insert the lock here. */ - if (fl->fl_start - 1 > request->fl_end) + if (fl->fl_start > request->fl_end + 1) break; /* If we come here, the new and old lock are of the diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index aaaa81036234..aa62dbda93ac 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -318,18 +317,6 @@ void path_release_on_umount(struct nameidata *nd) mntput_no_expire(nd->mnt); } -/** - * release_open_intent - free up open intent resources - * @nd: pointer to nameidata - */ -void release_open_intent(struct nameidata *nd) -{ - if (nd->intent.open.file->f_dentry == NULL) - put_filp(nd->intent.open.file); - else - fput(nd->intent.open.file); -} - /* * Internal lookup() using the new generic dcache. * SMP-safe @@ -763,7 +750,6 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) struct qstr this; unsigned int c; - nd->flags |= LOOKUP_CONTINUE; err = exec_permission_lite(inode, nd); if (err == -EAGAIN) { err = permission(inode, MAY_EXEC, nd); @@ -816,6 +802,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) if (err < 0) break; } + nd->flags |= LOOKUP_CONTINUE; /* This does the actual lookups.. 
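Back on the __posix_lock_file() hunk above: the comment being deleted explained that the comparison was phrased as "start - 1" because "end + 1" overflows when end is OFFSET_MAX (a lock held to EOF); the restored "request->fl_end + 1" form relies on fl_end never being OFFSET_MAX at that point. A standalone sketch of why the subtraction form is the safe one (loff_t modeled as long long; the function name is illustrative):

#include <stdio.h>
#include <limits.h>

/* Is the next lock entirely above the requested range, with not even
 * adjacency?  Computing req_end + 1 is undefined behaviour when
 * req_end == LLONG_MAX, so the test is phrased with start - 1 instead. */
static int entirely_above(long long next_start, long long req_end)
{
	return next_start - 1 > req_end;	/* safe even if req_end == LLONG_MAX */
}

int main(void)
{
	printf("%d\n", entirely_above(202, 200));	/* 1: byte 201 is a gap */
	printf("%d\n", entirely_above(201, 200));	/* 0: adjacent, may merge */
	printf("%d\n", entirely_above(5, LLONG_MAX));	/* 0: request runs to EOF */
	return 0;
}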
*/ err = do_lookup(nd, &this, &next); if (err) @@ -1065,70 +1052,6 @@ int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata return retval; } -static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags, - struct nameidata *nd, int open_flags, int create_mode) -{ - struct file *filp = get_empty_filp(); - int err; - - if (filp == NULL) - return -ENFILE; - nd->intent.open.file = filp; - nd->intent.open.flags = open_flags; - nd->intent.open.create_mode = create_mode; - err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd); - if (IS_ERR(nd->intent.open.file)) { - if (err == 0) { - err = PTR_ERR(nd->intent.open.file); - path_release(nd); - } - } else if (err != 0) - release_open_intent(nd); - return err; -} - -/** - * path_lookup_open - lookup a file path with open intent - * @name: pointer to file name - * @lookup_flags: lookup intent flags - * @nd: pointer to nameidata - * @open_flags: open intent flags - */ -int path_lookup_open(const char *name, unsigned int lookup_flags, - struct nameidata *nd, int open_flags) -{ - return __path_lookup_intent_open(name, lookup_flags, nd, - open_flags, 0); -} - -/** - * path_lookup_create - lookup a file path with open + create intent - * @name: pointer to file name - * @lookup_flags: lookup intent flags - * @nd: pointer to nameidata - * @open_flags: open intent flags - * @create_mode: create intent flags - */ -int path_lookup_create(const char *name, unsigned int lookup_flags, - struct nameidata *nd, int open_flags, int create_mode) -{ - return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd, - open_flags, create_mode); -} - -int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags, - struct nameidata *nd, int open_flags) -{ - char *tmp = getname(name); - int err = PTR_ERR(tmp); - - if (!IS_ERR(tmp)) { - err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0); - putname(tmp); - } - return err; -} - /* * Restricted form of lookup. Doesn't follow links, single-component only, * needs parent already locked. Doesn't follow mounts. @@ -1493,27 +1416,27 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) */ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) { - int acc_mode, error; + int acc_mode, error = 0; struct path path; struct dentry *dir; int count = 0; acc_mode = ACC_MODE(flag); - /* O_TRUNC implies we need access checks for write permissions */ - if (flag & O_TRUNC) - acc_mode |= MAY_WRITE; - /* Allow the LSM permission hook to distinguish append access from general write access. */ if (flag & O_APPEND) acc_mode |= MAY_APPEND; + /* Fill in the open() intent data */ + nd->intent.open.flags = flag; + nd->intent.open.create_mode = mode; + /* * The simplest case - just a plain lookup. */ if (!(flag & O_CREAT)) { - error = path_lookup_open(pathname, lookup_flags(flag), nd, flag); + error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd); if (error) return error; goto ok; @@ -1522,7 +1445,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) /* * Create - we need to know the parent. 
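For orientation, the calling convention this revert returns to is visible in the exec.c and open_namei() hunks: the caller stuffs the intent into the nameidata by hand, does a plain path_lookup() with LOOKUP_OPEN, and later turns the result into a struct file with dentry_open(). A condensed kernel-style sketch of that idiom (permission checks and error handling trimmed; not a drop-in function):

	struct nameidata nd;
	struct file *file;
	int err;

	nd.intent.open.flags = FMODE_READ;	/* advertise the open intent */
	err = path_lookup(name, LOOKUP_FOLLOW | LOOKUP_OPEN, &nd);
	if (err)
		return ERR_PTR(err);
	file = dentry_open(nd.dentry, nd.mnt, O_RDONLY);
	/* dentry_open() consumes the dentry/mnt references on both the
	 * success and failure paths, so no path_release() afterwards. */
	return file;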
*/ - error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode); + error = path_lookup(pathname, LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE, nd); if (error) return error; @@ -1597,8 +1520,6 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) exit_dput: dput_path(&path, nd); exit: - if (!IS_ERR(nd->intent.open.file)) - release_open_intent(nd); path_release(nd); return error; diff --git a/trunk/fs/nfs/delegation.c b/trunk/fs/nfs/delegation.c index 44135af9894c..4a36839f0bbd 100644 --- a/trunk/fs/nfs/delegation.c +++ b/trunk/fs/nfs/delegation.c @@ -142,7 +142,7 @@ static void nfs_msync_inode(struct inode *inode) /* * Basic procedure for returning a delegation to the server */ -int __nfs_inode_return_delegation(struct inode *inode) +int nfs_inode_return_delegation(struct inode *inode) { struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state; struct nfs_inode *nfsi = NFS_I(inode); diff --git a/trunk/fs/nfs/delegation.h b/trunk/fs/nfs/delegation.h index 8017846b561f..3f6c45a29d6a 100644 --- a/trunk/fs/nfs/delegation.h +++ b/trunk/fs/nfs/delegation.h @@ -25,7 +25,7 @@ struct nfs_delegation { int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); -int __nfs_inode_return_delegation(struct inode *inode); +int nfs_inode_return_delegation(struct inode *inode); int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle); @@ -47,25 +47,11 @@ static inline int nfs_have_delegation(struct inode *inode, int flags) return 1; return 0; } - -static inline int nfs_inode_return_delegation(struct inode *inode) -{ - int err = 0; - - if (NFS_I(inode)->delegation != NULL) - err = __nfs_inode_return_delegation(inode); - return err; -} #else static inline int nfs_have_delegation(struct inode *inode, int flags) { return 0; } - -static inline int nfs_inode_return_delegation(struct inode *inode) -{ - return 0; -} #endif #endif diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c index eb50c19fc253..2df639f143e8 100644 --- a/trunk/fs/nfs/dir.c +++ b/trunk/fs/nfs/dir.c @@ -565,6 +565,8 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } } unlock_kernel(); + if (desc->error < 0) + return desc->error; if (res < 0) return res; return 0; @@ -801,7 +803,6 @@ static int nfs_dentry_delete(struct dentry *dentry) */ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) { - nfs_inode_return_delegation(inode); if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { lock_kernel(); inode->i_nlink--; @@ -915,6 +916,7 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd) static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct dentry *res = NULL; + struct inode *inode = NULL; int error; /* Check that we are indeed trying to open this file */ @@ -928,10 +930,8 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry dentry->d_op = NFS_PROTO(dir)->dentry_ops; /* Let vfs_create() deal with O_EXCL */ - if (nd->intent.open.flags & O_EXCL) { - d_add(dentry, NULL); - goto out; - } + if (nd->intent.open.flags & O_EXCL) + goto no_entry; /* Open the file on the server */ lock_kernel(); @@ -945,30 +945,32 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry if 
(nd->intent.open.flags & O_CREAT) { nfs_begin_data_update(dir); - res = nfs4_atomic_open(dir, dentry, nd); + inode = nfs4_atomic_open(dir, dentry, nd); nfs_end_data_update(dir); } else - res = nfs4_atomic_open(dir, dentry, nd); + inode = nfs4_atomic_open(dir, dentry, nd); unlock_kernel(); - if (IS_ERR(res)) { - error = PTR_ERR(res); + if (IS_ERR(inode)) { + error = PTR_ERR(inode); switch (error) { /* Make a negative dentry */ case -ENOENT: - res = NULL; - goto out; + inode = NULL; + break; /* This turned out not to be a regular file */ - case -EISDIR: - case -ENOTDIR: - goto no_open; case -ELOOP: if (!(nd->intent.open.flags & O_NOFOLLOW)) goto no_open; + /* case -EISDIR: */ /* case -EINVAL: */ default: + res = ERR_PTR(error); goto out; } - } else if (res != NULL) + } +no_entry: + res = d_add_unique(dentry, inode); + if (res != NULL) dentry = res; nfs_renew_times(dentry); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); @@ -1012,7 +1014,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) */ lock_kernel(); verifier = nfs_save_change_attribute(dir); - ret = nfs4_open_revalidate(dir, dentry, openflags, nd); + ret = nfs4_open_revalidate(dir, dentry, openflags); if (!ret) nfs_set_verifier(dentry, verifier); unlock_kernel(); @@ -1135,7 +1137,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, lock_kernel(); nfs_begin_data_update(dir); - error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd); + error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags); nfs_end_data_update(dir); if (error != 0) goto out_err; @@ -1330,7 +1332,6 @@ static int nfs_safe_remove(struct dentry *dentry) nfs_begin_data_update(dir); if (inode != NULL) { - nfs_inode_return_delegation(inode); nfs_begin_data_update(inode); error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); /* The VFS may want to delete this inode */ @@ -1511,11 +1512,9 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, */ if (!new_inode) goto go_ahead; - if (S_ISDIR(new_inode->i_mode)) { - error = -EISDIR; - if (!S_ISDIR(old_inode->i_mode)) - goto out; - } else if (atomic_read(&new_dentry->d_count) > 2) { + if (S_ISDIR(new_inode->i_mode)) + goto out; + else if (atomic_read(&new_dentry->d_count) > 2) { int err; /* copy the target dentry's name */ dentry = d_alloc(new_dentry->d_parent, @@ -1540,8 +1539,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, #endif goto out; } - } else - new_inode->i_nlink--; + } go_ahead: /* @@ -1551,7 +1549,6 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, nfs_wb_all(old_inode); shrink_dcache_parent(old_dentry); } - nfs_inode_return_delegation(old_inode); if (new_inode) d_delete(new_dentry); diff --git a/trunk/fs/nfs/file.c b/trunk/fs/nfs/file.c index 572d8593486f..6bdcfa95de94 100644 --- a/trunk/fs/nfs/file.c +++ b/trunk/fs/nfs/file.c @@ -376,31 +376,22 @@ nfs_file_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t static int do_getlk(struct file *filp, int cmd, struct file_lock *fl) { - struct file_lock *cfl; struct inode *inode = filp->f_mapping->host; int status = 0; lock_kernel(); - /* Try local locking first */ - cfl = posix_test_lock(filp, fl); - if (cfl != NULL) { - locks_copy_lock(fl, cfl); - goto out; - } - - if (nfs_have_delegation(inode, FMODE_READ)) - goto out_noconflict; - - if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) - goto out_noconflict; + /* Use local locking if mounted with "-onolock" */ + if (!(NFS_SERVER(inode)->flags & 
NFS_MOUNT_NONLM)) + status = NFS_PROTO(inode)->lock(filp, cmd, fl); + else { + struct file_lock *cfl = posix_test_lock(filp, fl); - status = NFS_PROTO(inode)->lock(filp, cmd, fl); -out: + fl->fl_type = F_UNLCK; + if (cfl != NULL) + memcpy(fl, cfl, sizeof(*fl)); + } unlock_kernel(); return status; -out_noconflict: - fl->fl_type = F_UNLCK; - goto out; } static int do_vfs_lock(struct file *file, struct file_lock *fl) diff --git a/trunk/fs/nfs/inode.c b/trunk/fs/nfs/inode.c index 65d5ab45ddc5..d4eadeea128e 100644 --- a/trunk/fs/nfs/inode.c +++ b/trunk/fs/nfs/inode.c @@ -358,35 +358,6 @@ nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor) return no_root_error; } -static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, unsigned int timeo, unsigned int retrans) -{ - to->to_initval = timeo * HZ / 10; - to->to_retries = retrans; - if (!to->to_retries) - to->to_retries = 2; - - switch (proto) { - case IPPROTO_TCP: - if (!to->to_initval) - to->to_initval = 60 * HZ; - if (to->to_initval > NFS_MAX_TCP_TIMEOUT) - to->to_initval = NFS_MAX_TCP_TIMEOUT; - to->to_increment = to->to_initval; - to->to_maxval = to->to_initval + (to->to_increment * to->to_retries); - to->to_exponential = 0; - break; - case IPPROTO_UDP: - default: - if (!to->to_initval) - to->to_initval = 11 * HZ / 10; - if (to->to_initval > NFS_MAX_UDP_TIMEOUT) - to->to_initval = NFS_MAX_UDP_TIMEOUT; - to->to_maxval = NFS_MAX_UDP_TIMEOUT; - to->to_exponential = 1; - break; - } -} - /* * Create an RPC client handle. */ @@ -396,12 +367,22 @@ nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data) struct rpc_timeout timeparms; struct rpc_xprt *xprt = NULL; struct rpc_clnt *clnt = NULL; - int proto = (data->flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP; + int tcp = (data->flags & NFS_MOUNT_TCP); - nfs_init_timeout_values(&timeparms, proto, data->timeo, data->retrans); + /* Initialize timeout values */ + timeparms.to_initval = data->timeo * HZ / 10; + timeparms.to_retries = data->retrans; + timeparms.to_maxval = tcp ? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT; + timeparms.to_exponential = 1; + + if (!timeparms.to_initval) + timeparms.to_initval = (tcp ? 600 : 11) * HZ / 10; + if (!timeparms.to_retries) + timeparms.to_retries = 5; /* create transport and client */ - xprt = xprt_create_proto(proto, &server->addr, &timeparms); + xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP, + &server->addr, &timeparms); if (IS_ERR(xprt)) { dprintk("%s: cannot create RPC transport. 
Error = %ld\n", __FUNCTION__, PTR_ERR(xprt)); @@ -595,6 +576,7 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) { NFS_MOUNT_SOFT, ",soft", ",hard" }, { NFS_MOUNT_INTR, ",intr", "" }, { NFS_MOUNT_POSIX, ",posix", "" }, + { NFS_MOUNT_TCP, ",tcp", ",udp" }, { NFS_MOUNT_NOCTO, ",nocto", "" }, { NFS_MOUNT_NOAC, ",noac", "" }, { NFS_MOUNT_NONLM, ",nolock", ",lock" }, @@ -603,8 +585,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) }; struct proc_nfs_info *nfs_infop; struct nfs_server *nfss = NFS_SB(mnt->mnt_sb); - char buf[12]; - char *proto; seq_printf(m, ",v%d", nfss->rpc_ops->version); seq_printf(m, ",rsize=%d", nfss->rsize); @@ -623,18 +603,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) else seq_puts(m, nfs_infop->nostr); } - switch (nfss->client->cl_xprt->prot) { - case IPPROTO_TCP: - proto = "tcp"; - break; - case IPPROTO_UDP: - proto = "udp"; - break; - default: - snprintf(buf, sizeof(buf), "%u", nfss->client->cl_xprt->prot); - proto = buf; - } - seq_printf(m, ",proto=%s", proto); seq_puts(m, ",addr="); seq_escape(m, nfss->hostname, " \t\n\\"); return 0; @@ -853,11 +821,6 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr) filemap_fdatawait(inode->i_mapping); nfs_wb_all(inode); } - /* - * Return any delegations if we're going to change ACLs - */ - if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) - nfs_inode_return_delegation(inode); error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr); if (error == 0) nfs_refresh_inode(inode, &fattr); @@ -1676,7 +1639,8 @@ static void nfs4_clear_inode(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); /* If we are holding a delegation, return it! */ - nfs_inode_return_delegation(inode); + if (nfsi->delegation != NULL) + nfs_inode_return_delegation(inode); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); /* Now clear out any remaining state */ @@ -1705,7 +1669,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, struct rpc_clnt *clnt = NULL; struct rpc_timeout timeparms; rpc_authflavor_t authflavour; - int err = -EIO; + int proto, err = -EIO; sb->s_blocksize_bits = 0; sb->s_blocksize = 0; @@ -1723,8 +1687,30 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, server->acdirmax = data->acdirmax*HZ; server->rpc_ops = &nfs_v4_clientops; + /* Initialize timeout values */ + + timeparms.to_initval = data->timeo * HZ / 10; + timeparms.to_retries = data->retrans; + timeparms.to_exponential = 1; + if (!timeparms.to_retries) + timeparms.to_retries = 5; - nfs_init_timeout_values(&timeparms, data->proto, data->timeo, data->retrans); + proto = data->proto; + /* Which IP protocol do we use? 
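A note on the units in the timeout hunks above: the timeo= and retrans= mount options are in tenths of a second, hence the * HZ / 10 scaling, with defaults of 600 (60 s) for TCP and 11 (1.1 s) for UDP when timeo is unset. A tiny userspace sketch of the arithmetic (HZ is hardcoded here purely for illustration; in the kernel it is a build-time constant):

#include <stdio.h>

#define HZ 1000				/* illustrative value only */

int main(void)
{
	unsigned int timeo = 600;	/* e.g. mount -o timeo=600 */
	unsigned long to_initval = timeo * HZ / 10;

	printf("to_initval = %lu jiffies (%.1f s)\n",
	       to_initval, (double)to_initval / HZ);
	return 0;
}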
*/ + switch (proto) { + case IPPROTO_TCP: + timeparms.to_maxval = RPC_MAX_TCP_TIMEOUT; + if (!timeparms.to_initval) + timeparms.to_initval = 600 * HZ / 10; + break; + case IPPROTO_UDP: + timeparms.to_maxval = RPC_MAX_UDP_TIMEOUT; + if (!timeparms.to_initval) + timeparms.to_initval = 11 * HZ / 10; + break; + default: + return -EINVAL; + } clp = nfs4_get_client(&server->addr.sin_addr); if (!clp) { @@ -1749,7 +1735,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, down_write(&clp->cl_sem); if (IS_ERR(clp->cl_rpcclient)) { - xprt = xprt_create_proto(data->proto, &server->addr, &timeparms); + xprt = xprt_create_proto(proto, &server->addr, &timeparms); if (IS_ERR(xprt)) { up_write(&clp->cl_sem); err = PTR_ERR(xprt); diff --git a/trunk/fs/nfs/nfs3proc.c b/trunk/fs/nfs/nfs3proc.c index df80477c5af4..edc95514046d 100644 --- a/trunk/fs/nfs/nfs3proc.c +++ b/trunk/fs/nfs/nfs3proc.c @@ -299,7 +299,7 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata) */ static int nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags, struct nameidata *nd) + int flags) { struct nfs_fh fhandle; struct nfs_fattr fattr; diff --git a/trunk/fs/nfs/nfs4_fs.h b/trunk/fs/nfs/nfs4_fs.h index 78a53f5a9f18..ec1a22d7b876 100644 --- a/trunk/fs/nfs/nfs4_fs.h +++ b/trunk/fs/nfs/nfs4_fs.h @@ -92,51 +92,26 @@ struct nfs4_client { unsigned char cl_id_uniquifier; }; -/* - * struct rpc_sequence ensures that RPC calls are sent in the exact - * order that they appear on the list. - */ -struct rpc_sequence { - struct rpc_wait_queue wait; /* RPC call delay queue */ - spinlock_t lock; /* Protects the list */ - struct list_head list; /* Defines sequence of RPC calls */ -}; - -#define NFS_SEQID_CONFIRMED 1 -struct nfs_seqid_counter { - struct rpc_sequence *sequence; - int flags; - u32 counter; -}; - -struct nfs_seqid { - struct nfs_seqid_counter *sequence; - struct list_head list; -}; - -static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status) -{ - if (seqid_mutating_err(-status)) - seqid->flags |= NFS_SEQID_CONFIRMED; -} - /* * NFS4 state_owners and lock_owners are simply labels for ordered * sequences of RPC calls. Their sole purpose is to provide once-only * semantics by allowing the server to identify replayed requests. + * + * The ->so_sema is held during all state_owner seqid-mutating operations: + * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize + * so_seqid. */ struct nfs4_state_owner { - spinlock_t so_lock; struct list_head so_list; /* per-clientid list of state_owners */ struct nfs4_client *so_client; u32 so_id; /* 32-bit identifier, unique */ + struct semaphore so_sema; + u32 so_seqid; /* protected by so_sema */ atomic_t so_count; struct rpc_cred *so_cred; /* Associated cred */ struct list_head so_states; struct list_head so_delegations; - struct nfs_seqid_counter so_seqid; - struct rpc_sequence so_sequence; }; /* @@ -157,7 +132,7 @@ struct nfs4_lock_state { fl_owner_t ls_owner; /* POSIX lock owner */ #define NFS_LOCK_INITIALIZED 1 int ls_flags; - struct nfs_seqid_counter ls_seqid; + u32 ls_seqid; u32 ls_id; nfs4_stateid ls_stateid; atomic_t ls_count; @@ -178,6 +153,7 @@ struct nfs4_state { struct inode *inode; /* Pointer to the inode */ unsigned long flags; /* Do we hold any locks? 
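The structural change above is the heart of this revert: the nfs_seqid_counter/rpc_sequence machinery is replaced by a bare u32 seqid guarded by a semaphore, so every seqid-mutating operation (OPEN, OPEN_DOWNGRADE, CLOSE) is fully serialized per state_owner. The restored pattern, in outline (a sketch of the call sites that follow in nfs4proc.c, not new code):

	down(&sp->so_sema);			/* one seqid-mutating RPC at a time */
	o_arg.seqid = sp->so_seqid;		/* snapshot under the semaphore */
	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
	nfs4_increment_seqid(status, sp);	/* bump on success and on
						 * seqid-mutating errors */
	up(&sp->so_sema);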
*/ + struct semaphore lock_sema; /* Serializes file locking operations */ spinlock_t state_lock; /* Protects the lock_states list */ nfs4_stateid stateid; @@ -215,8 +191,8 @@ extern int nfs4_proc_setclientid_confirm(struct nfs4_client *); extern int nfs4_proc_async_renew(struct nfs4_client *); extern int nfs4_proc_renew(struct nfs4_client *); extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode); -extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); -extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); +extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); +extern int nfs4_open_revalidate(struct inode *, struct dentry *, int); extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops; @@ -248,17 +224,12 @@ extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state extern void nfs4_put_open_state(struct nfs4_state *); extern void nfs4_close_state(struct nfs4_state *, mode_t); extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode); +extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp); extern void nfs4_schedule_state_recovery(struct nfs4_client *); -extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); +extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls); extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); -extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter); -extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task); -extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid); -extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); -extern void nfs_free_seqid(struct nfs_seqid *seqid); - extern const nfs4_stateid zero_stateid; /* nfs4xdr.c */ diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index 9c1da34036aa..9701ca8c9428 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -47,7 +47,6 @@ #include #include #include -#include #include "nfs4_fs.h" #include "delegation.h" @@ -57,11 +56,10 @@ #define NFS4_POLL_RETRY_MIN (1*HZ) #define NFS4_POLL_RETRY_MAX (15*HZ) -static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); -static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *); +static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *); static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); -static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); +static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception); extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus); extern struct rpc_procinfo nfs4_procedures[]; @@ -191,28 +189,12 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf nfsi->change_attr = cinfo->after; } -/* Helper for asynchronous RPC calls */ -static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin, - rpc_action 
tk_exit, void *calldata) -{ - struct rpc_task *task; - - if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC))) - return -ENOMEM; - - task->tk_calldata = calldata; - task->tk_action = tk_begin; - rpc_execute(task); - return 0; -} - static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) { struct inode *inode = state->inode; open_flags &= (FMODE_READ|FMODE_WRITE); /* Protect against nfs4_find_state() */ - spin_lock(&state->owner->so_lock); spin_lock(&inode->i_lock); state->state |= open_flags; /* NB! List reordering - see the reclaim code for why. */ @@ -222,12 +204,12 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, state->nreaders++; memcpy(&state->stateid, stateid, sizeof(state->stateid)); spin_unlock(&inode->i_lock); - spin_unlock(&state->owner->so_lock); } /* * OPEN_RECLAIM: * reclaim state on the server after a reboot. + * Assumes caller is holding the sp->so_sem */ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) { @@ -236,6 +218,7 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st struct nfs_delegation *delegation = NFS_I(inode)->delegation; struct nfs_openargs o_arg = { .fh = NFS_FH(inode), + .seqid = sp->so_seqid, .id = sp->so_id, .open_flags = state->state, .clientid = server->nfs4_state->cl_clientid, @@ -262,13 +245,8 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st } o_arg.u.delegation_type = delegation->type; } - o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); - if (o_arg.seqid == NULL) - return -ENOMEM; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - /* Confirm the sequence as being established */ - nfs_confirm_seqid(&sp->so_seqid, status); - nfs_increment_open_seqid(status, o_arg.seqid); + nfs4_increment_seqid(status, sp); if (status == 0) { memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid)); if (o_res.delegation_type != 0) { @@ -278,7 +256,6 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st nfs_async_inode_return_delegation(inode, &o_res.stateid); } } - nfs_free_seqid(o_arg.seqid); clear_bit(NFS_DELEGATED_STATE, &state->flags); /* Ensure we update the inode attributes */ NFS_CACHEINV(inode); @@ -325,35 +302,23 @@ static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state }; int status = 0; + down(&sp->so_sema); if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) goto out; if (state->state == 0) goto out; - arg.seqid = nfs_alloc_seqid(&sp->so_seqid); - status = -ENOMEM; - if (arg.seqid == NULL) - goto out; + arg.seqid = sp->so_seqid; arg.open_flags = state->state; memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data)); status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - nfs_increment_open_seqid(status, arg.seqid); - if (status != 0) - goto out_free; - if(res.rflags & NFS4_OPEN_RESULT_CONFIRM) { - status = _nfs4_proc_open_confirm(server->client, NFS_FH(inode), - sp, &res.stateid, arg.seqid); - if (status != 0) - goto out_free; - } - nfs_confirm_seqid(&sp->so_seqid, 0); + nfs4_increment_seqid(status, sp); if (status >= 0) { memcpy(state->stateid.data, res.stateid.data, sizeof(state->stateid.data)); clear_bit(NFS_DELEGATED_STATE, &state->flags); } -out_free: - nfs_free_seqid(arg.seqid); out: + up(&sp->so_sema); dput(parent); return status; } @@ -380,11 +345,11 @@ int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) return err; } -static int 
_nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid) +static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid) { struct nfs_open_confirmargs arg = { .fh = fh, - .seqid = seqid, + .seqid = sp->so_seqid, .stateid = *stateid, }; struct nfs_open_confirmres res; @@ -397,9 +362,7 @@ static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *f int status; status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR); - /* Confirm the sequence as being established */ - nfs_confirm_seqid(&sp->so_seqid, status); - nfs_increment_open_seqid(status, seqid); + nfs4_increment_seqid(status, sp); if (status >= 0) memcpy(stateid, &res.stateid, sizeof(*stateid)); return status; @@ -417,37 +380,21 @@ static int _nfs4_proc_open(struct inode *dir, struct nfs4_state_owner *sp, stru int status; /* Update sequence id. The caller must serialize! */ + o_arg->seqid = sp->so_seqid; o_arg->id = sp->so_id; o_arg->clientid = sp->so_client->cl_clientid; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - if (status == 0) { - /* OPEN on anything except a regular file is disallowed in NFSv4 */ - switch (o_res->f_attr->mode & S_IFMT) { - case S_IFREG: - break; - case S_IFLNK: - status = -ELOOP; - break; - case S_IFDIR: - status = -EISDIR; - break; - default: - status = -ENOTDIR; - } - } - - nfs_increment_open_seqid(status, o_arg->seqid); + nfs4_increment_seqid(status, sp); if (status != 0) goto out; update_changeattr(dir, &o_res->cinfo); if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(server->client, &o_res->fh, - sp, &o_res->stateid, o_arg->seqid); + sp, &o_res->stateid); if (status != 0) goto out; } - nfs_confirm_seqid(&sp->so_seqid, 0); if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr); out: @@ -518,10 +465,6 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st set_bit(NFS_DELEGATED_STATE, &state->flags); goto out; } - o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); - status = -ENOMEM; - if (o_arg.seqid == NULL) - goto out; status = _nfs4_proc_open(dir, sp, &o_arg, &o_res); if (status != 0) goto out_nodeleg; @@ -547,7 +490,6 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res); } out_nodeleg: - nfs_free_seqid(o_arg.seqid); clear_bit(NFS_DELEGATED_STATE, &state->flags); out: dput(parent); @@ -622,6 +564,7 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__); goto out_err; } + down(&sp->so_sema); state = nfs4_get_open_state(inode, sp); if (state == NULL) goto out_err; @@ -646,6 +589,7 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred set_bit(NFS_DELEGATED_STATE, &state->flags); update_open_stateid(state, &delegation->stateid, open_flags); out_ok: + up(&sp->so_sema); nfs4_put_state_owner(sp); up_read(&nfsi->rwsem); up_read(&clp->cl_sem); @@ -656,12 +600,11 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred if (sp != NULL) { if (state != NULL) nfs4_put_open_state(state); + up(&sp->so_sema); nfs4_put_state_owner(sp); } up_read(&nfsi->rwsem); up_read(&clp->cl_sem); - if (err != -EACCES) - nfs_inode_return_delegation(inode); return err; } @@ -722,10 +665,8 @@ static 
int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st } else o_arg.u.attrs = sattr; /* Serialization for the sequence id */ + down(&sp->so_sema); - o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); - if (o_arg.seqid == NULL) - return -ENOMEM; status = _nfs4_proc_open(dir, sp, &o_arg, &o_res); if (status != 0) goto out_err; @@ -740,7 +681,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st update_open_stateid(state, &o_res.stateid, flags); if (o_res.delegation_type != 0) nfs_inode_set_delegation(inode, cred, &o_res); - nfs_free_seqid(o_arg.seqid); + up(&sp->so_sema); nfs4_put_state_owner(sp); up_read(&clp->cl_sem); *res = state; @@ -749,7 +690,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st if (sp != NULL) { if (state != NULL) nfs4_put_open_state(state); - nfs_free_seqid(o_arg.seqid); + up(&sp->so_sema); nfs4_put_state_owner(sp); } /* Note: clp->cl_sem must be released before nfs4_put_open_state()! */ @@ -777,7 +718,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, * It is actually a sign of a bug on the client or on the server. * * If we receive a BAD_SEQID error in the particular case of - * doing an OPEN, we assume that nfs_increment_open_seqid() will + * doing an OPEN, we assume that nfs4_increment_seqid() will * have unhashed the old state_owner for us, and that we can * therefore safely retry using a new one. We should still warn * the user though... @@ -787,16 +728,6 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, exception.retry = 1; continue; } - /* - * BAD_STATEID on OPEN means that the server cancelled our - * state before it received the OPEN_CONFIRM. - * Recover by retrying the request as per the discussion - * on Page 181 of RFC3530. - */ - if (status == -NFS4ERR_BAD_STATEID) { - exception.retry = 1; - continue; - } res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), status, &exception)); } while (exception.retry); @@ -858,27 +789,17 @@ struct nfs4_closedata { struct nfs_closeres res; }; -static void nfs4_free_closedata(struct nfs4_closedata *calldata) -{ - struct nfs4_state *state = calldata->state; - struct nfs4_state_owner *sp = state->owner; - - nfs4_put_open_state(calldata->state); - nfs_free_seqid(calldata->arg.seqid); - nfs4_put_state_owner(sp); - kfree(calldata); -} - static void nfs4_close_done(struct rpc_task *task) { struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata; struct nfs4_state *state = calldata->state; + struct nfs4_state_owner *sp = state->owner; struct nfs_server *server = NFS_SERVER(calldata->inode); /* hmm. we are done with the inode, and in the process of freeing * the state_owner. 
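Regarding the BAD_SEQID retry noted a few hunks up: retries throughout this file go through the nfs4_handle_exception() wrapper, which sleeps or triggers recovery as needed and sets exception.retry when the inner call should simply be reissued; this is exactly the shape nfs4_proc_unlck() takes later in this diff. Schematically (the inner _nfs4_do_something() name is a placeholder):

	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_do_something(state, request),	/* placeholder */
				&exception);
	} while (exception.retry);
	return err;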
we keep this around to process errors */ - nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid); + nfs4_increment_seqid(task->tk_status, sp); switch (task->tk_status) { case 0: memcpy(&state->stateid, &calldata->res.stateid, @@ -896,46 +817,24 @@ static void nfs4_close_done(struct rpc_task *task) } } state->state = calldata->arg.open_flags; - nfs4_free_closedata(calldata); + nfs4_put_open_state(state); + up(&sp->so_sema); + nfs4_put_state_owner(sp); + up_read(&server->nfs4_state->cl_sem); + kfree(calldata); } -static void nfs4_close_begin(struct rpc_task *task) +static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *calldata) { - struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata; - struct nfs4_state *state = calldata->state; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], .rpc_argp = &calldata->arg, .rpc_resp = &calldata->res, - .rpc_cred = state->owner->so_cred, + .rpc_cred = calldata->state->owner->so_cred, }; - int mode = 0; - int status; - - status = nfs_wait_on_sequence(calldata->arg.seqid, task); - if (status != 0) - return; - /* Don't reorder reads */ - smp_rmb(); - /* Recalculate the new open mode in case someone reopened the file - * while we were waiting in line to be scheduled. - */ - if (state->nreaders != 0) - mode |= FMODE_READ; - if (state->nwriters != 0) - mode |= FMODE_WRITE; - if (test_bit(NFS_DELEGATED_STATE, &state->flags)) - state->state = mode; - if (mode == state->state) { - nfs4_free_closedata(calldata); - task->tk_exit = NULL; - rpc_exit(task, 0); - return; - } - if (mode != 0) + if (calldata->arg.open_flags != 0) msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; - calldata->arg.open_flags = mode; - rpc_call_setup(task, &msg, 0); + return rpc_call_async(clnt, &msg, 0, nfs4_close_done, calldata); } /* @@ -952,52 +851,39 @@ static void nfs4_close_begin(struct rpc_task *task) int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode) { struct nfs4_closedata *calldata; - int status = -ENOMEM; + int status; - calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); + /* Tell caller we're done */ + if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { + state->state = mode; + return 0; + } + calldata = (struct nfs4_closedata *)kmalloc(sizeof(*calldata), GFP_KERNEL); if (calldata == NULL) - goto out; + return -ENOMEM; calldata->inode = inode; calldata->state = state; calldata->arg.fh = NFS_FH(inode); - calldata->arg.stateid = &state->stateid; /* Serialization for the sequence id */ - calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); - if (calldata->arg.seqid == NULL) - goto out_free_calldata; - - status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_close_begin, - nfs4_close_done, calldata); - if (status == 0) - goto out; - - nfs_free_seqid(calldata->arg.seqid); -out_free_calldata: - kfree(calldata); -out: - return status; -} - -static void nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state) -{ - struct file *filp; - - filp = lookup_instantiate_filp(nd, dentry, NULL); - if (!IS_ERR(filp)) { - struct nfs_open_context *ctx; - ctx = (struct nfs_open_context *)filp->private_data; - ctx->state = state; - } else - nfs4_close_state(state, nd->intent.open.flags); + calldata->arg.seqid = state->owner->so_seqid; + calldata->arg.open_flags = mode; + memcpy(&calldata->arg.stateid, &state->stateid, + sizeof(calldata->arg.stateid)); + status = nfs4_close_call(NFS_SERVER(inode)->client, calldata); + /* + * Return 
-EINPROGRESS on success in order to indicate to the + * caller that an asynchronous RPC call has been launched, and + * that it will release the semaphores on completion. + */ + return (status == 0) ? -EINPROGRESS : status; } -struct dentry * +struct inode * nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct iattr attr; struct rpc_cred *cred; struct nfs4_state *state; - struct dentry *res; if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; @@ -1011,23 +897,16 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0); if (IS_ERR(cred)) - return (struct dentry *)cred; + return (struct inode *)cred; state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred); put_rpccred(cred); - if (IS_ERR(state)) { - if (PTR_ERR(state) == -ENOENT) - d_add(dentry, NULL); - return (struct dentry *)state; - } - res = d_add_unique(dentry, state->inode); - if (res != NULL) - dentry = res; - nfs4_intent_set_file(nd, dentry, state); - return res; + if (IS_ERR(state)) + return (struct inode *)state; + return state->inode; } int -nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) +nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags) { struct rpc_cred *cred; struct nfs4_state *state; @@ -1040,30 +919,18 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st if (IS_ERR(state)) state = nfs4_do_open(dir, dentry, openflags, NULL, cred); put_rpccred(cred); - if (IS_ERR(state)) { - switch (PTR_ERR(state)) { - case -EPERM: - case -EACCES: - case -EDQUOT: - case -ENOSPC: - case -EROFS: - lookup_instantiate_filp(nd, (struct dentry *)state, NULL); - return 1; - case -ENOENT: - if (dentry->d_inode == NULL) - return 1; - } - goto out_drop; - } + if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0) + return 1; + if (IS_ERR(state)) + return 0; inode = state->inode; - iput(inode); if (inode == dentry->d_inode) { - nfs4_intent_set_file(nd, dentry, state); + iput(inode); return 1; } - nfs4_close_state(state, openflags); -out_drop: d_drop(dentry); + nfs4_close_state(state, openflags); + iput(inode); return 0; } @@ -1564,7 +1431,7 @@ static int nfs4_proc_commit(struct nfs_write_data *cdata) static int nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags, struct nameidata *nd) + int flags) { struct nfs4_state *state; struct rpc_cred *cred; @@ -1586,13 +1453,13 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, struct nfs_fattr fattr; status = nfs4_do_setattr(NFS_SERVER(dir), &fattr, NFS_FH(state->inode), sattr, state); - if (status == 0) + if (status == 0) { nfs_setattr_update_inode(state->inode, sattr); - } - if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN)) - nfs4_intent_set_file(nd, dentry, state); - else - nfs4_close_state(state, flags); + goto out; + } + } else if (flags != 0) + goto out; + nfs4_close_state(state, flags); out: return status; } @@ -2239,6 +2106,65 @@ nfs4_proc_renew(struct nfs4_client *clp) return 0; } +/* + * We will need to arrange for the VFS layer to provide an atomic open. + * Until then, this open method is prone to inefficiency and race conditions + * due to the lookup, potential create, and open VFS calls from sys_open() + * placed on the wire. 
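One contract worth flagging from the nfs4_do_close() hunk above: on success the call returns -EINPROGRESS, signalling that the async CLOSE now owns the semaphores and references, and the caller must not release them itself. The caller side, visible in nfs4_close_state() later in this diff, therefore reads:

	if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
		return;	/* nfs4_close_done() drops state, so_sema and cl_sem */
	/* synchronous failure: fall through and release locally */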
+ */ +static int +nfs4_proc_file_open(struct inode *inode, struct file *filp) +{ + struct dentry *dentry = filp->f_dentry; + struct nfs_open_context *ctx; + struct nfs4_state *state = NULL; + struct rpc_cred *cred; + int status = -ENOMEM; + + dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n", + (int)dentry->d_parent->d_name.len, + dentry->d_parent->d_name.name, + (int)dentry->d_name.len, dentry->d_name.name); + + + /* Find our open stateid */ + cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); + if (IS_ERR(cred)) + return PTR_ERR(cred); + ctx = alloc_nfs_open_context(dentry, cred); + put_rpccred(cred); + if (unlikely(ctx == NULL)) + return -ENOMEM; + status = -EIO; /* ERACE actually */ + state = nfs4_find_state(inode, cred, filp->f_mode); + if (unlikely(state == NULL)) + goto no_state; + ctx->state = state; + nfs4_close_state(state, filp->f_mode); + ctx->mode = filp->f_mode; + nfs_file_set_open_context(filp, ctx); + put_nfs_open_context(ctx); + if (filp->f_mode & FMODE_WRITE) + nfs_begin_data_update(inode); + return 0; +no_state: + printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__); + put_nfs_open_context(ctx); + return status; +} + +/* + * Release our state + */ +static int +nfs4_proc_file_release(struct inode *inode, struct file *filp) +{ + if (filp->f_mode & FMODE_WRITE) + nfs_end_data_update(inode); + nfs_file_clear_open_context(filp); + return 0; +} + static inline int nfs4_server_supports_acls(struct nfs_server *server) { return (server->caps & NFS_CAP_ACLS) @@ -2359,7 +2285,7 @@ static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size return -ENOMEM; args.acl_pages[0] = localpage; args.acl_pgbase = 0; - resp_len = args.acl_len = PAGE_SIZE; + args.acl_len = PAGE_SIZE; } else { resp_buf = buf; buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase); @@ -2419,7 +2345,6 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; - nfs_inode_return_delegation(inode); buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0); if (ret == 0) @@ -2428,7 +2353,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen } static int -nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) +nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server) { struct nfs4_client *clp = server->nfs4_state; @@ -2506,7 +2431,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) /* This is the error handling routine for processes that are allowed * to sleep. 
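For the delay path referenced here: NFS4ERR_DELAY-style errors funnel into nfs4_delay(), whose retry interval is bounded by the NFS4_POLL_RETRY_MIN (1*HZ) and NFS4_POLL_RETRY_MAX (15*HZ) constants defined near the top of this file. A userspace model of a clamped, growing backoff (the doubling policy is an assumption for illustration, not lifted from this diff):

#include <stdio.h>

#define HZ 1000				/* illustrative */
#define NFS4_POLL_RETRY_MIN (1 * HZ)
#define NFS4_POLL_RETRY_MAX (15 * HZ)

int main(void)
{
	long timeout = 0;
	int i;

	for (i = 0; i < 6; i++) {
		if (timeout < NFS4_POLL_RETRY_MIN)
			timeout = NFS4_POLL_RETRY_MIN;
		if (timeout > NFS4_POLL_RETRY_MAX)
			timeout = NFS4_POLL_RETRY_MAX;
		printf("retry %d: wait %ld jiffies\n", i, timeout);
		timeout <<= 1;	/* assumed doubling between attempts */
	}
	return 0;
}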
*/ -int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) +int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs4_client *clp = server->nfs4_state; int ret = errorcode; @@ -2707,6 +2632,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock down_read(&clp->cl_sem); nlo.clientid = clp->cl_clientid; + down(&state->lock_sema); status = nfs4_set_lock_state(state, request); if (status != 0) goto out; @@ -2733,6 +2659,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock status = 0; } out: + up(&state->lock_sema); up_read(&clp->cl_sem); return status; } @@ -2769,149 +2696,79 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl) return res; } -struct nfs4_unlockdata { - struct nfs_lockargs arg; - struct nfs_locku_opargs luargs; - struct nfs_lockres res; - struct nfs4_lock_state *lsp; - struct nfs_open_context *ctx; - atomic_t refcount; - struct completion completion; -}; - -static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata) -{ - if (atomic_dec_and_test(&calldata->refcount)) { - nfs_free_seqid(calldata->luargs.seqid); - nfs4_put_lock_state(calldata->lsp); - put_nfs_open_context(calldata->ctx); - kfree(calldata); - } -} - -static void nfs4_locku_complete(struct nfs4_unlockdata *calldata) -{ - complete(&calldata->completion); - nfs4_locku_release_calldata(calldata); -} - -static void nfs4_locku_done(struct rpc_task *task) +static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata; - - nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid); - switch (task->tk_status) { - case 0: - memcpy(calldata->lsp->ls_stateid.data, - calldata->res.u.stateid.data, - sizeof(calldata->lsp->ls_stateid.data)); - break; - case -NFS4ERR_STALE_STATEID: - case -NFS4ERR_EXPIRED: - nfs4_schedule_state_recovery(calldata->res.server->nfs4_state); - break; - default: - if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) { - rpc_restart_call(task); - return; - } - } - nfs4_locku_complete(calldata); -} - -static void nfs4_locku_begin(struct rpc_task *task) -{ - struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata; + struct inode *inode = state->inode; + struct nfs_server *server = NFS_SERVER(inode); + struct nfs4_client *clp = server->nfs4_state; + struct nfs_lockargs arg = { + .fh = NFS_FH(inode), + .type = nfs4_lck_type(cmd, request), + .offset = request->fl_start, + .length = nfs4_lck_length(request), + }; + struct nfs_lockres res = { + .server = server, + }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], - .rpc_argp = &calldata->arg, - .rpc_resp = &calldata->res, - .rpc_cred = calldata->lsp->ls_state->owner->so_cred, + .rpc_argp = &arg, + .rpc_resp = &res, + .rpc_cred = state->owner->so_cred, }; - int status; - - status = nfs_wait_on_sequence(calldata->luargs.seqid, task); - if (status != 0) - return; - if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) { - nfs4_locku_complete(calldata); - task->tk_exit = NULL; - rpc_exit(task, 0); - return; - } - rpc_call_setup(task, &msg, 0); -} - -static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) -{ - struct nfs4_unlockdata *calldata; - struct inode *inode = state->inode; - struct nfs_server *server = NFS_SERVER(inode); struct 
nfs4_lock_state *lsp; + struct nfs_locku_opargs luargs; int status; - + + down_read(&clp->cl_sem); + down(&state->lock_sema); status = nfs4_set_lock_state(state, request); if (status != 0) - return status; + goto out; lsp = request->fl_u.nfs4_fl.owner; /* We might have lost the locks! */ if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) - return 0; - calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); - if (calldata == NULL) - return -ENOMEM; - calldata->luargs.seqid = nfs_alloc_seqid(&lsp->ls_seqid); - if (calldata->luargs.seqid == NULL) { - kfree(calldata); - return -ENOMEM; - } - calldata->luargs.stateid = &lsp->ls_stateid; - calldata->arg.fh = NFS_FH(inode); - calldata->arg.type = nfs4_lck_type(cmd, request); - calldata->arg.offset = request->fl_start; - calldata->arg.length = nfs4_lck_length(request); - calldata->arg.u.locku = &calldata->luargs; - calldata->res.server = server; - calldata->lsp = lsp; - atomic_inc(&lsp->ls_count); - - /* Ensure we don't close file until we're done freeing locks! */ - calldata->ctx = get_nfs_open_context((struct nfs_open_context*)request->fl_file->private_data); - - atomic_set(&calldata->refcount, 2); - init_completion(&calldata->completion); - - status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin, - nfs4_locku_done, calldata); + goto out; + luargs.seqid = lsp->ls_seqid; + memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid)); + arg.u.locku = &luargs; + status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); + nfs4_increment_lock_seqid(status, lsp); + if (status == 0) - wait_for_completion_interruptible(&calldata->completion); - do_vfs_lock(request->fl_file, request); - nfs4_locku_release_calldata(calldata); + memcpy(&lsp->ls_stateid, &res.u.stateid, + sizeof(lsp->ls_stateid)); +out: + up(&state->lock_sema); + if (status == 0) + do_vfs_lock(request->fl_file, request); + up_read(&clp->cl_sem); return status; } +static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = nfs4_handle_exception(NFS_SERVER(state->inode), + _nfs4_proc_unlck(state, cmd, request), + &exception); + } while (exception.retry); + return err; +} + static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim) { struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; - struct nfs_lock_opargs largs = { - .lock_stateid = &lsp->ls_stateid, - .open_stateid = &state->stateid, - .lock_owner = { - .clientid = server->nfs4_state->cl_clientid, - .id = lsp->ls_id, - }, - .reclaim = reclaim, - }; struct nfs_lockargs arg = { .fh = NFS_FH(inode), .type = nfs4_lck_type(cmd, request), .offset = request->fl_start, .length = nfs4_lck_length(request), - .u = { - .lock = &largs, - }, }; struct nfs_lockres res = { .server = server, @@ -2922,39 +2779,53 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r .rpc_resp = &res, .rpc_cred = state->owner->so_cred, }; - int status = -ENOMEM; + struct nfs_lock_opargs largs = { + .reclaim = reclaim, + .new_lock_owner = 0, + }; + int status; - largs.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid); - if (largs.lock_seqid == NULL) - return -ENOMEM; - if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) { + if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) { struct nfs4_state_owner *owner = state->owner; - - largs.open_seqid = nfs_alloc_seqid(&owner->so_seqid); - if (largs.open_seqid == NULL) - 
goto out; + struct nfs_open_to_lock otl = { + .lock_owner = { + .clientid = server->nfs4_state->cl_clientid, + }, + }; + + otl.lock_seqid = lsp->ls_seqid; + otl.lock_owner.id = lsp->ls_id; + memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid)); + largs.u.open_lock = &otl; largs.new_lock_owner = 1; + arg.u.lock = &largs; + down(&owner->so_sema); + otl.open_seqid = owner->so_seqid; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - /* increment open seqid on success, and seqid mutating errors */ - if (largs.new_lock_owner != 0) { - nfs_increment_open_seqid(status, largs.open_seqid); - if (status == 0) - nfs_confirm_seqid(&lsp->ls_seqid, 0); + /* increment open_owner seqid on success, and + * seqid mutating errors */ + nfs4_increment_seqid(status, owner); + up(&owner->so_sema); + if (status == 0) { + lsp->ls_flags |= NFS_LOCK_INITIALIZED; + lsp->ls_seqid++; } - nfs_free_seqid(largs.open_seqid); - } else + } else { + struct nfs_exist_lock el = { + .seqid = lsp->ls_seqid, + }; + memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid)); + largs.u.exist_lock = &el; + arg.u.lock = &largs; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - /* increment lock seqid on success, and seqid mutating errors*/ - nfs_increment_lock_seqid(status, largs.lock_seqid); + /* increment seqid on success, and * seqid mutating errors*/ + nfs4_increment_lock_seqid(status, lsp); + } /* save the returned stateid. */ - if (status == 0) { - memcpy(lsp->ls_stateid.data, res.u.stateid.data, - sizeof(lsp->ls_stateid.data)); - lsp->ls_flags |= NFS_LOCK_INITIALIZED; - } else if (status == -NFS4ERR_DENIED) + if (status == 0) + memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid)); + else if (status == -NFS4ERR_DENIED) status = -EAGAIN; -out: - nfs_free_seqid(largs.lock_seqid); return status; } @@ -2994,9 +2865,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock int status; down_read(&clp->cl_sem); + down(&state->lock_sema); status = nfs4_set_lock_state(state, request); if (status == 0) status = _nfs4_do_setlk(state, cmd, request, 0); + up(&state->lock_sema); if (status == 0) { /* Note: we always want to sleep here!
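On the "always want to sleep" note just above: after the server has granted the lock, the client mirrors it into the local VFS lock table via do_vfs_lock(), defined earlier in this file, so that local posix_test_lock() callers and deadlock detection see it; FL_SLEEP is forced because that local replay must block on any transient conflict rather than fail after the server-side LOCK has already succeeded. The helper's body is cut from this diff; from memory of this era's VFS interfaces, treat the following as a sketch:

	static int do_vfs_lock(struct file *file, struct file_lock *fl)
	{
		int res = 0;
		switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(file, fl);	/* blocks when FL_SLEEP is set */
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(file, fl);
			break;
		default:
			BUG();
		}
		return res;
	}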
*/ request->fl_flags |= FL_SLEEP; @@ -3151,8 +3024,8 @@ struct nfs_rpc_ops nfs_v4_clientops = { .read_setup = nfs4_proc_read_setup, .write_setup = nfs4_proc_write_setup, .commit_setup = nfs4_proc_commit_setup, - .file_open = nfs_open, - .file_release = nfs_release, + .file_open = nfs4_proc_file_open, + .file_release = nfs4_proc_file_release, .lock = nfs4_proc_lock, .clear_acl_cache = nfs4_zap_acl_attr, }; diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c index 2d5a6a2b9dec..afe587d82f1e 100644 --- a/trunk/fs/nfs/nfs4state.c +++ b/trunk/fs/nfs/nfs4state.c @@ -264,16 +264,13 @@ nfs4_alloc_state_owner(void) { struct nfs4_state_owner *sp; - sp = kzalloc(sizeof(*sp),GFP_KERNEL); + sp = kmalloc(sizeof(*sp),GFP_KERNEL); if (!sp) return NULL; - spin_lock_init(&sp->so_lock); + init_MUTEX(&sp->so_sema); + sp->so_seqid = 0; /* arbitrary */ INIT_LIST_HEAD(&sp->so_states); INIT_LIST_HEAD(&sp->so_delegations); - rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); - sp->so_seqid.sequence = &sp->so_sequence; - spin_lock_init(&sp->so_sequence.lock); - INIT_LIST_HEAD(&sp->so_sequence.list); atomic_set(&sp->so_count, 1); return sp; } @@ -362,6 +359,7 @@ nfs4_alloc_open_state(void) memset(state->stateid.data, 0, sizeof(state->stateid.data)); atomic_set(&state->count, 1); INIT_LIST_HEAD(&state->lock_states); + init_MUTEX(&state->lock_sema); spin_lock_init(&state->state_lock); return state; } @@ -439,23 +437,21 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) if (state) goto out; new = nfs4_alloc_open_state(); - spin_lock(&owner->so_lock); spin_lock(&inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); if (state == NULL && new != NULL) { state = new; + /* Caller *must* be holding owner->so_sem */ + /* Note: The reclaim code dictates that we add stateless + * and read-only stateids to the end of the list */ + list_add_tail(&state->open_states, &owner->so_states); state->owner = owner; atomic_inc(&owner->so_count); list_add(&state->inode_states, &nfsi->open_states); state->inode = igrab(inode); spin_unlock(&inode->i_lock); - /* Note: The reclaim code dictates that we add stateless - * and read-only stateids to the end of the list */ - list_add_tail(&state->open_states, &owner->so_states); - spin_unlock(&owner->so_lock); } else { spin_unlock(&inode->i_lock); - spin_unlock(&owner->so_lock); if (new) nfs4_free_open_state(new); } @@ -465,21 +461,19 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) /* * Beware! Caller must be holding exactly one - * reference to clp->cl_sem! + * reference to clp->cl_sem and owner->so_sema! */ void nfs4_put_open_state(struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; - if (!atomic_dec_and_lock(&state->count, &owner->so_lock)) + if (!atomic_dec_and_lock(&state->count, &inode->i_lock)) return; - spin_lock(&inode->i_lock); if (!list_empty(&state->inode_states)) list_del(&state->inode_states); - list_del(&state->open_states); spin_unlock(&inode->i_lock); - spin_unlock(&owner->so_lock); + list_del(&state->open_states); iput(inode); BUG_ON (state->state != 0); nfs4_free_open_state(state); @@ -487,17 +481,20 @@ void nfs4_put_open_state(struct nfs4_state *state) } /* - * Close the current file. + * Beware! Caller must be holding no references to clp->cl_sem! + * of owner->so_sema! 
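The nfs4_proc_unlck() wrapper added earlier in this hunk is the standard NFSv4 client idiom: issue the raw operation, hand any error to the exception handler (which may sleep out a grace period or trigger state recovery), and loop while it requests a retry. A minimal userspace model of that control flow follows; the handler and the error value are stand-ins, not the kernel's nfs4_handle_exception():

#include <stdio.h>

struct nfs4_exception { int retry; };

/* Stand-in for nfs4_handle_exception(): decide whether 'err' is
 * retryable (e.g. server still in its grace period) and say so. */
static int handle_exception(int err, struct nfs4_exception *exc)
{
	static int grace_replies = 2;	/* pretend two retryable replies */

	if (err == -1 && grace_replies-- > 0) {
		exc->retry = 1;		/* real code would sleep first */
		return 0;
	}
	exc->retry = 0;
	return err;
}

static int raw_op(void)
{
	return -1;	/* stand-in for an NFS4ERR_GRACE-style error */
}

int main(void)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = handle_exception(raw_op(), &exception);
	} while (exception.retry);
	printf("final status: %d\n", err);
	return 0;
}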
*/ void nfs4_close_state(struct nfs4_state *state, mode_t mode) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; + struct nfs4_client *clp = owner->so_client; int newstate; atomic_inc(&owner->so_count); + down_read(&clp->cl_sem); + down(&owner->so_sema); /* Protect against nfs4_find_state() */ - spin_lock(&owner->so_lock); spin_lock(&inode->i_lock); if (mode & FMODE_READ) state->nreaders--; @@ -510,7 +507,6 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode) list_move_tail(&state->open_states, &owner->so_states); } spin_unlock(&inode->i_lock); - spin_unlock(&owner->so_lock); newstate = 0; if (state->state != 0) { if (state->nreaders) @@ -519,16 +515,14 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode) newstate |= FMODE_WRITE; if (state->state == newstate) goto out; - if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { - state->state = newstate; - goto out; - } - if (nfs4_do_close(inode, state, newstate) == 0) + if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS) return; } out: nfs4_put_open_state(state); + up(&owner->so_sema); nfs4_put_state_owner(owner); + up_read(&clp->cl_sem); } /* @@ -552,16 +546,19 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. * + * The caller must be holding state->lock_sema */ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) { struct nfs4_lock_state *lsp; struct nfs4_client *clp = state->owner->so_client; - lsp = kzalloc(sizeof(*lsp), GFP_KERNEL); + lsp = kmalloc(sizeof(*lsp), GFP_KERNEL); if (lsp == NULL) return NULL; - lsp->ls_seqid.sequence = &state->owner->so_sequence; + lsp->ls_flags = 0; + lsp->ls_seqid = 0; /* arbitrary */ + memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data)); atomic_set(&lsp->ls_count, 1); lsp->ls_owner = fl_owner; spin_lock(&clp->cl_lock); @@ -575,7 +572,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. 
* - * The caller must be holding clp->cl_sem + * The caller must be holding state->lock_sema and clp->cl_sem */ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) { @@ -608,7 +605,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ * Release reference to lock_state, and free it if we see that * it is no longer in use */ -void nfs4_put_lock_state(struct nfs4_lock_state *lsp) +static void nfs4_put_lock_state(struct nfs4_lock_state *lsp) { struct nfs4_state *state; @@ -676,94 +673,29 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f nfs4_put_lock_state(lsp); } -struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter) -{ - struct nfs_seqid *new; - - new = kmalloc(sizeof(*new), GFP_KERNEL); - if (new != NULL) { - new->sequence = counter; - INIT_LIST_HEAD(&new->list); - } - return new; -} - -void nfs_free_seqid(struct nfs_seqid *seqid) -{ - struct rpc_sequence *sequence = seqid->sequence->sequence; - - if (!list_empty(&seqid->list)) { - spin_lock(&sequence->lock); - list_del(&seqid->list); - spin_unlock(&sequence->lock); - } - rpc_wake_up_next(&sequence->wait); - kfree(seqid); -} - /* - * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or - * failed with a seqid incrementing error - - * see comments nfs_fs.h:seqid_mutating_error() - */ -static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid) +* Called with state->lock_sema and clp->cl_sem held. +*/ +void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp) { - switch (status) { - case 0: - break; - case -NFS4ERR_BAD_SEQID: - case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_STALE_STATEID: - case -NFS4ERR_BAD_STATEID: - case -NFS4ERR_BADXDR: - case -NFS4ERR_RESOURCE: - case -NFS4ERR_NOFILEHANDLE: - /* Non-seqid mutating errors */ - return; - }; - /* - * Note: no locking needed as we are guaranteed to be first - * on the sequence list - */ - seqid->sequence->counter++; -} - -void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) -{ - if (status == -NFS4ERR_BAD_SEQID) { - struct nfs4_state_owner *sp = container_of(seqid->sequence, - struct nfs4_state_owner, so_seqid); - nfs4_drop_state_owner(sp); - } - return nfs_increment_seqid(status, seqid); + if (status == NFS_OK || seqid_mutating_err(-status)) + lsp->ls_seqid++; } /* - * Increment the seqid if the LOCK/LOCKU succeeded, or - * failed with a seqid incrementing error - - * see comments nfs_fs.h:seqid_mutating_error() - */ -void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) -{ - return nfs_increment_seqid(status, seqid); -} - -int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) -{ - struct rpc_sequence *sequence = seqid->sequence->sequence; - int status = 0; - - if (sequence->list.next == &seqid->list) - goto out; - spin_lock(&sequence->lock); - if (!list_empty(&sequence->list)) { - rpc_sleep_on(&sequence->wait, task, NULL, NULL); - status = -EAGAIN; - } else - list_add(&seqid->list, &sequence->list); - spin_unlock(&sequence->lock); -out: - return status; +* Called with sp->so_sema and clp->cl_sem held. 
+* +* Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or +* failed with a seqid incrementing error - +* see comments nfs_fs.h:seqid_mutating_error() +*/ +void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp) +{ + if (status == NFS_OK || seqid_mutating_err(-status)) + sp->so_seqid++; + /* If the server returns BAD_SEQID, unhash state_owner here */ + if (status == -NFS4ERR_BAD_SEQID) + nfs4_drop_state_owner(sp); } static int reclaimer(void *); @@ -859,6 +791,8 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n if (state->state == 0) continue; status = ops->recover_open(sp, state); + list_for_each_entry(lock, &state->lock_states, ls_locks) + lock->ls_flags &= ~NFS_LOCK_INITIALIZED; if (status >= 0) { status = nfs4_reclaim_locks(ops, state); if (status < 0) @@ -897,28 +831,6 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n return status; } -static void nfs4_state_mark_reclaim(struct nfs4_client *clp) -{ - struct nfs4_state_owner *sp; - struct nfs4_state *state; - struct nfs4_lock_state *lock; - - /* Reset all sequence ids to zero */ - list_for_each_entry(sp, &clp->cl_state_owners, so_list) { - sp->so_seqid.counter = 0; - sp->so_seqid.flags = 0; - spin_lock(&sp->so_lock); - list_for_each_entry(state, &sp->so_states, open_states) { - list_for_each_entry(lock, &state->lock_states, ls_locks) { - lock->ls_seqid.counter = 0; - lock->ls_seqid.flags = 0; - lock->ls_flags &= ~NFS_LOCK_INITIALIZED; - } - } - spin_unlock(&sp->so_lock); - } -} - static int reclaimer(void *ptr) { struct reclaimer_args *args = (struct reclaimer_args *)ptr; @@ -952,7 +864,6 @@ static int reclaimer(void *ptr) default: ops = &nfs4_network_partition_recovery_ops; }; - nfs4_state_mark_reclaim(clp); status = __nfs4_init_client(clp); if (status) goto out_error; diff --git a/trunk/fs/nfs/nfs4xdr.c b/trunk/fs/nfs/nfs4xdr.c index cd762648fa9a..6c564ef9489e 100644 --- a/trunk/fs/nfs/nfs4xdr.c +++ b/trunk/fs/nfs/nfs4xdr.c @@ -602,10 +602,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg) { uint32_t *p; - RESERVE_SPACE(8+sizeof(arg->stateid->data)); + RESERVE_SPACE(8+sizeof(arg->stateid.data)); WRITE32(OP_CLOSE); - WRITE32(arg->seqid->sequence->counter); - WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); + WRITE32(arg->seqid); + WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); return 0; } @@ -729,18 +729,22 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lockargs *arg) WRITE64(arg->length); WRITE32(opargs->new_lock_owner); if (opargs->new_lock_owner){ + struct nfs_open_to_lock *ol = opargs->u.open_lock; + RESERVE_SPACE(40); - WRITE32(opargs->open_seqid->sequence->counter); - WRITEMEM(opargs->open_stateid->data, sizeof(opargs->open_stateid->data)); - WRITE32(opargs->lock_seqid->sequence->counter); - WRITE64(opargs->lock_owner.clientid); + WRITE32(ol->open_seqid); + WRITEMEM(&ol->open_stateid, sizeof(ol->open_stateid)); + WRITE32(ol->lock_seqid); + WRITE64(ol->lock_owner.clientid); WRITE32(4); - WRITE32(opargs->lock_owner.id); + WRITE32(ol->lock_owner.id); } else { + struct nfs_exist_lock *el = opargs->u.exist_lock; + RESERVE_SPACE(20); - WRITEMEM(opargs->lock_stateid->data, sizeof(opargs->lock_stateid->data)); - WRITE32(opargs->lock_seqid->sequence->counter); + WRITEMEM(&el->stateid, sizeof(el->stateid)); + WRITE32(el->seqid); } return 0; @@ -771,8 +775,8 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_lockargs *arg) RESERVE_SPACE(44); 
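The two increment helpers at the top of this hunk, nfs4_increment_seqid() and nfs4_increment_lock_seqid(), share one rule: the counter advances on success and on any seqid-mutating error, and stands still only for errors the server raises before consuming a slot in its seqid window (the list visible in the removed nfs_increment_seqid() above: BAD_SEQID, STALE_CLIENTID, STALE_STATEID, BAD_STATEID, BADXDR, RESOURCE, NOFILEHANDLE). A minimal userspace model; the NFS4ERR_* numeric values are taken from RFC 3530 for illustration only:

#include <stdio.h>

/* RFC 3530 error values, assumed here for illustration */
#define NFS4ERR_BAD_SEQID	10026
#define NFS4ERR_STALE_CLIENTID	10022
#define NFS4ERR_STALE_STATEID	10023
#define NFS4ERR_BAD_STATEID	10025
#define NFS4ERR_BADXDR		10036
#define NFS4ERR_RESOURCE	10018
#define NFS4ERR_NOFILEHANDLE	10020

/* Non-zero if the reply consumed a slot in the server's seqid
 * window, i.e. the client must bump its counter to stay in sync. */
static int seqid_mutating_err(unsigned int err)
{
	switch (err) {
	case NFS4ERR_STALE_CLIENTID:
	case NFS4ERR_STALE_STATEID:
	case NFS4ERR_BAD_STATEID:
	case NFS4ERR_BAD_SEQID:
	case NFS4ERR_BADXDR:
	case NFS4ERR_RESOURCE:
	case NFS4ERR_NOFILEHANDLE:
		return 0;	/* server rejected the call outright */
	default:
		return 1;
	}
}

static void increment_seqid(int status, unsigned int *seqid)
{
	if (status == 0 || seqid_mutating_err(-status))
		(*seqid)++;
}

int main(void)
{
	unsigned int seqid = 1;

	increment_seqid(0, &seqid);			/* success: 2 */
	increment_seqid(-NFS4ERR_BAD_SEQID, &seqid);	/* unchanged: 2 */
	printf("seqid = %u\n", seqid);
	return 0;
}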
WRITE32(OP_LOCKU); WRITE32(arg->type); - WRITE32(opargs->seqid->sequence->counter); - WRITEMEM(opargs->stateid->data, sizeof(opargs->stateid->data)); + WRITE32(opargs->seqid); + WRITEMEM(&opargs->stateid, sizeof(opargs->stateid)); WRITE64(arg->offset); WRITE64(arg->length); @@ -822,7 +826,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena */ RESERVE_SPACE(8); WRITE32(OP_OPEN); - WRITE32(arg->seqid->sequence->counter); + WRITE32(arg->seqid); encode_share_access(xdr, arg->open_flags); RESERVE_SPACE(16); WRITE64(arg->clientid); @@ -937,7 +941,7 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con RESERVE_SPACE(8+sizeof(arg->stateid.data)); WRITE32(OP_OPEN_CONFIRM); WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); - WRITE32(arg->seqid->sequence->counter); + WRITE32(arg->seqid); return 0; } @@ -946,10 +950,10 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea { uint32_t *p; - RESERVE_SPACE(8+sizeof(arg->stateid->data)); + RESERVE_SPACE(8+sizeof(arg->stateid.data)); WRITE32(OP_OPEN_DOWNGRADE); - WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); - WRITE32(arg->seqid->sequence->counter); + WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); + WRITE32(arg->seqid); encode_share_access(xdr, arg->open_flags); return 0; } @@ -1433,9 +1437,6 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena }; int status; - status = nfs_wait_on_sequence(args->seqid, req->rq_task); - if (status != 0) - goto out; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -1463,9 +1464,6 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, uint32_t *p, struct n }; int status; - status = nfs_wait_on_sequence(args->seqid, req->rq_task); - if (status != 0) - goto out; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -1487,9 +1485,6 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nf }; int status; - status = nfs_wait_on_sequence(args->seqid, req->rq_task); - if (status != 0) - goto out; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -1530,15 +1525,8 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_locka struct compound_hdr hdr = { .nops = 2, }; - struct nfs_lock_opargs *opargs = args->u.lock; int status; - status = nfs_wait_on_sequence(opargs->lock_seqid, req->rq_task); - if (status != 0) - goto out; - /* Do we need to do an open_to_lock_owner? 
*/ - if (opargs->lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED) - opargs->new_lock_owner = 0; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -2902,8 +2890,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lockres *res) status = decode_op_hdr(xdr, OP_LOCK); if (status == 0) { - READ_BUF(sizeof(res->u.stateid.data)); - COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data)); + READ_BUF(sizeof(nfs4_stateid)); + COPYMEM(&res->u.stateid, sizeof(res->u.stateid)); } else if (status == -NFS4ERR_DENIED) return decode_lock_denied(xdr, &res->u.denied); return status; @@ -2925,8 +2913,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_lockres *res) status = decode_op_hdr(xdr, OP_LOCKU); if (status == 0) { - READ_BUF(sizeof(res->u.stateid.data)); - COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data)); + READ_BUF(sizeof(nfs4_stateid)); + COPYMEM(&res->u.stateid, sizeof(res->u.stateid)); } return status; } @@ -3255,8 +3243,7 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, if (attrlen <= *acl_len) xdr_read_pages(xdr, attrlen); *acl_len = attrlen; - } else - status = -EOPNOTSUPP; + } out: return status; diff --git a/trunk/fs/nfs/proc.c b/trunk/fs/nfs/proc.c index 8fef86523d7f..be23c3fb9260 100644 --- a/trunk/fs/nfs/proc.c +++ b/trunk/fs/nfs/proc.c @@ -216,7 +216,7 @@ static int nfs_proc_write(struct nfs_write_data *wdata) static int nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags, struct nameidata *nd) + int flags) { struct nfs_fh fhandle; struct nfs_fattr fattr; diff --git a/trunk/fs/open.c b/trunk/fs/open.c index 8d06ec911fd9..f0d90cf0495c 100644 --- a/trunk/fs/open.c +++ b/trunk/fs/open.c @@ -739,8 +739,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group) } static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, - int flags, struct file *f, - int (*open)(struct inode *, struct file *)) + int flags, struct file *f) { struct inode *inode; int error; @@ -762,14 +761,11 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, f->f_op = fops_get(inode->i_fop); file_move(f, &inode->i_sb->s_files); - if (!open && f->f_op) - open = f->f_op->open; - if (open) { - error = open(inode, f); + if (f->f_op && f->f_op->open) { + error = f->f_op->open(inode,f); if (error) goto cleanup_all; } - f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); @@ -818,75 +814,28 @@ struct file *filp_open(const char * filename, int flags, int mode) { int namei_flags, error; struct nameidata nd; + struct file *f; namei_flags = flags; if ((namei_flags+1) & O_ACCMODE) namei_flags++; + if (namei_flags & O_TRUNC) + namei_flags |= 2; + + error = -ENFILE; + f = get_empty_filp(); + if (f == NULL) + return ERR_PTR(error); error = open_namei(filename, namei_flags, mode, &nd); if (!error) - return nameidata_to_filp(&nd, flags); + return __dentry_open(nd.dentry, nd.mnt, flags, f); + put_filp(f); return ERR_PTR(error); } EXPORT_SYMBOL(filp_open); -/** - * lookup_instantiate_filp - instantiates the open intent filp - * @nd: pointer to nameidata - * @dentry: pointer to dentry - * @open: open callback - * - * Helper for filesystems that want to use lookup open intents and pass back - * a fully instantiated struct file to the caller. - * This function is meant to be called from within a filesystem's - * lookup method. 
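The filp_open() rewrite above leans on a classic fcntl encoding: O_RDONLY, O_WRONLY and O_RDWR are 0, 1 and 2, so adding one to the access-mode bits yields the read/write bitmask (1, 2, 3) that the lookup and permission code wants, which is why the function bumps namei_flags before calling open_namei(). The trick in isolation, as a standalone sketch:

#include <stdio.h>
#include <fcntl.h>

/* O_RDONLY=0, O_WRONLY=1, O_RDWR=2; adding 1 turns the access mode
 * into a bitmask: 1 = read, 2 = write, 3 = read|write. */
static int open_flags_to_fmode(int flags)
{
	int namei_flags = flags;

	if ((namei_flags + 1) & O_ACCMODE)
		namei_flags++;
	return namei_flags & 3;
}

int main(void)
{
	printf("O_RDONLY -> %d\n", open_flags_to_fmode(O_RDONLY));	/* 1 */
	printf("O_WRONLY -> %d\n", open_flags_to_fmode(O_WRONLY));	/* 2 */
	printf("O_RDWR   -> %d\n", open_flags_to_fmode(O_RDWR));	/* 3 */
	return 0;
}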
- * Note that in case of error, nd->intent.open.file is destroyed, but the - * path information remains valid. - * If the open callback is set to NULL, then the standard f_op->open() - * filesystem callback is substituted. - */ -struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, - int (*open)(struct inode *, struct file *)) -{ - if (IS_ERR(nd->intent.open.file)) - goto out; - if (IS_ERR(dentry)) - goto out_err; - nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt), - nd->intent.open.flags - 1, - nd->intent.open.file, - open); -out: - return nd->intent.open.file; -out_err: - release_open_intent(nd); - nd->intent.open.file = (struct file *)dentry; - goto out; -} -EXPORT_SYMBOL_GPL(lookup_instantiate_filp); - -/** - * nameidata_to_filp - convert a nameidata to an open filp. - * @nd: pointer to nameidata - * @flags: open flags - * - * Note that this function destroys the original nameidata - */ -struct file *nameidata_to_filp(struct nameidata *nd, int flags) -{ - struct file *filp; - - /* Pick up the filp from the open intent */ - filp = nd->intent.open.file; - /* Has the filesystem initialised the file for us? */ - if (filp->f_dentry == NULL) - filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL); - else - path_release(nd); - return filp; -} - struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) { int error; @@ -897,7 +846,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) if (f == NULL) return ERR_PTR(error); - return __dentry_open(dentry, mnt, flags, f, NULL); + return __dentry_open(dentry, mnt, flags, f); } EXPORT_SYMBOL(dentry_open); diff --git a/trunk/include/linux/hugetlb.h b/trunk/include/linux/hugetlb.h index e670b0d13fe0..42cb7d70f9ac 100644 --- a/trunk/include/linux/hugetlb.h +++ b/trunk/include/linux/hugetlb.h @@ -155,11 +155,24 @@ static inline void set_file_hugepages(struct file *file) { file->f_op = &hugetlbfs_file_operations; } + +static inline int valid_hugetlb_file_off(struct vm_area_struct *vma, + unsigned long address) +{ + struct inode *inode = vma->vm_file->f_dentry->d_inode; + loff_t file_off = address - vma->vm_start; + + file_off += (vma->vm_pgoff << PAGE_SHIFT); + + return (file_off < inode->i_size); +} + #else /* !CONFIG_HUGETLBFS */ #define is_file_hugepages(file) 0 #define set_file_hugepages(file) BUG() #define hugetlb_zero_setup(size) ERR_PTR(-ENOSYS) +#define valid_hugetlb_file_off(vma, address) 0 #endif /* !CONFIG_HUGETLBFS */ diff --git a/trunk/include/linux/namei.h b/trunk/include/linux/namei.h index 1c975d0d9e94..7db67b008cac 100644 --- a/trunk/include/linux/namei.h +++ b/trunk/include/linux/namei.h @@ -8,7 +8,6 @@ struct vfsmount; struct open_intent { int flags; int create_mode; - struct file *file; }; enum { MAX_NESTED_LINKS = 5 }; @@ -66,13 +65,6 @@ extern int FASTCALL(link_path_walk(const char *, struct nameidata *)); extern void path_release(struct nameidata *); extern void path_release_on_umount(struct nameidata *); -extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags); -extern int path_lookup_open(const char *, unsigned lookup_flags, struct nameidata *, int open_flags); -extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, - int (*open)(struct inode *, struct file *)); -extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); -extern void release_open_intent(struct nameidata *); - extern struct dentry * lookup_one_len(const 
char *, struct dentry *, int); extern struct dentry * lookup_hash(struct qstr *, struct dentry *); diff --git a/trunk/include/linux/nfs_fs.h b/trunk/include/linux/nfs_fs.h index 7bac2785c6e4..9a6047ff1b25 100644 --- a/trunk/include/linux/nfs_fs.h +++ b/trunk/include/linux/nfs_fs.h @@ -41,10 +41,6 @@ #define NFS_MAX_FILE_IO_BUFFER_SIZE 32768 #define NFS_DEF_FILE_IO_BUFFER_SIZE 4096 -/* Default timeout values */ -#define NFS_MAX_UDP_TIMEOUT (60*HZ) -#define NFS_MAX_TCP_TIMEOUT (600*HZ) - /* * superblock magic number for NFS */ diff --git a/trunk/include/linux/nfs_xdr.h b/trunk/include/linux/nfs_xdr.h index 60086dac11d5..a2bf6914ff1b 100644 --- a/trunk/include/linux/nfs_xdr.h +++ b/trunk/include/linux/nfs_xdr.h @@ -96,13 +96,12 @@ struct nfs4_change_info { u64 after; }; -struct nfs_seqid; /* * Arguments to the open call. */ struct nfs_openargs { const struct nfs_fh * fh; - struct nfs_seqid * seqid; + __u32 seqid; int open_flags; __u64 clientid; __u32 id; @@ -137,7 +136,7 @@ struct nfs_openres { struct nfs_open_confirmargs { const struct nfs_fh * fh; nfs4_stateid stateid; - struct nfs_seqid * seqid; + __u32 seqid; }; struct nfs_open_confirmres { @@ -149,8 +148,8 @@ struct nfs_open_confirmres { */ struct nfs_closeargs { struct nfs_fh * fh; - nfs4_stateid * stateid; - struct nfs_seqid * seqid; + nfs4_stateid stateid; + __u32 seqid; int open_flags; }; @@ -165,19 +164,30 @@ struct nfs_lowner { u32 id; }; -struct nfs_lock_opargs { - struct nfs_seqid * lock_seqid; - nfs4_stateid * lock_stateid; - struct nfs_seqid * open_seqid; - nfs4_stateid * open_stateid; +struct nfs_open_to_lock { + __u32 open_seqid; + nfs4_stateid open_stateid; + __u32 lock_seqid; struct nfs_lowner lock_owner; +}; + +struct nfs_exist_lock { + nfs4_stateid stateid; + __u32 seqid; +}; + +struct nfs_lock_opargs { __u32 reclaim; __u32 new_lock_owner; + union { + struct nfs_open_to_lock *open_lock; + struct nfs_exist_lock *exist_lock; + } u; }; struct nfs_locku_opargs { - struct nfs_seqid * seqid; - nfs4_stateid * stateid; + __u32 seqid; + nfs4_stateid stateid; }; struct nfs_lockargs { @@ -712,7 +722,7 @@ struct nfs_rpc_ops { int (*write) (struct nfs_write_data *); int (*commit) (struct nfs_write_data *); int (*create) (struct inode *, struct dentry *, - struct iattr *, int, struct nameidata *); + struct iattr *, int); int (*remove) (struct inode *, struct qstr *); int (*unlink_setup) (struct rpc_message *, struct dentry *, struct qstr *); diff --git a/trunk/include/linux/sunrpc/auth.h b/trunk/include/linux/sunrpc/auth.h index b68c11a2d6dd..04ebc24db348 100644 --- a/trunk/include/linux/sunrpc/auth.h +++ b/trunk/include/linux/sunrpc/auth.h @@ -66,12 +66,7 @@ struct rpc_cred_cache { struct rpc_auth { unsigned int au_cslack; /* call cred size estimate */ - /* guess at number of u32's auth adds before - * reply data; normally the verifier size: */ - unsigned int au_rslack; - /* for gss, used to calculate au_rslack: */ - unsigned int au_verfsize; - + unsigned int au_rslack; /* reply verf size guess */ unsigned int au_flags; /* various flags */ struct rpc_authops * au_ops; /* operations */ rpc_authflavor_t au_flavor; /* pseudoflavor (note may diff --git a/trunk/include/linux/sunrpc/debug.h b/trunk/include/linux/sunrpc/debug.h index 1a42d902bc11..eadb31e3c198 100644 --- a/trunk/include/linux/sunrpc/debug.h +++ b/trunk/include/linux/sunrpc/debug.h @@ -32,7 +32,6 @@ #define RPCDBG_AUTH 0x0010 #define RPCDBG_PMAP 0x0020 #define RPCDBG_SCHED 0x0040 -#define RPCDBG_TRANS 0x0080 #define RPCDBG_SVCSOCK 0x0100 #define RPCDBG_SVCDSP 0x0200 #define 
RPCDBG_MISC 0x0400 @@ -95,8 +94,6 @@ enum { CTL_NLMDEBUG, CTL_SLOTTABLE_UDP, CTL_SLOTTABLE_TCP, - CTL_MIN_RESVPORT, - CTL_MAX_RESVPORT, }; #endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/trunk/include/linux/sunrpc/gss_api.h b/trunk/include/linux/sunrpc/gss_api.h index 9b8bcf125c18..689262f63059 100644 --- a/trunk/include/linux/sunrpc/gss_api.h +++ b/trunk/include/linux/sunrpc/gss_api.h @@ -40,21 +40,14 @@ int gss_import_sec_context( struct gss_ctx **ctx_id); u32 gss_get_mic( struct gss_ctx *ctx_id, + u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 gss_verify_mic( struct gss_ctx *ctx_id, struct xdr_buf *message, - struct xdr_netobj *mic_token); -u32 gss_wrap( - struct gss_ctx *ctx_id, - int offset, - struct xdr_buf *outbuf, - struct page **inpages); -u32 gss_unwrap( - struct gss_ctx *ctx_id, - int offset, - struct xdr_buf *inbuf); + struct xdr_netobj *mic_token, + u32 *qstate); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); @@ -63,6 +56,7 @@ char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); struct pf_desc { u32 pseudoflavor; + u32 qop; u32 service; char *name; char *auth_domain_name; @@ -91,21 +85,14 @@ struct gss_api_ops { struct gss_ctx *ctx_id); u32 (*gss_get_mic)( struct gss_ctx *ctx_id, + u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 (*gss_verify_mic)( struct gss_ctx *ctx_id, struct xdr_buf *message, - struct xdr_netobj *mic_token); - u32 (*gss_wrap)( - struct gss_ctx *ctx_id, - int offset, - struct xdr_buf *outbuf, - struct page **inpages); - u32 (*gss_unwrap)( - struct gss_ctx *ctx_id, - int offset, - struct xdr_buf *buf); + struct xdr_netobj *mic_token, + u32 *qstate); void (*gss_delete_sec_context)( void *internal_ctx_id); }; diff --git a/trunk/include/linux/sunrpc/gss_err.h b/trunk/include/linux/sunrpc/gss_err.h index a6807867bd21..92608a2e574c 100644 --- a/trunk/include/linux/sunrpc/gss_err.h +++ b/trunk/include/linux/sunrpc/gss_err.h @@ -65,6 +65,16 @@ typedef unsigned int OM_uint32; #define GSS_C_MECH_CODE 2 +/* + * Define the default Quality of Protection for per-message services. Note + * that an implementation that offers multiple levels of QOP may either reserve + * a value (for example zero, as assumed here) to mean "default protection", or + * alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit + * QOP value. However a value of 0 should always be interpreted by a GSSAPI + * implementation as a request for the default protection level. 
+ */ +#define GSS_C_QOP_DEFAULT 0 + /* * Expiration time of 2^32-1 seconds means infinite lifetime for a * credential or security context diff --git a/trunk/include/linux/sunrpc/gss_krb5.h b/trunk/include/linux/sunrpc/gss_krb5.h index 2c3601d31045..ffe31d2eb9ec 100644 --- a/trunk/include/linux/sunrpc/gss_krb5.h +++ b/trunk/include/linux/sunrpc/gss_krb5.h @@ -116,22 +116,18 @@ enum seal_alg { s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, - int body_offset, struct xdr_netobj *cksum); - -u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *, - struct xdr_netobj *); - -u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *, - struct xdr_netobj *); + struct xdr_netobj *cksum); u32 -gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, - struct xdr_buf *outbuf, struct page **pages); +krb5_make_token(struct krb5_ctx *context_handle, int qop_req, + struct xdr_buf *input_message_buffer, + struct xdr_netobj *output_message_buffer, int toktype); u32 -gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, - struct xdr_buf *buf); - +krb5_read_token(struct krb5_ctx *context_handle, + struct xdr_netobj *input_token_buffer, + struct xdr_buf *message_buffer, + int *qop_state, int toktype); u32 krb5_encrypt(struct crypto_tfm * key, @@ -141,13 +137,6 @@ u32 krb5_decrypt(struct crypto_tfm * key, void *iv, void *in, void *out, int length); -int -gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset, - struct page **pages); - -int -gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset); - s32 krb5_make_seq_num(struct crypto_tfm * key, int direction, diff --git a/trunk/include/linux/sunrpc/gss_spkm3.h b/trunk/include/linux/sunrpc/gss_spkm3.h index 0beb2cf00a84..b5c9968c3c17 100644 --- a/trunk/include/linux/sunrpc/gss_spkm3.h +++ b/trunk/include/linux/sunrpc/gss_spkm3.h @@ -41,9 +41,9 @@ struct spkm3_ctx { #define SPKM_WRAP_TOK 5 #define SPKM_DEL_TOK 6 -u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype); +u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype); -u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype); +u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype); #define CKSUMTYPE_RSA_MD5 0x0007 diff --git a/trunk/include/linux/sunrpc/msg_prot.h b/trunk/include/linux/sunrpc/msg_prot.h index f43f237360ae..15f115332389 100644 --- a/trunk/include/linux/sunrpc/msg_prot.h +++ b/trunk/include/linux/sunrpc/msg_prot.h @@ -76,30 +76,5 @@ enum rpc_auth_stat { #define RPC_MAXNETNAMELEN 256 -/* - * From RFC 1831: - * - * "A record is composed of one or more record fragments. A record - * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of - * fragment data. The bytes encode an unsigned binary number; as with - * XDR integers, the byte order is from highest to lowest. The number - * encodes two values -- a boolean which indicates whether the fragment - * is the last fragment of the record (bit value 1 implies the fragment - * is the last fragment) and a 31-bit unsigned binary value which is the - * length in bytes of the fragment's data. The boolean value is the - * highest-order bit of the header; the length is the 31 low-order bits. 
- * (Note that this record specification is NOT in XDR standard form!)" - * - * The Linux RPC client always sends its requests in a single record - * fragment, limiting the maximum payload size for stream transports to - * 2GB. - */ - -typedef u32 rpc_fraghdr; - -#define RPC_LAST_STREAM_FRAGMENT (1U << 31) -#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT) -#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1) - #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/trunk/include/linux/sunrpc/xdr.h b/trunk/include/linux/sunrpc/xdr.h index 5da968729cf8..23448d0fb5bc 100644 --- a/trunk/include/linux/sunrpc/xdr.h +++ b/trunk/include/linux/sunrpc/xdr.h @@ -161,10 +161,14 @@ typedef struct { typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len); -extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, skb_reader_t *, skb_read_actor_t); +struct socket; +struct sockaddr; +extern int xdr_sendpages(struct socket *, struct sockaddr *, int, + struct xdr_buf *, unsigned int, int); + extern int xdr_encode_word(struct xdr_buf *, int, u32); extern int xdr_decode_word(struct xdr_buf *, int, u32 *); diff --git a/trunk/include/linux/sunrpc/xprt.h b/trunk/include/linux/sunrpc/xprt.h index 3b8b6e823c70..e618c1649814 100644 --- a/trunk/include/linux/sunrpc/xprt.h +++ b/trunk/include/linux/sunrpc/xprt.h @@ -1,5 +1,5 @@ /* - * linux/include/linux/sunrpc/xprt.h + * linux/include/linux/sunrpc/clnt_xprt.h * * Declarations for the RPC transport interface. * @@ -15,6 +15,20 @@ #include #include +/* + * The transport code maintains an estimate on the maximum number of out- + * standing RPC requests, using a smoothed version of the congestion + * avoidance implemented in 44BSD. This is basically the Van Jacobson + * congestion algorithm: If a retransmit occurs, the congestion window is + * halved; otherwise, it is incremented by 1/cwnd when + * + * - a reply is received and + * - a full number of requests are outstanding and + * - the congestion window hasn't been updated recently. + * + * Upper procedures may check whether a request would block waiting for + * a free RPC slot by using the RPC_CONGESTED() macro. + */ extern unsigned int xprt_udp_slot_table_entries; extern unsigned int xprt_tcp_slot_table_entries; @@ -22,23 +36,34 @@ extern unsigned int xprt_tcp_slot_table_entries; #define RPC_DEF_SLOT_TABLE (16U) #define RPC_MAX_SLOT_TABLE (128U) +#define RPC_CWNDSHIFT (8U) +#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) +#define RPC_INITCWND RPC_CWNDSCALE +#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) +#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) + +/* Default timeout values */ +#define RPC_MAX_UDP_TIMEOUT (60*HZ) +#define RPC_MAX_TCP_TIMEOUT (600*HZ) + /* - * RPC call and reply header size as number of 32bit words (verifier - * size computed separately) + * Wait duration for an RPC TCP connection to be established. Solaris + * NFS over TCP uses 60 seconds, for example, which is in line with how + * long a server takes to reboot. */ -#define RPC_CALLHDRSIZE 6 -#define RPC_REPHDRSIZE 4 +#define RPC_CONNECT_TIMEOUT (60*HZ) /* - * Parameters for choosing a free port + * Delay an arbitrary number of seconds before attempting to reconnect + * after an error. 
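The congestion notes restored to xprt.h above translate into fixed-point arithmetic: cwnd is kept scaled by RPC_CWNDSCALE so that "increment by 1/cwnd" stays an integer update, and RPCXPRT_CONGESTED() is just a comparison of cong against cwnd in the same units. A userspace sketch of one plausible update step built on the RPC_CWND* macros quoted above; the exact rounding term is an assumption, not lifted from xprt.c:

#include <stdio.h>

#define RPC_CWNDSHIFT	(8U)
#define RPC_CWNDSCALE	(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND	RPC_CWNDSCALE
#define RPC_MAXCWND(n)	((unsigned long)(n) << RPC_CWNDSHIFT)	/* n = max_reqs */

/* Van Jacobson-style AIMD: halve on retransmit, grow by roughly
 * 1/cwnd (in CWNDSCALE units) on a timely reply. */
static unsigned long adjust_cwnd(unsigned long cwnd, int timeout,
				 unsigned int max_reqs)
{
	if (timeout) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	} else if (cwnd <= RPC_MAXCWND(max_reqs)) {
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(max_reqs))
			cwnd = RPC_MAXCWND(max_reqs);
	}
	return cwnd;
}

int main(void)
{
	unsigned long cwnd = RPC_INITCWND;

	for (int i = 0; i < 4; i++)
		cwnd = adjust_cwnd(cwnd, 0, 16);
	printf("cwnd after 4 replies: %lu/%u\n", cwnd, RPC_CWNDSCALE);
	cwnd = adjust_cwnd(cwnd, 1, 16);
	printf("cwnd after a timeout: %lu/%u\n", cwnd, RPC_CWNDSCALE);
	return 0;
}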
*/ -extern unsigned int xprt_min_resvport; -extern unsigned int xprt_max_resvport; +#define RPC_REESTABLISH_TIMEOUT (15*HZ) -#define RPC_MIN_RESVPORT (1U) -#define RPC_MAX_RESVPORT (65535U) -#define RPC_DEF_MIN_RESVPORT (650U) -#define RPC_DEF_MAX_RESVPORT (1023U) +/* RPC call and reply header size as number of 32bit words (verifier + * size computed separately) + */ +#define RPC_CALLHDRSIZE 6 +#define RPC_REPHDRSIZE 4 /* * This describes a timeout strategy @@ -51,9 +76,6 @@ struct rpc_timeout { unsigned char to_exponential; }; -struct rpc_task; -struct rpc_xprt; - /* * This describes a complete RPC request */ @@ -73,10 +95,7 @@ struct rpc_rqst { int rq_cong; /* has incremented xprt->cong */ int rq_received; /* receive completed */ u32 rq_seqno; /* gss seq no. used on req. */ - int rq_enc_pages_num; - struct page **rq_enc_pages; /* scratch pages for use by - gss privacy code */ - void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ + struct list_head rq_list; struct xdr_buf rq_private_buf; /* The receive buffer @@ -102,21 +121,12 @@ struct rpc_rqst { #define rq_svec rq_snd_buf.head #define rq_slen rq_snd_buf.len -struct rpc_xprt_ops { - void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); - int (*reserve_xprt)(struct rpc_task *task); - void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); - void (*connect)(struct rpc_task *task); - int (*send_request)(struct rpc_task *task); - void (*set_retrans_timeout)(struct rpc_task *task); - void (*timer)(struct rpc_task *task); - void (*release_request)(struct rpc_task *task); - void (*close)(struct rpc_xprt *xprt); - void (*destroy)(struct rpc_xprt *xprt); -}; +#define XPRT_LAST_FRAG (1 << 0) +#define XPRT_COPY_RECM (1 << 1) +#define XPRT_COPY_XID (1 << 2) +#define XPRT_COPY_DATA (1 << 3) struct rpc_xprt { - struct rpc_xprt_ops * ops; /* transport methods */ struct socket * sock; /* BSD socket layer */ struct sock * inet; /* INET layer */ @@ -127,13 +137,11 @@ struct rpc_xprt { unsigned long cong; /* current congestion */ unsigned long cwnd; /* congestion window */ - size_t rcvsize, /* transport rcv buffer size */ - sndsize; /* transport send buffer size */ + unsigned int rcvsize, /* socket receive buffer size */ + sndsize; /* socket send buffer size */ size_t max_payload; /* largest RPC payload size, in bytes */ - unsigned int tsh_size; /* size of transport specific - header */ struct rpc_wait_queue sending; /* requests waiting to send */ struct rpc_wait_queue resend; /* requests waiting to resend */ @@ -142,9 +150,11 @@ struct rpc_xprt { struct list_head free; /* free slots */ struct rpc_rqst * slot; /* slot table storage */ unsigned int max_reqs; /* total slots */ - unsigned long state; /* transport state */ + unsigned long sockstate; /* Socket state */ unsigned char shutdown : 1, /* being shut down */ - resvport : 1; /* use a reserved port */ + nocong : 1, /* no congestion control */ + resvport : 1, /* use a reserved port */ + stream : 1; /* TCP */ /* * XID @@ -161,27 +171,22 @@ struct rpc_xprt { unsigned long tcp_copied, /* copied to request */ tcp_flags; /* - * Connection of transports + * Connection of sockets */ - unsigned long connect_timeout, - bind_timeout, - reestablish_timeout; - struct work_struct connect_worker; + struct work_struct sock_connect; unsigned short port; - /* - * Disconnection of idle transports + * Disconnection of idle sockets */ struct work_struct task_cleanup; struct timer_list timer; - unsigned long last_used, - idle_timeout; + unsigned long last_used; /* 
* Send stuff */ - spinlock_t transport_lock; /* lock transport info */ - spinlock_t reserve_lock; /* lock slot table */ + spinlock_t sock_lock; /* lock socket info */ + spinlock_t xprt_lock; /* lock xprt info */ struct rpc_task * snd_task; /* Task blocked in send */ struct list_head recv; @@ -190,111 +195,37 @@ struct rpc_xprt { void (*old_data_ready)(struct sock *, int); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); -}; -#define XPRT_LAST_FRAG (1 << 0) -#define XPRT_COPY_RECM (1 << 1) -#define XPRT_COPY_XID (1 << 2) -#define XPRT_COPY_DATA (1 << 3) + wait_queue_head_t cong_wait; +}; #ifdef __KERNEL__ -/* - * Transport operations used by ULPs - */ -struct rpc_xprt * xprt_create_proto(int proto, struct sockaddr_in *addr, struct rpc_timeout *to); -void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr); +struct rpc_xprt * xprt_create_proto(int proto, struct sockaddr_in *addr, + struct rpc_timeout *toparms); +int xprt_destroy(struct rpc_xprt *); +void xprt_set_timeout(struct rpc_timeout *, unsigned int, + unsigned long); -/* - * Generic internal transport functions - */ -void xprt_connect(struct rpc_task *task); -void xprt_reserve(struct rpc_task *task); -int xprt_reserve_xprt(struct rpc_task *task); -int xprt_reserve_xprt_cong(struct rpc_task *task); -int xprt_prepare_transmit(struct rpc_task *task); -void xprt_transmit(struct rpc_task *task); -void xprt_abort_transmit(struct rpc_task *task); +void xprt_reserve(struct rpc_task *); +int xprt_prepare_transmit(struct rpc_task *); +void xprt_transmit(struct rpc_task *); +void xprt_receive(struct rpc_task *); int xprt_adjust_timeout(struct rpc_rqst *req); -void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); -void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); -void xprt_release(struct rpc_task *task); -int xprt_destroy(struct rpc_xprt *xprt); - -static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p) -{ - return p + xprt->tsh_size; -} - -/* - * Transport switch helper functions - */ -void xprt_set_retrans_timeout_def(struct rpc_task *task); -void xprt_set_retrans_timeout_rtt(struct rpc_task *task); -void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); -void xprt_wait_for_buffer_space(struct rpc_task *task); -void xprt_write_space(struct rpc_xprt *xprt); -void xprt_update_rtt(struct rpc_task *task); -void xprt_adjust_cwnd(struct rpc_task *task, int result); -struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid); -void xprt_complete_rqst(struct rpc_task *task, int copied); -void xprt_release_rqst_cong(struct rpc_task *task); -void xprt_disconnect(struct rpc_xprt *xprt); - -/* - * Socket transport setup operations - */ -int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to); -int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to); - -/* - * Reserved bit positions in xprt->state - */ -#define XPRT_LOCKED (0) -#define XPRT_CONNECTED (1) -#define XPRT_CONNECTING (2) - -static inline void xprt_set_connected(struct rpc_xprt *xprt) -{ - set_bit(XPRT_CONNECTED, &xprt->state); -} - -static inline void xprt_clear_connected(struct rpc_xprt *xprt) -{ - clear_bit(XPRT_CONNECTED, &xprt->state); -} - -static inline int xprt_connected(struct rpc_xprt *xprt) -{ - return test_bit(XPRT_CONNECTED, &xprt->state); -} - -static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt) -{ - return test_and_set_bit(XPRT_CONNECTED, &xprt->state); -} - -static inline int 
xprt_test_and_clear_connected(struct rpc_xprt *xprt) -{ - return test_and_clear_bit(XPRT_CONNECTED, &xprt->state); -} - -static inline void xprt_clear_connecting(struct rpc_xprt *xprt) -{ - smp_mb__before_clear_bit(); - clear_bit(XPRT_CONNECTING, &xprt->state); - smp_mb__after_clear_bit(); -} - -static inline int xprt_connecting(struct rpc_xprt *xprt) -{ - return test_bit(XPRT_CONNECTING, &xprt->state); -} - -static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt) -{ - return test_and_set_bit(XPRT_CONNECTING, &xprt->state); -} +void xprt_release(struct rpc_task *); +void xprt_connect(struct rpc_task *); +void xprt_sock_setbufsize(struct rpc_xprt *); + +#define XPRT_LOCKED 0 +#define XPRT_CONNECT 1 +#define XPRT_CONNECTING 2 + +#define xprt_connected(xp) (test_bit(XPRT_CONNECT, &(xp)->sockstate)) +#define xprt_set_connected(xp) (set_bit(XPRT_CONNECT, &(xp)->sockstate)) +#define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate)) +#define xprt_test_and_clear_connected(xp) \ + (test_and_clear_bit(XPRT_CONNECT, &(xp)->sockstate)) +#define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate)) #endif /* __KERNEL__*/ diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c index ae8161f1f459..8c88b973abc5 100644 --- a/trunk/mm/memory.c +++ b/trunk/mm/memory.c @@ -2045,8 +2045,18 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, inc_page_state(pgfault); - if (is_vm_hugetlb_page(vma)) - return VM_FAULT_SIGBUS; /* mapping truncation does this. */ + if (unlikely(is_vm_hugetlb_page(vma))) { + if (valid_hugetlb_file_off(vma, address)) + /* We get here only if there was a stale(zero) TLB entry + * (because of HW prefetching). + * Low-level arch code (if needed) should have already + * purged the stale entry as part of this fault handling. + * Here we just return. + */ + return VM_FAULT_MINOR; + else + return VM_FAULT_SIGBUS; /* mapping truncation does this. 
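The hugetlb fault-path change above reduces to one offset computation: turn the faulting address into a file offset through the VMA, then compare it with i_size, which is exactly what valid_hugetlb_file_off() in hugetlb.h does earlier in this patch. The arithmetic on its own; the PAGE_SHIFT value and the sample numbers are placeholders:

#include <stdio.h>

#define PAGE_SHIFT 12	/* placeholder; vm_pgoff is in PAGE_SIZE units */

/* Mirrors valid_hugetlb_file_off(): a fault at 'address' is backed by
 * the file only if its offset lands below the file's current size. */
static int valid_file_off(unsigned long address, unsigned long vm_start,
			  unsigned long vm_pgoff, long long i_size)
{
	long long file_off = address - vm_start;

	file_off += (long long)vm_pgoff << PAGE_SHIFT;
	return file_off < i_size;
}

int main(void)
{
	/* 2 MiB file mapped at 0x40000000 with pgoff 0 */
	printf("%d\n", valid_file_off(0x40000000UL + 0x100000, 0x40000000UL,
				      0, 2 << 20));	/* 1: within file */
	printf("%d\n", valid_file_off(0x40000000UL + 0x300000, 0x40000000UL,
				      0, 2 << 20));	/* 0: past EOF */
	return 0;
}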
*/ + } /* * We need the page table lock to synchronize with kswapd diff --git a/trunk/net/sunrpc/Makefile b/trunk/net/sunrpc/Makefile index cdcab9ca4c60..46a2ce00a29b 100644 --- a/trunk/net/sunrpc/Makefile +++ b/trunk/net/sunrpc/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_SUNRPC) += sunrpc.o obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ -sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ +sunrpc-y := clnt.o xprt.o sched.o \ auth.o auth_null.o auth_unix.o \ svc.o svcsock.o svcauth.o svcauth_unix.o \ pmap_clnt.o timer.o xdr.o \ diff --git a/trunk/net/sunrpc/auth.c b/trunk/net/sunrpc/auth.c index a415d99c394d..505e2d4b3d62 100644 --- a/trunk/net/sunrpc/auth.c +++ b/trunk/net/sunrpc/auth.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/trunk/net/sunrpc/auth_gss/Makefile b/trunk/net/sunrpc/auth_gss/Makefile index f3431a7e33da..fe1b874084bc 100644 --- a/trunk/net/sunrpc/auth_gss/Makefile +++ b/trunk/net/sunrpc/auth_gss/Makefile @@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ - gss_krb5_seqnum.o gss_krb5_wrap.o + gss_krb5_seqnum.o obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o diff --git a/trunk/net/sunrpc/auth_gss/auth_gss.c b/trunk/net/sunrpc/auth_gss/auth_gss.c index f44f46f1d8e0..2f7b867161d2 100644 --- a/trunk/net/sunrpc/auth_gss/auth_gss.c +++ b/trunk/net/sunrpc/auth_gss/auth_gss.c @@ -42,8 +42,9 @@ #include #include #include +#include +#include #include -#include #include #include #include @@ -845,8 +846,10 @@ gss_marshal(struct rpc_task *task, u32 *p) /* We compute the checksum for the verifier over the xdr-encoded bytes * starting with the xid and ending at the end of the credential: */ - iov.iov_base = xprt_skip_transport_header(task->tk_xprt, - req->rq_snd_buf.head[0].iov_base); + iov.iov_base = req->rq_snd_buf.head[0].iov_base; + if (task->tk_client->cl_xprt->stream) + /* See clnt.c:call_header() */ + iov.iov_base += 4; iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; xdr_buf_from_iov(&iov, &verf_buf); @@ -854,7 +857,9 @@ gss_marshal(struct rpc_task *task, u32 *p) *p++ = htonl(RPC_AUTH_GSS); mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); + maj_stat = gss_get_mic(ctx->gc_gss_ctx, + GSS_C_QOP_DEFAULT, + &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; } else if (maj_stat != 0) { @@ -885,8 +890,10 @@ static u32 * gss_validate(struct rpc_task *task, u32 *p) { struct rpc_cred *cred = task->tk_msg.rpc_cred; + struct gss_cred *gss_cred = container_of(cred, struct gss_cred, + gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); - u32 seq; + u32 seq, qop_state; struct kvec iov; struct xdr_buf verf_buf; struct xdr_netobj mic; @@ -907,14 +914,23 @@ gss_validate(struct rpc_task *task, u32 *p) mic.data = (u8 *)p; mic.len = len; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat) goto out_bad; - /* We leave it to unwrap to calculate au_rslack. 
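The gss_validate() hunk just below sizes au_rslack in 32-bit XDR words: the verifier body rounded up by XDR_QUADLEN, plus two words (flavor and length) for plain krb5, plus two more (wrapper length and sequence number) for krb5i. The arithmetic, with an assumed 20-byte MIC purely for the sake of the numbers:

#include <stdio.h>

/* Round a byte length up to whole 32-bit XDR words. */
#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

int main(void)
{
	unsigned int verf_len = 20;	/* assumed MIC token size */

	/* verifier data + flavor + length */
	printf("krb5  rslack: %u words\n", XDR_QUADLEN(verf_len) + 2);
	/* + wrapper length + sequence number */
	printf("krb5i rslack: %u words\n", XDR_QUADLEN(verf_len) + 4);
	return 0;
}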
For now we just - * calculate the length of the verifier: */ - task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; + switch (gss_cred->gc_service) { + case RPC_GSS_SVC_NONE: + /* verifier data, flavor, length: */ + task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2; + break; + case RPC_GSS_SVC_INTEGRITY: + /* verifier data, flavor, length, length, sequence number: */ + task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4; + break; + case RPC_GSS_SVC_PRIVACY: + goto out_bad; + } gss_put_ctx(ctx); dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n", task->tk_pid); @@ -959,7 +975,8 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, p = iov->iov_base + iov->iov_len; mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); + maj_stat = gss_get_mic(ctx->gc_gss_ctx, + GSS_C_QOP_DEFAULT, &integ_buf, &mic); status = -EIO; /* XXX? */ if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; @@ -973,113 +990,6 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } -static void -priv_release_snd_buf(struct rpc_rqst *rqstp) -{ - int i; - - for (i=0; i < rqstp->rq_enc_pages_num; i++) - __free_page(rqstp->rq_enc_pages[i]); - kfree(rqstp->rq_enc_pages); -} - -static int -alloc_enc_pages(struct rpc_rqst *rqstp) -{ - struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; - int first, last, i; - - if (snd_buf->page_len == 0) { - rqstp->rq_enc_pages_num = 0; - return 0; - } - - first = snd_buf->page_base >> PAGE_CACHE_SHIFT; - last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; - rqstp->rq_enc_pages_num = last - first + 1 + 1; - rqstp->rq_enc_pages - = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), - GFP_NOFS); - if (!rqstp->rq_enc_pages) - goto out; - for (i=0; i < rqstp->rq_enc_pages_num; i++) { - rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); - if (rqstp->rq_enc_pages[i] == NULL) - goto out_free; - } - rqstp->rq_release_snd_buf = priv_release_snd_buf; - return 0; -out_free: - for (i--; i >= 0; i--) { - __free_page(rqstp->rq_enc_pages[i]); - } -out: - return -EAGAIN; -} - -static inline int -gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, - kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj) -{ - struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; - u32 offset; - u32 maj_stat; - int status; - u32 *opaque_len; - struct page **inpages; - int first; - int pad; - struct kvec *iov; - char *tmp; - - opaque_len = p++; - offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; - *p++ = htonl(rqstp->rq_seqno); - - status = encode(rqstp, p, obj); - if (status) - return status; - - status = alloc_enc_pages(rqstp); - if (status) - return status; - first = snd_buf->page_base >> PAGE_CACHE_SHIFT; - inpages = snd_buf->pages + first; - snd_buf->pages = rqstp->rq_enc_pages; - snd_buf->page_base -= first << PAGE_CACHE_SHIFT; - /* Give the tail its own page, in case we need extra space in the - * head when wrapping: */ - if (snd_buf->page_len || snd_buf->tail[0].iov_len) { - tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); - memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); - snd_buf->tail[0].iov_base = tmp; - } - maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); - /* RPC_SLACK_SPACE should prevent this ever happening: */ - BUG_ON(snd_buf->len > snd_buf->buflen); - status = -EIO; - /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was - * done anyway, so it's safe to put the request on the wire: */ - if (maj_stat == 
GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; - else if (maj_stat) - return status; - - *opaque_len = htonl(snd_buf->len - offset); - /* guess whether we're in the head or the tail: */ - if (snd_buf->page_len || snd_buf->tail[0].iov_len) - iov = snd_buf->tail; - else - iov = snd_buf->head; - p = iov->iov_base + iov->iov_len; - pad = 3 - ((snd_buf->len - offset - 1) & 3); - memset(p, 0, pad); - iov->iov_len += pad; - snd_buf->len += pad; - - return 0; -} - static int gss_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, u32 *p, void *obj) @@ -1107,8 +1017,6 @@ gss_wrap_req(struct rpc_task *task, rqstp, p, obj); break; case RPC_GSS_SVC_PRIVACY: - status = gss_wrap_req_priv(cred, ctx, encode, - rqstp, p, obj); break; } out: @@ -1146,7 +1054,8 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) return status; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, + &mic, NULL); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat != GSS_S_COMPLETE) @@ -1154,35 +1063,6 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } -static inline int -gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, - struct rpc_rqst *rqstp, u32 **p) -{ - struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; - u32 offset; - u32 opaque_len; - u32 maj_stat; - int status = -EIO; - - opaque_len = ntohl(*(*p)++); - offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; - if (offset + opaque_len > rcv_buf->len) - return status; - /* remove padding: */ - rcv_buf->len = offset + opaque_len; - - maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); - if (maj_stat == GSS_S_CONTEXT_EXPIRED) - cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; - if (maj_stat != GSS_S_COMPLETE) - return status; - if (ntohl(*(*p)++) != rqstp->rq_seqno) - return status; - - return 0; -} - - static int gss_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, u32 *p, void *obj) @@ -1191,9 +1071,6 @@ gss_unwrap_resp(struct rpc_task *task, struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); - u32 *savedp = p; - struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; - int savedlen = head->iov_len; int status = -EIO; if (ctx->gc_proc != RPC_GSS_PROC_DATA) @@ -1207,14 +1084,8 @@ gss_unwrap_resp(struct rpc_task *task, goto out; break; case RPC_GSS_SVC_PRIVACY: - status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); - if (status) - goto out; break; } - /* take into account extra slack for integrity and privacy cases: */ - task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) - + (savedlen - head->iov_len); out_decode: status = decode(rqstp, p, obj); out: diff --git a/trunk/net/sunrpc/auth_gss/gss_krb5_crypto.c b/trunk/net/sunrpc/auth_gss/gss_krb5_crypto.c index 3f3d5437f02d..ee6ae74cd1b2 100644 --- a/trunk/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/trunk/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -139,91 +139,17 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) { sg->length = len; } -static int -process_xdr_buf(struct xdr_buf *buf, int offset, int len, - int (*actor)(struct scatterlist *, void *), void *data) -{ - int i, page_len, thislen, page_offset, ret = 0; - struct scatterlist sg[1]; - - if (offset >= buf->head[0].iov_len) { - offset -= buf->head[0].iov_len; - } else { - thislen = 
buf->head[0].iov_len - offset; - if (thislen > len) - thislen = len; - buf_to_sg(sg, buf->head[0].iov_base + offset, thislen); - ret = actor(sg, data); - if (ret) - goto out; - offset = 0; - len -= thislen; - } - if (len == 0) - goto out; - - if (offset >= buf->page_len) { - offset -= buf->page_len; - } else { - page_len = buf->page_len - offset; - if (page_len > len) - page_len = len; - len -= page_len; - page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); - i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; - thislen = PAGE_CACHE_SIZE - page_offset; - do { - if (thislen > page_len) - thislen = page_len; - sg->page = buf->pages[i]; - sg->offset = page_offset; - sg->length = thislen; - ret = actor(sg, data); - if (ret) - goto out; - page_len -= thislen; - i++; - page_offset = 0; - thislen = PAGE_CACHE_SIZE; - } while (page_len != 0); - offset = 0; - } - if (len == 0) - goto out; - - if (offset < buf->tail[0].iov_len) { - thislen = buf->tail[0].iov_len - offset; - if (thislen > len) - thislen = len; - buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen); - ret = actor(sg, data); - len -= thislen; - } - if (len != 0) - ret = -EINVAL; -out: - return ret; -} - -static int -checksummer(struct scatterlist *sg, void *data) -{ - struct crypto_tfm *tfm = (struct crypto_tfm *)data; - - crypto_digest_update(tfm, sg, 1); - - return 0; -} - /* checksum the plaintext data and hdrlen bytes of the token header */ s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, - int body_offset, struct xdr_netobj *cksum) + struct xdr_netobj *cksum) { char *cksumname; struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ struct scatterlist sg[1]; u32 code = GSS_S_FAILURE; + int len, thislen, offset; + int i; switch (cksumtype) { case CKSUMTYPE_RSA_MD5: @@ -243,8 +169,33 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, crypto_digest_init(tfm); buf_to_sg(sg, header, hdrlen); crypto_digest_update(tfm, sg, 1); - process_xdr_buf(body, body_offset, body->len - body_offset, - checksummer, tfm); + if (body->head[0].iov_len) { + buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len); + crypto_digest_update(tfm, sg, 1); + } + + len = body->page_len; + if (len != 0) { + offset = body->page_base & (PAGE_CACHE_SIZE - 1); + i = body->page_base >> PAGE_CACHE_SHIFT; + thislen = PAGE_CACHE_SIZE - offset; + do { + if (thislen > len) + thislen = len; + sg->page = body->pages[i]; + sg->offset = offset; + sg->length = thislen; + crypto_digest_update(tfm, sg, 1); + len -= thislen; + i++; + offset = 0; + thislen = PAGE_CACHE_SIZE; + } while(len != 0); + } + if (body->tail[0].iov_len) { + buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len); + crypto_digest_update(tfm, sg, 1); + } crypto_digest_final(tfm, cksum->data); code = 0; out: @@ -253,154 +204,3 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, } EXPORT_SYMBOL(make_checksum); - -struct encryptor_desc { - u8 iv[8]; /* XXX hard-coded blocksize */ - struct crypto_tfm *tfm; - int pos; - struct xdr_buf *outbuf; - struct page **pages; - struct scatterlist infrags[4]; - struct scatterlist outfrags[4]; - int fragno; - int fraglen; -}; - -static int -encryptor(struct scatterlist *sg, void *data) -{ - struct encryptor_desc *desc = data; - struct xdr_buf *outbuf = desc->outbuf; - struct page *in_page; - int thislen = desc->fraglen + sg->length; - int fraglen, ret; - int page_pos; - - /* Worst case is 4 fragments: head, end of page 1, start - * of page 2, tail. 
Anything more is a bug. */ - BUG_ON(desc->fragno > 3); - desc->infrags[desc->fragno] = *sg; - desc->outfrags[desc->fragno] = *sg; - - page_pos = desc->pos - outbuf->head[0].iov_len; - if (page_pos >= 0 && page_pos < outbuf->page_len) { - /* pages are not in place: */ - int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; - in_page = desc->pages[i]; - } else { - in_page = sg->page; - } - desc->infrags[desc->fragno].page = in_page; - desc->fragno++; - desc->fraglen += sg->length; - desc->pos += sg->length; - - fraglen = thislen & 7; /* XXX hardcoded blocksize */ - thislen -= fraglen; - - if (thislen == 0) - return 0; - - ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, - thislen, desc->iv); - if (ret) - return ret; - if (fraglen) { - desc->outfrags[0].page = sg->page; - desc->outfrags[0].offset = sg->offset + sg->length - fraglen; - desc->outfrags[0].length = fraglen; - desc->infrags[0] = desc->outfrags[0]; - desc->infrags[0].page = in_page; - desc->fragno = 1; - desc->fraglen = fraglen; - } else { - desc->fragno = 0; - desc->fraglen = 0; - } - return 0; -} - -int -gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, - struct page **pages) -{ - int ret; - struct encryptor_desc desc; - - BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); - - memset(desc.iv, 0, sizeof(desc.iv)); - desc.tfm = tfm; - desc.pos = offset; - desc.outbuf = buf; - desc.pages = pages; - desc.fragno = 0; - desc.fraglen = 0; - - ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc); - return ret; -} - -EXPORT_SYMBOL(gss_encrypt_xdr_buf); - -struct decryptor_desc { - u8 iv[8]; /* XXX hard-coded blocksize */ - struct crypto_tfm *tfm; - struct scatterlist frags[4]; - int fragno; - int fraglen; -}; - -static int -decryptor(struct scatterlist *sg, void *data) -{ - struct decryptor_desc *desc = data; - int thislen = desc->fraglen + sg->length; - int fraglen, ret; - - /* Worst case is 4 fragments: head, end of page 1, start - * of page 2, tail. Anything more is a bug. 
*/ - BUG_ON(desc->fragno > 3); - desc->frags[desc->fragno] = *sg; - desc->fragno++; - desc->fraglen += sg->length; - - fraglen = thislen & 7; /* XXX hardcoded blocksize */ - thislen -= fraglen; - - if (thislen == 0) - return 0; - - ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, - thislen, desc->iv); - if (ret) - return ret; - if (fraglen) { - desc->frags[0].page = sg->page; - desc->frags[0].offset = sg->offset + sg->length - fraglen; - desc->frags[0].length = fraglen; - desc->fragno = 1; - desc->fraglen = fraglen; - } else { - desc->fragno = 0; - desc->fraglen = 0; - } - return 0; -} - -int -gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset) -{ - struct decryptor_desc desc; - - /* XXXJBF: */ - BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); - - memset(desc.iv, 0, sizeof(desc.iv)); - desc.tfm = tfm; - desc.fragno = 0; - desc.fraglen = 0; - return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); -} - -EXPORT_SYMBOL(gss_decrypt_xdr_buf); diff --git a/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c b/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c index 5f1f806a0b11..606a8a82cafb 100644 --- a/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/trunk/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -190,12 +191,43 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { kfree(kctx); } +static u32 +gss_verify_mic_kerberos(struct gss_ctx *ctx, + struct xdr_buf *message, + struct xdr_netobj *mic_token, + u32 *qstate) { + u32 maj_stat = 0; + int qop_state; + struct krb5_ctx *kctx = ctx->internal_ctx_id; + + maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state, + KG_TOK_MIC_MSG); + if (!maj_stat && qop_state) + *qstate = qop_state; + + dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat); + return maj_stat; +} + +static u32 +gss_get_mic_kerberos(struct gss_ctx *ctx, + u32 qop, + struct xdr_buf *message, + struct xdr_netobj *mic_token) { + u32 err = 0; + struct krb5_ctx *kctx = ctx->internal_ctx_id; + + err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG); + + dprintk("RPC: gss_get_mic_kerberos returning %d\n",err); + + return err; +} + static struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, .gss_verify_mic = gss_verify_mic_kerberos, - .gss_wrap = gss_wrap_kerberos, - .gss_unwrap = gss_unwrap_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, }; @@ -210,11 +242,6 @@ static struct pf_desc gss_kerberos_pfs[] = { .service = RPC_GSS_SVC_INTEGRITY, .name = "krb5i", }, - [2] = { - .pseudoflavor = RPC_AUTH_GSS_KRB5P, - .service = RPC_GSS_SVC_PRIVACY, - .name = "krb5p", - }, }; static struct gss_api_mech gss_kerberos_mech = { diff --git a/trunk/net/sunrpc/auth_gss/gss_krb5_seal.c b/trunk/net/sunrpc/auth_gss/gss_krb5_seal.c index 13f8ae979454..afeeb8715a77 100644 --- a/trunk/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/trunk/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -70,13 +70,22 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +static inline int +gss_krb5_padding(int blocksize, int length) { + /* Most of the code is block-size independent but in practice we + * use only 8: */ + BUG_ON(blocksize != 8); + return 8 - (length & 7); +} + u32 -gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, - struct xdr_netobj *token) +krb5_make_token(struct krb5_ctx *ctx, int qop_req, + struct xdr_buf *text, struct xdr_netobj *token, + int 
toktype) { - struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; s32 checksum_type; struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + int blocksize = 0, tmsglen; unsigned char *ptr, *krb5_hdr, *msg_start; s32 now; @@ -84,6 +93,9 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, now = get_seconds(); + if (qop_req != 0) + goto out_err; + switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: checksum_type = CKSUMTYPE_RSA_MD5; @@ -99,13 +111,21 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, goto out_err; } - token->len = g_token_size(&ctx->mech_used, 22); + if (toktype == KG_TOK_WRAP_MSG) { + blocksize = crypto_tfm_alg_blocksize(ctx->enc); + tmsglen = blocksize + text->len + + gss_krb5_padding(blocksize, blocksize + text->len); + } else { + tmsglen = 0; + } + + token->len = g_token_size(&ctx->mech_used, 22 + tmsglen); ptr = token->data; - g_make_token_header(&ctx->mech_used, 22, &ptr); + g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr); - *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff); - *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff); + *ptr++ = (unsigned char) ((toktype>>8)&0xff); + *ptr++ = (unsigned char) (toktype&0xff); /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ krb5_hdr = ptr - 2; @@ -113,9 +133,17 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); memset(krb5_hdr + 4, 0xff, 4); + if (toktype == KG_TOK_WRAP_MSG) + *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg); - if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum)) + if (toktype == KG_TOK_WRAP_MSG) { + /* XXX removing support for now */ + goto out_err; + } else { /* Sign only. */ + if (make_checksum(checksum_type, krb5_hdr, 8, text, + &md5cksum)) goto out_err; + } switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: diff --git a/trunk/net/sunrpc/auth_gss/gss_krb5_unseal.c b/trunk/net/sunrpc/auth_gss/gss_krb5_unseal.c index 2030475d98ed..8767fc53183d 100644 --- a/trunk/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/trunk/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -68,14 +68,21 @@ #endif -/* read_token is a mic token, and message_buffer is the data that the mic was - * supposedly taken over. */ +/* message_buffer is an input if toktype is MIC and an output if it is WRAP: + * If toktype is MIC: read_token is a mic token, and message_buffer is the + * data that the mic was supposedly taken over. + * If toktype is WRAP: read_token is a wrap token, and message_buffer is used + * to return the decrypted data. + */ +/* XXX will need to change prototype and/or just split into a separate function + * when we add privacy (because read_token will be in pages too). */ u32 -gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, - struct xdr_buf *message_buffer, struct xdr_netobj *read_token) +krb5_read_token(struct krb5_ctx *ctx, + struct xdr_netobj *read_token, + struct xdr_buf *message_buffer, + int *qop_state, int toktype) { - struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; int signalg; int sealalg; s32 checksum_type; @@ -93,12 +100,16 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, read_token->len)) goto out; - if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) || - (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) ) + if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff))) goto out; /* XXX sanity-check bodysize?? 
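The token sizing in krb5_make_token() is easier to follow with numbers. A hedged worked example, assuming the 8-byte DES blocksize that the BUG_ON in gss_krb5_padding() pins; the 100-byte payload is made up for illustration:

    /* Example: sizing a wrap token over a 100-byte payload.
     * Note the pad is always 1..8 bytes, never 0, so the receiver
     * can recover the original length from the last pad byte. */
    int blocksize = 8;
    int payload   = 100;                               /* hypothetical */
    int pad       = 8 - ((blocksize + payload) & 7);   /* == 4 */
    int tmsglen   = blocksize + payload + pad;         /* == 112, 8-aligned */
    /* g_token_size(&ctx->mech_used, 22 + tmsglen) then adds the fixed
     * krb5 token fields and the mech OID framing on top of tmsglen. */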
*/ + if (toktype == KG_TOK_WRAP_MSG) { + /* XXX gone */ + goto out; + } + /* get the sign and seal algorithms */ signalg = ptr[0] + (ptr[1] << 8); @@ -109,7 +120,14 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) goto out; - if (sealalg != 0xffff) + if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) || + ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff))) + goto out; + + /* in the current spec, there is only one valid seal algorithm per + key type, so a simple comparison is ok */ + + if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg)) goto out; /* there are several mappings of seal algorithms to sign algorithms, @@ -136,7 +154,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, switch (signalg) { case SGN_ALG_DES_MAC_MD5: ret = make_checksum(checksum_type, ptr - 2, 8, - message_buffer, 0, &md5cksum); + message_buffer, &md5cksum); if (ret) goto out; @@ -157,6 +175,9 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, /* it got through unscathed. Make sure the context is unexpired */ + if (qop_state) + *qop_state = GSS_C_QOP_DEFAULT; + now = get_seconds(); ret = GSS_S_CONTEXT_EXPIRED; diff --git a/trunk/net/sunrpc/auth_gss/gss_krb5_wrap.c b/trunk/net/sunrpc/auth_gss/gss_krb5_wrap.c deleted file mode 100644 index af777cf9f251..000000000000 --- a/trunk/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ /dev/null @@ -1,363 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef RPC_DEBUG -# define RPCDBG_FACILITY RPCDBG_AUTH -#endif - -static inline int -gss_krb5_padding(int blocksize, int length) -{ - /* Most of the code is block-size independent but currently we - * use only 8: */ - BUG_ON(blocksize != 8); - return 8 - (length & 7); -} - -static inline void -gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize) -{ - int padding = gss_krb5_padding(blocksize, buf->len - offset); - char *p; - struct kvec *iov; - - if (buf->page_len || buf->tail[0].iov_len) - iov = &buf->tail[0]; - else - iov = &buf->head[0]; - p = iov->iov_base + iov->iov_len; - iov->iov_len += padding; - buf->len += padding; - memset(p, padding, padding); -} - -static inline int -gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) -{ - u8 *ptr; - u8 pad; - int len = buf->len; - - if (len <= buf->head[0].iov_len) { - pad = *(u8 *)(buf->head[0].iov_base + len - 1); - if (pad > buf->head[0].iov_len) - return -EINVAL; - buf->head[0].iov_len -= pad; - goto out; - } else - len -= buf->head[0].iov_len; - if (len <= buf->page_len) { - int last = (buf->page_base + len - 1) - >>PAGE_CACHE_SHIFT; - int offset = (buf->page_base + len - 1) - & (PAGE_CACHE_SIZE - 1); - ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA); - pad = *(ptr + offset); - kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA); - goto out; - } else - len -= buf->page_len; - BUG_ON(len > buf->tail[0].iov_len); - pad = *(u8 *)(buf->tail[0].iov_base + len - 1); -out: - /* XXX: NOTE: we do not adjust the page lengths--they represent - * a range of data in the real filesystem page cache, and we need - * to know that range so the xdr code can properly place read data. - * However adjusting the head length, as we do above, is harmless. - * In the case of a request that fits into a single page, the server - * also uses length and head length together to determine the original - * start of the request to copy the request for deferral; so it's - * easier on the server if we adjust head and tail length in tandem.
- * It's not really a problem that we don't fool with the page and - * tail lengths, though--at worst badly formed xdr might lead the - * server to attempt to parse the padding. - * XXX: Document all these weird requirements for gss mechanism - * wrap/unwrap functions. */ - if (pad > blocksize) - return -EINVAL; - if (buf->len > pad) - buf->len -= pad; - else - return -EINVAL; - return 0; -} - -static inline void -make_confounder(char *p, int blocksize) -{ - static u64 i = 0; - u64 *q = (u64 *)p; - - /* rfc1964 claims this should be "random". But all that's really - * necessary is that it be unique. And not even that is necessary in - * our case since our "gssapi" implementation exists only to support - * rpcsec_gss, so we know that the only buffers we will ever encrypt - * already begin with a unique sequence number. Just to hedge my bets - * I'll make a half-hearted attempt at something unique, but ensuring - * uniqueness would mean worrying about atomicity and rollover, and I - * don't care enough. */ - - BUG_ON(blocksize != 8); - *q = i++; -} - -/* Assumptions: the head and tail of inbuf are ours to play with. - * The pages, however, may be real pages in the page cache and we replace - * them with scratch pages from **pages before writing to them. */ -/* XXX: obviously the above should be documentation of wrap interface, - * and shouldn't be in this kerberos-specific file. */ - -/* XXX factor out common code with seal/unseal. */ - -u32 -gss_wrap_kerberos(struct gss_ctx *ctx, int offset, - struct xdr_buf *buf, struct page **pages) -{ - struct krb5_ctx *kctx = ctx->internal_ctx_id; - s32 checksum_type; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; - int blocksize = 0, plainlen; - unsigned char *ptr, *krb5_hdr, *msg_start; - s32 now; - int headlen; - struct page **tmp_pages; - - dprintk("RPC: gss_wrap_kerberos\n"); - - now = get_seconds(); - - switch (kctx->signalg) { - case SGN_ALG_DES_MAC_MD5: - checksum_type = CKSUMTYPE_RSA_MD5; - break; - default: - dprintk("RPC: gss_krb5_seal: kctx->signalg %d not" - " supported\n", kctx->signalg); - goto out_err; - } - if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) { - dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n", - kctx->sealalg); - goto out_err; - } - - blocksize = crypto_tfm_alg_blocksize(kctx->enc); - gss_krb5_add_padding(buf, offset, blocksize); - BUG_ON((buf->len - offset) % blocksize); - plainlen = blocksize + buf->len - offset; - - headlen = g_token_size(&kctx->mech_used, 22 + plainlen) - - (buf->len - offset); - - ptr = buf->head[0].iov_base + offset; - /* shift data to make room for header. */ - /* XXX Would be cleverer to encrypt while copying. */ - /* XXX bounds checking, slack, etc. 
*/ - memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); - buf->head[0].iov_len += headlen; - buf->len += headlen; - BUG_ON((buf->len - offset - headlen) % blocksize); - - g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr); - - - *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff); - *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff); - - /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ - krb5_hdr = ptr - 2; - msg_start = krb5_hdr + 24; - /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize); - - *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg); - memset(krb5_hdr + 4, 0xff, 4); - *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg); - - make_confounder(msg_start, blocksize); - - /* XXXJBF: UGH!: */ - tmp_pages = buf->pages; - buf->pages = pages; - if (make_checksum(checksum_type, krb5_hdr, 8, buf, - offset + headlen - blocksize, &md5cksum)) - goto out_err; - buf->pages = tmp_pages; - - switch (kctx->signalg) { - case SGN_ALG_DES_MAC_MD5: - if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) - goto out_err; - memcpy(krb5_hdr + 16, - md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH, - KRB5_CKSUM_LENGTH); - - dprintk("RPC: make_seal_token: cksum data: \n"); - print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0); - break; - default: - BUG(); - } - - kfree(md5cksum.data); - - /* XXX would probably be more efficient to compute checksum - * and encrypt at the same time: */ - if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, - kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) - goto out_err; - - if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, - pages)) - goto out_err; - - kctx->seq_send++; - - return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); -out_err: - if (md5cksum.data) kfree(md5cksum.data); - return GSS_S_FAILURE; -} - -u32 -gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) -{ - struct krb5_ctx *kctx = ctx->internal_ctx_id; - int signalg; - int sealalg; - s32 checksum_type; - struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; - s32 now; - int direction; - s32 seqnum; - unsigned char *ptr; - int bodysize; - u32 ret = GSS_S_DEFECTIVE_TOKEN; - void *data_start, *orig_start; - int data_len; - int blocksize; - - dprintk("RPC: gss_unwrap_kerberos\n"); - - ptr = (u8 *)buf->head[0].iov_base + offset; - if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, - buf->len - offset)) - goto out; - - if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) || - (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) ) - goto out; - - /* XXX sanity-check bodysize?? */ - - /* get the sign and seal algorithms */ - - signalg = ptr[0] + (ptr[1] << 8); - sealalg = ptr[2] + (ptr[3] << 8); - - /* Sanity checks */ - - if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) - goto out; - - if (sealalg == 0xffff) - goto out; - - /* in the current spec, there is only one valid seal algorithm per - key type, so a simple comparison is ok */ - - if (sealalg != kctx->sealalg) - goto out; - - /* there are several mappings of seal algorithms to sign algorithms, - but few enough that we can try them all. 
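For following the pointer arithmetic in the seal and unseal paths (krb5_hdr, msg_start, and the ptr + 6 and ptr + 14 offsets), this is the RFC 1964, section 1.2.1 token layout the code indexes into; ptr sits at krb5_hdr + 2 once the two token-id bytes have been consumed:

    /* krb5 per-message token header, offsets from krb5_hdr:
     *   +0..1    TOK_ID     0x01 0x01 = MIC, 0x02 0x01 = wrap
     *   +2..3    SGN_ALG    e.g. SGN_ALG_DES_MAC_MD5
     *   +4..5    SEAL_ALG   0xffff for MIC tokens
     *   +6..7    filler     0xff 0xff
     *   +8..15   SND_SEQ    encrypted sequence number  (ptr + 6)
     *   +16..23  SGN_CKSUM  truncated checksum         (ptr + 14)
     * For wrap tokens the confounder and sealed body start at
     * msg_start = krb5_hdr + 24. */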
*/ - - if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) || - (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || - (kctx->sealalg == SEAL_ALG_DES3KD && - signalg != SGN_ALG_HMAC_SHA1_DES3_KD)) - goto out; - - if (gss_decrypt_xdr_buf(kctx->enc, buf, - ptr + 22 - (unsigned char *)buf->head[0].iov_base)) - goto out; - - /* compute the checksum of the message */ - - /* initialize the cksum */ - switch (signalg) { - case SGN_ALG_DES_MAC_MD5: - checksum_type = CKSUMTYPE_RSA_MD5; - break; - default: - ret = GSS_S_DEFECTIVE_TOKEN; - goto out; - } - - switch (signalg) { - case SGN_ALG_DES_MAC_MD5: - ret = make_checksum(checksum_type, ptr - 2, 8, buf, - ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum); - if (ret) - goto out; - - ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len); - if (ret) - goto out; - - if (memcmp(md5cksum.data + 8, ptr + 14, 8)) { - ret = GSS_S_BAD_SIG; - goto out; - } - break; - default: - ret = GSS_S_DEFECTIVE_TOKEN; - goto out; - } - - /* it got through unscathed. Make sure the context is unexpired */ - - now = get_seconds(); - - ret = GSS_S_CONTEXT_EXPIRED; - if (now > kctx->endtime) - goto out; - - /* do sequencing checks */ - - ret = GSS_S_BAD_SIG; - if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction, - &seqnum))) - goto out; - - if ((kctx->initiate && direction != 0xff) || - (!kctx->initiate && direction != 0)) - goto out; - - /* Copy the data back to the right position. XXX: Would probably be - * better to copy and encrypt at the same time. */ - - blocksize = crypto_tfm_alg_blocksize(kctx->enc); - data_start = ptr + 22 + blocksize; - orig_start = buf->head[0].iov_base + offset; - data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; - memmove(orig_start, data_start, data_len); - buf->head[0].iov_len -= (data_start - orig_start); - buf->len -= (data_start - orig_start); - - ret = GSS_S_DEFECTIVE_TOKEN; - if (gss_krb5_remove_padding(buf, blocksize)) - goto out; - - ret = GSS_S_COMPLETE; -out: - if (md5cksum.data) kfree(md5cksum.data); - return ret; -} diff --git a/trunk/net/sunrpc/auth_gss/gss_mech_switch.c b/trunk/net/sunrpc/auth_gss/gss_mech_switch.c index b048bf672da2..9dfb68377d69 100644 --- a/trunk/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/trunk/net/sunrpc/auth_gss/gss_mech_switch.c @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -250,11 +251,13 @@ gss_import_sec_context(const void *input_token, size_t bufsize, u32 gss_get_mic(struct gss_ctx *context_handle, + u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token) { return context_handle->mech_type->gm_ops ->gss_get_mic(context_handle, + qop, message, mic_token); } @@ -264,34 +267,16 @@ gss_get_mic(struct gss_ctx *context_handle, u32 gss_verify_mic(struct gss_ctx *context_handle, struct xdr_buf *message, - struct xdr_netobj *mic_token) + struct xdr_netobj *mic_token, + u32 *qstate) { return context_handle->mech_type->gm_ops ->gss_verify_mic(context_handle, message, - mic_token); + mic_token, + qstate); } -u32 -gss_wrap(struct gss_ctx *ctx_id, - int offset, - struct xdr_buf *buf, - struct page **inpages) -{ - return ctx_id->mech_type->gm_ops - ->gss_wrap(ctx_id, offset, buf, inpages); -} - -u32 -gss_unwrap(struct gss_ctx *ctx_id, - int offset, - struct xdr_buf *buf) -{ - return ctx_id->mech_type->gm_ops - ->gss_unwrap(ctx_id, offset, buf); -} - - /* gss_delete_sec_context: free all resources associated with context_handle.
* Note this differs from the RFC 2744-specified prototype in that we don't * bother returning an output token, since it would never be used anyway. */ diff --git a/trunk/net/sunrpc/auth_gss/gss_spkm3_mech.c b/trunk/net/sunrpc/auth_gss/gss_spkm3_mech.c index 39b3edc14694..6c97d61baa9b 100644 --- a/trunk/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/trunk/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -224,13 +224,18 @@ gss_delete_sec_context_spkm3(void *internal_ctx) { static u32 gss_verify_mic_spkm3(struct gss_ctx *ctx, struct xdr_buf *signbuf, - struct xdr_netobj *checksum) -{ + struct xdr_netobj *checksum, + u32 *qstate) { u32 maj_stat = 0; + int qop_state = 0; struct spkm3_ctx *sctx = ctx->internal_ctx_id; dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n"); - maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); + maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state, + SPKM_MIC_TOK); + + if (!maj_stat && qop_state) + *qstate = qop_state; dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); return maj_stat; @@ -238,15 +243,15 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx, static u32 gss_get_mic_spkm3(struct gss_ctx *ctx, + u32 qop, struct xdr_buf *message_buffer, - struct xdr_netobj *message_token) -{ + struct xdr_netobj *message_token) { u32 err = 0; struct spkm3_ctx *sctx = ctx->internal_ctx_id; dprintk("RPC: gss_get_mic_spkm3\n"); - err = spkm3_make_token(sctx, message_buffer, + err = spkm3_make_token(sctx, qop, message_buffer, message_token, SPKM_MIC_TOK); return err; } @@ -259,8 +264,8 @@ static struct gss_api_ops gss_spkm3_ops = { }; static struct pf_desc gss_spkm3_pfs[] = { - {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"}, - {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, + {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"}, + {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, }; static struct gss_api_mech gss_spkm3_mech = { diff --git a/trunk/net/sunrpc/auth_gss/gss_spkm3_seal.c b/trunk/net/sunrpc/auth_gss/gss_spkm3_seal.c index 148201e929d0..25339868d462 100644 --- a/trunk/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/trunk/net/sunrpc/auth_gss/gss_spkm3_seal.c @@ -51,7 +51,7 @@ */ u32 -spkm3_make_token(struct spkm3_ctx *ctx, +spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype) { @@ -68,6 +68,8 @@ spkm3_make_token(struct spkm3_ctx *ctx, dprintk("RPC: spkm3_make_token\n"); now = jiffies; + if (qop_req != 0) + goto out_err; if (ctx->ctx_id.len != 16) { dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", diff --git a/trunk/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/trunk/net/sunrpc/auth_gss/gss_spkm3_unseal.c index c3c0d9586103..65ce81bf0bc4 100644 --- a/trunk/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/trunk/net/sunrpc/auth_gss/gss_spkm3_unseal.c @@ -52,7 +52,7 @@ u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, /* checksum */ struct xdr_buf *message_buffer, /* signbuf */ - int toktype) + int *qop_state, int toktype) { s32 code; struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; diff --git a/trunk/net/sunrpc/auth_gss/svcauth_gss.c b/trunk/net/sunrpc/auth_gss/svcauth_gss.c index e4ada15ed856..e3308195374e 100644 --- a/trunk/net/sunrpc/auth_gss/svcauth_gss.c +++ b/trunk/net/sunrpc/auth_gss/svcauth_gss.c @@ -566,7 +566,8 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, if (rqstp->rq_deferred) /* skip verification of revisited request */ return SVC_OK; - if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) { + if 
(gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL) + != GSS_S_COMPLETE) { *authp = rpcsec_gsserr_credproblem; return SVC_DENIED; } @@ -603,7 +604,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) xdr_buf_from_iov(&iov, &verf_data); p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); + maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic); if (maj_stat != GSS_S_COMPLETE) return -1; *p++ = htonl(mic.len); @@ -709,7 +710,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) goto out; if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) goto out; - maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); + maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL); if (maj_stat != GSS_S_COMPLETE) goto out; if (ntohl(svc_getu32(&buf->head[0])) != seq) @@ -1011,7 +1012,7 @@ svcauth_gss_release(struct svc_rqst *rqstp) resv = &resbuf->tail[0]; } mic.data = (u8 *)resv->iov_base + resv->iov_len + 4; - if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic)) + if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic)) goto out_err; svc_putu32(resv, htonl(mic.len)); memset(mic.data + mic.len, 0, diff --git a/trunk/net/sunrpc/auth_null.c b/trunk/net/sunrpc/auth_null.c index f56767aaa927..9b72d3abf823 100644 --- a/trunk/net/sunrpc/auth_null.c +++ b/trunk/net/sunrpc/auth_null.c @@ -7,7 +7,9 @@ */ #include +#include #include +#include #include #include #include diff --git a/trunk/net/sunrpc/auth_unix.c b/trunk/net/sunrpc/auth_unix.c index 890fb5ea0dcb..4ff297a9b15b 100644 --- a/trunk/net/sunrpc/auth_unix.c +++ b/trunk/net/sunrpc/auth_unix.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include #include diff --git a/trunk/net/sunrpc/clnt.c b/trunk/net/sunrpc/clnt.c index 702ede309b06..f17e6153b688 100644 --- a/trunk/net/sunrpc/clnt.c +++ b/trunk/net/sunrpc/clnt.c @@ -1,5 +1,5 @@ /* - * linux/net/sunrpc/clnt.c + * linux/net/sunrpc/rpcclnt.c * * This file contains the high-level RPC interface. 
* It is modeled as a finite state machine to support both synchronous @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -52,7 +53,6 @@ static void call_allocate(struct rpc_task *task); static void call_encode(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); -static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); static void call_status(struct rpc_task *task); static void call_refresh(struct rpc_task *task); @@ -517,8 +517,15 @@ void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { struct rpc_xprt *xprt = clnt->cl_xprt; - if (xprt->ops->set_buffer_size) - xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); + + xprt->sndsize = 0; + if (sndsize) + xprt->sndsize = sndsize + RPC_SLACK_SPACE; + xprt->rcvsize = 0; + if (rcvsize) + xprt->rcvsize = rcvsize + RPC_SLACK_SPACE; + if (xprt_connected(xprt)) + xprt_sock_setbufsize(xprt); } /* @@ -678,11 +685,13 @@ call_allocate(struct rpc_task *task) static void call_encode(struct rpc_task *task) { + struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct xdr_buf *sndbuf = &req->rq_snd_buf; struct xdr_buf *rcvbuf = &req->rq_rcv_buf; unsigned int bufsiz; kxdrproc_t encode; + int status; u32 *p; dprintk("RPC: %4d call_encode (status %d)\n", @@ -710,15 +719,11 @@ call_encode(struct rpc_task *task) rpc_exit(task, -EIO); return; } - if (encode == NULL) - return; - - task->tk_status = rpcauth_wrap_req(task, encode, req, p, - task->tk_msg.rpc_argp); - if (task->tk_status == -ENOMEM) { - /* XXX: Is this sane? */ - rpc_delay(task, 3*HZ); - task->tk_status = -EAGAIN; + if (encode && (status = rpcauth_wrap_req(task, encode, req, p, + task->tk_msg.rpc_argp)) < 0) { + printk(KERN_WARNING "%s: can't encode arguments: %d\n", + clnt->cl_protname, -status); + rpc_exit(task, status); } } @@ -729,95 +734,43 @@ static void call_bind(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; + struct rpc_xprt *xprt = clnt->cl_xprt; - dprintk("RPC: %4d call_bind (status %d)\n", - task->tk_pid, task->tk_status); - - task->tk_action = call_connect; - if (!clnt->cl_port) { - task->tk_action = call_bind_status; - task->tk_timeout = task->tk_xprt->bind_timeout; - rpc_getport(task, clnt); - } -} + dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid, + xprt, (xprt_connected(xprt) ? "is" : "is not")); -/* - * 4a. Sort out bind result - */ -static void -call_bind_status(struct rpc_task *task) -{ - int status = -EACCES; + task->tk_action = (xprt_connected(xprt)) ? 
call_transmit : call_connect; - if (task->tk_status >= 0) { - dprintk("RPC: %4d call_bind_status (status %d)\n", - task->tk_pid, task->tk_status); - task->tk_status = 0; + if (!clnt->cl_port) { task->tk_action = call_connect; - return; - } - - switch (task->tk_status) { - case -EACCES: - dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", - task->tk_pid); - rpc_delay(task, 3*HZ); - goto retry_bind; - case -ETIMEDOUT: - dprintk("RPC: %4d rpcbind request timed out\n", - task->tk_pid); - if (RPC_IS_SOFT(task)) { - status = -EIO; - break; - } - goto retry_bind; - case -EPFNOSUPPORT: - dprintk("RPC: %4d remote rpcbind service unavailable\n", - task->tk_pid); - break; - case -EPROTONOSUPPORT: - dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", - task->tk_pid); - break; - default: - dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", - task->tk_pid, -task->tk_status); - status = -EIO; - break; + task->tk_timeout = RPC_CONNECT_TIMEOUT; + rpc_getport(task, clnt); } - - rpc_exit(task, status); - return; - -retry_bind: - task->tk_status = 0; - task->tk_action = call_bind; - return; } /* - * 4b. Connect to the RPC server + * 4a. Connect to the RPC server (TCP case) */ static void call_connect(struct rpc_task *task) { - struct rpc_xprt *xprt = task->tk_xprt; + struct rpc_clnt *clnt = task->tk_client; - dprintk("RPC: %4d call_connect xprt %p %s connected\n", - task->tk_pid, xprt, - (xprt_connected(xprt) ? "is" : "is not")); + dprintk("RPC: %4d call_connect status %d\n", + task->tk_pid, task->tk_status); - task->tk_action = call_transmit; - if (!xprt_connected(xprt)) { - task->tk_action = call_connect_status; - if (task->tk_status < 0) - return; - xprt_connect(task); + if (xprt_connected(clnt->cl_xprt)) { + task->tk_action = call_transmit; + return; } + task->tk_action = call_connect_status; + if (task->tk_status < 0) + return; + xprt_connect(task); } /* - * 4c. Sort out connect result + * 4b. Sort out connect result */ static void call_connect_status(struct rpc_task *task) @@ -825,9 +778,6 @@ call_connect_status(struct rpc_task *task) struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; - dprintk("RPC: %5u call_connect_status (status %d)\n", - task->tk_pid, task->tk_status); - task->tk_status = 0; if (status >= 0) { clnt->cl_stats->netreconn++; @@ -835,19 +785,17 @@ call_connect_status(struct rpc_task *task) return; } - /* Something failed: remote service port may have changed */ + /* Something failed: we may have to rebind */ if (clnt->cl_autobind) clnt->cl_port = 0; - switch (status) { case -ENOTCONN: case -ETIMEDOUT: case -EAGAIN: - task->tk_action = call_bind; + task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect; break; default: rpc_exit(task, -EIO); - break; } } @@ -867,12 +815,10 @@ call_transmit(struct rpc_task *task) if (task->tk_status != 0) return; /* Encode here so that rpcsec_gss can use correct sequence number. */ - if (task->tk_rqstp->rq_bytes_sent == 0) { + if (!task->tk_rqstp->rq_bytes_sent) call_encode(task); - /* Did the encode result in an error condition? 
*/ - if (task->tk_status != 0) - goto out_nosend; - } + if (task->tk_status < 0) + return; xprt_transmit(task); if (task->tk_status < 0) return; @@ -880,10 +826,6 @@ call_transmit(struct rpc_task *task) task->tk_action = NULL; rpc_wake_up_task(task); } - return; -out_nosend: - /* release socket write lock before attempting to handle error */ - xprt_abort_transmit(task); } /* @@ -1078,12 +1020,13 @@ static u32 * call_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; + struct rpc_xprt *xprt = clnt->cl_xprt; struct rpc_rqst *req = task->tk_rqstp; u32 *p = req->rq_svec[0].iov_base; /* FIXME: check buffer size? */ - - p = xprt_skip_transport_header(task->tk_xprt, p); + if (xprt->stream) + *p++ = 0; /* fill in later */ *p++ = req->rq_xid; /* XID */ *p++ = htonl(RPC_CALL); /* CALL */ *p++ = htonl(RPC_VERSION); /* RPC version */ diff --git a/trunk/net/sunrpc/pmap_clnt.c b/trunk/net/sunrpc/pmap_clnt.c index a398575f94b8..4e81f2766923 100644 --- a/trunk/net/sunrpc/pmap_clnt.c +++ b/trunk/net/sunrpc/pmap_clnt.c @@ -26,7 +26,7 @@ #define PMAP_GETPORT 3 static struct rpc_procinfo pmap_procedures[]; -static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int); +static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int); static void pmap_getport_done(struct rpc_task *); static struct rpc_program pmap_program; static DEFINE_SPINLOCK(pmap_lock); @@ -65,7 +65,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt) map->pm_binding = 1; spin_unlock(&pmap_lock); - pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0); + pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot); if (IS_ERR(pmap_clnt)) { task->tk_status = PTR_ERR(pmap_clnt); goto bailout; @@ -112,7 +112,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); - pmap_clnt = pmap_create(hostname, sin, prot, 0); + pmap_clnt = pmap_create(hostname, sin, prot); if (IS_ERR(pmap_clnt)) return PTR_ERR(pmap_clnt); @@ -171,7 +171,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) sin.sin_family = AF_INET; sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); + pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP); if (IS_ERR(pmap_clnt)) { error = PTR_ERR(pmap_clnt); dprintk("RPC: couldn't create pmap client. 
Error = %d\n", error); @@ -198,7 +198,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) } static struct rpc_clnt * -pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged) +pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) { struct rpc_xprt *xprt; struct rpc_clnt *clnt; @@ -208,8 +208,6 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileg if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; xprt->addr.sin_port = htons(RPC_PMAP_PORT); - if (!privileged) - xprt->resvport = 0; /* printk("pmap: create clnt\n"); */ clnt = rpc_new_client(xprt, hostname, diff --git a/trunk/net/sunrpc/rpc_pipe.c b/trunk/net/sunrpc/rpc_pipe.c index 649d609e7d94..ded6c63f11ec 100644 --- a/trunk/net/sunrpc/rpc_pipe.c +++ b/trunk/net/sunrpc/rpc_pipe.c @@ -177,8 +177,6 @@ rpc_pipe_release(struct inode *inode, struct file *filp) __rpc_purge_upcall(inode, -EPIPE); if (rpci->ops->release_pipe) rpci->ops->release_pipe(inode); - if (!rpci->nreaders && !rpci->nwriters) - rpci->ops = NULL; out: up(&inode->i_sem); return 0; diff --git a/trunk/net/sunrpc/socklib.c b/trunk/net/sunrpc/socklib.c deleted file mode 100644 index 8f97e90f36c8..000000000000 --- a/trunk/net/sunrpc/socklib.c +++ /dev/null @@ -1,175 +0,0 @@ -/* - * linux/net/sunrpc/socklib.c - * - * Common socket helper routines for RPC client and server - * - * Copyright (C) 1995, 1996 Olaf Kirch - */ - -#include -#include -#include -#include - - -/** - * skb_read_bits - copy some data bits from skb to internal buffer - * @desc: sk_buff copy helper - * @to: copy destination - * @len: number of bytes to copy - * - * Possibly called several times to iterate over an sk_buff and copy - * data out of it. - */ -static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) -{ - if (len > desc->count) - len = desc->count; - if (skb_copy_bits(desc->skb, desc->offset, to, len)) - return 0; - desc->count -= len; - desc->offset += len; - return len; -} - -/** - * skb_read_and_csum_bits - copy and checksum from skb to buffer - * @desc: sk_buff copy helper - * @to: copy destination - * @len: number of bytes to copy - * - * Same as skb_read_bits, but calculate a checksum at the same time. 
- */ -static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) -{ - unsigned int csum2, pos; - - if (len > desc->count) - len = desc->count; - pos = desc->offset; - csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); - desc->csum = csum_block_add(desc->csum, csum2, pos); - desc->count -= len; - desc->offset += len; - return len; -} - -/** - * xdr_partial_copy_from_skb - copy data out of an skb - * @xdr: target XDR buffer - * @base: starting offset - * @desc: sk_buff copy helper - * @copy_actor: virtual method for copying data - * - */ -ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor) -{ - struct page **ppage = xdr->pages; - unsigned int len, pglen = xdr->page_len; - ssize_t copied = 0; - int ret; - - len = xdr->head[0].iov_len; - if (base < len) { - len -= base; - ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); - copied += ret; - if (ret != len || !desc->count) - goto out; - base = 0; - } else - base -= len; - - if (unlikely(pglen == 0)) - goto copy_tail; - if (unlikely(base >= pglen)) { - base -= pglen; - goto copy_tail; - } - if (base || xdr->page_base) { - pglen -= base; - base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; - } - do { - char *kaddr; - - /* ACL likes to be lazy in allocating pages - ACLs - * are small by default but can get huge. */ - if (unlikely(*ppage == NULL)) { - *ppage = alloc_page(GFP_ATOMIC); - if (unlikely(*ppage == NULL)) { - if (copied == 0) - copied = -ENOMEM; - goto out; - } - } - - len = PAGE_CACHE_SIZE; - kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); - if (base) { - len -= base; - if (pglen < len) - len = pglen; - ret = copy_actor(desc, kaddr + base, len); - base = 0; - } else { - if (pglen < len) - len = pglen; - ret = copy_actor(desc, kaddr, len); - } - flush_dcache_page(*ppage); - kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); - copied += ret; - if (ret != len || !desc->count) - goto out; - ppage++; - } while ((pglen -= len) != 0); -copy_tail: - len = xdr->tail[0].iov_len; - if (base < len) - copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); -out: - return copied; -} - -/** - * csum_partial_copy_to_xdr - checksum and copy data - * @xdr: target XDR buffer - * @skb: source skb - * - * We have set things up such that we perform the checksum of the UDP - * packet in parallel with the copies into the RPC client iovec. 
-DaveM - */ -int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) -{ - skb_reader_t desc; - - desc.skb = skb; - desc.offset = sizeof(struct udphdr); - desc.count = skb->len - desc.offset; - - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - goto no_checksum; - - desc.csum = csum_partial(skb->data, desc.offset, skb->csum); - if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) - return -1; - if (desc.offset != skb->len) { - unsigned int csum2; - csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); - desc.csum = csum_block_add(desc.csum, csum2, desc.offset); - } - if (desc.count) - return -1; - if ((unsigned short)csum_fold(desc.csum)) - return -1; - return 0; -no_checksum: - if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) - return -1; - if (desc.count) - return -1; - return 0; -} diff --git a/trunk/net/sunrpc/sunrpc_syms.c b/trunk/net/sunrpc/sunrpc_syms.c index 2387e7b823ff..ed48ff022d35 100644 --- a/trunk/net/sunrpc/sunrpc_syms.c +++ b/trunk/net/sunrpc/sunrpc_syms.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/trunk/net/sunrpc/svcsock.c b/trunk/net/sunrpc/svcsock.c index 130f2b5d93dd..30ec3efc48a6 100644 --- a/trunk/net/sunrpc/svcsock.c +++ b/trunk/net/sunrpc/svcsock.c @@ -548,6 +548,9 @@ svc_write_space(struct sock *sk) /* * Receive a datagram from a UDP socket. */ +extern int +csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb); + static int svc_udp_recvfrom(struct svc_rqst *rqstp) { diff --git a/trunk/net/sunrpc/sysctl.c b/trunk/net/sunrpc/sysctl.c index d0c9f460e411..1b9616a12e24 100644 --- a/trunk/net/sunrpc/sysctl.c +++ b/trunk/net/sunrpc/sysctl.c @@ -119,18 +119,8 @@ proc_dodebug(ctl_table *table, int write, struct file *file, return 0; } -unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; -unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; -unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; -EXPORT_SYMBOL(xprt_min_resvport); -unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; -EXPORT_SYMBOL(xprt_max_resvport); - - static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; -static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; -static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; static ctl_table debug_table[] = { { @@ -187,28 +177,6 @@ static ctl_table debug_table[] = { .extra1 = &min_slot_table_size, .extra2 = &max_slot_table_size }, - { - .ctl_name = CTL_MIN_RESVPORT, - .procname = "min_resvport", - .data = &xprt_min_resvport, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &xprt_min_resvport_limit, - .extra2 = &xprt_max_resvport_limit - }, - { - .ctl_name = CTL_MAX_RESVPORT, - .procname = "max_resvport", - .data = &xprt_max_resvport, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &xprt_min_resvport_limit, - .extra2 = &xprt_max_resvport_limit - }, { .ctl_name = 0 } }; diff --git a/trunk/net/sunrpc/xdr.c b/trunk/net/sunrpc/xdr.c index 32df43372ee9..fde16f40a581 100644 --- a/trunk/net/sunrpc/xdr.c +++ b/trunk/net/sunrpc/xdr.c @@ -6,12 +6,15 @@ * Copyright (C) 1995, 1996 Olaf Kirch */ -#include #include +#include #include #include #include #include +#include +#include +#include #include #include @@ -173,6 +176,178 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, 
xdr->buflen += len; } +ssize_t +xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, + skb_reader_t *desc, + skb_read_actor_t copy_actor) +{ + struct page **ppage = xdr->pages; + unsigned int len, pglen = xdr->page_len; + ssize_t copied = 0; + int ret; + + len = xdr->head[0].iov_len; + if (base < len) { + len -= base; + ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); + copied += ret; + if (ret != len || !desc->count) + goto out; + base = 0; + } else + base -= len; + + if (pglen == 0) + goto copy_tail; + if (base >= pglen) { + base -= pglen; + goto copy_tail; + } + if (base || xdr->page_base) { + pglen -= base; + base += xdr->page_base; + ppage += base >> PAGE_CACHE_SHIFT; + base &= ~PAGE_CACHE_MASK; + } + do { + char *kaddr; + + /* ACL likes to be lazy in allocating pages - ACLs + * are small by default but can get huge. */ + if (unlikely(*ppage == NULL)) { + *ppage = alloc_page(GFP_ATOMIC); + if (unlikely(*ppage == NULL)) { + if (copied == 0) + copied = -ENOMEM; + goto out; + } + } + + len = PAGE_CACHE_SIZE; + kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); + if (base) { + len -= base; + if (pglen < len) + len = pglen; + ret = copy_actor(desc, kaddr + base, len); + base = 0; + } else { + if (pglen < len) + len = pglen; + ret = copy_actor(desc, kaddr, len); + } + flush_dcache_page(*ppage); + kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); + copied += ret; + if (ret != len || !desc->count) + goto out; + ppage++; + } while ((pglen -= len) != 0); +copy_tail: + len = xdr->tail[0].iov_len; + if (base < len) + copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); +out: + return copied; +} + + +int +xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, + struct xdr_buf *xdr, unsigned int base, int msgflags) +{ + struct page **ppage = xdr->pages; + unsigned int len, pglen = xdr->page_len; + int err, ret = 0; + ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); + + len = xdr->head[0].iov_len; + if (base < len || (addr != NULL && base == 0)) { + struct kvec iov = { + .iov_base = xdr->head[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_name = addr, + .msg_namelen = addrlen, + .msg_flags = msgflags, + }; + if (xdr->len > len) + msg.msg_flags |= MSG_MORE; + + if (iov.iov_len != 0) + err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + else + err = kernel_sendmsg(sock, &msg, NULL, 0, 0); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + if (err != iov.iov_len) + goto out; + base = 0; + } else + base -= len; + + if (pglen == 0) + goto copy_tail; + if (base >= pglen) { + base -= pglen; + goto copy_tail; + } + if (base || xdr->page_base) { + pglen -= base; + base += xdr->page_base; + ppage += base >> PAGE_CACHE_SHIFT; + base &= ~PAGE_CACHE_MASK; + } + + sendpage = sock->ops->sendpage ? : sock_no_sendpage; + do { + int flags = msgflags; + + len = PAGE_CACHE_SIZE; + if (base) + len -= base; + if (pglen < len) + len = pglen; + + if (pglen != len || xdr->tail[0].iov_len != 0) + flags |= MSG_MORE; + + /* Hmm... 
We might be dealing with highmem pages */ + if (PageHighMem(*ppage)) + sendpage = sock_no_sendpage; + err = sendpage(sock, *ppage, base, len, flags); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + if (err != len) + goto out; + base = 0; + ppage++; + } while ((pglen -= len) != 0); +copy_tail: + len = xdr->tail[0].iov_len; + if (base < len) { + struct kvec iov = { + .iov_base = xdr->tail[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_flags = msgflags, + }; + err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + } +out: + return ret; +} + /* * Helper routines for doing 'memmove' like operations on a struct xdr_buf diff --git a/trunk/net/sunrpc/xprt.c b/trunk/net/sunrpc/xprt.c index 6dda3860351f..3c654e06b084 100644 --- a/trunk/net/sunrpc/xprt.c +++ b/trunk/net/sunrpc/xprt.c @@ -10,12 +10,12 @@ * one is available. Otherwise, it sleeps on the backlog queue * (xprt_reserve). * - Next, the caller puts together the RPC message, stuffs it into - * the request struct, and calls xprt_transmit(). - * - xprt_transmit sends the message and installs the caller on the - * transport's wait list. At the same time, it installs a timer that + * the request struct, and calls xprt_call(). + * - xprt_call transmits the message and installs the caller on the + * socket's wait list. At the same time, it installs a timer that * is run after the packet's timeout has expired. * - When a packet arrives, the data_ready handler walks the list of - * pending requests for that transport. If a matching XID is found, the + * pending requests for that socket. If a matching XID is found, the * caller is woken up, and the timer removed. * - When no reply arrives within the timeout interval, the timer is * fired by the kernel and runs xprt_timer(). It either adjusts the @@ -33,17 +33,36 @@ * * Copyright (C) 1995-1997, Olaf Kirch * - * Transport switch API copyright (C) 2005, Chuck Lever + * TCP callback races fixes (C) 1998 Red Hat Software + * TCP send fixes (C) 1998 Red Hat Software + * TCP NFS related read + write fixes + * (C) 1999 Dave Airlie, University of Limerick, Ireland + * + * Rewrite of large parts of the code in order to stabilize TCP stuff. + * Fix behaviour when socket buffer is full. + * (C) 1999 Trond Myklebust */ -#include - #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include +#include +#include +#include +#include /* * Local variables @@ -54,90 +73,81 @@ # define RPCDBG_FACILITY RPCDBG_XPRT #endif +#define XPRT_MAX_BACKOFF (8) +#define XPRT_IDLE_TIMEOUT (5*60*HZ) +#define XPRT_MAX_RESVPORT (800) + /* * Local functions */ static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); static inline void do_xprt_reserve(struct rpc_task *); +static void xprt_disconnect(struct rpc_xprt *); static void xprt_connect_status(struct rpc_task *task); +static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap, + struct rpc_timeout *to); +static struct socket *xprt_create_socket(struct rpc_xprt *, int, int); +static void xprt_bind_socket(struct rpc_xprt *, struct socket *); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); -/* - * The transport code maintains an estimate on the maximum number of out- - * standing RPC requests, using a smoothed version of the congestion - * avoidance implemented in 44BSD.
This is basically the Van Jacobson - * congestion algorithm: If a retransmit occurs, the congestion window is - * halved; otherwise, it is incremented by 1/cwnd when - * - * - a reply is received and - * - a full number of requests are outstanding and - * - the congestion window hasn't been updated recently. - */ -#define RPC_CWNDSHIFT (8U) -#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) -#define RPC_INITCWND RPC_CWNDSCALE -#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) - -#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) +static int xprt_clear_backlog(struct rpc_xprt *xprt); -/** - * xprt_reserve_xprt - serialize write access to transports - * @task: task that is requesting access to the transport - * - * This prevents mixing the payload of separate requests, and prevents - * transport connects from colliding with writes. No congestion control - * is provided. +#ifdef RPC_DEBUG_DATA +/* + * Print the buffer contents (first 128 bytes only--just enough for + * diropres return). */ -int xprt_reserve_xprt(struct rpc_task *task) +static void +xprt_pktdump(char *msg, u32 *packet, unsigned int count) { - struct rpc_xprt *xprt = task->tk_xprt; - struct rpc_rqst *req = task->tk_rqstp; - - if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { - if (task == xprt->snd_task) - return 1; - if (task == NULL) - return 0; - goto out_sleep; - } - xprt->snd_task = task; - if (req) { - req->rq_bytes_sent = 0; - req->rq_ntrans++; + u8 *buf = (u8 *) packet; + int j; + + dprintk("RPC: %s\n", msg); + for (j = 0; j < count && j < 128; j += 4) { + if (!(j & 31)) { + if (j) + dprintk("\n"); + dprintk("0x%04x ", j); + } + dprintk("%02x%02x%02x%02x ", + buf[j], buf[j+1], buf[j+2], buf[j+3]); } - return 1; + dprintk("\n"); +} +#else +static inline void +xprt_pktdump(char *msg, u32 *packet, unsigned int count) +{ + /* NOP */ +} +#endif -out_sleep: - dprintk("RPC: %4d failed to lock transport %p\n", - task->tk_pid, xprt); - task->tk_timeout = 0; - task->tk_status = -EAGAIN; - if (req && req->rq_ntrans) - rpc_sleep_on(&xprt->resend, task, NULL, NULL); - else - rpc_sleep_on(&xprt->sending, task, NULL, NULL); - return 0; +/* + * Look up RPC transport given an INET socket + */ +static inline struct rpc_xprt * +xprt_from_sock(struct sock *sk) +{ + return (struct rpc_xprt *) sk->sk_user_data; } /* - * xprt_reserve_xprt_cong - serialize write access to transports - * @task: task that is requesting access to the transport - * - * Same as xprt_reserve_xprt, but Van Jacobson congestion control is - * integrated into the decision of whether a request is allowed to be - * woken up and given access to the transport. + * Serialize write access to sockets, in order to prevent different + * requests from interfering with each other. + * Also prevents TCP socket connects from colliding with writes. 
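Underneath both the old and new spellings here is the same lightweight exclusion idiom: one atomic bit serves as the lock word for the transport, and a loser parks itself on an rpc_wait_queue instead of spinning. A condensed sketch, with names taken from the hunk and congestion and error detail trimmed:

    /* Sketch of the XPRT_LOCKED idiom.  test_and_set_bit() is the
     * atomic acquire; release is clear_bit() between memory barriers,
     * followed by waking the next queued task. */
    if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) {
            if (task == xprt->snd_task)
                    return 1;            /* re-entry by the holder */
            task->tk_timeout = 0;
            task->tk_status = -EAGAIN;
            rpc_sleep_on(&xprt->sending, task, NULL, NULL);
            return 0;                    /* parked until release */
    }
    xprt->snd_task = task;               /* transport is ours */
    return 1;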
*/ -int xprt_reserve_xprt_cong(struct rpc_task *task) +static int +__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) { - struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req = task->tk_rqstp; - if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { + if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) { if (task == xprt->snd_task) return 1; goto out_sleep; } - if (__xprt_get_cong(xprt, task)) { + if (xprt->nocong || __xprt_get_cong(xprt, task)) { xprt->snd_task = task; if (req) { req->rq_bytes_sent = 0; @@ -146,10 +156,10 @@ int xprt_reserve_xprt_cong(struct rpc_task *task) return 1; } smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->state); + clear_bit(XPRT_LOCKED, &xprt->sockstate); smp_mb__after_clear_bit(); out_sleep: - dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); + dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt); task->tk_timeout = 0; task->tk_status = -EAGAIN; if (req && req->rq_ntrans) @@ -159,52 +169,26 @@ int xprt_reserve_xprt_cong(struct rpc_task *task) return 0; } -static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) +static inline int +xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) { int retval; - spin_lock_bh(&xprt->transport_lock); - retval = xprt->ops->reserve_xprt(task); - spin_unlock_bh(&xprt->transport_lock); + spin_lock_bh(&xprt->sock_lock); + retval = __xprt_lock_write(xprt, task); + spin_unlock_bh(&xprt->sock_lock); return retval; } -static void __xprt_lock_write_next(struct rpc_xprt *xprt) -{ - struct rpc_task *task; - struct rpc_rqst *req; - - if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) - return; - - task = rpc_wake_up_next(&xprt->resend); - if (!task) { - task = rpc_wake_up_next(&xprt->sending); - if (!task) - goto out_unlock; - } - req = task->tk_rqstp; - xprt->snd_task = task; - if (req) { - req->rq_bytes_sent = 0; - req->rq_ntrans++; - } - return; - -out_unlock: - smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->state); - smp_mb__after_clear_bit(); -} - -static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) +static void +__xprt_lock_write_next(struct rpc_xprt *xprt) { struct rpc_task *task; - if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) + if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) return; - if (RPCXPRT_CONGESTED(xprt)) + if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) goto out_unlock; task = rpc_wake_up_next(&xprt->resend); if (!task) { @@ -212,7 +196,7 @@ static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) if (!task) goto out_unlock; } - if (__xprt_get_cong(xprt, task)) { + if (xprt->nocong || __xprt_get_cong(xprt, task)) { struct rpc_rqst *req = task->tk_rqstp; xprt->snd_task = task; if (req) { @@ -223,52 +207,87 @@ static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) } out_unlock: smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->state); + clear_bit(XPRT_LOCKED, &xprt->sockstate); smp_mb__after_clear_bit(); } -/** - * xprt_release_xprt - allow other requests to use a transport - * @xprt: transport with other tasks potentially waiting - * @task: task that is releasing access to the transport - * - * Note that "task" can be NULL. No congestion control is provided. +/* + * Releases the socket for use by other requests. 
*/ -void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) +static void +__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) { if (xprt->snd_task == task) { xprt->snd_task = NULL; smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->state); + clear_bit(XPRT_LOCKED, &xprt->sockstate); smp_mb__after_clear_bit(); __xprt_lock_write_next(xprt); } } -/** - * xprt_release_xprt_cong - allow other requests to use a transport - * @xprt: transport with other tasks potentially waiting - * @task: task that is releasing access to the transport - * - * Note that "task" can be NULL. Another task is awoken to use the - * transport if the transport's congestion window allows it. - */ -void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) +static inline void +xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) { - if (xprt->snd_task == task) { - xprt->snd_task = NULL; - smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->state); - smp_mb__after_clear_bit(); - __xprt_lock_write_next_cong(xprt); - } + spin_lock_bh(&xprt->sock_lock); + __xprt_release_write(xprt, task); + spin_unlock_bh(&xprt->sock_lock); } -static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) +/* + * Write data to socket. + */ +static inline int +xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) { - spin_lock_bh(&xprt->transport_lock); - xprt->ops->release_xprt(xprt, task); - spin_unlock_bh(&xprt->transport_lock); + struct socket *sock = xprt->sock; + struct xdr_buf *xdr = &req->rq_snd_buf; + struct sockaddr *addr = NULL; + int addrlen = 0; + unsigned int skip; + int result; + + if (!sock) + return -ENOTCONN; + + xprt_pktdump("packet data:", + req->rq_svec->iov_base, + req->rq_svec->iov_len); + + /* For UDP, we need to provide an address */ + if (!xprt->stream) { + addr = (struct sockaddr *) &xprt->addr; + addrlen = sizeof(xprt->addr); + } + /* Don't repeat bytes */ + skip = req->rq_bytes_sent; + + clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); + result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); + + dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result); + + if (result >= 0) + return result; + + switch (result) { + case -ECONNREFUSED: + /* When the server has died, an ICMP port unreachable message + * prompts ECONNREFUSED. + */ + case -EAGAIN: + break; + case -ECONNRESET: + case -ENOTCONN: + case -EPIPE: + /* connection broken */ + if (xprt->stream) + result = -ENOTCONN; + break; + default: + printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result); + } + return result; } /* @@ -302,40 +321,26 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) return; req->rq_cong = 0; xprt->cong -= RPC_CWNDSCALE; - __xprt_lock_write_next_cong(xprt); -} - -/** - * xprt_release_rqst_cong - housekeeping when request is complete - * @task: RPC request that recently completed - * - * Useful for transports that require congestion control. - */ -void xprt_release_rqst_cong(struct rpc_task *task) -{ - __xprt_put_cong(task->tk_xprt, task->tk_rqstp); + __xprt_lock_write_next(xprt); } -/** - * xprt_adjust_cwnd - adjust transport congestion window - * @task: recently completed RPC request used to adjust window - * @result: result code of completed RPC request - * +/* + * Adjust RPC congestion window * We use a time-smoothed congestion estimator to avoid heavy oscillation.
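Concretely, xprt_adjust_cwnd() below works in fixed point: cwnd is scaled by RPC_CWNDSCALE (1 << 8), so 256 units equal one in-flight request. A worked pass through both branches of the update:

    /* Additive increase, one reply arriving with cwnd = 512 (2 requests):
     *   cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd
     *        == (65536 + 256) / 512 == 128, so cwnd becomes 640.
     * That is half a request -- the promised 1/cwnd growth rule.
     * Multiplicative decrease, on -ETIMEDOUT:
     *   cwnd >>= 1, so 512 becomes 256, floored at RPC_CWNDSCALE
     *   and capped at RPC_MAXCWND(xprt) on the way back up. */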
*/ -void xprt_adjust_cwnd(struct rpc_task *task, int result) +static void +xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) { - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_xprt *xprt = task->tk_xprt; - unsigned long cwnd = xprt->cwnd; + unsigned long cwnd; + cwnd = xprt->cwnd; if (result >= 0 && cwnd <= xprt->cong) { /* The (cwnd >> 1) term makes sure * the result gets rounded properly. */ cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; if (cwnd > RPC_MAXCWND(xprt)) cwnd = RPC_MAXCWND(xprt); - __xprt_lock_write_next_cong(xprt); + __xprt_lock_write_next(xprt); } else if (result == -ETIMEDOUT) { cwnd >>= 1; if (cwnd < RPC_CWNDSCALE) @@ -344,89 +349,11 @@ void xprt_adjust_cwnd(struct rpc_task *task, int result) dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", xprt->cong, xprt->cwnd, cwnd); xprt->cwnd = cwnd; - __xprt_put_cong(xprt, req); -} - -/** - * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue - * @xprt: transport with waiting tasks - * @status: result code to plant in each task before waking it - * - */ -void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) -{ - if (status < 0) - rpc_wake_up_status(&xprt->pending, status); - else - rpc_wake_up(&xprt->pending); -} - -/** - * xprt_wait_for_buffer_space - wait for transport output buffer to clear - * @task: task to be put to sleep - * - */ -void xprt_wait_for_buffer_space(struct rpc_task *task) -{ - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_xprt *xprt = req->rq_xprt; - - task->tk_timeout = req->rq_timeout; - rpc_sleep_on(&xprt->pending, task, NULL, NULL); -} - -/** - * xprt_write_space - wake the task waiting for transport output buffer space - * @xprt: transport with waiting tasks - * - * Can be called in a soft IRQ context, so xprt_write_space never sleeps. - */ -void xprt_write_space(struct rpc_xprt *xprt) -{ - if (unlikely(xprt->shutdown)) - return; - - spin_lock_bh(&xprt->transport_lock); - if (xprt->snd_task) { - dprintk("RPC: write space: waking waiting task on xprt %p\n", - xprt); - rpc_wake_up_task(xprt->snd_task); - } - spin_unlock_bh(&xprt->transport_lock); -} - -/** - * xprt_set_retrans_timeout_def - set a request's retransmit timeout - * @task: task whose timeout is to be set - * - * Set a request's retransmit timeout based on the transport's - * default timeout parameters. Used by transports that don't adjust - * the retransmit timeout based on round-trip time estimation. - */ -void xprt_set_retrans_timeout_def(struct rpc_task *task) -{ - task->tk_timeout = task->tk_rqstp->rq_timeout; } /* - * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout - * @task: task whose timeout is to be set - * - * Set a request's retransmit timeout using the RTT estimator. 
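
The update above is classic additive-increase/multiplicative-decrease: each good reply grows cwnd by roughly RPC_CWNDSCALE*RPC_CWNDSCALE/cwnd (about one slot per full window of replies), and a timeout halves it. A standalone program demonstrating the arithmetic, with the scale and the cap assumed for illustration:

	#include <stdio.h>

	#define SCALE	256UL
	#define MAXCWND	(16UL * SCALE)	/* assumed cap */

	int main(void)
	{
		unsigned long cwnd = SCALE;	/* start at one slot */
		for (int reply = 0; reply < 32; reply++) {
			/* the (cwnd >> 1) term rounds the division */
			cwnd += (SCALE * SCALE + (cwnd >> 1)) / cwnd;
			if (cwnd > MAXCWND)
				cwnd = MAXCWND;
		}
		printf("after 32 replies: %.1f slots\n", (double)cwnd / SCALE);
		cwnd >>= 1;			/* one timeout halves the window */
		printf("after a timeout: %.1f slots\n", (double)cwnd / SCALE);
		return 0;
	}
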
+ * Reset the major timeout value */ -void xprt_set_retrans_timeout_rtt(struct rpc_task *task) -{ - int timer = task->tk_msg.rpc_proc->p_timer; - struct rpc_rtt *rtt = task->tk_client->cl_rtt; - struct rpc_rqst *req = task->tk_rqstp; - unsigned long max_timeout = req->rq_xprt->timeout.to_maxval; - - task->tk_timeout = rpc_calc_rto(rtt, timer); - task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; - if (task->tk_timeout > max_timeout || task->tk_timeout == 0) - task->tk_timeout = max_timeout; -} - static void xprt_reset_majortimeo(struct rpc_rqst *req) { struct rpc_timeout *to = &req->rq_xprt->timeout; @@ -441,10 +368,8 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req) req->rq_majortimeo += jiffies; } -/** - * xprt_adjust_timeout - adjust timeout values for next retransmit - * @req: RPC request containing parameters to use for the adjustment - * +/* + * Adjust timeout values etc for next retransmit */ int xprt_adjust_timeout(struct rpc_rqst *req) { @@ -466,9 +391,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req) req->rq_retries = 0; xprt_reset_majortimeo(req); /* Reset the RTT counters == "slow start" */ - spin_lock_bh(&xprt->transport_lock); + spin_lock_bh(&xprt->sock_lock); rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock_bh(&xprt->sock_lock); pprintk("RPC: %lu timeout\n", jiffies); status = -ETIMEDOUT; } @@ -480,52 +405,133 @@ int xprt_adjust_timeout(struct rpc_rqst *req) return status; } -static void xprt_autoclose(void *args) +/* + * Close down a transport socket + */ +static void +xprt_close(struct rpc_xprt *xprt) +{ + struct socket *sock = xprt->sock; + struct sock *sk = xprt->inet; + + if (!sk) + return; + + write_lock_bh(&sk->sk_callback_lock); + xprt->inet = NULL; + xprt->sock = NULL; + + sk->sk_user_data = NULL; + sk->sk_data_ready = xprt->old_data_ready; + sk->sk_state_change = xprt->old_state_change; + sk->sk_write_space = xprt->old_write_space; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_no_check = 0; + + sock_release(sock); +} + +static void +xprt_socket_autoclose(void *args) { struct rpc_xprt *xprt = (struct rpc_xprt *)args; xprt_disconnect(xprt); - xprt->ops->close(xprt); + xprt_close(xprt); xprt_release_write(xprt, NULL); } -/** - * xprt_disconnect - mark a transport as disconnected - * @xprt: transport to flag for disconnect - * +/* + * Mark a transport as disconnected */ -void xprt_disconnect(struct rpc_xprt *xprt) +static void +xprt_disconnect(struct rpc_xprt *xprt) { dprintk("RPC: disconnected transport %p\n", xprt); - spin_lock_bh(&xprt->transport_lock); + spin_lock_bh(&xprt->sock_lock); xprt_clear_connected(xprt); - xprt_wake_pending_tasks(xprt, -ENOTCONN); - spin_unlock_bh(&xprt->transport_lock); + rpc_wake_up_status(&xprt->pending, -ENOTCONN); + spin_unlock_bh(&xprt->sock_lock); } +/* + * Used to allow disconnection when we've been idle + */ static void xprt_init_autodisconnect(unsigned long data) { struct rpc_xprt *xprt = (struct rpc_xprt *)data; - spin_lock(&xprt->transport_lock); + spin_lock(&xprt->sock_lock); if (!list_empty(&xprt->recv) || xprt->shutdown) goto out_abort; - if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) + if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) goto out_abort; - spin_unlock(&xprt->transport_lock); - if (xprt_connecting(xprt)) + spin_unlock(&xprt->sock_lock); + /* Let keventd close the socket */ + if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0) xprt_release_write(xprt, NULL); else schedule_work(&xprt->task_cleanup); return; 
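
Most of xprt_adjust_timeout() is hidden by the hunk context here, but the rpc_timeout fields in view imply the usual per-retry backoff: grow the timeout (doubling when to_exponential is set, otherwise adding to_increment) and clamp it at to_maxval. A rough, hypothetical reconstruction of that step, not the elided kernel code:

	struct timeout_params {
		unsigned long maxval, increment;
		unsigned char exponential;
	};

	static unsigned long next_timeout(const struct timeout_params *to,
					  unsigned long cur)
	{
		unsigned long next = to->exponential ? cur << 1
						     : cur + to->increment;
		return next > to->maxval ? to->maxval : next;
	}
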
out_abort: - spin_unlock(&xprt->transport_lock); + spin_unlock(&xprt->sock_lock); +} + +static void xprt_socket_connect(void *args) +{ + struct rpc_xprt *xprt = (struct rpc_xprt *)args; + struct socket *sock = xprt->sock; + int status = -EIO; + + if (xprt->shutdown || xprt->addr.sin_port == 0) + goto out; + + /* + * Start by resetting any existing state + */ + xprt_close(xprt); + sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport); + if (sock == NULL) { + /* couldn't create socket or bind to reserved port; + * this is likely a permanent error, so cause an abort */ + goto out; + } + xprt_bind_socket(xprt, sock); + xprt_sock_setbufsize(xprt); + + status = 0; + if (!xprt->stream) + goto out; + + /* + * Tell the socket layer to start connecting... + */ + status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, + sizeof(xprt->addr), O_NONBLOCK); + dprintk("RPC: %p connect status %d connected %d sock state %d\n", + xprt, -status, xprt_connected(xprt), sock->sk->sk_state); + if (status < 0) { + switch (status) { + case -EINPROGRESS: + case -EALREADY: + goto out_clear; + } + } +out: + if (status < 0) + rpc_wake_up_status(&xprt->pending, status); + else + rpc_wake_up(&xprt->pending); +out_clear: + smp_mb__before_clear_bit(); + clear_bit(XPRT_CONNECTING, &xprt->sockstate); + smp_mb__after_clear_bit(); } -/** - * xprt_connect - schedule a transport connect operation - * @task: RPC task that is requesting the connect +/* + * Attempt to connect a TCP socket. * */ void xprt_connect(struct rpc_task *task) @@ -546,19 +552,37 @@ void xprt_connect(struct rpc_task *task) if (!xprt_lock_write(xprt, task)) return; if (xprt_connected(xprt)) - xprt_release_write(xprt, task); - else { - if (task->tk_rqstp) - task->tk_rqstp->rq_bytes_sent = 0; + goto out_write; - task->tk_timeout = xprt->connect_timeout; - rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); - xprt->ops->connect(task); + if (task->tk_rqstp) + task->tk_rqstp->rq_bytes_sent = 0; + + task->tk_timeout = RPC_CONNECT_TIMEOUT; + rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); + if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { + /* Note: if we are here due to a dropped connection + * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ + * seconds + */ + if (xprt->sock != NULL) + schedule_delayed_work(&xprt->sock_connect, + RPC_REESTABLISH_TIMEOUT); + else { + schedule_work(&xprt->sock_connect); + if (!RPC_IS_ASYNC(task)) + flush_scheduled_work(); + } } return; + out_write: + xprt_release_write(xprt, task); } -static void xprt_connect_status(struct rpc_task *task) +/* + * We arrive here when awoken from waiting on connection establishment. 
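
xprt_socket_connect() above issues the connect with O_NONBLOCK and treats -EINPROGRESS and -EALREADY as "still connecting", leaving it to the socket state-change callback to wake waiters once the handshake completes. The same pattern on ordinary userspace sockets, as an illustration:

	#include <errno.h>
	#include <fcntl.h>
	#include <sys/socket.h>

	/* 0 = connected, 1 = in progress (poll for writability), -1 = error */
	static int start_connect(int fd, const struct sockaddr *sa, socklen_t len)
	{
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
		if (connect(fd, sa, len) == 0)
			return 0;
		if (errno == EINPROGRESS || errno == EALREADY)
			return 1;
		return -1;
	}
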
+ */ +static void +xprt_connect_status(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -568,42 +592,31 @@ static void xprt_connect_status(struct rpc_task *task) return; } + /* if soft mounted, just cause this RPC to fail */ + if (RPC_IS_SOFT(task)) + task->tk_status = -EIO; + switch (task->tk_status) { case -ECONNREFUSED: case -ECONNRESET: - dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n", - task->tk_pid, task->tk_client->cl_server); - break; case -ENOTCONN: - dprintk("RPC: %4d xprt_connect_status: connection broken\n", - task->tk_pid); - break; + return; case -ETIMEDOUT: - dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n", + dprintk("RPC: %4d xprt_connect_status: timed out\n", task->tk_pid); break; default: - dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n", - task->tk_pid, -task->tk_status, task->tk_client->cl_server); - xprt_release_write(xprt, task); - task->tk_status = -EIO; - return; - } - - /* if soft mounted, just cause this RPC to fail */ - if (RPC_IS_SOFT(task)) { - xprt_release_write(xprt, task); - task->tk_status = -EIO; + printk(KERN_ERR "RPC: error %d connecting to server %s\n", + -task->tk_status, task->tk_client->cl_server); } + xprt_release_write(xprt, task); } -/** - * xprt_lookup_rqst - find an RPC request corresponding to an XID - * @xprt: transport on which the original request was transmitted - * @xid: RPC XID of incoming reply - * +/* + * Look up the RPC request corresponding to a reply, and then lock it. */ -struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) +static inline struct rpc_rqst * +xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) { struct list_head *pos; struct rpc_rqst *req = NULL; @@ -618,68 +631,556 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) return req; } -/** - * xprt_update_rtt - update an RPC client's RTT state after receiving a reply - * @task: RPC request that recently completed - * +/* + * Complete reply received. + * The TCP code relies on us to remove the request from xprt->pending. 
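
xprt_lookup_rqst() above is a plain linear scan of the transport's receive list for a matching XID; a reply that matches nothing (say, for a request that already timed out) is simply dropped by the callers. A sketch of the same match over an ordinary singly linked list, names hypothetical:

	#include <stdint.h>
	#include <stddef.h>

	struct rqst {
		uint32_t xid;		/* kept in network byte order */
		struct rqst *next;
	};

	static struct rqst *lookup_rqst(struct rqst *recv, uint32_t xid)
	{
		for (struct rqst *req = recv; req; req = req->next)
			if (req->xid == xid)
				return req;
		return NULL;		/* unknown XID: caller drops the reply */
	}
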
*/ -void xprt_update_rtt(struct rpc_task *task) +static void +xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) { - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_rtt *rtt = task->tk_client->cl_rtt; - unsigned timer = task->tk_msg.rpc_proc->p_timer; + struct rpc_task *task = req->rq_task; + struct rpc_clnt *clnt = task->tk_client; + + /* Adjust congestion window */ + if (!xprt->nocong) { + unsigned timer = task->tk_msg.rpc_proc->p_timer; + xprt_adjust_cwnd(xprt, copied); + __xprt_put_cong(xprt, req); + if (timer) { + if (req->rq_ntrans == 1) + rpc_update_rtt(clnt->cl_rtt, timer, + (long)jiffies - req->rq_xtime); + rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1); + } + } - if (timer) { - if (req->rq_ntrans == 1) - rpc_update_rtt(rtt, timer, - (long)jiffies - req->rq_xtime); - rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); +#ifdef RPC_PROFILE + /* Profile only reads for now */ + if (copied > 1024) { + static unsigned long nextstat; + static unsigned long pkt_rtt, pkt_len, pkt_cnt; + + pkt_cnt++; + pkt_len += req->rq_slen + copied; + pkt_rtt += jiffies - req->rq_xtime; + if (time_before(nextstat, jiffies)) { + printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd); + printk("RPC: %ld %ld %ld %ld stat\n", + jiffies, pkt_cnt, pkt_len, pkt_rtt); + pkt_rtt = pkt_len = pkt_cnt = 0; + nextstat = jiffies + 5 * HZ; + } } +#endif + + dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied); + list_del_init(&req->rq_list); + req->rq_received = req->rq_private_buf.len = copied; + + /* ... and wake up the process. */ + rpc_wake_up_task(task); + return; } -/** - * xprt_complete_rqst - called when reply processing is complete - * @task: RPC request that recently completed - * @copied: actual number of bytes received from the transport - * - * Caller holds transport lock. +static size_t +skb_read_bits(skb_reader_t *desc, void *to, size_t len) +{ + if (len > desc->count) + len = desc->count; + if (skb_copy_bits(desc->skb, desc->offset, to, len)) + return 0; + desc->count -= len; + desc->offset += len; + return len; +} + +static size_t +skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) +{ + unsigned int csum2, pos; + + if (len > desc->count) + len = desc->count; + pos = desc->offset; + csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); + desc->csum = csum_block_add(desc->csum, csum2, pos); + desc->count -= len; + desc->offset += len; + return len; +} + +/* + * We have set things up such that we perform the checksum of the UDP + * packet in parallel with the copies into the RPC client iovec. 
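
skb_read_and_csum_bits() works because the Internet checksum is additive: once csum_block_add() accounts for the offset of each block, every copied chunk's partial sum can be folded into a running total, so the datagram is verified essentially for free during the copy. A simplified standalone sketch (even offsets assumed, unlike the kernel helper):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	static uint32_t csum_chunk(const uint8_t *p, size_t len, uint32_t sum)
	{
		for (size_t i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)(p[i] << 8 | p[i + 1]);
		if (len & 1)
			sum += (uint32_t)p[len - 1] << 8;	/* pad odd byte */
		return sum;
	}

	static uint32_t copy_and_csum(uint8_t *dst, const uint8_t *src,
				      size_t len, uint32_t sum)
	{
		memcpy(dst, src, len);		/* the kernel fuses both passes */
		return csum_chunk(src, len, sum);
	}
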
-DaveM */ -void xprt_complete_rqst(struct rpc_task *task, int copied) +int +csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) { - struct rpc_rqst *req = task->tk_rqstp; + skb_reader_t desc; + + desc.skb = skb; + desc.offset = sizeof(struct udphdr); + desc.count = skb->len - desc.offset; + + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + goto no_checksum; + + desc.csum = csum_partial(skb->data, desc.offset, skb->csum); + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) + return -1; + if (desc.offset != skb->len) { + unsigned int csum2; + csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); + desc.csum = csum_block_add(desc.csum, csum2, desc.offset); + } + if (desc.count) + return -1; + if ((unsigned short)csum_fold(desc.csum)) + return -1; + return 0; +no_checksum: + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) + return -1; + if (desc.count) + return -1; + return 0; +} + +/* + * Input handler for RPC replies. Called from a bottom half and hence + * atomic. + */ +static void +udp_data_ready(struct sock *sk, int len) +{ + struct rpc_task *task; + struct rpc_xprt *xprt; + struct rpc_rqst *rovr; + struct sk_buff *skb; + int err, repsize, copied; + u32 _xid, *xp; + + read_lock(&sk->sk_callback_lock); + dprintk("RPC: udp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) { + printk("RPC: udp_data_ready request not found!\n"); + goto out; + } - dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", - task->tk_pid, ntohl(req->rq_xid), copied); + dprintk("RPC: udp_data_ready client %p\n", xprt); - list_del_init(&req->rq_list); - req->rq_received = req->rq_private_buf.len = copied; - rpc_wake_up_task(task); + if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) + goto out; + + if (xprt->shutdown) + goto dropit; + + repsize = skb->len - sizeof(struct udphdr); + if (repsize < 4) { + printk("RPC: impossible RPC reply size %d!\n", repsize); + goto dropit; + } + + /* Copy the XID from the skb... */ + xp = skb_header_pointer(skb, sizeof(struct udphdr), + sizeof(_xid), &_xid); + if (xp == NULL) + goto dropit; + + /* Look up and lock the request corresponding to the given XID */ + spin_lock(&xprt->sock_lock); + rovr = xprt_lookup_rqst(xprt, *xp); + if (!rovr) + goto out_unlock; + task = rovr->rq_task; + + dprintk("RPC: %4d received reply\n", task->tk_pid); + + if ((copied = rovr->rq_private_buf.buflen) > repsize) + copied = repsize; + + /* Suck it into the iovec, verify checksum if not done by hw. */ + if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) + goto out_unlock; + + /* Something worked... */ + dst_confirm(skb->dst); + + xprt_complete_rqst(xprt, rovr, copied); + + out_unlock: + spin_unlock(&xprt->sock_lock); + dropit: + skb_free_datagram(sk, skb); + out: + read_unlock(&sk->sk_callback_lock); } -static void xprt_timer(struct rpc_task *task) +/* + * Copy from an skb into memory and shrink the skb. + */ +static inline size_t +tcp_copy_data(skb_reader_t *desc, void *p, size_t len) { - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_xprt *xprt = req->rq_xprt; + if (len > desc->count) + len = desc->count; + if (skb_copy_bits(desc->skb, desc->offset, p, len)) { + dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", + len, desc->count); + return 0; + } + desc->offset += len; + desc->count -= len; + dprintk("RPC: copied %zu bytes from skb. 
%zu bytes remain\n", + len, desc->count); + return len; +} - dprintk("RPC: %4d xprt_timer\n", task->tk_pid); +/* + * TCP read fragment marker + */ +static inline void +tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len, used; + char *p; + + p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; + len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; + used = tcp_copy_data(desc, p, len); + xprt->tcp_offset += used; + if (used != len) + return; + xprt->tcp_reclen = ntohl(xprt->tcp_recm); + if (xprt->tcp_reclen & 0x80000000) + xprt->tcp_flags |= XPRT_LAST_FRAG; + else + xprt->tcp_flags &= ~XPRT_LAST_FRAG; + xprt->tcp_reclen &= 0x7fffffff; + xprt->tcp_flags &= ~XPRT_COPY_RECM; + xprt->tcp_offset = 0; + /* Sanity check of the record length */ + if (xprt->tcp_reclen < 4) { + printk(KERN_ERR "RPC: Invalid TCP record fragment length\n"); + xprt_disconnect(xprt); + } + dprintk("RPC: reading TCP record fragment of length %d\n", + xprt->tcp_reclen); +} - spin_lock(&xprt->transport_lock); - if (!req->rq_received) { - if (xprt->ops->timer) - xprt->ops->timer(task); - task->tk_status = -ETIMEDOUT; +static void +tcp_check_recm(struct rpc_xprt *xprt) +{ + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); + if (xprt->tcp_offset == xprt->tcp_reclen) { + xprt->tcp_flags |= XPRT_COPY_RECM; + xprt->tcp_offset = 0; + if (xprt->tcp_flags & XPRT_LAST_FRAG) { + xprt->tcp_flags &= ~XPRT_COPY_DATA; + xprt->tcp_flags |= XPRT_COPY_XID; + xprt->tcp_copied = 0; + } } +} + +/* + * TCP read xid + */ +static inline void +tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len, used; + char *p; + + len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; + dprintk("RPC: reading XID (%Zu bytes)\n", len); + p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; + used = tcp_copy_data(desc, p, len); + xprt->tcp_offset += used; + if (used != len) + return; + xprt->tcp_flags &= ~XPRT_COPY_XID; + xprt->tcp_flags |= XPRT_COPY_DATA; + xprt->tcp_copied = 4; + dprintk("RPC: reading reply for XID %08x\n", + ntohl(xprt->tcp_xid)); + tcp_check_recm(xprt); +} + +/* + * TCP read and complete request + */ +static inline void +tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + struct rpc_rqst *req; + struct xdr_buf *rcvbuf; + size_t len; + ssize_t r; + + /* Find and lock the request corresponding to this xid */ + spin_lock(&xprt->sock_lock); + req = xprt_lookup_rqst(xprt, xprt->tcp_xid); + if (!req) { + xprt->tcp_flags &= ~XPRT_COPY_DATA; + dprintk("RPC: XID %08x request not found!\n", + ntohl(xprt->tcp_xid)); + spin_unlock(&xprt->sock_lock); + return; + } + + rcvbuf = &req->rq_private_buf; + len = desc->count; + if (len > xprt->tcp_reclen - xprt->tcp_offset) { + skb_reader_t my_desc; + + len = xprt->tcp_reclen - xprt->tcp_offset; + memcpy(&my_desc, desc, sizeof(my_desc)); + my_desc.count = len; + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, + &my_desc, tcp_copy_data); + desc->count -= r; + desc->offset += r; + } else + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, + desc, tcp_copy_data); + + if (r > 0) { + xprt->tcp_copied += r; + xprt->tcp_offset += r; + } + if (r != len) { + /* Error when copying to the receive buffer, + * usually because we weren't able to allocate + * additional buffer pages. All we can do now + * is turn off XPRT_COPY_DATA, so the request + * will not receive any additional updates, + * and time out. 
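
The four bytes decoded by tcp_read_fraghdr() above are the standard RPC-over-TCP record marker: the top bit flags the final fragment of a record, and the low 31 bits carry the fragment length (hence the sanity check against runt fragments). A standalone illustration of the decode:

	#include <stdint.h>
	#include <stdbool.h>
	#include <arpa/inet.h>

	struct frag_hdr { uint32_t len; bool last; };

	static struct frag_hdr parse_marker(uint32_t wire)	/* network order */
	{
		uint32_t h = ntohl(wire);
		return (struct frag_hdr){ .len  = h & 0x7fffffff,
					  .last = (h & 0x80000000) != 0 };
	}
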
+ * Any remaining data from this record will + * be discarded. + */ + xprt->tcp_flags &= ~XPRT_COPY_DATA; + dprintk("RPC: XID %08x truncated request\n", + ntohl(xprt->tcp_xid)); + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); + goto out; + } + + dprintk("RPC: XID %08x read %Zd bytes\n", + ntohl(xprt->tcp_xid), r); + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); + + if (xprt->tcp_copied == req->rq_private_buf.buflen) + xprt->tcp_flags &= ~XPRT_COPY_DATA; + else if (xprt->tcp_offset == xprt->tcp_reclen) { + if (xprt->tcp_flags & XPRT_LAST_FRAG) + xprt->tcp_flags &= ~XPRT_COPY_DATA; + } + +out: + if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { + dprintk("RPC: %4d received reply complete\n", + req->rq_task->tk_pid); + xprt_complete_rqst(xprt, req, xprt->tcp_copied); + } + spin_unlock(&xprt->sock_lock); + tcp_check_recm(xprt); +} + +/* + * TCP discard extra bytes from a short read + */ +static inline void +tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len; + + len = xprt->tcp_reclen - xprt->tcp_offset; + if (len > desc->count) + len = desc->count; + desc->count -= len; + desc->offset += len; + xprt->tcp_offset += len; + dprintk("RPC: discarded %Zu bytes\n", len); + tcp_check_recm(xprt); +} + +/* + * TCP record receive routine + * We first have to grab the record marker, then the XID, then the data. + */ +static int +tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, + unsigned int offset, size_t len) +{ + struct rpc_xprt *xprt = rd_desc->arg.data; + skb_reader_t desc = { + .skb = skb, + .offset = offset, + .count = len, + .csum = 0 + }; + + dprintk("RPC: tcp_data_recv\n"); + do { + /* Read in a new fragment marker if necessary */ + /* Can we ever really expect to get completely empty fragments? 
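
tcp_data_recv() here is a small state machine: the XPRT_COPY_RECM, XPRT_COPY_XID and XPRT_COPY_DATA flags sequence it through marker, XID, and payload until the socket data is drained. A self-contained miniature of that sequencing over one fabricated record (hypothetical names; the real parser also copes with partial reads and multi-fragment records):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>

	enum state { S_MARKER, S_XID, S_DATA };

	int main(void)
	{
		/* one record: marker (last, len=8) + XID + 4 payload bytes */
		uint32_t wire[3] = { htonl(0x80000000 | 8),
				     htonl(0x1234), htonl(0xdead) };
		const uint8_t *p = (const uint8_t *)wire;
		size_t avail = sizeof(wire);
		enum state st = S_MARKER;
		uint32_t reclen = 0, xid = 0, copied = 0;

		while (avail) {
			uint32_t v;
			switch (st) {
			case S_MARKER:	/* 4-byte record marker */
				memcpy(&v, p, 4); p += 4; avail -= 4;
				reclen = ntohl(v) & 0x7fffffff;
				st = S_XID;
				break;
			case S_XID:	/* 4-byte XID, counts toward reclen */
				memcpy(&v, p, 4); p += 4; avail -= 4;
				xid = ntohl(v); copied = 4;
				st = S_DATA;
				break;
			case S_DATA:	/* payload bytes up to reclen */
				p++; avail--; copied++;
				if (copied == reclen)
					printf("xid %08x: %u bytes\n",
					       xid, (unsigned)copied);
				break;
			}
		}
		return 0;
	}
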
*/ + if (xprt->tcp_flags & XPRT_COPY_RECM) { + tcp_read_fraghdr(xprt, &desc); + continue; + } + /* Read in the xid if necessary */ + if (xprt->tcp_flags & XPRT_COPY_XID) { + tcp_read_xid(xprt, &desc); + continue; + } + /* Read in the request data */ + if (xprt->tcp_flags & XPRT_COPY_DATA) { + tcp_read_request(xprt, &desc); + continue; + } + /* Skip over any trailing bytes on short reads */ + tcp_read_discard(xprt, &desc); + } while (desc.count); + dprintk("RPC: tcp_data_recv done\n"); + return len - desc.count; +} + +static void tcp_data_ready(struct sock *sk, int bytes) +{ + struct rpc_xprt *xprt; + read_descriptor_t rd_desc; + + read_lock(&sk->sk_callback_lock); + dprintk("RPC: tcp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) { + printk("RPC: tcp_data_ready socket info not found!\n"); + goto out; + } + if (xprt->shutdown) + goto out; + + /* We use rd_desc to pass struct xprt to tcp_data_recv */ + rd_desc.arg.data = xprt; + rd_desc.count = 65536; + tcp_read_sock(sk, &rd_desc, tcp_data_recv); +out: + read_unlock(&sk->sk_callback_lock); +} + +static void +tcp_state_change(struct sock *sk) +{ + struct rpc_xprt *xprt; + + read_lock(&sk->sk_callback_lock); + if (!(xprt = xprt_from_sock(sk))) + goto out; + dprintk("RPC: tcp_state_change client %p...\n", xprt); + dprintk("RPC: state %x conn %d dead %d zapped %d\n", + sk->sk_state, xprt_connected(xprt), + sock_flag(sk, SOCK_DEAD), + sock_flag(sk, SOCK_ZAPPED)); + + switch (sk->sk_state) { + case TCP_ESTABLISHED: + spin_lock_bh(&xprt->sock_lock); + if (!xprt_test_and_set_connected(xprt)) { + /* Reset TCP record info */ + xprt->tcp_offset = 0; + xprt->tcp_reclen = 0; + xprt->tcp_copied = 0; + xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; + rpc_wake_up(&xprt->pending); + } + spin_unlock_bh(&xprt->sock_lock); + break; + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + default: + xprt_disconnect(xprt); + break; + } + out: + read_unlock(&sk->sk_callback_lock); +} + +/* + * Called when more output buffer space is available for this socket. + * We try not to wake our writers until they can make "significant" + * progress, otherwise we'll waste resources thrashing sock_sendmsg + * with a bunch of small requests. + */ +static void +xprt_write_space(struct sock *sk) +{ + struct rpc_xprt *xprt; + struct socket *sock; + + read_lock(&sk->sk_callback_lock); + if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) + goto out; + if (xprt->shutdown) + goto out; + + /* Wait until we have enough socket memory */ + if (xprt->stream) { + /* from net/core/stream.c:sk_stream_write_space */ + if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)) + goto out; + } else { + /* from net/core/sock.c:sock_def_write_space */ + if (!sock_writeable(sk)) + goto out; + } + + if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) + goto out; + + spin_lock_bh(&xprt->sock_lock); + if (xprt->snd_task) + rpc_wake_up_task(xprt->snd_task); + spin_unlock_bh(&xprt->sock_lock); +out: + read_unlock(&sk->sk_callback_lock); +} + +/* + * RPC receive timeout handler. + */ +static void +xprt_timer(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + + spin_lock(&xprt->sock_lock); + if (req->rq_received) + goto out; + + xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT); + __xprt_put_cong(xprt, req); + + dprintk("RPC: %4d xprt_timer (%s request)\n", + task->tk_pid, req ? 
"pending" : "backlogged"); + + task->tk_status = -ETIMEDOUT; +out: task->tk_timeout = 0; rpc_wake_up_task(task); - spin_unlock(&xprt->transport_lock); + spin_unlock(&xprt->sock_lock); } -/** - * xprt_prepare_transmit - reserve the transport before sending a request - * @task: RPC task about to send a request - * +/* + * Place the actual RPC call. + * We have to copy the iovec because sendmsg fiddles with its contents. */ -int xprt_prepare_transmit(struct rpc_task *task) +int +xprt_prepare_transmit(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; @@ -690,12 +1191,12 @@ int xprt_prepare_transmit(struct rpc_task *task) if (xprt->shutdown) return -EIO; - spin_lock_bh(&xprt->transport_lock); + spin_lock_bh(&xprt->sock_lock); if (req->rq_received && !req->rq_bytes_sent) { err = req->rq_received; goto out_unlock; } - if (!xprt->ops->reserve_xprt(task)) { + if (!__xprt_lock_write(xprt, task)) { err = -EAGAIN; goto out_unlock; } @@ -705,42 +1206,39 @@ int xprt_prepare_transmit(struct rpc_task *task) goto out_unlock; } out_unlock: - spin_unlock_bh(&xprt->transport_lock); + spin_unlock_bh(&xprt->sock_lock); return err; } void -xprt_abort_transmit(struct rpc_task *task) -{ - struct rpc_xprt *xprt = task->tk_xprt; - - xprt_release_write(xprt, task); -} - -/** - * xprt_transmit - send an RPC request on a transport - * @task: controlling RPC task - * - * We have to copy the iovec because sendmsg fiddles with its contents. - */ -void xprt_transmit(struct rpc_task *task) +xprt_transmit(struct rpc_task *task) { + struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - int status; + int status, retry = 0; + dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); + /* set up everything as needed. */ + /* Write the record marker */ + if (xprt->stream) { + u32 *marker = req->rq_svec[0].iov_base; + + *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); + } + smp_rmb(); if (!req->rq_received) { if (list_empty(&req->rq_list)) { - spin_lock_bh(&xprt->transport_lock); + spin_lock_bh(&xprt->sock_lock); /* Update the softirq receive buffer */ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(req->rq_private_buf)); /* Add request to the receive list */ list_add_tail(&req->rq_list, &xprt->recv); - spin_unlock_bh(&xprt->transport_lock); + spin_unlock_bh(&xprt->sock_lock); xprt_reset_majortimeo(req); /* Turn off autodisconnect */ del_singleshot_timer_sync(&xprt->timer); @@ -748,19 +1246,40 @@ void xprt_transmit(struct rpc_task *task) } else if (!req->rq_bytes_sent) return; - status = xprt->ops->send_request(task); - if (status == 0) { - dprintk("RPC: %4d xmit complete\n", task->tk_pid); - spin_lock_bh(&xprt->transport_lock); - xprt->ops->set_retrans_timeout(task); - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (!req->rq_received) - rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); - xprt->ops->release_xprt(xprt, task); - spin_unlock_bh(&xprt->transport_lock); - return; + /* Continue transmitting the packet/record. We must be careful + * to cope with writespace callbacks arriving _after_ we have + * called xprt_sendmsg(). + */ + while (1) { + req->rq_xtime = jiffies; + status = xprt_sendmsg(xprt, req); + + if (status < 0) + break; + + if (xprt->stream) { + req->rq_bytes_sent += status; + + /* If we've sent the entire packet, immediately + * reset the count of bytes sent. 
*/ + if (req->rq_bytes_sent >= req->rq_slen) { + req->rq_bytes_sent = 0; + goto out_receive; + } + } else { + if (status >= req->rq_slen) + goto out_receive; + status = -EAGAIN; + break; + } + + dprintk("RPC: %4d xmit incomplete (%d left of %d)\n", + task->tk_pid, req->rq_slen - req->rq_bytes_sent, + req->rq_slen); + + status = -EAGAIN; + if (retry++ > 50) + break; } /* Note: at this point, task->tk_sleeping has not yet been set, @@ -770,19 +1289,60 @@ void xprt_transmit(struct rpc_task *task) task->tk_status = status; switch (status) { + case -EAGAIN: + if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { + /* Protect against races with xprt_write_space */ + spin_lock_bh(&xprt->sock_lock); + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) { + task->tk_timeout = req->rq_timeout; + rpc_sleep_on(&xprt->pending, task, NULL, NULL); + } + spin_unlock_bh(&xprt->sock_lock); + return; + } + /* Keep holding the socket if it is blocked */ + rpc_delay(task, HZ>>4); + return; case -ECONNREFUSED: + task->tk_timeout = RPC_REESTABLISH_TIMEOUT; rpc_sleep_on(&xprt->sending, task, NULL, NULL); - case -EAGAIN: case -ENOTCONN: return; default: - break; + if (xprt->stream) + xprt_disconnect(xprt); } xprt_release_write(xprt, task); return; + out_receive: + dprintk("RPC: %4d xmit complete\n", task->tk_pid); + /* Set the task's receive timeout value */ + spin_lock_bh(&xprt->sock_lock); + if (!xprt->nocong) { + int timer = task->tk_msg.rpc_proc->p_timer; + task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer); + task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries; + if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0) + task->tk_timeout = xprt->timeout.to_maxval; + } else + task->tk_timeout = req->rq_timeout; + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (!req->rq_received) + rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); + __xprt_release_write(xprt, task); + spin_unlock_bh(&xprt->sock_lock); } -static inline void do_xprt_reserve(struct rpc_task *task) +/* + * Reserve an RPC call slot. + */ +static inline void +do_xprt_reserve(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -802,25 +1362,22 @@ static inline void do_xprt_reserve(struct rpc_task *task) rpc_sleep_on(&xprt->backlog, task, NULL, NULL); } -/** - * xprt_reserve - allocate an RPC request slot - * @task: RPC task requesting a slot allocation - * - * If no more slots are available, place the task on the transport's - * backlog queue. 
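
do_xprt_reserve() below takes a preallocated rpc_rqst from the transport's free list and, when the list runs dry, parks the task on the backlog queue until xprt_release() returns a slot. The discipline, sketched single-threaded with hypothetical names:

	struct slot { struct slot *next; };

	struct slot_pool { struct slot *free; };	/* LIFO, like xprt->free */

	static struct slot *reserve_slot(struct slot_pool *p)
	{
		struct slot *s = p->free;
		if (s)
			p->free = s->next;
		return s;	/* NULL: caller sleeps on the backlog */
	}

	static void release_slot(struct slot_pool *p, struct slot *s)
	{
		s->next = p->free;
		p->free = s;	/* then wake the next backlogged task */
	}
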
- */ -void xprt_reserve(struct rpc_task *task) +void +xprt_reserve(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; task->tk_status = -EIO; if (!xprt->shutdown) { - spin_lock(&xprt->reserve_lock); + spin_lock(&xprt->xprt_lock); do_xprt_reserve(task); - spin_unlock(&xprt->reserve_lock); + spin_unlock(&xprt->xprt_lock); } } +/* + * Allocate a 'unique' XID + */ static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) { return xprt->xid++; @@ -831,7 +1388,11 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt) get_random_bytes(&xprt->xid, sizeof(xprt->xid)); } -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) +/* + * Initialize RPC request + */ +static void +xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) { struct rpc_rqst *req = task->tk_rqstp; @@ -839,104 +1400,128 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) req->rq_task = task; req->rq_xprt = xprt; req->rq_xid = xprt_alloc_xid(xprt); - req->rq_release_snd_buf = NULL; dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, req, ntohl(req->rq_xid)); } -/** - * xprt_release - release an RPC request slot - * @task: task which is finished with the slot - * +/* + * Release an RPC call slot */ -void xprt_release(struct rpc_task *task) +void +xprt_release(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req; if (!(req = task->tk_rqstp)) return; - spin_lock_bh(&xprt->transport_lock); - xprt->ops->release_xprt(xprt, task); - if (xprt->ops->release_request) - xprt->ops->release_request(task); + spin_lock_bh(&xprt->sock_lock); + __xprt_release_write(xprt, task); + __xprt_put_cong(xprt, req); if (!list_empty(&req->rq_list)) list_del(&req->rq_list); xprt->last_used = jiffies; if (list_empty(&xprt->recv) && !xprt->shutdown) - mod_timer(&xprt->timer, - xprt->last_used + xprt->idle_timeout); - spin_unlock_bh(&xprt->transport_lock); + mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT); + spin_unlock_bh(&xprt->sock_lock); task->tk_rqstp = NULL; - if (req->rq_release_snd_buf) - req->rq_release_snd_buf(req); memset(req, 0, sizeof(*req)); /* mark unused */ dprintk("RPC: %4d release request %p\n", task->tk_pid, req); - spin_lock(&xprt->reserve_lock); + spin_lock(&xprt->xprt_lock); list_add(&req->rq_list, &xprt->free); - rpc_wake_up_next(&xprt->backlog); - spin_unlock(&xprt->reserve_lock); + xprt_clear_backlog(xprt); + spin_unlock(&xprt->xprt_lock); } -/** - * xprt_set_timeout - set constant RPC timeout - * @to: RPC timeout parameters to set up - * @retr: number of retries - * @incr: amount of increase after each retry - * +/* + * Set default timeout parameters */ -void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) +static void +xprt_default_timeout(struct rpc_timeout *to, int proto) +{ + if (proto == IPPROTO_UDP) + xprt_set_timeout(to, 5, 5 * HZ); + else + xprt_set_timeout(to, 5, 60 * HZ); +} + +/* + * Set constant timeout + */ +void +xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) { to->to_initval = to->to_increment = incr; - to->to_maxval = to->to_initval + (incr * retr); + to->to_maxval = incr * retr; to->to_retries = retr; to->to_exponential = 0; } -static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) +unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; +unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; + +/* + * Initialize an RPC client + */ +static struct rpc_xprt * +xprt_setup(int 
proto, struct sockaddr_in *ap, struct rpc_timeout *to) { - int result; struct rpc_xprt *xprt; + unsigned int entries; + size_t slot_table_size; struct rpc_rqst *req; + dprintk("RPC: setting up %s transport...\n", + proto == IPPROTO_UDP? "UDP" : "TCP"); + + entries = (proto == IPPROTO_TCP)? + xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries; + if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) return ERR_PTR(-ENOMEM); memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ - - xprt->addr = *ap; - - switch (proto) { - case IPPROTO_UDP: - result = xs_setup_udp(xprt, to); - break; - case IPPROTO_TCP: - result = xs_setup_tcp(xprt, to); - break; - default: - printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", - proto); - result = -EIO; - break; - } - if (result) { + xprt->max_reqs = entries; + slot_table_size = entries * sizeof(xprt->slot[0]); + xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + if (xprt->slot == NULL) { kfree(xprt); - return ERR_PTR(result); + return ERR_PTR(-ENOMEM); } + memset(xprt->slot, 0, slot_table_size); - spin_lock_init(&xprt->transport_lock); - spin_lock_init(&xprt->reserve_lock); + xprt->addr = *ap; + xprt->prot = proto; + xprt->stream = (proto == IPPROTO_TCP)? 1 : 0; + if (xprt->stream) { + xprt->cwnd = RPC_MAXCWND(xprt); + xprt->nocong = 1; + xprt->max_payload = (1U << 31) - 1; + } else { + xprt->cwnd = RPC_INITCWND; + xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); + } + spin_lock_init(&xprt->sock_lock); + spin_lock_init(&xprt->xprt_lock); + init_waitqueue_head(&xprt->cong_wait); INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); - INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); + INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); + INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt); init_timer(&xprt->timer); xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; xprt->last_used = jiffies; - xprt->cwnd = RPC_INITCWND; + xprt->port = XPRT_MAX_RESVPORT; + + /* Set timeout parameters */ + if (to) { + xprt->timeout = *to; + } else + xprt_default_timeout(&xprt->timeout, xprt->prot); rpc_init_wait_queue(&xprt->pending, "xprt_pending"); rpc_init_wait_queue(&xprt->sending, "xprt_sending"); @@ -944,25 +1529,139 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); /* initialize free list */ - for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) + for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--) list_add(&req->rq_list, &xprt->free); xprt_init_xid(xprt); + /* Check whether we want to use a reserved port */ + xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; + dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs); return xprt; } -/** - * xprt_create_proto - create an RPC client transport - * @proto: requested transport protocol - * @sap: remote peer's address - * @to: timeout parameters for new transport - * +/* + * Bind to a reserved port + */ +static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) +{ + struct sockaddr_in myaddr = { + .sin_family = AF_INET, + }; + int err, port; + + /* Were we already bound to a given port? 
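
The max_payload values chosen in xprt_setup() above fall straight out of the framing: a TCP record length field is 31 bits wide, while a UDP reply must fit in one datagram, i.e. 64KB minus header overhead. A small check of the arithmetic; the MAX_HEADER value is an assumption here, since the kernel's is configuration-dependent:

	#include <stdio.h>

	#define MAX_HEADER 128U		/* assumed for illustration */

	int main(void)
	{
		unsigned tcp_max = (1U << 31) - 1;
		unsigned udp_max = (1U << 16) - (MAX_HEADER << 3);
		printf("tcp: %u bytes, udp: %u bytes\n", tcp_max, udp_max);
		return 0;
	}
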
Try to reuse it */ + port = xprt->port; + do { + myaddr.sin_port = htons(port); + err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, + sizeof(myaddr)); + if (err == 0) { + xprt->port = port; + return 0; + } + if (--port == 0) + port = XPRT_MAX_RESVPORT; + } while (err == -EADDRINUSE && port != xprt->port); + + printk("RPC: Can't bind to reserved port (%d).\n", -err); + return err; +} + +static void +xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (xprt->inet) + return; + + write_lock_bh(&sk->sk_callback_lock); + sk->sk_user_data = xprt; + xprt->old_data_ready = sk->sk_data_ready; + xprt->old_state_change = sk->sk_state_change; + xprt->old_write_space = sk->sk_write_space; + if (xprt->prot == IPPROTO_UDP) { + sk->sk_data_ready = udp_data_ready; + sk->sk_no_check = UDP_CSUM_NORCV; + xprt_set_connected(xprt); + } else { + tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ + sk->sk_data_ready = tcp_data_ready; + sk->sk_state_change = tcp_state_change; + xprt_clear_connected(xprt); + } + sk->sk_write_space = xprt_write_space; + + /* Reset to new socket */ + xprt->sock = sock; + xprt->inet = sk; + write_unlock_bh(&sk->sk_callback_lock); + + return; +} + +/* + * Set socket buffer length + */ +void +xprt_sock_setbufsize(struct rpc_xprt *xprt) +{ + struct sock *sk = xprt->inet; + + if (xprt->stream) + return; + if (xprt->rcvsize) { + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; + } + if (xprt->sndsize) { + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; + sk->sk_write_space(sk); + } +} + +/* + * Datastream sockets are created here, but xprt_connect will create + * and connect stream sockets. + */ +static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport) +{ + struct socket *sock; + int type, err; + + dprintk("RPC: xprt_create_socket(%s %d)\n", + (proto == IPPROTO_UDP)? "udp" : "tcp", proto); + + type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; + + if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { + printk("RPC: can't create socket (%d).\n", -err); + return NULL; + } + + /* If the caller has the capability, bind to a reserved port */ + if (resvport && xprt_bindresvport(xprt, sock) < 0) { + printk("RPC: can't bind to reserved port.\n"); + goto failed; + } + + return sock; + +failed: + sock_release(sock); + return NULL; +} + +/* + * Create an RPC client transport given the protocol and peer address. */ -struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) +struct rpc_xprt * +xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) { struct rpc_xprt *xprt; @@ -974,26 +1673,46 @@ struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rp return xprt; } -static void xprt_shutdown(struct rpc_xprt *xprt) +/* + * Prepare for transport shutdown. + */ +static void +xprt_shutdown(struct rpc_xprt *xprt) { xprt->shutdown = 1; rpc_wake_up(&xprt->sending); rpc_wake_up(&xprt->resend); - xprt_wake_pending_tasks(xprt, -EIO); + rpc_wake_up(&xprt->pending); rpc_wake_up(&xprt->backlog); + wake_up(&xprt->cong_wait); del_timer_sync(&xprt->timer); + + /* synchronously wait for connect worker to finish */ + cancel_delayed_work(&xprt->sock_connect); + flush_scheduled_work(); } -/** - * xprt_destroy - destroy an RPC transport, killing off all requests. 
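
xprt_bindresvport() above retries bind() on descending privileged ports and remembers the last successful one, so reconnects reuse the same source port, which also helps a server's duplicate request cache recognize retransmissions after a reconnect. A userspace analogue, hedged: the constant and names are illustrative, and this loop omits the wrap-around refinement of the kernel version:

	#include <errno.h>
	#include <string.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	#define MAX_RESVPORT 800	/* assumed; mirrors XPRT_MAX_RESVPORT */

	static int bind_resvport(int fd)	/* needs CAP_NET_BIND_SERVICE */
	{
		struct sockaddr_in sin;
		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;

		for (int port = MAX_RESVPORT; port > 0; port--) {
			sin.sin_port = htons((unsigned short)port);
			if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) == 0)
				return port;
			if (errno != EADDRINUSE)
				break;	/* hard error: give up */
		}
		return -1;
	}
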
- @xprt: transport to destroy - + */ -int xprt_destroy(struct rpc_xprt *xprt) +/* + * Clear the xprt backlog queue + */ +static int +xprt_clear_backlog(struct rpc_xprt *xprt) { + rpc_wake_up_next(&xprt->backlog); + wake_up(&xprt->cong_wait); + return 1; +} + +/* + * Destroy an RPC transport, killing off all requests. + */ +int +xprt_destroy(struct rpc_xprt *xprt) { dprintk("RPC: destroying transport %p\n", xprt); xprt_shutdown(xprt); - xprt->ops->destroy(xprt); + xprt_disconnect(xprt); + xprt_close(xprt); + kfree(xprt->slot); kfree(xprt); return 0; diff --git a/trunk/net/sunrpc/xprtsock.c b/trunk/net/sunrpc/xprtsock.c deleted file mode 100644 index 2e1529217e65..000000000000 --- a/trunk/net/sunrpc/xprtsock.c +++ /dev/null @@ -1,1252 +0,0 @@ -/* - * linux/net/sunrpc/xprtsock.c - * - * Client-side transport implementation for sockets. - * - * TCP callback races fixes (C) 1998 Red Hat Software - * TCP send fixes (C) 1998 Red Hat Software - * TCP NFS related read + write fixes - * (C) 1999 Dave Airlie, University of Limerick, Ireland - * - * Rewrite of large parts of the code in order to stabilize TCP stuff. - * Fix behaviour when socket buffer is full. - * (C) 1999 Trond Myklebust - * - * IP socket transport implementation, (C) 2005 Chuck Lever - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* - * How many times to try sending a request on a socket before waiting - * for the socket buffer to clear. - */ -#define XS_SENDMSG_RETRY (10U) - -/* - * Time out for an RPC UDP socket connect. UDP socket connects are - * synchronous, but we set a timeout anyway in case of resource - * exhaustion on the local host. - */ -#define XS_UDP_CONN_TO (5U * HZ) - -/* - * Wait duration for an RPC TCP connection to be established. Solaris - * NFS over TCP uses 60 seconds, for example, which is in line with how - * long a server takes to reboot. - */ -#define XS_TCP_CONN_TO (60U * HZ) - -/* - * Wait duration for a reply from the RPC portmapper. - */ -#define XS_BIND_TO (60U * HZ) - -/* - * Delay if a UDP socket connect error occurs. This is most likely some - * kind of resource problem on the local host. - */ -#define XS_UDP_REEST_TO (2U * HZ) - -/* - * The reestablish timeout allows clients to delay for a bit before attempting - * to reconnect to a server that just dropped our connection. - * - * We implement an exponential backoff when trying to reestablish a TCP - * transport connection with the server. Some servers like to drop a TCP - * connection when they are overworked, so we start with a short timeout and - * increase over time if the server is down or not responding. - */ -#define XS_TCP_INIT_REEST_TO (3U * HZ) -#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) - -/* - * TCP idle timeout; client drops the transport socket if it is idle - * for this long. Note that we also timeout UDP sockets to prevent - * holding port numbers when there is no RPC traffic.
- */ -#define XS_IDLE_DISC_TO (5U * 60 * HZ) - -#ifdef RPC_DEBUG -# undef RPC_DEBUG_DATA -# define RPCDBG_FACILITY RPCDBG_TRANS -#endif - -#ifdef RPC_DEBUG_DATA -static void xs_pktdump(char *msg, u32 *packet, unsigned int count) -{ - u8 *buf = (u8 *) packet; - int j; - - dprintk("RPC: %s\n", msg); - for (j = 0; j < count && j < 128; j += 4) { - if (!(j & 31)) { - if (j) - dprintk("\n"); - dprintk("0x%04x ", j); - } - dprintk("%02x%02x%02x%02x ", - buf[j], buf[j+1], buf[j+2], buf[j+3]); - } - dprintk("\n"); -} -#else -static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) -{ - /* NOP */ -} -#endif - -#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) - -static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) -{ - struct kvec iov = { - .iov_base = xdr->head[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_name = addr, - .msg_namelen = addrlen, - .msg_flags = XS_SENDMSG_FLAGS, - }; - - if (xdr->len > len) - msg.msg_flags |= MSG_MORE; - - if (likely(iov.iov_len)) - return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); - return kernel_sendmsg(sock, &msg, NULL, 0, 0); -} - -static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) -{ - struct kvec iov = { - .iov_base = xdr->tail[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_flags = XS_SENDMSG_FLAGS, - }; - - return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); -} - -/** - * xs_sendpages - write pages directly to a socket - * @sock: socket to send on - * @addr: UDP only -- address of destination - * @addrlen: UDP only -- length of destination address - * @xdr: buffer containing this request - * @base: starting position in the buffer - * - */ -static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) -{ - struct page **ppage = xdr->pages; - unsigned int len, pglen = xdr->page_len; - int err, ret = 0; - ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); - - if (unlikely(!sock)) - return -ENOTCONN; - - clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); - - len = xdr->head[0].iov_len; - if (base < len || (addr != NULL && base == 0)) { - err = xs_send_head(sock, addr, addrlen, xdr, base, len); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - if (err != (len - base)) - goto out; - base = 0; - } else - base -= len; - - if (unlikely(pglen == 0)) - goto copy_tail; - if (unlikely(base >= pglen)) { - base -= pglen; - goto copy_tail; - } - if (base || xdr->page_base) { - pglen -= base; - base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; - } - - sendpage = sock->ops->sendpage ? : sock_no_sendpage; - do { - int flags = XS_SENDMSG_FLAGS; - - len = PAGE_CACHE_SIZE; - if (base) - len -= base; - if (pglen < len) - len = pglen; - - if (pglen != len || xdr->tail[0].iov_len != 0) - flags |= MSG_MORE; - - /* Hmm... 
We might be dealing with highmem pages */ - if (PageHighMem(*ppage)) - sendpage = sock_no_sendpage; - err = sendpage(sock, *ppage, base, len, flags); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - if (err != len) - goto out; - base = 0; - ppage++; - } while ((pglen -= len) != 0); -copy_tail: - len = xdr->tail[0].iov_len; - if (base < len) { - err = xs_send_tail(sock, xdr, base, len); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - } -out: - return ret; -} - -/** - * xs_nospace - place task on wait queue if transmit was incomplete - * @task: task to put to sleep - * - */ -static void xs_nospace(struct rpc_task *task) -{ - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_xprt *xprt = req->rq_xprt; - - dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", - task->tk_pid, req->rq_slen - req->rq_bytes_sent, - req->rq_slen); - - if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { - /* Protect against races with write_space */ - spin_lock_bh(&xprt->transport_lock); - - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) - xprt_wait_for_buffer_space(task); - - spin_unlock_bh(&xprt->transport_lock); - } else - /* Keep holding the socket if it is blocked */ - rpc_delay(task, HZ>>4); -} - -/** - * xs_udp_send_request - write an RPC request to a UDP socket - * @task: address of RPC task that manages the state of an RPC request - * - * Return values: - * 0: The request has been sent - * EAGAIN: The socket was blocked, please call again later to - * complete the request - * ENOTCONN: Caller needs to invoke connect logic then call again - * other: Some other error occurred, the request was not sent - */ -static int xs_udp_send_request(struct rpc_task *task) -{ - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_xprt *xprt = req->rq_xprt; - struct xdr_buf *xdr = &req->rq_snd_buf; - int status; - - xs_pktdump("packet data:", - req->rq_svec->iov_base, - req->rq_svec->iov_len); - - req->rq_xtime = jiffies; - status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, - sizeof(xprt->addr), xdr, req->rq_bytes_sent); - - dprintk("RPC: xs_udp_send_request(%u) = %d\n", - xdr->len - req->rq_bytes_sent, status); - - if (likely(status >= (int) req->rq_slen)) - return 0; - - /* Still some bytes left; set up for a retry later. */ - if (status > 0) - status = -EAGAIN; - - switch (status) { - case -ENETUNREACH: - case -EPIPE: - case -ECONNREFUSED: - /* When the server has died, an ICMP port unreachable message - * prompts ECONNREFUSED. */ - break; - case -EAGAIN: - xs_nospace(task); - break; - default: - dprintk("RPC: sendmsg returned unrecognized error %d\n", - -status); - break; - } - - return status; -} - -static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf) -{ - u32 reclen = buf->len - sizeof(rpc_fraghdr); - rpc_fraghdr *base = buf->head[0].iov_base; - *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen); -} - -/** - * xs_tcp_send_request - write an RPC request to a TCP socket - * @task: address of RPC task that manages the state of an RPC request - * - * Return values: - * 0: The request has been sent - * EAGAIN: The socket was blocked, please call again later to - * complete the request - * ENOTCONN: Caller needs to invoke connect logic then call again - * other: Some other error occurred, the request was not sent - * - * XXX: In the case of soft timeouts, should we eventually give up - * if sendmsg is not able to make progress?
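
xs_sendpages() above accumulates the byte counts of several partial sends (head kvec, page array, tail kvec) and bails out at the first short or failed write, so the caller can resume later from rq_bytes_sent. The accumulation pattern, reduced to a sketch with a stand-in send callback:

	#include <sys/types.h>

	/* send_chunk_fn stands in for kernel_sendmsg()/sendpage() */
	typedef ssize_t (*send_chunk_fn)(void *ctx, size_t idx, size_t want);

	static ssize_t send_all(void *ctx, send_chunk_fn send_chunk,
				const size_t *want, size_t nchunks)
	{
		ssize_t total = 0;
		for (size_t i = 0; i < nchunks; i++) {
			ssize_t n = send_chunk(ctx, i, want[i]);
			if (n < 0)
				return total ? total : n; /* error only if nothing sent */
			total += n;
			if ((size_t)n != want[i])
				break;		/* short write: retry later */
		}
		return total;
	}
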
- */ -static int xs_tcp_send_request(struct rpc_task *task) -{ - struct rpc_rqst *req = task->tk_rqstp; - struct rpc_xprt *xprt = req->rq_xprt; - struct xdr_buf *xdr = &req->rq_snd_buf; - int status, retry = 0; - - xs_encode_tcp_record_marker(&req->rq_snd_buf); - - xs_pktdump("packet data:", - req->rq_svec->iov_base, - req->rq_svec->iov_len); - - /* Continue transmitting the packet/record. We must be careful - * to cope with writespace callbacks arriving _after_ we have - * called sendmsg(). */ - while (1) { - req->rq_xtime = jiffies; - status = xs_sendpages(xprt->sock, NULL, 0, xdr, - req->rq_bytes_sent); - - dprintk("RPC: xs_tcp_send_request(%u) = %d\n", - xdr->len - req->rq_bytes_sent, status); - - if (unlikely(status < 0)) - break; - - /* If we've sent the entire packet, immediately - * reset the count of bytes sent. */ - req->rq_bytes_sent += status; - if (likely(req->rq_bytes_sent >= req->rq_slen)) { - req->rq_bytes_sent = 0; - return 0; - } - - status = -EAGAIN; - if (retry++ > XS_SENDMSG_RETRY) - break; - } - - switch (status) { - case -EAGAIN: - xs_nospace(task); - break; - case -ECONNREFUSED: - case -ECONNRESET: - case -ENOTCONN: - case -EPIPE: - status = -ENOTCONN; - break; - default: - dprintk("RPC: sendmsg returned unrecognized error %d\n", - -status); - xprt_disconnect(xprt); - break; - } - - return status; -} - -/** - * xs_close - close a socket - * @xprt: transport - * - * This is used when all requests are complete; ie, no DRC state remains - * on the server we want to save. - */ -static void xs_close(struct rpc_xprt *xprt) -{ - struct socket *sock = xprt->sock; - struct sock *sk = xprt->inet; - - if (!sk) - return; - - dprintk("RPC: xs_close xprt %p\n", xprt); - - write_lock_bh(&sk->sk_callback_lock); - xprt->inet = NULL; - xprt->sock = NULL; - - sk->sk_user_data = NULL; - sk->sk_data_ready = xprt->old_data_ready; - sk->sk_state_change = xprt->old_state_change; - sk->sk_write_space = xprt->old_write_space; - write_unlock_bh(&sk->sk_callback_lock); - - sk->sk_no_check = 0; - - sock_release(sock); -} - -/** - * xs_destroy - prepare to shutdown a transport - * @xprt: doomed transport - * - */ -static void xs_destroy(struct rpc_xprt *xprt) -{ - dprintk("RPC: xs_destroy xprt %p\n", xprt); - - cancel_delayed_work(&xprt->connect_worker); - flush_scheduled_work(); - - xprt_disconnect(xprt); - xs_close(xprt); - kfree(xprt->slot); -} - -static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) -{ - return (struct rpc_xprt *) sk->sk_user_data; -} - -/** - * xs_udp_data_ready - "data ready" callback for UDP sockets - * @sk: socket with data to read - * @len: how much data to read - * - */ -static void xs_udp_data_ready(struct sock *sk, int len) -{ - struct rpc_task *task; - struct rpc_xprt *xprt; - struct rpc_rqst *rovr; - struct sk_buff *skb; - int err, repsize, copied; - u32 _xid, *xp; - - read_lock(&sk->sk_callback_lock); - dprintk("RPC: xs_udp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) - goto out; - - if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) - goto out; - - if (xprt->shutdown) - goto dropit; - - repsize = skb->len - sizeof(struct udphdr); - if (repsize < 4) { - dprintk("RPC: impossible RPC reply size %d!\n", repsize); - goto dropit; - } - - /* Copy the XID from the skb... 
*/ - xp = skb_header_pointer(skb, sizeof(struct udphdr), - sizeof(_xid), &_xid); - if (xp == NULL) - goto dropit; - - /* Look up and lock the request corresponding to the given XID */ - spin_lock(&xprt->transport_lock); - rovr = xprt_lookup_rqst(xprt, *xp); - if (!rovr) - goto out_unlock; - task = rovr->rq_task; - - if ((copied = rovr->rq_private_buf.buflen) > repsize) - copied = repsize; - - /* Suck it into the iovec, verify checksum if not done by hw. */ - if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) - goto out_unlock; - - /* Something worked... */ - dst_confirm(skb->dst); - - xprt_adjust_cwnd(task, copied); - xprt_update_rtt(task); - xprt_complete_rqst(task, copied); - - out_unlock: - spin_unlock(&xprt->transport_lock); - dropit: - skb_free_datagram(sk, skb); - out: - read_unlock(&sk->sk_callback_lock); -} - -static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) -{ - if (len > desc->count) - len = desc->count; - if (skb_copy_bits(desc->skb, desc->offset, p, len)) { - dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", - len, desc->count); - return 0; - } - desc->offset += len; - desc->count -= len; - dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", - len, desc->count); - return len; -} - -static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len, used; - char *p; - - p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; - len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; - used = xs_tcp_copy_data(desc, p, len); - xprt->tcp_offset += used; - if (used != len) - return; - - xprt->tcp_reclen = ntohl(xprt->tcp_recm); - if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) - xprt->tcp_flags |= XPRT_LAST_FRAG; - else - xprt->tcp_flags &= ~XPRT_LAST_FRAG; - xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; - - xprt->tcp_flags &= ~XPRT_COPY_RECM; - xprt->tcp_offset = 0; - - /* Sanity check of the record length */ - if (unlikely(xprt->tcp_reclen < 4)) { - dprintk("RPC: invalid TCP record fragment length\n"); - xprt_disconnect(xprt); - return; - } - dprintk("RPC: reading TCP record fragment of length %d\n", - xprt->tcp_reclen); -} - -static void xs_tcp_check_recm(struct rpc_xprt *xprt) -{ - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); - if (xprt->tcp_offset == xprt->tcp_reclen) { - xprt->tcp_flags |= XPRT_COPY_RECM; - xprt->tcp_offset = 0; - if (xprt->tcp_flags & XPRT_LAST_FRAG) { - xprt->tcp_flags &= ~XPRT_COPY_DATA; - xprt->tcp_flags |= XPRT_COPY_XID; - xprt->tcp_copied = 0; - } - } -} - -static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len, used; - char *p; - - len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; - dprintk("RPC: reading XID (%Zu bytes)\n", len); - p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; - used = xs_tcp_copy_data(desc, p, len); - xprt->tcp_offset += used; - if (used != len) - return; - xprt->tcp_flags &= ~XPRT_COPY_XID; - xprt->tcp_flags |= XPRT_COPY_DATA; - xprt->tcp_copied = 4; - dprintk("RPC: reading reply for XID %08x\n", - ntohl(xprt->tcp_xid)); - xs_tcp_check_recm(xprt); -} - -static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - struct rpc_rqst *req; - struct xdr_buf *rcvbuf; - size_t len; - ssize_t r; - - /* Find and lock the request corresponding to this xid */ - spin_lock(&xprt->transport_lock); - req = xprt_lookup_rqst(xprt, xprt->tcp_xid); - if (!req) { - 
xprt->tcp_flags &= ~XPRT_COPY_DATA; - dprintk("RPC: XID %08x request not found!\n", - ntohl(xprt->tcp_xid)); - spin_unlock(&xprt->transport_lock); - return; - } - - rcvbuf = &req->rq_private_buf; - len = desc->count; - if (len > xprt->tcp_reclen - xprt->tcp_offset) { - skb_reader_t my_desc; - - len = xprt->tcp_reclen - xprt->tcp_offset; - memcpy(&my_desc, desc, sizeof(my_desc)); - my_desc.count = len; - r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - &my_desc, xs_tcp_copy_data); - desc->count -= r; - desc->offset += r; - } else - r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - desc, xs_tcp_copy_data); - - if (r > 0) { - xprt->tcp_copied += r; - xprt->tcp_offset += r; - } - if (r != len) { - /* Error when copying to the receive buffer, - * usually because we weren't able to allocate - * additional buffer pages. All we can do now - * is turn off XPRT_COPY_DATA, so the request - * will not receive any additional updates, - * and time out. - * Any remaining data from this record will - * be discarded. - */ - xprt->tcp_flags &= ~XPRT_COPY_DATA; - dprintk("RPC: XID %08x truncated request\n", - ntohl(xprt->tcp_xid)); - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); - goto out; - } - - dprintk("RPC: XID %08x read %Zd bytes\n", - ntohl(xprt->tcp_xid), r); - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); - - if (xprt->tcp_copied == req->rq_private_buf.buflen) - xprt->tcp_flags &= ~XPRT_COPY_DATA; - else if (xprt->tcp_offset == xprt->tcp_reclen) { - if (xprt->tcp_flags & XPRT_LAST_FRAG) - xprt->tcp_flags &= ~XPRT_COPY_DATA; - } - -out: - if (!(xprt->tcp_flags & XPRT_COPY_DATA)) - xprt_complete_rqst(req->rq_task, xprt->tcp_copied); - spin_unlock(&xprt->transport_lock); - xs_tcp_check_recm(xprt); -} - -static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len; - - len = xprt->tcp_reclen - xprt->tcp_offset; - if (len > desc->count) - len = desc->count; - desc->count -= len; - desc->offset += len; - xprt->tcp_offset += len; - dprintk("RPC: discarded %Zu bytes\n", len); - xs_tcp_check_recm(xprt); -} - -static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) -{ - struct rpc_xprt *xprt = rd_desc->arg.data; - skb_reader_t desc = { - .skb = skb, - .offset = offset, - .count = len, - .csum = 0 - }; - - dprintk("RPC: xs_tcp_data_recv started\n"); - do { - /* Read in a new fragment marker if necessary */ - /* Can we ever really expect to get completely empty fragments? 
*/ - if (xprt->tcp_flags & XPRT_COPY_RECM) { - xs_tcp_read_fraghdr(xprt, &desc); - continue; - } - /* Read in the xid if necessary */ - if (xprt->tcp_flags & XPRT_COPY_XID) { - xs_tcp_read_xid(xprt, &desc); - continue; - } - /* Read in the request data */ - if (xprt->tcp_flags & XPRT_COPY_DATA) { - xs_tcp_read_request(xprt, &desc); - continue; - } - /* Skip over any trailing bytes on short reads */ - xs_tcp_read_discard(xprt, &desc); - } while (desc.count); - dprintk("RPC: xs_tcp_data_recv done\n"); - return len - desc.count; -} - -/** - * xs_tcp_data_ready - "data ready" callback for TCP sockets - * @sk: socket with data to read - * @bytes: how much data to read - * - */ -static void xs_tcp_data_ready(struct sock *sk, int bytes) -{ - struct rpc_xprt *xprt; - read_descriptor_t rd_desc; - - read_lock(&sk->sk_callback_lock); - dprintk("RPC: xs_tcp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) - goto out; - if (xprt->shutdown) - goto out; - - /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ - rd_desc.arg.data = xprt; - rd_desc.count = 65536; - tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); -out: - read_unlock(&sk->sk_callback_lock); -} - -/** - * xs_tcp_state_change - callback to handle TCP socket state changes - * @sk: socket whose state has changed - * - */ -static void xs_tcp_state_change(struct sock *sk) -{ - struct rpc_xprt *xprt; - - read_lock(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk))) - goto out; - dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); - dprintk("RPC: state %x conn %d dead %d zapped %d\n", - sk->sk_state, xprt_connected(xprt), - sock_flag(sk, SOCK_DEAD), - sock_flag(sk, SOCK_ZAPPED)); - - switch (sk->sk_state) { - case TCP_ESTABLISHED: - spin_lock_bh(&xprt->transport_lock); - if (!xprt_test_and_set_connected(xprt)) { - /* Reset TCP record info */ - xprt->tcp_offset = 0; - xprt->tcp_reclen = 0; - xprt->tcp_copied = 0; - xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; - xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; - xprt_wake_pending_tasks(xprt, 0); - } - spin_unlock_bh(&xprt->transport_lock); - break; - case TCP_SYN_SENT: - case TCP_SYN_RECV: - break; - default: - xprt_disconnect(xprt); - break; - } - out: - read_unlock(&sk->sk_callback_lock); -} - -/** - * xs_udp_write_space - callback invoked when socket buffer space - * becomes available - * @sk: socket whose state has changed - * - * Called when more output buffer space is available for this socket. - * We try not to wake our writers until they can make "significant" - * progress, otherwise we'll waste resources thrashing kernel_sendmsg - * with a bunch of small requests. - */ -static void xs_udp_write_space(struct sock *sk) -{ - read_lock(&sk->sk_callback_lock); - - /* from net/core/sock.c:sock_def_write_space */ - if (sock_writeable(sk)) { - struct socket *sock; - struct rpc_xprt *xprt; - - if (unlikely(!(sock = sk->sk_socket))) - goto out; - if (unlikely(!(xprt = xprt_from_sock(sk)))) - goto out; - if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) - goto out; - - xprt_write_space(xprt); - } - - out: - read_unlock(&sk->sk_callback_lock); -} - -/** - * xs_tcp_write_space - callback invoked when socket buffer space - * becomes available - * @sk: socket whose state has changed - * - * Called when more output buffer space is available for this socket. - * We try not to wake our writers until they can make "significant" - * progress, otherwise we'll waste resources thrashing kernel_sendmsg - * with a bunch of small requests. 
- */ -static void xs_tcp_write_space(struct sock *sk) -{ - read_lock(&sk->sk_callback_lock); - - /* from net/core/stream.c:sk_stream_write_space */ - if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { - struct socket *sock; - struct rpc_xprt *xprt; - - if (unlikely(!(sock = sk->sk_socket))) - goto out; - if (unlikely(!(xprt = xprt_from_sock(sk)))) - goto out; - if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) - goto out; - - xprt_write_space(xprt); - } - - out: - read_unlock(&sk->sk_callback_lock); -} - -static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) -{ - struct sock *sk = xprt->inet; - - if (xprt->rcvsize) { - sk->sk_userlocks |= SOCK_RCVBUF_LOCK; - sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; - } - if (xprt->sndsize) { - sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; - sk->sk_write_space(sk); - } -} - -/** - * xs_udp_set_buffer_size - set send and receive limits - * @xprt: generic transport - * @sndsize: requested size of send buffer, in bytes - * @rcvsize: requested size of receive buffer, in bytes - * - * Set socket send and receive buffer size limits. - */ -static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) -{ - xprt->sndsize = 0; - if (sndsize) - xprt->sndsize = sndsize + 1024; - xprt->rcvsize = 0; - if (rcvsize) - xprt->rcvsize = rcvsize + 1024; - - xs_udp_do_set_buffer_size(xprt); -} - -/** - * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport - * @task: task that timed out - * - * Adjust the congestion window after a retransmit timeout has occurred. - */ -static void xs_udp_timer(struct rpc_task *task) -{ - xprt_adjust_cwnd(task, -ETIMEDOUT); -} - -static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) -{ - struct sockaddr_in myaddr = { - .sin_family = AF_INET, - }; - int err; - unsigned short port = xprt->port; - - do { - myaddr.sin_port = htons(port); - err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, - sizeof(myaddr)); - if (err == 0) { - xprt->port = port; - dprintk("RPC: xs_bindresvport bound to port %u\n", - port); - return 0; - } - if (port <= xprt_min_resvport) - port = xprt_max_resvport; - else - port--; - } while (err == -EADDRINUSE && port != xprt->port); - - dprintk("RPC: can't bind to reserved port (%d).\n", -err); - return err; -} - -/** - * xs_udp_connect_worker - set up a UDP socket - * @args: RPC transport to connect - * - * Invoked by a work queue tasklet. 
- */ -static void xs_udp_connect_worker(void *args) -{ - struct rpc_xprt *xprt = (struct rpc_xprt *) args; - struct socket *sock = xprt->sock; - int err, status = -EIO; - - if (xprt->shutdown || xprt->addr.sin_port == 0) - goto out; - - dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt); - - /* Start by resetting any existing state */ - xs_close(xprt); - - if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { - dprintk("RPC: can't create UDP transport socket (%d).\n", -err); - goto out; - } - - if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { - sock_release(sock); - goto out; - } - - if (!xprt->inet) { - struct sock *sk = sock->sk; - - write_lock_bh(&sk->sk_callback_lock); - - sk->sk_user_data = xprt; - xprt->old_data_ready = sk->sk_data_ready; - xprt->old_state_change = sk->sk_state_change; - xprt->old_write_space = sk->sk_write_space; - sk->sk_data_ready = xs_udp_data_ready; - sk->sk_write_space = xs_udp_write_space; - sk->sk_no_check = UDP_CSUM_NORCV; - - xprt_set_connected(xprt); - - /* Reset to new socket */ - xprt->sock = sock; - xprt->inet = sk; - - write_unlock_bh(&sk->sk_callback_lock); - } - xs_udp_do_set_buffer_size(xprt); - status = 0; -out: - xprt_wake_pending_tasks(xprt, status); - xprt_clear_connecting(xprt); -} - -/* - * We need to preserve the port number so the reply cache on the server can - * find our cached RPC replies when we get around to reconnecting. - */ -static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) -{ - int result; - struct socket *sock = xprt->sock; - struct sockaddr any; - - dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); - - /* - * Disconnect the transport socket by doing a connect operation - * with AF_UNSPEC. This should return immediately... - */ - memset(&any, 0, sizeof(any)); - any.sa_family = AF_UNSPEC; - result = sock->ops->connect(sock, &any, sizeof(any), 0); - if (result) - dprintk("RPC: AF_UNSPEC connect return code %d\n", - result); -} - -/** - * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint - * @args: RPC transport to connect - * - * Invoked by a work queue tasklet. 
- */ -static void xs_tcp_connect_worker(void *args) -{ - struct rpc_xprt *xprt = (struct rpc_xprt *)args; - struct socket *sock = xprt->sock; - int err, status = -EIO; - - if (xprt->shutdown || xprt->addr.sin_port == 0) - goto out; - - dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt); - - if (!xprt->sock) { - /* start from scratch */ - if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { - dprintk("RPC: can't create TCP transport socket (%d).\n", -err); - goto out; - } - - if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { - sock_release(sock); - goto out; - } - } else - /* "close" the socket, preserving the local port */ - xs_tcp_reuse_connection(xprt); - - if (!xprt->inet) { - struct sock *sk = sock->sk; - - write_lock_bh(&sk->sk_callback_lock); - - sk->sk_user_data = xprt; - xprt->old_data_ready = sk->sk_data_ready; - xprt->old_state_change = sk->sk_state_change; - xprt->old_write_space = sk->sk_write_space; - sk->sk_data_ready = xs_tcp_data_ready; - sk->sk_state_change = xs_tcp_state_change; - sk->sk_write_space = xs_tcp_write_space; - - /* socket options */ - sk->sk_userlocks |= SOCK_BINDPORT_LOCK; - sock_reset_flag(sk, SOCK_LINGER); - tcp_sk(sk)->linger2 = 0; - tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; - - xprt_clear_connected(xprt); - - /* Reset to new socket */ - xprt->sock = sock; - xprt->inet = sk; - - write_unlock_bh(&sk->sk_callback_lock); - } - - /* Tell the socket layer to start connecting... */ - status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, - sizeof(xprt->addr), O_NONBLOCK); - dprintk("RPC: %p connect status %d connected %d sock state %d\n", - xprt, -status, xprt_connected(xprt), sock->sk->sk_state); - if (status < 0) { - switch (status) { - case -EINPROGRESS: - case -EALREADY: - goto out_clear; - case -ECONNREFUSED: - case -ECONNRESET: - /* retry with existing socket, after a delay */ - break; - default: - /* get rid of existing socket, and retry */ - xs_close(xprt); - break; - } - } -out: - xprt_wake_pending_tasks(xprt, status); -out_clear: - xprt_clear_connecting(xprt); -} - -/** - * xs_connect - connect a socket to a remote endpoint - * @task: address of RPC task that manages state of connect request - * - * TCP: If the remote end dropped the connection, delay reconnecting. - * - * UDP socket connects are synchronous, but we use a work queue anyway - * to guarantee that even unprivileged user processes can set up a - * socket on a privileged port. - * - * If a UDP socket connect fails, the delay behavior here prevents - * retry floods (hard mounts). - */ -static void xs_connect(struct rpc_task *task) -{ - struct rpc_xprt *xprt = task->tk_xprt; - - if (xprt_test_and_set_connecting(xprt)) - return; - - if (xprt->sock != NULL) { - dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", - xprt, xprt->reestablish_timeout / HZ); - schedule_delayed_work(&xprt->connect_worker, - xprt->reestablish_timeout); - xprt->reestablish_timeout <<= 1; - if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) - xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; - } else { - dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - schedule_work(&xprt->connect_worker); - - /* flush_scheduled_work can sleep... 
*/ - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); - } -} - -static struct rpc_xprt_ops xs_udp_ops = { - .set_buffer_size = xs_udp_set_buffer_size, - .reserve_xprt = xprt_reserve_xprt_cong, - .release_xprt = xprt_release_xprt_cong, - .connect = xs_connect, - .send_request = xs_udp_send_request, - .set_retrans_timeout = xprt_set_retrans_timeout_rtt, - .timer = xs_udp_timer, - .release_request = xprt_release_rqst_cong, - .close = xs_close, - .destroy = xs_destroy, -}; - -static struct rpc_xprt_ops xs_tcp_ops = { - .reserve_xprt = xprt_reserve_xprt, - .release_xprt = xprt_release_xprt, - .connect = xs_connect, - .send_request = xs_tcp_send_request, - .set_retrans_timeout = xprt_set_retrans_timeout_def, - .close = xs_close, - .destroy = xs_destroy, -}; - -/** - * xs_setup_udp - Set up transport to use a UDP socket - * @xprt: transport to set up - * @to: timeout parameters - * - */ -int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) -{ - size_t slot_table_size; - - dprintk("RPC: setting up udp-ipv4 transport...\n"); - - xprt->max_reqs = xprt_udp_slot_table_entries; - slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); - if (xprt->slot == NULL) - return -ENOMEM; - memset(xprt->slot, 0, slot_table_size); - - xprt->prot = IPPROTO_UDP; - xprt->port = xprt_max_resvport; - xprt->tsh_size = 0; - xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; - /* XXX: header size can vary due to auth type, IPv6, etc. */ - xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - - INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); - xprt->bind_timeout = XS_BIND_TO; - xprt->connect_timeout = XS_UDP_CONN_TO; - xprt->reestablish_timeout = XS_UDP_REEST_TO; - xprt->idle_timeout = XS_IDLE_DISC_TO; - - xprt->ops = &xs_udp_ops; - - if (to) - xprt->timeout = *to; - else - xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); - - return 0; -} - -/** - * xs_setup_tcp - Set up transport to use a TCP socket - * @xprt: transport to set up - * @to: timeout parameters - * - */ -int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) -{ - size_t slot_table_size; - - dprintk("RPC: setting up tcp-ipv4 transport...\n"); - - xprt->max_reqs = xprt_tcp_slot_table_entries; - slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); - if (xprt->slot == NULL) - return -ENOMEM; - memset(xprt->slot, 0, slot_table_size); - - xprt->prot = IPPROTO_TCP; - xprt->port = xprt_max_resvport; - xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); - xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; - xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; - - INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); - xprt->bind_timeout = XS_BIND_TO; - xprt->connect_timeout = XS_TCP_CONN_TO; - xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; - xprt->idle_timeout = XS_IDLE_DISC_TO; - - xprt->ops = &xs_tcp_ops; - - if (to) - xprt->timeout = *to; - else - xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); - - return 0; -}