From a5871deea050dfff1e0efcba3777afecb76c978b Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Mon, 4 Feb 2013 08:18:05 -0500
Subject: [PATCH]

--- yaml ---
r: 359723
b: refs/heads/master
c: aca8a23de60c705e2458b2c6731ad59aa0717f83
h: refs/heads/master
i:
  359721: aeaa2244dea30ebad5519a8419ef3ef36693c612
  359719: 2f968478a3fd1153e6dd4cf7b3c3143acb085b6a
v: v3
---
 [refs]                   |  2 +-
 trunk/fs/nfsd/nfscache.c | 50 +++++++++++++++++++++++++++++++++++++---
 2 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/[refs] b/[refs]
index 02adda10affc..a7db1853523c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2c6b691c05bf77c4bc7c9f1a9b6d93a160928421
+refs/heads/master: aca8a23de60c705e2458b2c6731ad59aa0717f83
diff --git a/trunk/fs/nfsd/nfscache.c b/trunk/fs/nfsd/nfscache.c
index e8ea785e295d..d7b088bee684 100644
--- a/trunk/fs/nfsd/nfscache.c
+++ b/trunk/fs/nfsd/nfscache.c
@@ -36,6 +36,7 @@ static inline u32 request_hash(u32 xid)
 }
 
 static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void	cache_cleaner_func(struct work_struct *unused);
 
 /*
  * locking for the reply cache:
@@ -43,6 +44,7 @@ static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
  * Otherwise, it when accessing _prev or _next, the lock must be held.
  */
 static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
 /*
  * Put a cap on the size of the DRC based on the amount of available
@@ -131,6 +133,8 @@ void nfsd_reply_cache_shutdown(void)
 {
 	struct svc_cacherep	*rp;
 
+	cancel_delayed_work_sync(&cache_cleaner);
+
 	while (!list_empty(&lru_head)) {
 		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
 		nfsd_reply_cache_free_locked(rp);
@@ -146,13 +150,15 @@ void nfsd_reply_cache_shutdown(void)
 }
 
 /*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
  */
 static void
 lru_put_end(struct svc_cacherep *rp)
 {
 	rp->c_timestamp = jiffies;
 	list_move_tail(&rp->c_lru, &lru_head);
+	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 /*
@@ -172,6 +178,42 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
 	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
 }
 
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+	struct svc_cacherep *rp, *tmp;
+
+	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+		if (!nfsd_cache_entry_expired(rp) &&
+		    num_drc_entries <= max_drc_entries)
+			break;
+		nfsd_reply_cache_free_locked(rp);
+	}
+
+	/*
+	 * Conditionally rearm the job. If we cleaned out the list, then
+	 * cancel any pending run (since there won't be any work to do).
+	 * Otherwise, we rearm the job or modify the existing one to run in
+	 * RC_EXPIRE since we just ran the pruner.
+	 */
+	if (list_empty(&lru_head))
+		cancel_delayed_work(&cache_cleaner);
+	else
+		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+	spin_lock(&cache_lock);
+	prune_cache_entries();
+	spin_unlock(&cache_lock);
+}
+
 /*
  * Search the request hash for an entry that matches the given rqstp.
  * Must be called with cache_lock held. Returns the found entry or
@@ -192,7 +234,6 @@ nfsd_cache_search(struct svc_rqst *rqstp)
 	hlist_for_each_entry(rp, hn, rh, c_hash) {
 		if (xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&
-		    !nfsd_cache_entry_expired(rp) &&
 		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
 		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
 			return rp;
@@ -234,8 +275,11 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	if (!list_empty(&lru_head)) {
 		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
 		if (nfsd_cache_entry_expired(rp) ||
-		    num_drc_entries >= max_drc_entries)
+		    num_drc_entries >= max_drc_entries) {
+			lru_put_end(rp);
+			prune_cache_entries();
 			goto setup_entry;
+		}
 	}
 	spin_unlock(&cache_lock);
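
The moving parts above are the kernel's delayed-workqueue idiom: schedule_delayed_work() is a no-op while a run is already pending (so lru_put_end() can call it cheaply on every cache insertion), mod_delayed_work() resets an already-queued run to fire a full RC_EXPIRE later, and cancel_delayed_work_sync() in the shutdown path waits out a running cleaner before the cache is torn down. Below is a minimal, self-contained module sketch of that same pattern; the demo_* identifiers and the DEMO_EXPIRE period are illustrative stand-ins, not part of the patch, and only the workqueue calls mirror what nfscache.c does with cache_cleaner.

/*
 * Hypothetical sketch of the delayed-work rearm pattern used by the patch.
 * All demo_* names are made up for illustration.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define DEMO_EXPIRE	(HZ)	/* stand-in for RC_EXPIRE: one second */

static void demo_cleaner_func(struct work_struct *unused);

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_DELAYED_WORK(demo_cleaner, demo_cleaner_func);
static int demo_entries = 3;	/* stand-in for the DRC's LRU entries */

static void demo_cleaner_func(struct work_struct *unused)
{
	spin_lock(&demo_lock);
	demo_entries--;		/* "prune" one entry per run */
	if (demo_entries <= 0) {
		/* list empty: drop any pending run, as prune_cache_entries() does */
		cancel_delayed_work(&demo_cleaner);
	} else {
		/* just pruned, so push the next run a full period out */
		mod_delayed_work(system_wq, &demo_cleaner, DEMO_EXPIRE);
	}
	spin_unlock(&demo_lock);
}

static int __init demo_init(void)
{
	/* no-op if a run is already queued, like lru_put_end() */
	schedule_delayed_work(&demo_cleaner, DEMO_EXPIRE);
	return 0;
}

static void __exit demo_exit(void)
{
	/* wait out a running cleaner, like nfsd_reply_cache_shutdown() */
	cancel_delayed_work_sync(&demo_cleaner);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note the asymmetry the patch comment calls out: the insertion path uses schedule_delayed_work(), which leaves an earlier deadline alone, while the pruner rearms with mod_delayed_work(), because after a sweep the next run is not needed until a full RC_EXPIRE has elapsed.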