nfsd: add recurring workqueue job to clean the cache
It's not sufficient to only clean the cache when requests come in. What
if we have a flurry of activity and then the server goes idle? Add a
workqueue job that will clean the cache every RC_EXPIRE period.

Care is taken to only run this when we expect to have entries expiring.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Jeff Layton authored and J. Bruce Fields committed Feb 4, 2013
1 parent 2c6b691 · commit aca8a23
Showing 1 changed file with 47 additions and 3 deletions: fs/nfsd/nfscache.c
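The patch leans on the kernel's delayed-workqueue API. Before reading the diff, a minimal self-contained sketch of the same arm/rearm/cancel pattern may help; everything prefixed with my_ is a hypothetical stand-in, not code from this commit:

#include <linux/workqueue.h>

#define MY_EXPIRE	(120 * HZ)	/* stand-in for RC_EXPIRE */

static void my_cleaner_func(struct work_struct *unused);
static DECLARE_DELAYED_WORK(my_cleaner, my_cleaner_func);

static void my_cleaner_func(struct work_struct *unused)
{
	/* ... prune expired entries here ... */

	/* A pass just ran, so push the next one a full period out. */
	mod_delayed_work(system_wq, &my_cleaner, MY_EXPIRE);
}

/* Hot path: a no-op if a run is already queued, so this stays cheap. */
static void my_touch(void)
{
	schedule_delayed_work(&my_cleaner, MY_EXPIRE);
}

/* Shutdown: wait for any in-flight run to finish before tearing down. */
static void my_shutdown(void)
{
	cancel_delayed_work_sync(&my_cleaner);
}

The distinction between the two arming calls is what the patch relies on: schedule_delayed_work() leaves an already-pending timer untouched, while mod_delayed_work() resets it to the new delay.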
@@ -36,13 +36,15 @@ static inline u32 request_hash(u32 xid)
 }
 
 static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
+static void	cache_cleaner_func(struct work_struct *unused);
 
 /*
  * locking for the reply cache:
  * A cache entry is "single use" if c_state == RC_INPROG
  * Otherwise, when accessing _prev or _next, the lock must be held.
  */
 static DEFINE_SPINLOCK(cache_lock);
+static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
 
 /*
  * Put a cap on the size of the DRC based on the amount of available
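DECLARE_DELAYED_WORK above defines the work item and binds it to its handler at compile time. For a dynamically allocated object the runtime equivalent would be roughly the following (illustrative sketch, not part of the commit):

static struct delayed_work cache_cleaner;

/* ... in an init path ... */
INIT_DELAYED_WORK(&cache_cleaner, cache_cleaner_func);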
@@ -131,6 +133,8 @@ void nfsd_reply_cache_shutdown(void)
 {
 	struct svc_cacherep	*rp;
 
+	cancel_delayed_work_sync(&cache_cleaner);
+
 	while (!list_empty(&lru_head)) {
 		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
 		nfsd_reply_cache_free_locked(rp);
@@ -146,13 +150,15 @@ void nfsd_reply_cache_shutdown(void)
 }
 
 /*
- * Move cache entry to end of LRU list
+ * Move cache entry to end of LRU list, and queue the cleaner to run if it's
+ * not already scheduled.
  */
 static void
 lru_put_end(struct svc_cacherep *rp)
 {
 	rp->c_timestamp = jiffies;
 	list_move_tail(&rp->c_lru, &lru_head);
+	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 
 /*
@@ -172,6 +178,42 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
 	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
 }
 
+/*
+ * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
+ * Also prune the oldest ones when the total exceeds the max number of entries.
+ */
+static void
+prune_cache_entries(void)
+{
+	struct svc_cacherep *rp, *tmp;
+
+	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
+		if (!nfsd_cache_entry_expired(rp) &&
+		    num_drc_entries <= max_drc_entries)
+			break;
+		nfsd_reply_cache_free_locked(rp);
+	}
+
+	/*
+	 * Conditionally rearm the job. If we cleaned out the list, then
+	 * cancel any pending run (since there won't be any work to do).
+	 * Otherwise, we rearm the job or modify the existing one to run in
+	 * RC_EXPIRE since we just ran the pruner.
+	 */
+	if (list_empty(&lru_head))
+		cancel_delayed_work(&cache_cleaner);
+	else
+		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+}
+
+static void
+cache_cleaner_func(struct work_struct *unused)
+{
+	spin_lock(&cache_lock);
+	prune_cache_entries();
+	spin_unlock(&cache_lock);
+}
+
 /*
  * Search the request hash for an entry that matches the given rqstp.
  * Must be called with cache_lock held. Returns the found entry or
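Note that prune_cache_entries() uses cancel_delayed_work(), not the _sync variant: it runs under cache_lock, and cache_cleaner_func() itself takes cache_lock, so a synchronous cancel there could sleep in atomic context and wait on a pass that can never finish. A sketch of the contrast (comments are mine, not from the commit):

/* Under cache_lock, as in prune_cache_entries(): non-blocking only. */
cancel_delayed_work(&cache_cleaner);		/* dequeues if pending; does not wait */

/* With no locks held, as in nfsd_reply_cache_shutdown(): */
cancel_delayed_work_sync(&cache_cleaner);	/* may sleep; waits out a running pass */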
@@ -192,7 +234,6 @@ nfsd_cache_search(struct svc_rqst *rqstp)
 	hlist_for_each_entry(rp, hn, rh, c_hash) {
 		if (xid == rp->c_xid && proc == rp->c_proc &&
 		    proto == rp->c_prot && vers == rp->c_vers &&
-		    !nfsd_cache_entry_expired(rp) &&
 		    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
 		    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
 			return rp;
@@ -234,8 +275,11 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
 	if (!list_empty(&lru_head)) {
 		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
 		if (nfsd_cache_entry_expired(rp) ||
-		    num_drc_entries >= max_drc_entries)
+		    num_drc_entries >= max_drc_entries) {
+			lru_put_end(rp);
+			prune_cache_entries();
 			goto setup_entry;
+		}
 	}
 
 	spin_unlock(&cache_lock);
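One subtlety in this last hunk: the ordering of the two new calls matters. At this point rp has just tested as expired (or the cache is over its cap), and prune_cache_entries() frees exactly such entries from the head of the LRU, so the entry must be refreshed first. A commented sketch of those two lines (annotations are mine):

lru_put_end(rp);	/* reset c_timestamp and move rp to the tail, so
			 * the pruner below won't free the entry we're
			 * about to recycle */
prune_cache_entries();	/* now drops only the other stale entries */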
