diff --git a/[refs] b/[refs]
index d60ae80d53f9..7fb9da71825a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d7144556195ab04a148e722a93103da5438bdf78
+refs/heads/master: 4b20db3de8dab005b07c74161cb041db8c5ff3a7
diff --git a/trunk/include/linux/kref.h b/trunk/include/linux/kref.h
index 65af6887872f..4972e6e9ca93 100644
--- a/trunk/include/linux/kref.h
+++ b/trunk/include/linux/kref.h
@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
 	}
 	return 0;
 }
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+	return atomic_add_unless(&kref->refcount, 1, 0);
+}
 #endif /* _KREF_H_ */
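
To make the locking pattern described in the kerneldoc concrete, here is a
minimal usage sketch. It is not part of the patch: the names my_obj,
my_obj_list, my_obj_lock, my_obj_create, my_obj_find, my_obj_release and
my_obj_put are all hypothetical; only kref_init, kref_get_unless_zero,
kref_put and the standard RCU/list primitives come from the kernel API. It
shows an object kept in an RCU-protected lookup list, where removal from
the list happens in the destructor and the RCU read side checks the return
value of kref_get_unless_zero.

/* Hypothetical example, not part of the patch. */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref refcount;
	struct list_head node;		/* linked into my_obj_list */
	struct rcu_head rcu;
	int id;
};

static LIST_HEAD(my_obj_list);
static DEFINE_SPINLOCK(my_obj_lock);	/* write-side lock for the list */

static struct my_obj *my_obj_create(int id)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->id = id;
	kref_init(&obj->refcount);	/* refcount starts at 1 */
	spin_lock(&my_obj_lock);
	list_add_rcu(&obj->node, &my_obj_list);
	spin_unlock(&my_obj_lock);
	return obj;
}

/*
 * Called when the last reference is dropped.  Removal from the lookup
 * structure happens here, in the destructor -- exactly the case the
 * kerneldoc above describes -- so kref_put() callers need no list lock.
 */
static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refcount);

	spin_lock(&my_obj_lock);
	list_del_rcu(&obj->node);
	spin_unlock(&my_obj_lock);
	kfree_rcu(obj, rcu);
}

/*
 * RCU lookup: no lock on the read side.  A reader may still see an
 * object whose refcount has already hit zero; kref_get_unless_zero()
 * returns 0 in that case, so the *return value check* is what makes
 * the lockless lookup safe.
 */
static struct my_obj *my_obj_find(int id)
{
	struct my_obj *obj;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &my_obj_list, node) {
		if (obj->id == id && kref_get_unless_zero(&obj->refcount)) {
			rcu_read_unlock();
			return obj;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void my_obj_put(struct my_obj *obj)
{
	kref_put(&obj->refcount, my_obj_release);
}

The design point this sketch tries to illustrate: without
kref_get_unless_zero, the lookup path would need at least a read lock
around lookup + kref_get, and every kref_put would need the write lock.
With it, the read side is entirely lockless under RCU, and the write-side
lock is taken only once, inside the destructor, when the object is
actually removed from the lookup structure.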