Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "Misc fixes:

   - Fix an S390 boot hang that was caused by the lock-break logic.
     Remove lock-break to begin with, as review suggested it was
     unreasonably fragile and our confidence in its continued good
     health is lower than our confidence in its removal.

   - Remove the lockdep cross-release checking code for now, because of
     unresolved false positive warnings. This should make lockdep work
     well everywhere again.

   - Get rid of the final (and single) ACCESS_ONCE() straggler and
     remove the API from v4.15.

   - Fix a liblockdep build warning"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools/lib/lockdep: Add missing declaration of 'pr_cont()'
  checkpatch: Remove ACCESS_ONCE() warning
  compiler.h: Remove ACCESS_ONCE()
  tools/include: Remove ACCESS_ONCE()
  tools/perf: Convert ACCESS_ONCE() to READ_ONCE()
  locking/lockdep: Remove the cross-release locking checks
  locking/core: Remove break_lock field when CONFIG_GENERIC_LOCKBREAK=y
  locking/core: Fix deadlock during boot on systems with GENERIC_LOCKBREAK
Linus Torvalds committed Dec 15, 2017
2 parents a58653c + 92ccc26 commit 1f76a75
Showing 15 changed files with 60 additions and 1,797 deletions.
874 changes: 0 additions & 874 deletions Documentation/locking/crossrelease.txt

This file was deleted.

47 changes: 11 additions & 36 deletions include/linux/compiler.h
@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
* READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
* particular ordering. One way to make the compiler aware of ordering is to
* put the two invocations of READ_ONCE or WRITE_ONCE in different C
* statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
* least two memcpy()s: one for the __builtin_memcpy() and then one for
* the macro doing the copy of variable - '__u' allocated on the stack.
* These two macros will also work on aggregate data types like structs or
* unions. If the size of the accessed data type exceeds the word size of
* the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
* fall back to memcpy(). There's at least two memcpy()s: one for the
* __builtin_memcpy() and then one for the macro doing the copy of variable
* - '__u' allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
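
The rewritten comment keeps the key usage rule: two READ_ONCE()/WRITE_ONCE() invocations are only kept in program order by the compiler when they sit in separate C statements, and they constrain the compiler only, not the CPU. A minimal sketch of that pattern (the variable and function names are illustrative, not part of this diff; use smp_load_acquire()/smp_store_release() where inter-CPU ordering is required):

	#include <linux/compiler.h>

	static int flag, data;			/* illustrative variables */

	static void writer_side(int val)
	{
		WRITE_ONCE(data, val);		/* statement 1 */
		WRITE_ONCE(flag, 1);		/* statement 2: compiler keeps the order */
	}

	static int reader_side(void)
	{
		if (!READ_ONCE(flag))		/* statement 1 */
			return 0;
		return READ_ONCE(data);		/* statement 2 */
	}
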
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")

/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
* but only when the compiler is aware of some particular ordering. One way
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
* on a union member will work as long as the size of the member matches the
* size of the union and the size is smaller than word size.
*
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
* between process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
* If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

#endif /* __LINUX_COMPILER_H */
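
With ACCESS_ONCE() gone from compiler.h, remaining users were converted to READ_ONCE()/WRITE_ONCE() (see the tools/perf commit in this merge). A hypothetical before/after sketch of that conversion; the struct and function below are illustrative and not taken from any of these commits:

	#include <linux/compiler.h>

	struct seq_state {
		unsigned long counter;		/* hypothetical field */
	};

	static void bump(struct seq_state *s)
	{
		unsigned long v;

		/* old, no longer compiles after this merge:
		 *	v = ACCESS_ONCE(s->counter);
		 *	ACCESS_ONCE(s->counter) = v + 1;
		 */
		v = READ_ONCE(s->counter);
		WRITE_ONCE(s->counter, v + 1);
	}

Unlike ACCESS_ONCE(), which only worked on scalar types, READ_ONCE()/WRITE_ONCE() also accept aggregate types by falling back to memcpy(), as the updated comment above notes.
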
45 changes: 0 additions & 45 deletions include/linux/completion.h
@@ -10,9 +10,6 @@
*/

#include <linux/wait.h>
#ifdef CONFIG_LOCKDEP_COMPLETIONS
#include <linux/lockdep.h>
#endif

/*
* struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,16 @@
struct completion {
unsigned int done;
wait_queue_head_t wait;
#ifdef CONFIG_LOCKDEP_COMPLETIONS
struct lockdep_map_cross map;
#endif
};

#ifdef CONFIG_LOCKDEP_COMPLETIONS
static inline void complete_acquire(struct completion *x)
{
lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
}

static inline void complete_release(struct completion *x)
{
lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
}

static inline void complete_release_commit(struct completion *x)
{
lock_commit_crosslock((struct lockdep_map *)&x->map);
}

#define init_completion_map(x, m) \
do { \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
(m)->name, (m)->key, 0); \
__init_completion(x); \
} while (0)

#define init_completion(x) \
do { \
static struct lock_class_key __key; \
lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
"(completion)" #x, \
&__key, 0); \
__init_completion(x); \
} while (0)
#else
#define init_completion_map(x, m) __init_completion(x)
#define init_completion(x) __init_completion(x)
static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
static inline void complete_release_commit(struct completion *x) {}
#endif

#ifdef CONFIG_LOCKDEP_COMPLETIONS
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#endif

#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); }))
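
The hunks above strip the cross-release instrumentation out of struct completion and its initializers; the completion API seen by callers does not change. A minimal usage sketch with illustrative names (not taken from this diff):

	#include <linux/completion.h>

	static DECLARE_COMPLETION(setup_done);		/* illustrative name */

	static void setup_side(void)
	{
		/* ... perform the one-off setup ... */
		complete(&setup_done);			/* wake any waiter */
	}

	static void consumer_side(void)
	{
		wait_for_completion(&setup_done);	/* sleeps until complete() */
		/* setup is guaranteed to have finished here */
	}
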
125 changes: 0 additions & 125 deletions include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
/*
* Whether it's a crosslock.
*/
int cross;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,95 +261,8 @@ struct held_lock {
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
#ifdef CONFIG_LOCKDEP_CROSSRELEASE
/*
* Generation id.
*
* A value of cross_gen_id will be stored when holding this,
* which is globally increased whenever each crosslock is held.
*/
unsigned int gen_id;
#endif
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCK_TRACE_ENTRIES 5

/*
* This is for keeping locks waiting for commit so that true dependencies
* can be added at commit step.
*/
struct hist_lock {
/*
* Id for each entry in the ring buffer. This is used to
* decide whether the ring buffer was overwritten or not.
*
* For example,
*
* |<----------- hist_lock ring buffer size ------->|
* pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
* wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
*
* where 'p' represents an acquisition in process
* context, 'i' represents an acquisition in irq
* context.
*
* In this example, the ring buffer was overwritten by
* acquisitions in irq context, that should be detected on
* rollback or commit.
*/
unsigned int hist_id;

/*
* Separate stack_trace data. This will be used at commit step.
*/
struct stack_trace trace;
unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];

/*
* Separate hlock instance. This will be used at commit step.
*
* TODO: Use a smaller data structure containing only necessary
* data. However, we should make lockdep code able to handle the
* smaller one first.
*/
struct held_lock hlock;
};

/*
* To initialize a lock as crosslock, lockdep_init_map_crosslock() should
* be called instead of lockdep_init_map().
*/
struct cross_lock {
/*
* When more than one acquisition of crosslocks are overlapped,
* we have to perform commit for them based on cross_gen_id of
* the first acquisition, which allows us to add more true
* dependencies.
*
* Moreover, when no acquisition of a crosslock is in progress,
* we should not perform commit because the lock might not exist
* any more, which might cause incorrect memory access. So we
* have to track the number of acquisitions of a crosslock.
*/
int nr_acquire;

/*
* Separate hlock instance. This will be used at commit step.
*
* TODO: Use a smaller data structure containing only necessary
* data. However, we should make lockdep code able to handle the
* smaller one first.
*/
struct held_lock hlock;
};

struct lockdep_map_cross {
struct lockdep_map map;
struct cross_lock xlock;
};
#endif

/*
* Initialization, self-test and debugging-output methods:
*/
@@ -560,37 +467,6 @@ enum xhlock_context_t {
XHLOCK_CTX_NR,
};

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
const char *name,
struct lock_class_key *key,
int subclass);
extern void lock_commit_crosslock(struct lockdep_map *lock);

/*
* What we essentially have to initialize is 'nr_acquire'. Other members
* will be initialized in add_xlock().
*/
#define STATIC_CROSS_LOCK_INIT() \
{ .nr_acquire = 0,}

#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
{ .map.name = (_name), .map.key = (void *)(_key), \
.map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }

/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
*/
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), .cross = 0, }

extern void crossrelease_hist_start(enum xhlock_context_t c);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_invariant_state(bool force);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#endif /* CROSSRELEASE */

#ifdef CONFIG_LOCK_STAT

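
With the crosslock variants removed, lockdep is left with a single STATIC_LOCKDEP_MAP_INIT() definition, presumably just .name and .key now that the .cross member no longer exists. A hedged sketch of the usual consumer pattern for a statically defined dep_map; the names below are illustrative, not from this diff:

	#include <linux/lockdep.h>

	/* only meaningful in lockdep-enabled (CONFIG_DEBUG_LOCK_ALLOC) builds */
	static struct lock_class_key my_subsys_key;	/* hypothetical key */

	static struct lockdep_map my_subsys_dep_map =
		STATIC_LOCKDEP_MAP_INIT("my_subsys", &my_subsys_key);
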
3 changes: 0 additions & 3 deletions include/linux/rwlock_types.h
@@ -10,9 +10,6 @@
*/
typedef struct {
arch_rwlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
11 changes: 0 additions & 11 deletions include/linux/sched.h
@@ -849,17 +849,6 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
struct hist_lock *xhlocks; /* Crossrelease history locks */
unsigned int xhlock_idx;
/* For restoring at history boundaries */
unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
unsigned int hist_id;
/* For overwrite check at each context exit */
unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
unsigned int in_ubsan;
#endif
5 changes: 0 additions & 5 deletions include/linux/spinlock.h
@@ -107,16 +107,11 @@ do { \

#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock) (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
* This barrier must provide two things:
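
After this hunk, raw_spin_is_contended() no longer reads the removed break_lock field and instead uses arch_spin_is_contended() where the architecture provides it (or evaluates to 0). The usual consumer of this information is a lock-break loop of the kind cond_resched_lock()/spin_needbreak() implement; a rough sketch, with hypothetical helpers more_work() and do_one_item():

	#include <linux/spinlock.h>
	#include <linux/sched.h>

	static DEFINE_SPINLOCK(my_lock);		/* illustrative lock */

	static void process_many_items(void)
	{
		spin_lock(&my_lock);
		while (more_work()) {			/* hypothetical helper */
			do_one_item();			/* hypothetical helper */
			if (spin_is_contended(&my_lock)) {
				/* briefly drop the lock so a waiter can make progress */
				spin_unlock(&my_lock);
				cond_resched();
				spin_lock(&my_lock);
			}
		}
		spin_unlock(&my_lock);
	}
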
3 changes: 0 additions & 3 deletions include/linux/spinlock_types.h
@@ -19,9 +19,6 @@

typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
unsigned int magic, owner_cpu;
void *owner;
Diffs for the remaining changed files were not loaded on this page.
