Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 362338
b: refs/heads/master
c: 386afc9
h: refs/heads/master
v: v3
  • Loading branch information
Linus Torvalds committed Apr 9, 2013
1 parent 17857b2 commit 2fa7e70
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 3e2e0d2c222bdf5bafd722dec1618fa6073ef372
refs/heads/master: 386afc91144b36b42117b0092893f15bc8798a80
22 changes: 14 additions & 8 deletions trunk/include/linux/preempt.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,14 +93,20 @@ do { \

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * !CONFIG_PREEMPT_COUNT stubs.
 *
 * NOTE(review): this is a diff view — the plain do-nothing definitions
 * directly below are the old (removed) versions; the barrier() versions
 * that follow replace them in the committed file.
 */
#define preempt_disable() do { } while (0)
#define sched_preempt_enable_no_resched() do { } while (0)
#define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0)

#define preempt_disable_notrace() do { } while (0)
#define preempt_enable_no_resched_notrace() do { } while (0)
#define preempt_enable_notrace() do { } while (0)
/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable() barrier()
#define sched_preempt_enable_no_resched() barrier()
#define preempt_enable_no_resched() barrier()
#define preempt_enable() barrier()

/*
 * _notrace variants: same compiler barriers here — presumably these skip
 * the tracing hooks of the CONFIG_PREEMPT_COUNT versions; verify against
 * that branch of the #if.
 */
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()

#endif /* CONFIG_PREEMPT_COUNT */

Expand Down
29 changes: 18 additions & 11 deletions trunk/include/linux/spinlock_up.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,10 @@
* In the debug case, 1 means unlocked, 0 means locked. (the values
* are inverted, to catch initialization bugs)
*
* No atomicity anywhere, we are on UP.
* No atomicity anywhere, we are on UP. However, we still need
* the compiler barriers, because we do not want the compiler to
* move potentially faulting instructions (notably user accesses)
* into the locked sequence, resulting in non-atomic execution.
*/

#ifdef CONFIG_DEBUG_SPINLOCK
Expand All @@ -25,46 +28,50 @@
/*
 * Take the (UP, debug) spinlock: mark it locked (0 == locked in the
 * inverted debug encoding, per the header comment above) and emit a
 * compiler barrier so critical-section accesses cannot be reordered
 * before the locking store.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

/*
 * Lock variant that first disables local interrupts — saving the previous
 * interrupt state into @flags via local_irq_save() — and then takes the
 * lock exactly as arch_spin_lock() does (store, then compiler barrier).
 */
static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	local_irq_save(flags);
	lock->slock = 0;
	barrier();
}

/*
 * Trylock: snapshot the old value, unconditionally store "locked" (0),
 * and report success iff the lock was previously unlocked (slock > 0 in
 * the debug encoding).  The barrier keeps the would-be critical section
 * from being hoisted above the locking store.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

/*
 * Release the lock.  The compiler barrier comes BEFORE the unlocking
 * store so critical-section accesses cannot sink below the release.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks.  No debug version.
 *
 * NOTE(review): diff artifact — the first set of definitions below are
 * the old (removed) pure no-ops; the barrier() versions that follow
 * replace them.  The (void)(lock) consumes the argument to avoid
 * unused-variable warnings while keeping it evaluated.
 */
#define arch_read_lock(lock) do { (void)(lock); } while (0)
#define arch_write_lock(lock) do { (void)(lock); } while (0)
#define arch_read_trylock(lock) ({ (void)(lock); 1; })
#define arch_write_trylock(lock) ({ (void)(lock); 1; })
#define arch_read_unlock(lock) do { (void)(lock); } while (0)
#define arch_write_unlock(lock) do { (void)(lock); } while (0)
/* Barrier-carrying replacements: no atomicity needed on UP, but the
 * compiler must not move (potentially faulting) accesses across the
 * lock/unlock points.  Trylocks always succeed (return 1). */
#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
/* Non-debug UP build: a spinlock is never observed locked. */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
/*
 * NOTE(review): diff artifact — the first four definitions are the old
 * (removed) no-ops; the barrier() versions below replace them so the
 * compiler cannot move code across the lock boundaries even when the
 * lock itself compiles away.  Trylock always succeeds (returns 1).
 */
# define arch_spin_lock(lock) do { (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

/* UP: a spinlock can never be contended. */
#define arch_spin_is_contended(lock) (((void)(lock), 0))
Expand Down

0 comments on commit 2fa7e70

Please sign in to comment.