Commit c19ed20
---
yaml
---
r: 88187
b: refs/heads/master
c: 5b13d86
h: refs/heads/master
i:
  88185: a7fbb3e
  88183: b357cbf
v: v3
Ingo Molnar committed Apr 7, 2008
1 parent b53818d commit c19ed20
Showing 5 changed files with 9 additions and 39 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2557a933b795c1988c721ebb871cd735128bb9cb
+refs/heads/master: 5b13d863573e746739ccfc24ac1a9473cfee8df1
15 changes: 1 addition & 14 deletions trunk/arch/x86/kernel/tsc_32.c
@@ -287,27 +287,14 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static struct clocksource clocksource_tsc;
 
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp issue. This can be observed in
- * a very small window right after one CPU updated cycle_last under
- * xtime lock and the other CPU reads a TSC value which is smaller
- * than the cycle_last reference value due to a TSC which is slighty
- * behind. This delta is nowhere else observable, but in that case it
- * results in a forward time jump in the range of hours due to the
- * unsigned delta calculation of the time keeping core code, which is
- * necessary to support wrapping clocksources like pm timer.
- */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret;
 
 	rdtscll(ret);
 
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {
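Note (for reference, not part of the commit): the hunk above drops the cycle_last clamp from read_tsc(). A minimal user-space sketch of the clamp being reverted, using a mock one-field clocksource and a hypothetical read_counter() in place of rdtscll(); only the clamp expression itself comes from the kernel source.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

/* Mock with only the field the guard needs; the real struct clocksource
 * has many more members. */
struct clocksource {
	cycle_t cycle_last;
};

static struct clocksource clocksource_tsc = { .cycle_last = 1000 };

/* Hypothetical counter read standing in for rdtscll(). */
static cycle_t read_counter(void)
{
	return 998;	/* pretend this CPU's TSC is slightly behind */
}

/* The guard being reverted: never return a value older than the one
 * the timekeeping core has already seen. */
static cycle_t read_tsc_clamped(void)
{
	cycle_t ret = read_counter();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

int main(void)
{
	/* Prints 1000, not 998: the small backwards step is hidden. */
	printf("%llu\n", (unsigned long long)read_tsc_clamped());
	return 0;
}
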
23 changes: 3 additions & 20 deletions trunk/arch/x86/kernel/tsc_64.c
@@ -11,7 +11,6 @@
 #include <asm/hpet.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
-#include <asm/vgtod.h>
 
 static int notsc __initdata = 0;

@@ -291,34 +290,18 @@ int __init notsc_setup(char *s)
 
 __setup("notsc", notsc_setup);
 
-static struct clocksource clocksource_tsc;
 
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp. This can be observed in a
- * very small window right after one CPU updated cycle_last under
- * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
- * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
- * that case it results in a forward time jump in the range of hours
- * due to the unsigned delta calculation of the time keeping core
- * code, which is necessary to support wrapping clocksources like pm
- * timer.
- */
+/* clock source code: */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
 	cycle_t ret = (cycle_t)vget_cycles();
-
-	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
-		ret : __vsyscall_gtod_data.clock.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {
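Note (for reference, not part of the commit): the deleted comment attributes the forward jump to the unsigned delta calculation in the timekeeping core. A small stand-alone illustration of that wrap, with made-up cycle values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycle_last = 1000;	/* last value seen by the timekeeping core */
	uint64_t now = 998;		/* a TSC read that is slightly behind */

	/* The core computes an unsigned delta, so a 2-cycle step backwards
	 * wraps around to an enormous positive value. */
	uint64_t delta = now - cycle_last;

	printf("delta = %llu cycles\n", (unsigned long long)delta);
	/* Prints 18446744073709551614; after the core scales and truncates
	 * this into nanoseconds, the visible effect was the forward jump
	 * "in the range of hours" that the deleted comment describes. */
	return 0;
}
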
3 changes: 3 additions & 0 deletions trunk/drivers/virtio/virtio_ring.c
@@ -214,7 +214,10 @@ static void vring_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
+	START_USE(vq);
+	BUG_ON(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	END_USE(vq);
 }
 
 static bool vring_enable_cb(struct virtqueue *_vq)
5 changes: 1 addition & 4 deletions trunk/include/linux/virtio.h
@@ -41,17 +41,14 @@ struct virtqueue
  * Returns NULL or the "data" token handed to add_buf.
  * @disable_cb: disable callbacks
  * vq: the struct virtqueue we're talking about.
- * Note that this is not necessarily synchronous, hence unreliable and only
- * useful as an optimization.
  * @enable_cb: restart callbacks after disable_cb.
  * vq: the struct virtqueue we're talking about.
  * This re-enables callbacks; it returns "false" if there are pending
  * buffers in the queue, to detect a possible race between the driver
  * checking for more work, and enabling callbacks.
  *
  * Locking rules are straightforward: the driver is responsible for
- * locking. No two operations may be invoked simultaneously, with the exception
- * of @disable_cb.
+ * locking. No two operations may be invoked simultaneously.
  *
  * All operations can be called in any context.
  */
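Note (for reference, not part of the commit): the @enable_cb text above describes a race between checking for more work and re-enabling callbacks. A sketch of the polling pattern that return value supports, assuming the vq_ops indirection this header used at the time and a hypothetical handle_buffer() helper; illustrative only.

#include <linux/virtio.h>

static void handle_buffer(void *buf, unsigned int len);	/* hypothetical driver work */

static void drain_queue(struct virtqueue *vq)
{
	void *buf;
	unsigned int len;

	vq->vq_ops->disable_cb(vq);	/* hint: no callbacks while we poll */
again:
	while ((buf = vq->vq_ops->get_buf(vq, &len)) != NULL)
		handle_buffer(buf, len);

	/* enable_cb returns "false" if buffers were added between the last
	 * get_buf and re-enabling callbacks (the race described above),
	 * so loop once more instead of waiting for a callback that already fired. */
	if (!vq->vq_ops->enable_cb(vq)) {
		vq->vq_ops->disable_cb(vq);
		goto again;
	}
}
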
