Commit 04b0c49
---
yaml
---
r: 73214
b: refs/heads/master
c: cacd40e
h: refs/heads/master
v: v3
David Miller authored and Jeff Garzik committed Nov 5, 2007
1 parent 66ed3b3 commit 04b0c49
Showing 98 changed files with 303 additions and 608 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 2655e2cee2d77459fcb7e10228259e4ee0328697
refs/heads/master: cacd40e07c5ad7068221b3910098f1d364e74e45
23 changes: 0 additions & 23 deletions trunk/Documentation/local_ops.txt
@@ -45,29 +45,6 @@ long fails. The definition looks like :
typedef struct { atomic_long_t a; } local_t;


* Rules to follow when using local atomic operations

- Variables touched by local ops must be per cpu variables.
- _Only_ the CPU owner of these variables must write to them.
- This CPU can use local ops from any context (process, irq, softirq, nmi, ...)
to update its local_t variables.
- Preemption (or interrupts) must be disabled when using local ops in
process context to make sure the process won't be migrated to a
different CPU between getting the per-cpu variable and doing the
actual local op.
- When using local ops in interrupt context, no special care must be
taken on a mainline kernel, since they will run on the local CPU with
preemption already disabled. I suggest, however, to explicitly
disable preemption anyway to make sure it will still work correctly on
-rt kernels.
- Reading the local cpu variable will provide the current copy of the
variable.
- Reads of these variables can be done from any CPU, because updates to
"long", aligned, variables are always atomic. Since no memory
synchronization is done by the writer CPU, an outdated copy of the
variable can be read when reading some _other_ cpu's variables.


* Rules to follow when using local atomic operations

- Variables touched by local ops must be per cpu variables.
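As a quick illustration of the usage rules quoted above, a per-cpu counter driven by local ops might look like the following sketch (the counter and function names are invented; this is not part of the commit):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/local.h>

/* Hypothetical per-cpu event counter, used only to illustrate the rules. */
static DEFINE_PER_CPU(local_t, demo_counter) = LOCAL_INIT(0);

static void demo_count_event(void)
{
	/* In process context, disable preemption so the task cannot migrate
	 * between fetching the per-cpu variable and doing the local op. */
	preempt_disable();
	local_inc(&__get_cpu_var(demo_counter));
	preempt_enable();
}

static long demo_read_counter(int cpu)
{
	/* Reads may run on any CPU; an aligned long read is atomic, but the
	 * value may be stale since the writer does no memory synchronization. */
	return local_read(&per_cpu(demo_counter, cpu));
}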
3 changes: 0 additions & 3 deletions trunk/Documentation/video4linux/CARDLIST.em28xx
@@ -8,7 +8,4 @@
7 -> Leadtek Winfast USB II (em2800)
8 -> Kworld USB2800 (em2800)
9 -> Pinnacle Dazzle DVC 90 (em2820/em2840) [2304:0207]
10 -> Hauppauge WinTV HVR 900 (em2880)
11 -> Terratec Hybrid XS (em2880)
12 -> Kworld PVR TV 2800 RF (em2820/em2840)
13 -> Terratec Prodigy XS (em2880)
6 changes: 2 additions & 4 deletions trunk/MAINTAINERS
@@ -2549,7 +2549,7 @@ S: Supported

MISCELLANEOUS MCA-SUPPORT
P: James Bottomley
M: James.Bottomley@HansenPartnership.com
M: jejb@steeleye.com
L: linux-kernel@vger.kernel.org
S: Maintained

@@ -3301,11 +3301,9 @@ S: Maintained

SCSI SUBSYSTEM
P: James E.J. Bottomley
M: James.Bottomley@HansenPartnership.com
M: James.Bottomley@SteelEye.com
L: linux-scsi@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
T: git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
T: git kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-pending-2.6.git
S: Maintained

SCSI TAPE DRIVER
19 changes: 3 additions & 16 deletions trunk/Makefile
@@ -528,22 +528,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)

# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
# But warn user when we do so
warn-assign = \
$(warning "WARNING: Appending $$K$(1) ($(K$(1))) from $(origin K$(1)) to kernel $$$(1)")

ifneq ($(KCPPFLAGS),)
$(call warn-assign,CPPFLAGS)
KBUILD_CPPFLAGS += $(KCPPFLAGS)
endif
ifneq ($(KAFLAGS),)
$(call warn-assign,AFLAGS)
KBUILD_AFLAGS += $(KAFLAGS)
endif
ifneq ($(KCFLAGS),)
$(call warn-assign,CFLAGS)
KBUILD_CFLAGS += $(KCFLAGS)
endif
KBUILD_CPPFLAGS += $(CPPFLAGS)
KBUILD_AFLAGS += $(AFLAGS)
KBUILD_CFLAGS += $(CFLAGS)

# Use --build-id when available.
LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
4 changes: 1 addition & 3 deletions trunk/arch/s390/kernel/early.c
@@ -200,13 +200,11 @@ static noinline __init void find_memory_chunks(unsigned long memsize)
cc = __tprot(addr);
while (cc == old_cc) {
addr += CHUNK_INCR;
if (addr >= memsize)
break;
cc = __tprot(addr);
#ifndef CONFIG_64BIT
if (addr == ADDR2G)
break;
#endif
cc = __tprot(addr);
}

if (old_addr != addr &&
8 changes: 2 additions & 6 deletions trunk/arch/s390/kernel/process.c
@@ -92,15 +92,14 @@ EXPORT_SYMBOL(unregister_idle_notifier);

void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
#ifdef CONFIG_SMP
struct s390_idle_data *idle;

idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_time += get_clock() - idle->idle_enter;
idle->in_idle = 0;
spin_unlock(&idle->lock);
#endif

/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);

@@ -115,9 +114,7 @@ extern void s390_handle_mcck(void);
static void default_idle(void)
{
int cpu, rc;
#ifdef CONFIG_SMP
struct s390_idle_data *idle;
#endif

/* CPU is going idle. */
cpu = smp_processor_id();
@@ -154,14 +151,13 @@ static void default_idle(void)
s390_handle_mcck();
return;
}
#ifdef CONFIG_SMP

idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_count++;
idle->in_idle = 1;
idle->idle_enter = get_clock();
spin_unlock(&idle->lock);
#endif
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
6 changes: 3 additions & 3 deletions trunk/arch/s390/kernel/smp.c
@@ -788,14 +788,14 @@ static ssize_t show_idle_time(struct sys_device *dev, char *buf)
}
new_time = idle->idle_time;
spin_unlock_irq(&idle->lock);
return sprintf(buf, "%llu\n", new_time >> 12);
return sprintf(buf, "%llu us\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static SYSDEV_ATTR(idle_time, 0444, show_idle_time, NULL);

static struct attribute *cpu_attrs[] = {
&attr_capability.attr,
&attr_idle_count.attr,
&attr_idle_time_us.attr,
&attr_idle_time.attr,
NULL,
};

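An aside on the show_idle_time() hunk above (not from the commit): the s390 TOD clock is laid out so that bit 51 corresponds to one microsecond, i.e. 4096 TOD units per microsecond, which is why shifting the accumulated delta right by 12 yields microseconds and why one side of the hunk names the attribute idle_time_us. A trivial sketch of that conversion:

/* Illustrative helper only: 2^12 = 4096 TOD units per microsecond. */
static inline unsigned long long tod_delta_to_us(unsigned long long tod_delta)
{
	return tod_delta >> 12;
}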
2 changes: 1 addition & 1 deletion trunk/arch/s390/kernel/time.c
@@ -307,7 +307,7 @@ static cycle_t read_tod_clock(void)

static struct clocksource clocksource_tod = {
.name = "tod",
.rating = 400,
.rating = 100,
.read = read_tod_clock,
.mask = -1ULL,
.mult = 1000,
7 changes: 2 additions & 5 deletions trunk/arch/um/Makefile
@@ -70,12 +70,9 @@ include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
# in KBUILD_CFLAGS. Otherwise, it would cause ld to complain about the two different
# errnos.
# These apply to kernelspace only.
#
# strip leading and trailing whitespace to make the USER_CFLAGS removal of these
# defines more robust

KERNEL_DEFINES = $(strip -Derrno=kernel_errno -Dsigprocmask=kernel_sigprocmask \
-Dmktime=kernel_mktime $(ARCH_KERNEL_DEFINES))
KERNEL_DEFINES = -Derrno=kernel_errno -Dsigprocmask=kernel_sigprocmask \
-Dmktime=kernel_mktime $(ARCH_KERNEL_DEFINES)
KBUILD_CFLAGS += $(KERNEL_DEFINES)
KBUILD_CFLAGS += $(call cc-option,-fno-unit-at-a-time,)

2 changes: 1 addition & 1 deletion trunk/arch/um/drivers/ubd_kern.c
@@ -705,7 +705,7 @@ static int ubd_add(int n, char **error_out)
ubd_dev->size = ROUND_BLOCK(ubd_dev->size);

INIT_LIST_HEAD(&ubd_dev->restart);
sg_init_table(ubd_dev->sg, MAX_SG);
sg_init_table(&ubd_dev->sg, MAX_SG);

err = -ENOMEM;
ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
8 changes: 3 additions & 5 deletions trunk/arch/x86/boot/pmjump.S
@@ -28,19 +28,17 @@
* void protected_mode_jump(u32 entrypoint, u32 bootparams);
*/
protected_mode_jump:
xorl %ebx, %ebx # Flag to indicate this is a boot
movl %edx, %esi # Pointer to boot_params table
movl %eax, 2f # Patch ljmpl instruction
jmp 1f # Short jump to flush instruction q.

1:
movw $__BOOT_DS, %cx
xorl %ebx, %ebx # Per the 32-bit boot protocol
xorl %ebp, %ebp # Per the 32-bit boot protocol
xorl %edi, %edi # Per the 32-bit boot protocol

movl %cr0, %edx
orb $1, %dl # Protected mode (PE) bit
movl %edx, %cr0
jmp 1f # Short jump to serialize on 386/486
1:

movw %cx, %ds
movw %cx, %es
69 changes: 35 additions & 34 deletions trunk/arch/x86/lguest/boot.c
@@ -93,7 +93,38 @@ struct lguest_data lguest_data = {
};
static cycle_t clock_base;

/*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
* real optimization trick!
*
* When lazy_mode is set, it means we're allowed to defer all hypercalls and do
* them as a batch when lazy_mode is eventually turned off. Because hypercalls
* are reasonably expensive, batching them up makes sense. For example, a
* large munmap might update dozens of page table entries: that code calls
* paravirt_enter_lazy_mmu(), does the dozen updates, then calls
* lguest_leave_lazy_mode().
*
* So, when we're in lazy mode, we call async_hypercall() to store the call for
* future processing. When lazy mode is turned off we issue a hypercall to
* flush the stored calls.
*/
static void lguest_leave_lazy_mode(void)
{
paravirt_leave_lazy(paravirt_get_lazy_mode());
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
}

static void lazy_hcall(unsigned long call,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3)
{
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
hcall(call, arg1, arg2, arg3);
else
async_hcall(call, arg1, arg2, arg3);
}

/* async_hcall() is pretty simple: I'm quite proud of it really. We have a
* ring buffer of stored hypercalls which the Host will run though next time we
* do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
* arguments, and a "hcall_status" word which is 0 if the call is ready to go,
@@ -103,8 +134,8 @@ static cycle_t clock_base;
* full and we just make the hypercall directly. This has the nice side
* effect of causing the Host to run all the stored calls in the ring buffer
* which empties it for next time! */
static void async_hcall(unsigned long call, unsigned long arg1,
unsigned long arg2, unsigned long arg3)
void async_hcall(unsigned long call,
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
/* Note: This code assumes we're uniprocessor. */
static unsigned int next_call;
@@ -130,37 +161,7 @@ static void async_hcall(unsigned long call, unsigned long arg1,
}
local_irq_restore(flags);
}

/*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
* real optimization trick!
*
* When lazy_mode is set, it means we're allowed to defer all hypercalls and do
* them as a batch when lazy_mode is eventually turned off. Because hypercalls
* are reasonably expensive, batching them up makes sense. For example, a
* large munmap might update dozens of page table entries: that code calls
* paravirt_enter_lazy_mmu(), does the dozen updates, then calls
* lguest_leave_lazy_mode().
*
* So, when we're in lazy mode, we call async_hcall() to store the call for
* future processing. */
static void lazy_hcall(unsigned long call,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3)
{
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
hcall(call, arg1, arg2, arg3);
else
async_hcall(call, arg1, arg2, arg3);
}

/* When lazy mode is turned off reset the per-cpu lazy mode variable and then
* issue a hypercall to flush any stored calls. */
static void lguest_leave_lazy_mode(void)
{
paravirt_leave_lazy(paravirt_get_lazy_mode());
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
}
/*:*/

/*G:033
* After that diversion we return to our first native-instruction
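To make the batching described in the lguest G:035 comment above concrete, here is a stand-alone sketch of the same "queue while lazy, flush on exit" pattern (plain userspace C with invented names; in lguest itself the ring is shared with the Host and drained on the next real hypercall):

#include <stdio.h>

#define RING_SIZE 64

struct stored_call { unsigned long call, arg1, arg2, arg3; };

static struct stored_call ring[RING_SIZE];
static unsigned int next_call;
static int lazy_mode;

static void do_call_now(unsigned long call, unsigned long a1,
			unsigned long a2, unsigned long a3)
{
	/* Stands in for the real hypercall; here we just log it. */
	printf("call %lu(%lu, %lu, %lu)\n", call, a1, a2, a3);
}

static void flush_stored_calls(void)
{
	unsigned int i;

	for (i = 0; i < next_call; i++)
		do_call_now(ring[i].call, ring[i].arg1,
			    ring[i].arg2, ring[i].arg3);
	next_call = 0;
}

static void lazy_call(unsigned long call, unsigned long a1,
		      unsigned long a2, unsigned long a3)
{
	if (!lazy_mode || next_call == RING_SIZE) {
		/* Not batching, or the ring is full: flush and call directly. */
		flush_stored_calls();
		do_call_now(call, a1, a2, a3);
	} else {
		struct stored_call c = { call, a1, a2, a3 };
		ring[next_call++] = c;
	}
}

int main(void)
{
	lazy_mode = 1;			/* enter "lazy" mode */
	lazy_call(1, 10, 11, 12);	/* queued */
	lazy_call(2, 20, 21, 22);	/* queued */
	lazy_mode = 0;
	flush_stored_calls();		/* leaving lazy mode flushes the batch */
	return 0;
}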
1 change: 0 additions & 1 deletion trunk/drivers/ata/ata_piix.c
@@ -621,7 +621,6 @@ struct ich_laptop {
static const struct ich_laptop ich_laptop[] = {
/* devid, subvendor, subdev */
{ 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
{ 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
{ 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
{ 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
{ 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
2 changes: 1 addition & 1 deletion trunk/drivers/block/paride/pt.c
@@ -664,7 +664,7 @@ static int pt_open(struct inode *inode, struct file *file)
goto out;

err = -EROFS;
if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & 2))
if ((!tape->flags & PT_WRITE_OK) && (file->f_mode & 2))
goto out;

if (!(iminor(inode) & 128))
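The pt.c hunk above hinges on an operator-precedence subtlety: ! binds more tightly than &, so !tape->flags & PT_WRITE_OK negates the whole flags word first and then masks it, which is not the same test as !(tape->flags & PT_WRITE_OK). A stand-alone illustration (PT_WRITE_OK's value is invented for the example):

#include <stdio.h>

#define PT_WRITE_OK 0x02	/* invented bit value, for illustration only */

int main(void)
{
	unsigned int flags = 0x01;	/* some flag set, but not PT_WRITE_OK */

	/* Intended test: the write-OK bit is clear, so this prints 1. */
	printf("!(flags & PT_WRITE_OK) = %d\n", !(flags & PT_WRITE_OK));

	/* Precedence trap: !flags is 0 whenever any bit is set, so this is
	 * always 0 here and the read-only check would be skipped. */
	printf("!flags & PT_WRITE_OK  = %d\n", !flags & PT_WRITE_OK);

	return 0;
}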
2 changes: 1 addition & 1 deletion trunk/drivers/char/virtio_console.c
@@ -141,7 +141,7 @@ int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
* never remove the console device we never need this pointer again.
*
* Finally we put our input buffer in the input queue, ready to receive. */
static int __devinit virtcons_probe(struct virtio_device *dev)
static int virtcons_probe(struct virtio_device *dev)
{
int err;
struct hvc_struct *hvc;
22 changes: 4 additions & 18 deletions trunk/drivers/ide/Kconfig
@@ -152,22 +152,9 @@ config BLK_DEV_IDEDISK
If unsure, say Y.

config IDEDISK_MULTI_MODE
bool "Use multiple sector mode for Programmed Input/Output by default"
help
This setting is irrelevant for most IDE disks, with direct memory
access, to which multiple sector mode does not apply. Multiple sector
mode is a feature of most modern IDE hard drives, permitting the
transfer of multiple sectors per Programmed Input/Output interrupt,
rather than the usual one sector per interrupt. When this feature is
enabled, it can reduce operating system overhead for disk Programmed
Input/Output. On some systems, it also can increase the data
throughput of Programmed Input/Output. Some drives, however, seemed
to run slower with multiple sector mode enabled. Some drives claimed
to support multiple sector mode, but lost data at some settings.
Under rare circumstances, such failures could result in massive
filesystem corruption.

If you get the following error, try to say Y here:
bool "Use multi-mode by default"
help
If you get this error, try to say Y here:

hda: set_multmode: status=0x51 { DriveReady SeekComplete Error }
hda: set_multmode: error=0x04 { DriveStatusError }
@@ -393,10 +380,9 @@ config IDEPCI_SHARE_IRQ
config IDEPCI_PCIBUS_ORDER
def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI

# TODO: split it on per host driver config options (or module parameters)
config BLK_DEV_OFFBOARD
bool "Boot off-board chipsets first support"
depends on BLK_DEV_IDEPCI && (BLK_DEV_AEC62XX || BLK_DEV_GENERIC || BLK_DEV_HPT34X || BLK_DEV_HPT366 || BLK_DEV_PDC202XX_NEW || BLK_DEV_PDC202XX_OLD || BLK_DEV_TC86C001)
depends on BLK_DEV_IDEPCI
help
Normally, IDE controllers built into the motherboard (on-board
controllers) are assigned to ide0 and ide1 while those on add-in PCI
5 changes: 2 additions & 3 deletions trunk/drivers/ide/ide-dma.c
@@ -340,7 +340,7 @@ static int config_drive_for_dma (ide_drive_t *drive)

if (drive->media != ide_disk) {
if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
return 0;
return -1;
}

/*
@@ -752,8 +752,7 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
mode = XFER_MW_DMA_1;
}

printk(KERN_DEBUG "%s: %s mode selected\n", drive->name,
mode ? ide_xfer_verbose(mode) : "no DMA");
printk(KERN_DEBUG "%s: selected mode 0x%x\n", drive->name, mode);

return min(mode, req_mode);
}
