Commit af073f3

---
yaml
---
r: 65239
b: refs/heads/master
c: ca074a3
h: refs/heads/master
i:
  65237: 319f6ae
  65235: 54b28c3
  65231: 690bfea
v: v3
Ralf Baechle committed Oct 1, 2007
1 parent c32cbe2 commit af073f3
Showing 19 changed files with 107 additions and 99 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: cf8dc57cbac0fe089308f57c333ab763c36782ff
refs/heads/master: ca074a33928762c65e261dd94006c1b6a47e46fa
2 changes: 0 additions & 2 deletions trunk/Documentation/devices.txt
@@ -94,8 +94,6 @@ Your cooperation is appreciated.
9 = /dev/urandom Faster, less secure random number gen.
10 = /dev/aio Asynchronous I/O notification interface
11 = /dev/kmsg Writes to this come out as printk's
12 = /dev/oldmem Used by crashdump kernels to access
the memory of the kernel that crashed.

1 block RAM disk
0 = /dev/ram0 First RAM disk
4 changes: 2 additions & 2 deletions trunk/arch/arm/kernel/bios32.c
@@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
* pcibios_fixup_bus - Called after each bus is probed,
* but before its children are examined.
*/
void pcibios_fixup_bus(struct pci_bus *bus)
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_sys_data *root = bus->sysdata;
struct pci_dev *dev;
@@ -419,7 +419,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
/*
* Convert from Linux-centric to bus-centric addresses for bridge devices.
*/
void
void __devinit
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
2 changes: 1 addition & 1 deletion trunk/arch/x86_64/vdso/voffset.h
@@ -1 +1 @@
#define VDSO_TEXT_OFFSET 0x600
#define VDSO_TEXT_OFFSET 0x500
15 changes: 5 additions & 10 deletions trunk/drivers/char/vt_ioctl.c
@@ -770,7 +770,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
/*
* Switching-from response
*/
acquire_console_sem();
if (vc->vt_newvt >= 0) {
if (arg == 0)
/*
@@ -785,6 +784,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
* complete the switch.
*/
int newvt;
acquire_console_sem();
newvt = vc->vt_newvt;
vc->vt_newvt = -1;
i = vc_allocate(newvt);
@@ -798,6 +798,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
* other console switches..
*/
complete_change_console(vc_cons[newvt].d);
release_console_sem();
}
}

@@ -809,12 +810,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
/*
* If it's just an ACK, ignore it
*/
if (arg != VT_ACKACQ) {
release_console_sem();
if (arg != VT_ACKACQ)
return -EINVAL;
}
}
release_console_sem();

return 0;

@@ -1210,18 +1208,15 @@ void change_console(struct vc_data *new_vc)
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
* is awry.
*
* We need to set vt_newvt *before* sending the signal or we
* have a race.
* is awry
*/
vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
/*
* It worked. Mark the vt to switch to and
* return. The process needs to send us a
* VT_RELDISP ioctl to complete the switch.
*/
vc->vt_newvt = new_vc->vc_num;
return;
}

6 changes: 2 additions & 4 deletions trunk/drivers/media/video/ivtv/ivtv-fileops.c
@@ -754,11 +754,9 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
ivtv_yuv_close(itv);
}
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
itv->output_mode = OUT_NONE;
else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
itv->output_mode = OUT_NONE;
itv->output_mode = OUT_NONE;
else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
itv->output_mode = OUT_NONE;
itv->output_mode = OUT_NONE;

itv->speed = 0;
clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);
2 changes: 1 addition & 1 deletion trunk/drivers/net/mv643xx_eth.c
@@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
}

/* PHY status changed */
if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
struct ethtool_cmd cmd;

if (mii_link_ok(&mp->mii)) {
4 changes: 1 addition & 3 deletions trunk/drivers/net/mv643xx_eth.h
@@ -64,9 +64,7 @@
#define ETH_INT_CAUSE_TX_ERROR (ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX (ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY 0x00010000
#define ETH_INT_CAUSE_STATE 0x00100000
#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
ETH_INT_CAUSE_STATE)
#define ETH_INT_UNMASK_ALL_EXT (ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)

#define ETH_INT_MASK_ALL 0x00000000
#define ETH_INT_MASK_ALL_EXT 0x00000000
2 changes: 1 addition & 1 deletion trunk/drivers/net/wireless/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o

obj-$(CONFIG_USB_ZD1201) += zd1201.o
obj-$(CONFIG_LIBERTAS) += libertas/
obj-$(CONFIG_LIBERTAS_USB) += libertas/

rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
obj-$(CONFIG_RTL8187) += rtl8187.o
46 changes: 12 additions & 34 deletions trunk/fs/splice.c
@@ -1223,33 +1223,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
return -EINVAL;
}

/*
* Do a copy-from-user while holding the mmap_semaphore for reading, in a
* manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
* for writing) and page faulting on the user memory pointed to by src.
* This assumes that we will very rarely hit the partial != 0 path, or this
* will not be a win.
*/
static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
{
int partial;

pagefault_disable();
partial = __copy_from_user_inatomic(dst, src, n);
pagefault_enable();

/*
* Didn't copy everything, drop the mmap_sem and do a faulting copy
*/
if (unlikely(partial)) {
up_read(&current->mm->mmap_sem);
partial = copy_from_user(dst, src, n);
down_read(&current->mm->mmap_sem);
}

return partial;
}

/*
* Map an iov into an array of pages and offset/length tupples. With the
* partial_page structure, we can map several non-contiguous ranges into
@@ -1263,26 +1236,31 @@ static int get_iovec_page_array(const struct iovec __user *iov,
{
int buffers = 0, error = 0;

/*
* It's ok to take the mmap_sem for reading, even
* across a "get_user()".
*/
down_read(&current->mm->mmap_sem);

while (nr_vecs) {
unsigned long off, npages;
struct iovec entry;
void __user *base;
size_t len;
int i;

error = -EFAULT;
if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
/*
* Get user address base and length for this iovec.
*/
error = get_user(base, &iov->iov_base);
if (unlikely(error))
break;
error = get_user(len, &iov->iov_len);
if (unlikely(error))
break;

base = entry.iov_base;
len = entry.iov_len;

/*
* Sanity check this iovec. 0 read succeeds.
*/
error = 0;
if (unlikely(!len))
break;
error = -EFAULT;
5 changes: 5 additions & 0 deletions trunk/fs/xfs/xfs_buf_item.h
@@ -52,6 +52,11 @@ typedef struct xfs_buf_log_format_t {
#define XFS_BLI_UDQUOT_BUF 0x4
#define XFS_BLI_PDQUOT_BUF 0x8
#define XFS_BLI_GDQUOT_BUF 0x10
/*
* This flag indicates that the buffer contains newly allocated
* inodes.
*/
#define XFS_BLI_INODE_NEW_BUF 0x20

#define XFS_BLI_CHUNK 128
#define XFS_BLI_SHIFT 7
51 changes: 48 additions & 3 deletions trunk/fs/xfs/xfs_log_recover.c
@@ -1874,6 +1874,7 @@ xlog_recover_do_inode_buffer(
/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
xfs_mount_t *mp,
xlog_recover_item_t *item,
xfs_buf_t *bp,
xfs_buf_log_format_t *buf_f)
@@ -1884,6 +1885,50 @@ xlog_recover_do_reg_buffer(
unsigned int *data_map = NULL;
unsigned int map_size = 0;
int error;
int stale_buf = 1;

/*
* Scan through the on-disk inode buffer and attempt to
* determine if it has been written to since it was logged.
*
* - If any of the magic numbers are incorrect then the buffer is stale
* - If any of the modes are non-zero then the buffer is not stale
* - If all of the modes are zero and at least one of the generation
* counts is non-zero then the buffer is stale
*
* If the end result is a stale buffer then the log buffer is replayed
* otherwise it is skipped.
*
* This heuristic is not perfect. It can be improved by scanning the
* entire inode chunk for evidence that any of the inode clusters have
* been updated. To fix this problem completely we will need a major
* architectural change to the logging system.
*/
if (buf_f->blf_flags & XFS_BLI_INODE_NEW_BUF) {
xfs_dinode_t *dip;
int inodes_per_buf;
int mode_count = 0;
int gen_count = 0;

stale_buf = 0;
inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
for (i = 0; i < inodes_per_buf; i++) {
dip = (xfs_dinode_t *)xfs_buf_offset(bp,
i * mp->m_sb.sb_inodesize);
if (be16_to_cpu(dip->di_core.di_magic) !=
XFS_DINODE_MAGIC) {
stale_buf = 1;
break;
}
if (dip->di_core.di_mode)
mode_count++;
if (dip->di_core.di_gen)
gen_count++;
}

if (!mode_count && gen_count)
stale_buf = 1;
}

switch (buf_f->blf_type) {
case XFS_LI_BUF:
@@ -1917,7 +1962,7 @@ xlog_recover_do_reg_buffer(
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
}
if (!error)
if (!error && stale_buf)
memcpy(xfs_buf_offset(bp,
(uint)bit << XFS_BLI_SHIFT), /* dest */
item->ri_buf[i].i_addr, /* source */
@@ -2089,7 +2134,7 @@ xlog_recover_do_dquot_buffer(
if (log->l_quotaoffs_flag & type)
return;

xlog_recover_do_reg_buffer(item, bp, buf_f);
xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}

/*
@@ -2190,7 +2235,7 @@ xlog_recover_do_buffer_trans(
(XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
} else {
xlog_recover_do_reg_buffer(item, bp, buf_f);
xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
}
if (error)
return XFS_ERROR(error);
1 change: 1 addition & 0 deletions trunk/fs/xfs/xfs_trans_buf.c
@@ -966,6 +966,7 @@ xfs_trans_inode_alloc_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);

bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
bip->bli_format.blf_flags |= XFS_BLI_INODE_NEW_BUF;
}


5 changes: 5 additions & 0 deletions trunk/include/asm-i386/system.h
@@ -214,6 +214,11 @@ static inline unsigned long get_limit(unsigned long segment)
*/


/*
* Actually only lfence would be needed for mb() because all stores done
* by the kernel should be already ordered. But keep a full barrier for now.
*/

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

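The new comment in system.h argues that lfence alone would suffice for mb() because the kernel's stores are already ordered, but a full barrier is kept for now. The write-side/read-side pairing that such barriers rely on can be sketched in portable user-space C; this is only an analogy under assumed semantics (pthreads plus C11 atomic fences), not the kernel's mb()/rmb() macros and not code from this commit:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;          /* ordinary data written before the flag */
static atomic_int ready;     /* flag the reader polls */

static void *writer(void *arg)
{
	payload = 42;                                      /* plain store */
	atomic_thread_fence(memory_order_release);         /* like a write barrier: data before flag */
	atomic_store_explicit(&ready, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	while (!atomic_load_explicit(&ready, memory_order_relaxed))
		;                                          /* spin until the flag is seen */
	atomic_thread_fence(memory_order_acquire);         /* like rmb(): flag before data */
	printf("payload = %d\n", payload);                 /* prints 42, never 0 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}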
1 change: 1 addition & 0 deletions trunk/include/asm-mips/fcntl.h
@@ -13,6 +13,7 @@
#define O_SYNC 0x0010
#define O_NONBLOCK 0x0080
#define O_CREAT 0x0100 /* not fcntl */
#define O_TRUNC 0x0200 /* not fcntl */
#define O_EXCL 0x0400 /* not fcntl */
#define O_NOCTTY 0x0800 /* not fcntl */
#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
26 changes: 10 additions & 16 deletions trunk/kernel/futex.c
@@ -1943,10 +1943,9 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
void exit_robust_list(struct task_struct *curr)
{
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
struct robust_list __user *entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned long futex_offset;
int rc;

/*
* Fetch the list head (which was registered earlier, via
@@ -1966,13 +1965,11 @@ void exit_robust_list(struct task_struct *curr)
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
return;

next_entry = NULL; /* avoid warning with gcc */
if (pending)
handle_futex_death((void __user *)pending + futex_offset,
curr, pip);

while (entry != &head->list) {
/*
* Fetch the next entry in the list before calling
* handle_futex_death:
*/
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
/*
* A pending lock might already be on the list, so
* don't process it twice:
@@ -1981,10 +1978,11 @@ void exit_robust_list(struct task_struct *curr)
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi))
return;
if (rc)
/*
* Fetch the next entry in the list:
*/
if (fetch_robust_entry(&entry, &entry->next, &pi))
return;
entry = next_entry;
pi = next_pi;
/*
* Avoid excessively long or circular lists:
*/
@@ -1993,10 +1991,6 @@ void exit_robust_list(struct task_struct *curr)

cond_resched();
}

if (pending)
handle_futex_death((void __user *)pending + futex_offset,
curr, pip);
}

long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
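The exit_robust_list() hunks above keep the existing guard against excessively long or circular robust lists (the ROBUST_LIST_LIMIT bound referenced in the loop). A minimal standalone sketch of that bounded-walk pattern, using a hypothetical struct node rather than the kernel's robust_list types:

#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	int value;
};

/* Visit each node but give up after 'limit' hops, so a corrupted or
 * circular list cannot stall the caller indefinitely. */
static int walk_bounded(const struct node *head, unsigned int limit)
{
	const struct node *entry = head;
	unsigned int hops = 0;

	while (entry) {
		printf("value = %d\n", entry->value);
		if (++hops >= limit)
			return -1;	/* too long or circular: bail out */
		entry = entry->next;
	}
	return 0;
}

int main(void)
{
	struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

	walk_bounded(&a, 16);		/* terminates normally */

	c.next = &a;			/* make the list circular */
	if (walk_bounded(&a, 16) < 0)
		printf("gave up after the hop limit, as expected\n");
	return 0;
}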
(3 more changed files not shown)
