diff --git a/[refs] b/[refs]
index 45d13fe39d78..07527b0f6f8a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 62a36c43c898d45efcfe3376ea1da6a9a182e1ad
+refs/heads/master: 972d512a17c1bb7c4b784a9da2ca75745fcc6989
diff --git a/trunk/Documentation/DocBook/kernel-hacking.tmpl b/trunk/Documentation/DocBook/kernel-hacking.tmpl
index 582032eea872..6367bba32d22 100644
--- a/trunk/Documentation/DocBook/kernel-hacking.tmpl
+++ b/trunk/Documentation/DocBook/kernel-hacking.tmpl
@@ -1105,7 +1105,7 @@ static struct block_device_operations opt_fops = {
- Function names as strings (__FUNCTION__).
+ Function names as strings (__func__).
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index ade7415d2467..dc8f3babcabd 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2266,12 +2266,6 @@ M: kristen.c.accardi@intel.com
L: pcihpd-discuss@lists.sourceforge.net
S: Maintained
-SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
-P: Stephen Hemminger
-M: shemminger@osdl.org
-L: netdev@vger.kernel.org
-S: Maintained
-
SPARC (sparc32):
P: William L. Irwin
M: wli@holomorphy.com
diff --git a/trunk/README b/trunk/README
index 2b5844d8cfa0..76dd780d88ed 100644
--- a/trunk/README
+++ b/trunk/README
@@ -149,9 +149,6 @@ CONFIGURING the kernel:
"make gconfig" X windows (Gtk) based configuration tool.
"make oldconfig" Default all questions based on the contents of
your existing ./.config file.
- "make silentoldconfig"
- Like above, but avoids cluttering the screen
- with question already answered.
NOTES on "make config":
- having unnecessary drivers will make the kernel bigger, and can
@@ -172,6 +169,9 @@ CONFIGURING the kernel:
should probably answer 'n' to the questions for
"development", "experimental", or "debugging" features.
+ - Check the top Makefile for further site-dependent configuration
+ (default SVGA mode etc).
+
COMPILING the kernel:
- Make sure you have gcc 2.95.3 available.
@@ -199,9 +199,6 @@ COMPILING the kernel:
are installing a new kernel with the same version number as your
working kernel, make a backup of your modules directory before you
do a "make modules_install".
- In alternative, before compiling, edit your Makefile and change the
- "EXTRAVERSION" line - its content is appended to the regular kernel
- version.
- In order to boot your new kernel, you'll need to copy the kernel
image (e.g. .../linux/arch/i386/boot/bzImage after compilation)
diff --git a/trunk/arch/ppc64/Makefile b/trunk/arch/ppc64/Makefile
index 521c2a5a2862..17d2c1eac3b8 100644
--- a/trunk/arch/ppc64/Makefile
+++ b/trunk/arch/ppc64/Makefile
@@ -107,7 +107,7 @@ install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
defaultimage-$(CONFIG_PPC_PSERIES) := zImage
-defaultimage-$(CONFIG_PPC_PMAC) := zImage.vmode
+defaultimage-$(CONFIG_PPC_PMAC) := vmlinux
defaultimage-$(CONFIG_PPC_MAPLE) := zImage
defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
KBUILD_IMAGE := $(defaultimage-y)
diff --git a/trunk/arch/ppc64/kernel/pSeries_iommu.c b/trunk/arch/ppc64/kernel/pSeries_iommu.c
index 8c6313e7e145..f0fd7fbd6531 100644
--- a/trunk/arch/ppc64/kernel/pSeries_iommu.c
+++ b/trunk/arch/ppc64/kernel/pSeries_iommu.c
@@ -265,10 +265,8 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;
/* Test if we are going over 2GB of DMA space */
- if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
- udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
+ if (phb->dma_window_base_cur + phb->dma_window_size > (1L << 31))
panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
- }
phb->dma_window_base_cur += phb->dma_window_size;
@@ -312,84 +310,92 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
static void iommu_bus_setup_pSeries(struct pci_bus *bus)
{
- struct device_node *dn;
- struct iommu_table *tbl;
- struct device_node *isa_dn, *isa_dn_orig;
- struct device_node *tmp;
+ struct device_node *dn, *pdn;
struct pci_dn *pci;
- int children;
+ struct iommu_table *tbl;
DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
- dn = pci_bus_to_OF_node(bus);
- pci = PCI_DN(dn);
-
- if (bus->self) {
- /* This is not a root bus, any setup will be done for the
- * device-side of the bridge in iommu_dev_setup_pSeries().
- */
- return;
- }
-
- /* Check if the ISA bus on the system is under
- * this PHB.
- */
- isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");
-
- while (isa_dn && isa_dn != dn)
- isa_dn = isa_dn->parent;
-
- if (isa_dn_orig)
- of_node_put(isa_dn_orig);
-
- /* Count number of direct PCI children of the PHB.
- * All PCI device nodes have class-code property, so it's
- * an easy way to find them.
- */
- for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
- if (get_property(tmp, "class-code", NULL))
- children++;
-
- DBG("Children: %d\n", children);
-
- /* Calculate amount of DMA window per slot. Each window must be
- * a power of two (due to pci_alloc_consistent requirements).
+ /* For each (root) bus, we carve up the available DMA space in 256MB
+ * pieces. Since each piece is used by one (sub) bus/device, that would
+ * give a maximum of 7 devices per PHB. In most cases, this is plenty.
*
- * Keep 256MB aside for PHBs with ISA.
+ * The exception is on Python PHBs (pre-POWER4). Here we don't have EADS
+ * bridges below the PHB to allocate the sectioned tables to, so instead
+ * we allocate a 1GB table at the PHB level.
*/
- if (!isa_dn) {
- /* No ISA/IDE - just set window size and return */
- pci->phb->dma_window_size = 0x80000000ul; /* To be divided */
-
- while (pci->phb->dma_window_size * children > 0x80000000ul)
- pci->phb->dma_window_size >>= 1;
- DBG("No ISA/IDE, window size is %x\n", pci->phb->dma_window_size);
- pci->phb->dma_window_base_cur = 0;
-
- return;
- }
-
- /* If we have ISA, then we probably have an IDE
- * controller too. Allocate a 128MB table but
- * skip the first 128MB to avoid stepping on ISA
- * space.
- */
- pci->phb->dma_window_size = 0x8000000ul;
- pci->phb->dma_window_base_cur = 0x8000000ul;
-
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+ dn = pci_bus_to_OF_node(bus);
+ pci = dn->data;
+
+ if (!bus->self) {
+ /* Root bus */
+ if (is_python(dn)) {
+ unsigned int *iohole;
+
+ DBG("Python root bus %s\n", bus->name);
+
+ iohole = (unsigned int *)get_property(dn, "io-hole", 0);
+
+ if (iohole) {
+ /* On first bus we need to leave room for the
+ * ISA address space. Just skip the first 256MB
+ * altogether. This leaves 768MB for the window.
+ */
+ DBG("PHB has io-hole, reserving 256MB\n");
+ pci->phb->dma_window_size = 3 << 28;
+ pci->phb->dma_window_base_cur = 1 << 28;
+ } else {
+ /* 1GB window by default */
+ pci->phb->dma_window_size = 1 << 30;
+ pci->phb->dma_window_base_cur = 0;
+ }
+
+ tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+
+ iommu_table_setparms(pci->phb, dn, tbl);
+ pci->iommu_table = iommu_init_table(tbl);
+ } else {
+ /* Do a 128MB table at root. This is used for the IDE
+ * controller on some SMP-mode POWER4 machines. It
+ * doesn't hurt to allocate it on other machines
+ * -- it'll just be unused since new tables are
+ * allocated on the EADS level.
+ *
+ * Allocate at offset 128MB to avoid having to deal
+ * with ISA holes; 128MB table for IDE is plenty.
+ */
+ pci->phb->dma_window_size = 1 << 27;
+ pci->phb->dma_window_base_cur = 1 << 27;
+
+ tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+
+ iommu_table_setparms(pci->phb, dn, tbl);
+ pci->iommu_table = iommu_init_table(tbl);
+
+ /* All child buses have 256MB tables */
+ pci->phb->dma_window_size = 1 << 28;
+ }
+ } else {
+ pdn = pci_bus_to_OF_node(bus->parent);
- iommu_table_setparms(pci->phb, dn, tbl);
- pci->iommu_table = iommu_init_table(tbl);
+ if (!bus->parent->self && !is_python(pdn)) {
+ struct iommu_table *tbl;
+ /* First child and not python means this is the EADS
+ * level. Allocate new table for this slot with 256MB
+ * window.
+ */
- /* Divide the rest (1.75GB) among the children */
- pci->phb->dma_window_size = 0x80000000ul;
- while (pci->phb->dma_window_size * children > 0x70000000ul)
- pci->phb->dma_window_size >>= 1;
+ tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
- DBG("ISA/IDE, window size is %x\n", pci->phb->dma_window_size);
+ iommu_table_setparms(pci->phb, dn, tbl);
+ pci->iommu_table = iommu_init_table(tbl);
+ } else {
+ /* Lower than first child or under python, use parent table */
+ pci->iommu_table = PCI_DN(pdn)->iommu_table;
+ }
+ }
}
@@ -440,28 +446,13 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
static void iommu_dev_setup_pSeries(struct pci_dev *dev)
{
struct device_node *dn, *mydn;
- struct iommu_table *tbl;
DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, dev->pretty_name);
-
- mydn = dn = pci_device_to_OF_node(dev);
-
- /* If we're the direct child of a root bus, then we need to allocate
- * an iommu table ourselves. The bus setup code should have setup
- * the window sizes already.
- */
- if (!dev->bus->self) {
- DBG(" --> first child, no bridge. Allocating iommu table.\n");
- tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
- iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
- PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);
-
- return;
- }
-
- /* If this device is further down the bus tree, search upwards until
- * an already allocated iommu table is found and use that.
+ /* Now copy the iommu_table ptr from the bus device down to the
+ * pci device_node. This means get_iommu_table() won't need to search
+ * up the device tree to find it.
*/
+ mydn = dn = pci_device_to_OF_node(dev);
while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
dn = dn->parent;
diff --git a/trunk/arch/ppc64/kernel/pci.c b/trunk/arch/ppc64/kernel/pci.c
index ff4be1da69d5..861138ad092c 100644
--- a/trunk/arch/ppc64/kernel/pci.c
+++ b/trunk/arch/ppc64/kernel/pci.c
@@ -246,14 +246,11 @@ static unsigned int pci_parse_of_flags(u32 addr0)
unsigned int flags = 0;
if (addr0 & 0x02000000) {
- flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
- flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
- flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+ flags |= IORESOURCE_MEM;
if (addr0 & 0x40000000)
- flags |= IORESOURCE_PREFETCH
- | PCI_BASE_ADDRESS_MEM_PREFETCH;
+ flags |= IORESOURCE_PREFETCH;
} else if (addr0 & 0x01000000)
- flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
+ flags |= IORESOURCE_IO;
return flags;
}
diff --git a/trunk/arch/ppc64/kernel/prom_init.c b/trunk/arch/ppc64/kernel/prom_init.c
index f252670874a4..9979919cdf92 100644
--- a/trunk/arch/ppc64/kernel/prom_init.c
+++ b/trunk/arch/ppc64/kernel/prom_init.c
@@ -1711,7 +1711,6 @@ static void __init flatten_device_tree(void)
unsigned long offset = reloc_offset();
unsigned long mem_start, mem_end, room;
struct boot_param_header *hdr;
- struct prom_t *_prom = PTRRELOC(&prom);
char *namep;
u64 *rsvmap;
@@ -1766,7 +1765,6 @@ static void __init flatten_device_tree(void)
RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);
/* Finish header */
- hdr->boot_cpuid_phys = _prom->cpu;
hdr->magic = OF_DT_HEADER;
hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
@@ -1856,6 +1854,7 @@ static void __init prom_find_boot_cpu(void)
cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+ prom_setprop(cpu_pkg, "linux,boot-cpu", NULL, 0);
prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
_prom->cpu = getprop_rval;
diff --git a/trunk/arch/x86_64/Kconfig b/trunk/arch/x86_64/Kconfig
index 21afa69a086d..0969d570f3b5 100644
--- a/trunk/arch/x86_64/Kconfig
+++ b/trunk/arch/x86_64/Kconfig
@@ -308,7 +308,7 @@ config HPET_TIMER
present. The HPET provides a stable time base on SMP
systems, unlike the TSC, but it is more expensive to access,
as it is off-chip. You can find the HPET spec at
- .
+ .
config X86_PM_TIMER
bool "PM timer"
diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c
index 486b6e1c7dfb..c56f995aadad 100644
--- a/trunk/drivers/block/cciss.c
+++ b/trunk/drivers/block/cciss.c
@@ -483,6 +483,9 @@ static int cciss_open(struct inode *inode, struct file *filep)
printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif /* CCISS_DEBUG */
+ if (host->busy_initializing)
+ return -EBUSY;
+
if (host->busy_initializing || drv->busy_configuring)
return -EBUSY;
/*
@@ -2988,7 +2991,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
cciss_procinit(i);
- hba[i]->busy_initializing = 0;
for(j=0; j < NWD; j++) { /* mfm */
drive_info_struct *drv = &(hba[i]->drv[j]);
@@ -3031,6 +3033,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
add_disk(disk);
}
+ hba[i]->busy_initializing = 0;
return(1);
clean4:
diff --git a/trunk/drivers/block/ll_rw_blk.c b/trunk/drivers/block/ll_rw_blk.c
index baedac522945..483d71b10cf9 100644
--- a/trunk/drivers/block/ll_rw_blk.c
+++ b/trunk/drivers/block/ll_rw_blk.c
@@ -2373,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
EXPORT_SYMBOL(blkdev_issue_flush);
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q: device queue
+ * @disk: gendisk
+ * @error_sector: error offset
+ *
+ * Description:
+ * Devices understanding the SCSI command set, can use this function as
+ * a helper for issuing a cache flush. Note: driver is required to store
+ * the error offset (in case of error flushing) in ->sector of struct
+ * request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+{
+ struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ int ret;
+
+ rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+ rq->sector = 0;
+ memset(rq->cmd, 0, sizeof(rq->cmd));
+ rq->cmd[0] = 0x35;
+ rq->cmd_len = 12;
+ rq->data = NULL;
+ rq->data_len = 0;
+ rq->timeout = 60 * HZ;
+
+ ret = blk_execute_rq(q, disk, rq, 0);
+
+ if (ret && error_sector)
+ *error_sector = rq->sector;
+
+ blk_put_request(rq);
+ return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
int rw = rq_data_dir(rq);
diff --git a/trunk/drivers/infiniband/core/mad_rmpp.c b/trunk/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c4..8f7cef0812f6 100644
--- a/trunk/drivers/infiniband/core/mad_rmpp.c
+++ b/trunk/drivers/infiniband/core/mad_rmpp.c
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_rmpp_mad *rmpp_mad;
int timeout;
+ u32 paylen;
rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
if (mad_send_wr->seg_num == 1) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
- rmpp_mad->rmpp_hdr.paylen_newwin =
- cpu_to_be32(mad_send_wr->total_seg *
- (sizeof(struct ib_rmpp_mad) -
- offsetof(struct ib_rmpp_mad, data)) -
- mad_send_wr->pad);
+ paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
+ mad_send_wr->pad;
+ rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
} else {
mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
- rmpp_mad->rmpp_hdr.paylen_newwin =
- cpu_to_be32(sizeof(struct ib_rmpp_mad) -
- offsetof(struct ib_rmpp_mad, data) -
- mad_send_wr->pad);
+ paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
+ rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
}
/* 2 seconds for an ACK until we can find the packet lifetime */
diff --git a/trunk/drivers/message/i2o/config-osm.c b/trunk/drivers/message/i2o/config-osm.c
index 10432f665201..af32ab4e90cd 100644
--- a/trunk/drivers/message/i2o/config-osm.c
+++ b/trunk/drivers/message/i2o/config-osm.c
@@ -56,11 +56,8 @@ static int __init i2o_config_init(void)
return -EBUSY;
}
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
- if (i2o_config_old_init()) {
- osm_err("old config handler initialization failed\n");
+ if (i2o_config_old_init())
i2o_driver_unregister(&i2o_config_driver);
- return -EBUSY;
- }
#endif
return 0;
diff --git a/trunk/fs/fat/inode.c b/trunk/fs/fat/inode.c
index e2effe2dc9b2..51b1d15d9d5c 100644
--- a/trunk/fs/fat/inode.c
+++ b/trunk/fs/fat/inode.c
@@ -300,9 +300,9 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_blksize = sbi->cluster_size;
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
- inode->i_mtime.tv_sec =
+ inode->i_mtime.tv_sec = inode->i_atime.tv_sec =
date_dos2unix(le16_to_cpu(de->time), le16_to_cpu(de->date));
- inode->i_mtime.tv_nsec = 0;
+ inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0;
if (sbi->options.isvfat) {
int secs = de->ctime_cs / 100;
int csecs = de->ctime_cs % 100;
@@ -310,11 +310,8 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
date_dos2unix(le16_to_cpu(de->ctime),
le16_to_cpu(de->cdate)) + secs;
inode->i_ctime.tv_nsec = csecs * 10000000;
- inode->i_atime.tv_sec =
- date_dos2unix(le16_to_cpu(0), le16_to_cpu(de->adate));
- inode->i_atime.tv_nsec = 0;
} else
- inode->i_ctime = inode->i_atime = inode->i_mtime;
+ inode->i_ctime = inode->i_mtime;
return 0;
}
@@ -516,9 +513,7 @@ static int fat_write_inode(struct inode *inode, int wait)
raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16);
fat_date_unix2dos(inode->i_mtime.tv_sec, &raw_entry->time, &raw_entry->date);
if (sbi->options.isvfat) {
- __le16 atime;
fat_date_unix2dos(inode->i_ctime.tv_sec,&raw_entry->ctime,&raw_entry->cdate);
- fat_date_unix2dos(inode->i_atime.tv_sec,&atime,&raw_entry->adate);
raw_entry->ctime_cs = (inode->i_ctime.tv_sec & 1) * 100 +
inode->i_ctime.tv_nsec / 10000000;
}
diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c
index fb34f88a4a74..23db452ab428 100644
--- a/trunk/fs/proc/base.c
+++ b/trunk/fs/proc/base.c
@@ -340,52 +340,6 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
return result;
}
-
-/* Same as proc_root_link, but this addionally tries to get fs from other
- * threads in the group */
-static int proc_task_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
-{
- struct fs_struct *fs;
- int result = -ENOENT;
- struct task_struct *leader = proc_task(inode);
-
- task_lock(leader);
- fs = leader->fs;
- if (fs) {
- atomic_inc(&fs->count);
- task_unlock(leader);
- } else {
- /* Try to get fs from other threads */
- task_unlock(leader);
- struct task_struct *task = leader;
- read_lock(&tasklist_lock);
- if (pid_alive(task)) {
- while ((task = next_thread(task)) != leader) {
- task_lock(task);
- fs = task->fs;
- if (fs) {
- atomic_inc(&fs->count);
- task_unlock(task);
- break;
- }
- task_unlock(task);
- }
- }
- read_unlock(&tasklist_lock);
- }
-
- if (fs) {
- read_lock(&fs->lock);
- *mnt = mntget(fs->rootmnt);
- *dentry = dget(fs->root);
- read_unlock(&fs->lock);
- result = 0;
- put_fs_struct(fs);
- }
- return result;
-}
-
-
#define MAY_PTRACE(task) \
(task == current || \
(task->parent == current && \
@@ -517,14 +471,14 @@ static int proc_oom_score(struct task_struct *task, char *buffer)
/* permission checks */
-/* If the process being read is separated by chroot from the reading process,
- * don't let the reader access the threads.
- */
-static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
+static int proc_check_root(struct inode *inode)
{
- struct dentry *de, *base;
- struct vfsmount *our_vfsmnt, *mnt;
+ struct dentry *de, *base, *root;
+ struct vfsmount *our_vfsmnt, *vfsmnt, *mnt;
int res = 0;
+
+ if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */
+ return -ENOENT;
read_lock(¤t->fs->lock);
our_vfsmnt = mntget(current->fs->rootmnt);
base = dget(current->fs->root);
@@ -557,16 +511,6 @@ static int proc_check_chroot(struct dentry *root, struct vfsmount *vfsmnt)
goto exit;
}
-static int proc_check_root(struct inode *inode)
-{
- struct dentry *root;
- struct vfsmount *vfsmnt;
-
- if (proc_root_link(inode, &root, &vfsmnt)) /* Ewww... */
- return -ENOENT;
- return proc_check_chroot(root, vfsmnt);
-}
-
static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
{
if (generic_permission(inode, mask, NULL) != 0)
@@ -574,20 +518,6 @@ static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
return proc_check_root(inode);
}
-static int proc_task_permission(struct inode *inode, int mask, struct nameidata *nd)
-{
- struct dentry *root;
- struct vfsmount *vfsmnt;
-
- if (generic_permission(inode, mask, NULL) != 0)
- return -EACCES;
-
- if (proc_task_root_link(inode, &root, &vfsmnt))
- return -ENOENT;
-
- return proc_check_chroot(root, vfsmnt);
-}
-
extern struct seq_operations proc_pid_maps_op;
static int maps_open(struct inode *inode, struct file *file)
{
@@ -1489,7 +1419,7 @@ static struct inode_operations proc_fd_inode_operations = {
static struct inode_operations proc_task_inode_operations = {
.lookup = proc_task_lookup,
- .permission = proc_task_permission,
+ .permission = proc_permission,
};
#ifdef CONFIG_SECURITY
diff --git a/trunk/include/linux/mm.h b/trunk/include/linux/mm.h
index 097b3a3c693d..82d7024f0765 100644
--- a/trunk/include/linux/mm.h
+++ b/trunk/include/linux/mm.h
@@ -136,7 +136,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_EXEC 0x00000004
#define VM_SHARED 0x00000008
-/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC 0x00000040
@@ -351,8 +350,7 @@ static inline void put_page(struct page *page)
* only one copy in memory, at most, normally.
*
* For the non-reserved pages, page_count(page) denotes a reference count.
- * page_count() == 0 means the page is free. page->lru is then used for
- * freelist management in the buddy allocator.
+ * page_count() == 0 means the page is free.
* page_count() == 1 means the page is used for exactly one purpose
* (e.g. a private data page of one process).
*
@@ -378,8 +376,10 @@ static inline void put_page(struct page *page)
* attaches, plus 1 if `private' contains something, plus one for
* the page cache itself.
*
- * Instead of keeping dirty/clean pages in per address-space lists, we instead
- * now tag pages as dirty/under writeback in the radix tree.
+ * All pages belonging to an inode are in these doubly linked lists:
+ * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
+ * using the page->list list_head. These fields are also used for
+ * freelist management (when page_count()==0).
*
* There is also a per-mapping radix tree mapping index to the page
* in memory if present. The tree is rooted at mapping->root.
diff --git a/trunk/include/linux/syscalls.h b/trunk/include/linux/syscalls.h
index a6f03e473737..425f58c8ea4a 100644
--- a/trunk/include/linux/syscalls.h
+++ b/trunk/include/linux/syscalls.h
@@ -508,7 +508,5 @@ asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
asmlinkage long sys_ioprio_get(int which, int who);
-asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
- unsigned long maxnode);
#endif
diff --git a/trunk/include/rdma/ib_mad.h b/trunk/include/rdma/ib_mad.h
index 53184a38fdf6..0e293fe733b0 100644
--- a/trunk/include/rdma/ib_mad.h
+++ b/trunk/include/rdma/ib_mad.h
@@ -108,6 +108,13 @@
#define IB_QP1_QKEY 0x80010000
#define IB_QP_SET_QKEY 0x80000000
+enum {
+ IB_MGMT_MAD_DATA = 232,
+ IB_MGMT_RMPP_DATA = 220,
+ IB_MGMT_VENDOR_DATA = 216,
+ IB_MGMT_SA_DATA = 200
+};
+
struct ib_mad_hdr {
u8 base_version;
u8 mgmt_class;
@@ -149,20 +156,20 @@ struct ib_sa_hdr {
struct ib_mad {
struct ib_mad_hdr mad_hdr;
- u8 data[232];
+ u8 data[IB_MGMT_MAD_DATA];
};
struct ib_rmpp_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
- u8 data[220];
+ u8 data[IB_MGMT_RMPP_DATA];
};
struct ib_sa_mad {
struct ib_mad_hdr mad_hdr;
struct ib_rmpp_hdr rmpp_hdr;
struct ib_sa_hdr sa_hdr;
- u8 data[200];
+ u8 data[IB_MGMT_SA_DATA];
} __attribute__ ((packed));
struct ib_vendor_mad {
@@ -170,7 +177,7 @@ struct ib_vendor_mad {
struct ib_rmpp_hdr rmpp_hdr;
u8 reserved;
u8 oui[3];
- u8 data[216];
+ u8 data[IB_MGMT_VENDOR_DATA];
};
struct ib_class_port_info
diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c
index 4b8f0f9230a4..a967605bc2e3 100644
--- a/trunk/kernel/printk.c
+++ b/trunk/kernel/printk.c
@@ -488,11 +488,6 @@ static int __init printk_time_setup(char *str)
__setup("time", printk_time_setup);
-__attribute__((weak)) unsigned long long printk_clock(void)
-{
- return sched_clock();
-}
-
/*
* This is printk. It can be called from any context. We want it to work.
*
@@ -570,7 +565,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
loglev_char = default_message_loglevel
+ '0';
}
- t = printk_clock();
+ t = sched_clock();
nanosec_rem = do_div(t, 1000000000);
tlen = sprintf(tbuf,
"<%c>[%5lu.%06lu] ",
diff --git a/trunk/mm/mmap.c b/trunk/mm/mmap.c
index fa11d91242e8..8b8e05f07cdb 100644
--- a/trunk/mm/mmap.c
+++ b/trunk/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
/*
* Get rid of page table information in the indicated region.
*
- * Called with the mm semaphore held.
+ * Called with the page table lock held.
*/
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
diff --git a/trunk/mm/mprotect.c b/trunk/mm/mprotect.c
index 57577f63b305..e9fbd013ad9a 100644
--- a/trunk/mm/mprotect.c
+++ b/trunk/mm/mprotect.c
@@ -248,8 +248,7 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
- /* newflags >> 4 shift VM_MAY% in place of VM_% */
- if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
+ if ((newflags & ~(newflags >> 4)) & 0xf) {
error = -EACCES;
goto out;
}