diff --git a/[refs] b/[refs]
index 388a3094b962..9c189dc30807 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 939e3428066962b7c5966d8f9648058e644f5395
+refs/heads/master: f4dcd3c229a0745aff7d6835ca7c45aaeb293714
diff --git a/trunk/Documentation/DocBook/kernel-locking.tmpl b/trunk/Documentation/DocBook/kernel-locking.tmpl
index 0a441f73261a..644c3884fab9 100644
--- a/trunk/Documentation/DocBook/kernel-locking.tmpl
+++ b/trunk/Documentation/DocBook/kernel-locking.tmpl
@@ -551,12 +551,10 @@
spin_lock_irqsave(), which is a superset
of all other spinlock primitives.
-
Table of Locking Requirements
-
IRQ Handler A
@@ -578,128 +576,97 @@
IRQ Handler B
-SLIS
+spin_lock_irqsave
None
Softirq A
-SLI
-SLI
-SL
+spin_lock_irq
+spin_lock_irq
+spin_lock
Softirq B
-SLI
-SLI
-SL
-SL
+spin_lock_irq
+spin_lock_irq
+spin_lock
+spin_lock
Tasklet A
-SLI
-SLI
-SL
-SL
+spin_lock_irq
+spin_lock_irq
+spin_lock
+spin_lock
None
Tasklet B
-SLI
-SLI
-SL
-SL
-SL
+spin_lock_irq
+spin_lock_irq
+spin_lock
+spin_lock
+spin_lock
None
Timer A
-SLI
-SLI
-SL
-SL
-SL
-SL
+spin_lock_irq
+spin_lock_irq
+spin_lock
+spin_lock
+spin_lock
+spin_lock
None
Timer B
-SLI
-SLI
-SL
-SL
-SL
-SL
-SL
+spin_lock_irq
+spin_lock_irq
+spin_lock
+spin_lock
+spin_lock
+spin_lock
+spin_lock
None
User Context A
-SLI
-SLI
-SLBH
-SLBH
-SLBH
-SLBH
-SLBH
-SLBH
+spin_lock_irq
+spin_lock_irq
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
None
User Context B
-SLI
-SLI
-SLBH
-SLBH
-SLBH
-SLBH
-SLBH
-SLBH
-DI
-None
-
-
-
-
-
-
-
-Legend for Locking Requirements Table
-
-
-
-
-SLIS
-spin_lock_irqsave
-
-
-SLI
spin_lock_irq
-
-
-SL
-spin_lock
-
-
-SLBH
+spin_lock_irq
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
+spin_lock_bh
spin_lock_bh
-
-
-DI
down_interruptible
+None
-
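As the table above notes, spin_lock_irqsave() is the superset primitive: it saves and restores the caller's interrupt state, so it is safe from any context. A minimal sketch of the pattern (the lock and data names here are hypothetical):

	static DEFINE_SPINLOCK(my_lock);
	static int shared_counter;

	/* Safe whether called from user context, softirq, tasklet,
	 * timer, or a hardware IRQ handler. */
	static void update_counter(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		shared_counter++;
		spin_unlock_irqrestore(&my_lock, flags);
	}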
diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt
index 5c8695a3d139..498ff31f3aa1 100644
--- a/trunk/Documentation/feature-removal-schedule.txt
+++ b/trunk/Documentation/feature-removal-schedule.txt
@@ -328,20 +328,21 @@ Who: Adrian Bunk
---------------------------
-What: libata spindown skipping and warning
+What: libata.spindown_compat module parameter
When: Dec 2008
-Why: Some halt(8) implementations synchronize caches for and spin
- down libata disks because libata didn't use to spin down disk on
- system halt (only synchronized caches).
- Spin down on system halt is now implemented. sysfs node
- /sys/class/scsi_disk/h:c:i:l/manage_start_stop is present if
- spin down support is available.
+Why: halt(8) synchronizes caches for and spins down libata disks
+ because libata didn't use to spin down disk on system halt
+ (only synchronized caches).
+ Spin down on system halt is now implemented and can be tested
+ using sysfs node /sys/class/scsi_disk/h:c:i:l/manage_start_stop.
Because issuing spin down command to an already spun down disk
- makes some disks spin up just to spin down again, libata tracks
- device spindown status to skip the extra spindown command and
- warn about it.
- This is to give userspace tools the time to get updated and will
- be removed after userspace is reasonably updated.
+ makes some disks spin up just to spin down again, the old
+ behavior needs to be maintained till the userspace tool is updated
+ to check the sysfs node and not to spin down disks with the
+ node set to one.
+ This module parameter is to give the userspace tool time to
+ get updated and should be removed after userspace is
+ reasonably updated.
Who: Tejun Heo
---------------------------
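For reference, an updated halt(8) can decide whether to issue its own
spindown by reading the sysfs node named above; a rough userspace sketch
(error handling trimmed, and the h:c:i:l path components are per-device):

	#include <stdio.h>

	/* Returns 1 if the kernel manages start/stop itself, in which
	 * case halt(8) must not send its own spindown command. */
	static int kernel_manages_spindown(const char *node)
	{
		FILE *f = fopen(node, "r");
		int val = 0;

		if (!f)
			return 0;	/* older kernel: node absent */
		if (fscanf(f, "%d", &val) != 1)
			val = 0;
		fclose(f);
		return val == 1;
	}

Until userspace catches up, the compatibility behavior can be toggled at
load time via the libata.spindown_compat module parameter added below.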
diff --git a/trunk/Documentation/gpio.txt b/trunk/Documentation/gpio.txt
index 36af58eba136..e8be0abb346c 100644
--- a/trunk/Documentation/gpio.txt
+++ b/trunk/Documentation/gpio.txt
@@ -111,9 +111,7 @@ setting up a platform_device using the GPIO, is mark its direction:
The return value is zero for success, else a negative errno. It should
be checked, since the get/set calls don't have error returns and since
-misconfiguration is possible. You should normally issue these calls from
-a task context. However, for spinlock-safe GPIOs it's OK to use them
-before tasking is enabled, as part of early board setup.
+misconfiguration is possible. (These calls could sleep.)
For output GPIOs, the value provided becomes the initial output value.
This helps avoid signal glitching during system startup.
@@ -199,9 +197,7 @@ However, many platforms don't currently support this mechanism.
Passing invalid GPIO numbers to gpio_request() will fail, as will requesting
GPIOs that have already been claimed with that call. The return value of
-gpio_request() must be checked. You should normally issue these calls from
-a task context. However, for spinlock-safe GPIOs it's OK to request GPIOs
-before tasking is enabled, as part of early board setup.
+gpio_request() must be checked. (These calls could sleep.)
These calls serve two basic purposes. One is marking the signals which
are actually in use as GPIOs, for better diagnostics; systems may have
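Putting the two calls together, a board-setup sketch that checks both
return values (the GPIO number and label here are hypothetical):

	/* Hypothetical board code: claim GPIO 23 and drive it high. */
	static int __init my_board_gpio_init(void)
	{
		int err;

		err = gpio_request(23, "status-led");
		if (err)
			return err;

		/* May sleep; the value 1 becomes the initial output level. */
		err = gpio_direction_output(23, 1);
		if (err)
			gpio_free(23);
		return err;
	}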
diff --git a/trunk/Documentation/networking/netdevices.txt b/trunk/Documentation/networking/netdevices.txt
index ce1361f95243..847cedb238f6 100644
--- a/trunk/Documentation/networking/netdevices.txt
+++ b/trunk/Documentation/networking/netdevices.txt
@@ -49,7 +49,7 @@ dev->hard_start_xmit:
for this and return -1 when the spin lock fails.
The locking there should also properly protect against
set_multicast_list
- Context: Process with BHs disabled or BH (timer).
+ Context: BHs disabled
Notes: netif_queue_stopped() is guaranteed false
Interrupts must be enabled when calling hard_start_xmit.
(Interrupts must also be enabled when enabling the BH handler.)
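For a driver that takes its own TX lock, the contract above reduces to a
small pattern; a condensed sketch with hypothetical driver names:

	static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);

		/* Called with BHs disabled; if the lock is contended,
		 * tell the core to retry rather than spinning. */
		if (!spin_trylock(&priv->tx_lock))
			return NETDEV_TX_LOCKED;	/* i.e. -1 */

		/* ... queue skb to the hardware ring here ... */

		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_OK;
	}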
diff --git a/trunk/Documentation/vm/slabinfo.c b/trunk/Documentation/vm/slabinfo.c
index d4f21ffd1404..686a8e04a4f3 100644
--- a/trunk/Documentation/vm/slabinfo.c
+++ b/trunk/Documentation/vm/slabinfo.c
@@ -242,9 +242,6 @@ void decode_numa_list(int *numa, char *t)
memset(numa, 0, MAX_NODES * sizeof(int));
- if (!t)
- return;
-
while (*t == 'N') {
t++;
node = strtoul(t, &t, 10);
@@ -262,17 +259,11 @@ void decode_numa_list(int *numa, char *t)
void slab_validate(struct slabinfo *s)
{
- if (strcmp(s->name, "*") == 0)
- return;
-
set_obj(s, "validate", 1);
}
void slab_shrink(struct slabinfo *s)
{
- if (strcmp(s->name, "*") == 0)
- return;
-
set_obj(s, "shrink", 1);
}
@@ -395,9 +386,7 @@ void report(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
return;
-
- printf("\nSlabcache: %-20s Aliases: %2d Order : %2d Objects: %d\n",
- s->name, s->aliases, s->order, s->objects);
+ printf("\nSlabcache: %-20s Aliases: %2d Order : %2d\n", s->name, s->aliases, s->order);
if (s->hwcache_align)
printf("** Hardware cacheline aligned\n");
if (s->cache_dma)
@@ -556,9 +545,6 @@ int slab_empty(struct slabinfo *s)
void slab_debug(struct slabinfo *s)
{
- if (strcmp(s->name, "*") == 0)
- return;
-
if (sanity && !s->sanity_checks) {
set_obj(s, "sanity", 1);
}
@@ -805,11 +791,11 @@ void totals(void)
store_size(b1, total_size);store_size(b2, total_waste);
store_size(b3, total_waste * 100 / total_used);
- printf("Memory used: %6s # Loss : %6s MRatio:%6s%%\n", b1, b2, b3);
+ printf("Memory used: %6s # Loss : %6s MRatio: %6s%%\n", b1, b2, b3);
store_size(b1, total_objects);store_size(b2, total_partobj);
store_size(b3, total_partobj * 100 / total_objects);
- printf("# Objects : %6s # PartObj: %6s ORatio:%6s%%\n", b1, b2, b3);
+ printf("# Objects : %6s # PartObj: %6s ORatio: %6s%%\n", b1, b2, b3);
printf("\n");
printf("Per Cache Average Min Max Total\n");
@@ -832,7 +818,7 @@ void totals(void)
store_size(b1, avg_ppart);store_size(b2, min_ppart);
store_size(b3, max_ppart);
store_size(b4, total_partial * 100 / total_slabs);
- printf("%%PartSlab%10s%% %10s%% %10s%% %10s%%\n",
+ printf("%%PartSlab %10s%% %10s%% %10s%% %10s%%\n",
b1, b2, b3, b4);
store_size(b1, avg_partobj);store_size(b2, min_partobj);
@@ -844,7 +830,7 @@ void totals(void)
store_size(b1, avg_ppartobj);store_size(b2, min_ppartobj);
store_size(b3, max_ppartobj);
store_size(b4, total_partobj * 100 / total_objects);
- printf("%% PartObj%10s%% %10s%% %10s%% %10s%%\n",
+ printf("%% PartObj %10s%% %10s%% %10s%% %10s%%\n",
b1, b2, b3, b4);
store_size(b1, avg_size);store_size(b2, min_size);
@@ -1114,8 +1100,6 @@ void output_slabs(void)
ops(slab);
else if (show_slab)
slabcache(slab);
- else if (show_report)
- report(slab);
}
}
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 4c3277cb925e..bbeb5b6b5b05 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2689,13 +2689,13 @@ L: i2c@lm-sensors.org
S: Maintained
PARALLEL PORT SUPPORT
-L: linux-parport@lists.infradead.org (subscribers-only)
+L: linux-parport@lists.infradead.org
S: Orphan
PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
P: Tim Waugh
M: tim@cyberelk.net
-L: linux-parport@lists.infradead.org (subscribers-only)
+L: linux-parport@lists.infradead.org
W: http://www.torque.net/linux-pp.html
S: Maintained
diff --git a/trunk/Makefile b/trunk/Makefile
index 948fa09442f1..e6990e2cdafc 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -491,7 +491,7 @@ endif
include $(srctree)/arch/$(ARCH)/Makefile
ifdef CONFIG_FRAME_POINTER
-CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
+CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
CFLAGS += -fomit-frame-pointer
endif
diff --git a/trunk/arch/blackfin/Kconfig b/trunk/arch/blackfin/Kconfig
index d80e5b1d686e..1a4930509325 100644
--- a/trunk/arch/blackfin/Kconfig
+++ b/trunk/arch/blackfin/Kconfig
@@ -560,6 +560,14 @@ endchoice
source "mm/Kconfig"
+config LARGE_ALLOCS
+ bool "Allow allocating large blocks (> 1MB) of memory"
+ help
+ Allow the slab memory allocator to keep chains for very large
+ memory sizes - up to 32MB. You may need this if your system has
+ a lot of RAM, and you need to be able to allocate very large
+ contiguous chunks. If unsure, say N.
+
config BFIN_DMA_5XX
bool "Enable DMA Support"
depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
diff --git a/trunk/arch/frv/Kconfig b/trunk/arch/frv/Kconfig
index 74eef7111f2b..114738a45582 100644
--- a/trunk/arch/frv/Kconfig
+++ b/trunk/arch/frv/Kconfig
@@ -102,6 +102,14 @@ config HIGHPTE
with a lot of RAM, this can be wasteful of precious low memory.
Setting this option will put user-space page tables in high memory.
+config LARGE_ALLOCS
+ bool "Allow allocating large blocks (> 1MB) of memory"
+ help
+ Allow the slab memory allocator to keep chains for very large memory
+ sizes - up to 32MB. You may need this if your system has a lot of
+ RAM, and you need to be able to allocate very large contiguous chunks.
+ If unsure, say N.
+
source "mm/Kconfig"
choice
diff --git a/trunk/arch/i386/Makefile b/trunk/arch/i386/Makefile
index bd28f9f9b4b7..6dc5e5d90fec 100644
--- a/trunk/arch/i386/Makefile
+++ b/trunk/arch/i386/Makefile
@@ -34,7 +34,7 @@ CHECKFLAGS += -D__i386__
CFLAGS += -pipe -msoft-float -mregparm=3 -freg-struct-return
# prevent gcc from keeping the stack 16 byte aligned
-CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
+CFLAGS += -mpreferred-stack-boundary=4
# CPU-specific tuning. Anything which can be shared with UML should go here.
include $(srctree)/arch/i386/Makefile.cpu
diff --git a/trunk/arch/i386/kernel/cpu/mtrr/generic.c b/trunk/arch/i386/kernel/cpu/mtrr/generic.c
index c4ebb5126ef7..5367e32e0403 100644
--- a/trunk/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/trunk/arch/i386/kernel/cpu/mtrr/generic.c
@@ -78,7 +78,7 @@ static void __cpuinit print_fixed(unsigned base, unsigned step, const mtrr_type*
}
/* Grab all of the MTRR state for this CPU into *state */
-void get_mtrr_state(void)
+void __init get_mtrr_state(void)
{
unsigned int i;
struct mtrr_var_range *vrs;
diff --git a/trunk/arch/i386/kernel/cpu/mtrr/main.c b/trunk/arch/i386/kernel/cpu/mtrr/main.c
index 1cf466df330a..02a2f39e5e0a 100644
--- a/trunk/arch/i386/kernel/cpu/mtrr/main.c
+++ b/trunk/arch/i386/kernel/cpu/mtrr/main.c
@@ -639,7 +639,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
* initialized (i.e. before smp_init()).
*
*/
-void mtrr_bp_init(void)
+void __init mtrr_bp_init(void)
{
init_ifs();
diff --git a/trunk/arch/i386/kernel/smp.c b/trunk/arch/i386/kernel/smp.c
index 6299c080f6e2..c9a7c9835aba 100644
--- a/trunk/arch/i386/kernel/smp.c
+++ b/trunk/arch/i386/kernel/smp.c
@@ -421,7 +421,7 @@ void flush_tlb_mm (struct mm_struct * mm)
}
if (!cpus_empty(cpu_mask))
flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
+ check_pgt_cache();
preempt_enable();
}
diff --git a/trunk/arch/m68knommu/Kconfig b/trunk/arch/m68knommu/Kconfig
index adc64a2bafbb..823f73736bb5 100644
--- a/trunk/arch/m68knommu/Kconfig
+++ b/trunk/arch/m68knommu/Kconfig
@@ -470,6 +470,14 @@ config AVNET
default y
depends on (AVNET5282)
+config LARGE_ALLOCS
+ bool "Allow allocating large blocks (> 1MB) of memory"
+ help
+ Allow the slab memory allocator to keep chains for very large
+ memory sizes - up to 32MB. You may need this if your system has
+ a lot of RAM, and you need to be able to allocate very large
+ contiguous chunks. If unsure, say N.
+
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
default y
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c
index 7150730e2ff1..a93f328a7317 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,7 +71,9 @@ spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
{
struct spufs_inode_info *ei = p;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&ei->vfs_inode);
+ }
}
static struct inode *
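This hunk restores the older slab constructor convention that the rest of
this patch applies across the filesystems below: the ctor receives a flags
word and must perform its one-time setup only when SLAB_CTOR_CONSTRUCTOR
is set. The generic shape (names hypothetical):

	static void my_init_once(void *obj, struct kmem_cache *cachep,
				 unsigned long flags)
	{
		struct my_inode_info *ei = obj;

		/* Skip non-constructor callbacks; initialize only when
		 * the object is first constructed. */
		if (flags & SLAB_CTOR_CONSTRUCTOR)
			inode_init_once(&ei->vfs_inode);
	}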
diff --git a/trunk/arch/sparc64/kernel/time.c b/trunk/arch/sparc64/kernel/time.c
index 2d63d7689962..6b9a06e42542 100644
--- a/trunk/arch/sparc64/kernel/time.c
+++ b/trunk/arch/sparc64/kernel/time.c
@@ -1030,7 +1030,7 @@ void __devinit setup_sparc64_timer(void)
clockevents_register_device(sevt);
}
-#define SPARC64_NSEC_PER_CYC_SHIFT 10UL
+#define SPARC64_NSEC_PER_CYC_SHIFT 32UL
static struct clocksource clocksource_tick = {
.rating = 100,
diff --git a/trunk/arch/v850/Kconfig b/trunk/arch/v850/Kconfig
index ace479ab273f..5f54c1236c18 100644
--- a/trunk/arch/v850/Kconfig
+++ b/trunk/arch/v850/Kconfig
@@ -240,6 +240,14 @@ menu "Processor type and features"
config RESET_GUARD
bool "Reset Guard"
+ config LARGE_ALLOCS
+ bool "Allow allocating large blocks (> 1MB) of memory"
+ help
+ Allow the slab memory allocator to keep chains for very large
+ memory sizes - up to 32MB. You may need this if your system has
+ a lot of RAM, and you need to be able to allocate very large
+ contiguous chunks. If unsure, say N.
+
source "mm/Kconfig"
endmenu
diff --git a/trunk/drivers/acpi/numa.c b/trunk/drivers/acpi/numa.c
index a2efae8a4c4e..8fcd6a15517f 100644
--- a/trunk/drivers/acpi/numa.c
+++ b/trunk/drivers/acpi/numa.c
@@ -40,19 +40,19 @@ static nodemask_t nodes_found_map = NODE_MASK_NONE;
#define NID_INVAL -1
/* maps to convert between proximity domain and logical node ID */
-static int pxm_to_node_map[MAX_PXM_DOMAINS]
+int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
= { [0 ... MAX_PXM_DOMAINS - 1] = NID_INVAL };
-static int node_to_pxm_map[MAX_NUMNODES]
+int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
-int pxm_to_node(int pxm)
+int __cpuinit pxm_to_node(int pxm)
{
if (pxm < 0)
return NID_INVAL;
return pxm_to_node_map[pxm];
}
-int node_to_pxm(int node)
+int __cpuinit node_to_pxm(int node)
{
if (node < 0)
return PXM_INVAL;
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index d3ea7f55283c..d5939e659cbb 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -101,6 +101,12 @@ int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
+int ata_spindown_compat = 1;
+module_param_named(spindown_compat, ata_spindown_compat, int, 0644);
+MODULE_PARM_DESC(spindown_compat, "Enable backward compatible spindown "
+ "behavior. Will be removed. More info can be found in "
+ "Documentation/feature-removal-schedule.txt\n");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c
index 242c43eef807..b6a1de8fad5b 100644
--- a/trunk/drivers/ata/libata-scsi.c
+++ b/trunk/drivers/ata/libata-scsi.c
@@ -893,7 +893,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
return queue_depth;
}
-/* XXX: for spindown warning */
+/* XXX: for ata_spindown_compat */
static void ata_delayed_done_timerfn(unsigned long arg)
{
struct scsi_cmnd *scmd = (void *)arg;
@@ -901,7 +901,7 @@ static void ata_delayed_done_timerfn(unsigned long arg)
scmd->scsi_done(scmd);
}
-/* XXX: for spindown warning */
+/* XXX: for ata_spindown_compat */
static void ata_delayed_done(struct scsi_cmnd *scmd)
{
static struct timer_list timer;
@@ -966,7 +966,8 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
* removed. Read Documentation/feature-removal-schedule.txt
* for more info.
*/
- if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
+ if (ata_spindown_compat &&
+ (qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
(system_state == SYSTEM_HALT ||
system_state == SYSTEM_POWER_OFF)) {
static unsigned long warned = 0;
@@ -1394,7 +1395,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
}
}
- /* XXX: track spindown state for spindown skipping and warning */
+ /* XXX: track spindown state for spindown_compat */
if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
qc->tf.command == ATA_CMD_STANDBYNOW1))
qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
diff --git a/trunk/drivers/ata/libata.h b/trunk/drivers/ata/libata.h
index 5e2466658420..13cb0c9af68d 100644
--- a/trunk/drivers/ata/libata.h
+++ b/trunk/drivers/ata/libata.h
@@ -58,6 +58,7 @@ extern int atapi_enabled;
extern int atapi_dmadir;
extern int libata_fua;
extern int libata_noacpi;
+extern int ata_spindown_compat;
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
diff --git a/trunk/drivers/ata/sata_nv.c b/trunk/drivers/ata/sata_nv.c
index 1a49c777fa6a..4cea3ef75226 100644
--- a/trunk/drivers/ata/sata_nv.c
+++ b/trunk/drivers/ata/sata_nv.c
@@ -229,6 +229,7 @@ struct nv_host_priv {
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static void nv_remove_one (struct pci_dev *pdev);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
@@ -287,6 +288,12 @@ static const struct pci_device_id nv_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
{ } /* terminate list */
};
@@ -299,7 +306,7 @@ static struct pci_driver nv_pci_driver = {
.suspend = ata_pci_device_suspend,
.resume = nv_pci_device_resume,
#endif
- .remove = ata_pci_remove_one,
+ .remove = nv_remove_one,
};
static struct scsi_host_template nv_sht = {
@@ -1606,6 +1613,15 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
IRQF_SHARED, ppi[0]->sht);
}
+static void nv_remove_one (struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct nv_host_priv *hpriv = host->private_data;
+
+ ata_pci_remove_one(pdev);
+ kfree(hpriv);
+}
+
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
diff --git a/trunk/drivers/ata/sata_via.c b/trunk/drivers/ata/sata_via.c
index ac4f43c4993f..d105d2c189d2 100644
--- a/trunk/drivers/ata/sata_via.c
+++ b/trunk/drivers/ata/sata_via.c
@@ -441,7 +441,7 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
return -ENOMEM;
}
- rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+ rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
"PCI BARs (errno=%d)\n", rc);
diff --git a/trunk/drivers/mtd/ubi/eba.c b/trunk/drivers/mtd/ubi/eba.c
index 74002945b71b..3dba5733ed1f 100644
--- a/trunk/drivers/mtd/ubi/eba.c
+++ b/trunk/drivers/mtd/ubi/eba.c
@@ -940,6 +940,9 @@ static void ltree_entry_ctor(void *obj, struct kmem_cache *cache,
{
struct ltree_entry *le = obj;
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ return;
+
le->users = 0;
init_rwsem(&le->mutex);
}
diff --git a/trunk/drivers/net/e1000/e1000.h b/trunk/drivers/net/e1000/e1000.h
index 16a6edfeba41..a9ea67e75c1b 100644
--- a/trunk/drivers/net/e1000/e1000.h
+++ b/trunk/drivers/net/e1000/e1000.h
@@ -333,9 +333,11 @@ struct e1000_adapter {
struct e1000_tx_ring test_tx_ring;
struct e1000_rx_ring test_rx_ring;
+
int msg_enable;
+#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
-
+#endif
/* to not mess up cache alignment, always add to the bottom */
boolean_t tso_force;
boolean_t smart_power_down; /* phy smart power down */
diff --git a/trunk/drivers/net/e1000/e1000_main.c b/trunk/drivers/net/e1000/e1000_main.c
index 49be393e1c1d..637ae8f68791 100644
--- a/trunk/drivers/net/e1000/e1000_main.c
+++ b/trunk/drivers/net/e1000/e1000_main.c
@@ -158,7 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
@@ -298,26 +300,31 @@ module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- void (*handler) = &e1000_intr;
- int irq_flags = IRQF_SHARED;
- int err;
+ int flags, err = 0;
+ flags = IRQF_SHARED;
+#ifdef CONFIG_PCI_MSI
if (adapter->hw.mac_type >= e1000_82571) {
- adapter->have_msi = !pci_enable_msi(adapter->pdev);
- if (adapter->have_msi) {
- handler = &e1000_intr_msi;
- irq_flags = 0;
+ adapter->have_msi = TRUE;
+ if ((err = pci_enable_msi(adapter->pdev))) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = FALSE;
}
}
-
- err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
- netdev);
- if (err) {
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
+ if (adapter->have_msi) {
+ flags &= ~IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+ netdev->name, netdev);
+ if (err)
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+ } else
+#endif
+ if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
+ netdev->name, netdev)))
DPRINTK(PROBE, ERR,
"Unable to allocate interrupt Error: %d\n", err);
- }
return err;
}
@@ -328,8 +335,10 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
free_irq(adapter->pdev->irq, netdev);
+#ifdef CONFIG_PCI_MSI
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
+#endif
}
/**
@@ -3735,6 +3744,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+#ifdef CONFIG_PCI_MSI
/**
* e1000_intr_msi - Interrupt Handler
@@ -3800,6 +3810,7 @@ e1000_intr_msi(int irq, void *data)
return IRQ_HANDLED;
}
+#endif
/**
* e1000_intr - Interrupt Handler
diff --git a/trunk/drivers/net/gianfar.c b/trunk/drivers/net/gianfar.c
index f5b3cba23fc5..b666a0cc0642 100644
--- a/trunk/drivers/net/gianfar.c
+++ b/trunk/drivers/net/gianfar.c
@@ -1025,15 +1025,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
- /* The powerpc-specific eieio() is used, as wmb() has too strong
- * semantics (it requires synchronization between cacheable and
- * uncacheable mappings, which eieio doesn't provide and which we
- * don't need), thus requiring a more expensive sync instruction. At
- * some point, the set of architecture-independent barrier functions
- * should be expanded to include weaker barriers.
- */
-
- eieio();
txbdp->status = status;
/* If this was the last BD in the ring, the next one */
@@ -1310,7 +1301,6 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
bdp->length = 0;
/* Mark the buffer empty */
- eieio();
bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
return skb;
@@ -1494,7 +1484,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
bdp = priv->cur_rx;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
- rmb();
skb = priv->rx_skbuff[priv->skb_currx];
if (!(bdp->status &
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_core.c b/trunk/drivers/net/ibm_emac/ibm_emac_core.c
index f752e5fc65ba..50035ebd4f52 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_core.c
@@ -926,7 +926,7 @@ static int emac_link_differs(struct ocp_enet_private *dev)
int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
int speed, pause, asym_pause;
- if (r & EMAC_MR1_MF_1000)
+ if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
speed = SPEED_1000;
else if (r & EMAC_MR1_MF_100)
speed = SPEED_100;
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_mal.c b/trunk/drivers/net/ibm_emac/ibm_emac_mal.c
index cabd9846a5ee..6c0f071e4052 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -59,7 +59,8 @@ int __init mal_register_commac(struct ibm_ocp_mal *mal,
return 0;
}
-void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
+ struct mal_commac *commac)
{
unsigned long flags;
local_irq_save(flags);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_mal.h b/trunk/drivers/net/ibm_emac/ibm_emac_mal.h
index 64bc338acc6c..407d2acbf7c7 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -223,7 +223,8 @@ void mal_exit(void) __exit;
int mal_register_commac(struct ibm_ocp_mal *mal,
struct mal_commac *commac) __init;
-void mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac);
+void mal_unregister_commac(struct ibm_ocp_mal *mal,
+ struct mal_commac *commac) __exit;
int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
/* Returns BD ring offset for a particular channel
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_phy.c b/trunk/drivers/net/ibm_emac/ibm_emac_phy.c
index e57862b34cae..9074f76ee2bf 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -22,7 +22,6 @@
#include
-#include "ibm_emac_core.h"
#include "ibm_emac_phy.h"
static inline int phy_read(struct mii_phy *phy, int reg)
@@ -35,38 +34,10 @@ static inline void phy_write(struct mii_phy *phy, int reg, int val)
phy->mdio_write(phy->dev, phy->address, reg, val);
}
-/*
- * polls MII_BMCR until BMCR_RESET bit clears or operation times out.
- *
- * returns:
- * >= 0 => success, value in BMCR returned to caller
- * -EBUSY => failure, RESET bit never cleared
- * otherwise => failure, lower level PHY read failed
- */
-static int mii_spin_reset_complete(struct mii_phy *phy)
-{
- int val;
- int limit = 10000;
-
- while (limit--) {
- val = phy_read(phy, MII_BMCR);
- if (val >= 0 && !(val & BMCR_RESET))
- return val; /* success */
- udelay(10);
- }
- if (val & BMCR_RESET)
- val = -EBUSY;
-
- if (net_ratelimit())
- printk(KERN_ERR "emac%d: PHY reset timeout (%d)\n",
- ((struct ocp_enet_private *)phy->dev->priv)->def->index,
- val);
- return val;
-}
-
int mii_reset_phy(struct mii_phy *phy)
{
int val;
+ int limit = 10000;
val = phy_read(phy, MII_BMCR);
val &= ~BMCR_ISOLATE;
@@ -75,11 +46,16 @@ int mii_reset_phy(struct mii_phy *phy)
udelay(300);
- val = mii_spin_reset_complete(phy);
- if (val >= 0 && (val & BMCR_ISOLATE))
+ while (limit--) {
+ val = phy_read(phy, MII_BMCR);
+ if (val >= 0 && (val & BMCR_RESET) == 0)
+ break;
+ udelay(10);
+ }
+ if ((val & BMCR_ISOLATE) && limit > 0)
phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
- return val < 0;
+ return limit <= 0;
}
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
@@ -126,14 +102,8 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
}
/* Start/Restart aneg */
- /* on some PHYs (e.g. National DP83843) a write to MII_ADVERTISE
- * causes BMCR_RESET to be set on the next read of MII_BMCR, which
- * if not checked for causes the PHY to be reset below */
- ctl = mii_spin_reset_complete(phy);
- if (ctl < 0)
- return ctl;
-
- ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
+ ctl = phy_read(phy, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
phy_write(phy, MII_BMCR, ctl);
return 0;
@@ -148,13 +118,13 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
phy->duplex = fd;
phy->pause = phy->asym_pause = 0;
- /* First reset the PHY */
- mii_reset_phy(phy);
-
ctl = phy_read(phy, MII_BMCR);
if (ctl < 0)
return ctl;
- ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE | BMCR_SPEED1000);
+ ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
+
+ /* First reset the PHY */
+ phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
/* Select speed & duplex */
switch (speed) {
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c
index 9dbb5e5936c3..53d281cb9a16 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -162,7 +162,7 @@ void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
out_be32(&dev->base->ssr, ssr);
}
-void __rgmii_fini(struct ocp_device *ocpdev, int input)
+void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
{
struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
BUG_ON(!dev || dev->users == 0);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 971e45815c6c..117ea486c2ca 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -37,7 +37,7 @@ struct ibm_ocp_rgmii {
#ifdef CONFIG_IBM_EMAC_RGMII
int rgmii_attach(void *emac) __init;
-void __rgmii_fini(struct ocp_device *ocpdev, int input);
+void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
{
if (ocpdev)
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_tah.c b/trunk/drivers/net/ibm_emac/ibm_emac_tah.c
index 3c2d5ba522a1..e287b451bb44 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_tah.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -63,7 +63,7 @@ int __init tah_attach(void *emac)
return 0;
}
-void __tah_fini(struct ocp_device *ocpdev)
+void __exit __tah_fini(struct ocp_device *ocpdev)
{
struct tah_regs *p = ocp_get_drvdata(ocpdev);
BUG_ON(!p);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_tah.h b/trunk/drivers/net/ibm_emac/ibm_emac_tah.h
index ccf64915e1e4..38153945a240 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -55,7 +55,7 @@ struct tah_regs {
#ifdef CONFIG_IBM_EMAC_TAH
int tah_attach(void *emac) __init;
-void __tah_fini(struct ocp_device *ocpdev);
+void __tah_fini(struct ocp_device *ocpdev) __exit;
static inline void tah_fini(struct ocp_device *ocpdev)
{
if (ocpdev)
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c
index 2c0fdb0cabff..37dc8f342868 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -215,7 +215,7 @@ void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
out_be32(&dev->base->ssr, ssr);
}
-void __zmii_fini(struct ocp_device *ocpdev, int input)
+void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
{
struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
BUG_ON(!dev || dev->users == 0);
diff --git a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h
index fad6d8bf983a..972e3a44a09f 100644
--- a/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/trunk/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -40,7 +40,7 @@ struct ibm_ocp_zmii {
#ifdef CONFIG_IBM_EMAC_ZMII
int zmii_attach(void *emac) __init;
-void __zmii_fini(struct ocp_device *ocpdev, int input);
+void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
static inline void zmii_fini(struct ocp_device *ocpdev, int input)
{
if (ocpdev)
diff --git a/trunk/drivers/net/ixgb/ixgb.h b/trunk/drivers/net/ixgb/ixgb.h
index 3569d5b03388..c8e90861f869 100644
--- a/trunk/drivers/net/ixgb/ixgb.h
+++ b/trunk/drivers/net/ixgb/ixgb.h
@@ -193,6 +193,8 @@ struct ixgb_adapter {
u16 msg_enable;
struct ixgb_hw_stats stats;
uint32_t alloc_rx_buff_failed;
+#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
+#endif
};
#endif /* _IXGB_H_ */
diff --git a/trunk/drivers/net/ixgb/ixgb_main.c b/trunk/drivers/net/ixgb/ixgb_main.c
index 991c8833e23c..6d2b059371f1 100644
--- a/trunk/drivers/net/ixgb/ixgb_main.c
+++ b/trunk/drivers/net/ixgb/ixgb_main.c
@@ -227,7 +227,7 @@ int
ixgb_up(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int err, irq_flags = IRQF_SHARED;
+ int err;
int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
struct ixgb_hw *hw = &adapter->hw;
@@ -246,21 +246,26 @@ ixgb_up(struct ixgb_adapter *adapter)
/* disable interrupts and get the hardware into a known state */
IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
- /* only enable MSI if bus is in PCI-X mode */
- if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->have_msi = 1;
- irq_flags = 0;
- }
+#ifdef CONFIG_PCI_MSI
+ {
+ boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
+ IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
+ adapter->have_msi = TRUE;
+
+ if (!pcix)
+ adapter->have_msi = FALSE;
+ else if((err = pci_enable_msi(adapter->pdev))) {
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = FALSE;
/* proceed to try to request regular interrupt */
}
+ }
- err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags,
- netdev->name, netdev);
- if (err) {
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
+#endif
+ if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
+ IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ netdev->name, netdev))) {
DPRINTK(PROBE, ERR,
"Unable to allocate interrupt Error: %d\n", err);
return err;
@@ -302,10 +307,11 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
ixgb_irq_disable(adapter);
free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
+#ifdef CONFIG_PCI_MSI
+ if(adapter->have_msi == TRUE)
pci_disable_msi(adapter->pdev);
+#endif
if(kill_watchdog)
del_timer_sync(&adapter->watchdog_timer);
#ifdef CONFIG_IXGB_NAPI
diff --git a/trunk/drivers/net/netxen/netxen_nic_init.c b/trunk/drivers/net/netxen/netxen_nic_init.c
index a36892457761..cf0e96adfe44 100644
--- a/trunk/drivers/net/netxen/netxen_nic_init.c
+++ b/trunk/drivers/net/netxen/netxen_nic_init.c
@@ -1216,7 +1216,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
/* Window = 1 */
writel(consumer,
NETXEN_CRB_NORMALIZE(adapter,
- recv_crb_registers[adapter->portnum].
+ recv_crb_registers[ctxid].
crb_rcv_status_consumer));
}
diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c
index 832fd69a0e59..104e20456e6f 100644
--- a/trunk/drivers/net/sky2.c
+++ b/trunk/drivers/net/sky2.c
@@ -40,6 +40,7 @@
#include
#include
#include
+#include <linux/dmi.h>
#include
@@ -150,6 +151,8 @@ static const char *yukon2_name[] = {
"FE", /* 0xb7 */
};
+static int dmi_blacklisted;
+
/* Access to external PHY */
static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
{
@@ -304,13 +307,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
PHY_M_EC_MAC_S_MSK);
ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
- /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
if (hw->chip_id == CHIP_ID_YUKON_EC)
- /* set downshift counter to 3x and enable downshift */
ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
else
- /* set master & slave downshift counter to 1x */
- ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
+ ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
}
@@ -327,12 +327,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
/* enable automatic crossover */
ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
- /* downshift on PHY 88E1112 and 88E1149 is changed */
if (sky2->autoneg == AUTONEG_ENABLE
&& (hw->chip_id == CHIP_ID_YUKON_XL
|| hw->chip_id == CHIP_ID_YUKON_EC_U
|| hw->chip_id == CHIP_ID_YUKON_EX)) {
- /* set downshift counter to 3x and enable downshift */
ctrl &= ~PHY_M_PC_DSC_MSK;
ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
}
@@ -844,12 +842,10 @@ static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
/* Update chip's next pointer */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
{
- /* Make sure write' to descriptors are complete before we tell hardware */
+ q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
wmb();
- sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-
- /* Synchronize I/O on since next processor may write to tail */
- mmiowb();
+ sky2_write16(hw, q, idx);
+ sky2_read16(hw, q);
}
@@ -981,7 +977,6 @@ static void sky2_rx_stop(struct sky2_port *sky2)
/* reset the Rx prefetch unit */
sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
- mmiowb();
}
/* Clean out receive buffer area, assumes receiver hardware stopped */
@@ -1201,7 +1196,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
}
/* Tell chip about available buffers */
- sky2_put_idx(hw, rxq, sky2->rx_put);
+ sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
return 0;
nomem:
sky2_rx_clean(sky2);
@@ -1543,8 +1538,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
}
sky2->tx_cons = idx;
- smp_mb();
-
if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
netif_wake_queue(dev);
}
@@ -1584,6 +1577,13 @@ static int sky2_down(struct net_device *dev)
imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ /*
+ * Both ports share the NAPI poll on port 0, so if necessary undo the
+ * the disable that is done in dev_close.
+ */
+ if (sky2->port == 0 && hw->ports > 1)
+ netif_poll_enable(dev);
+
sky2_gmac_reset(hw, port);
/* Stop transmitter */
@@ -2139,10 +2139,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT:
skb = sky2_receive(dev, length, status);
- if (unlikely(!skb)) {
- sky2->net_stats.rx_dropped++;
+ if (!skb)
goto force_update;
- }
skb->protocol = eth_type_trans(skb, dev);
sky2->net_stats.rx_packets++;
@@ -2223,7 +2221,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
/* Fully processed status ring so clear irq */
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
- mmiowb();
exit_loop:
if (buf_write[0]) {
@@ -2344,12 +2341,6 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
dev->name, status);
- if (status & GM_IS_RX_CO_OV)
- gma_read16(hw, port, GM_RX_IRQ_SRC);
-
- if (status & GM_IS_TX_CO_OV)
- gma_read16(hw, port, GM_TX_IRQ_SRC);
-
if (status & GM_IS_RX_FF_OR) {
++sky2->net_stats.rx_fifo_errors;
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
@@ -2448,7 +2439,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
if (work_done < work_limit) {
netif_rx_complete(dev0);
- /* end of interrupt, re-enables also acts as I/O synchronization */
sky2_read32(hw, B0_Y2_SP_LISR);
return 0;
} else {
@@ -2544,6 +2534,17 @@ static int __devinit sky2_init(struct sky2_hw *hw)
return -EOPNOTSUPP;
}
+
+ /* Some Gigabyte motherboards have 88e8056 but cause problems
+ * There is some unresolved hardware related problem that causes
+ * descriptor errors and receive data corruption.
+ */
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && dmi_blacklisted) {
+ dev_err(&hw->pdev->dev,
+ "88E8056 on this motherboard not supported\n");
+ return -EOPNOTSUPP;
+ }
+
hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
hw->ports = 1;
t8 = sky2_read8(hw, B2_Y2_HW_RES);
@@ -3909,8 +3910,24 @@ static struct pci_driver sky2_driver = {
.shutdown = sky2_shutdown,
};
+static struct dmi_system_id __initdata broken_dmi_table[] = {
+ {
+ .ident = "Gigabyte 965P-S3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "965P-S3"),
+
+ },
+ },
+ { }
+};
+
static int __init sky2_init_module(void)
{
+ /* Look for sick motherboards */
+ if (dmi_check_system(broken_dmi_table))
+ dmi_blacklisted = 1;
+
return pci_register_driver(&sky2_driver);
}
diff --git a/trunk/drivers/net/spider_net.c b/trunk/drivers/net/spider_net.c
index c3964c3d89d9..108adbf5b5eb 100644
--- a/trunk/drivers/net/spider_net.c
+++ b/trunk/drivers/net/spider_net.c
@@ -430,8 +430,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
/* and we need to have it 128 byte aligned, therefore we allocate a
* bit more */
/* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
+ descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
if (!descr->skb) {
if (netif_msg_rx_err(card) && net_ratelimit())
pr_err("Not enough memory to allocate rx buffer\n");
diff --git a/trunk/drivers/rtc/Kconfig b/trunk/drivers/rtc/Kconfig
index 4e4c10a7fd3a..95ce8f49e382 100644
--- a/trunk/drivers/rtc/Kconfig
+++ b/trunk/drivers/rtc/Kconfig
@@ -59,7 +59,7 @@ comment "RTC interfaces"
depends on RTC_CLASS
config RTC_INTF_SYSFS
- boolean "/sys/class/rtc/rtcN (sysfs)"
+ boolean "sysfs"
depends on RTC_CLASS && SYSFS
default RTC_CLASS
help
@@ -70,7 +70,7 @@ config RTC_INTF_SYSFS
will be called rtc-sysfs.
config RTC_INTF_PROC
- boolean "/proc/driver/rtc (procfs for rtc0)"
+ boolean "proc"
depends on RTC_CLASS && PROC_FS
default RTC_CLASS
help
@@ -82,7 +82,7 @@ config RTC_INTF_PROC
will be called rtc-proc.
config RTC_INTF_DEV
- boolean "/dev/rtcN (character devices)"
+ boolean "dev"
depends on RTC_CLASS
default RTC_CLASS
help
diff --git a/trunk/drivers/rtc/rtc-omap.c b/trunk/drivers/rtc/rtc-omap.c
index a2f84f169588..60a8a4bb8bd2 100644
--- a/trunk/drivers/rtc/rtc-omap.c
+++ b/trunk/drivers/rtc/rtc-omap.c
@@ -371,7 +371,7 @@ static int __devinit omap_rtc_probe(struct platform_device *pdev)
goto fail;
}
platform_set_drvdata(pdev, rtc);
- dev_set_drvdata(&rtc->dev, mem);
+ dev_set_devdata(&rtc->dev, mem);
/* clear pending irqs, and set 1/second periodic,
* which we'll use instead of update irqs
@@ -453,7 +453,7 @@ static int __devexit omap_rtc_remove(struct platform_device *pdev)
free_irq(omap_rtc_timer, rtc);
free_irq(omap_rtc_alarm, rtc);
- release_resource(dev_get_drvdata(&rtc->dev));
+ release_resource(dev_get_devdata(&rtc->dev));
rtc_device_unregister(rtc);
return 0;
}
diff --git a/trunk/drivers/serial/8250.c b/trunk/drivers/serial/8250.c
index c84dab083a85..48e259a0167d 100644
--- a/trunk/drivers/serial/8250.c
+++ b/trunk/drivers/serial/8250.c
@@ -894,7 +894,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
quot = serial_dl_read(up);
quot <<= 3;
- status1 = serial_in(up, 0x04); /* EXCR2 */
+ status1 = serial_in(up, 0x04); /* EXCR1 */
status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
status1 |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
serial_outp(up, 0x04, status1);
@@ -2617,22 +2617,7 @@ void serial8250_suspend_port(int line)
*/
void serial8250_resume_port(int line)
{
- struct uart_8250_port *up = &serial8250_ports[line];
-
- if (up->capabilities & UART_NATSEMI) {
- unsigned char tmp;
-
- /* Ensure it's still in high speed mode */
- serial_outp(up, UART_LCR, 0xE0);
-
- tmp = serial_in(up, 0x04); /* EXCR2 */
- tmp &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
- tmp |= 0x10; /* 1.625 divisor for baud_base --> 921600 */
- serial_outp(up, 0x04, tmp);
-
- serial_outp(up, UART_LCR, 0);
- }
- uart_resume_port(&serial8250_reg, &up->port);
+ uart_resume_port(&serial8250_reg, &serial8250_ports[line].port);
}
/*
@@ -2709,7 +2694,7 @@ static int serial8250_resume(struct platform_device *dev)
struct uart_8250_port *up = &serial8250_ports[i];
if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
- serial8250_resume_port(i);
+ uart_resume_port(&serial8250_reg, &up->port);
}
return 0;
diff --git a/trunk/drivers/serial/icom.c b/trunk/drivers/serial/icom.c
index 9d3105b64a7a..6202995e8211 100644
--- a/trunk/drivers/serial/icom.c
+++ b/trunk/drivers/serial/icom.c
@@ -69,40 +69,33 @@
static const struct pci_device_id icom_pci_table[] = {
{
- .vendor = PCI_VENDOR_ID_IBM,
- .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = ADAPTER_V1,
- },
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = ADAPTER_V1,
+ },
{
- .vendor = PCI_VENDOR_ID_IBM,
- .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
- .subvendor = PCI_VENDOR_ID_IBM,
- .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
- .driver_data = ADAPTER_V2,
- },
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX,
+ .driver_data = ADAPTER_V2,
+ },
{
- .vendor = PCI_VENDOR_ID_IBM,
- .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
- .subvendor = PCI_VENDOR_ID_IBM,
- .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
- .driver_data = ADAPTER_V2,
- },
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM,
+ .driver_data = ADAPTER_V2,
+ },
{
- .vendor = PCI_VENDOR_ID_IBM,
- .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
- .subvendor = PCI_VENDOR_ID_IBM,
- .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
- .driver_data = ADAPTER_V2,
- },
- {
- .vendor = PCI_VENDOR_ID_IBM,
- .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
- .subvendor = PCI_VENDOR_ID_IBM,
- .subdevice = PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE,
- .driver_data = ADAPTER_V2,
- },
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL,
+ .driver_data = ADAPTER_V2,
+ },
{}
};
diff --git a/trunk/drivers/video/console/vgacon.c b/trunk/drivers/video/console/vgacon.c
index f46fe95f69fb..2460b82a1d93 100644
--- a/trunk/drivers/video/console/vgacon.c
+++ b/trunk/drivers/video/console/vgacon.c
@@ -368,14 +368,9 @@ static const char *vgacon_startup(void)
#endif
}
- /* SCREEN_INFO initialized? */
- if ((ORIG_VIDEO_MODE == 0) &&
- (ORIG_VIDEO_LINES == 0) &&
- (ORIG_VIDEO_COLS == 0))
- goto no_vga;
-
/* VGA16 modes are not handled by VGACON */
- if ((ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */
+ if ((ORIG_VIDEO_MODE == 0x00) || /* SCREEN_INFO not initialized */
+ (ORIG_VIDEO_MODE == 0x0D) || /* 320x200/4 */
(ORIG_VIDEO_MODE == 0x0E) || /* 640x200/4 */
(ORIG_VIDEO_MODE == 0x10) || /* 640x350/4 */
(ORIG_VIDEO_MODE == 0x12) || /* 640x480/4 */
diff --git a/trunk/fs/adfs/super.c b/trunk/fs/adfs/super.c
index de2ed5ca3351..30c296508497 100644
--- a/trunk/fs/adfs/super.c
+++ b/trunk/fs/adfs/super.c
@@ -232,7 +232,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
{
struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/affs/super.c b/trunk/fs/affs/super.c
index b800d451cd60..beff7d21e6e2 100644
--- a/trunk/fs/affs/super.c
+++ b/trunk/fs/affs/super.c
@@ -87,9 +87,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;
- init_MUTEX(&ei->i_link_lock);
- init_MUTEX(&ei->i_ext_lock);
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ init_MUTEX(&ei->i_link_lock);
+ init_MUTEX(&ei->i_ext_lock);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/afs/super.c b/trunk/fs/afs/super.c
index 8d47ad88a093..370cecc910db 100644
--- a/trunk/fs/afs/super.c
+++ b/trunk/fs/afs/super.c
@@ -451,15 +451,17 @@ static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
{
struct afs_vnode *vnode = _vnode;
- memset(vnode, 0, sizeof(*vnode));
- inode_init_once(&vnode->vfs_inode);
- init_waitqueue_head(&vnode->update_waitq);
- mutex_init(&vnode->permits_lock);
- mutex_init(&vnode->validate_lock);
- spin_lock_init(&vnode->writeback_lock);
- spin_lock_init(&vnode->lock);
- INIT_LIST_HEAD(&vnode->writebacks);
- INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ memset(vnode, 0, sizeof(*vnode));
+ inode_init_once(&vnode->vfs_inode);
+ init_waitqueue_head(&vnode->update_waitq);
+ mutex_init(&vnode->permits_lock);
+ mutex_init(&vnode->validate_lock);
+ spin_lock_init(&vnode->writeback_lock);
+ spin_lock_init(&vnode->lock);
+ INIT_LIST_HEAD(&vnode->writebacks);
+ INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
+ }
}
/*
diff --git a/trunk/fs/befs/linuxvfs.c b/trunk/fs/befs/linuxvfs.c
index a5c5171c2828..fe96108a788d 100644
--- a/trunk/fs/befs/linuxvfs.c
+++ b/trunk/fs/befs/linuxvfs.c
@@ -292,8 +292,10 @@ befs_destroy_inode(struct inode *inode)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct befs_inode_info *bi = (struct befs_inode_info *) foo;
-
- inode_init_once(&bi->vfs_inode);
+
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&bi->vfs_inode);
+ }
}
static void
diff --git a/trunk/fs/bfs/inode.c b/trunk/fs/bfs/inode.c
index 58c7bd9f5301..edc08d89aabc 100644
--- a/trunk/fs/bfs/inode.c
+++ b/trunk/fs/bfs/inode.c
@@ -248,7 +248,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
{
struct bfs_inode_info *bi = foo;
- inode_init_once(&bi->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&bi->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c
index ea1480a16f51..742899240872 100644
--- a/trunk/fs/block_dev.c
+++ b/trunk/fs/block_dev.c
@@ -458,15 +458,17 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
struct bdev_inode *ei = (struct bdev_inode *) foo;
struct block_device *bdev = &ei->bdev;
- memset(bdev, 0, sizeof(*bdev));
- mutex_init(&bdev->bd_mutex);
- sema_init(&bdev->bd_mount_sem, 1);
- INIT_LIST_HEAD(&bdev->bd_inodes);
- INIT_LIST_HEAD(&bdev->bd_list);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ memset(bdev, 0, sizeof(*bdev));
+ mutex_init(&bdev->bd_mutex);
+ sema_init(&bdev->bd_mount_sem, 1);
+ INIT_LIST_HEAD(&bdev->bd_inodes);
+ INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
- INIT_LIST_HEAD(&bdev->bd_holder_list);
+ INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
- inode_init_once(&ei->vfs_inode);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static inline void __bd_forget(struct inode *inode)
diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c
index 49590d590d7d..aecd057cd0e0 100644
--- a/trunk/fs/buffer.c
+++ b/trunk/fs/buffer.c
@@ -981,8 +981,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
struct page *page;
struct buffer_head *bh;
- page = find_or_create_page(inode->i_mapping, index,
- mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+ page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page)
return NULL;
@@ -2899,9 +2898,8 @@ static void recalc_bh_state(void)
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+ struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
- INIT_LIST_HEAD(&ret->b_assoc_buffers);
get_cpu_var(bh_accounting).nr++;
recalc_bh_state();
put_cpu_var(bh_accounting);
@@ -2920,6 +2918,17 @@ void free_buffer_head(struct buffer_head *bh)
}
EXPORT_SYMBOL(free_buffer_head);
+static void
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
+{
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ struct buffer_head * bh = (struct buffer_head *)data;
+
+ memset(bh, 0, sizeof(*bh));
+ INIT_LIST_HEAD(&bh->b_assoc_buffers);
+ }
+}
+
static void buffer_exit_cpu(int cpu)
{
int i;
@@ -2946,8 +2955,12 @@ void __init buffer_init(void)
{
int nrpages;
- bh_cachep = KMEM_CACHE(buffer_head,
- SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
+ bh_cachep = kmem_cache_create("buffer_head",
+ sizeof(struct buffer_head), 0,
+ (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+ SLAB_MEM_SPREAD),
+ init_buffer_head,
+ NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
diff --git a/trunk/fs/cifs/cifsfs.c b/trunk/fs/cifs/cifsfs.c
index d38c69b591cf..8568e100953c 100644
--- a/trunk/fs/cifs/cifsfs.c
+++ b/trunk/fs/cifs/cifsfs.c
@@ -701,8 +701,10 @@ cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
{
struct cifsInodeInfo *cifsi = inode;
- inode_init_once(&cifsi->vfs_inode);
- INIT_LIST_HEAD(&cifsi->lockList);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&cifsi->vfs_inode);
+ INIT_LIST_HEAD(&cifsi->lockList);
+ }
}
static int
diff --git a/trunk/fs/coda/inode.c b/trunk/fs/coda/inode.c
index dbff1bd4fb96..0aaff3651d14 100644
--- a/trunk/fs/coda/inode.c
+++ b/trunk/fs/coda/inode.c
@@ -62,7 +62,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
{
struct coda_inode_info *ei = (struct coda_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
int coda_init_inodecache(void)
diff --git a/trunk/fs/compat.c b/trunk/fs/compat.c
index 1de2331db844..7b21b0a82596 100644
--- a/trunk/fs/compat.c
+++ b/trunk/fs/compat.c
@@ -2230,16 +2230,21 @@ asmlinkage long compat_sys_signalfd(int ufd,
asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
const struct compat_itimerspec __user *utmr)
{
+ long res;
struct itimerspec t;
struct itimerspec __user *ut;
+ res = -EFAULT;
if (get_compat_itimerspec(&t, utmr))
- return -EFAULT;
+ goto err_exit;
ut = compat_alloc_user_space(sizeof(*ut));
- if (copy_to_user(ut, &t, sizeof(t)))
- return -EFAULT;
+ if (copy_to_user(ut, &t, sizeof(t)) )
+ goto err_exit;
- return sys_timerfd(ufd, clockid, flags, ut);
+ res = sys_timerfd(ufd, clockid, flags, ut);
+err_exit:
+ return res;
}
#endif /* CONFIG_TIMERFD */
+
diff --git a/trunk/fs/dquot.c b/trunk/fs/dquot.c
index 8819d281500c..3a995841de90 100644
--- a/trunk/fs/dquot.c
+++ b/trunk/fs/dquot.c
@@ -1421,7 +1421,7 @@ int vfs_quota_off(struct super_block *sb, int type)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_enabled(sb, cnt)) {
- mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock(&toputinode[cnt]->i_mutex);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
truncate_inode_pages(&toputinode[cnt]->i_data, 0);
diff --git a/trunk/fs/ecryptfs/main.c b/trunk/fs/ecryptfs/main.c
index 606128f5c927..8cbf3f69ebe5 100644
--- a/trunk/fs/ecryptfs/main.c
+++ b/trunk/fs/ecryptfs/main.c
@@ -583,7 +583,8 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
{
struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static struct ecryptfs_cache_info {
diff --git a/trunk/fs/ecryptfs/mmap.c b/trunk/fs/ecryptfs/mmap.c
index 88ea6697908f..0770c4b66f53 100644
--- a/trunk/fs/ecryptfs/mmap.c
+++ b/trunk/fs/ecryptfs/mmap.c
@@ -364,14 +364,18 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
{
struct inode *inode = page->mapping->host;
int end_byte_in_page;
+ char *page_virt;
if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
goto out;
end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
if (to > end_byte_in_page)
end_byte_in_page = to;
- zero_user_page(page, end_byte_in_page,
- PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
+ page_virt = kmap_atomic(page, KM_USER0);
+ memset((page_virt + end_byte_in_page), 0,
+ (PAGE_CACHE_SIZE - end_byte_in_page));
+ kunmap_atomic(page_virt, KM_USER0);
+ flush_dcache_page(page);
out:
return 0;
}
@@ -736,6 +740,7 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
{
int rc = 0;
struct page *tmp_page;
+ char *tmp_page_virt;
tmp_page = ecryptfs_get1page(file, index);
if (IS_ERR(tmp_page)) {
@@ -752,7 +757,10 @@ int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
page_cache_release(tmp_page);
goto out;
}
- zero_user_page(tmp_page, start, num_zeros, KM_USER0);
+ tmp_page_virt = kmap_atomic(tmp_page, KM_USER0);
+ memset(((char *)tmp_page_virt + start), 0, num_zeros);
+ kunmap_atomic(tmp_page_virt, KM_USER0);
+ flush_dcache_page(tmp_page);
rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros);
if (rc < 0) {
ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "
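
Both eCryptfs hunks open-code the same zeroing sequence in place of zero_user_page(). A hedged helper capturing it (a hypothetical zero_page_region(), essentially what memclear_highpage_flush() does, as used in the NFS hunks further down; needs <linux/highmem.h>):

	static void zero_page_region(struct page *page, unsigned int offset,
				     unsigned int len)
	{
		char *kaddr = kmap_atomic(page, KM_USER0); /* map, possibly highmem */

		memset(kaddr + offset, 0, len);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page); /* keep the data cache coherent with the write */
	}
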
diff --git a/trunk/fs/efs/super.c b/trunk/fs/efs/super.c
index e0a6839e68ae..ba7a8b9da0c1 100644
--- a/trunk/fs/efs/super.c
+++ b/trunk/fs/efs/super.c
@@ -72,7 +72,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct efs_inode_info *ei = (struct efs_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/exec.c b/trunk/fs/exec.c
index 0b685888ff6f..70fa36554c14 100644
--- a/trunk/fs/exec.c
+++ b/trunk/fs/exec.c
@@ -60,7 +60,7 @@
#endif
int core_uses_pid;
-char core_pattern[CORENAME_MAX_SIZE] = "core";
+char core_pattern[128] = "core";
int suid_dumpable = 0;
EXPORT_SYMBOL(suid_dumpable);
@@ -1264,6 +1264,8 @@ int set_binfmt(struct linux_binfmt *new)
EXPORT_SYMBOL(set_binfmt);
+#define CORENAME_MAX_SIZE 64
+
/* format_corename will inspect the pattern parameter, and output a
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
diff --git a/trunk/fs/ext2/super.c b/trunk/fs/ext2/super.c
index 16337bff0272..685a1c287177 100644
--- a/trunk/fs/ext2/super.c
+++ b/trunk/fs/ext2/super.c
@@ -160,11 +160,13 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
- rwlock_init(&ei->i_meta_lock);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ rwlock_init(&ei->i_meta_lock);
#ifdef CONFIG_EXT2_FS_XATTR
- init_rwsem(&ei->xattr_sem);
+ init_rwsem(&ei->xattr_sem);
#endif
- inode_init_once(&ei->vfs_inode);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/ext3/super.c b/trunk/fs/ext3/super.c
index 6e3062913a92..54d3c9041259 100644
--- a/trunk/fs/ext3/super.c
+++ b/trunk/fs/ext3/super.c
@@ -466,12 +466,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
- INIT_LIST_HEAD(&ei->i_orphan);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT3_FS_XATTR
- init_rwsem(&ei->xattr_sem);
+ init_rwsem(&ei->xattr_sem);
#endif
- mutex_init(&ei->truncate_mutex);
- inode_init_once(&ei->vfs_inode);
+ mutex_init(&ei->truncate_mutex);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c
index cb9afdd0e26e..719126932354 100644
--- a/trunk/fs/ext4/super.c
+++ b/trunk/fs/ext4/super.c
@@ -517,12 +517,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
- INIT_LIST_HEAD(&ei->i_orphan);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT4DEV_FS_XATTR
- init_rwsem(&ei->xattr_sem);
+ init_rwsem(&ei->xattr_sem);
#endif
- mutex_init(&ei->truncate_mutex);
- inode_init_once(&ei->vfs_inode);
+ mutex_init(&ei->truncate_mutex);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/fat/cache.c b/trunk/fs/fat/cache.c
index 3c9c8a15ec73..1959143c1d27 100644
--- a/trunk/fs/fat/cache.c
+++ b/trunk/fs/fat/cache.c
@@ -40,7 +40,8 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct fat_cache *cache = (struct fat_cache *)foo;
- INIT_LIST_HEAD(&cache->cache_list);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ INIT_LIST_HEAD(&cache->cache_list);
}
int __init fat_cache_init(void)
diff --git a/trunk/fs/fat/inode.c b/trunk/fs/fat/inode.c
index 479722d89667..2c55e8dce793 100644
--- a/trunk/fs/fat/inode.c
+++ b/trunk/fs/fat/inode.c
@@ -500,12 +500,14 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
- spin_lock_init(&ei->cache_lru_lock);
- ei->nr_caches = 0;
- ei->cache_valid_id = FAT_CACHE_VALID + 1;
- INIT_LIST_HEAD(&ei->cache_lru);
- INIT_HLIST_NODE(&ei->i_fat_hash);
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ spin_lock_init(&ei->cache_lru_lock);
+ ei->nr_caches = 0;
+ ei->cache_valid_id = FAT_CACHE_VALID + 1;
+ INIT_LIST_HEAD(&ei->cache_lru);
+ INIT_HLIST_NODE(&ei->i_fat_hash);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int __init fat_init_inodecache(void)
diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c
index c3a2ad0da43c..1397018ff476 100644
--- a/trunk/fs/fuse/inode.c
+++ b/trunk/fs/fuse/inode.c
@@ -687,7 +687,8 @@ static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
{
struct inode * inode = foo;
- inode_init_once(inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(inode);
}
static int __init fuse_fs_init(void)
diff --git a/trunk/fs/gfs2/main.c b/trunk/fs/gfs2/main.c
index 787a0edef100..e460487c0557 100644
--- a/trunk/fs/gfs2/main.c
+++ b/trunk/fs/gfs2/main.c
@@ -27,27 +27,29 @@
static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct gfs2_inode *ip = foo;
-
- inode_init_once(&ip->i_inode);
- spin_lock_init(&ip->i_spin);
- init_rwsem(&ip->i_rw_mutex);
- memset(ip->i_cache, 0, sizeof(ip->i_cache));
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&ip->i_inode);
+ spin_lock_init(&ip->i_spin);
+ init_rwsem(&ip->i_rw_mutex);
+ memset(ip->i_cache, 0, sizeof(ip->i_cache));
+ }
}
static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct gfs2_glock *gl = foo;
-
- INIT_HLIST_NODE(&gl->gl_list);
- spin_lock_init(&gl->gl_spin);
- INIT_LIST_HEAD(&gl->gl_holders);
- INIT_LIST_HEAD(&gl->gl_waiters1);
- INIT_LIST_HEAD(&gl->gl_waiters3);
- gl->gl_lvb = NULL;
- atomic_set(&gl->gl_lvb_count, 0);
- INIT_LIST_HEAD(&gl->gl_reclaim);
- INIT_LIST_HEAD(&gl->gl_ail_list);
- atomic_set(&gl->gl_ail_count, 0);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ INIT_HLIST_NODE(&gl->gl_list);
+ spin_lock_init(&gl->gl_spin);
+ INIT_LIST_HEAD(&gl->gl_holders);
+ INIT_LIST_HEAD(&gl->gl_waiters1);
+ INIT_LIST_HEAD(&gl->gl_waiters3);
+ gl->gl_lvb = NULL;
+ atomic_set(&gl->gl_lvb_count, 0);
+ INIT_LIST_HEAD(&gl->gl_reclaim);
+ INIT_LIST_HEAD(&gl->gl_ail_list);
+ atomic_set(&gl->gl_ail_count, 0);
+ }
}
/**
diff --git a/trunk/fs/hfs/super.c b/trunk/fs/hfs/super.c
index 92cf8751e428..4f1888f16cf0 100644
--- a/trunk/fs/hfs/super.c
+++ b/trunk/fs/hfs/super.c
@@ -434,7 +434,8 @@ static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
{
struct hfs_inode_info *i = p;
- inode_init_once(&i->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&i->vfs_inode);
}
static int __init init_hfs_fs(void)
diff --git a/trunk/fs/hfsplus/super.c b/trunk/fs/hfsplus/super.c
index ebd1b380cbbc..37afbec8a761 100644
--- a/trunk/fs/hfsplus/super.c
+++ b/trunk/fs/hfsplus/super.c
@@ -470,7 +470,8 @@ static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
{
struct hfsplus_inode_info *i = p;
- inode_init_once(&i->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&i->vfs_inode);
}
static int __init init_hfsplus_fs(void)
diff --git a/trunk/fs/hpfs/super.c b/trunk/fs/hpfs/super.c
index fca1165d7192..1b95f39fbc37 100644
--- a/trunk/fs/hpfs/super.c
+++ b/trunk/fs/hpfs/super.c
@@ -176,9 +176,11 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
- mutex_init(&ei->i_mutex);
- mutex_init(&ei->i_parent_mutex);
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ mutex_init(&ei->i_mutex);
+ mutex_init(&ei->i_parent_mutex);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/hugetlbfs/inode.c b/trunk/fs/hugetlbfs/inode.c
index aa083dd34e92..98959b87cdf8 100644
--- a/trunk/fs/hugetlbfs/inode.c
+++ b/trunk/fs/hugetlbfs/inode.c
@@ -556,7 +556,8 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c
index 9a012cc5b6cd..df2ef15d03d2 100644
--- a/trunk/fs/inode.c
+++ b/trunk/fs/inode.c
@@ -213,7 +213,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct inode * inode = (struct inode *) foo;
- inode_init_once(inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(inode);
}
/*
diff --git a/trunk/fs/isofs/inode.c b/trunk/fs/isofs/inode.c
index 5c3eecf7542e..e99f7ff4ecb4 100644
--- a/trunk/fs/isofs/inode.c
+++ b/trunk/fs/isofs/inode.c
@@ -77,7 +77,8 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
struct iso_inode_info *ei = foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/jffs2/super.c b/trunk/fs/jffs2/super.c
index 6488af43bc9b..45368f8bbe72 100644
--- a/trunk/fs/jffs2/super.c
+++ b/trunk/fs/jffs2/super.c
@@ -47,8 +47,10 @@ static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
- init_MUTEX(&ei->sem);
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ init_MUTEX(&ei->sem);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int jffs2_sync_fs(struct super_block *sb, int wait)
diff --git a/trunk/fs/jfs/jfs_metapage.c b/trunk/fs/jfs/jfs_metapage.c
index 43d4f69afbec..6b3acb0b5781 100644
--- a/trunk/fs/jfs/jfs_metapage.c
+++ b/trunk/fs/jfs/jfs_metapage.c
@@ -184,14 +184,16 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct metapage *mp = (struct metapage *)foo;
- mp->lid = 0;
- mp->lsn = 0;
- mp->flag = 0;
- mp->data = NULL;
- mp->clsn = 0;
- mp->log = NULL;
- set_bit(META_free, &mp->flag);
- init_waitqueue_head(&mp->wait);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ mp->lid = 0;
+ mp->lsn = 0;
+ mp->flag = 0;
+ mp->data = NULL;
+ mp->clsn = 0;
+ mp->log = NULL;
+ set_bit(META_free, &mp->flag);
+ init_waitqueue_head(&mp->wait);
+ }
}
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
diff --git a/trunk/fs/jfs/super.c b/trunk/fs/jfs/super.c
index 20e4ac1c79a3..ea9dc3e65dcf 100644
--- a/trunk/fs/jfs/super.c
+++ b/trunk/fs/jfs/super.c
@@ -752,18 +752,20 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
- memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
- INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
- init_rwsem(&jfs_ip->rdwrlock);
- mutex_init(&jfs_ip->commit_mutex);
- init_rwsem(&jfs_ip->xattr_sem);
- spin_lock_init(&jfs_ip->ag_lock);
- jfs_ip->active_ag = -1;
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
+ INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
+ init_rwsem(&jfs_ip->rdwrlock);
+ mutex_init(&jfs_ip->commit_mutex);
+ init_rwsem(&jfs_ip->xattr_sem);
+ spin_lock_init(&jfs_ip->ag_lock);
+ jfs_ip->active_ag = -1;
#ifdef CONFIG_JFS_POSIX_ACL
- jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
- jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
+ jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
+ jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
#endif
- inode_init_once(&jfs_ip->vfs_inode);
+ inode_init_once(&jfs_ip->vfs_inode);
+ }
}
static int __init init_jfs_fs(void)
diff --git a/trunk/fs/lockd/clntlock.c b/trunk/fs/lockd/clntlock.c
index d070b18e539d..f4d45d4d835b 100644
--- a/trunk/fs/lockd/clntlock.c
+++ b/trunk/fs/lockd/clntlock.c
@@ -153,7 +153,7 @@ nlmclnt_recovery(struct nlm_host *host)
if (!host->h_reclaiming++) {
nlm_get_host(host);
__module_get(THIS_MODULE);
- if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
+ if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
module_put(THIS_MODULE);
}
}
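
For reference, CLONE_KERNEL is defined in include/linux/sched.h as the default kernel-thread clone mask, so this change additionally shares the signal handlers with the parent:

	#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
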
diff --git a/trunk/fs/lockd/host.c b/trunk/fs/lockd/host.c
index 96070bff93fc..ad21c0713efa 100644
--- a/trunk/fs/lockd/host.c
+++ b/trunk/fs/lockd/host.c
@@ -221,7 +221,7 @@ nlm_bind_host(struct nlm_host *host)
host->h_nextrebind - jiffies);
}
} else {
- unsigned long increment = nlmsvc_timeout;
+ unsigned long increment = nlmsvc_timeout * HZ;
struct rpc_timeout timeparms = {
.to_initval = increment,
.to_increment = increment,
diff --git a/trunk/fs/lockd/xdr.c b/trunk/fs/lockd/xdr.c
index 5316e307a49d..9702956d206c 100644
--- a/trunk/fs/lockd/xdr.c
+++ b/trunk/fs/lockd/xdr.c
@@ -586,6 +586,10 @@ static struct rpc_version nlm_version3 = {
.procs = nlm_procedures,
};
+#ifdef CONFIG_LOCKD_V4
+extern struct rpc_version nlm_version4;
+#endif
+
static struct rpc_version * nlm_versions[] = {
[1] = &nlm_version1,
[3] = &nlm_version3,
diff --git a/trunk/fs/lockd/xdr4.c b/trunk/fs/lockd/xdr4.c
index 846fc1d639dd..ce1efdbe1b3a 100644
--- a/trunk/fs/lockd/xdr4.c
+++ b/trunk/fs/lockd/xdr4.c
@@ -123,8 +123,7 @@ static __be32 *
nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
- __u64 len, start;
- __s64 end;
+ __s64 len, start, end;
if (!(p = xdr_decode_string_inplace(p, &lock->caller,
&lock->len, NLM_MAXSTRLEN))
@@ -418,8 +417,7 @@ nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
if (resp->status == nlm_lck_denied) {
struct file_lock *fl = &resp->lock.fl;
u32 excl;
- __u64 start, len;
- __s64 end;
+ s64 start, end, len;
memset(&resp->lock, 0, sizeof(resp->lock));
locks_init_lock(fl);
diff --git a/trunk/fs/locks.c b/trunk/fs/locks.c
index 431a8b871fce..8ec16ab5ef74 100644
--- a/trunk/fs/locks.c
+++ b/trunk/fs/locks.c
@@ -203,6 +203,9 @@ static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
{
struct file_lock *lock = (struct file_lock *) foo;
+ if (!(flags & SLAB_CTOR_CONSTRUCTOR))
+ return;
+
locks_init_lock(lock);
}
diff --git a/trunk/fs/minix/inode.c b/trunk/fs/minix/inode.c
index be4044614ac8..2f4d43a2a310 100644
--- a/trunk/fs/minix/inode.c
+++ b/trunk/fs/minix/inode.c
@@ -73,7 +73,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct minix_inode_info *ei = (struct minix_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/ncpfs/inode.c b/trunk/fs/ncpfs/inode.c
index cf06eb9f050e..c29f00ad495d 100644
--- a/trunk/fs/ncpfs/inode.c
+++ b/trunk/fs/ncpfs/inode.c
@@ -60,8 +60,10 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
- mutex_init(&ei->open_mutex);
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ mutex_init(&ei->open_mutex);
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/nfs/callback.h b/trunk/fs/nfs/callback.h
index c2bb14e053e1..db3d7919c601 100644
--- a/trunk/fs/nfs/callback.h
+++ b/trunk/fs/nfs/callback.h
@@ -24,7 +24,7 @@ enum nfs4_callback_opnum {
};
struct cb_compound_hdr_arg {
- unsigned int taglen;
+ int taglen;
const char *tag;
unsigned int callback_ident;
unsigned nops;
@@ -32,7 +32,7 @@ struct cb_compound_hdr_arg {
struct cb_compound_hdr_res {
__be32 *status;
- unsigned int taglen;
+ int taglen;
const char *tag;
__be32 *nops;
};
diff --git a/trunk/fs/nfs/delegation.c b/trunk/fs/nfs/delegation.c
index 7f37d1bea83f..841c99a9b11c 100644
--- a/trunk/fs/nfs/delegation.c
+++ b/trunk/fs/nfs/delegation.c
@@ -226,7 +226,7 @@ void nfs_return_all_delegations(struct super_block *sb)
spin_unlock(&clp->cl_lock);
}
-static int nfs_do_expire_all_delegations(void *ptr)
+int nfs_do_expire_all_delegations(void *ptr)
{
struct nfs_client *clp = ptr;
struct nfs_delegation *delegation;
diff --git a/trunk/fs/nfs/dir.c b/trunk/fs/nfs/dir.c
index ac92e45432a3..3df428816559 100644
--- a/trunk/fs/nfs/dir.c
+++ b/trunk/fs/nfs/dir.c
@@ -607,7 +607,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
return res;
}
-static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
switch (origin) {
@@ -633,7 +633,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
* All directory operations under NFS are synchronous, so fsync()
* is a dummy operation.
*/
-static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
+int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
{
dfprintk(VFS, "NFS: fsync_dir(%s/%s) datasync %d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
diff --git a/trunk/fs/nfs/inode.c b/trunk/fs/nfs/inode.c
index 2b26ad7c9770..2a3fd9573207 100644
--- a/trunk/fs/nfs/inode.c
+++ b/trunk/fs/nfs/inode.c
@@ -1164,19 +1164,21 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct nfs_inode *nfsi = (struct nfs_inode *) foo;
- inode_init_once(&nfsi->vfs_inode);
- spin_lock_init(&nfsi->req_lock);
- INIT_LIST_HEAD(&nfsi->dirty);
- INIT_LIST_HEAD(&nfsi->commit);
- INIT_LIST_HEAD(&nfsi->open_files);
- INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
- INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
- INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
- atomic_set(&nfsi->data_updates, 0);
- nfsi->ndirty = 0;
- nfsi->ncommit = 0;
- nfsi->npages = 0;
- nfs4_init_once(nfsi);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&nfsi->vfs_inode);
+ spin_lock_init(&nfsi->req_lock);
+ INIT_LIST_HEAD(&nfsi->dirty);
+ INIT_LIST_HEAD(&nfsi->commit);
+ INIT_LIST_HEAD(&nfsi->open_files);
+ INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
+ INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
+ INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
+ atomic_set(&nfsi->data_updates, 0);
+ nfsi->ndirty = 0;
+ nfsi->ncommit = 0;
+ nfsi->npages = 0;
+ nfs4_init_once(nfsi);
+ }
}
static int __init nfs_init_inodecache(void)
diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c
index 648e0ac0f90e..d6a30e965787 100644
--- a/trunk/fs/nfs/nfs4proc.c
+++ b/trunk/fs/nfs/nfs4proc.c
@@ -790,7 +790,7 @@ static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openflags)
return -EACCES;
}
-static int nfs4_recover_expired_lease(struct nfs_server *server)
+int nfs4_recover_expired_lease(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
int ret;
@@ -2748,7 +2748,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
-static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs_client *clp = server->nfs_client;
int ret = errorcode;
diff --git a/trunk/fs/nfs/nfs4state.c b/trunk/fs/nfs/nfs4state.c
index 8ed79d5c54f9..5fffbdfa971f 100644
--- a/trunk/fs/nfs/nfs4state.c
+++ b/trunk/fs/nfs/nfs4state.c
@@ -104,7 +104,7 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
return cred;
}
-static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
diff --git a/trunk/fs/nfs/nfs4xdr.c b/trunk/fs/nfs/nfs4xdr.c
index 8003c91ccb9a..938f37166788 100644
--- a/trunk/fs/nfs/nfs4xdr.c
+++ b/trunk/fs/nfs/nfs4xdr.c
@@ -646,10 +646,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
{
__be32 *p;
- RESERVE_SPACE(8+NFS4_STATEID_SIZE);
+ RESERVE_SPACE(8+sizeof(arg->stateid->data));
WRITE32(OP_CLOSE);
WRITE32(arg->seqid->sequence->counter);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
return 0;
}
@@ -793,17 +793,17 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args)
WRITE64(nfs4_lock_length(args->fl));
WRITE32(args->new_lock_owner);
if (args->new_lock_owner){
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+20);
+ RESERVE_SPACE(40);
WRITE32(args->open_seqid->sequence->counter);
- WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(args->open_stateid->data, sizeof(args->open_stateid->data));
WRITE32(args->lock_seqid->sequence->counter);
WRITE64(args->lock_owner.clientid);
WRITE32(4);
WRITE32(args->lock_owner.id);
}
else {
- RESERVE_SPACE(NFS4_STATEID_SIZE+4);
- WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE);
+ RESERVE_SPACE(20);
+ WRITEMEM(args->lock_stateid->data, sizeof(args->lock_stateid->data));
WRITE32(args->lock_seqid->sequence->counter);
}
@@ -830,11 +830,11 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args)
{
__be32 *p;
- RESERVE_SPACE(12+NFS4_STATEID_SIZE+16);
+ RESERVE_SPACE(44);
WRITE32(OP_LOCKU);
WRITE32(nfs4_lock_type(args->fl, 0));
WRITE32(args->seqid->sequence->counter);
- WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(args->stateid->data, sizeof(args->stateid->data));
WRITE64(args->fl->fl_start);
WRITE64(nfs4_lock_length(args->fl));
@@ -966,9 +966,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid)
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
+ RESERVE_SPACE(4+sizeof(stateid->data));
WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
- WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(stateid->data, sizeof(stateid->data));
encode_string(xdr, name->len, name->name);
}
@@ -996,9 +996,9 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg)
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
+ RESERVE_SPACE(8+sizeof(arg->stateid->data));
WRITE32(OP_OPEN_CONFIRM);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
WRITE32(arg->seqid->sequence->counter);
return 0;
@@ -1008,9 +1008,9 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg)
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE+4);
+ RESERVE_SPACE(8+sizeof(arg->stateid->data));
WRITE32(OP_OPEN_DOWNGRADE);
- WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
WRITE32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->open_flags);
return 0;
@@ -1045,12 +1045,12 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
nfs4_stateid stateid;
__be32 *p;
- RESERVE_SPACE(NFS4_STATEID_SIZE);
+ RESERVE_SPACE(16);
if (ctx->state != NULL) {
nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
- WRITEMEM(stateid.data, NFS4_STATEID_SIZE);
+ WRITEMEM(stateid.data, sizeof(stateid.data));
} else
- WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
+ WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
}
static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args)
@@ -1079,10 +1079,10 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req)
int replen;
__be32 *p;
- RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20);
+ RESERVE_SPACE(32+sizeof(nfs4_verifier));
WRITE32(OP_READDIR);
WRITE64(readdir->cookie);
- WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE);
+ WRITEMEM(readdir->verifier.data, sizeof(readdir->verifier.data));
WRITE32(readdir->count >> 1); /* We're not doing readdirplus */
WRITE32(readdir->count);
WRITE32(2);
@@ -1190,9 +1190,9 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg)
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
+ RESERVE_SPACE(4+sizeof(zero_stateid.data));
WRITE32(OP_SETATTR);
- WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE);
+ WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
RESERVE_SPACE(2*4);
WRITE32(1);
WRITE32(FATTR4_WORD0_ACL);
@@ -1220,9 +1220,9 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server)
int status;
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
+ RESERVE_SPACE(4+sizeof(arg->stateid.data));
WRITE32(OP_SETATTR);
- WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
+ WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
if ((status = encode_attrs(xdr, arg->iap, server)))
return status;
@@ -1234,9 +1234,9 @@ static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid)
{
__be32 *p;
- RESERVE_SPACE(4 + NFS4_VERIFIER_SIZE);
+ RESERVE_SPACE(4 + sizeof(setclientid->sc_verifier->data));
WRITE32(OP_SETCLIENTID);
- WRITEMEM(setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
+ WRITEMEM(setclientid->sc_verifier->data, sizeof(setclientid->sc_verifier->data));
encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
RESERVE_SPACE(4);
@@ -1253,10 +1253,10 @@ static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state)
{
__be32 *p;
- RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE);
+ RESERVE_SPACE(12 + sizeof(client_state->cl_confirm.data));
WRITE32(OP_SETCLIENTID_CONFIRM);
WRITE64(client_state->cl_clientid);
- WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ WRITEMEM(client_state->cl_confirm.data, sizeof(client_state->cl_confirm.data));
return 0;
}
@@ -1284,10 +1284,10 @@ static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid)
{
__be32 *p;
- RESERVE_SPACE(4+NFS4_STATEID_SIZE);
+ RESERVE_SPACE(20);
WRITE32(OP_DELEGRETURN);
- WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
+ WRITEMEM(stateid->data, sizeof(stateid->data));
return 0;
}
@@ -2494,7 +2494,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
int i;
dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations);
for (i = loc->nservers; i < m; i++) {
- unsigned int len;
+ int len;
char *data;
status = decode_opaque_inline(xdr, &len, &data);
if (unlikely(status != 0))
@@ -2642,7 +2642,7 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
return 0;
}
-static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid)
+static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, int32_t *uid)
{
uint32_t len;
__be32 *p;
@@ -2667,7 +2667,7 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid)
return 0;
}
-static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *gid)
+static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, int32_t *gid)
{
uint32_t len;
__be32 *p;
@@ -2897,8 +2897,8 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
status = decode_op_hdr(xdr, OP_CLOSE);
if (status)
return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
return 0;
}
@@ -3186,8 +3186,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
status = decode_op_hdr(xdr, OP_LOCK);
if (status == 0) {
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
} else if (status == -NFS4ERR_DENIED)
return decode_lock_denied(xdr, NULL);
return status;
@@ -3209,8 +3209,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
status = decode_op_hdr(xdr, OP_LOCKU);
if (status == 0) {
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
}
return status;
}
@@ -3251,8 +3251,8 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
res->delegation_type = 0;
return 0;
}
- READ_BUF(NFS4_STATEID_SIZE+4);
- COPYMEM(res->delegation.data, NFS4_STATEID_SIZE);
+ READ_BUF(20);
+ COPYMEM(res->delegation.data, sizeof(res->delegation.data));
READ32(res->do_recall);
switch (delegation_type) {
case NFS4_OPEN_DELEGATE_READ:
@@ -3275,8 +3275,8 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
status = decode_op_hdr(xdr, OP_OPEN);
if (status)
return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
decode_change_info(xdr, &res->cinfo);
@@ -3302,8 +3302,8 @@ static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
if (status)
return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
return 0;
}
@@ -3315,8 +3315,8 @@ static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
if (status)
return status;
- READ_BUF(NFS4_STATEID_SIZE);
- COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
return 0;
}
@@ -3590,9 +3590,9 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
}
READ32(nfserr);
if (nfserr == NFS_OK) {
- READ_BUF(8 + NFS4_VERIFIER_SIZE);
+ READ_BUF(8 + sizeof(clp->cl_confirm.data));
READ64(clp->cl_clientid);
- COPYMEM(clp->cl_confirm.data, NFS4_VERIFIER_SIZE);
+ COPYMEM(clp->cl_confirm.data, sizeof(clp->cl_confirm.data));
} else if (nfserr == NFSERR_CLID_INUSE) {
uint32_t len;
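
All of the substitutions in this file rely on the stateid and verifier 'data' members being fixed-size arrays, so sizeof() on the member is the compile-time 16 (or 8) that the removed NFS4_STATEID_SIZE/NFS4_VERIFIER_SIZE macros spelled out. A sketch of a compile-time check documenting that assumption (nfs4_wire_size_check() is hypothetical):

	static inline void nfs4_wire_size_check(void)
	{
		/* sizeof() on the members constant-folds, so the
		 * RESERVE_SPACE() arithmetic above stays compile-time. */
		BUILD_BUG_ON(sizeof(((nfs4_stateid *)0)->data) != 16);
		BUILD_BUG_ON(sizeof(((nfs4_verifier *)0)->data) != 8);
	}
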
diff --git a/trunk/fs/nfs/read.c b/trunk/fs/nfs/read.c
index 7bd7cb95c034..9a55807b2a70 100644
--- a/trunk/fs/nfs/read.c
+++ b/trunk/fs/nfs/read.c
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
static
int nfs_return_empty_page(struct page *page)
{
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
unlock_page(page);
return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
pglen = PAGE_CACHE_SIZE - base;
for (;;) {
if (remainder <= pglen) {
- zero_user_page(*pages, base, remainder, KM_USER0);
+ memclear_highpage_flush(*pages, base, remainder);
break;
}
- zero_user_page(*pages, base, pglen, KM_USER0);
+ memclear_highpage_flush(*pages, base, pglen);
pages++;
remainder -= pglen;
pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
return PTR_ERR(new);
}
if (len < PAGE_CACHE_SIZE)
- zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+ memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
nfs_list_add_request(new, &one_request);
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
return PTR_ERR(new);
}
if (len < PAGE_CACHE_SIZE)
- zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+ memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
nfs_pageio_add_request(desc->pgio, new);
return 0;
}
diff --git a/trunk/fs/nfs/write.c b/trunk/fs/nfs/write.c
index b084c03ce493..de92b9509d94 100644
--- a/trunk/fs/nfs/write.c
+++ b/trunk/fs/nfs/write.c
@@ -58,7 +58,7 @@ struct nfs_write_data *nfs_commit_alloc(void)
return p;
}
-static void nfs_commit_rcu_free(struct rcu_head *head)
+void nfs_commit_rcu_free(struct rcu_head *head)
{
struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
if (p && (p->pagevec != &p->page_array[0]))
@@ -168,7 +168,7 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
if (count != nfs_page_length(page))
return;
if (count != PAGE_CACHE_SIZE)
- zero_user_page(page, count, PAGE_CACHE_SIZE - count, KM_USER0);
+ memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
SetPageUptodate(page);
}
@@ -922,7 +922,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
return 0;
out_bad:
while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
+ struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_redirty_request(req);
nfs_end_page_writeback(req->wb_page);
diff --git a/trunk/fs/ntfs/super.c b/trunk/fs/ntfs/super.c
index 4566b9182551..21d834e5ed73 100644
--- a/trunk/fs/ntfs/super.c
+++ b/trunk/fs/ntfs/super.c
@@ -3085,7 +3085,8 @@ static void ntfs_big_inode_init_once(void *foo, struct kmem_cache *cachep,
{
ntfs_inode *ni = (ntfs_inode *)foo;
- inode_init_once(VFS_I(ni));
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(VFS_I(ni));
}
/*
diff --git a/trunk/fs/ocfs2/dlm/dlmfs.c b/trunk/fs/ocfs2/dlm/dlmfs.c
index fd8cb1badc9b..5671cf9d6383 100644
--- a/trunk/fs/ocfs2/dlm/dlmfs.c
+++ b/trunk/fs/ocfs2/dlm/dlmfs.c
@@ -262,10 +262,12 @@ static void dlmfs_init_once(void *foo,
struct dlmfs_inode_private *ip =
(struct dlmfs_inode_private *) foo;
- ip->ip_dlm = NULL;
- ip->ip_parent = NULL;
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ ip->ip_dlm = NULL;
+ ip->ip_parent = NULL;
- inode_init_once(&ip->ip_vfs_inode);
+ inode_init_once(&ip->ip_vfs_inode);
+ }
}
static struct inode *dlmfs_alloc_inode(struct super_block *sb)
diff --git a/trunk/fs/ocfs2/super.c b/trunk/fs/ocfs2/super.c
index 86b559c7dce9..7c5e3f5d6634 100644
--- a/trunk/fs/ocfs2/super.c
+++ b/trunk/fs/ocfs2/super.c
@@ -937,29 +937,31 @@ static void ocfs2_inode_init_once(void *data,
{
struct ocfs2_inode_info *oi = data;
- oi->ip_flags = 0;
- oi->ip_open_count = 0;
- spin_lock_init(&oi->ip_lock);
- ocfs2_extent_map_init(&oi->vfs_inode);
- INIT_LIST_HEAD(&oi->ip_io_markers);
- oi->ip_created_trans = 0;
- oi->ip_last_trans = 0;
- oi->ip_dir_start_lookup = 0;
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ oi->ip_flags = 0;
+ oi->ip_open_count = 0;
+ spin_lock_init(&oi->ip_lock);
+ ocfs2_extent_map_init(&oi->vfs_inode);
+ INIT_LIST_HEAD(&oi->ip_io_markers);
+ oi->ip_created_trans = 0;
+ oi->ip_last_trans = 0;
+ oi->ip_dir_start_lookup = 0;
- init_rwsem(&oi->ip_alloc_sem);
- mutex_init(&oi->ip_io_mutex);
+ init_rwsem(&oi->ip_alloc_sem);
+ mutex_init(&oi->ip_io_mutex);
- oi->ip_blkno = 0ULL;
- oi->ip_clusters = 0;
+ oi->ip_blkno = 0ULL;
+ oi->ip_clusters = 0;
- ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
- ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
- ocfs2_lock_res_init_once(&oi->ip_data_lockres);
- ocfs2_lock_res_init_once(&oi->ip_open_lockres);
+ ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
+ ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
+ ocfs2_lock_res_init_once(&oi->ip_data_lockres);
+ ocfs2_lock_res_init_once(&oi->ip_open_lockres);
- ocfs2_metadata_cache_init(&oi->vfs_inode);
+ ocfs2_metadata_cache_init(&oi->vfs_inode);
- inode_init_once(&oi->vfs_inode);
+ inode_init_once(&oi->vfs_inode);
+ }
}
static int ocfs2_initialize_mem_caches(void)
diff --git a/trunk/fs/openpromfs/inode.c b/trunk/fs/openpromfs/inode.c
index e62397341c36..731a90e9f0cd 100644
--- a/trunk/fs/openpromfs/inode.c
+++ b/trunk/fs/openpromfs/inode.c
@@ -419,7 +419,8 @@ static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags)
{
struct op_inode_info *oi = (struct op_inode_info *) data;
- inode_init_once(&oi->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&oi->vfs_inode);
}
static int __init init_openprom_fs(void)
diff --git a/trunk/fs/proc/inode.c b/trunk/fs/proc/inode.c
index d5ce65c68d7b..b8171907c83b 100644
--- a/trunk/fs/proc/inode.c
+++ b/trunk/fs/proc/inode.c
@@ -109,7 +109,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct proc_inode *ei = (struct proc_inode *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
int __init proc_init_inodecache(void)
diff --git a/trunk/fs/qnx4/inode.c b/trunk/fs/qnx4/inode.c
index 8d256eb11813..75fc8498f2e2 100644
--- a/trunk/fs/qnx4/inode.c
+++ b/trunk/fs/qnx4/inode.c
@@ -536,7 +536,8 @@ static void init_once(void *foo, struct kmem_cache * cachep,
{
struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/quota.c b/trunk/fs/quota.c
index 9f237d6182c9..e9d88fd0eca8 100644
--- a/trunk/fs/quota.c
+++ b/trunk/fs/quota.c
@@ -157,6 +157,7 @@ static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static void quota_sync_sb(struct super_block *sb, int type)
{
int cnt;
+ struct inode *discard[MAXQUOTAS];
sb->s_qcop->quota_sync(sb, type);
/* This is not very clever (and fast) but currently I don't know about
@@ -166,21 +167,29 @@ static void quota_sync_sb(struct super_block *sb, int type)
sb->s_op->sync_fs(sb, 1);
sync_blockdev(sb->s_bdev);
- /*
- * Now when everything is written we can discard the pagecache so
- * that userspace sees the changes.
- */
+ /* Now when everything is written we can discard the pagecache so
+ * that userspace sees the changes. We need i_mutex and so we could
+ * not do it inside dqonoff_mutex. Moreover we need to be careful
+ * about races with quotaoff() (that is the reason why we have our own
+ * reference to inode). */
mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ discard[cnt] = NULL;
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_enabled(sb, cnt))
continue;
- mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
- truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
- mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+ discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]);
}
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (discard[cnt]) {
+ mutex_lock(&discard[cnt]->i_mutex);
+ truncate_inode_pages(&discard[cnt]->i_data, 0);
+ mutex_unlock(&discard[cnt]->i_mutex);
+ iput(discard[cnt]);
+ }
+ }
}
void sync_dquots(struct super_block *sb, int type)
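
The rewrite above is the usual idiom for touching an inode after dropping the lock that protected the pointer: take a reference with igrab() while still under the lock, do the i_mutex work outside it, then iput(). A condensed sketch (truncate_quota_file() and its parameters are hypothetical stand-ins for the dqonoff_mutex/files[cnt] pair above):

	static void truncate_quota_file(struct mutex *guard_mutex,
					struct inode *protected_inode)
	{
		struct inode *inode;

		mutex_lock(guard_mutex);
		inode = igrab(protected_inode);	/* NULL if inode is being torn down */
		mutex_unlock(guard_mutex);

		if (inode) {
			mutex_lock(&inode->i_mutex);
			truncate_inode_pages(&inode->i_data, 0);
			mutex_unlock(&inode->i_mutex);
			iput(inode);		/* drop our reference */
		}
	}
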
diff --git a/trunk/fs/reiserfs/super.c b/trunk/fs/reiserfs/super.c
index b4ac9119200e..c7762140c425 100644
--- a/trunk/fs/reiserfs/super.c
+++ b/trunk/fs/reiserfs/super.c
@@ -511,12 +511,14 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
- INIT_LIST_HEAD(&ei->i_prealloc_list);
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ INIT_LIST_HEAD(&ei->i_prealloc_list);
+ inode_init_once(&ei->vfs_inode);
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
- ei->i_acl_access = NULL;
- ei->i_acl_default = NULL;
+ ei->i_acl_access = NULL;
+ ei->i_acl_default = NULL;
#endif
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/romfs/inode.c b/trunk/fs/romfs/inode.c
index 2284e03342c6..804285190271 100644
--- a/trunk/fs/romfs/inode.c
+++ b/trunk/fs/romfs/inode.c
@@ -566,11 +566,12 @@ static void romfs_destroy_inode(struct inode *inode)
kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
}
-static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
- struct romfs_inode_info *ei = foo;
+ struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/smbfs/inode.c b/trunk/fs/smbfs/inode.c
index 5c9243a23b9b..424a3ddf86dd 100644
--- a/trunk/fs/smbfs/inode.c
+++ b/trunk/fs/smbfs/inode.c
@@ -70,7 +70,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct smb_inode_info *ei = (struct smb_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/sysv/inode.c b/trunk/fs/sysv/inode.c
index 564411693394..3152d7415606 100644
--- a/trunk/fs/sysv/inode.c
+++ b/trunk/fs/sysv/inode.c
@@ -322,7 +322,8 @@ static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
{
struct sysv_inode_info *si = (struct sysv_inode_info *)p;
- inode_init_once(&si->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&si->vfs_inode);
}
const struct super_operations sysv_sops = {
diff --git a/trunk/fs/udf/super.c b/trunk/fs/udf/super.c
index 3a743d854c17..9b8644a06e53 100644
--- a/trunk/fs/udf/super.c
+++ b/trunk/fs/udf/super.c
@@ -134,8 +134,10 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct udf_inode_info *ei = (struct udf_inode_info *) foo;
- ei->i_ext.i_data = NULL;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ ei->i_ext.i_data = NULL;
+ inode_init_once(&ei->vfs_inode);
+ }
}
static int init_inodecache(void)
diff --git a/trunk/fs/ufs/super.c b/trunk/fs/ufs/super.c
index 22ff6ed55ce9..be7c48c5f203 100644
--- a/trunk/fs/ufs/super.c
+++ b/trunk/fs/ufs/super.c
@@ -1237,7 +1237,8 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.c b/trunk/fs/xfs/linux-2.6/xfs_super.c
index bf9a9d5909be..14e2cbe5a8d5 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_super.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_super.c
@@ -360,7 +360,8 @@ xfs_fs_inode_init_once(
kmem_zone_t *zonep,
unsigned long flags)
{
- inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
STATIC int
diff --git a/trunk/include/acpi/acpi_numa.h b/trunk/include/acpi/acpi_numa.h
index b62cd36ff324..f9d2bde9a7bb 100644
--- a/trunk/include/acpi/acpi_numa.h
+++ b/trunk/include/acpi/acpi_numa.h
@@ -11,8 +11,11 @@
#define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
#endif
-extern int pxm_to_node(int);
-extern int node_to_pxm(int);
+extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
+extern int __cpuinitdata node_to_pxm_map[MAX_NUMNODES];
+
+extern int __cpuinit pxm_to_node(int);
+extern int __cpuinit node_to_pxm(int);
extern int __cpuinit acpi_map_pxm_to_node(int);
extern void __cpuinit acpi_unmap_pxm_to_node(int);
diff --git a/trunk/include/asm-powerpc/mpc8260.h b/trunk/include/asm-powerpc/mpc8260.h
index f1b83b09ab2e..e0d480790e12 100644
--- a/trunk/include/asm-powerpc/mpc8260.h
+++ b/trunk/include/asm-powerpc/mpc8260.h
@@ -5,8 +5,8 @@
* this one and the configuration switching is done here.
*/
#ifdef __KERNEL__
-#ifndef __ASM_PPC_MPC8260_H__
-#define __ASM_PPC_MPC8260_H__
+#ifndef __ASM_POWERPC_MPC8260_H__
+#define __ASM_POWERPC_MPC8260_H__
#ifdef CONFIG_8260
@@ -20,5 +20,5 @@
#endif
#endif /* CONFIG_8260 */
-#endif /* !__ASM_PPC_MPC8260_H__ */
+#endif /* !__ASM_POWERPC_MPC8260_H__ */
#endif /* __KERNEL__ */
diff --git a/trunk/include/asm-powerpc/pmac_feature.h b/trunk/include/asm-powerpc/pmac_feature.h
index d43d91beba9b..26bcb0aa164a 100644
--- a/trunk/include/asm-powerpc/pmac_feature.h
+++ b/trunk/include/asm-powerpc/pmac_feature.h
@@ -28,8 +28,8 @@
*/
#ifdef __KERNEL__
-#ifndef __PPC_ASM_PMAC_FEATURE_H
-#define __PPC_ASM_PMAC_FEATURE_H
+#ifndef __ASM_POWERPC_PMAC_FEATURE_H
+#define __ASM_POWERPC_PMAC_FEATURE_H
#include
#include
@@ -393,5 +393,5 @@ extern u32 __iomem *uninorth_base;
#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
-#endif /* __PPC_ASM_PMAC_FEATURE_H */
+#endif /* __ASM_POWERPC_PMAC_FEATURE_H */
#endif /* __KERNEL__ */
diff --git a/trunk/include/asm-powerpc/tsi108_irq.h b/trunk/include/asm-powerpc/tsi108_irq.h
index 3e4d04effa57..6ed93979fbe4 100644
--- a/trunk/include/asm-powerpc/tsi108_irq.h
+++ b/trunk/include/asm-powerpc/tsi108_irq.h
@@ -26,8 +26,8 @@
* demultiplexing on TSI108EMU/SVB boards.
*/
-#ifndef _ASM_PPC_TSI108_IRQ_H
-#define _ASM_PPC_TSI108_IRQ_H
+#ifndef _ASM_POWERPC_TSI108_IRQ_H
+#define _ASM_POWERPC_TSI108_IRQ_H
/*
* Tsi108 interrupts
@@ -121,4 +121,4 @@ typedef enum {
TSI108_IRQ_DIRECTED,
TSI108_IRQ_DISTRIBUTED,
} TSI108_IRQ_MODE;
-#endif /* _ASM_PPC_TSI108_IRQ_H */
+#endif /* _ASM_POWERPC_TSI108_IRQ_H */
diff --git a/trunk/include/asm-powerpc/tsi108_pci.h b/trunk/include/asm-powerpc/tsi108_pci.h
index a9f92f73232c..5653d7cc3e24 100644
--- a/trunk/include/asm-powerpc/tsi108_pci.h
+++ b/trunk/include/asm-powerpc/tsi108_pci.h
@@ -18,8 +18,8 @@
* MA 02111-1307 USA
*/
-#ifndef _ASM_PPC_TSI108_PCI_H
-#define _ASM_PPC_TSI108_PCI_H
+#ifndef _ASM_POWERPC_TSI108_PCI_H
+#define _ASM_POWERPC_TSI108_PCI_H
#include
@@ -42,4 +42,4 @@ extern void tsi108_pci_int_init(struct device_node *node);
extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
extern void tsi108_clear_pci_cfg_error(void);
-#endif /* _ASM_PPC_TSI108_PCI_H */
+#endif /* _ASM_POWERPC_TSI108_PCI_H */
diff --git a/trunk/include/asm-powerpc/unistd.h b/trunk/include/asm-powerpc/unistd.h
index 21f004aef508..908dfe4c2bfa 100644
--- a/trunk/include/asm-powerpc/unistd.h
+++ b/trunk/include/asm-powerpc/unistd.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_PPC_UNISTD_H_
-#define _ASM_PPC_UNISTD_H_
+#ifndef _ASM_POWERPC_UNISTD_H_
+#define _ASM_POWERPC_UNISTD_H_
/*
* This file contains the system call numbers.
@@ -381,4 +381,4 @@
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* _ASM_PPC_UNISTD_H_ */
+#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/trunk/include/linux/binfmts.h b/trunk/include/linux/binfmts.h
index e1a708337be3..2d956cd566ae 100644
--- a/trunk/include/linux/binfmts.h
+++ b/trunk/include/linux/binfmts.h
@@ -17,8 +17,6 @@ struct pt_regs;
#ifdef __KERNEL__
-#define CORENAME_MAX_SIZE 128
-
/*
* This structure is used to hold the arguments that are used when loading binaries.
*/
diff --git a/trunk/include/linux/kmalloc_sizes.h b/trunk/include/linux/kmalloc_sizes.h
index e576b848ce10..bda23e00ed71 100644
--- a/trunk/include/linux/kmalloc_sizes.h
+++ b/trunk/include/linux/kmalloc_sizes.h
@@ -19,27 +19,17 @@
CACHE(32768)
CACHE(65536)
CACHE(131072)
-#if KMALLOC_MAX_SIZE >= 262144
+#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
CACHE(262144)
#endif
-#if KMALLOC_MAX_SIZE >= 524288
+#ifndef CONFIG_MMU
CACHE(524288)
-#endif
-#if KMALLOC_MAX_SIZE >= 1048576
CACHE(1048576)
-#endif
-#if KMALLOC_MAX_SIZE >= 2097152
+#ifdef CONFIG_LARGE_ALLOCS
CACHE(2097152)
-#endif
-#if KMALLOC_MAX_SIZE >= 4194304
CACHE(4194304)
-#endif
-#if KMALLOC_MAX_SIZE >= 8388608
CACHE(8388608)
-#endif
-#if KMALLOC_MAX_SIZE >= 16777216
CACHE(16777216)
-#endif
-#if KMALLOC_MAX_SIZE >= 33554432
CACHE(33554432)
-#endif
+#endif /* CONFIG_LARGE_ALLOCS */
+#endif /* CONFIG_MMU */
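
kmalloc_sizes.h is an x-macro list: each consumer defines CACHE() before including it and the preprocessor expands one entry per size. Roughly how mm/slab.c builds its size table from it (condensed sketch; the real table also carries a DMA cache pointer):

	static struct cache_sizes malloc_sizes[] = {
	#define CACHE(x) { .cs_size = (x) },
	#include <linux/kmalloc_sizes.h>
		CACHE(ULONG_MAX)
	#undef CACHE
	};
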
diff --git a/trunk/include/linux/lockd/xdr4.h b/trunk/include/linux/lockd/xdr4.h
index 12bfe09de2b1..dd12b4c9e613 100644
--- a/trunk/include/linux/lockd/xdr4.h
+++ b/trunk/include/linux/lockd/xdr4.h
@@ -42,6 +42,5 @@ int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
*/
-extern struct rpc_version nlm_version4;
#endif /* LOCKD_XDR4_H */
diff --git a/trunk/include/linux/mii.h b/trunk/include/linux/mii.h
index 151b7e0182c7..beddc6d3b0f6 100644
--- a/trunk/include/linux/mii.h
+++ b/trunk/include/linux/mii.h
@@ -56,8 +56,8 @@
#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */
#define BMSR_RESV 0x00c0 /* Unused... */
#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
-#define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */
-#define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */
+#define BMSR_100FULL2 0x0200 /* Can do 100BASE-T2 HDX */
+#define BMSR_100HALF2 0x0400 /* Can do 100BASE-T2 FDX */
#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
diff --git a/trunk/include/linux/nfs4.h b/trunk/include/linux/nfs4.h
index 7e7f33a38fc0..1be5be88debe 100644
--- a/trunk/include/linux/nfs4.h
+++ b/trunk/include/linux/nfs4.h
@@ -16,7 +16,6 @@
#include
#define NFS4_VERIFIER_SIZE 8
-#define NFS4_STATEID_SIZE 16
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
@@ -114,7 +113,7 @@ struct nfs4_acl {
};
typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
-typedef struct { char data[NFS4_STATEID_SIZE]; } nfs4_stateid;
+typedef struct { char data[16]; } nfs4_stateid;
enum nfs_opnum4 {
OP_ACCESS = 3,
diff --git a/trunk/include/linux/pci_ids.h b/trunk/include/linux/pci_ids.h
index 62b3e008e641..3b1fbf49fa7d 100644
--- a/trunk/include/linux/pci_ids.h
+++ b/trunk/include/linux/pci_ids.h
@@ -471,7 +471,6 @@
#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219
#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
-#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
diff --git a/trunk/include/linux/rmap.h b/trunk/include/linux/rmap.h
index 97347f22fc20..bdd277223af0 100644
--- a/trunk/include/linux/rmap.h
+++ b/trunk/include/linux/rmap.h
@@ -74,14 +74,17 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, struct vm_area_struct *);
-#ifdef CONFIG_DEBUG_VM
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
-#else
-static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_rmap,
+ * avoiding unnecessary tests (already checked) so it's quicker.
+ */
+static inline void page_dup_rmap(struct page *page)
{
atomic_inc(&page->_mapcount);
}
-#endif
/*
* Called from mm/vmscan.c to handle paging out
diff --git a/trunk/include/linux/slab.h b/trunk/include/linux/slab.h
index a015236cc572..71829efc40ba 100644
--- a/trunk/include/linux/slab.h
+++ b/trunk/include/linux/slab.h
@@ -32,6 +32,9 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+/* Flags passed to a constructor function */
+#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
+
/*
* struct kmem_cache related prototypes
*/
@@ -73,21 +76,6 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
}
#endif
-/*
- * The largest kmalloc size supported by the slab allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
- */
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT) : 25)
-
-#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
-#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
-
/*
* Common kmalloc functions provided by all allocators
*/
@@ -245,6 +233,9 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#endif /* DEBUG_SLAB */
+extern const struct seq_operations slabinfo_op;
+ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SLAB_H */
diff --git a/trunk/include/linux/slab_def.h b/trunk/include/linux/slab_def.h
index 8d81a60518e4..5e4364644ed1 100644
--- a/trunk/include/linux/slab_def.h
+++ b/trunk/include/linux/slab_def.h
@@ -109,7 +109,4 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
#endif /* CONFIG_NUMA */
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
-
#endif /* _LINUX_SLAB_DEF_H */
diff --git a/trunk/include/linux/slub_def.h b/trunk/include/linux/slub_def.h
index 0764c829d967..c6c1f4a120e3 100644
--- a/trunk/include/linux/slub_def.h
+++ b/trunk/include/linux/slub_def.h
@@ -40,6 +40,7 @@ struct kmem_cache {
int objects; /* Number of objects in slab */
int refcount; /* Refcount for slab cache destroy */
void (*ctor)(void *, struct kmem_cache *, unsigned long);
+ void (*dtor)(void *, struct kmem_cache *, unsigned long);
int inuse; /* Offset to metadata */
int align; /* Alignment */
const char *name; /* Name (only for display!) */
@@ -58,6 +59,17 @@ struct kmem_cache {
*/
#define KMALLOC_SHIFT_LOW 3
+#ifdef CONFIG_LARGE_ALLOCS
+#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+ (MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#else
+#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
+#define KMALLOC_SHIFT_HIGH 20
+#else
+#define KMALLOC_SHIFT_HIGH 18
+#endif
+#endif
+
/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
@@ -68,7 +80,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
* Sorry that the following has to be that ugly but some versions of GCC
* have trouble with constant propagation and loops.
*/
-static inline int kmalloc_index(size_t size)
+static inline int kmalloc_index(int size)
{
/*
* We should return 0 if size == 0 but we use the smallest object
@@ -76,7 +88,7 @@ static inline int kmalloc_index(size_t size)
*/
WARN_ON_ONCE(size == 0);
- if (size > KMALLOC_MAX_SIZE)
+ if (size > (1 << KMALLOC_SHIFT_HIGH))
return -1;
if (size > 64 && size <= 96)
@@ -99,13 +111,17 @@ static inline int kmalloc_index(size_t size)
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
+#if KMALLOC_SHIFT_HIGH > 18
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
+#endif
+#if KMALLOC_SHIFT_HIGH > 20
if (size <= 2 * 1024 * 1024) return 21;
if (size <= 4 * 1024 * 1024) return 22;
if (size <= 8 * 1024 * 1024) return 23;
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;
+#endif
return -1;
/*
@@ -130,12 +146,7 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
if (index == 0)
return NULL;
- /*
- * This function only gets expanded if __builtin_constant_p(size), so
- * testing it here shouldn't be needed. But some versions of gcc need
- * help.
- */
- if (__builtin_constant_p(size) && index < 0) {
+ if (index < 0) {
/*
* Generate a link failure. Would be great if we could
* do something to stop the compile here.
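
The comparison ladder above exists so that kmalloc_index() folds to a constant whenever the size is known at compile time; kmalloc_slab() then reduces to a direct array reference. A hedged illustration (alloc_one_page_buf() is hypothetical):

	static inline void *alloc_one_page_buf(gfp_t flags)
	{
		/* kmalloc_index(4096) folds to 12, so this compiles to
		 * kmem_cache_alloc(&kmalloc_caches[12], flags) with no
		 * runtime comparisons. */
		return kmem_cache_alloc(kmalloc_slab(4096), flags);
	}
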
diff --git a/trunk/include/linux/smp.h b/trunk/include/linux/smp.h
index 96ac21f8dd73..3f70149eabbb 100644
--- a/trunk/include/linux/smp.h
+++ b/trunk/include/linux/smp.h
@@ -6,7 +6,6 @@
* Alan Cox.
*/
-#include
extern void cpu_idle(void);
@@ -100,9 +99,11 @@ static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
- void *info, int retry, int wait)
+ void *info, int retry, int wait)
{
- return -EBUSY;
+ /* Disable interrupts here? */
+ func(info);
+ return 0;
}
#endif /* !SMP */
diff --git a/trunk/include/linux/sunrpc/rpc_pipe_fs.h b/trunk/include/linux/sunrpc/rpc_pipe_fs.h
index ad293760f6eb..4a68125b6de6 100644
--- a/trunk/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/trunk/include/linux/sunrpc/rpc_pipe_fs.h
@@ -47,8 +47,6 @@ extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct rpc_pipe_ops *, int flags);
extern int rpc_unlink(struct dentry *);
extern struct vfsmount *rpc_get_mount(void);
extern void rpc_put_mount(void);
-extern int register_rpc_pipefs(void);
-extern void unregister_rpc_pipefs(void);
#endif
#endif
diff --git a/trunk/include/linux/sunrpc/xprt.h b/trunk/include/linux/sunrpc/xprt.h
index 34f7590506fa..fa89ce6ce076 100644
--- a/trunk/include/linux/sunrpc/xprt.h
+++ b/trunk/include/linux/sunrpc/xprt.h
@@ -244,8 +244,6 @@ void xprt_disconnect(struct rpc_xprt *xprt);
*/
struct rpc_xprt * xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
struct rpc_xprt * xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
-int init_socket_xprt(void);
-void cleanup_socket_xprt(void);
/*
* Reserved bit positions in xprt->state
diff --git a/trunk/include/linux/workqueue.h b/trunk/include/linux/workqueue.h
index ce0719a2cfeb..d555f31c0746 100644
--- a/trunk/include/linux/workqueue.h
+++ b/trunk/include/linux/workqueue.h
@@ -122,7 +122,7 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
int singlethread,
int freezeable);
#define create_workqueue(name) __create_workqueue((name), 0, 0)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
extern void destroy_workqueue(struct workqueue_struct *wq);
@@ -160,7 +160,7 @@ static inline int cancel_delayed_work(struct delayed_work *work)
{
int ret;
- ret = del_timer_sync(&work->timer);
+ ret = del_timer(&work->timer);
if (ret)
work_clear_pending(&work->work);
return ret;
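
Since cancel_delayed_work() above now uses del_timer() rather than
del_timer_sync(), it no longer waits for a timer handler running on another
CPU, and the work item may still be queued or executing when it returns. A
sketch (not from the patch, names hypothetical) of the usual pattern for
callers that must be sure the work has finished:

static struct workqueue_struct *my_wq;
static struct delayed_work my_work;

static void stop_my_work(void)
{
	cancel_delayed_work(&my_work);	/* stop it if still pending */
	flush_workqueue(my_wq);		/* wait out a queued/running instance */
}
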
diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig
index a9e99f8328ff..4e009fde4b69 100644
--- a/trunk/init/Kconfig
+++ b/trunk/init/Kconfig
@@ -567,6 +567,7 @@ config SLAB
a slab allocator.
config SLUB
+ depends on EXPERIMENTAL && !ARCH_USES_SLAB_PAGE_STRUCT
bool "SLUB (Unqueued Allocator)"
help
SLUB is a slab allocator that minimizes cache line usage
@@ -576,11 +577,14 @@ config SLUB
and has enhanced diagnostics.
config SLOB
- depends on EMBEDDED && !SPARSEMEM
+#
+# SLOB does not support SMP because SLAB_DESTROY_BY_RCU is unsupported
+#
+ depends on EMBEDDED && !SMP && !SPARSEMEM
bool "SLOB (Simple Allocator)"
help
SLOB replaces the SLAB allocator with a drastically simpler
- allocator. SLOB is more space efficient than SLAB but does not
+ allocator. SLOB is more space efficient than SLAB but does not
scale well (single lock for all operations) and is also highly
susceptible to fragmentation. SLUB can accomplish a higher object
density. It is usually better to use SLUB instead of SLOB.
diff --git a/trunk/ipc/mqueue.c b/trunk/ipc/mqueue.c
index a242c83d89d6..fab5707cb5f7 100644
--- a/trunk/ipc/mqueue.c
+++ b/trunk/ipc/mqueue.c
@@ -215,7 +215,8 @@ static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags
{
struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
- inode_init_once(&p->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
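
This hunk, and the matching ones in kernel/fork.c, mm/rmap.c, mm/shmem.c,
net/socket.c and net/sunrpc/rpc_pipe.c below, restore the same guard: with
the constructor hook again invocable for more than one reason, an object must
only be initialized when SLAB_CTOR_CONSTRUCTOR is set. The general shape, as
a sketch (not from the patch, "struct foo" hypothetical):

struct foo {
	spinlock_t lock;
	struct list_head list;
};

static void foo_ctor(void *obj, struct kmem_cache *cachep,
		     unsigned long flags)
{
	struct foo *f = obj;

	/* Initialize only on a real constructor call. */
	if (flags & SLAB_CTOR_CONSTRUCTOR) {
		spin_lock_init(&f->lock);
		INIT_LIST_HEAD(&f->list);
	}
}
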
diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c
index 87069cfc18a1..49530e40ea8b 100644
--- a/trunk/kernel/fork.c
+++ b/trunk/kernel/fork.c
@@ -1427,8 +1427,10 @@ static void sighand_ctor(void *data, struct kmem_cache *cachep,
{
struct sighand_struct *sighand = data;
- spin_lock_init(&sighand->siglock);
- INIT_LIST_HEAD(&sighand->signalfd_list);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ spin_lock_init(&sighand->siglock);
+ INIT_LIST_HEAD(&sighand->signalfd_list);
+ }
}
void __init proc_caches_init(void)
diff --git a/trunk/kernel/power/disk.c b/trunk/kernel/power/disk.c
index f445b9cd60fb..b5f0543ed84d 100644
--- a/trunk/kernel/power/disk.c
+++ b/trunk/kernel/power/disk.c
@@ -416,8 +416,7 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
mutex_lock(&pm_mutex);
for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
- if (len == strlen(hibernation_modes[i])
- && !strncmp(buf, hibernation_modes[i], len)) {
+ if (!strncmp(buf, hibernation_modes[i], len)) {
mode = i;
break;
}
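
Dropping the "len == strlen(...)" test, here and in kernel/power/main.c
below, means the comparison degrades to a prefix match: strncmp() only
examines len bytes, so writing "di" now selects "disk". A user-space sketch
(not from the patch) of the behaviour:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "di";		/* what the user wrote */
	size_t len = strlen(buf);

	/* Matches even though buf is only a prefix of "disk". */
	if (!strncmp(buf, "disk", len))
		printf("\"%s\" selects \"disk\"\n", buf);
	return 0;
}
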
diff --git a/trunk/kernel/power/main.c b/trunk/kernel/power/main.c
index 8812985f3029..b98b80ccf437 100644
--- a/trunk/kernel/power/main.c
+++ b/trunk/kernel/power/main.c
@@ -290,13 +290,13 @@ static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
len = p ? p - buf : n;
/* First, check if we are requested to hibernate */
- if (len == 4 && !strncmp(buf, "disk", len)) {
+ if (!strncmp(buf, "disk", len)) {
error = hibernate();
return error ? error : n;
}
for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
- if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
+ if (*s && !strncmp(buf, *s, len))
break;
}
if (state < PM_SUSPEND_MAX && *s)
diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c
index 30ee462ee79f..4073353abd4f 100644
--- a/trunk/kernel/sysctl.c
+++ b/trunk/kernel/sysctl.c
@@ -227,7 +227,7 @@ static ctl_table kern_table[] = {
.ctl_name = KERN_CORE_PATTERN,
.procname = "core_pattern",
.data = core_pattern,
- .maxlen = CORENAME_MAX_SIZE,
+ .maxlen = 128,
.mode = 0644,
.proc_handler = &proc_dostring,
.strategy = &sysctl_string,
diff --git a/trunk/mm/memory.c b/trunk/mm/memory.c
index cb94488ab96d..1d647ab0ee72 100644
--- a/trunk/mm/memory.c
+++ b/trunk/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
- page_dup_rmap(page, vma, addr);
+ page_dup_rmap(page);
rss[!!PageAnon(page)]++;
}
diff --git a/trunk/mm/rmap.c b/trunk/mm/rmap.c
index 850165d32b7a..304f51985c78 100644
--- a/trunk/mm/rmap.c
+++ b/trunk/mm/rmap.c
@@ -162,10 +162,12 @@ void anon_vma_unlink(struct vm_area_struct *vma)
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
unsigned long flags)
{
- struct anon_vma *anon_vma = data;
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ struct anon_vma *anon_vma = data;
- spin_lock_init(&anon_vma->lock);
- INIT_LIST_HEAD(&anon_vma->head);
+ spin_lock_init(&anon_vma->lock);
+ INIT_LIST_HEAD(&anon_vma->head);
+ }
}
void __init anon_vma_init(void)
@@ -529,52 +531,20 @@ static void __page_set_anon_rmap(struct page *page,
__inc_zone_page_state(page, NR_ANON_PAGES);
}
-/**
- * page_set_anon_rmap - sanity check anonymous rmap addition
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @address: the user virtual address mapped
- */
-static void __page_check_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
-{
-#ifdef CONFIG_DEBUG_VM
- /*
- * The page's anon-rmap details (mapping and index) are guaranteed to
- * be set up correctly at this point.
- *
- * We have exclusion against page_add_anon_rmap because the caller
- * always holds the page locked, except if called from page_dup_rmap,
- * in which case the page is already known to be setup.
- *
- * We have exclusion against page_add_new_anon_rmap because those pages
- * are initially only visible via the pagetables, and the pte is locked
- * over the call to page_add_new_anon_rmap.
- */
- struct anon_vma *anon_vma = vma->anon_vma;
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- BUG_ON(page->mapping != (struct address_space *)anon_vma);
- BUG_ON(page->index != linear_page_index(vma, address));
-#endif
-}
-
/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock.
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- VM_BUG_ON(!PageLocked(page));
- VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
if (atomic_inc_and_test(&page->_mapcount))
__page_set_anon_rmap(page, vma, address);
- else
- __page_check_anon_rmap(page, vma, address);
+ /* else checking page index and mapping is racy */
}
/*
@@ -585,12 +555,10 @@ void page_add_anon_rmap(struct page *page,
*
* Same as page_add_anon_rmap but must only be called on *new* pages.
* This means the inc-and-test can be bypassed.
- * Page does not have to be locked.
*/
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
__page_set_anon_rmap(page, vma, address);
}
@@ -607,26 +575,6 @@ void page_add_file_rmap(struct page *page)
__inc_zone_page_state(page, NR_FILE_MAPPED);
}
-#ifdef CONFIG_DEBUG_VM
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page: the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_file_rmap /
- * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
- * quicker.
- *
- * The caller needs to hold the pte lock.
- */
-void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
-{
- BUG_ON(page_mapcount(page) == 0);
- if (PageAnon(page))
- __page_check_anon_rmap(page, vma, address);
- atomic_inc(&page->_mapcount);
-}
-#endif
-
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
diff --git a/trunk/mm/shmem.c b/trunk/mm/shmem.c
index e537317bec4d..f01e8deed645 100644
--- a/trunk/mm/shmem.c
+++ b/trunk/mm/shmem.c
@@ -2358,11 +2358,13 @@ static void init_once(void *foo, struct kmem_cache *cachep,
{
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
- inode_init_once(&p->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
- p->i_acl = NULL;
- p->i_default_acl = NULL;
+ p->i_acl = NULL;
+ p->i_default_acl = NULL;
#endif
+ }
}
static int init_inodecache(void)
diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index 528243e15cc8..944b20581f8c 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -409,6 +409,9 @@ struct kmem_cache {
/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);
+ /* de-constructor func */
+ void (*dtor) (void *, struct kmem_cache *, unsigned long);
+
/* 5) cache creation/removal */
const char *name;
struct list_head next;
@@ -568,6 +571,21 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
+/*
+ * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
+ * order.
+ */
+#if defined(CONFIG_LARGE_ALLOCS)
+#define MAX_OBJ_ORDER 13 /* up to 32Mb */
+#define MAX_GFP_ORDER 13 /* up to 32Mb */
+#elif defined(CONFIG_MMU)
+#define MAX_OBJ_ORDER 5 /* 32 pages */
+#define MAX_GFP_ORDER 5 /* 32 pages */
+#else
+#define MAX_OBJ_ORDER 8 /* up to 1Mb */
+#define MAX_GFP_ORDER 8 /* up to 1Mb */
+#endif
+
/*
* Do not go above this order unless 0 objects fit into the slab.
*/
@@ -774,7 +792,6 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
- WARN_ON_ONCE(size == 0);
while (size > csizep->cs_size)
csizep++;
@@ -1894,11 +1911,20 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
slab_error(cachep, "end of a freed object "
"was overwritten");
}
+ if (cachep->dtor && !(cachep->flags & SLAB_POISON))
+ (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
}
}
#else
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
+ if (cachep->dtor) {
+ int i;
+ for (i = 0; i < cachep->num; i++) {
+ void *objp = index_to_obj(cachep, slabp, i);
+ (cachep->dtor) (objp, cachep, 0);
+ }
+ }
}
#endif
@@ -1987,7 +2013,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t left_over = 0;
int gfporder;
- for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
+ for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
@@ -2098,7 +2124,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects (not implemented anymore).
+ * @dtor: A destructor for the objects.
*
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
@@ -2133,7 +2159,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- size > KMALLOC_MAX_SIZE || dtor) {
+ (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();
@@ -2187,6 +2213,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif
+ if (flags & SLAB_DESTROY_BY_RCU)
+ BUG_ON(dtor);
+
/*
* Always checks flags, a caller might be expecting debug support which
* isn't available.
@@ -2341,6 +2370,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG_ON(!cachep->slabp_cache);
}
cachep->ctor = ctor;
+ cachep->dtor = dtor;
cachep->name = name;
if (setup_cpu_cache(cachep)) {
@@ -2595,7 +2625,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
}
static void cache_init_objs(struct kmem_cache *cachep,
- struct slab *slabp)
+ struct slab *slabp, unsigned long ctor_flags)
{
int i;
@@ -2619,7 +2649,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp + obj_offset(cachep), cachep,
- 0);
+ ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2635,7 +2665,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
cachep->buffer_size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
- cachep->ctor(objp, cachep, 0);
+ cachep->ctor(objp, cachep, ctor_flags);
#endif
slab_bufctl(slabp)[i] = i + 1;
}
@@ -2724,6 +2754,7 @@ static int cache_grow(struct kmem_cache *cachep,
struct slab *slabp;
size_t offset;
gfp_t local_flags;
+ unsigned long ctor_flags;
struct kmem_list3 *l3;
/*
@@ -2732,6 +2763,7 @@ static int cache_grow(struct kmem_cache *cachep,
*/
BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+ ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & GFP_LEVEL_MASK);
/* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
@@ -2776,7 +2808,7 @@ static int cache_grow(struct kmem_cache *cachep,
slabp->nodeid = nodeid;
slab_map_pages(cachep, slabp, objp);
- cache_init_objs(cachep, slabp);
+ cache_init_objs(cachep, slabp, ctor_flags);
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2803,6 +2835,7 @@ static int cache_grow(struct kmem_cache *cachep,
* Perform extra freeing checks:
* - detect bad pointers.
* - POISON/RED_ZONE checking
+ * - destructor calls, for caches with POISON+dtor
*/
static void kfree_debugcheck(const void *objp)
{
@@ -2861,6 +2894,12 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
+ if (cachep->flags & SLAB_POISON && cachep->dtor) {
+ /* we want to cache poison the object,
+ * call the destruction callback
+ */
+ cachep->dtor(objp + obj_offset(cachep), cachep, 0);
+ }
#ifdef CONFIG_DEBUG_SLAB_LEAK
slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
@@ -3060,7 +3099,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
- cachep->ctor(objp, cachep, 0);
+ cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
#if ARCH_SLAB_MINALIGN
if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/trunk/mm/slob.c b/trunk/mm/slob.c
index 71976c5d40d3..c6933bc19bcd 100644
--- a/trunk/mm/slob.c
+++ b/trunk/mm/slob.c
@@ -35,7 +35,6 @@
#include
#include
#include
-#include <linux/rcupdate.h>
struct slob_block {
int units;
@@ -54,16 +53,6 @@ struct bigblock {
};
typedef struct bigblock bigblock_t;
-/*
- * struct slob_rcu is inserted at the tail of allocated slob blocks, which
- * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
- * the block using call_rcu.
- */
-struct slob_rcu {
- struct rcu_head head;
- int size;
-};
-
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
@@ -277,9 +266,9 @@ size_t ksize(const void *block)
struct kmem_cache {
unsigned int size, align;
- unsigned long flags;
const char *name;
void (*ctor)(void *, struct kmem_cache *, unsigned long);
+ void (*dtor)(void *, struct kmem_cache *, unsigned long);
};
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -294,12 +283,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (c) {
c->name = name;
c->size = size;
- if (flags & SLAB_DESTROY_BY_RCU) {
- /* leave room for rcu footer at the end of object */
- c->size += sizeof(struct slob_rcu);
- }
- c->flags = flags;
c->ctor = ctor;
+ c->dtor = dtor;
/* ignore alignment unless it's forced */
c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < align)
@@ -327,7 +312,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
b = (void *)__get_free_pages(flags, get_order(c->size));
if (c->ctor)
- c->ctor(b, c, 0);
+ c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
return b;
}
@@ -343,33 +328,15 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_zalloc);
-static void __kmem_cache_free(void *b, int size)
-{
- if (size < PAGE_SIZE)
- slob_free(b, size);
- else
- free_pages((unsigned long)b, get_order(size));
-}
-
-static void kmem_rcu_free(struct rcu_head *head)
-{
- struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
- void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
-
- __kmem_cache_free(b, slob_rcu->size);
-}
-
void kmem_cache_free(struct kmem_cache *c, void *b)
{
- if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
- struct slob_rcu *slob_rcu;
- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
- INIT_RCU_HEAD(&slob_rcu->head);
- slob_rcu->size = c->size;
- call_rcu(&slob_rcu->head, kmem_rcu_free);
- } else {
- __kmem_cache_free(b, c->size);
- }
+ if (c->dtor)
+ c->dtor(b, c, 0);
+
+ if (c->size < PAGE_SIZE)
+ slob_free(b, c->size);
+ else
+ free_pages((unsigned long)b, get_order(c->size));
}
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 98801d404d69..5e3e8bc9838f 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -78,18 +78,10 @@
*
* Overloading of page flags that are otherwise used for LRU management.
*
- * PageActive The slab is frozen and exempt from list processing.
- * This means that the slab is dedicated to a purpose
- * such as satisfying allocations for a specific
- * processor. Objects may be freed in the slab while
- * it is frozen but slab_free will then skip the usual
- * list operations. It is up to the processor holding
- * the slab to integrate the slab into the slab lists
- * when the slab is no longer needed.
- *
- * One use of this flag is to mark slabs that are
- * used for allocations. Then such a slab becomes a cpu
- * slab. The cpu slab may be equipped with an additional
+ * PageActive The slab is used as a cpu cache. Allocations
+ * may be performed from the slab. The slab is not
+ * on any slab list and cannot be moved onto one.
+ * The cpu slab may be equipped with an additional
* lockless_freelist that allows lockless access to
* free objects in addition to the regular freelist
* that requires the slab lock.
@@ -99,42 +91,27 @@
* the fast path and disables lockless freelists.
*/
-#define FROZEN (1 << PG_active)
-
+static inline int SlabDebug(struct page *page)
+{
#ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG (1 << PG_error)
+ return PageError(page);
#else
-#define SLABDEBUG 0
+ return 0;
#endif
-
-static inline int SlabFrozen(struct page *page)
-{
- return page->flags & FROZEN;
-}
-
-static inline void SetSlabFrozen(struct page *page)
-{
- page->flags |= FROZEN;
-}
-
-static inline void ClearSlabFrozen(struct page *page)
-{
- page->flags &= ~FROZEN;
-}
-
-static inline int SlabDebug(struct page *page)
-{
- return page->flags & SLABDEBUG;
}
static inline void SetSlabDebug(struct page *page)
{
- page->flags |= SLABDEBUG;
+#ifdef CONFIG_SLUB_DEBUG
+ SetPageError(page);
+#endif
}
static inline void ClearSlabDebug(struct page *page)
{
- page->flags &= ~SLABDEBUG;
+#ifdef CONFIG_SLUB_DEBUG
+ ClearPageError(page);
+#endif
}
/*
@@ -742,22 +719,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
return search == NULL;
}
-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
-{
- if (s->flags & SLAB_TRACE) {
- printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
- s->name,
- alloc ? "alloc" : "free",
- object, page->inuse,
- page->freelist);
-
- if (!alloc)
- print_section("Object", (void *)object, s->objsize);
-
- dump_stack();
- }
-}
-
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
@@ -782,18 +743,8 @@ static void remove_full(struct kmem_cache *s, struct page *page)
spin_unlock(&n->list_lock);
}
-static void setup_object_debug(struct kmem_cache *s, struct page *page,
- void *object)
-{
- if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
- return;
-
- init_object(s, object, 0);
- init_tracking(s, object);
-}
-
-static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
- void *object, void *addr)
+static int alloc_object_checks(struct kmem_cache *s, struct page *page,
+ void *object)
{
if (!check_slab(s, page))
goto bad;
@@ -808,16 +759,13 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
goto bad;
}
- if (object && !check_object(s, page, object, 0))
+ if (!object)
+ return 1;
+
+ if (!check_object(s, page, object, 0))
goto bad;
- /* Success perform special debug activities for allocs */
- if (s->flags & SLAB_STORE_USER)
- set_track(s, object, TRACK_ALLOC, addr);
- trace(s, page, object, 1);
- init_object(s, object, 1);
return 1;
-
bad:
if (PageSlab(page)) {
/*
@@ -835,8 +783,8 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
return 0;
}
-static int free_debug_processing(struct kmem_cache *s, struct page *page,
- void *object, void *addr)
+static int free_object_checks(struct kmem_cache *s, struct page *page,
+ void *object)
{
if (!check_slab(s, page))
goto fail;
@@ -870,22 +818,29 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
"to slab %s", object, page->slab->name);
goto fail;
}
-
- /* Special debug activities for freeing objects */
- if (!SlabFrozen(page) && !page->freelist)
- remove_full(s, page);
- if (s->flags & SLAB_STORE_USER)
- set_track(s, object, TRACK_FREE, addr);
- trace(s, page, object, 0);
- init_object(s, object, 0);
return 1;
-
fail:
printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
s->name, page, object);
return 0;
}
+static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+{
+ if (s->flags & SLAB_TRACE) {
+ printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+ s->name,
+ alloc ? "alloc" : "free",
+ object, page->inuse,
+ page->freelist);
+
+ if (!alloc)
+ print_section("Object", (void *)object, s->objsize);
+
+ dump_stack();
+ }
+}
+
static int __init setup_slub_debug(char *str)
{
if (!str || *str != '=')
@@ -936,13 +891,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
* On 32 bit platforms the limit is 256k. On 64bit platforms
* the limit is 512k.
*
- * Debugging or ctor may create a need to move the free
+ * Debugging or ctor/dtors may create a need to move the free
* pointer. Fail if this happens.
*/
if (s->size >= 65535 * sizeof(void *)) {
BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
- BUG_ON(s->ctor);
+ BUG_ON(s->ctor || s->dtor);
}
else
/*
@@ -954,20 +909,26 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
s->flags |= slub_debug;
}
#else
-static inline void setup_object_debug(struct kmem_cache *s,
- struct page *page, void *object) {}
-static inline int alloc_debug_processing(struct kmem_cache *s,
- struct page *page, void *object, void *addr) { return 0; }
+static inline int alloc_object_checks(struct kmem_cache *s,
+ struct page *page, void *object) { return 0; }
-static inline int free_debug_processing(struct kmem_cache *s,
- struct page *page, void *object, void *addr) { return 0; }
+static inline int free_object_checks(struct kmem_cache *s,
+ struct page *page, void *object) { return 0; }
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void trace(struct kmem_cache *s, struct page *page,
+ void *object, int alloc) {}
+static inline void init_object(struct kmem_cache *s,
+ void *object, int active) {}
+static inline void init_tracking(struct kmem_cache *s, void *object) {}
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void set_track(struct kmem_cache *s, void *object,
+ enum track_item alloc, void *addr) {}
static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
#define slub_debug 0
#endif
@@ -1004,9 +965,13 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
- setup_object_debug(s, page, object);
+ if (SlabDebug(page)) {
+ init_object(s, object, 0);
+ init_tracking(s, object);
+ }
+
if (unlikely(s->ctor))
- s->ctor(object, s, 0);
+ s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1065,12 +1030,15 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
{
int pages = 1 << s->order;
- if (unlikely(SlabDebug(page))) {
+ if (unlikely(SlabDebug(page) || s->dtor)) {
void *p;
slab_pad_check(s, page);
- for_each_object(p, s, page_address(page))
+ for_each_object(p, s, page_address(page)) {
+ if (s->dtor)
+ s->dtor(p, s, 0);
check_object(s, page, p, 0);
+ }
}
mod_zone_page_state(page_zone(page),
@@ -1170,12 +1138,11 @@ static void remove_partial(struct kmem_cache *s,
*
* Must hold list_lock.
*/
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
+static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
{
if (slab_trylock(page)) {
list_del(&page->lru);
n->nr_partial--;
- SetSlabFrozen(page);
return 1;
}
return 0;
@@ -1199,7 +1166,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
- if (lock_and_freeze_slab(n, page))
+ if (lock_and_del_slab(n, page))
goto out;
page = NULL;
out:
@@ -1278,11 +1245,10 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
*
* On exit the slab lock will have been dropped.
*/
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void putback_slab(struct kmem_cache *s, struct page *page)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- ClearSlabFrozen(page);
if (page->inuse) {
if (page->freelist)
@@ -1333,7 +1299,9 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
page->inuse--;
}
s->cpu_slab[cpu] = NULL;
- unfreeze_slab(s, page);
+ ClearPageActive(page);
+
+ putback_slab(s, page);
}
static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1424,7 +1392,9 @@ static void *__slab_alloc(struct kmem_cache *s,
new_slab:
page = get_partial(s, gfpflags, node);
if (page) {
+have_slab:
s->cpu_slab[cpu] = page;
+ SetPageActive(page);
goto load_freelist;
}
@@ -1454,15 +1424,17 @@ static void *__slab_alloc(struct kmem_cache *s,
flush_slab(s, s->cpu_slab[cpu], cpu);
}
slab_lock(page);
- SetSlabFrozen(page);
- s->cpu_slab[cpu] = page;
- goto load_freelist;
+ goto have_slab;
}
return NULL;
debug:
object = page->freelist;
- if (!alloc_debug_processing(s, page, object, addr))
+ if (!alloc_object_checks(s, page, object))
goto another_slab;
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, object, TRACK_ALLOC, addr);
+ trace(s, page, object, 1);
+ init_object(s, object, 1);
page->inuse++;
page->freelist = object[page->offset];
@@ -1539,7 +1511,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
page->freelist = object;
page->inuse--;
- if (unlikely(SlabFrozen(page)))
+ if (unlikely(PageActive(page)))
+ /*
+ * Cpu slabs are never on partial lists and are
+ * never freed.
+ */
goto out_unlock;
if (unlikely(!page->inuse))
@@ -1569,8 +1545,14 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
return;
debug:
- if (!free_debug_processing(s, page, x, addr))
+ if (!free_object_checks(s, page, x))
goto out_unlock;
+ if (!PageActive(page) && !page->freelist)
+ remove_full(s, page);
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, x, TRACK_FREE, addr);
+ trace(s, page, object, 0);
+ init_object(s, object, 0);
goto checks_ok;
}
@@ -1807,7 +1789,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
page->freelist = get_freepointer(kmalloc_caches, n);
page->inuse++;
kmalloc_caches->node[node] = n;
- setup_object_debug(kmalloc_caches, page, n);
+ init_object(kmalloc_caches, n, 1);
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
add_partial(n, page);
@@ -1889,7 +1871,7 @@ static int calculate_sizes(struct kmem_cache *s)
* then we should never poison the object itself.
*/
if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
- !s->ctor)
+ !s->ctor && !s->dtor)
s->flags |= __OBJECT_POISON;
else
s->flags &= ~__OBJECT_POISON;
@@ -1919,7 +1901,7 @@ static int calculate_sizes(struct kmem_cache *s)
#ifdef CONFIG_SLUB_DEBUG
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
- s->ctor)) {
+ s->ctor || s->dtor)) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
@@ -1988,11 +1970,13 @@ static int calculate_sizes(struct kmem_cache *s)
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
const char *name, size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long),
+ void (*dtor)(void *, struct kmem_cache *, unsigned long))
{
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
+ s->dtor = dtor;
s->objsize = size;
s->flags = flags;
s->align = align;
@@ -2177,7 +2161,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags, NULL))
+ flags, NULL, NULL))
goto panic;
list_add(&s->list, &slab_caches);
@@ -2479,7 +2463,7 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
- if (s->ctor)
+ if (s->ctor || s->dtor)
return 1;
return 0;
@@ -2487,14 +2471,15 @@ static int slab_unmergeable(struct kmem_cache *s)
static struct kmem_cache *find_mergeable(size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long),
+ void (*dtor)(void *, struct kmem_cache *, unsigned long))
{
struct list_head *h;
if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
return NULL;
- if (ctor)
+ if (ctor || dtor)
return NULL;
size = ALIGN(size, sizeof(void *));
@@ -2536,9 +2521,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *s;
- BUG_ON(dtor);
down_write(&slub_lock);
- s = find_mergeable(size, align, flags, ctor);
+ s = find_mergeable(size, align, flags, ctor, dtor);
if (s) {
s->refcount++;
/*
@@ -2552,7 +2536,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
} else {
s = kmalloc(kmem_size, GFP_KERNEL);
if (s && kmem_cache_open(s, GFP_KERNEL, name,
- size, align, flags, ctor)) {
+ size, align, flags, ctor, dtor)) {
if (sysfs_slab_add(s)) {
kfree(s);
goto err;
@@ -3193,6 +3177,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(ctor);
+static ssize_t dtor_show(struct kmem_cache *s, char *buf)
+{
+ if (s->dtor) {
+ int n = sprint_symbol(buf, (unsigned long)s->dtor);
+
+ return n + sprintf(buf + n, "\n");
+ }
+ return 0;
+}
+SLAB_ATTR_RO(dtor);
+
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3424,6 +3419,7 @@ static struct attribute * slab_attrs[] = {
&partial_attr.attr,
&cpu_slabs_attr.attr,
&ctor_attr.attr,
+ &dtor_attr.attr,
&aliases_attr.attr,
&align_attr.attr,
&sanity_checks_attr.attr,
diff --git a/trunk/mm/vmalloc.c b/trunk/mm/vmalloc.c
index d3a9c5368257..faa2a521dea3 100644
--- a/trunk/mm/vmalloc.c
+++ b/trunk/mm/vmalloc.c
@@ -311,7 +311,7 @@ struct vm_struct *remove_vm_area(void *addr)
return v;
}
-static void __vunmap(void *addr, int deallocate_pages)
+void __vunmap(void *addr, int deallocate_pages)
{
struct vm_struct *area;
diff --git a/trunk/net/bluetooth/hci_sock.c b/trunk/net/bluetooth/hci_sock.c
index 1dae3dfc66a9..bfc9a35bad33 100644
--- a/trunk/net/bluetooth/hci_sock.c
+++ b/trunk/net/bluetooth/hci_sock.c
@@ -665,8 +665,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
sk_for_each(sk, node, &hci_sk_list.head) {
- local_bh_disable();
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
sk->sk_err = EPIPE;
@@ -675,8 +674,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
hci_dev_put(hdev);
}
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
}
read_unlock(&hci_sk_list.lock);
}
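
The revert above trades the softirq-safe spinlock pair for lock_sock(), the
socket's sleeping, process-context lock. The two styles side by side, as a
sketch (not from the patch, function names hypothetical):

static void touch_sock_process_context(struct sock *sk)
{
	lock_sock(sk);			/* may sleep; excludes other owners */
	sk->sk_err = EPIPE;
	release_sock(sk);
}

static void touch_sock_atomic(struct sock *sk)
{
	local_bh_disable();		/* keep softirqs off this CPU */
	bh_lock_sock_nested(sk);	/* spinlock; never sleeps */
	sk->sk_err = EPIPE;
	bh_unlock_sock(sk);
	local_bh_enable();
}
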
diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c
index f2b61111e26d..8301e2ac747f 100644
--- a/trunk/net/core/dev.c
+++ b/trunk/net/core/dev.c
@@ -116,7 +116,6 @@
#include
#include
#include
-#include
/*
* The list of packet types we will receive (as opposed to discard)
@@ -218,73 +217,6 @@ extern void netdev_unregister_sysfs(struct net_device *);
#define netdev_unregister_sysfs(dev) do { } while(0)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-/*
- * register_netdevice() inits dev->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] =
- {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
- ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
- ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
- ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
- ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
- ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
- ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
- ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
- ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
- ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
- ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
- ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
- ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
- ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
- ARPHRD_NONE};
-
-static const char *netdev_lock_name[] =
- {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
- "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
- "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
- "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
- "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
- "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
- "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
- "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
- "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
- "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
- "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
- "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
- "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
- "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
- "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
- if (netdev_lock_type[i] == dev_type)
- return i;
- /* the last key is used by default */
- return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
- unsigned short dev_type)
-{
- int i;
-
- i = netdev_lock_pos(dev_type);
- lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
- netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_lockdep_class(spinlock_t *lock,
- unsigned short dev_type)
-{
-}
-#endif
/*******************************************************************************
@@ -3069,7 +3001,6 @@ int register_netdevice(struct net_device *dev)
spin_lock_init(&dev->queue_lock);
spin_lock_init(&dev->_xmit_lock);
- netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
dev->xmit_lock_owner = -1;
spin_lock_init(&dev->ingress_lock);
diff --git a/trunk/net/ipv4/Kconfig b/trunk/net/ipv4/Kconfig
index 010fbb2d45e9..c68196cc56ab 100644
--- a/trunk/net/ipv4/Kconfig
+++ b/trunk/net/ipv4/Kconfig
@@ -43,11 +43,11 @@ config IP_ADVANCED_ROUTER
asymmetric routing (packets from you to a host take a different path
than packets from that host to you) or if you operate a non-routing
host which has several IP addresses on different interfaces. To turn
- rp_filter on use:
+ rp_filter off use:
- echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
+ echo 0 > /proc/sys/net/ipv4/conf/<device>/rp_filter
or
- echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
+ echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter
If unsure, say N here.
@@ -577,7 +577,6 @@ config TCP_CONG_VENO
config TCP_CONG_YEAH
tristate "YeAH TCP"
depends on EXPERIMENTAL
- select TCP_CONG_VEGAS
default n
---help---
YeAH-TCP is a sender-side high-speed enabled TCP congestion control
diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c
index df9fe4f2e8cc..cb76e3c725a0 100644
--- a/trunk/net/ipv4/route.c
+++ b/trunk/net/ipv4/route.c
@@ -2396,7 +2396,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
dev_out = ip_dev_find(oldflp->fl4_src);
- if (dev_out == NULL)
+ if ((dev_out == NULL) && !(sysctl_ip_nonlocal_bind))
goto out;
/* I removed check for oif == dev_out->oif here.
@@ -2407,7 +2407,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
of another iface. --ANK
*/
- if (oldflp->oif == 0
+ if (dev_out && oldflp->oif == 0
&& (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
/* Special hack: user can direct multicasts
and limited broadcast via necessary interface
diff --git a/trunk/net/ipv4/tcp_cong.c b/trunk/net/ipv4/tcp_cong.c
index 1260e52ad772..86b26539e54b 100644
--- a/trunk/net/ipv4/tcp_cong.c
+++ b/trunk/net/ipv4/tcp_cong.c
@@ -276,34 +276,30 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
/*
- * Slow start is used when congestion window is less than slow start
- * threshold. This version implements the basic RFC2581 version
- * and optionally supports:
- * RFC3742 Limited Slow Start - growth limited to max_ssthresh
- * RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
+ * Slow start (exponential increase) with
+ * RFC3742 Limited Slow Start (fast linear increase) support.
*/
void tcp_slow_start(struct tcp_sock *tp)
{
- int cnt; /* increase in packets */
-
- /* RFC3465: ABC Slow start
- * Increase only after a full MSS of bytes is acked
- *
- * TCP sender SHOULD increase cwnd by the number of
- * previously unacknowledged bytes ACKed by each incoming
- * acknowledgment, provided the increase is not more than L
- */
- if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
- return;
+ int cnt = 0;
+
+ if (sysctl_tcp_abc) {
+ /* RFC3465: Slow Start
+ * TCP sender SHOULD increase cwnd by the number of
+ * previously unacknowledged bytes ACKed by each incoming
+ * acknowledgment, provided the increase is not more than L
+ */
+ if (tp->bytes_acked < tp->mss_cache)
+ return;
+ }
- if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
- cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
+ if (sysctl_tcp_max_ssthresh > 0 &&
+ tp->snd_cwnd > sysctl_tcp_max_ssthresh)
+ cnt += sysctl_tcp_max_ssthresh>>1;
else
- cnt = tp->snd_cwnd; /* exponential increase */
+ cnt += tp->snd_cwnd;
- /* RFC3465: ABC
- * We MAY increase by 2 if discovered delayed ack
- */
+ /* RFC3465: We MAY increase by 2 if discovered delayed ack */
if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
cnt <<= 1;
tp->bytes_acked = 0;
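
The restored tcp_slow_start() computes cnt, the congestion-window increase in
packets: exponential growth (cnt = cwnd) below sysctl_tcp_max_ssthresh, the
RFC3742 linear regime (cnt = max_ssthresh >> 1) above it, optionally doubled
for delayed ACKs under ABC. A user-space sketch (not from the patch) of just
that arithmetic:

#include <stdio.h>

static int slow_start_cnt(int snd_cwnd, int max_ssthresh, int delayed_ack)
{
	int cnt;

	if (max_ssthresh > 0 && snd_cwnd > max_ssthresh)
		cnt = max_ssthresh >> 1;	/* RFC3742 limited slow start */
	else
		cnt = snd_cwnd;			/* classic exponential phase */

	if (delayed_ack)
		cnt <<= 1;			/* RFC3465: MAY count 2 per ACK */
	return cnt;
}

int main(void)
{
	printf("cwnd 10, no limit:   +%d\n", slow_start_cnt(10, 0, 0));
	printf("cwnd 200, limit 100: +%d\n", slow_start_cnt(200, 100, 0));
	return 0;
}
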
diff --git a/trunk/net/ipx/af_ipx.c b/trunk/net/ipx/af_ipx.c
index 8400525177ab..15419dd682fd 100644
--- a/trunk/net/ipx/af_ipx.c
+++ b/trunk/net/ipx/af_ipx.c
@@ -87,7 +87,7 @@ extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
unsigned char *node);
extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct iovec *iov, size_t len, int noblock);
+ struct iovec *iov, int len, int noblock);
extern int ipxrtr_route_skb(struct sk_buff *skb);
extern struct ipx_route *ipxrtr_lookup(__be32 net);
extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
diff --git a/trunk/net/socket.c b/trunk/net/socket.c
index f4530196a70a..98a8f67abbfc 100644
--- a/trunk/net/socket.c
+++ b/trunk/net/socket.c
@@ -261,7 +261,8 @@ static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct socket_alloc *ei = (struct socket_alloc *)foo;
- inode_init_once(&ei->vfs_inode);
+ if (flags & SLAB_CTOR_CONSTRUCTOR)
+ inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
diff --git a/trunk/net/sunrpc/rpc_pipe.c b/trunk/net/sunrpc/rpc_pipe.c
index 5887457dc936..a2f1893bde53 100644
--- a/trunk/net/sunrpc/rpc_pipe.c
+++ b/trunk/net/sunrpc/rpc_pipe.c
@@ -828,17 +828,19 @@ init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct rpc_inode *rpci = (struct rpc_inode *) foo;
- inode_init_once(&rpci->vfs_inode);
- rpci->private = NULL;
- rpci->nreaders = 0;
- rpci->nwriters = 0;
- INIT_LIST_HEAD(&rpci->in_upcall);
- INIT_LIST_HEAD(&rpci->pipe);
- rpci->pipelen = 0;
- init_waitqueue_head(&rpci->waitq);
- INIT_DELAYED_WORK(&rpci->queue_timeout,
- rpc_timeout_upcall_queue);
- rpci->ops = NULL;
+ if (flags & SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&rpci->vfs_inode);
+ rpci->private = NULL;
+ rpci->nreaders = 0;
+ rpci->nwriters = 0;
+ INIT_LIST_HEAD(&rpci->in_upcall);
+ INIT_LIST_HEAD(&rpci->pipe);
+ rpci->pipelen = 0;
+ init_waitqueue_head(&rpci->waitq);
+ INIT_DELAYED_WORK(&rpci->queue_timeout,
+ rpc_timeout_upcall_queue);
+ rpci->ops = NULL;
+ }
}
int register_rpc_pipefs(void)
diff --git a/trunk/net/sunrpc/sched.c b/trunk/net/sunrpc/sched.c
index 944d75396fb3..b011eb625e49 100644
--- a/trunk/net/sunrpc/sched.c
+++ b/trunk/net/sunrpc/sched.c
@@ -989,6 +989,8 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
spin_unlock(&rpc_sched_lock);
}
+static DECLARE_MUTEX_LOCKED(rpciod_running);
+
static void rpciod_killall(void)
{
unsigned long flags;
diff --git a/trunk/net/sunrpc/sunrpc_syms.c b/trunk/net/sunrpc/sunrpc_syms.c
index 73075dec83c0..0d35bc796d00 100644
--- a/trunk/net/sunrpc/sunrpc_syms.c
+++ b/trunk/net/sunrpc/sunrpc_syms.c
@@ -134,7 +134,11 @@ EXPORT_SYMBOL(nfsd_debug);
EXPORT_SYMBOL(nlm_debug);
#endif
+extern int register_rpc_pipefs(void);
+extern void unregister_rpc_pipefs(void);
extern struct cache_detail ip_map_cache, unix_gid_cache;
+extern int init_socket_xprt(void);
+extern void cleanup_socket_xprt(void);
static int __init
init_sunrpc(void)