From ab77a8c7ed82b3d49e6da584342ead8e6c035525 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 23 Mar 2006 01:10:26 -0800 Subject: [PATCH] --- yaml --- r: 23165 b: refs/heads/master c: ca6549af77f0f28ac5d23b662fb8f72713eb16d3 h: refs/heads/master i: 23163: 59f9a4a71ceebf8a7df8811139966093d2773e41 v: v3 --- [refs] | 2 +- trunk/Documentation/kernel-parameters.txt | 11 +- trunk/Documentation/networking/pktgen.txt | 20 +- trunk/Documentation/power/swsusp.txt | 51 +- trunk/Documentation/power/userland-swsusp.txt | 149 -- trunk/Documentation/power/video.txt | 74 +- trunk/arch/cris/kernel/irq.c | 10 +- trunk/arch/frv/kernel/irq.c | 10 +- trunk/arch/i386/Kconfig | 24 +- trunk/arch/i386/Kconfig.debug | 9 - trunk/arch/i386/kernel/Makefile | 2 +- trunk/arch/i386/kernel/alternative.c | 321 --- trunk/arch/i386/kernel/apic.c | 1 - trunk/arch/i386/kernel/cpu/centaur.c | 1 - trunk/arch/i386/kernel/cpu/common.c | 47 +- .../i386/kernel/cpu/cpufreq/powernow-k8.c | 4 +- trunk/arch/i386/kernel/cpu/intel.c | 12 +- trunk/arch/i386/kernel/cpu/intel_cacheinfo.c | 4 +- trunk/arch/i386/kernel/cpu/proc.c | 2 +- trunk/arch/i386/kernel/crash.c | 2 +- trunk/arch/i386/kernel/entry.S | 4 - trunk/arch/i386/kernel/head.S | 5 +- trunk/arch/i386/kernel/io_apic.c | 25 +- trunk/arch/i386/kernel/kprobes.c | 4 +- trunk/arch/i386/kernel/module.c | 32 +- trunk/arch/i386/kernel/mpparse.c | 7 +- trunk/arch/i386/kernel/nmi.c | 6 +- trunk/arch/i386/kernel/process.c | 2 +- trunk/arch/i386/kernel/ptrace.c | 4 +- trunk/arch/i386/kernel/semaphore.c | 8 +- trunk/arch/i386/kernel/setup.c | 118 +- trunk/arch/i386/kernel/signal.c | 7 +- trunk/arch/i386/kernel/smpboot.c | 3 - trunk/arch/i386/kernel/topology.c | 9 - trunk/arch/i386/kernel/traps.c | 57 +- trunk/arch/i386/kernel/vmlinux.lds.S | 20 - trunk/arch/i386/kernel/vsyscall-sysenter.S | 3 - trunk/arch/i386/mach-es7000/es7000.h | 5 +- trunk/arch/i386/mach-es7000/es7000plat.c | 6 +- trunk/arch/i386/mm/fault.c | 210 +- trunk/arch/i386/mm/init.c | 45 +- trunk/arch/i386/oprofile/nmi_int.c | 7 +- trunk/arch/ia64/hp/sim/simserial.c | 7 +- trunk/arch/m32r/kernel/irq.c | 10 +- trunk/arch/m68k/bvme6000/rtc.c | 4 +- trunk/arch/mips/kernel/irq.c | 10 +- trunk/arch/mips/kernel/smp.c | 4 +- trunk/arch/mips/sgi-ip27/ip27-irq.c | 5 +- trunk/arch/parisc/kernel/smp.c | 25 +- trunk/arch/powerpc/kernel/irq.c | 5 +- trunk/arch/powerpc/kernel/kprobes.c | 4 +- trunk/arch/powerpc/kernel/setup-common.c | 5 +- trunk/arch/powerpc/kernel/setup_32.c | 5 +- trunk/arch/powerpc/platforms/powermac/smp.c | 4 +- trunk/arch/ppc/kernel/setup.c | 10 +- trunk/arch/s390/kernel/smp.c | 4 +- trunk/arch/sh/kernel/irq.c | 5 +- trunk/arch/sh/kernel/setup.c | 5 +- trunk/arch/sh64/kernel/irq.c | 5 +- trunk/arch/sparc/kernel/irq.c | 5 +- trunk/arch/sparc/kernel/smp.c | 24 +- trunk/arch/sparc/kernel/sun4d_irq.c | 8 +- trunk/arch/sparc/kernel/sun4d_smp.c | 8 +- trunk/arch/sparc/kernel/sun4m_smp.c | 6 +- trunk/arch/sparc64/kernel/irq.c | 4 +- trunk/arch/sparc64/kernel/smp.c | 30 +- trunk/arch/sparc64/mm/init.c | 4 +- trunk/arch/um/kernel/um_arch.c | 12 +- trunk/arch/x86_64/kernel/early_printk.c | 26 +- trunk/arch/x86_64/kernel/irq.c | 21 +- trunk/arch/x86_64/kernel/kprobes.c | 4 +- trunk/arch/x86_64/kernel/nmi.c | 4 +- trunk/arch/x86_64/kernel/signal.c | 4 + trunk/arch/xtensa/kernel/irq.c | 15 +- trunk/arch/xtensa/platform-iss/console.c | 4 + trunk/block/ioctl.c | 22 +- trunk/drivers/base/power/suspend.c | 5 +- trunk/drivers/block/cciss.c | 2 +- trunk/drivers/block/floppy.c | 17 +- trunk/drivers/block/loop.c | 18 +- 
trunk/drivers/block/nbd.c | 16 +- trunk/drivers/block/pktcdvd.c | 27 +- trunk/drivers/block/rd.c | 4 +- trunk/drivers/cdrom/cdrom.c | 874 +++---- trunk/drivers/cdrom/cdu31a.c | 8 +- trunk/drivers/cdrom/cm206.c | 44 +- trunk/drivers/cdrom/sbpcd.c | 2180 +++++++++-------- trunk/drivers/cdrom/viocd.c | 2 +- trunk/drivers/char/amiserial.c | 18 +- trunk/drivers/char/generic_serial.c | 14 +- trunk/drivers/char/istallion.c | 1 + trunk/drivers/char/n_tty.c | 10 +- trunk/drivers/char/nwflash.c | 11 +- trunk/drivers/char/raw.c | 23 +- trunk/drivers/char/ser_a2232.c | 4 +- trunk/drivers/char/snsc.c | 8 +- trunk/drivers/char/snsc_event.c | 5 +- trunk/drivers/char/stallion.c | 1 + trunk/drivers/char/sx.c | 2 +- trunk/drivers/char/tty_io.c | 50 +- trunk/drivers/char/vme_scc.c | 2 +- trunk/drivers/char/vt.c | 22 +- trunk/drivers/char/watchdog/pcwd_usb.c | 7 +- trunk/drivers/connector/connector.c | 15 +- trunk/drivers/firmware/dcdbas.c | 23 +- trunk/drivers/ide/ide-cd.c | 110 +- trunk/drivers/ide/ide-disk.c | 11 +- trunk/drivers/ide/ide-floppy.c | 11 +- trunk/drivers/ide/ide-tape.c | 19 +- trunk/drivers/isdn/capi/kcapi.c | 17 +- trunk/drivers/isdn/hisax/config.c | 1 + trunk/drivers/isdn/hisax/elsa.c | 1 + trunk/drivers/net/loopback.c | 4 +- trunk/drivers/net/ppp_generic.c | 25 +- trunk/drivers/oprofile/cpu_buffer.c | 3 +- trunk/drivers/pnp/pnpbios/rsparser.c | 6 +- trunk/drivers/s390/block/dasd_ioctl.c | 8 +- trunk/drivers/scsi/ide-scsi.c | 11 +- trunk/drivers/scsi/sr.c | 37 +- trunk/drivers/scsi/sr.h | 1 + trunk/drivers/scsi/sr_ioctl.c | 19 + trunk/drivers/serial/68328serial.c | 9 +- trunk/drivers/serial/au1x00_uart.c | 11 +- trunk/drivers/serial/crisv10.c | 68 +- trunk/drivers/serial/m32r_sio.c | 15 +- trunk/drivers/serial/sunsu.c | 13 +- trunk/drivers/tc/zs.c | 9 +- trunk/fs/9p/mux.c | 11 +- trunk/fs/adfs/file.c | 4 + trunk/fs/autofs4/autofs_i.h | 3 +- trunk/fs/autofs4/inode.c | 2 +- trunk/fs/autofs4/waitq.c | 16 +- trunk/fs/bio.c | 8 +- trunk/fs/block_dev.c | 28 +- trunk/fs/buffer.c | 6 +- trunk/fs/cifs/dir.c | 8 +- trunk/fs/cifs/fcntl.c | 4 +- trunk/fs/cifs/file.c | 4 +- trunk/fs/cifs/inode.c | 16 +- trunk/fs/cifs/link.c | 16 +- trunk/fs/cifs/readdir.c | 4 +- trunk/fs/cifs/xattr.c | 16 +- trunk/fs/devpts/inode.c | 76 +- trunk/fs/dquot.c | 167 +- trunk/fs/eventpoll.c | 32 +- trunk/fs/ext2/namei.c | 54 +- trunk/fs/ext3/dir.c | 52 +- trunk/fs/ext3/file.c | 4 +- trunk/fs/ext3/inode.c | 16 +- trunk/fs/ext3/ioctl.c | 4 +- trunk/fs/ext3/super.c | 6 +- trunk/fs/fat/fatent.c | 6 +- trunk/fs/fcntl.c | 9 +- trunk/fs/file.c | 34 +- trunk/fs/file_table.c | 10 +- trunk/fs/hpfs/hpfs_fn.h | 5 +- trunk/fs/hpfs/inode.c | 10 +- trunk/fs/hpfs/namei.c | 60 +- trunk/fs/hpfs/super.c | 4 +- trunk/fs/inode.c | 18 +- trunk/fs/inotify.c | 116 +- trunk/fs/jbd/checkpoint.c | 4 +- trunk/fs/jbd/journal.c | 4 +- trunk/fs/jbd/transaction.c | 4 +- trunk/fs/jffs/inode-v23.c | 86 +- trunk/fs/jffs/intrep.c | 6 +- trunk/fs/jffs/jffs_fm.c | 2 +- trunk/fs/jffs/jffs_fm.h | 5 +- trunk/fs/libfs.c | 14 +- trunk/fs/minix/namei.c | 48 +- trunk/fs/namei.c | 12 +- trunk/fs/ncpfs/file.c | 4 +- trunk/fs/ncpfs/inode.c | 6 +- trunk/fs/ncpfs/ncplib_kernel.c | 4 +- trunk/fs/ncpfs/sock.c | 34 +- trunk/fs/open.c | 8 +- trunk/fs/proc/proc_misc.c | 2 +- trunk/fs/qnx4/file.c | 3 + trunk/fs/quota.c | 6 +- trunk/fs/quota_v2.c | 2 +- trunk/fs/ramfs/file-mmu.c | 11 + trunk/fs/seq_file.c | 10 +- trunk/fs/super.c | 10 +- trunk/fs/sysv/namei.c | 48 +- trunk/fs/udf/balloc.c | 36 +- trunk/fs/udf/ialloc.c | 8 +- trunk/fs/udf/super.c | 2 +- trunk/fs/ufs/file.c | 10 + 
trunk/fs/ufs/namei.c | 48 +- trunk/fs/xfs/Makefile-linux-2.6 | 40 +- trunk/fs/xfs/linux-2.6/kmem.h | 91 +- trunk/fs/xfs/linux-2.6/xfs_aops.c | 484 ++-- trunk/fs/xfs/linux-2.6/xfs_aops.h | 4 +- trunk/fs/xfs/linux-2.6/xfs_buf.c | 7 +- trunk/fs/xfs/linux-2.6/xfs_export.c | 37 +- trunk/fs/xfs/linux-2.6/xfs_file.c | 187 +- trunk/fs/xfs/linux-2.6/xfs_fs_subr.c | 6 +- trunk/fs/xfs/linux-2.6/xfs_ioctl.c | 138 +- trunk/fs/xfs/linux-2.6/xfs_ioctl32.c | 12 +- trunk/fs/xfs/linux-2.6/xfs_ioctl32.h | 4 +- trunk/fs/xfs/linux-2.6/xfs_iops.c | 317 +-- trunk/fs/xfs/linux-2.6/xfs_iops.h | 12 +- trunk/fs/xfs/linux-2.6/xfs_linux.h | 10 +- trunk/fs/xfs/linux-2.6/xfs_lrw.c | 51 +- trunk/fs/xfs/linux-2.6/xfs_stats.c | 7 +- trunk/fs/xfs/linux-2.6/xfs_super.c | 213 +- trunk/fs/xfs/linux-2.6/xfs_super.h | 7 +- trunk/fs/xfs/linux-2.6/xfs_sysctl.c | 3 +- trunk/fs/xfs/linux-2.6/xfs_vfs.c | 19 +- trunk/fs/xfs/linux-2.6/xfs_vfs.h | 3 +- trunk/fs/xfs/linux-2.6/xfs_vnode.c | 35 +- trunk/fs/xfs/linux-2.6/xfs_vnode.h | 33 +- trunk/fs/xfs/quota/xfs_dquot_item.c | 2 - trunk/fs/xfs/quota/xfs_qm.c | 13 +- trunk/fs/xfs/quota/xfs_qm_bhv.c | 4 +- trunk/fs/xfs/support/ktrace.c | 4 +- trunk/fs/xfs/support/uuid.c | 15 +- trunk/fs/xfs/xfs_acl.h | 4 +- trunk/fs/xfs/xfs_attr.c | 59 +- trunk/fs/xfs/xfs_attr_leaf.c | 729 +++--- trunk/fs/xfs/xfs_attr_leaf.h | 47 +- trunk/fs/xfs/xfs_attr_sf.h | 8 +- trunk/fs/xfs/xfs_bmap.c | 1305 +++++----- trunk/fs/xfs/xfs_bmap.h | 22 +- trunk/fs/xfs/xfs_bmap_btree.c | 10 +- trunk/fs/xfs/xfs_bmap_btree.h | 8 + trunk/fs/xfs/xfs_clnt.h | 2 + trunk/fs/xfs/xfs_da_btree.c | 409 ++-- trunk/fs/xfs/xfs_da_btree.h | 16 +- trunk/fs/xfs/xfs_dfrag.c | 4 +- trunk/fs/xfs/xfs_dir.c | 32 +- trunk/fs/xfs/xfs_dir2.h | 27 +- trunk/fs/xfs/xfs_dir2_block.c | 193 +- trunk/fs/xfs/xfs_dir2_block.h | 7 +- trunk/fs/xfs/xfs_dir2_data.c | 240 +- trunk/fs/xfs/xfs_dir2_data.h | 26 +- trunk/fs/xfs/xfs_dir2_leaf.c | 285 ++- trunk/fs/xfs/xfs_dir2_leaf.h | 15 +- trunk/fs/xfs/xfs_dir2_node.c | 303 ++- trunk/fs/xfs/xfs_dir2_node.h | 10 +- trunk/fs/xfs/xfs_dir2_sf.c | 8 +- trunk/fs/xfs/xfs_dir_leaf.c | 82 +- trunk/fs/xfs/xfs_dir_sf.h | 24 +- trunk/fs/xfs/xfs_dmapi.h | 10 + trunk/fs/xfs/xfs_fsops.c | 1 - trunk/fs/xfs/xfs_ialloc.c | 13 +- trunk/fs/xfs/xfs_iget.c | 6 +- trunk/fs/xfs/xfs_inode.c | 1303 ++-------- trunk/fs/xfs/xfs_inode.h | 78 +- trunk/fs/xfs/xfs_iomap.c | 2 +- trunk/fs/xfs/xfs_itable.c | 5 +- trunk/fs/xfs/xfs_log_recover.c | 2 +- trunk/fs/xfs/xfs_mount.c | 646 +---- trunk/fs/xfs/xfs_mount.h | 44 +- trunk/fs/xfs/xfs_rw.h | 1 - trunk/fs/xfs/xfs_trans.c | 187 +- trunk/fs/xfs/xfs_trans.h | 2 +- trunk/fs/xfs/xfs_vfsops.c | 82 +- trunk/fs/xfs/xfs_vnodeops.c | 15 +- trunk/include/asm-alpha/mmu_context.h | 5 +- trunk/include/asm-alpha/topology.h | 4 +- trunk/include/asm-generic/bug.h | 4 +- trunk/include/asm-generic/percpu.h | 7 +- trunk/include/asm-i386/alternative.h | 129 - trunk/include/asm-i386/arch_hooks.h | 3 - trunk/include/asm-i386/atomic.h | 36 +- trunk/include/asm-i386/bitops.h | 7 +- trunk/include/asm-i386/cache.h | 2 - trunk/include/asm-i386/cpufeature.h | 1 - .../include/asm-i386/mach-default/do_timer.h | 2 +- .../asm-i386/mach-es7000/mach_mpparse.h | 10 +- trunk/include/asm-i386/mach-visws/do_timer.h | 2 +- .../include/asm-i386/mach-voyager/do_timer.h | 2 +- trunk/include/asm-i386/mpspec.h | 1 + trunk/include/asm-i386/mtrr.h | 1 - trunk/include/asm-i386/mutex.h | 6 +- trunk/include/asm-i386/pgtable-2level.h | 2 - trunk/include/asm-i386/pgtable-3level.h | 2 - trunk/include/asm-i386/rwlock.h | 56 +- 
trunk/include/asm-i386/semaphore.h | 8 +- trunk/include/asm-i386/spinlock.h | 34 +- trunk/include/asm-i386/system.h | 62 +- trunk/include/asm-i386/uaccess.h | 12 +- trunk/include/asm-i386/unistd.h | 36 +- trunk/include/asm-ia64/atomic.h | 8 +- trunk/include/asm-ia64/cache.h | 2 - trunk/include/asm-m68k/atomic.h | 8 +- trunk/include/asm-parisc/cache.h | 2 - trunk/include/asm-powerpc/percpu.h | 7 +- trunk/include/asm-s390/atomic.h | 18 +- trunk/include/asm-s390/percpu.h | 7 +- trunk/include/asm-sparc64/atomic.h | 10 +- trunk/include/asm-sparc64/cache.h | 2 - trunk/include/asm-sparc64/percpu.h | 7 +- trunk/include/asm-um/alternative.h | 6 - trunk/include/asm-x86_64/atomic.h | 8 +- trunk/include/asm-x86_64/cache.h | 2 - trunk/include/asm-x86_64/percpu.h | 7 +- trunk/include/linux/cache.h | 4 +- trunk/include/linux/cdrom.h | 5 +- trunk/include/linux/eventpoll.h | 8 +- trunk/include/linux/ext3_fs.h | 9 +- trunk/include/linux/ext3_fs_i.h | 7 +- trunk/include/linux/file.h | 28 +- trunk/include/linux/fs.h | 22 +- trunk/include/linux/generic_serial.h | 4 +- trunk/include/linux/genhd.h | 14 +- trunk/include/linux/init_task.h | 10 +- trunk/include/linux/jbd.h | 7 +- trunk/include/linux/kernel.h | 3 - trunk/include/linux/kprobes.h | 3 +- trunk/include/linux/loop.h | 3 +- trunk/include/linux/msdos_fs.h | 3 +- trunk/include/linux/nbd.h | 3 +- trunk/include/linux/ncp_fs_i.h | 2 +- trunk/include/linux/ncp_fs_sb.h | 5 +- trunk/include/linux/pm.h | 3 +- trunk/include/linux/profile.h | 2 +- trunk/include/linux/quota.h | 7 +- trunk/include/linux/raid/raid1.h | 2 +- trunk/include/linux/rcupdate.h | 2 + trunk/include/linux/seq_file.h | 4 +- trunk/include/linux/swap.h | 5 +- trunk/include/linux/tty.h | 8 +- trunk/include/linux/tty_flip.h | 12 +- trunk/include/linux/udf_fs_sb.h | 4 +- trunk/include/linux/vt_kern.h | 5 - trunk/init/do_mounts_initrd.c | 1 - trunk/init/main.c | 26 +- trunk/kernel/cpuset.c | 212 +- trunk/kernel/exit.c | 4 +- trunk/kernel/fork.c | 8 +- trunk/kernel/kprobes.c | 14 +- trunk/kernel/kthread.c | 7 +- trunk/kernel/module.c | 53 +- trunk/kernel/panic.c | 97 +- trunk/kernel/posix-timers.c | 1 - trunk/kernel/power/Makefile | 2 +- trunk/kernel/power/disk.c | 20 +- trunk/kernel/power/main.c | 2 +- trunk/kernel/power/pm.c | 21 +- trunk/kernel/power/power.h | 75 +- trunk/kernel/power/process.c | 61 +- trunk/kernel/power/snapshot.c | 335 +-- trunk/kernel/power/swap.c | 544 ---- trunk/kernel/power/swsusp.c | 887 ++++++- trunk/kernel/power/user.c | 333 --- trunk/kernel/profile.c | 11 +- trunk/kernel/rcupdate.c | 14 +- trunk/kernel/sched.c | 13 +- trunk/kernel/signal.c | 11 +- trunk/kernel/spinlock.c | 9 +- trunk/kernel/sys.c | 46 +- trunk/lib/reed_solomon/reed_solomon.c | 11 +- trunk/mm/readahead.c | 1 - trunk/mm/swapfile.c | 57 +- trunk/net/core/pktgen.c | 158 +- trunk/security/seclvl.c | 210 +- trunk/sound/oss/ac97_codec.c | 24 +- trunk/sound/oss/aci.c | 11 +- trunk/sound/oss/ad1889.c | 7 +- trunk/sound/oss/ad1889.h | 2 +- trunk/sound/oss/ali5455.c | 8 +- trunk/sound/oss/au1000.c | 44 +- trunk/sound/oss/au1550_ac97.c | 44 +- trunk/sound/oss/btaudio.c | 36 +- trunk/sound/oss/cmpci.c | 20 +- trunk/sound/oss/cs4281/cs4281m.c | 54 +- trunk/sound/oss/cs46xx.c | 75 +- trunk/sound/oss/dmasound/dmasound_awacs.c | 10 +- trunk/sound/oss/emu10k1/hwaccess.h | 2 +- trunk/sound/oss/emu10k1/main.c | 2 +- trunk/sound/oss/emu10k1/midi.c | 14 +- trunk/sound/oss/es1370.c | 71 +- trunk/sound/oss/es1371.c | 71 +- trunk/sound/oss/esssolo1.c | 50 +- trunk/sound/oss/forte.c | 11 +- trunk/sound/oss/hal2.c | 22 +- 
trunk/sound/oss/i810_audio.c | 8 +- trunk/sound/oss/ite8172.c | 20 +- trunk/sound/oss/maestro.c | 26 +- trunk/sound/oss/maestro3.c | 20 +- trunk/sound/oss/nec_vrc5477.c | 20 +- trunk/sound/oss/rme96xx.c | 17 +- trunk/sound/oss/sonicvibes.c | 48 +- trunk/sound/oss/swarm_cs4297a.c | 39 +- trunk/sound/oss/trident.c | 62 +- trunk/sound/oss/via82cxxx_audio.c | 49 +- trunk/sound/oss/vwsnd.c | 61 +- trunk/sound/oss/ymfpci.c | 14 +- trunk/sound/oss/ymfpci.h | 3 +- 391 files changed, 8276 insertions(+), 11434 deletions(-) delete mode 100644 trunk/Documentation/power/userland-swsusp.txt delete mode 100644 trunk/arch/i386/kernel/alternative.c delete mode 100644 trunk/include/asm-i386/alternative.h delete mode 100644 trunk/include/asm-um/alternative.h delete mode 100644 trunk/kernel/power/swap.c delete mode 100644 trunk/kernel/power/user.c diff --git a/[refs] b/[refs] index 4951e96fbf33..aadd3d419ea7 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: debf798b1ed82053689d900670eb27fb2f1b4bd3 +refs/heads/master: ca6549af77f0f28ac5d23b662fb8f72713eb16d3 diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 7b7382d0f758..fc99075e0af4 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -1008,9 +1008,7 @@ running once the system is up. noexec=on: enable non-executable mappings (default) noexec=off: disable nn-executable mappings - nofxsr [BUGS=IA-32] Disables x86 floating point extended - register save and restore. The kernel will only save - legacy floating-point registers on task switch. + nofxsr [BUGS=IA-32] nohlt [BUGS=ARM] @@ -1055,8 +1053,6 @@ running once the system is up. nosbagart [IA-64] - nosep [BUGS=IA-32] Disables x86 SYSENTER/SYSEXIT support. - nosmp [SMP] Tells an SMP kernel to act as a UP kernel. nosync [HW,M68K] Disables sync negotiation for all devices. @@ -1126,11 +1122,6 @@ running once the system is up. pas16= [HW,SCSI] See header of drivers/scsi/pas16.c. - pause_on_oops= - Halt all CPUs after the first oops has been printed for - the specified number of seconds. This is to be used if - your oopses keep scrolling off the screen. - pcbit= [HW,ISDN] pcd. [PARIDE] diff --git a/trunk/Documentation/networking/pktgen.txt b/trunk/Documentation/networking/pktgen.txt index cc4b4d04129c..278771c9ad99 100644 --- a/trunk/Documentation/networking/pktgen.txt +++ b/trunk/Documentation/networking/pktgen.txt @@ -109,6 +109,22 @@ Examples: cycle through the port range. pgset "udp_dst_max 9" set UDP destination port max. + pgset "mpls 0001000a,0002000a,0000000a" set MPLS labels (in this example + outer label=16,middle label=32, + inner label=0 (IPv4 NULL)) Note that + there must be no spaces between the + arguments. Leading zeros are required. + Do not set the bottom of stack bit, + thats done automatically. If you do + set the bottom of stack bit, that + indicates that you want to randomly + generate that address and the flag + MPLS_RND will be turned on. You + can have any mix of random and fixed + labels in the label stack. + + pgset "mpls 0" turn off mpls (or any invalid argument works too!) + pgset stop aborts injection. Also, ^C aborts generator. @@ -167,6 +183,8 @@ pkt_size min_pkt_size max_pkt_size +mpls + udp_src_min udp_src_max @@ -211,4 +229,4 @@ Grant Grundler for testing on IA-64 and parisc, Harald Welte, Lennert Buytenhek Stephen Hemminger, Andi Kleen, Dave Miller and many others. -Good luck with the linux net-development. 
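
An aside on the pktgen hunk above: each comma-separated argument to pgset "mpls ..." is one 32-bit MPLS label stack entry written as eight hex digits. Per RFC 3032 an entry packs a 20-bit label, 3 TC/EXP bits, the bottom-of-stack (S) bit, and an 8-bit TTL; pktgen sets the S bit on the innermost entry itself, which is why the text warns against setting it by hand. The following sketch is illustrative only, not pktgen source; the helper name is invented, but it reproduces the exact values in the documented example (labels 16, 32 and 0 with TTL 10 give 0001000a,0002000a,0000000a).

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper: pack one MPLS label stack entry the way
 * pktgen expects it on the "mpls" command line.
 * Layout per RFC 3032: label[31:12], TC/EXP[11:9], S[8], TTL[7:0]. */
static uint32_t mpls_entry(uint32_t label, uint8_t tc, int bottom, uint8_t ttl)
{
	return (label & 0xfffff) << 12 | (tc & 0x7) << 9 |
	       (bottom ? 1u : 0u) << 8 | ttl;
}

int main(void)
{
	/* S bit left clear on all three entries: pktgen sets it on
	 * the inner (last) entry automatically. */
	printf("pgset \"mpls %08x,%08x,%08x\"\n",
	       mpls_entry(16, 0, 0, 10),
	       mpls_entry(32, 0, 0, 10),
	       mpls_entry(0, 0, 0, 10));
	return 0;
}
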
\ No newline at end of file +Good luck with the linux net-development. diff --git a/trunk/Documentation/power/swsusp.txt b/trunk/Documentation/power/swsusp.txt index d7814a113ee1..b28b7f04abb8 100644 --- a/trunk/Documentation/power/swsusp.txt +++ b/trunk/Documentation/power/swsusp.txt @@ -17,11 +17,6 @@ Some warnings, first. * but it will probably only crash. * * (*) suspend/resume support is needed to make it safe. - * - * If you have any filesystems on USB devices mounted before suspend, - * they won't be accessible after resume and you may lose data, as though - * you have unplugged the USB devices with mounted filesystems on them - * (see the FAQ below for details). You need to append resume=/dev/your_swap_partition to kernel command line. Then you suspend by @@ -32,18 +27,19 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state echo platform > /sys/power/disk; echo disk > /sys/power/state -. If you have SATA disks, you'll need recent kernels with SATA suspend -support. For suspend and resume to work, make sure your disk drivers -are built into kernel -- not modules. [There's way to make -suspend/resume with modular disk drivers, see FAQ, but you probably -should not do that.] - If you want to limit the suspend image size to N bytes, do echo N > /sys/power/image_size before suspend (it is limited to 500 MB by default). +Encrypted suspend image: +------------------------ +If you want to store your suspend image encrypted with a temporary +key to prevent data gathering after resume you must compile +crypto and the aes algorithm into the kernel - modules won't work +as they cannot be loaded at resume time. + Article about goals and implementation of Software Suspend for Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -337,37 +333,4 @@ init=/bin/bash, then swapon and starting suspend sequence manually usually does the trick. Then it is good idea to try with latest vanilla kernel. -Q: How can distributions ship a swsusp-supporting kernel with modular -disk drivers (especially SATA)? - -A: Well, it can be done, load the drivers, then do echo into -/sys/power/disk/resume file from initrd. Be sure not to mount -anything, not even read-only mount, or you are going to lose your -data. - -Q: How do I make suspend more verbose? - -A: If you want to see any non-error kernel messages on the virtual -terminal the kernel switches to during suspend, you have to set the -kernel console loglevel to at least 5, for example by doing - - echo 5 > /proc/sys/kernel/printk - -Q: Is this true that if I have a mounted filesystem on a USB device and -I suspend to disk, I can lose data unless the filesystem has been mounted -with "sync"? - -A: That's right. It depends on your hardware, and it could be true even for -suspend-to-RAM. In fact, even with "-o sync" you can lose data if your -programs have information in buffers they haven't written out to disk. - -If you're lucky, your hardware will support low-power modes for USB -controllers while the system is asleep. Lots of hardware doesn't, -however. Shutting off the power to a USB controller is equivalent to -unplugging all the attached devices. - -Remember that it's always a bad idea to unplug a disk drive containing a -mounted filesystem. With USB that's true even when your system is asleep! -The safest thing is to unmount all USB-based filesystems before suspending -and remount them after resuming. 
diff --git a/trunk/Documentation/power/userland-swsusp.txt b/trunk/Documentation/power/userland-swsusp.txt deleted file mode 100644 index 94058220aaf0..000000000000 --- a/trunk/Documentation/power/userland-swsusp.txt +++ /dev/null @@ -1,149 +0,0 @@ -Documentation for userland software suspend interface - (C) 2006 Rafael J. Wysocki - -First, the warnings at the beginning of swsusp.txt still apply. - -Second, you should read the FAQ in swsusp.txt _now_ if you have not -done it already. - -Now, to use the userland interface for software suspend you need special -utilities that will read/write the system memory snapshot from/to the -kernel. Such utilities are available, for example, from -. You may want to have -a look at them if you are going to develop your own suspend/resume -utilities. - -The interface consists of a character device providing the open(), -release(), read(), and write() operations as well as several ioctl() -commands defined in kernel/power/power.h. The major and minor -numbers of the device are, respectively, 10 and 231, and they can -be read from /sys/class/misc/snapshot/dev. - -The device can be open either for reading or for writing. If open for -reading, it is considered to be in the suspend mode. Otherwise it is -assumed to be in the resume mode. The device cannot be open for reading -and writing. It is also impossible to have the device open more than once -at a time. - -The ioctl() commands recognized by the device are: - -SNAPSHOT_FREEZE - freeze user space processes (the current process is - not frozen); this is required for SNAPSHOT_ATOMIC_SNAPSHOT - and SNAPSHOT_ATOMIC_RESTORE to succeed - -SNAPSHOT_UNFREEZE - thaw user space processes frozen by SNAPSHOT_FREEZE - -SNAPSHOT_ATOMIC_SNAPSHOT - create a snapshot of the system memory; the - last argument of ioctl() should be a pointer to an int variable, - the value of which will indicate whether the call returned after - creating the snapshot (1) or after restoring the system memory state - from it (0) (after resume the system finds itself finishing the - SNAPSHOT_ATOMIC_SNAPSHOT ioctl() again); after the snapshot - has been created the read() operation can be used to transfer - it out of the kernel - -SNAPSHOT_ATOMIC_RESTORE - restore the system memory state from the - uploaded snapshot image; before calling it you should transfer - the system memory snapshot back to the kernel using the write() - operation; this call will not succeed if the snapshot - image is not available to the kernel - -SNAPSHOT_FREE - free memory allocated for the snapshot image - -SNAPSHOT_SET_IMAGE_SIZE - set the preferred maximum size of the image - (the kernel will do its best to ensure the image size will not exceed - this number, but if it turns out to be impossible, the kernel will - create the smallest image possible) - -SNAPSHOT_AVAIL_SWAP - return the amount of available swap in bytes (the last - argument should be a pointer to an unsigned int variable that will - contain the result if the call is successful). 
- -SNAPSHOT_GET_SWAP_PAGE - allocate a swap page from the resume partition - (the last argument should be a pointer to a loff_t variable that - will contain the swap page offset if the call is successful) - -SNAPSHOT_FREE_SWAP_PAGES - free all swap pages allocated with - SNAPSHOT_GET_SWAP_PAGE - -SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument - should specify the device's major and minor numbers in the old - two-byte format, as returned by the stat() function in the .st_rdev - member of the stat structure); it is recommended to always use this - call, because the code to set the resume partition could be removed from - future kernels - -The device's read() operation can be used to transfer the snapshot image from -the kernel. It has the following limitations: -- you cannot read() more than one virtual memory page at a time -- read()s accross page boundaries are impossible (ie. if ypu read() 1/2 of - a page in the previous call, you will only be able to read() - _at_ _most_ 1/2 of the page in the next call) - -The device's write() operation is used for uploading the system memory snapshot -into the kernel. It has the same limitations as the read() operation. - -The release() operation frees all memory allocated for the snapshot image -and all swap pages allocated with SNAPSHOT_GET_SWAP_PAGE (if any). -Thus it is not necessary to use either SNAPSHOT_FREE or -SNAPSHOT_FREE_SWAP_PAGES before closing the device (in fact it will also -unfreeze user space processes frozen by SNAPSHOT_UNFREEZE if they are -still frozen when the device is being closed). - -Currently it is assumed that the userland utilities reading/writing the -snapshot image from/to the kernel will use a swap parition, called the resume -partition, as storage space. However, this is not really required, as they -can use, for example, a special (blank) suspend partition or a file on a partition -that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards. - -These utilities SHOULD NOT make any assumptions regarding the ordering of -data within the snapshot image, except for the image header that MAY be -assumed to start with an swsusp_info structure, as specified in -kernel/power/power.h. This structure MAY be used by the userland utilities -to obtain some information about the snapshot image, such as the size -of the snapshot image, including the metadata and the header itself, -contained in the .size member of swsusp_info. - -The snapshot image MUST be written to the kernel unaltered (ie. all of the image -data, metadata and header MUST be written in _exactly_ the same amount, form -and order in which they have been read). Otherwise, the behavior of the -resumed system may be totally unpredictable. - -While executing SNAPSHOT_ATOMIC_RESTORE the kernel checks if the -structure of the snapshot image is consistent with the information stored -in the image header. If any inconsistencies are detected, -SNAPSHOT_ATOMIC_RESTORE will not succeed. Still, this is not a fool-proof -mechanism and the userland utilities using the interface SHOULD use additional -means, such as checksums, to ensure the integrity of the snapshot image. - -The suspending and resuming utilities MUST lock themselves in memory, -preferrably using mlockall(), before calling SNAPSHOT_FREEZE. - -The suspending utility MUST check the value stored by SNAPSHOT_ATOMIC_SNAPSHOT -in the memory location pointed to by the last argument of ioctl() and proceed -in accordance with it: -1. If the value is 1 (ie. 
the system memory snapshot has just been - created and the system is ready for saving it): - (a) The suspending utility MUST NOT close the snapshot device - _unless_ the whole suspend procedure is to be cancelled, in - which case, if the snapshot image has already been saved, the - suspending utility SHOULD destroy it, preferrably by zapping - its header. If the suspend is not to be cancelled, the - system MUST be powered off or rebooted after the snapshot - image has been saved. - (b) The suspending utility SHOULD NOT attempt to perform any - file system operations (including reads) on the file systems - that were mounted before SNAPSHOT_ATOMIC_SNAPSHOT has been - called. However, it MAY mount a file system that was not - mounted at that time and perform some operations on it (eg. - use it for saving the image). -2. If the value is 0 (ie. the system state has just been restored from - the snapshot image), the suspending utility MUST close the snapshot - device. Afterwards it will be treated as a regular userland process, - so it need not exit. - -The resuming utility SHOULD NOT attempt to mount any file systems that could -be mounted before suspend and SHOULD NOT attempt to perform any operations -involving such file systems. - -For details, please refer to the source code. diff --git a/trunk/Documentation/power/video.txt b/trunk/Documentation/power/video.txt index d18a57d1a531..912bed87c758 100644 --- a/trunk/Documentation/power/video.txt +++ b/trunk/Documentation/power/video.txt @@ -1,7 +1,7 @@ Video issues with S3 resume ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - 2003-2006, Pavel Machek + 2003-2005, Pavel Machek During S3 resume, hardware needs to be reinitialized. For most devices, this is easy, and kernel driver knows how to do @@ -15,27 +15,6 @@ run normally so video card is normally initialized. It should not be problem for S1 standby, because hardware should retain its state over that. -We either have to run video BIOS during early resume, or interpret it -using vbetool later, or maybe nothing is neccessary on particular -system because video state is preserved. Unfortunately different -methods work on different systems, and no known method suits all of -them. - -Userland application called s2ram has been developed; it contains long -whitelist of systems, and automatically selects working method for a -given system. It can be downloaded from CVS at -www.sf.net/projects/suspend . If you get a system that is not in the -whitelist, please try to find a working solution, and submit whitelist -entry so that work does not need to be repeated. - -Currently, VBE_SAVE method (6 below) works on most -systems. Unfortunately, vbetool only runs after userland is resumed, -so it makes debugging of early resume problems -hard/impossible. Methods that do not rely on userland are preferable. - -Details -~~~~~~~ - There are a few types of systems where video works after S3 resume: (1) systems where video state is preserved over S3. @@ -125,7 +104,6 @@ HP NX7000 ??? (*) HP Pavilion ZD7000 vbetool post needed, need open-source nv driver for X HP Omnibook XE3 athlon version none (1) HP Omnibook XE3GC none (1), video is S3 Savage/IX-MV -HP Omnibook 5150 none (1), (S1 also works OK) IBM TP T20, model 2647-44G none (1), video is S3 Inc. 86C270-294 Savage/IX-MV, vesafb gets "interesting" but X work. 
IBM TP A31 / Type 2652-M5G s3_mode (3) [works ok with BIOS 1.04 2002-08-23, but not at all with BIOS 1.11 2004-11-05 :-(] IBM TP R32 / Type 2658-MMG none (1) @@ -142,24 +120,18 @@ IBM ThinkPad T42p (2373-GTG) s3_bios (2) IBM TP X20 ??? (*) IBM TP X30 s3_bios (2) IBM TP X31 / Type 2672-XXH none (1), use radeontool (http://fdd.com/software/radeon/) to turn off backlight. -IBM TP X32 none (1), but backlight is on and video is trashed after long suspend. s3_bios,s3_mode (4) works too. Perhaps that gets better results? +IBM TP X32 none (1), but backlight is on and video is trashed after long suspend IBM Thinkpad X40 Type 2371-7JG s3_bios,s3_mode (4) -IBM TP 600e none(1), but a switch to console and back to X is needed Medion MD4220 ??? (*) Samsung P35 vbetool needed (6) -Sharp PC-AR10 (ATI rage) none (1), backlight does not switch off +Sharp PC-AR10 (ATI rage) none (1) Sony Vaio PCG-C1VRX/K s3_bios (2) Sony Vaio PCG-F403 ??? (*) -Sony Vaio PCG-GRT995MP none (1), works with 'nv' X driver -Sony Vaio PCG-GR7/K none (1), but needs radeonfb, use radeontool (http://fdd.com/software/radeon/) to turn off backlight. Sony Vaio PCG-N505SN ??? (*) Sony Vaio vgn-s260 X or boot-radeon can init it (5) -Sony Vaio vgn-S580BH vga=normal, but suspend from X. Console will be blank unless you return to X. -Sony Vaio vgn-FS115B s3_bios (2),s3_mode (4) Toshiba Libretto L5 none (1) -Toshiba Portege 3020CT s3_mode (3) -Toshiba Satellite 4030CDT s3_mode (3) (S1 also works OK) -Toshiba Satellite 4080XCDT s3_mode (3) (S1 also works OK) +Toshiba Satellite 4030CDT s3_mode (3) +Toshiba Satellite 4080XCDT s3_mode (3) Toshiba Satellite 4090XCDT ??? (*) Toshiba Satellite P10-554 s3_bios,s3_mode (4)(****) Toshiba M30 (2) xor X with nvidia driver using internal AGP @@ -179,3 +151,39 @@ Asus A7V8X nVidia RIVA TNT2 model 64 s3_bios,s3_mode (4) (***) To be tested with a newer kernel. (****) Not with SMP kernel, UP only. + +VBEtool details +~~~~~~~~~~~~~~~ +(with thanks to Carl-Daniel Hailfinger) + +First, boot into X and run the following script ONCE: +#!/bin/bash +statedir=/root/s3/state +mkdir -p $statedir +chvt 2 +sleep 1 +vbetool vbestate save >$statedir/vbe + + +To suspend and resume properly, call the following script as root: +#!/bin/bash +statedir=/root/s3/state +curcons=`fgconsole` +fuser /dev/tty$curcons 2>/dev/null|xargs ps -o comm= -p|grep -q X && chvt 2 +cat /dev/vcsa >$statedir/vcsa +sync +echo 3 >/proc/acpi/sleep +sync +vbetool post +vbetool vbestate restore <$statedir/vbe +cat $statedir/vcsa >/dev/vcsa +rckbd restart +chvt $[curcons%6+1] +chvt $curcons + + +Unless you change your graphics card or other hardware configuration, +the state once saved will be OK for every resume afterwards. +NOTE: The "rckbd restart" command may be different for your +distribution. Simply replace it with the command you would use to +set the fonts on screen. 
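
The userland-swsusp.txt file deleted a few hunks above documents a character-device interface (major 10, minor 231) driven by ioctl()s defined in kernel/power/power.h. As a reading aid for that text, here is a minimal sketch of the suspend-side call sequence it describes. It assumes a /dev/snapshot node and a local copy of that header for the SNAPSHOT_* macros; error handling, mlockall() and the actual write-out of pages to the resume partition are elided, so treat it as pseudocode in C clothing rather than a working suspend utility.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "power.h"	/* assumed copy of kernel/power/power.h (SNAPSHOT_* ioctls) */

int main(void)
{
	char buf[4096];		/* 4 KiB pages assumed (i386) */
	int in_suspend = 0;

	/* Opening for reading puts the device in suspend mode. */
	int fd = open("/dev/snapshot", O_RDONLY);
	if (fd < 0)
		return 1;

	ioctl(fd, SNAPSHOT_FREEZE, 0);	/* freeze user space first */
	ioctl(fd, SNAPSHOT_ATOMIC_SNAPSHOT, &in_suspend);

	if (in_suspend) {
		/* Snapshot just created: drain it at most one page per
		 * read(), as the documented read() limits require, then
		 * the image would be saved and the box powered off. */
		while (read(fd, buf, sizeof(buf)) > 0)
			;	/* write the page to storage here */
	} else {
		/* Value 0: we are the resumed instance and must simply
		 * close the device, per the deleted documentation. */
	}

	ioctl(fd, SNAPSHOT_UNFREEZE, 0);
	close(fd);
	return 0;
}
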
diff --git a/trunk/arch/cris/kernel/irq.c b/trunk/arch/cris/kernel/irq.c index b504def3e346..30deaf1b728a 100644 --- a/trunk/arch/cris/kernel/irq.c +++ b/trunk/arch/cris/kernel/irq.c @@ -52,8 +52,9 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j=0; jtypename); seq_printf(p, " %s", action->name); diff --git a/trunk/arch/frv/kernel/irq.c b/trunk/arch/frv/kernel/irq.c index 11fa326a8f62..27ab4c30aac6 100644 --- a/trunk/arch/frv/kernel/irq.c +++ b/trunk/arch/frv/kernel/irq.c @@ -75,8 +75,9 @@ int show_interrupts(struct seq_file *p, void *v) switch (i) { case 0: seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); break; @@ -99,8 +100,9 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]); #endif level = group->sources[ix]->level - frv_irq_levels; diff --git a/trunk/arch/i386/Kconfig b/trunk/arch/i386/Kconfig index bfea1bedcbf2..5b1a7d46d1d9 100644 --- a/trunk/arch/i386/Kconfig +++ b/trunk/arch/i386/Kconfig @@ -80,7 +80,6 @@ config X86_VOYAGER config X86_NUMAQ bool "NUMAQ (IBM/Sequent)" - select SMP select NUMA help This option is used for getting Linux to run on a (IBM/Sequent) NUMA @@ -401,7 +400,6 @@ choice config NOHIGHMEM bool "off" - depends on !X86_NUMAQ ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 @@ -438,7 +436,6 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" - depends on !X86_NUMAQ help Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. @@ -506,6 +503,10 @@ config NUMA default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) +# Need comments to help the hapless user trying to turn on NUMA support +comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" + depends on X86_NUMAQ && (!HIGHMEM64G || !SMP) + comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI) @@ -659,18 +660,13 @@ config BOOT_IOREMAP default y config REGPARM - bool "Use register arguments" - default y + bool "Use register arguments (EXPERIMENTAL)" + depends on EXPERIMENTAL + default n help - Compile the kernel with -mregparm=3. This instructs gcc to use - a more efficient function call ABI which passes the first three - arguments of a function call via registers, which results in denser - and faster code. - - If this option is disabled, then the default ABI of passing - arguments via the stack is used. - - If unsure, say Y. + Compile the kernel with -mregparm=3. This uses a different ABI + and passes the first three arguments of a function call in registers. + This will probably break binary only modules. config SECCOMP bool "Enable seccomp to safely compute untrusted bytecode" diff --git a/trunk/arch/i386/Kconfig.debug b/trunk/arch/i386/Kconfig.debug index 00108ba9a78d..bf32ecc9ad04 100644 --- a/trunk/arch/i386/Kconfig.debug +++ b/trunk/arch/i386/Kconfig.debug @@ -31,15 +31,6 @@ config DEBUG_STACK_USAGE This option will slow down process creation somewhat. 
-config STACK_BACKTRACE_COLS - int "Stack backtraces per line" if DEBUG_KERNEL - range 1 3 - default 2 - help - Selects how many stack backtrace entries per line to display. - - This can save screen space when displaying traces. - comment "Page alloc debug is incompatible with Software Suspend on i386" depends on DEBUG_KERNEL && SOFTWARE_SUSPEND diff --git a/trunk/arch/i386/kernel/Makefile b/trunk/arch/i386/kernel/Makefile index 5b9ed21216cf..65656c033d70 100644 --- a/trunk/arch/i386/kernel/Makefile +++ b/trunk/arch/i386/kernel/Makefile @@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ - quirks.o i8237.o topology.o alternative.o + quirks.o i8237.o topology.o obj-y += cpu/ obj-y += timers/ diff --git a/trunk/arch/i386/kernel/alternative.c b/trunk/arch/i386/kernel/alternative.c deleted file mode 100644 index 5cbd6f99fb2a..000000000000 --- a/trunk/arch/i386/kernel/alternative.c +++ /dev/null @@ -1,321 +0,0 @@ -#include -#include -#include -#include -#include - -#define DEBUG 0 -#if DEBUG -# define DPRINTK(fmt, args...) printk(fmt, args) -#else -# define DPRINTK(fmt, args...) -#endif - -/* Use inline assembly to define this because the nops are defined - as inline assembly strings in the include files and we cannot - get them easily into strings. */ -asm("\t.data\nintelnops: " - GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 - GENERIC_NOP7 GENERIC_NOP8); -asm("\t.data\nk8nops: " - K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 - K8_NOP7 K8_NOP8); -asm("\t.data\nk7nops: " - K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 - K7_NOP7 K7_NOP8); - -extern unsigned char intelnops[], k8nops[], k7nops[]; -static unsigned char *intel_nops[ASM_NOP_MAX+1] = { - NULL, - intelnops, - intelnops + 1, - intelnops + 1 + 2, - intelnops + 1 + 2 + 3, - intelnops + 1 + 2 + 3 + 4, - intelnops + 1 + 2 + 3 + 4 + 5, - intelnops + 1 + 2 + 3 + 4 + 5 + 6, - intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; -static unsigned char *k8_nops[ASM_NOP_MAX+1] = { - NULL, - k8nops, - k8nops + 1, - k8nops + 1 + 2, - k8nops + 1 + 2 + 3, - k8nops + 1 + 2 + 3 + 4, - k8nops + 1 + 2 + 3 + 4 + 5, - k8nops + 1 + 2 + 3 + 4 + 5 + 6, - k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; -static unsigned char *k7_nops[ASM_NOP_MAX+1] = { - NULL, - k7nops, - k7nops + 1, - k7nops + 1 + 2, - k7nops + 1 + 2 + 3, - k7nops + 1 + 2 + 3 + 4, - k7nops + 1 + 2 + 3 + 4 + 5, - k7nops + 1 + 2 + 3 + 4 + 5 + 6, - k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, -}; -static struct nop { - int cpuid; - unsigned char **noptable; -} noptypes[] = { - { X86_FEATURE_K8, k8_nops }, - { X86_FEATURE_K7, k7_nops }, - { -1, NULL } -}; - - -extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; -extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[]; -extern u8 *__smp_locks[], *__smp_locks_end[]; - -extern u8 __smp_alt_begin[], __smp_alt_end[]; - - -static unsigned char** find_nop_table(void) -{ - unsigned char **noptable = intel_nops; - int i; - - for (i = 0; noptypes[i].cpuid >= 0; i++) { - if (boot_cpu_has(noptypes[i].cpuid)) { - noptable = noptypes[i].noptable; - break; - } - } - return noptable; -} - -/* Replace instructions with better alternatives for this CPU type. - This runs before SMP is initialized to avoid SMP problems with - self modifying code. 
This implies that assymetric systems where - APs have less capabilities than the boot processor are not handled. - Tough. Make sure you disable such features by hand. */ - -void apply_alternatives(struct alt_instr *start, struct alt_instr *end) -{ - unsigned char **noptable = find_nop_table(); - struct alt_instr *a; - int diff, i, k; - - DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); - for (a = start; a < end; a++) { - BUG_ON(a->replacementlen > a->instrlen); - if (!boot_cpu_has(a->cpuid)) - continue; - memcpy(a->instr, a->replacement, a->replacementlen); - diff = a->instrlen - a->replacementlen; - /* Pad the rest with nops */ - for (i = a->replacementlen; diff > 0; diff -= k, i += k) { - k = diff; - if (k > ASM_NOP_MAX) - k = ASM_NOP_MAX; - memcpy(a->instr + i, noptable[k], k); - } - } -} - -static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end) -{ - struct alt_instr *a; - - DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end); - for (a = start; a < end; a++) { - memcpy(a->replacement + a->replacementlen, - a->instr, - a->instrlen); - } -} - -static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end) -{ - struct alt_instr *a; - - for (a = start; a < end; a++) { - memcpy(a->instr, - a->replacement + a->replacementlen, - a->instrlen); - } -} - -static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) -{ - u8 **ptr; - - for (ptr = start; ptr < end; ptr++) { - if (*ptr < text) - continue; - if (*ptr > text_end) - continue; - **ptr = 0xf0; /* lock prefix */ - }; -} - -static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) -{ - unsigned char **noptable = find_nop_table(); - u8 **ptr; - - for (ptr = start; ptr < end; ptr++) { - if (*ptr < text) - continue; - if (*ptr > text_end) - continue; - **ptr = noptable[1][0]; - }; -} - -struct smp_alt_module { - /* what is this ??? */ - struct module *mod; - char *name; - - /* ptrs to lock prefixes */ - u8 **locks; - u8 **locks_end; - - /* .text segment, needed to avoid patching init code ;) */ - u8 *text; - u8 *text_end; - - struct list_head next; -}; -static LIST_HEAD(smp_alt_modules); -static DEFINE_SPINLOCK(smp_alt); - -static int smp_alt_once = 0; -static int __init bootonly(char *str) -{ - smp_alt_once = 1; - return 1; -} -__setup("smp-alt-boot", bootonly); - -void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) -{ - struct smp_alt_module *smp; - unsigned long flags; - - if (smp_alt_once) { - if (boot_cpu_has(X86_FEATURE_UP)) - alternatives_smp_unlock(locks, locks_end, - text, text_end); - return; - } - - smp = kzalloc(sizeof(*smp), GFP_KERNEL); - if (NULL == smp) - return; /* we'll run the (safe but slow) SMP code then ... 
*/ - - smp->mod = mod; - smp->name = name; - smp->locks = locks; - smp->locks_end = locks_end; - smp->text = text; - smp->text_end = text_end; - DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", - __FUNCTION__, smp->locks, smp->locks_end, - smp->text, smp->text_end, smp->name); - - spin_lock_irqsave(&smp_alt, flags); - list_add_tail(&smp->next, &smp_alt_modules); - if (boot_cpu_has(X86_FEATURE_UP)) - alternatives_smp_unlock(smp->locks, smp->locks_end, - smp->text, smp->text_end); - spin_unlock_irqrestore(&smp_alt, flags); -} - -void alternatives_smp_module_del(struct module *mod) -{ - struct smp_alt_module *item; - unsigned long flags; - - if (smp_alt_once) - return; - - spin_lock_irqsave(&smp_alt, flags); - list_for_each_entry(item, &smp_alt_modules, next) { - if (mod != item->mod) - continue; - list_del(&item->next); - spin_unlock_irqrestore(&smp_alt, flags); - DPRINTK("%s: %s\n", __FUNCTION__, item->name); - kfree(item); - return; - } - spin_unlock_irqrestore(&smp_alt, flags); -} - -void alternatives_smp_switch(int smp) -{ - struct smp_alt_module *mod; - unsigned long flags; - - if (smp_alt_once) - return; - BUG_ON(!smp && (num_online_cpus() > 1)); - - spin_lock_irqsave(&smp_alt, flags); - if (smp) { - printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); - clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); - clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); - alternatives_smp_apply(__smp_alt_instructions, - __smp_alt_instructions_end); - list_for_each_entry(mod, &smp_alt_modules, next) - alternatives_smp_lock(mod->locks, mod->locks_end, - mod->text, mod->text_end); - } else { - printk(KERN_INFO "SMP alternatives: switching to UP code\n"); - set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); - set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); - apply_alternatives(__smp_alt_instructions, - __smp_alt_instructions_end); - list_for_each_entry(mod, &smp_alt_modules, next) - alternatives_smp_unlock(mod->locks, mod->locks_end, - mod->text, mod->text_end); - } - spin_unlock_irqrestore(&smp_alt, flags); -} - -void __init alternative_instructions(void) -{ - apply_alternatives(__alt_instructions, __alt_instructions_end); - - /* switch to patch-once-at-boottime-only mode and free the - * tables in case we know the number of CPUs will never ever - * change */ -#ifdef CONFIG_HOTPLUG_CPU - if (num_possible_cpus() < 2) - smp_alt_once = 1; -#else - smp_alt_once = 1; -#endif - - if (smp_alt_once) { - if (1 == num_possible_cpus()) { - printk(KERN_INFO "SMP alternatives: switching to UP code\n"); - set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); - set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); - apply_alternatives(__smp_alt_instructions, - __smp_alt_instructions_end); - alternatives_smp_unlock(__smp_locks, __smp_locks_end, - _text, _etext); - } - free_init_pages("SMP alternatives", - (unsigned long)__smp_alt_begin, - (unsigned long)__smp_alt_end); - } else { - alternatives_smp_save(__smp_alt_instructions, - __smp_alt_instructions_end); - alternatives_smp_module_add(NULL, "core kernel", - __smp_locks, __smp_locks_end, - _text, _etext); - alternatives_smp_switch(0); - } -} diff --git a/trunk/arch/i386/kernel/apic.c b/trunk/arch/i386/kernel/apic.c index eb5279d23b7f..776c90989e06 100644 --- a/trunk/arch/i386/kernel/apic.c +++ b/trunk/arch/i386/kernel/apic.c @@ -38,7 +38,6 @@ #include #include -#include #include #include "io_ports.h" diff --git a/trunk/arch/i386/kernel/cpu/centaur.c b/trunk/arch/i386/kernel/cpu/centaur.c index bd75629dd262..f52669ecb93f 100644 --- 
a/trunk/arch/i386/kernel/cpu/centaur.c +++ b/trunk/arch/i386/kernel/cpu/centaur.c @@ -4,7 +4,6 @@ #include #include #include -#include #include "cpu.h" #ifdef CONFIG_X86_OOSTORE diff --git a/trunk/arch/i386/kernel/cpu/common.c b/trunk/arch/i386/kernel/cpu/common.c index 7e3d6b6a4e96..e6bd095ae108 100644 --- a/trunk/arch/i386/kernel/cpu/common.c +++ b/trunk/arch/i386/kernel/cpu/common.c @@ -25,10 +25,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); -static int cachesize_override __cpuinitdata = -1; -static int disable_x86_fxsr __cpuinitdata; -static int disable_x86_serial_nr __cpuinitdata = 1; -static int disable_x86_sep __cpuinitdata; +static int cachesize_override __devinitdata = -1; +static int disable_x86_fxsr __devinitdata = 0; +static int disable_x86_serial_nr __devinitdata = 1; struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; @@ -60,7 +59,7 @@ static int __init cachesize_setup(char *str) } __setup("cachesize=", cachesize_setup); -int __cpuinit get_model_name(struct cpuinfo_x86 *c) +int __devinit get_model_name(struct cpuinfo_x86 *c) { unsigned int *v; char *p, *q; @@ -90,7 +89,7 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c) } -void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) +void __devinit display_cacheinfo(struct cpuinfo_x86 *c) { unsigned int n, dummy, ecx, edx, l2size; @@ -131,7 +130,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ /* Look up CPU names by table lookup. */ -static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) +static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) { struct cpu_model_info *info; @@ -152,7 +151,7 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) } -static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) +static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) { char *v = c->x86_vendor_id; int i; @@ -188,14 +187,6 @@ static int __init x86_fxsr_setup(char * s) __setup("nofxsr", x86_fxsr_setup); -static int __init x86_sep_setup(char * s) -{ - disable_x86_sep = 1; - return 1; -} -__setup("nosep", x86_sep_setup); - - /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(u32 flag) { @@ -219,7 +210,7 @@ static inline int flag_is_changeable_p(u32 flag) /* Probe for the CPUID instruction */ -static int __cpuinit have_cpuid_p(void) +static int __devinit have_cpuid_p(void) { return flag_is_changeable_p(X86_EFLAGS_ID); } @@ -263,7 +254,7 @@ static void __init early_cpu_detect(void) } } -void __cpuinit generic_identify(struct cpuinfo_x86 * c) +void __devinit generic_identify(struct cpuinfo_x86 * c) { u32 tfms, xlvl; int junk; @@ -316,7 +307,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) #endif } -static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) +static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { /* Disable processor serial number */ @@ -344,7 +335,7 @@ __setup("serialnumber", x86_serial_nr_setup); /* * This does the hard work of actually picking apart the CPU stuff... 
*/ -void __cpuinit identify_cpu(struct cpuinfo_x86 *c) +void __devinit identify_cpu(struct cpuinfo_x86 *c) { int i; @@ -414,10 +405,6 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) clear_bit(X86_FEATURE_XMM, c->x86_capability); } - /* SEP disabled? */ - if (disable_x86_sep) - clear_bit(X86_FEATURE_SEP, c->x86_capability); - if (disable_pse) clear_bit(X86_FEATURE_PSE, c->x86_capability); @@ -430,7 +417,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) else /* Last resort... */ sprintf(c->x86_model_id, "%02x/%02x", - c->x86, c->x86_model); + c->x86_vendor, c->x86_model); } /* Now the feature flags better reflect actual CPU features! */ @@ -466,7 +453,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c) } #ifdef CONFIG_X86_HT -void __cpuinit detect_ht(struct cpuinfo_x86 *c) +void __devinit detect_ht(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; int index_msb, core_bits; @@ -513,7 +500,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) } #endif -void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) +void __devinit print_cpu_info(struct cpuinfo_x86 *c) { char *vendor = NULL; @@ -536,7 +523,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) printk("\n"); } -cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; +cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; /* This is hacky. :) * We're emulating future behavior. @@ -583,7 +570,7 @@ void __init early_cpu_init(void) * and IDT. We reload them nevertheless, this function acts as a * 'CPU state barrier', nothing should get across. */ -void __cpuinit cpu_init(void) +void __devinit cpu_init(void) { int cpu = smp_processor_id(); struct tss_struct * t = &per_cpu(init_tss, cpu); @@ -683,7 +670,7 @@ void __cpuinit cpu_init(void) } #ifdef CONFIG_HOTPLUG_CPU -void __cpuinit cpu_uninit(void) +void __devinit cpu_uninit(void) { int cpu = raw_smp_processor_id(); cpu_clear(cpu, cpu_initialized); diff --git a/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index 3d5110b65cc3..e11a09207ec8 100644 --- a/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/trunk/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -1145,7 +1145,9 @@ static int __cpuinit powernowk8_init(void) { unsigned int i, supported_cpus = 0; - for_each_cpu(i) { + for (i=0; ix86_vendor != X86_VENDOR_INTEL) return; @@ -44,7 +44,7 @@ void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c) * This is called before we do cpu ident work */ -int __cpuinit ppro_with_ram_bug(void) +int __devinit ppro_with_ram_bug(void) { /* Uses data from early_cpu_detect now */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && @@ -62,7 +62,7 @@ int __cpuinit ppro_with_ram_bug(void) * P4 Xeon errata 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. 
*/ -static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) +static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) { unsigned long lo, hi; @@ -81,7 +81,7 @@ static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c) /* * find out the number of processor cores on the die */ -static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) +static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) { unsigned int eax, ebx, ecx, edx; @@ -96,7 +96,7 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c) return 1; } -static void __cpuinit init_intel(struct cpuinfo_x86 *c) +static void __devinit init_intel(struct cpuinfo_x86 *c) { unsigned int l2 = 0; char *p = NULL; @@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) return size; } -static struct cpu_dev intel_cpu_dev __cpuinitdata = { +static struct cpu_dev intel_cpu_dev __devinitdata = { .c_vendor = "Intel", .c_ident = { "GenuineIntel" }, .c_models = { diff --git a/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c b/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c index ce61921369e5..ffe58cee0c48 100644 --- a/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/trunk/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -174,7 +174,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ - if (c->cpuid_level > 3) { + if (c->cpuid_level > 4) { static int is_initialized; if (is_initialized == 0) { @@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) } } } -static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) +static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; int sibling; diff --git a/trunk/arch/i386/kernel/cpu/proc.c b/trunk/arch/i386/kernel/cpu/proc.c index 5cfbd8011698..89a85af33d28 100644 --- a/trunk/arch/i386/kernel/cpu/proc.c +++ b/trunk/arch/i386/kernel/cpu/proc.c @@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) /* Other (Linux-defined) */ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL, - "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL, + "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, diff --git a/trunk/arch/i386/kernel/crash.c b/trunk/arch/i386/kernel/crash.c index e3c5fca0aa8a..d49dbe8dc96b 100644 --- a/trunk/arch/i386/kernel/crash.c +++ b/trunk/arch/i386/kernel/crash.c @@ -105,7 +105,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) return 1; local_irq_disable(); - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } diff --git a/trunk/arch/i386/kernel/entry.S b/trunk/arch/i386/kernel/entry.S index cfc683f153b9..4d704724b2f5 100644 --- a/trunk/arch/i386/kernel/entry.S +++ b/trunk/arch/i386/kernel/entry.S @@ -226,10 +226,6 @@ ENTRY(system_call) pushl %eax # save orig_eax SAVE_ALL GET_THREAD_INFO(%ebp) - testl $TF_MASK,EFLAGS(%esp) - jz no_singlestep - orl $_TIF_SINGLESTEP,TI_flags(%ebp) -no_singlestep: # system call tracing in operation / emulation /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp) diff --git 
a/trunk/arch/i386/kernel/head.S b/trunk/arch/i386/kernel/head.S index 3debc2e26542..e0b7c632efbc 100644 --- a/trunk/arch/i386/kernel/head.S +++ b/trunk/arch/i386/kernel/head.S @@ -450,6 +450,7 @@ int_msg: .globl boot_gdt_descr .globl idt_descr +.globl cpu_gdt_descr ALIGN # early boot GDT descriptor (must use 1:1 address mapping) @@ -469,6 +470,8 @@ cpu_gdt_descr: .word GDT_ENTRIES*8-1 .long cpu_gdt_table + .fill NR_CPUS-1,8,0 # space for the other GDT descriptors + /* * The boot_gdt_table must mirror the equivalent in setup.S and is * used only for booting. @@ -482,7 +485,7 @@ ENTRY(boot_gdt_table) /* * The Global Descriptor Table contains 28 quadwords, per-CPU. */ - .align L1_CACHE_BYTES + .align PAGE_SIZE_asm ENTRY(cpu_gdt_table) .quad 0x0000000000000000 /* NULL descriptor */ .quad 0x0000000000000000 /* 0x0b reserved */ diff --git a/trunk/arch/i386/kernel/io_apic.c b/trunk/arch/i386/kernel/io_apic.c index 311b4e7266f1..39d9a5fa907e 100644 --- a/trunk/arch/i386/kernel/io_apic.c +++ b/trunk/arch/i386/kernel/io_apic.c @@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold) { int i, j; Dprintk("Rotating IRQs among CPUs.\n"); - for_each_online_cpu(i) { - for (j = 0; j < NR_IRQS; j++) { + for (i = 0; i < NR_CPUS; i++) { + for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) { if (!irq_desc[j].action) continue; /* Is it a significant load ? */ @@ -381,7 +381,7 @@ static void do_irq_balance(void) unsigned long imbalance = 0; cpumask_t allowed_mask, target_cpu_mask, tmp; - for_each_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { int package_index; CPU_IRQ(i) = 0; if (!cpu_online(i)) @@ -422,7 +422,9 @@ static void do_irq_balance(void) } } /* Find the least loaded processor package */ - for_each_online_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; if (i != CPU_TO_PACKAGEINDEX(i)) continue; if (min_cpu_irq > CPU_IRQ(i)) { @@ -439,7 +441,9 @@ static void do_irq_balance(void) */ tmp_cpu_irq = 0; tmp_loaded = -1; - for_each_online_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; if (i != CPU_TO_PACKAGEINDEX(i)) continue; if (max_cpu_irq <= CPU_IRQ(i)) @@ -615,7 +619,9 @@ static int __init balanced_irq_init(void) if (smp_num_siblings > 1 && !cpus_empty(tmp)) physical_balance = 1; - for_each_online_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL); if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) { @@ -632,11 +638,9 @@ static int __init balanced_irq_init(void) else printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq"); failed: - for_each_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { kfree(irq_cpu_data[i].irq_delta); - irq_cpu_data[i].irq_delta = NULL; kfree(irq_cpu_data[i].last_irq); - irq_cpu_data[i].last_irq = NULL; } return 0; } @@ -1757,8 +1761,7 @@ static void __init setup_ioapic_ids_from_mpc(void) * Don't check I/O APIC IDs for xAPIC systems. They have * no meaning without the serial APIC bus. 
*/ - if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 < 15)) return; /* * This is broken; anything with a real cpu count has to diff --git a/trunk/arch/i386/kernel/kprobes.c b/trunk/arch/i386/kernel/kprobes.c index 7a59050242a7..694a13997637 100644 --- a/trunk/arch/i386/kernel/kprobes.c +++ b/trunk/arch/i386/kernel/kprobes.c @@ -84,9 +84,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); + down(&kprobe_mutex); free_insn_slot(p->ainsn.insn); - mutex_unlock(&kprobe_mutex); + up(&kprobe_mutex); } static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/trunk/arch/i386/kernel/module.c b/trunk/arch/i386/kernel/module.c index 470cf97e7cd3..5149c8a621f0 100644 --- a/trunk/arch/i386/kernel/module.c +++ b/trunk/arch/i386/kernel/module.c @@ -104,38 +104,26 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, return -ENOEXEC; } +extern void apply_alternatives(void *start, void *end); + int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { - const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL; + const Elf_Shdr *s; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + /* look for .altinstructions to patch */ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { - if (!strcmp(".text", secstrings + s->sh_name)) - text = s; - if (!strcmp(".altinstructions", secstrings + s->sh_name)) - alt = s; - if (!strcmp(".smp_locks", secstrings + s->sh_name)) - locks= s; - } - - if (alt) { - /* patch .altinstructions */ - void *aseg = (void *)alt->sh_addr; - apply_alternatives(aseg, aseg + alt->sh_size); - } - if (locks && text) { - void *lseg = (void *)locks->sh_addr; - void *tseg = (void *)text->sh_addr; - alternatives_smp_module_add(me, me->name, - lseg, lseg + locks->sh_size, - tseg, tseg + text->sh_size); - } + void *seg; + if (strcmp(".altinstructions", secstrings + s->sh_name)) + continue; + seg = (void *)s->sh_addr; + apply_alternatives(seg, seg + s->sh_size); + } return 0; } void module_arch_cleanup(struct module *mod) { - alternatives_smp_module_del(mod); } diff --git a/trunk/arch/i386/kernel/mpparse.c b/trunk/arch/i386/kernel/mpparse.c index 8d8aa9d1796d..e6e2f43db85e 100644 --- a/trunk/arch/i386/kernel/mpparse.c +++ b/trunk/arch/i386/kernel/mpparse.c @@ -828,8 +828,6 @@ void __init find_smp_config (void) smp_scan_config(address, 0x400); } -int es7000_plat; - /* -------------------------------------------------------------------------- ACPI-based MP Configuration -------------------------------------------------------------------------- */ @@ -937,8 +935,7 @@ void __init mp_register_ioapic ( mp_ioapics[idx].mpc_apicaddr = address; set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); - if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) + if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) tmpid = io_apic_get_unique_id(idx, id); else tmpid = id; @@ -1014,6 +1011,8 @@ void __init mp_override_legacy_irq ( return; } +int es7000_plat; + void __init mp_config_acpi_legacy_irqs (void) { struct mpc_config_intsrc intsrc; diff --git a/trunk/arch/i386/kernel/nmi.c b/trunk/arch/i386/kernel/nmi.c index 9074818b9473..be87c5e2ee95 100644 --- a/trunk/arch/i386/kernel/nmi.c +++ b/trunk/arch/i386/kernel/nmi.c @@ -143,7 +143,7 @@ static int __init 
check_nmi_watchdog(void) local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for_each_cpu(cpu) { + for (cpu = 0; cpu < NR_CPUS; cpu++) { #ifdef CONFIG_SMP /* Check cpu_callin_map here because that is set after the timer is started. */ @@ -510,7 +510,7 @@ void touch_nmi_watchdog (void) * Just reset the alert counters, (other CPUs might be * spinning on locks we hold): */ - for_each_cpu(i) + for (i = 0; i < NR_CPUS; i++) alert_counter[i] = 0; /* @@ -543,7 +543,7 @@ void nmi_watchdog_tick (struct pt_regs * regs) /* * die_nmi will return ONLY if NOTIFY_STOP happens.. */ - die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP"); + die_nmi(regs, "NMI Watchdog detected LOCKUP"); } else { last_irq_sums[cpu] = sum; alert_counter[cpu] = 0; diff --git a/trunk/arch/i386/kernel/process.c b/trunk/arch/i386/kernel/process.c index 299e61674084..0480454ebffa 100644 --- a/trunk/arch/i386/kernel/process.c +++ b/trunk/arch/i386/kernel/process.c @@ -295,7 +295,7 @@ void show_regs(struct pt_regs * regs) printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); print_symbol("EIP is at %s\n", regs->eip); - if (user_mode_vm(regs)) + if (user_mode(regs)) printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); printk(" EFLAGS: %08lx %s (%s %.*s)\n", regs->eflags, print_tainted(), system_utsname.release, diff --git a/trunk/arch/i386/kernel/ptrace.c b/trunk/arch/i386/kernel/ptrace.c index 506462ef36a0..5c1fb6aada5b 100644 --- a/trunk/arch/i386/kernel/ptrace.c +++ b/trunk/arch/i386/kernel/ptrace.c @@ -34,10 +34,10 @@ /* * Determines which flags the user has access to [1 = access, 0 = no access]. - * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9). + * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9). * Also masks reserved bits (31-22, 15, 5, 3, 1). */ -#define FLAG_MASK 0x00050dd5 +#define FLAG_MASK 0x00054dd5 /* set's the trap flag. */ #define TRAP_FLAG 0x100 diff --git a/trunk/arch/i386/kernel/semaphore.c b/trunk/arch/i386/kernel/semaphore.c index 967dc74df9ee..7455ab643943 100644 --- a/trunk/arch/i386/kernel/semaphore.c +++ b/trunk/arch/i386/kernel/semaphore.c @@ -110,11 +110,11 @@ asm( ".align 4\n" ".globl __write_lock_failed\n" "__write_lock_failed:\n\t" - LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" + LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" "1: rep; nop\n\t" "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" "jne 1b\n\t" - LOCK_PREFIX "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" + LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" "jnz __write_lock_failed\n\t" "ret" ); @@ -124,11 +124,11 @@ asm( ".align 4\n" ".globl __read_lock_failed\n" "__read_lock_failed:\n\t" - LOCK_PREFIX "incl (%eax)\n" + LOCK "incl (%eax)\n" "1: rep; nop\n\t" "cmpl $1,(%eax)\n\t" "js 1b\n\t" - LOCK_PREFIX "decl (%eax)\n\t" + LOCK "decl (%eax)\n\t" "js __read_lock_failed\n\t" "ret" ); diff --git a/trunk/arch/i386/kernel/setup.c b/trunk/arch/i386/kernel/setup.c index 2d8782960f41..ab62a9f4701e 100644 --- a/trunk/arch/i386/kernel/setup.c +++ b/trunk/arch/i386/kernel/setup.c @@ -1377,6 +1377,101 @@ static void __init register_memory(void) pci_mem_start, gapstart, gapsize); } +/* Use inline assembly to define this because the nops are defined + as inline assembly strings in the include files and we cannot + get them easily into strings. 
*/ +asm("\t.data\nintelnops: " + GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 + GENERIC_NOP7 GENERIC_NOP8); +asm("\t.data\nk8nops: " + K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 + K8_NOP7 K8_NOP8); +asm("\t.data\nk7nops: " + K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 + K7_NOP7 K7_NOP8); + +extern unsigned char intelnops[], k8nops[], k7nops[]; +static unsigned char *intel_nops[ASM_NOP_MAX+1] = { + NULL, + intelnops, + intelnops + 1, + intelnops + 1 + 2, + intelnops + 1 + 2 + 3, + intelnops + 1 + 2 + 3 + 4, + intelnops + 1 + 2 + 3 + 4 + 5, + intelnops + 1 + 2 + 3 + 4 + 5 + 6, + intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +static unsigned char *k8_nops[ASM_NOP_MAX+1] = { + NULL, + k8nops, + k8nops + 1, + k8nops + 1 + 2, + k8nops + 1 + 2 + 3, + k8nops + 1 + 2 + 3 + 4, + k8nops + 1 + 2 + 3 + 4 + 5, + k8nops + 1 + 2 + 3 + 4 + 5 + 6, + k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +static unsigned char *k7_nops[ASM_NOP_MAX+1] = { + NULL, + k7nops, + k7nops + 1, + k7nops + 1 + 2, + k7nops + 1 + 2 + 3, + k7nops + 1 + 2 + 3 + 4, + k7nops + 1 + 2 + 3 + 4 + 5, + k7nops + 1 + 2 + 3 + 4 + 5 + 6, + k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, +}; +static struct nop { + int cpuid; + unsigned char **noptable; +} noptypes[] = { + { X86_FEATURE_K8, k8_nops }, + { X86_FEATURE_K7, k7_nops }, + { -1, NULL } +}; + +/* Replace instructions with better alternatives for this CPU type. + + This runs before SMP is initialized to avoid SMP problems with + self modifying code. This implies that assymetric systems where + APs have less capabilities than the boot processor are not handled. + Tough. Make sure you disable such features by hand. */ +void apply_alternatives(void *start, void *end) +{ + struct alt_instr *a; + int diff, i, k; + unsigned char **noptable = intel_nops; + for (i = 0; noptypes[i].cpuid >= 0; i++) { + if (boot_cpu_has(noptypes[i].cpuid)) { + noptable = noptypes[i].noptable; + break; + } + } + for (a = start; (void *)a < end; a++) { + if (!boot_cpu_has(a->cpuid)) + continue; + BUG_ON(a->replacementlen > a->instrlen); + memcpy(a->instr, a->replacement, a->replacementlen); + diff = a->instrlen - a->replacementlen; + /* Pad the rest with nops */ + for (i = a->replacementlen; diff > 0; diff -= k, i += k) { + k = diff; + if (k > ASM_NOP_MAX) + k = ASM_NOP_MAX; + memcpy(a->instr + i, noptable[k], k); + } + } +} + +void __init alternative_instructions(void) +{ + extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; + apply_alternatives(__alt_instructions, __alt_instructions_end); +} + static char * __init machine_specific_memory_setup(void); #ifdef CONFIG_MCA @@ -1459,16 +1554,6 @@ void __init setup_arch(char **cmdline_p) parse_cmdline_early(cmdline_p); -#ifdef CONFIG_EARLY_PRINTK - { - char *s = strstr(*cmdline_p, "earlyprintk="); - if (s) { - setup_early_printk(strchr(s, '=') + 1); - printk("early console enabled\n"); - } - } -#endif - max_low_pfn = setup_memory(); /* @@ -1493,6 +1578,19 @@ void __init setup_arch(char **cmdline_p) * NOTE: at this point the bootmem allocator is fully available. 
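To make the restored patching loop easier to follow in isolation, here is a self-contained sketch of the same copy-then-pad logic. The struct layout, the boot_cpu_has() stub, and the single-byte 0x90 padding are simplifying assumptions; the kernel pads with per-vendor multi-byte nops, as the noptypes[] table above shows:

#include <string.h>

struct alt_instr {                      /* assumed, simplified layout */
        unsigned char *instr;           /* original instruction stream */
        unsigned char *replacement;     /* preferred sequence */
        int cpuid;                      /* feature bit required */
        unsigned char instrlen;         /* length of original, bytes */
        unsigned char replacementlen;   /* length of replacement, bytes */
};

static int boot_cpu_has(int feature)    /* stub for the sketch */
{
        return feature == 1;
}

static void patch_alternatives(struct alt_instr *a, struct alt_instr *end)
{
        for (; a < end; a++) {
                if (!boot_cpu_has(a->cpuid))
                        continue;       /* keep the generic sequence */
                /* assumes replacementlen <= instrlen, as the BUG_ON above checks */
                memcpy(a->instr, a->replacement, a->replacementlen);
                /* pad the slack with nops; 0x90 is the 1-byte x86 nop */
                memset(a->instr + a->replacementlen, 0x90,
                       a->instrlen - a->replacementlen);
        }
}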
*/ +#ifdef CONFIG_EARLY_PRINTK + { + char *s = strstr(*cmdline_p, "earlyprintk="); + if (s) { + extern void setup_early_printk(char *); + + setup_early_printk(strchr(s, '=') + 1); + printk("early console enabled\n"); + } + } +#endif + + dmi_scan_machine(); #ifdef CONFIG_X86_GENERICARCH diff --git a/trunk/arch/i386/kernel/signal.c b/trunk/arch/i386/kernel/signal.c index 5c352c3a9e7f..963616d364ec 100644 --- a/trunk/arch/i386/kernel/signal.c +++ b/trunk/arch/i386/kernel/signal.c @@ -123,8 +123,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax err |= __get_user(tmp, &sc->seg); \ loadsegment(seg,tmp); } -#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_RF | \ - X86_EFLAGS_OF | X86_EFLAGS_DF | \ +#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | X86_EFLAGS_DF | \ X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \ X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF) @@ -583,6 +582,9 @@ static void fastcall do_signal(struct pt_regs *regs) if (!user_mode(regs)) return; + if (try_to_freeze()) + goto no_signal; + if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = ¤t->saved_sigmask; else @@ -611,6 +613,7 @@ static void fastcall do_signal(struct pt_regs *regs) return; } +no_signal: /* Did we come from a system call? */ if (regs->orig_eax >= 0) { /* Restart the system call - no handlers present */ diff --git a/trunk/arch/i386/kernel/smpboot.c b/trunk/arch/i386/kernel/smpboot.c index 4c470e99a742..7007e1783797 100644 --- a/trunk/arch/i386/kernel/smpboot.c +++ b/trunk/arch/i386/kernel/smpboot.c @@ -899,7 +899,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu) unsigned short nmi_high = 0, nmi_low = 0; ++cpucount; - alternatives_smp_switch(1); /* * We can't use kernel_thread since we must avoid to @@ -1369,8 +1368,6 @@ void __cpu_die(unsigned int cpu) /* They ack this in play_dead by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { printk ("CPU %d is now offline\n", cpu); - if (1 == num_online_cpus()) - alternatives_smp_switch(0); return; } msleep(100); diff --git a/trunk/arch/i386/kernel/topology.c b/trunk/arch/i386/kernel/topology.c index 296355292c7c..67a0e1baa28b 100644 --- a/trunk/arch/i386/kernel/topology.c +++ b/trunk/arch/i386/kernel/topology.c @@ -41,15 +41,6 @@ int arch_register_cpu(int num){ parent = &node_devices[node].node; #endif /* CONFIG_NUMA */ - /* - * CPU0 cannot be offlined due to several - * restrictions and assumptions in kernel. This basically - * doesnt add a control file, one cannot attempt to offline - * BSP. - */ - if (!num) - cpu_devices[num].cpu.no_control = 1; - return register_cpu(&cpu_devices[num].cpu, num, parent); } diff --git a/trunk/arch/i386/kernel/traps.c b/trunk/arch/i386/kernel/traps.c index de5386b01d38..b814dbdcc91e 100644 --- a/trunk/arch/i386/kernel/traps.c +++ b/trunk/arch/i386/kernel/traps.c @@ -99,8 +99,6 @@ int register_die_notifier(struct notifier_block *nb) { int err = 0; unsigned long flags; - - vmalloc_sync_all(); spin_lock_irqsave(&die_notifier_lock, flags); err = notifier_chain_register(&i386die_chain, nb); spin_unlock_irqrestore(&die_notifier_lock, flags); @@ -114,30 +112,12 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p) p < (void *)tinfo + THREAD_SIZE - 3; } -/* - * Print CONFIG_STACK_BACKTRACE_COLS address/symbol entries per line. 
- */ -static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl, - int printed) +static void print_addr_and_symbol(unsigned long addr, char *log_lvl) { - if (!printed) - printk(log_lvl); - -#if CONFIG_STACK_BACKTRACE_COLS == 1 + printk(log_lvl); printk(" [<%08lx>] ", addr); -#else - printk(" <%08lx> ", addr); -#endif print_symbol("%s", addr); - - printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; - - if (printed) - printk(" "); - else - printk("\n"); - - return printed; + printk("\n"); } static inline unsigned long print_context_stack(struct thread_info *tinfo, @@ -145,24 +125,20 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, char *log_lvl) { unsigned long addr; - int printed = 0; /* nr of entries already printed on current line */ #ifdef CONFIG_FRAME_POINTER while (valid_stack_ptr(tinfo, (void *)ebp)) { addr = *(unsigned long *)(ebp + 4); - printed = print_addr_and_symbol(addr, log_lvl, printed); + print_addr_and_symbol(addr, log_lvl); ebp = *(unsigned long *)ebp; } #else while (valid_stack_ptr(tinfo, stack)) { addr = *stack++; if (__kernel_text_address(addr)) - printed = print_addr_and_symbol(addr, log_lvl, printed); + print_addr_and_symbol(addr, log_lvl); } #endif - if (printed) - printk("\n"); - return ebp; } @@ -190,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task, stack = (unsigned long*)context->previous_esp; if (!stack) break; - printk("%s =======================\n", log_lvl); + printk(log_lvl); + printk(" =======================\n"); } } @@ -217,17 +194,21 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, for(i = 0; i < kstack_depth_to_print; i++) { if (kstack_end(stack)) break; - if (i && ((i % 8) == 0)) - printk("\n%s ", log_lvl); + if (i && ((i % 8) == 0)) { + printk("\n"); + printk(log_lvl); + printk(" "); + } printk("%08lx ", *stack++); } - printk("\n%sCall Trace:\n", log_lvl); + printk("\n"); + printk(log_lvl); + printk("Call Trace:\n"); show_trace_log_lvl(task, esp, log_lvl); } void show_stack(struct task_struct *task, unsigned long *esp) { - printk(" "); show_stack_log_lvl(task, esp, ""); } @@ -252,7 +233,7 @@ void show_registers(struct pt_regs *regs) esp = (unsigned long) (®s->esp); savesegment(ss, ss); - if (user_mode_vm(regs)) { + if (user_mode(regs)) { in_kernel = 0; esp = regs->esp; ss = regs->xss & 0xffff; @@ -352,8 +333,6 @@ void die(const char * str, struct pt_regs * regs, long err) static int die_counter; unsigned long flags; - oops_enter(); - if (die.lock_owner != raw_smp_processor_id()) { console_verbose(); spin_lock_irqsave(&die.lock, flags); @@ -406,7 +385,6 @@ void die(const char * str, struct pt_regs * regs, long err) ssleep(5); panic("Fatal exception"); } - oops_exit(); do_exit(SIGSEGV); } @@ -645,7 +623,7 @@ void die_nmi (struct pt_regs *regs, const char *msg) /* If we are in kernel we are probably nested up pretty bad * and might aswell get out now while we still can. 
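The print_addr_and_symbol() rewrite above trades the CONFIG_STACK_BACKTRACE_COLS multi-column output for one entry per line. The wrapping arithmetic being removed can be seen standalone below; COLS and the sample addresses are assumed values for illustration:

#include <stdio.h>

#define COLS 4                          /* assumed column count */

static void print_entries(const unsigned long *addr, int n)
{
        int i, printed = 0;             /* entries on the current line */

        for (i = 0; i < n; i++) {
                printf(" <%08lx>", addr[i]);
                printed = (printed + 1) % COLS;
                putchar(printed ? ' ' : '\n');  /* wrap after COLS */
        }
        if (printed)
                putchar('\n');          /* close a partial final row */
}

int main(void)
{
        unsigned long sample[6] = { 0xc0100000, 0xc0100010, 0xc0100020,
                                    0xc0100030, 0xc0100040, 0xc0100050 };
        print_entries(sample, 6);
        return 0;
}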
*/ - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { current->thread.trap_no = 2; crash_kexec(regs); } @@ -716,7 +694,6 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code) void set_nmi_callback(nmi_callback_t callback) { - vmalloc_sync_all(); rcu_assign_pointer(nmi_callback, callback); } EXPORT_SYMBOL_GPL(set_nmi_callback); diff --git a/trunk/arch/i386/kernel/vmlinux.lds.S b/trunk/arch/i386/kernel/vmlinux.lds.S index 3f21c6f6466d..4710195b6b74 100644 --- a/trunk/arch/i386/kernel/vmlinux.lds.S +++ b/trunk/arch/i386/kernel/vmlinux.lds.S @@ -68,26 +68,6 @@ SECTIONS *(.data.init_task) } - /* might get freed after init */ - . = ALIGN(4096); - __smp_alt_begin = .; - __smp_alt_instructions = .; - .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) { - *(.smp_altinstructions) - } - __smp_alt_instructions_end = .; - . = ALIGN(4); - __smp_locks = .; - .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { - *(.smp_locks) - } - __smp_locks_end = .; - .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) { - *(.smp_altinstr_replacement) - } - . = ALIGN(4096); - __smp_alt_end = .; - /* will be freed after init */ . = ALIGN(4096); /* Init code and data */ __init_begin = .; diff --git a/trunk/arch/i386/kernel/vsyscall-sysenter.S b/trunk/arch/i386/kernel/vsyscall-sysenter.S index 3b62baa6a371..76b728159403 100644 --- a/trunk/arch/i386/kernel/vsyscall-sysenter.S +++ b/trunk/arch/i386/kernel/vsyscall-sysenter.S @@ -21,9 +21,6 @@ * instruction clobbers %esp, the user's %esp won't even survive entry * into the kernel. We store %esp in %ebp. Code in entry.S must fetch * arg6 from the stack. - * - * You can not use this vsyscall for the clone() syscall because the - * three dwords on the parent stack do not get copied to the child. */ .text .globl __kernel_vsyscall diff --git a/trunk/arch/i386/mach-es7000/es7000.h b/trunk/arch/i386/mach-es7000/es7000.h index 80566ca4a80a..f1e3204f5dec 100644 --- a/trunk/arch/i386/mach-es7000/es7000.h +++ b/trunk/arch/i386/mach-es7000/es7000.h @@ -83,7 +83,6 @@ struct es7000_oem_table { struct psai psai; }; -#ifdef CONFIG_ACPI struct acpi_table_sdt { unsigned long pa; unsigned long count; @@ -100,9 +99,6 @@ struct oem_table { u32 OEMTableSize; }; -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -#endif - struct mip_reg { unsigned long long off_0; unsigned long long off_8; @@ -118,6 +114,7 @@ struct mip_reg { #define MIP_FUNC(VALUE) (VALUE & 0xff) extern int parse_unisys_oem (char *oemptr); +extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); extern void setup_unisys(void); extern int es7000_start_cpu(int cpu, unsigned long eip); extern void es7000_sw_apic(void); diff --git a/trunk/arch/i386/mach-es7000/es7000plat.c b/trunk/arch/i386/mach-es7000/es7000plat.c index 3d0fc853516d..a9ab0644f403 100644 --- a/trunk/arch/i386/mach-es7000/es7000plat.c +++ b/trunk/arch/i386/mach-es7000/es7000plat.c @@ -51,6 +51,8 @@ struct mip_reg *host_reg; int mip_port; unsigned long mip_addr, host_addr; +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI) + /* * GSI override for ES7000 platforms. 
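The vmlinux.lds.S hunk above drops the __smp_alt_* begin/end bookends; the surviving __alt_instructions/__alt_instructions_end pair, used by alternative_instructions() earlier, follows the same linker-table idiom. A sketch of that idiom, simulated in plain C since no linker script is present here (the table contents are illustrative):

/* In the kernel the bounds are linker-provided symbols:
 *      extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 * Here we fake them with a static array so the sketch runs anywhere. */
#include <stdio.h>

struct alt_instr { int cpuid; };

static struct alt_instr table[] = { {1}, {2}, {3} };
static struct alt_instr *alt_begin = table;
static struct alt_instr *alt_end = table + sizeof(table)/sizeof(table[0]);

int main(void)
{
        struct alt_instr *a;

        /* visit every object placed in the section, in order */
        for (a = alt_begin; a < alt_end; a++)
                printf("entry for feature bit %d\n", a->cpuid);
        return 0;
}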
*/ @@ -74,6 +76,8 @@ es7000_rename_gsi(int ioapic, int gsi) return gsi; } +#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */ + void __init setup_unisys(void) { @@ -156,7 +160,6 @@ parse_unisys_oem (char *oemptr) return es7000_plat; } -#ifdef CONFIG_ACPI int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { @@ -209,7 +212,6 @@ find_unisys_acpi_oem_table(unsigned long *oem_addr) } return -1; } -#endif static void es7000_spin(int n) diff --git a/trunk/arch/i386/mm/fault.c b/trunk/arch/i386/mm/fault.c index 7f0fcf219a26..cf572d9a3b6e 100644 --- a/trunk/arch/i386/mm/fault.c +++ b/trunk/arch/i386/mm/fault.c @@ -214,68 +214,6 @@ static noinline void force_sig_info_fault(int si_signo, int si_code, fastcall void do_invalid_op(struct pt_regs *, unsigned long); -static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) -{ - unsigned index = pgd_index(address); - pgd_t *pgd_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - - pgd += index; - pgd_k = init_mm.pgd + index; - - if (!pgd_present(*pgd_k)) - return NULL; - - /* - * set_pgd(pgd, *pgd_k); here would be useless on PAE - * and redundant with the set_pmd() on non-PAE. As would - * set_pud. - */ - - pud = pud_offset(pgd, address); - pud_k = pud_offset(pgd_k, address); - if (!pud_present(*pud_k)) - return NULL; - - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd_k)) - return NULL; - if (!pmd_present(*pmd)) - set_pmd(pmd, *pmd_k); - else - BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); - return pmd_k; -} - -/* - * Handle a fault on the vmalloc or module mapping area - * - * This assumes no large pages in there. - */ -static inline int vmalloc_fault(unsigned long address) -{ - unsigned long pgd_paddr; - pmd_t *pmd_k; - pte_t *pte_k; - /* - * Synchronize this task's top level page-table - * with the 'reference' page table. - * - * Do _not_ use "current" here. We might be inside - * an interrupt in the middle of a task switch.. - */ - pgd_paddr = read_cr3(); - pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); - if (!pmd_k) - return -1; - pte_k = pte_offset_kernel(pmd_k, address); - if (!pte_present(*pte_k)) - return -1; - return 0; -} - /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -285,8 +223,6 @@ static inline int vmalloc_fault(unsigned long address) * bit 0 == 0 means no page found, 1 means protection fault * bit 1 == 0 means read, 1 means write * bit 2 == 0 means kernel, 1 means user-mode - * bit 3 == 1 means use of reserved bit detected - * bit 4 == 1 means fault was an instruction fetch */ fastcall void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) @@ -301,6 +237,13 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, /* get the address */ address = read_cr2(); + if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, + SIGSEGV) == NOTIFY_STOP) + return; + /* It's safe to allow irq's after cr2 has been saved */ + if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) + local_irq_enable(); + tsk = current; si_code = SEGV_MAPERR; @@ -316,29 +259,17 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, * * This verifies that the fault happens in kernel space * (error_code & 4) == 0, and that the fault was not a - * protection error (error_code & 9) == 0. + * protection error (error_code & 1) == 0. 
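The restored check that follows, if (!(error_code & 5)) goto vmalloc_fault;, leans on the bit layout this comment describes: bit 0 is a protection fault, bit 2 is user mode. A quick hand-enumerated table of what the mask selects, for illustration only:

#include <stdio.h>

int main(void)
{
        /* bit 0 = protection fault, bit 1 = write, bit 2 = user mode */
        unsigned long codes[] = { 0, 1, 2, 4, 5, 7 };
        unsigned i;

        for (i = 0; i < sizeof(codes)/sizeof(codes[0]); i++)
                printf("error_code=%lu -> %s\n", codes[i],
                       (codes[i] & 5) ? "user and/or protection fault"
                                      : "kernel not-present: try vmalloc sync");
        return 0;
}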
*/ - if (unlikely(address >= TASK_SIZE)) { - if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0) - return; - if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, - SIGSEGV) == NOTIFY_STOP) - return; - /* + if (unlikely(address >= TASK_SIZE)) { + if (!(error_code & 5)) + goto vmalloc_fault; + /* * Don't take the mm semaphore here. If we fixup a prefetch * fault we could otherwise deadlock. */ goto bad_area_nosemaphore; - } - - if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, - SIGSEGV) == NOTIFY_STOP) - return; - - /* It's safe to allow irq's after cr2 has been saved and the vmalloc - fault has been handled. */ - if (regs->eflags & (X86_EFLAGS_IF|VM_MASK)) - local_irq_enable(); + } mm = tsk->mm; @@ -509,31 +440,24 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, bust_spinlocks(1); - if (oops_may_print()) { - #ifdef CONFIG_X86_PAE - if (error_code & 16) { - pte_t *pte = lookup_address(address); +#ifdef CONFIG_X86_PAE + if (error_code & 16) { + pte_t *pte = lookup_address(address); - if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) - printk(KERN_CRIT "kernel tried to execute " - "NX-protected page - exploit attempt? " - "(uid: %d)\n", current->uid); - } - #endif - if (address < PAGE_SIZE) - printk(KERN_ALERT "BUG: unable to handle kernel NULL " - "pointer dereference"); - else - printk(KERN_ALERT "BUG: unable to handle kernel paging" - " request"); - printk(" at virtual address %08lx\n",address); - printk(KERN_ALERT " printing eip:\n"); - printk("%08lx\n", regs->eip); + if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) + printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid); } +#endif + if (address < PAGE_SIZE) + printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + else + printk(KERN_ALERT "Unable to handle kernel paging request"); + printk(" at virtual address %08lx\n",address); + printk(KERN_ALERT " printing eip:\n"); + printk("%08lx\n", regs->eip); page = read_cr3(); page = ((unsigned long *) __va(page))[address >> 22]; - if (oops_may_print()) - printk(KERN_ALERT "*pde = %08lx\n", page); + printk(KERN_ALERT "*pde = %08lx\n", page); /* * We must not directly access the pte in the highpte * case, the page table might be allocated in highmem. @@ -541,7 +465,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, * it's allocated already. */ #ifndef CONFIG_HIGHPTE - if ((page & 1) && oops_may_print()) { + if (page & 1) { page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; @@ -586,41 +510,51 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs, tsk->thread.error_code = error_code; tsk->thread.trap_no = 14; force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); -} + return; -#ifndef CONFIG_X86_PAE -void vmalloc_sync_all(void) -{ - /* - * Note that races in the updates of insync and start aren't - * problematic: insync can only get set bits added, and updates to - * start are only improving performance (without affecting correctness - * if undone). - */ - static DECLARE_BITMAP(insync, PTRS_PER_PGD); - static unsigned long start = TASK_SIZE; - unsigned long address; +vmalloc_fault: + { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + * + * Do _not_ use "tsk" here. We might be inside + * an interrupt in the middle of a task switch.. 
+ */ + int index = pgd_index(address); + unsigned long pgd_paddr; + pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; - BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK); - for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) { - if (!test_bit(pgd_index(address), insync)) { - unsigned long flags; - struct page *page; - - spin_lock_irqsave(&pgd_lock, flags); - for (page = pgd_list; page; page = - (struct page *)page->index) - if (!vmalloc_sync_one(page_address(page), - address)) { - BUG_ON(page != pgd_list); - break; - } - spin_unlock_irqrestore(&pgd_lock, flags); - if (!page) - set_bit(pgd_index(address), insync); - } - if (address == start && test_bit(pgd_index(address), insync)) - start = address + PGDIR_SIZE; + pgd_paddr = read_cr3(); + pgd = index + (pgd_t *)__va(pgd_paddr); + pgd_k = init_mm.pgd + index; + + if (!pgd_present(*pgd_k)) + goto no_context; + + /* + * set_pgd(pgd, *pgd_k); here would be useless on PAE + * and redundant with the set_pmd() on non-PAE. As would + * set_pud. + */ + + pud = pud_offset(pgd, address); + pud_k = pud_offset(pgd_k, address); + if (!pud_present(*pud_k)) + goto no_context; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + goto no_context; + set_pmd(pmd, *pmd_k); + + pte_k = pte_offset_kernel(pmd_k, address); + if (!pte_present(*pte_k)) + goto no_context; + return; } } -#endif diff --git a/trunk/arch/i386/mm/init.c b/trunk/arch/i386/mm/init.c index 9f66ac582a8b..7ba55a6e2dbc 100644 --- a/trunk/arch/i386/mm/init.c +++ b/trunk/arch/i386/mm/init.c @@ -720,6 +720,21 @@ static int noinline do_test_wp_bit(void) return flag; } +void free_initmem(void) +{ + unsigned long addr; + + addr = (unsigned long)(&__init_begin); + for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + init_page_count(virt_to_page(addr)); + memset((void *)addr, 0xcc, PAGE_SIZE); + free_page(addr); + totalram_pages++; + } + printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10); +} + #ifdef CONFIG_DEBUG_RODATA extern char __start_rodata, __end_rodata; @@ -743,31 +758,17 @@ void mark_rodata_ro(void) } #endif -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *)addr, 0xcc, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); -} - -void free_initmem(void) -{ - free_init_pages("unused kernel memory", - (unsigned long)(&__init_begin), - (unsigned long)(&__init_end)); -} #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + if (start < end) + printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + for (; start < end; start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(start)); + init_page_count(virt_to_page(start)); + free_page(start); + totalram_pages++; + } } #endif - diff --git a/trunk/arch/i386/oprofile/nmi_int.c b/trunk/arch/i386/oprofile/nmi_int.c index 1accce50c2c7..0493e8b8ec49 100644 --- a/trunk/arch/i386/oprofile/nmi_int.c +++ b/trunk/arch/i386/oprofile/nmi_int.c @@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy) static void free_msrs(void) { int i; - for_each_cpu(i) { + for (i = 0; i < NR_CPUS; ++i) { 
kfree(cpu_msrs[i].counters); cpu_msrs[i].counters = NULL; kfree(cpu_msrs[i].controls); @@ -138,7 +138,10 @@ static int allocate_msrs(void) size_t counters_size = sizeof(struct op_msr) * model->num_counters; int i; - for_each_online_cpu(i) { + for (i = 0; i < NR_CPUS; ++i) { + if (!cpu_online(i)) + continue; + cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL); if (!cpu_msrs[i].counters) { success = 0; diff --git a/trunk/arch/ia64/hp/sim/simserial.c b/trunk/arch/ia64/hp/sim/simserial.c index 0e5c6ae50228..626cdc83668b 100644 --- a/trunk/arch/ia64/hp/sim/simserial.c +++ b/trunk/arch/ia64/hp/sim/simserial.c @@ -46,6 +46,11 @@ #define KEYBOARD_INTR 3 /* must match with simulator! */ #define NR_PORTS 1 /* only one port for now */ +#define SERIAL_INLINE 1 + +#ifdef SERIAL_INLINE +#define _INLINE_ inline +#endif #define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT) @@ -232,7 +237,7 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch) local_irq_restore(flags); } -static void transmit_chars(struct async_struct *info, int *intr_done) +static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done) { int count; unsigned long flags; diff --git a/trunk/arch/m32r/kernel/irq.c b/trunk/arch/m32r/kernel/irq.c index a4634b06f675..1ce63926a3c0 100644 --- a/trunk/arch/m32r/kernel/irq.c +++ b/trunk/arch/m32r/kernel/irq.c @@ -37,8 +37,9 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j=0; j<NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); [...] seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); diff --git a/trunk/arch/m68k/bvme6000/rtc.c b/trunk/arch/m68k/bvme6000/rtc.c index 15c16b62dff5..703cbc6dc9cc 100644 --- a/trunk/arch/m68k/bvme6000/rtc.c +++ b/trunk/arch/m68k/bvme6000/rtc.c @@ -18,7 +18,6 @@ #include #include /* For struct rtc_time and ioctls, etc */ #include -#include <linux/bcd.h> #include #include @@ -33,6 +32,9 @@ * ioctls.
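The BCD2BIN()/BIN2BCD() macros re-added just below pack one decimal digit per nibble, which is how RTC hardware stores its time fields. A quick self-check of the arithmetic:

#include <assert.h>

#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)

int main(void)
{
        assert(BCD2BIN(0x59) == 59);    /* high nibble = tens digit */
        assert(BIN2BCD(37) == 0x37);    /* 3 in bits 7..4, 7 in bits 3..0 */
        assert(BCD2BIN(BIN2BCD(8)) == 8);
        return 0;
}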
*/ +#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10) +#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10) + static unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; diff --git a/trunk/arch/mips/kernel/irq.c b/trunk/arch/mips/kernel/irq.c index 3dd76b3d2967..7d93992e462c 100644 --- a/trunk/arch/mips/kernel/irq.c +++ b/trunk/arch/mips/kernel/irq.c @@ -68,8 +68,9 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j=0; j<NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); [...] seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c index 78d171bfa331..06ed90752424 100644 --- a/trunk/arch/mips/kernel/smp.c +++ b/trunk/arch/mips/kernel/smp.c @@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry, mb(); /* Send a message to all other CPUs and wait for them to respond */ - for_each_online_cpu(i) - if (i != cpu) + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i) && i != cpu) core_send_ipi(i, SMP_CALL_FUNCTION); /* Wait for response */ diff --git a/trunk/arch/mips/sgi-ip27/ip27-irq.c b/trunk/arch/mips/sgi-ip27/ip27-irq.c index 2854ac4c9be1..73e5e52781d8 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-irq.c +++ b/trunk/arch/mips/sgi-ip27/ip27-irq.c @@ -88,9 +88,12 @@ static inline int find_level(cpuid_t *cpunum, int irq) { int cpu, i; - for_each_online_cpu(cpu) { + for (cpu = 0; cpu <= NR_CPUS; cpu++) { struct slice_data *si = cpu_data[cpu].data; + if (!cpu_online(cpu)) + continue; + for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) if (si->level_to_irq[i] == irq) { *cpunum = cpu; diff --git a/trunk/arch/parisc/kernel/smp.c b/trunk/arch/parisc/kernel/smp.c index d6ac1c60a471..25564b7ca6bb 100644 --- a/trunk/arch/parisc/kernel/smp.c +++ b/trunk/arch/parisc/kernel/smp.c @@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op) { int i; - for_each_online_cpu(i) { - if (i != smp_processor_id()) + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i) && i != smp_processor_id()) send_IPI_single(i, op); } } @@ -643,13 +643,14 @@ int sys_cpus(int argc, char **argv) if ( argc == 1 ){ #ifdef DUMP_MORE_STATE - for_each_online_cpu(i) { + for(i=0; i<NR_CPUS; i++) { [...] diff --git a/trunk/arch/powerpc/kernel/kprobes.c b/trunk/arch/powerpc/kernel/kprobes.c [...] - mutex_lock(&kprobe_mutex); + down(&kprobe_mutex); free_insn_slot(p->ainsn.insn); - mutex_unlock(&kprobe_mutex); + up(&kprobe_mutex); } static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/trunk/arch/powerpc/kernel/setup-common.c b/trunk/arch/powerpc/kernel/setup-common.c index c1d62bf11f29..be12041c0fc5 100644 --- a/trunk/arch/powerpc/kernel/setup-common.c +++ b/trunk/arch/powerpc/kernel/setup-common.c @@ -162,8 +162,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) unsigned long bogosum = 0; int i; - for_each_online_cpu(i) - bogosum += loops_per_jiffy; + for (i = 0; i < NR_CPUS; ++i) + if (cpu_online(i)) + bogosum += loops_per_jiffy; seq_printf(m, "total bogomips\t: %lu.%02lu\n", bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); #endif /* CONFIG_SMP && CONFIG_PPC32 */ diff --git a/trunk/arch/powerpc/kernel/setup_32.c b/trunk/arch/powerpc/kernel/setup_32.c index dc2770df25b3..db72a92943bf 100644 --- a/trunk/arch/powerpc/kernel/setup_32.c +++ b/trunk/arch/powerpc/kernel/setup_32.c @@ -272,8 +272,9 @@ int __init ppc_init(void) if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); /* register CPU devices */ - for_each_cpu(i) - register_cpu(&cpu_devices[i], i, NULL); + for (i = 0; i < NR_CPUS; i++) + if (cpu_possible(i)) + register_cpu(&cpu_devices[i], i, NULL); /* call platform
init */ if (ppc_md.init != NULL) { diff --git a/trunk/arch/powerpc/platforms/powermac/smp.c b/trunk/arch/powerpc/platforms/powermac/smp.c index 1065d87fc279..6d64a9bf3474 100644 --- a/trunk/arch/powerpc/platforms/powermac/smp.c +++ b/trunk/arch/powerpc/platforms/powermac/smp.c @@ -191,7 +191,9 @@ static void smp_psurge_message_pass(int target, int msg) if (num_online_cpus() < 2) return; - for_each_online_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_online(i)) + continue; if (target == MSG_ALL || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) || target == i) { diff --git a/trunk/arch/ppc/kernel/setup.c b/trunk/arch/ppc/kernel/setup.c index 53e9deacee82..c08ab432e958 100644 --- a/trunk/arch/ppc/kernel/setup.c +++ b/trunk/arch/ppc/kernel/setup.c @@ -168,8 +168,9 @@ int show_cpuinfo(struct seq_file *m, void *v) /* Show summary information */ #ifdef CONFIG_SMP unsigned long bogosum = 0; - for_each_online_cpu(i) - bogosum += cpu_data[i].loops_per_jiffy; + for (i = 0; i < NR_CPUS; ++i) + if (cpu_online(i)) + bogosum += cpu_data[i].loops_per_jiffy; seq_printf(m, "total bogomips\t: %lu.%02lu\n", bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); #endif /* CONFIG_SMP */ @@ -711,8 +712,9 @@ int __init ppc_init(void) if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); /* register CPU devices */ - for_each_cpu(i) - register_cpu(&cpu_devices[i], i, NULL); + for (i = 0; i < NR_CPUS; i++) + if (cpu_possible(i)) + register_cpu(&cpu_devices[i], i, NULL); /* call platform init */ if (ppc_md.init != NULL) { diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index d52d6d211d9f..7dbe00c76c6b 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -799,7 +799,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) */ print_cpu_info(&S390_lowcore.cpu_data); - for_each_cpu(i) { + for(i = 0; i < NR_CPUS; i++) { + if (!cpu_possible(i)) + continue; lowcore_ptr[i] = (struct _lowcore *) __get_free_pages(GFP_KERNEL|GFP_DMA, sizeof(void*) == 8 ? 1 : 0); diff --git a/trunk/arch/sh/kernel/irq.c b/trunk/arch/sh/kernel/irq.c index b56e79632f24..6883c00728cb 100644 --- a/trunk/arch/sh/kernel/irq.c +++ b/trunk/arch/sh/kernel/irq.c @@ -35,8 +35,9 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_puts(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j=0; j<NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); [...] (action->flags & SA_INTERRUPT) ?
'+' : ' ', diff --git a/trunk/arch/sparc/kernel/sun4d_smp.c b/trunk/arch/sparc/kernel/sun4d_smp.c index 41bb9596be48..4219dd2ce3a2 100644 --- a/trunk/arch/sparc/kernel/sun4d_smp.c +++ b/trunk/arch/sparc/kernel/sun4d_smp.c @@ -249,9 +249,11 @@ void __init smp4d_boot_cpus(void) } else { unsigned long bogosum = 0; - for_each_present_cpu(i) { - bogosum += cpu_data(i).udelay_val; - smp_highest_cpu = i; + for(i = 0; i < NR_CPUS; i++) { + if (cpu_isset(i, cpu_present_map)) { + bogosum += cpu_data(i).udelay_val; + smp_highest_cpu = i; + } } SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100)); printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", diff --git a/trunk/arch/sparc/kernel/sun4m_smp.c b/trunk/arch/sparc/kernel/sun4m_smp.c index 1dde312eebda..fbbd8a474c4c 100644 --- a/trunk/arch/sparc/kernel/sun4m_smp.c +++ b/trunk/arch/sparc/kernel/sun4m_smp.c @@ -218,8 +218,10 @@ void __init smp4m_boot_cpus(void) cpu_present_map = cpumask_of_cpu(smp_processor_id()); } else { unsigned long bogosum = 0; - for_each_present_cpu(i) - bogosum += cpu_data(i).udelay_val; + for(i = 0; i < NR_CPUS; i++) { + if (cpu_isset(i, cpu_present_map)) + bogosum += cpu_data(i).udelay_val; + } printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), diff --git a/trunk/arch/sparc64/kernel/irq.c b/trunk/arch/sparc64/kernel/irq.c index e505a4125e35..8c93ba655b33 100644 --- a/trunk/arch/sparc64/kernel/irq.c +++ b/trunk/arch/sparc64/kernel/irq.c @@ -117,7 +117,9 @@ int show_interrupts(struct seq_file *p, void *v) #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else - for_each_online_cpu(j) { + for (j = 0; j < NR_CPUS; j++) { + if (!cpu_online(j)) + continue; seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); } diff --git a/trunk/arch/sparc64/kernel/smp.c b/trunk/arch/sparc64/kernel/smp.c index 1b6e2ade1008..373a701c90a5 100644 --- a/trunk/arch/sparc64/kernel/smp.c +++ b/trunk/arch/sparc64/kernel/smp.c @@ -57,21 +57,25 @@ void smp_info(struct seq_file *m) int i; seq_printf(m, "State:\n"); - for_each_online_cpu(i) - seq_printf(m, "CPU%d:\t\tonline\n", i); + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i)) + seq_printf(m, + "CPU%d:\t\tonline\n", i); + } } void smp_bogo(struct seq_file *m) { int i; - for_each_online_cpu(i) - seq_printf(m, - "Cpu%dBogo\t: %lu.%02lu\n" - "Cpu%dClkTck\t: %016lx\n", - i, cpu_data(i).udelay_val / (500000/HZ), - (cpu_data(i).udelay_val / (5000/HZ)) % 100, - i, cpu_data(i).clock_tick); + for (i = 0; i < NR_CPUS; i++) + if (cpu_online(i)) + seq_printf(m, + "Cpu%dBogo\t: %lu.%02lu\n" + "Cpu%dClkTck\t: %016lx\n", + i, cpu_data(i).udelay_val / (500000/HZ), + (cpu_data(i).udelay_val / (5000/HZ)) % 100, + i, cpu_data(i).clock_tick); } void __init smp_store_cpu_info(int id) @@ -1278,7 +1282,7 @@ int setup_profiling_timer(unsigned int multiplier) return -EINVAL; spin_lock_irqsave(&prof_setup_lock, flags); - for_each_cpu(i) + for (i = 0; i < NR_CPUS; i++) prof_multiplier(i) = multiplier; current_tick_offset = (timer_tick_offset / multiplier); spin_unlock_irqrestore(&prof_setup_lock, flags); @@ -1380,8 +1384,10 @@ void __init smp_cpus_done(unsigned int max_cpus) unsigned long bogosum = 0; int i; - for_each_online_cpu(i) - bogosum += cpu_data(i).udelay_val; + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i)) + bogosum += cpu_data(i).udelay_val; + } printk("Total of %ld processors activated " "(%lu.%02lu BogoMIPS).\n", (long) num_online_cpus(), diff --git 
a/trunk/arch/sparc64/mm/init.c b/trunk/arch/sparc64/mm/init.c index 1539a8362b6f..ded63ee9c4fd 100644 --- a/trunk/arch/sparc64/mm/init.c +++ b/trunk/arch/sparc64/mm/init.c @@ -1828,8 +1828,8 @@ void __flush_tlb_all(void) void online_page(struct page *page) { ClearPageReserved(page); - init_page_count(page); - __free_page(page); + set_page_count(page, 0); + free_cold_page(page); totalram_pages++; num_physpages++; } diff --git a/trunk/arch/um/kernel/um_arch.c b/trunk/arch/um/kernel/um_arch.c index 80c9c18aae94..27cdf9164422 100644 --- a/trunk/arch/um/kernel/um_arch.c +++ b/trunk/arch/um/kernel/um_arch.c @@ -491,16 +491,6 @@ void __init check_bugs(void) check_devanon(); } -void apply_alternatives(struct alt_instr *start, struct alt_instr *end) -{ -} - -void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) -{ -} - -void alternatives_smp_module_del(struct module *mod) +void apply_alternatives(void *start, void *end) { } diff --git a/trunk/arch/x86_64/kernel/early_printk.c b/trunk/arch/x86_64/kernel/early_printk.c index a8a6aa70d695..6dffb498ccd7 100644 --- a/trunk/arch/x86_64/kernel/early_printk.c +++ b/trunk/arch/x86_64/kernel/early_printk.c @@ -17,8 +17,11 @@ #define VGABASE ((void __iomem *)0xffffffff800b8000UL) #endif +#define MAX_YPOS max_ypos +#define MAX_XPOS max_xpos + static int max_ypos = 25, max_xpos = 80; -static int current_ypos = 25, current_xpos = 0; +static int current_ypos = 1, current_xpos = 0; static void early_vga_write(struct console *con, const char *str, unsigned n) { @@ -26,26 +29,26 @@ static void early_vga_write(struct console *con, const char *str, unsigned n) int i, k, j; while ((c = *str++) != '\0' && n-- > 0) { - if (current_ypos >= max_ypos) { + if (current_ypos >= MAX_YPOS) { /* scroll 1 line up */ - for (k = 1, j = 0; k < max_ypos; k++, j++) { - for (i = 0; i < max_xpos; i++) { - writew(readw(VGABASE+2*(max_xpos*k+i)), - VGABASE + 2*(max_xpos*j + i)); + for (k = 1, j = 0; k < MAX_YPOS; k++, j++) { + for (i = 0; i < MAX_XPOS; i++) { + writew(readw(VGABASE + 2*(MAX_XPOS*k + i)), + VGABASE + 2*(MAX_XPOS*j + i)); } } - for (i = 0; i < max_xpos; i++) - writew(0x720, VGABASE + 2*(max_xpos*j + i)); - current_ypos = max_ypos-1; + for (i = 0; i < MAX_XPOS; i++) + writew(0x720, VGABASE + 2*(MAX_XPOS*j + i)); + current_ypos = MAX_YPOS-1; } if (c == '\n') { current_xpos = 0; current_ypos++; } else if (c != '\r') { writew(((0x7 << 8) | (unsigned short) c), - VGABASE + 2*(max_xpos*current_ypos + + VGABASE + 2*(MAX_XPOS*current_ypos + current_xpos++)); - if (current_xpos >= max_xpos) { + if (current_xpos >= MAX_XPOS) { current_xpos = 0; current_ypos++; } @@ -241,7 +244,6 @@ int __init setup_early_printk(char *opt) && SCREEN_INFO.orig_video_isVGA == 1) { max_xpos = SCREEN_INFO.orig_video_cols; max_ypos = SCREEN_INFO.orig_video_lines; - current_ypos = SCREEN_INFO.orig_y; early_console = &early_vga_console; } else if (!strncmp(buf, "simnow", 6)) { simnow_init(buf + 6); diff --git a/trunk/arch/x86_64/kernel/irq.c b/trunk/arch/x86_64/kernel/irq.c index d8bd0b345b1e..30d2a1e545fe 100644 --- a/trunk/arch/x86_64/kernel/irq.c +++ b/trunk/arch/x86_64/kernel/irq.c @@ -38,8 +38,9 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j=0; j<NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); [...] seq_printf(p, " %14s", irq_desc[i].handler->typename); @@ -65,13 +68,15 @@ int show_interrupts(struct seq_file *p, void *v) spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); -
for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count); seq_putc(p, '\n'); #ifdef CONFIG_X86_LOCAL_APIC seq_printf(p, "LOC: "); - for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs); seq_putc(p, '\n'); #endif seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); diff --git a/trunk/arch/x86_64/kernel/kprobes.c b/trunk/arch/x86_64/kernel/kprobes.c index 14f0ced613b6..8b866a8572cf 100644 --- a/trunk/arch/x86_64/kernel/kprobes.c +++ b/trunk/arch/x86_64/kernel/kprobes.c @@ -222,9 +222,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - mutex_lock(&kprobe_mutex); + down(&kprobe_mutex); free_insn_slot(p->ainsn.insn); - mutex_unlock(&kprobe_mutex); + up(&kprobe_mutex); } static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/trunk/arch/x86_64/kernel/nmi.c b/trunk/arch/x86_64/kernel/nmi.c index 66c009e10bac..5bf17e41cd2d 100644 --- a/trunk/arch/x86_64/kernel/nmi.c +++ b/trunk/arch/x86_64/kernel/nmi.c @@ -162,7 +162,9 @@ int __init check_nmi_watchdog (void) local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - for_each_online_cpu(cpu) { + for (cpu = 0; cpu < NR_CPUS; cpu++) { + if (!cpu_online(cpu)) + continue; if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) { endflag = 1; printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n", diff --git a/trunk/arch/x86_64/kernel/signal.c b/trunk/arch/x86_64/kernel/signal.c index e5f5ce7909a3..5876df116c92 100644 --- a/trunk/arch/x86_64/kernel/signal.c +++ b/trunk/arch/x86_64/kernel/signal.c @@ -443,6 +443,9 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) if (!user_mode(regs)) return 1; + if (try_to_freeze()) + goto no_signal; + if (!oldset) oldset = ¤t->blocked; @@ -460,6 +463,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) return handle_signal(signr, &info, &ka, oldset, regs); } + no_signal: /* Did we come from a system call? 
*/ if ((long)regs->orig_rax >= 0) { /* Restart the system call - no handlers present */ diff --git a/trunk/arch/xtensa/kernel/irq.c b/trunk/arch/xtensa/kernel/irq.c index 51f9bed455fa..4cbf6d91571f 100644 --- a/trunk/arch/xtensa/kernel/irq.c +++ b/trunk/arch/xtensa/kernel/irq.c @@ -83,8 +83,9 @@ int show_interrupts(struct seq_file *p, void *v) if (i == 0) { seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); + for (j=0; j<NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "CPU%d ",j); [...] seq_printf(p, " %14s", irq_desc[i].handler->typename); seq_printf(p, " %s", action->name); @@ -111,8 +113,9 @@ int show_interrupts(struct seq_file *p, void *v) spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); - for_each_online_cpu(j) - seq_printf(p, "%10u ", nmi_count(j)); + for (j = 0; j < NR_CPUS; j++) + if (cpu_online(j)) + seq_printf(p, "%10u ", nmi_count(j)); seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); } diff --git a/trunk/arch/xtensa/platform-iss/console.c b/trunk/arch/xtensa/platform-iss/console.c index 2a580efb58ec..94fdfe474ac1 100644 --- a/trunk/arch/xtensa/platform-iss/console.c +++ b/trunk/arch/xtensa/platform-iss/console.c @@ -31,6 +31,10 @@ #include #include +#ifdef SERIAL_INLINE +#define _INLINE_ inline +#endif + #define SERIAL_MAX_NUM_LINES 1 #define SERIAL_TIMER_VALUE (20 * HZ) diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c index 35fdb7dc6512..e1109491c234 100644 --- a/trunk/block/ioctl.c +++ b/trunk/block/ioctl.c @@ -42,9 +42,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user return -EINVAL; } /* partition number in use? */ - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); if (disk->part[part - 1]) { - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return -EBUSY; } /* overlap? */ @@ -55,13 +55,13 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user continue; if (!(start+length <= s->start_sect || start >= s->start_sect + s->nr_sects)) { - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return -EBUSY; } } /* all seems OK */ add_partition(disk, part, start, length); - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return 0; case BLKPG_DEL_PARTITION: if (!disk->part[part-1]) @@ -71,9 +71,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user bdevp = bdget_disk(disk, part); if (!bdevp) return -ENOMEM; - mutex_lock(&bdevp->bd_mutex); + down(&bdevp->bd_sem); if (bdevp->bd_openers) { - mutex_unlock(&bdevp->bd_mutex); + up(&bdevp->bd_sem); bdput(bdevp); return -EBUSY; } @@ -81,10 +81,10 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user fsync_bdev(bdevp); invalidate_bdev(bdevp, 0); - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); delete_partition(disk, part); - mutex_unlock(&bdev->bd_mutex); - mutex_unlock(&bdevp->bd_mutex); + up(&bdev->bd_sem); + up(&bdevp->bd_sem); bdput(bdevp); return 0; @@ -102,10 +102,10 @@ static int blkdev_reread_part(struct block_device *bdev) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (!mutex_trylock(&bdev->bd_mutex)) + if (down_trylock(&bdev->bd_sem)) return -EBUSY; res = rescan_partitions(disk, bdev); - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return res; } diff --git a/trunk/drivers/base/power/suspend.c b/trunk/drivers/base/power/suspend.c index bdb60663f2ef..8660779fb288 100644 --- a/trunk/drivers/base/power/suspend.c +++ b/trunk/drivers/base/power/suspend.c @@ -8,7 +8,6 @@ * */ -#include #include #include "../base.h" #include "power.h" @@ -63,6 +62,7 @@ int suspend_device(struct
device * dev, pm_message_t state) return error; } + /** * device_suspend - Save state and stop all devices in system. * @state: Power state to put each device in. @@ -82,9 +82,6 @@ int device_suspend(pm_message_t state) { int error = 0; - if (!is_console_suspend_safe()) - return -EINVAL; - down(&dpm_sem); down(&dpm_list_sem); while (!list_empty(&dpm_active) && error == 0) { diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c index e29b8926f80e..cf39cf9aac25 100644 --- a/trunk/drivers/block/cciss.c +++ b/trunk/drivers/block/cciss.c @@ -3268,8 +3268,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, unregister_blkdev(hba[i]->major, hba[i]->devname); clean1: release_io_mem(hba[i]); - hba[i]->busy_initializing = 0; free_hba(i); + hba[i]->busy_initializing = 0; return(-1); } diff --git a/trunk/drivers/block/floppy.c b/trunk/drivers/block/floppy.c index fb2d0be7cdeb..d23b54332d7e 100644 --- a/trunk/drivers/block/floppy.c +++ b/trunk/drivers/block/floppy.c @@ -179,7 +179,6 @@ static int print_unex = 1; #include #include #include /* for invalidate_buffers() */ -#include /* * PS/2 floppies have much slower step rates than regular floppies. @@ -414,7 +413,7 @@ static struct floppy_write_errors write_errors[N_DRIVE]; static struct timer_list motor_off_timer[N_DRIVE]; static struct gendisk *disks[N_DRIVE]; static struct block_device *opened_bdev[N_DRIVE]; -static DEFINE_MUTEX(open_lock); +static DECLARE_MUTEX(open_lock); static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; /* @@ -3334,7 +3333,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, if (type) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; - mutex_lock(&open_lock); + down(&open_lock); LOCK_FDC(drive, 1); floppy_type[type] = *g; floppy_type[type].name = "user format"; @@ -3348,7 +3347,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, continue; __invalidate_device(bdev); } - mutex_unlock(&open_lock); + up(&open_lock); } else { int oldStretch; LOCK_FDC(drive, 1); @@ -3675,7 +3674,7 @@ static int floppy_release(struct inode *inode, struct file *filp) { int drive = (long)inode->i_bdev->bd_disk->private_data; - mutex_lock(&open_lock); + down(&open_lock); if (UDRS->fd_ref < 0) UDRS->fd_ref = 0; else if (!UDRS->fd_ref--) { @@ -3685,7 +3684,7 @@ static int floppy_release(struct inode *inode, struct file *filp) if (!UDRS->fd_ref) opened_bdev[drive] = NULL; floppy_release_irq_and_dma(); - mutex_unlock(&open_lock); + up(&open_lock); return 0; } @@ -3703,7 +3702,7 @@ static int floppy_open(struct inode *inode, struct file *filp) char *tmp; filp->private_data = (void *)0; - mutex_lock(&open_lock); + down(&open_lock); old_dev = UDRS->fd_device; if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev) goto out2; @@ -3786,7 +3785,7 @@ static int floppy_open(struct inode *inode, struct file *filp) if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE))) goto out; } - mutex_unlock(&open_lock); + up(&open_lock); return 0; out: if (UDRS->fd_ref < 0) @@ -3797,7 +3796,7 @@ static int floppy_open(struct inode *inode, struct file *filp) opened_bdev[drive] = NULL; floppy_release_irq_and_dma(); out2: - mutex_unlock(&open_lock); + up(&open_lock); return res; } diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c index 74bf0255e98f..0010704739e3 100644 --- a/trunk/drivers/block/loop.c +++ b/trunk/drivers/block/loop.c @@ -1144,7 +1144,7 @@ static int lo_ioctl(struct inode * inode, struct file * file, struct loop_device *lo = inode->i_bdev->bd_disk->private_data; 
int err; - mutex_lock(&lo->lo_ctl_mutex); + down(&lo->lo_ctl_mutex); switch (cmd) { case LOOP_SET_FD: err = loop_set_fd(lo, file, inode->i_bdev, arg); @@ -1170,7 +1170,7 @@ static int lo_ioctl(struct inode * inode, struct file * file, default: err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; } - mutex_unlock(&lo->lo_ctl_mutex); + up(&lo->lo_ctl_mutex); return err; } @@ -1178,9 +1178,9 @@ static int lo_open(struct inode *inode, struct file *file) { struct loop_device *lo = inode->i_bdev->bd_disk->private_data; - mutex_lock(&lo->lo_ctl_mutex); + down(&lo->lo_ctl_mutex); lo->lo_refcnt++; - mutex_unlock(&lo->lo_ctl_mutex); + up(&lo->lo_ctl_mutex); return 0; } @@ -1189,9 +1189,9 @@ static int lo_release(struct inode *inode, struct file *file) { struct loop_device *lo = inode->i_bdev->bd_disk->private_data; - mutex_lock(&lo->lo_ctl_mutex); + down(&lo->lo_ctl_mutex); --lo->lo_refcnt; - mutex_unlock(&lo->lo_ctl_mutex); + up(&lo->lo_ctl_mutex); return 0; } @@ -1233,12 +1233,12 @@ int loop_unregister_transfer(int number) xfer_funcs[n] = NULL; for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) { - mutex_lock(&lo->lo_ctl_mutex); + down(&lo->lo_ctl_mutex); if (lo->lo_encryption == xfer) loop_release_xfer(lo); - mutex_unlock(&lo->lo_ctl_mutex); + up(&lo->lo_ctl_mutex); } return 0; @@ -1285,7 +1285,7 @@ static int __init loop_init(void) lo->lo_queue = blk_alloc_queue(GFP_KERNEL); if (!lo->lo_queue) goto out_mem4; - mutex_init(&lo->lo_ctl_mutex); + init_MUTEX(&lo->lo_ctl_mutex); init_completion(&lo->lo_done); init_completion(&lo->lo_bh_done); lo->lo_number = i; diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c index a9bde30dadad..6997d8e6bfb5 100644 --- a/trunk/drivers/block/nbd.c +++ b/trunk/drivers/block/nbd.c @@ -459,9 +459,9 @@ static void do_nbd_request(request_queue_t * q) req->errors = 0; spin_unlock_irq(q->queue_lock); - mutex_lock(&lo->tx_lock); + down(&lo->tx_lock); if (unlikely(!lo->sock)) { - mutex_unlock(&lo->tx_lock); + up(&lo->tx_lock); printk(KERN_ERR "%s: Attempted send on closed socket\n", lo->disk->disk_name); req->errors++; @@ -484,7 +484,7 @@ static void do_nbd_request(request_queue_t * q) } lo->active_req = NULL; - mutex_unlock(&lo->tx_lock); + up(&lo->tx_lock); wake_up_all(&lo->active_wq); spin_lock_irq(q->queue_lock); @@ -534,9 +534,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file, case NBD_CLEAR_SOCK: error = 0; - mutex_lock(&lo->tx_lock); + down(&lo->tx_lock); lo->sock = NULL; - mutex_unlock(&lo->tx_lock); + up(&lo->tx_lock); file = lo->file; lo->file = NULL; nbd_clear_que(lo); @@ -590,7 +590,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file, * FIXME: This code is duplicated from sys_shutdown, but * there should be a more generic interface rather than * calling socket ops directly here */ - mutex_lock(&lo->tx_lock); + down(&lo->tx_lock); if (lo->sock) { printk(KERN_WARNING "%s: shutting down socket\n", lo->disk->disk_name); @@ -598,7 +598,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file, SEND_SHUTDOWN|RCV_SHUTDOWN); lo->sock = NULL; } - mutex_unlock(&lo->tx_lock); + up(&lo->tx_lock); file = lo->file; lo->file = NULL; nbd_clear_que(lo); @@ -683,7 +683,7 @@ static int __init nbd_init(void) nbd_dev[i].flags = 0; spin_lock_init(&nbd_dev[i].queue_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); - mutex_init(&nbd_dev[i].tx_lock); + init_MUTEX(&nbd_dev[i].tx_lock); init_waitqueue_head(&nbd_dev[i].active_wq); nbd_dev[i].blksize = 1024; nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */ diff --git 
a/trunk/drivers/block/pktcdvd.c b/trunk/drivers/block/pktcdvd.c index 1d261f985f31..476a5b553f34 100644 --- a/trunk/drivers/block/pktcdvd.c +++ b/trunk/drivers/block/pktcdvd.c @@ -56,7 +56,6 @@ #include #include #include -#include #include #include #include @@ -82,7 +81,7 @@ static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; static struct proc_dir_entry *pkt_proc; static int pkt_major; -static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ +static struct semaphore ctl_mutex; /* Serialize open/close/setup/teardown */ static mempool_t *psd_pool; @@ -2019,7 +2018,7 @@ static int pkt_open(struct inode *inode, struct file *file) VPRINTK("pktcdvd: entering open\n"); - mutex_lock(&ctl_mutex); + down(&ctl_mutex); pd = pkt_find_dev_from_minor(iminor(inode)); if (!pd) { ret = -ENODEV; @@ -2045,14 +2044,14 @@ static int pkt_open(struct inode *inode, struct file *file) set_blocksize(inode->i_bdev, CD_FRAMESIZE); } - mutex_unlock(&ctl_mutex); + up(&ctl_mutex); return 0; out_dec: pd->refcnt--; out: VPRINTK("pktcdvd: failed open (%d)\n", ret); - mutex_unlock(&ctl_mutex); + up(&ctl_mutex); return ret; } @@ -2061,14 +2060,14 @@ static int pkt_close(struct inode *inode, struct file *file) struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; int ret = 0; - mutex_lock(&ctl_mutex); + down(&ctl_mutex); pd->refcnt--; BUG_ON(pd->refcnt < 0); if (pd->refcnt == 0) { int flush = test_bit(PACKET_WRITABLE, &pd->flags); pkt_release_dev(pd, flush); } - mutex_unlock(&ctl_mutex); + up(&ctl_mutex); return ret; } @@ -2597,21 +2596,21 @@ static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cm case PKT_CTRL_CMD_SETUP: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - mutex_lock(&ctl_mutex); + down(&ctl_mutex); ret = pkt_setup_dev(&ctrl_cmd); - mutex_unlock(&ctl_mutex); + up(&ctl_mutex); break; case PKT_CTRL_CMD_TEARDOWN: if (!capable(CAP_SYS_ADMIN)) return -EPERM; - mutex_lock(&ctl_mutex); + down(&ctl_mutex); ret = pkt_remove_dev(&ctrl_cmd); - mutex_unlock(&ctl_mutex); + up(&ctl_mutex); break; case PKT_CTRL_CMD_STATUS: - mutex_lock(&ctl_mutex); + down(&ctl_mutex); pkt_get_status(&ctrl_cmd); - mutex_unlock(&ctl_mutex); + up(&ctl_mutex); break; default: return -ENOTTY; @@ -2657,7 +2656,7 @@ static int __init pkt_init(void) goto out; } - mutex_init(&ctl_mutex); + init_MUTEX(&ctl_mutex); pkt_proc = proc_mkdir("pktcdvd", proc_root_driver); diff --git a/trunk/drivers/block/rd.c b/trunk/drivers/block/rd.c index 1c54f46d3f70..ffd6abd6d5a0 100644 --- a/trunk/drivers/block/rd.c +++ b/trunk/drivers/block/rd.c @@ -310,12 +310,12 @@ static int rd_ioctl(struct inode *inode, struct file *file, * cache */ error = -EBUSY; - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); if (bdev->bd_openers <= 2) { truncate_inode_pages(bdev->bd_inode->i_mapping, 0); error = 0; } - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return error; } diff --git a/trunk/drivers/cdrom/cdrom.c b/trunk/drivers/cdrom/cdrom.c index a59876a0bfa1..879bbc26ce96 100644 --- a/trunk/drivers/cdrom/cdrom.c +++ b/trunk/drivers/cdrom/cdrom.c @@ -407,6 +407,7 @@ int register_cdrom(struct cdrom_device_info *cdi) ENSURE(get_mcn, CDC_MCN); ENSURE(reset, CDC_RESET); ENSURE(audio_ioctl, CDC_PLAY_AUDIO); + ENSURE(dev_ioctl, CDC_IOCTLS); ENSURE(generic_packet, CDC_GENERIC_PACKET); cdi->mc_flags = 0; cdo->n_minors = 0; @@ -2195,586 +2196,395 @@ static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf, return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); } -static int cdrom_ioctl_multisession(struct 
cdrom_device_info *cdi, - void __user *argp) +/* Just about every imaginable ioctl is supported in the Uniform layer + * these days. ATAPI / SCSI specific code now mainly resides in + * mmc_ioct(). + */ +int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi, + struct inode *ip, unsigned int cmd, unsigned long arg) { - struct cdrom_multisession ms_info; - u8 requested_format; + struct cdrom_device_ops *cdo = cdi->ops; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); - - if (!(cdi->ops->capability & CDC_MULTI_SESSION)) - return -ENOSYS; - - if (copy_from_user(&ms_info, argp, sizeof(ms_info))) - return -EFAULT; - - requested_format = ms_info.addr_format; - if (requested_format != CDROM_MSF && requested_format != CDROM_LBA) - return -EINVAL; - ms_info.addr_format = CDROM_LBA; - - ret = cdi->ops->get_last_session(cdi, &ms_info); - if (ret) + /* Try the generic SCSI command ioctl's first.. */ + ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, (void __user *)arg); + if (ret != -ENOTTY) return ret; - sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format); - - if (copy_to_user(argp, &ms_info, sizeof(ms_info))) - return -EFAULT; - - cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); - return 0; -} - -static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) -{ - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); - - if (!CDROM_CAN(CDC_OPEN_TRAY)) - return -ENOSYS; - if (cdi->use_count != 1 || keeplocked) - return -EBUSY; - if (CDROM_CAN(CDC_LOCK)) { - int ret = cdi->ops->lock_door(cdi, 0); - if (ret) + /* the first few commands do not deal with audio drive_info, but + only with routines in cdrom device operations. */ + switch (cmd) { + case CDROMMULTISESSION: { + struct cdrom_multisession ms_info; + u_char requested_format; + cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); + if (!(cdo->capability & CDC_MULTI_SESSION)) + return -ENOSYS; + IOCTL_IN(arg, struct cdrom_multisession, ms_info); + requested_format = ms_info.addr_format; + if (!((requested_format == CDROM_MSF) || + (requested_format == CDROM_LBA))) + return -EINVAL; + ms_info.addr_format = CDROM_LBA; + if ((ret=cdo->get_last_session(cdi, &ms_info))) return ret; - } - - return cdi->ops->tray_move(cdi, 1); -} - -static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) -{ - cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); - - if (!CDROM_CAN(CDC_CLOSE_TRAY)) - return -ENOSYS; - return cdi->ops->tray_move(cdi, 0); -} - -static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); - - if (!CDROM_CAN(CDC_OPEN_TRAY)) - return -ENOSYS; - if (keeplocked) - return -EBUSY; - - cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT); - if (arg) - cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT; - return 0; -} - -static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, - unsigned long arg) -{ - struct cdrom_changer_info *info; - int ret; - - cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); - - if (!CDROM_CAN(CDC_MEDIA_CHANGED)) - return -ENOSYS; - - /* cannot select disc or select current disc */ - if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) - return media_changed(cdi, 1); - - if ((unsigned int)arg >= cdi->capacity) - return -EINVAL; - - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return -ENOMEM; + sanitize_format(&ms_info.addr, &ms_info.addr_format, + requested_format); + IOCTL_OUT(arg, struct cdrom_multisession, ms_info); + cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); + 
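The IOCTL_IN()/IOCTL_OUT() helpers used throughout this switch marshal the ioctl argument across the user/kernel boundary. The macros themselves are not shown in this patch; a sketch consistent with the call sites, where a failed copy returns -EFAULT from the enclosing function:

	#define IOCTL_IN(arg, type, in)					\
		if (copy_from_user(&(in), (type __user *)(arg),		\
				   sizeof(in)))				\
			return -EFAULT;

	#define IOCTL_OUT(arg, type, out)				\
		if (copy_to_user((type __user *)(arg), &(out),		\
				 sizeof(out)))				\
			return -EFAULT;

This is why the per-command helpers being removed above could switch to explicit copy_from_user()/copy_to_user() calls: the behavior is equivalent, only spelled out.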
return 0; + } - ret = cdrom_read_mech_status(cdi, info); - if (!ret) - ret = info->slots[arg].change; - kfree(info); - return ret; -} + case CDROMEJECT: { + cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); + if (!CDROM_CAN(CDC_OPEN_TRAY)) + return -ENOSYS; + if (cdi->use_count != 1 || keeplocked) + return -EBUSY; + if (CDROM_CAN(CDC_LOCK)) + if ((ret=cdo->lock_door(cdi, 0))) + return ret; -static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); + return cdo->tray_move(cdi, 1); + } - /* - * Options need to be in sync with capability. - * Too late for that, so we have to check each one separately. - */ - switch (arg) { - case CDO_USE_FFLAGS: - case CDO_CHECK_TYPE: - break; - case CDO_LOCK: - if (!CDROM_CAN(CDC_LOCK)) - return -ENOSYS; - break; - case 0: - return cdi->options; - /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */ - default: - if (!CDROM_CAN(arg)) + case CDROMCLOSETRAY: { + cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); + if (!CDROM_CAN(CDC_CLOSE_TRAY)) return -ENOSYS; - } - cdi->options |= (int) arg; - return cdi->options; -} - -static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); + return cdo->tray_move(cdi, 0); + } - cdi->options &= ~(int) arg; - return cdi->options; -} + case CDROMEJECT_SW: { + cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); + if (!CDROM_CAN(CDC_OPEN_TRAY)) + return -ENOSYS; + if (keeplocked) + return -EBUSY; + cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT); + if (arg) + cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT; + return 0; + } -static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); + case CDROM_MEDIA_CHANGED: { + struct cdrom_changer_info *info; + int changed; - if (!CDROM_CAN(CDC_SELECT_SPEED)) - return -ENOSYS; - return cdi->ops->select_speed(cdi, arg); -} + cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); + if (!CDROM_CAN(CDC_MEDIA_CHANGED)) + return -ENOSYS; -static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); + /* cannot select disc or select current disc */ + if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) + return media_changed(cdi, 1); - if (!CDROM_CAN(CDC_SELECT_DISC)) - return -ENOSYS; - - if (arg != CDSL_CURRENT && arg != CDSL_NONE) { - if ((int)arg >= cdi->capacity) + if ((unsigned int)arg >= cdi->capacity) return -EINVAL; - } - /* - * ->select_disc is a hook to allow a driver-specific way of - * seleting disc. However, since there is no equivalent hook for - * cdrom_slot_status this may not actually be useful... 
- */ - if (cdi->ops->select_disc) - return cdi->ops->select_disc(cdi, arg); + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; - cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); - return cdrom_select_disc(cdi, arg); -} + if ((ret = cdrom_read_mech_status(cdi, info))) { + kfree(info); + return ret; + } -static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, - struct block_device *bdev) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); + changed = info->slots[arg].change; + kfree(info); + return changed; + } - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (!CDROM_CAN(CDC_RESET)) - return -ENOSYS; - invalidate_bdev(bdev, 0); - return cdi->ops->reset(cdi); -} + case CDROM_SET_OPTIONS: { + cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); + /* options need to be in sync with capability. too late for + that, so we have to check each one separately... */ + switch (arg) { + case CDO_USE_FFLAGS: + case CDO_CHECK_TYPE: + break; + case CDO_LOCK: + if (!CDROM_CAN(CDC_LOCK)) + return -ENOSYS; + break; + case 0: + return cdi->options; + /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */ + default: + if (!CDROM_CAN(arg)) + return -ENOSYS; + } + cdi->options |= (int) arg; + return cdi->options; + } -static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); + case CDROM_CLEAR_OPTIONS: { + cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); + cdi->options &= ~(int) arg; + return cdi->options; + } - if (!CDROM_CAN(CDC_LOCK)) - return -EDRIVE_CANT_DO_THIS; + case CDROM_SELECT_SPEED: { + cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); + if (!CDROM_CAN(CDC_SELECT_SPEED)) + return -ENOSYS; + return cdo->select_speed(cdi, arg); + } - keeplocked = arg ? 1 : 0; + case CDROM_SELECT_DISC: { + cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); + if (!CDROM_CAN(CDC_SELECT_DISC)) + return -ENOSYS; - /* - * Don't unlock the door on multiple opens by default, but allow - * root to do so. - */ - if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN)) - return -EBUSY; - return cdi->ops->lock_door(cdi, arg); -} + if ((arg != CDSL_CURRENT) && (arg != CDSL_NONE)) + if ((int)arg >= cdi->capacity) + return -EINVAL; + + /* cdo->select_disc is a hook to allow a driver-specific + * way of seleting disc. However, since there is no + * equiv hook for cdrom_slot_status this may not + * actually be useful... + */ + if (cdo->select_disc != NULL) + return cdo->select_disc(cdi, arg); + + /* no driver specific select_disc(), call our own */ + cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); + return cdrom_select_disc(cdi, arg); + } -static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); + case CDROMRESET: { + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); + if (!CDROM_CAN(CDC_RESET)) + return -ENOSYS; + invalidate_bdev(ip->i_bdev, 0); + return cdo->reset(cdi); + } - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - debug = arg ? 1 : 0; - return debug; -} + case CDROM_LOCKDOOR: { + cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); + if (!CDROM_CAN(CDC_LOCK)) + return -EDRIVE_CANT_DO_THIS; + keeplocked = arg ? 
1 : 0; + /* don't unlock the door on multiple opens,but allow root + * to do so */ + if ((cdi->use_count != 1) && !arg && !capable(CAP_SYS_ADMIN)) + return -EBUSY; + return cdo->lock_door(cdi, arg); + } -static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); - return (cdi->ops->capability & ~cdi->mask); -} + case CDROM_DEBUG: { + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); + debug = arg ? 1 : 0; + return debug; + } -/* - * The following function is implemented, although very few audio + case CDROM_GET_CAPABILITY: { + cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); + return (cdo->capability & ~cdi->mask); + } + +/* The following function is implemented, although very few audio * discs give Universal Product Code information, which should just be * the Medium Catalog Number on the box. Note, that the way the code * is written on the CD is /not/ uniform across all discs! */ -static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_mcn mcn; - int ret; - - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); - - if (!(cdi->ops->capability & CDC_MCN)) - return -ENOSYS; - ret = cdi->ops->get_mcn(cdi, &mcn); - if (ret) - return ret; - - if (copy_to_user(argp, &mcn, sizeof(mcn))) - return -EFAULT; - cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); - return 0; -} - -static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, - unsigned long arg) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); - - if (!(cdi->ops->capability & CDC_DRIVE_STATUS)) - return -ENOSYS; - if (!CDROM_CAN(CDC_SELECT_DISC) || - (arg == CDSL_CURRENT || arg == CDSL_NONE)) - return cdi->ops->drive_status(cdi, CDSL_CURRENT); - if (((int)arg >= cdi->capacity)) - return -EINVAL; - return cdrom_slot_status(cdi, arg); -} - -/* - * Ok, this is where problems start. The current interface for the - * CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that - * CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunatly, while this - * is often the case, it is also very common for CDs to have some tracks - * with data, and some tracks with audio. Just because I feel like it, - * I declare the following to be the best way to cope. If the CD has ANY - * data tracks on it, it will be returned as a data CD. If it has any XA - * tracks, I will return it as that. Now I could simplify this interface - * by combining these returns with the above, but this more clearly - * demonstrates the problem with the current interface. Too bad this - * wasn't designed to use bitmasks... -Erik - * - * Well, now we have the option CDS_MIXED: a mixed-type CD. - * User level programmers might feel the ioctl is not very useful. 
- * ---david - */ -static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi) -{ - tracktype tracks; - - cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); - - cdrom_count_tracks(cdi, &tracks); - if (tracks.error) - return tracks.error; - - /* Policy mode on */ - if (tracks.audio > 0) { - if (!tracks.data && !tracks.cdi && !tracks.xa) - return CDS_AUDIO; - else - return CDS_MIXED; - } - - if (tracks.cdi > 0) - return CDS_XA_2_2; - if (tracks.xa > 0) - return CDS_XA_2_1; - if (tracks.data > 0) - return CDS_DATA_1; - /* Policy mode off */ - - cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); - return CDS_NO_INFO; -} - -static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi) -{ - cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); - return cdi->capacity; -} - -static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_subchnl q; - u8 requested, back; - int ret; - - /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - if (copy_from_user(&q, argp, sizeof(q))) - return -EFAULT; - - requested = q.cdsc_format; - if (requested != CDROM_MSF && requested != CDROM_LBA) - return -EINVAL; - q.cdsc_format = CDROM_MSF; - - ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q); - if (ret) - return ret; - - back = q.cdsc_format; /* local copy */ - sanitize_format(&q.cdsc_absaddr, &back, requested); - sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); - - if (copy_to_user(argp, &q, sizeof(q))) - return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ - return 0; -} - -static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_tochdr header; - int ret; - - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - if (copy_from_user(&header, argp, sizeof(header))) - return -EFAULT; - - ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header); - if (ret) - return ret; - - if (copy_to_user(argp, &header, sizeof(header))) - return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ - return 0; -} - -static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_tocentry entry; - u8 requested_format; - int ret; - - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - if (copy_from_user(&entry, argp, sizeof(entry))) - return -EFAULT; - - requested_format = entry.cdte_format; - if (requested_format != CDROM_MSF && requested_format != CDROM_LBA) - return -EINVAL; - /* make interface to low-level uniform */ - entry.cdte_format = CDROM_MSF; - ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry); - if (ret) - return ret; - sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format); - - if (copy_to_user(argp, &entry, sizeof(entry))) - return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ - return 0; -} - -static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_msf msf; - - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - if (copy_from_user(&msf, argp, sizeof(msf))) - return -EFAULT; - return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf); -} - -static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_ti ti; - int ret; - - 
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - if (copy_from_user(&ti, argp, sizeof(ti))) - return -EFAULT; - - ret = check_for_audio_disc(cdi, cdi->ops); - if (ret) - return ret; - return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti); -} -static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_volctrl volume; - - cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - if (copy_from_user(&volume, argp, sizeof(volume))) - return -EFAULT; - return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume); -} - -static int cdrom_ioctl_volread(struct cdrom_device_info *cdi, - void __user *argp) -{ - struct cdrom_volctrl volume; - int ret; - - cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); - - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - - ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume); - if (ret) - return ret; - - if (copy_to_user(argp, &volume, sizeof(volume))) - return -EFAULT; - return 0; -} - -static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, - unsigned int cmd) -{ - int ret; - - cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); + case CDROM_GET_MCN: { + struct cdrom_mcn mcn; + cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); + if (!(cdo->capability & CDC_MCN)) + return -ENOSYS; + if ((ret=cdo->get_mcn(cdi, &mcn))) + return ret; + IOCTL_OUT(arg, struct cdrom_mcn, mcn); + cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); + return 0; + } - if (!CDROM_CAN(CDC_PLAY_AUDIO)) - return -ENOSYS; - ret = check_for_audio_disc(cdi, cdi->ops); - if (ret) - return ret; - return cdi->ops->audio_ioctl(cdi, cmd, NULL); -} + case CDROM_DRIVE_STATUS: { + cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); + if (!(cdo->capability & CDC_DRIVE_STATUS)) + return -ENOSYS; + if (!CDROM_CAN(CDC_SELECT_DISC)) + return cdo->drive_status(cdi, CDSL_CURRENT); + if ((arg == CDSL_CURRENT) || (arg == CDSL_NONE)) + return cdo->drive_status(cdi, CDSL_CURRENT); + if (((int)arg >= cdi->capacity)) + return -EINVAL; + return cdrom_slot_status(cdi, arg); + } -/* - * Just about every imaginable ioctl is supported in the Uniform layer - * these days. - * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). - */ -int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi, - struct inode *ip, unsigned int cmd, unsigned long arg) -{ - void __user *argp = (void __user *)arg; - int ret; + /* Ok, this is where problems start. The current interface for the + CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption + that CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunatly, + while this is often the case, it is also very common for CDs to + have some tracks with data, and some tracks with audio. Just + because I feel like it, I declare the following to be the best + way to cope. If the CD has ANY data tracks on it, it will be + returned as a data CD. If it has any XA tracks, I will return + it as that. Now I could simplify this interface by combining these + returns with the above, but this more clearly demonstrates + the problem with the current interface. Too bad this wasn't + designed to use bitmasks... -Erik + + Well, now we have the option CDS_MIXED: a mixed-type CD. + User level programmers might feel the ioctl is not very useful. 
+ ---david + */ + case CDROM_DISC_STATUS: { + tracktype tracks; + cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); + cdrom_count_tracks(cdi, &tracks); + if (tracks.error) + return(tracks.error); + + /* Policy mode on */ + if (tracks.audio > 0) { + if (tracks.data==0 && tracks.cdi==0 && tracks.xa==0) + return CDS_AUDIO; + else + return CDS_MIXED; + } + if (tracks.cdi > 0) return CDS_XA_2_2; + if (tracks.xa > 0) return CDS_XA_2_1; + if (tracks.data > 0) return CDS_DATA_1; + /* Policy mode off */ - /* - * Try the generic SCSI command ioctl's first. - */ - ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, argp); - if (ret != -ENOTTY) - return ret; + cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); + return CDS_NO_INFO; + } - switch (cmd) { - case CDROMMULTISESSION: - return cdrom_ioctl_multisession(cdi, argp); - case CDROMEJECT: - return cdrom_ioctl_eject(cdi); - case CDROMCLOSETRAY: - return cdrom_ioctl_closetray(cdi); - case CDROMEJECT_SW: - return cdrom_ioctl_eject_sw(cdi, arg); - case CDROM_MEDIA_CHANGED: - return cdrom_ioctl_media_changed(cdi, arg); - case CDROM_SET_OPTIONS: - return cdrom_ioctl_set_options(cdi, arg); - case CDROM_CLEAR_OPTIONS: - return cdrom_ioctl_clear_options(cdi, arg); - case CDROM_SELECT_SPEED: - return cdrom_ioctl_select_speed(cdi, arg); - case CDROM_SELECT_DISC: - return cdrom_ioctl_select_disc(cdi, arg); - case CDROMRESET: - return cdrom_ioctl_reset(cdi, ip->i_bdev); - case CDROM_LOCKDOOR: - return cdrom_ioctl_lock_door(cdi, arg); - case CDROM_DEBUG: - return cdrom_ioctl_debug(cdi, arg); - case CDROM_GET_CAPABILITY: - return cdrom_ioctl_get_capability(cdi); - case CDROM_GET_MCN: - return cdrom_ioctl_get_mcn(cdi, argp); - case CDROM_DRIVE_STATUS: - return cdrom_ioctl_drive_status(cdi, arg); - case CDROM_DISC_STATUS: - return cdrom_ioctl_disc_status(cdi); - case CDROM_CHANGER_NSLOTS: - return cdrom_ioctl_changer_nslots(cdi); + case CDROM_CHANGER_NSLOTS: { + cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); + return cdi->capacity; + } } - /* - * Use the ioctls that are implemented through the generic_packet() - * interface. this may look at bit funny, but if -ENOTTY is - * returned that particular ioctl is not implemented and we - * let it go through the device specific ones. - */ + /* use the ioctls that are implemented through the generic_packet() + interface. this may look at bit funny, but if -ENOTTY is + returned that particular ioctl is not implemented and we + let it go through the device specific ones. */ if (CDROM_CAN(CDC_GENERIC_PACKET)) { ret = mmc_ioctl(cdi, cmd, arg); - if (ret != -ENOTTY) + if (ret != -ENOTTY) { return ret; + } } - /* - * Note: most of the cdinfo() calls are commented out here, - * because they fill up the sys log when CD players poll - * the drive. - */ + /* note: most of the cdinfo() calls are commented out here, + because they fill up the sys log when CD players poll + the drive. 
*/ switch (cmd) { - case CDROMSUBCHNL: - return cdrom_ioctl_get_subchnl(cdi, argp); - case CDROMREADTOCHDR: - return cdrom_ioctl_read_tochdr(cdi, argp); - case CDROMREADTOCENTRY: - return cdrom_ioctl_read_tocentry(cdi, argp); - case CDROMPLAYMSF: - return cdrom_ioctl_play_msf(cdi, argp); - case CDROMPLAYTRKIND: - return cdrom_ioctl_play_trkind(cdi, argp); - case CDROMVOLCTRL: - return cdrom_ioctl_volctrl(cdi, argp); - case CDROMVOLREAD: - return cdrom_ioctl_volread(cdi, argp); + case CDROMSUBCHNL: { + struct cdrom_subchnl q; + u_char requested, back; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ + IOCTL_IN(arg, struct cdrom_subchnl, q); + requested = q.cdsc_format; + if (!((requested == CDROM_MSF) || + (requested == CDROM_LBA))) + return -EINVAL; + q.cdsc_format = CDROM_MSF; + if ((ret=cdo->audio_ioctl(cdi, cmd, &q))) + return ret; + back = q.cdsc_format; /* local copy */ + sanitize_format(&q.cdsc_absaddr, &back, requested); + sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); + IOCTL_OUT(arg, struct cdrom_subchnl, q); + /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ + return 0; + } + case CDROMREADTOCHDR: { + struct cdrom_tochdr header; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ + IOCTL_IN(arg, struct cdrom_tochdr, header); + if ((ret=cdo->audio_ioctl(cdi, cmd, &header))) + return ret; + IOCTL_OUT(arg, struct cdrom_tochdr, header); + /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ + return 0; + } + case CDROMREADTOCENTRY: { + struct cdrom_tocentry entry; + u_char requested_format; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ + IOCTL_IN(arg, struct cdrom_tocentry, entry); + requested_format = entry.cdte_format; + if (!((requested_format == CDROM_MSF) || + (requested_format == CDROM_LBA))) + return -EINVAL; + /* make interface to low-level uniform */ + entry.cdte_format = CDROM_MSF; + if ((ret=cdo->audio_ioctl(cdi, cmd, &entry))) + return ret; + sanitize_format(&entry.cdte_addr, + &entry.cdte_format, requested_format); + IOCTL_OUT(arg, struct cdrom_tocentry, entry); + /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ + return 0; + } + case CDROMPLAYMSF: { + struct cdrom_msf msf; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); + IOCTL_IN(arg, struct cdrom_msf, msf); + return cdo->audio_ioctl(cdi, cmd, &msf); + } + case CDROMPLAYTRKIND: { + struct cdrom_ti ti; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); + IOCTL_IN(arg, struct cdrom_ti, ti); + CHECKAUDIO; + return cdo->audio_ioctl(cdi, cmd, &ti); + } + case CDROMVOLCTRL: { + struct cdrom_volctrl volume; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); + IOCTL_IN(arg, struct cdrom_volctrl, volume); + return cdo->audio_ioctl(cdi, cmd, &volume); + } + case CDROMVOLREAD: { + struct cdrom_volctrl volume; + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); + if ((ret=cdo->audio_ioctl(cdi, cmd, &volume))) + return ret; + IOCTL_OUT(arg, struct cdrom_volctrl, volume); + return 0; + } case CDROMSTART: case CDROMSTOP: case CDROMPAUSE: - case CDROMRESUME: - return cdrom_ioctl_audioctl(cdi, cmd); - } + case CDROMRESUME: { + if (!CDROM_CAN(CDC_PLAY_AUDIO)) + return -ENOSYS; + cdinfo(CD_DO_IOCTL, "doing audio 
ioctl (start/stop/pause/resume)\n"); + CHECKAUDIO; + return cdo->audio_ioctl(cdi, cmd, NULL); + } + } /* switch */ + /* do the device specific ioctls */ + if (CDROM_CAN(CDC_IOCTLS)) + return cdo->dev_ioctl(cdi, cmd, arg); + return -ENOSYS; } diff --git a/trunk/drivers/cdrom/cdu31a.c b/trunk/drivers/cdrom/cdu31a.c index 72ffd64e8b1e..378e88d20757 100644 --- a/trunk/drivers/cdrom/cdu31a.c +++ b/trunk/drivers/cdrom/cdu31a.c @@ -2668,7 +2668,7 @@ static int scd_audio_ioctl(struct cdrom_device_info *cdi, return retval; } -static int scd_read_audio(struct cdrom_device_info *cdi, +static int scd_dev_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -2894,10 +2894,11 @@ static struct cdrom_device_ops scd_dops = { .get_mcn = scd_get_mcn, .reset = scd_reset, .audio_ioctl = scd_audio_ioctl, + .dev_ioctl = scd_dev_ioctl, .capability = CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | - CDC_RESET | CDC_DRIVE_STATUS, + CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS, .n_minors = 1, }; @@ -2935,9 +2936,6 @@ static int scd_block_ioctl(struct inode *inode, struct file *file, case CDROMCLOSETRAY: retval = scd_tray_move(&scd_info, 0); break; - case CDROMREADAUDIO: - retval = scd_read_audio(&scd_info, CDROMREADAUDIO, arg); - break; default: retval = cdrom_ioctl(file, &scd_info, inode, cmd, arg); } diff --git a/trunk/drivers/cdrom/cm206.c b/trunk/drivers/cdrom/cm206.c index fad27a87ce35..ce127f7ec0f6 100644 --- a/trunk/drivers/cdrom/cm206.c +++ b/trunk/drivers/cdrom/cm206.c @@ -1157,6 +1157,32 @@ static int cm206_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, } } +/* Ioctl. These ioctls are specific to the cm206 driver. I have made + some driver statistics accessible through ioctl calls. 
+ */ + +static int cm206_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { +#ifdef STATISTICS + case CM206CTL_GET_STAT: + if (arg >= NR_STATS) + return -EINVAL; + else + return cd->stats[arg]; + case CM206CTL_GET_LAST_STAT: + if (arg >= NR_STATS) + return -EINVAL; + else + return cd->last_stat[arg]; +#endif + default: + debug(("Unknown ioctl call 0x%x\n", cmd)); + return -EINVAL; + } +} + static int cm206_media_changed(struct cdrom_device_info *cdi, int disc_nr) { if (cd != NULL) { @@ -1295,10 +1321,11 @@ static struct cdrom_device_ops cm206_dops = { .get_mcn = cm206_get_upc, .reset = cm206_reset, .audio_ioctl = cm206_audio_ioctl, + .dev_ioctl = cm206_ioctl, .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | CDC_MCN | CDC_PLAY_AUDIO | CDC_SELECT_SPEED | - CDC_DRIVE_STATUS, + CDC_IOCTLS | CDC_DRIVE_STATUS, .n_minors = 1, }; @@ -1323,21 +1350,6 @@ static int cm206_block_release(struct inode *inode, struct file *file) static int cm206_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, unsigned long arg) { - switch (cmd) { -#ifdef STATISTICS - case CM206CTL_GET_STAT: - if (arg >= NR_STATS) - return -EINVAL; - return cd->stats[arg]; - case CM206CTL_GET_LAST_STAT: - if (arg >= NR_STATS) - return -EINVAL; - return cd->last_stat[arg]; -#endif - default: - break; - } - return cdrom_ioctl(file, &cm206_info, inode, cmd, arg); } diff --git a/trunk/drivers/cdrom/sbpcd.c b/trunk/drivers/cdrom/sbpcd.c index 4760f515f591..466e9c2974bd 100644 --- a/trunk/drivers/cdrom/sbpcd.c +++ b/trunk/drivers/cdrom/sbpcd.c @@ -4160,13 +4160,18 @@ static int sbpcd_get_last_session(struct cdrom_device_info *cdi, struct cdrom_mu return 0; } -static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, - void * arg) +/*==========================================================================*/ +/*==========================================================================*/ +/* + * ioctl support + */ +static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd, + u_long arg) { struct sbpcd_drive *p = cdi->handle; - int i, st, j; + int i; - msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08p)\n", cdi->name, cmd, arg); + msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg); if (p->drv_id==-1) { msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name); return (-ENXIO); /* no such drive */ @@ -4178,1192 +4183,1194 @@ static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd); switch (cmd) /* Sun-compatible */ { + case DDIOCSDBG: /* DDI Debug */ + if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM); + i=sbpcd_dbg_ioctl(arg,1); + RETURN_UP(i); + case CDROMRESET: /* hard reset the drive */ + msg(DBG_IOC,"ioctl: CDROMRESET entered.\n"); + i=DriveReset(); + current_drive->audio_state=0; + RETURN_UP(i); - case CDROMPAUSE: /* Pause the drive */ - msg(DBG_IOC,"ioctl: CDROMPAUSE entered.\n"); - /* pause the drive unit when it is currently in PLAY mode, */ - /* or reset the starting and ending locations when in PAUSED mode. */ - /* If applicable, at the next stopping point it reaches */ - /* the drive will discontinue playing. 
*/ - switch (current_drive->audio_state) - { - case audio_playing: - if (famL_drive) i=cc_ReadSubQ(); - else i=cc_Pause_Resume(1); - if (i<0) RETURN_UP(-EIO); - if (famL_drive) i=cc_Pause_Resume(1); - else i=cc_ReadSubQ(); - if (i<0) RETURN_UP(-EIO); - current_drive->pos_audio_start=current_drive->SubQ_run_tot; - current_drive->audio_state=audio_pausing; - RETURN_UP(0); - case audio_pausing: - i=cc_Seek(current_drive->pos_audio_start,1); - if (i<0) RETURN_UP(-EIO); - RETURN_UP(0); - default: - RETURN_UP(-EINVAL); - } - - case CDROMRESUME: /* resume paused audio play */ - msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n"); - /* resume playing audio tracks when a previous PLAY AUDIO call has */ - /* been paused with a PAUSE command. */ - /* It will resume playing from the location saved in SubQ_run_tot. */ - if (current_drive->audio_state!=audio_pausing) RETURN_UP(-EINVAL); - if (famL_drive) - i=cc_PlayAudio(current_drive->pos_audio_start, - current_drive->pos_audio_end); - else i=cc_Pause_Resume(3); - if (i<0) RETURN_UP(-EIO); - current_drive->audio_state=audio_playing; - RETURN_UP(0); - - case CDROMPLAYMSF: - msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n"); + case CDROMREADMODE1: + msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n"); #ifdef SAFE_MIXED if (current_drive->has_data>1) RETURN_UP(-EBUSY); #endif /* SAFE_MIXED */ - if (current_drive->audio_state==audio_playing) - { - i=cc_Pause_Resume(1); - if (i<0) RETURN_UP(-EIO); - i=cc_ReadSubQ(); - if (i<0) RETURN_UP(-EIO); - current_drive->pos_audio_start=current_drive->SubQ_run_tot; - i=cc_Seek(current_drive->pos_audio_start,1); - } - memcpy(&msf, (void *) arg, sizeof(struct cdrom_msf)); - /* values come as msf-bin */ - current_drive->pos_audio_start = (msf.cdmsf_min0<<16) | - (msf.cdmsf_sec0<<8) | - msf.cdmsf_frame0; - current_drive->pos_audio_end = (msf.cdmsf_min1<<16) | - (msf.cdmsf_sec1<<8) | - msf.cdmsf_frame1; - msg(DBG_IOX,"ioctl: CDROMPLAYMSF %08X %08X\n", - current_drive->pos_audio_start,current_drive->pos_audio_end); - i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end); - if (i<0) - { - msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i); - DriveReset(); - current_drive->audio_state=0; - RETURN_UP(-EIO); - } - current_drive->audio_state=audio_playing; + cc_ModeSelect(CD_FRAMESIZE); + cc_ModeSense(); + current_drive->mode=READ_M1; RETURN_UP(0); - case CDROMPLAYTRKIND: /* Play a track. This currently ignores index. 
*/ - msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n"); + case CDROMREADMODE2: /* not usable at the moment */ + msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n"); #ifdef SAFE_MIXED if (current_drive->has_data>1) RETURN_UP(-EBUSY); #endif /* SAFE_MIXED */ - if (current_drive->audio_state==audio_playing) - { - msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n"); -#if 1 - RETURN_UP(0); /* just let us play on */ -#else - RETURN_UP(-EINVAL); /* play on, but say "error" */ -#endif - } - memcpy(&ti,(void *) arg,sizeof(struct cdrom_ti)); - msg(DBG_IOX,"ioctl: trk0: %d, ind0: %d, trk1:%d, ind1:%d\n", - ti.cdti_trk0,ti.cdti_ind0,ti.cdti_trk1,ti.cdti_ind1); - if (ti.cdti_trk0n_first_track) RETURN_UP(-EINVAL); - if (ti.cdti_trk0>current_drive->n_last_track) RETURN_UP(-EINVAL); - if (ti.cdti_trk1current_drive->n_last_track) ti.cdti_trk1=current_drive->n_last_track; - current_drive->pos_audio_start=current_drive->TocBuffer[ti.cdti_trk0].address; - current_drive->pos_audio_end=current_drive->TocBuffer[ti.cdti_trk1+1].address; - i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end); - if (i<0) - { - msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i); - DriveReset(); - current_drive->audio_state=0; - RETURN_UP(-EIO); - } - current_drive->audio_state=audio_playing; + cc_ModeSelect(CD_FRAMESIZE_RAW1); + cc_ModeSense(); + current_drive->mode=READ_M2; RETURN_UP(0); - case CDROMREADTOCHDR: /* Read the table of contents header */ - msg(DBG_IOC,"ioctl: CDROMREADTOCHDR entered.\n"); - tochdr.cdth_trk0=current_drive->n_first_track; - tochdr.cdth_trk1=current_drive->n_last_track; - memcpy((void *) arg, &tochdr, sizeof(struct cdrom_tochdr)); - RETURN_UP(0); + case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */ + msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n"); + if (current_drive->sbp_audsiz>0) + vfree(current_drive->aud_buf); + current_drive->aud_buf=NULL; + current_drive->sbp_audsiz=arg; - case CDROMREADTOCENTRY: /* Read an entry in the table of contents */ - msg(DBG_IOC,"ioctl: CDROMREADTOCENTRY entered.\n"); - memcpy(&tocentry, (void *) arg, sizeof(struct cdrom_tocentry)); - i=tocentry.cdte_track; - if (i==CDROM_LEADOUT) i=current_drive->n_last_track+1; - else if (in_first_track||i>current_drive->n_last_track) - RETURN_UP(-EINVAL); - tocentry.cdte_adr=current_drive->TocBuffer[i].ctl_adr&0x0F; - tocentry.cdte_ctrl=(current_drive->TocBuffer[i].ctl_adr>>4)&0x0F; - tocentry.cdte_datamode=current_drive->TocBuffer[i].format; - if (tocentry.cdte_format==CDROM_MSF) /* MSF-bin required */ + if (current_drive->sbp_audsiz>16) { - tocentry.cdte_addr.msf.minute=(current_drive->TocBuffer[i].address>>16)&0x00FF; - tocentry.cdte_addr.msf.second=(current_drive->TocBuffer[i].address>>8)&0x00FF; - tocentry.cdte_addr.msf.frame=current_drive->TocBuffer[i].address&0x00FF; + current_drive->sbp_audsiz = 0; + RETURN_UP(current_drive->sbp_audsiz); } - else if (tocentry.cdte_format==CDROM_LBA) /* blk required */ - tocentry.cdte_addr.lba=msf2blk(current_drive->TocBuffer[i].address); - else RETURN_UP(-EINVAL); - memcpy((void *) arg, &tocentry, sizeof(struct cdrom_tocentry)); - RETURN_UP(0); + + if (current_drive->sbp_audsiz>0) + { + current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW); + if (current_drive->aud_buf==NULL) + { + msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz); + current_drive->sbp_audsiz=0; + } + else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz); + } + RETURN_UP(current_drive->sbp_audsiz); + + 
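The CDROMREADAUDIO case that follows is the canonical shape for an ioctl that fills a large user buffer: copy in the request descriptor, bounds-check it against the driver's staging buffer, verify the destination is writable, read into kernel memory, and only then copy the frames out. Condensed from the code below:

	struct cdrom_read_audio ra;

	if (copy_from_user(&ra, (void __user *)arg, sizeof(ra)))
		return -EFAULT;
	if (ra.nframes < 0 || ra.nframes > current_drive->sbp_audsiz)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, ra.buf,
		       ra.nframes * CD_FRAMESIZE_RAW))
		return -EFAULT;
	/* ... stage ra.nframes raw frames into aud_buf ... */
	if (copy_to_user(ra.buf, current_drive->aud_buf,
			 ra.nframes * CD_FRAMESIZE_RAW))
		return -EFAULT;

In the actual code every exit goes through RETURN_UP(), which also releases ioctl_read_sem; the sketch omits the locking.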
case CDROMREADAUDIO: + { /* start of CDROMREADAUDIO */ + int i=0, j=0, frame, block=0; + u_int try=0; + u_long timeout; + u_char *p; + u_int data_tries = 0; + u_int data_waits = 0; + u_int data_retrying = 0; + int status_tries; + int error_flag; - case CDROMSTOP: /* Spin down the drive */ - msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n"); + msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n"); + if (fam0_drive) RETURN_UP(-EINVAL); + if (famL_drive) RETURN_UP(-EINVAL); + if (famV_drive) RETURN_UP(-EINVAL); + if (famT_drive) RETURN_UP(-EINVAL); #ifdef SAFE_MIXED if (current_drive->has_data>1) RETURN_UP(-EBUSY); #endif /* SAFE_MIXED */ - i=cc_Pause_Resume(1); - current_drive->audio_state=0; -#if 0 - cc_DriveReset(); + if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL); + if (copy_from_user(&read_audio, (void __user *)arg, + sizeof(struct cdrom_read_audio))) + RETURN_UP(-EFAULT); + if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL); + if (!access_ok(VERIFY_WRITE, read_audio.buf, + read_audio.nframes*CD_FRAMESIZE_RAW)) + RETURN_UP(-EFAULT); + + if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */ + block=msf2lba(&read_audio.addr.msf.minute); + else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */ + block=read_audio.addr.lba; + else RETURN_UP(-EINVAL); +#if 000 + i=cc_SetSpeed(speed_150,0,0); + if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i); #endif - RETURN_UP(i); - - case CDROMSTART: /* Spin up the drive */ - msg(DBG_IOC,"ioctl: CDROMSTART entered.\n"); - cc_SpinUp(); - current_drive->audio_state=0; - RETURN_UP(0); - - case CDROMVOLCTRL: /* Volume control */ - msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n"); - memcpy(&volctrl,(char *) arg,sizeof(volctrl)); - current_drive->vol_chan0=0; - current_drive->vol_ctrl0=volctrl.channel0; - current_drive->vol_chan1=1; - current_drive->vol_ctrl1=volctrl.channel1; - i=cc_SetVolume(); - RETURN_UP(0); - - case CDROMVOLREAD: /* read Volume settings from drive */ - msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n"); - st=cc_GetVolume(); - if (st<0) RETURN_UP(st); - volctrl.channel0=current_drive->vol_ctrl0; - volctrl.channel1=current_drive->vol_ctrl1; - volctrl.channel2=0; - volctrl.channel2=0; - memcpy((void *)arg,&volctrl,sizeof(volctrl)); - RETURN_UP(0); - - case CDROMSUBCHNL: /* Get subchannel info */ - msg(DBG_IOS,"ioctl: CDROMSUBCHNL entered.\n"); - /* Bogus, I can do better than this! --AJK - if ((st_spinning)||(!subq_valid)) { - i=cc_ReadSubQ(); - if (i<0) RETURN_UP(-EIO); - } - */ - i=cc_ReadSubQ(); - if (i<0) { - j=cc_ReadError(); /* clear out error status from drive */ - current_drive->audio_state=CDROM_AUDIO_NO_STATUS; - /* get and set the disk state here, - probably not the right place, but who cares! - It makes it work properly! 
--AJK */ - if (current_drive->CD_changed==0xFF) { - msg(DBG_000,"Disk changed detect\n"); - current_drive->diskstate_flags &= ~cd_size_bit; + msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n", + block, blk2msf(block)); + msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n"); +#if OLD_BUSY + while (busy_data) sbp_sleep(HZ/10); /* wait a bit */ + busy_audio=1; +#endif /* OLD_BUSY */ + error_flag=0; + for (data_tries=5; data_tries>0; data_tries--) + { + msg(DBG_AUD,"data_tries=%d ...\n", data_tries); + current_drive->mode=READ_AU; + cc_ModeSelect(CD_FRAMESIZE_RAW); + cc_ModeSense(); + for (status_tries=3; status_tries > 0; status_tries--) + { + flags_cmd_out |= f_respo3; + cc_ReadStatus(); + if (sbp_status() != 0) break; + if (st_check) cc_ReadError(); + sbp_sleep(1); /* wait a bit, try again */ } - RETURN_UP(-EIO); - } - if (current_drive->CD_changed==0xFF) { - /* reread the TOC because the disk has changed! --AJK */ - msg(DBG_000,"Disk changed STILL detected, rereading TOC!\n"); - i=DiskInfo(); - if(i==0) { - current_drive->CD_changed=0x00; /* cd has changed, procede, */ - RETURN_UP(-EIO); /* and get TOC, etc on next try! --AJK */ - } else { - RETURN_UP(-EIO); /* we weren't ready yet! --AJK */ + if (status_tries == 0) + { + msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__); + continue; } - } - memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl)); - /* - This virtual crap is very bogus! - It doesn't detect when the cd is done playing audio! - Lets do this right with proper hardware register reading! - */ - cc_ReadStatus(); - i=ResponseStatus(); - msg(DBG_000,"Drive Status: door_locked =%d.\n", st_door_locked); - msg(DBG_000,"Drive Status: door_closed =%d.\n", st_door_closed); - msg(DBG_000,"Drive Status: caddy_in =%d.\n", st_caddy_in); - msg(DBG_000,"Drive Status: disk_ok =%d.\n", st_diskok); - msg(DBG_000,"Drive Status: spinning =%d.\n", st_spinning); - msg(DBG_000,"Drive Status: busy =%d.\n", st_busy); - /* st_busy indicates if it's _ACTUALLY_ playing audio */ - switch (current_drive->audio_state) - { - case audio_playing: - if(st_busy==0) { - /* CD has stopped playing audio --AJK */ - current_drive->audio_state=audio_completed; - SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED; - } else { - SC.cdsc_audiostatus=CDROM_AUDIO_PLAY; + msg(DBG_AUD,"read_audio: sbp_status: ok.\n"); + + flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; + if (fam0L_drive) + { + flags_cmd_out |= f_lopsta | f_getsta | f_bit1; + cmd_type=READ_M2; + drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */ + drvcmd[1]=(block>>16)&0x000000ff; + drvcmd[2]=(block>>8)&0x000000ff; + drvcmd[3]=block&0x000000ff; + drvcmd[4]=0; + drvcmd[5]=read_audio.nframes; /* # of frames */ + drvcmd[6]=0; } - break; - case audio_pausing: - SC.cdsc_audiostatus=CDROM_AUDIO_PAUSED; - break; - case audio_completed: - SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED; - break; - default: - SC.cdsc_audiostatus=CDROM_AUDIO_NO_STATUS; - break; - } - SC.cdsc_adr=current_drive->SubQ_ctl_adr; - SC.cdsc_ctrl=current_drive->SubQ_ctl_adr>>4; - SC.cdsc_trk=bcd2bin(current_drive->SubQ_trk); - SC.cdsc_ind=bcd2bin(current_drive->SubQ_pnt_idx); - if (SC.cdsc_format==CDROM_LBA) - { - SC.cdsc_absaddr.lba=msf2blk(current_drive->SubQ_run_tot); - SC.cdsc_reladdr.lba=msf2blk(current_drive->SubQ_run_trk); - } - else /* not only if (SC.cdsc_format==CDROM_MSF) */ - { - SC.cdsc_absaddr.msf.minute=(current_drive->SubQ_run_tot>>16)&0x00FF; - SC.cdsc_absaddr.msf.second=(current_drive->SubQ_run_tot>>8)&0x00FF; - 
SC.cdsc_absaddr.msf.frame=current_drive->SubQ_run_tot&0x00FF; - SC.cdsc_reladdr.msf.minute=(current_drive->SubQ_run_trk>>16)&0x00FF; - SC.cdsc_reladdr.msf.second=(current_drive->SubQ_run_trk>>8)&0x00FF; - SC.cdsc_reladdr.msf.frame=current_drive->SubQ_run_trk&0x00FF; - } - memcpy((void *) arg, &SC, sizeof(struct cdrom_subchnl)); - msg(DBG_IOS,"CDROMSUBCHNL: %1X %02X %08X %08X %02X %02X %06X %06X\n", - SC.cdsc_format,SC.cdsc_audiostatus, - SC.cdsc_adr,SC.cdsc_ctrl, - SC.cdsc_trk,SC.cdsc_ind, - SC.cdsc_absaddr,SC.cdsc_reladdr); - RETURN_UP(0); - - default: - msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); - RETURN_UP(-EINVAL); - } /* end switch(cmd) */ -} -/*==========================================================================*/ -/* - * Take care of the different block sizes between cdrom and Linux. - */ -static void sbp_transfer(struct request *req) -{ - long offs; - - while ( (req->nr_sectors > 0) && - (req->sector/4 >= current_drive->sbp_first_frame) && - (req->sector/4 <= current_drive->sbp_last_frame) ) - { - offs = (req->sector - current_drive->sbp_first_frame * 4) * 512; - memcpy(req->buffer, current_drive->sbp_buf + offs, 512); - req->nr_sectors--; - req->sector++; - req->buffer += 512; - } -} -/*==========================================================================*/ -/* - * special end_request for sbpcd to solve CURRENT==NULL bug. (GTL) - * GTL = Gonzalo Tornaria - * - * This is a kludge so we don't need to modify end_request. - * We put the req we take out after INIT_REQUEST in the requests list, - * so that end_request will discard it. - * - * The bug could be present in other block devices, perhaps we - * should modify INIT_REQUEST and end_request instead, and - * change every block device.. - * - * Could be a race here?? Could e.g. a timer interrupt schedule() us? - * If so, we should copy end_request here, and do it right.. (or - * modify end_request and the block devices). - * - * In any case, the race here would be much small than it was, and - * I couldn't reproduce.. - * - * The race could be: suppose CURRENT==NULL. We put our req in the list, - * and we are scheduled. Other process takes over, and gets into - * do_sbpcd_request. It sees CURRENT!=NULL (it is == to our req), so - * proceeds. It ends, so CURRENT is now NULL.. Now we awake somewhere in - * end_request, but now CURRENT==NULL... oops! - * - */ -#undef DEBUG_GTL - -/*==========================================================================*/ -/* - * I/O request routine, called from Linux kernel. 
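sbp_transfer() above bridges the block layer's 512-byte sectors and the drive's 2048-byte CD frames: a request sector maps to frame sector/4, and the byte offset into the staged buffer is (sector - first_frame * 4) * 512. Condensed from the loop above:

	while (req->nr_sectors > 0 &&
	       req->sector / 4 >= p->sbp_first_frame &&
	       req->sector / 4 <= p->sbp_last_frame) {
		long offs = (req->sector - p->sbp_first_frame * 4) * 512;

		memcpy(req->buffer, p->sbp_buf + offs, 512);
		req->nr_sectors--;
		req->sector++;
		req->buffer += 512;
	}

This is also why do_sbpcd_request() below can complete a request without touching the hardware when an earlier read already staged the frames.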
- */ -static void do_sbpcd_request(request_queue_t * q) -{ - u_int block; - u_int nsect; - int status_tries, data_tries; - struct request *req; - struct sbpcd_drive *p; -#ifdef DEBUG_GTL - static int xx_nr=0; - int xnr; -#endif - - request_loop: -#ifdef DEBUG_GTL - xnr=++xx_nr; - - req = elv_next_request(q); - - if (!req) - { - printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n", - xnr, current->pid, jiffies); - printk( "do_sbpcd_request[%do](NULL) end 0 (null), Time:%li\n", - xnr, jiffies); - return; - } - - printk(" do_sbpcd_request[%di](%p:%ld+%ld), Pid:%d, Time:%li\n", - xnr, req, req->sector, req->nr_sectors, current->pid, jiffies); -#endif - - req = elv_next_request(q); /* take out our request so no other */ - if (!req) - return; - - if (req -> sector == -1) - end_request(req, 0); - spin_unlock_irq(q->queue_lock); - - down(&ioctl_read_sem); - if (rq_data_dir(elv_next_request(q)) != READ) - { - msg(DBG_INF, "bad cmd %d\n", req->cmd[0]); - goto err_done; - } - p = req->rq_disk->private_data; -#if OLD_BUSY - while (busy_audio) sbp_sleep(HZ); /* wait a bit */ - busy_data=1; -#endif /* OLD_BUSY */ - - if (p->audio_state==audio_playing) goto err_done; - if (p != current_drive) - switch_drive(p); - - block = req->sector; /* always numbered as 512-byte-pieces */ - nsect = req->nr_sectors; /* always counted as 512-byte-pieces */ - - msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect); -#if 0 - msg(DBG_MUL,"read LBA %d\n", block/4); -#endif - - sbp_transfer(req); - /* if we satisfied the request from the buffer, we're done. */ - if (req->nr_sectors == 0) - { -#ifdef DEBUG_GTL - printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 2, Time:%li\n", - xnr, req, req->sector, req->nr_sectors, jiffies); -#endif - up(&ioctl_read_sem); - spin_lock_irq(q->queue_lock); - end_request(req, 1); - goto request_loop; - } - -#ifdef FUTURE - i=prepare(0,0); /* at moment not really a hassle check, but ... */ - if (i!=0) - msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i); -#endif /* FUTURE */ - - if (!st_spinning) cc_SpinUp(); - - for (data_tries=n_retries; data_tries > 0; data_tries--) - { - for (status_tries=3; status_tries > 0; status_tries--) - { - flags_cmd_out |= f_respo3; - cc_ReadStatus(); - if (sbp_status() != 0) break; - if (st_check) cc_ReadError(); - sbp_sleep(1); /* wait a bit, try again */ - } - if (status_tries == 0) - { - msg(DBG_INF,"sbp_status: failed after 3 tries in line %d\n", __LINE__); - break; - } - - sbp_read_cmd(req); - sbp_sleep(0); - if (sbp_data(req) != 0) - { -#ifdef SAFE_MIXED - current_drive->has_data=2; /* is really a data disk */ -#endif /* SAFE_MIXED */ -#ifdef DEBUG_GTL - printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n", - xnr, req, req->sector, req->nr_sectors, jiffies); -#endif - up(&ioctl_read_sem); - spin_lock_irq(q->queue_lock); - end_request(req, 1); - goto request_loop; - } - } - - err_done: -#if OLD_BUSY - busy_data=0; -#endif /* OLD_BUSY */ -#ifdef DEBUG_GTL - printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 4 (error), Time:%li\n", - xnr, req, req->sector, req->nr_sectors, jiffies); -#endif - up(&ioctl_read_sem); - sbp_sleep(0); /* wait a bit, try again */ - spin_lock_irq(q->queue_lock); - end_request(req, 0); - goto request_loop; -} -/*==========================================================================*/ -/* - * build and send the READ command. 
- */ -static void sbp_read_cmd(struct request *req) -{ -#undef OLD + else if (fam1_drive) + { + drvcmd[0]=CMD1_READ; /* "read frames", new drives */ + lba2msf(block,&drvcmd[1]); /* msf-bin format required */ + drvcmd[4]=0; + drvcmd[5]=0; + drvcmd[6]=read_audio.nframes; /* # of frames */ + } + else if (fam2_drive) + { + drvcmd[0]=CMD2_READ_XA2; + lba2msf(block,&drvcmd[1]); /* msf-bin format required */ + drvcmd[4]=0; + drvcmd[5]=read_audio.nframes; /* # of frames */ + drvcmd[6]=0x11; /* raw mode */ + } + else if (famT_drive) /* CD-55A: not tested yet */ + { + } + msg(DBG_AUD,"read_audio: before giving \"read\" command.\n"); + flags_cmd_out=f_putcmd; + response_count=0; + i=cmd_out(); + if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i); + sbp_sleep(0); + msg(DBG_AUD,"read_audio: after giving \"read\" command.\n"); + for (frame=1;frame<2 && !error_flag; frame++) + { + try=maxtim_data; + for (timeout=jiffies+9*HZ; ; ) + { + for ( ; try!=0;try--) + { + j=inb(CDi_status); + if (!(j&s_not_data_ready)) break; + if (!(j&s_not_result_ready)) break; + if (fam0L_drive) if (j&s_attention) break; + } + if (try != 0 || time_after_eq(jiffies, timeout)) break; + if (data_retrying == 0) data_waits++; + data_retrying = 1; + sbp_sleep(1); + try = 1; + } + if (try==0) + { + msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n"); + error_flag++; + break; + } + msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n"); + if (j&s_not_data_ready) + { + msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n"); + error_flag++; + break; + } + msg(DBG_AUD,"read_audio: before reading data.\n"); + error_flag=0; + p = current_drive->aud_buf; + if (sbpro_type==1) OUT(CDo_sel_i_d,1); + if (do_16bit) + { + u_short *p2 = (u_short *) p; - int i; - int block; + for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) + { + if ((inb_p(CDi_status)&s_not_data_ready)) continue; - current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */ - current_drive->sbp_current = 0; - block=req->sector/4; - if (block+current_drive->sbp_bufsiz <= current_drive->CDsize_frm) - current_drive->sbp_read_frames = current_drive->sbp_bufsiz; - else - { - current_drive->sbp_read_frames=current_drive->CDsize_frm-block; - /* avoid reading past end of data */ - if (current_drive->sbp_read_frames < 1) - { - msg(DBG_INF,"requested frame %d, CD size %d ???\n", - block, current_drive->CDsize_frm); - current_drive->sbp_read_frames=1; - } - } + /* get one sample */ + *p2++ = inw_p(CDi_data); + *p2++ = inw_p(CDi_data); + } + } else { + for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) + { + if ((inb_p(CDi_status)&s_not_data_ready)) continue; - flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; - clr_cmdbuf(); - if (famV_drive) - { - drvcmd[0]=CMDV_READ; - lba2msf(block,&drvcmd[1]); /* msf-bcd format required */ - bin2bcdx(&drvcmd[1]); - bin2bcdx(&drvcmd[2]); - bin2bcdx(&drvcmd[3]); - drvcmd[4]=current_drive->sbp_read_frames>>8; - drvcmd[5]=current_drive->sbp_read_frames&0xff; - drvcmd[6]=0x02; /* flag "msf-bcd" */ - } - else if (fam0L_drive) - { - flags_cmd_out |= f_lopsta | f_getsta | f_bit1; - if (current_drive->xa_byte==0x20) - { - cmd_type=READ_M2; - drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */ - drvcmd[1]=(block>>16)&0x0ff; - drvcmd[2]=(block>>8)&0x0ff; - drvcmd[3]=block&0x0ff; - drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff; - drvcmd[5]=current_drive->sbp_read_frames&0x0ff; - } - else - { - drvcmd[0]=CMD0_READ; /* "read 
frames", old drives */ - if (current_drive->drv_type>=drv_201) + /* get one sample */ + *p++ = inb_p(CDi_data); + *p++ = inb_p(CDi_data); + *p++ = inb_p(CDi_data); + *p++ = inb_p(CDi_data); + } + } + if (sbpro_type==1) OUT(CDo_sel_i_d,0); + data_retrying = 0; + } + msg(DBG_AUD,"read_audio: after reading data.\n"); + if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */ { - lba2msf(block,&drvcmd[1]); /* msf-bcd format required */ - bin2bcdx(&drvcmd[1]); - bin2bcdx(&drvcmd[2]); - bin2bcdx(&drvcmd[3]); + msg(DBG_AUD,"read_audio: read aborted by drive\n"); +#if 0000 + i=cc_DriveReset(); /* ugly fix to prevent a hang */ +#else + i=cc_ReadError(); +#endif + continue; } - else + if (fam0L_drive) { - drvcmd[1]=(block>>16)&0x0ff; - drvcmd[2]=(block>>8)&0x0ff; - drvcmd[3]=block&0x0ff; + i=maxtim_data; + for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--) + { + for ( ;i!=0;i--) + { + j=inb(CDi_status); + if (!(j&s_not_data_ready)) break; + if (!(j&s_not_result_ready)) break; + if (j&s_attention) break; + } + if (i != 0 || time_after_eq(jiffies, timeout)) break; + sbp_sleep(0); + i = 1; + } + if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ"); + if (!(j&s_attention)) + { + msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n"); + i=cc_DriveReset(); /* ugly fix to prevent a hang */ + continue; + } } - drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff; - drvcmd[5]=current_drive->sbp_read_frames&0x0ff; - drvcmd[6]=(current_drive->drv_typestatus_bits); + continue; /* FIXME */ + } + } + while ((fam0L_drive)&&(!st_check)&&(!(i&p_success))); + if (st_check) + { + i=cc_ReadError(); + msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i); + continue; + } + if (copy_to_user(read_audio.buf, + current_drive->aud_buf, + read_audio.nframes * CD_FRAMESIZE_RAW)) + RETURN_UP(-EFAULT); + msg(DBG_AUD,"read_audio: copy_to_user done.\n"); + break; } - } - else if (fam1_drive) - { - drvcmd[0]=CMD1_READ; - lba2msf(block,&drvcmd[1]); /* msf-bin format required */ - drvcmd[5]=(current_drive->sbp_read_frames>>8)&0x0ff; - drvcmd[6]=current_drive->sbp_read_frames&0x0ff; - } - else if (fam2_drive) - { - drvcmd[0]=CMD2_READ; - lba2msf(block,&drvcmd[1]); /* msf-bin format required */ - drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff; - drvcmd[5]=current_drive->sbp_read_frames&0x0ff; - drvcmd[6]=0x02; - } - else if (famT_drive) - { - drvcmd[0]=CMDT_READ; - drvcmd[2]=(block>>24)&0x0ff; - drvcmd[3]=(block>>16)&0x0ff; - drvcmd[4]=(block>>8)&0x0ff; - drvcmd[5]=block&0x0ff; - drvcmd[7]=(current_drive->sbp_read_frames>>8)&0x0ff; - drvcmd[8]=current_drive->sbp_read_frames&0x0ff; - } - flags_cmd_out=f_putcmd; - response_count=0; - i=cmd_out(); - if (i<0) msg(DBG_INF,"error giving READ command: %0d\n", i); - return; + cc_ModeSelect(CD_FRAMESIZE); + cc_ModeSense(); + current_drive->mode=READ_M1; +#if OLD_BUSY + busy_audio=0; +#endif /* OLD_BUSY */ + if (data_tries == 0) + { + msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__); + RETURN_UP(-EIO); + } + msg(DBG_AUD,"read_audio: successful return.\n"); + RETURN_UP(0); + } /* end of CDROMREADAUDIO */ + + default: + msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); + RETURN_UP(-EINVAL); + } /* end switch(cmd) */ } -/*==========================================================================*/ -/* - * Check the completion of the read-data command. On success, read - * the current_drive->sbp_bufsiz * 2048 bytes of data from the disk into buffer. 
- */ -static int sbp_data(struct request *req) -{ - int i=0, j=0, l, frame; - u_int try=0; - u_long timeout; - u_char *p; - u_int data_tries = 0; - u_int data_waits = 0; - u_int data_retrying = 0; - int error_flag; - int xa_count; - int max_latency; - int success; - int wait; - int duration; - error_flag=0; - success=0; -#if LONG_TIMING - max_latency=9*HZ; -#else - if (current_drive->f_multisession) max_latency=15*HZ; - else max_latency=5*HZ; -#endif - duration=jiffies; - for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++) +static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd, + void * arg) +{ + struct sbpcd_drive *p = cdi->handle; + int i, st, j; + + msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08p)\n", cdi->name, cmd, arg); + if (p->drv_id==-1) { + msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name); + return (-ENXIO); /* no such drive */ + } + down(&ioctl_read_sem); + if (p != current_drive) + switch_drive(p); + + msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd); + switch (cmd) /* Sun-compatible */ { - SBPCD_CLI; - - del_timer(&data_timer); - data_timer.expires=jiffies+max_latency; - timed_out_data=0; - add_timer(&data_timer); - while (!timed_out_data) + + case CDROMPAUSE: /* Pause the drive */ + msg(DBG_IOC,"ioctl: CDROMPAUSE entered.\n"); + /* pause the drive unit when it is currently in PLAY mode, */ + /* or reset the starting and ending locations when in PAUSED mode. */ + /* If applicable, at the next stopping point it reaches */ + /* the drive will discontinue playing. */ + switch (current_drive->audio_state) { - if (current_drive->f_multisession) try=maxtim_data*4; - else try=maxtim_data; - msg(DBG_000,"sbp_data: CDi_status loop: try=%d.\n",try); - for ( ; try!=0;try--) - { - j=inb(CDi_status); - if (!(j&s_not_data_ready)) break; - if (!(j&s_not_result_ready)) break; - if (fam0LV_drive) if (j&s_attention) break; - } - if (!(j&s_not_data_ready)) goto data_ready; - if (try==0) - { - if (data_retrying == 0) data_waits++; - data_retrying = 1; - msg(DBG_000,"sbp_data: CDi_status loop: sleeping.\n"); - sbp_sleep(1); - try = 1; - } + case audio_playing: + if (famL_drive) i=cc_ReadSubQ(); + else i=cc_Pause_Resume(1); + if (i<0) RETURN_UP(-EIO); + if (famL_drive) i=cc_Pause_Resume(1); + else i=cc_ReadSubQ(); + if (i<0) RETURN_UP(-EIO); + current_drive->pos_audio_start=current_drive->SubQ_run_tot; + current_drive->audio_state=audio_pausing; + RETURN_UP(0); + case audio_pausing: + i=cc_Seek(current_drive->pos_audio_start,1); + if (i<0) RETURN_UP(-EIO); + RETURN_UP(0); + default: + RETURN_UP(-EINVAL); } - msg(DBG_INF,"sbp_data: CDi_status loop expired.\n"); - data_ready: - del_timer(&data_timer); - - if (timed_out_data) + + case CDROMRESUME: /* resume paused audio play */ + msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n"); + /* resume playing audio tracks when a previous PLAY AUDIO call has */ + /* been paused with a PAUSE command. */ + /* It will resume playing from the location saved in SubQ_run_tot.
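Every branch of sbpcd_audio_ioctl() above enters through down(&ioctl_read_sem) and leaves through RETURN_UP(), so the semaphore is released on every exit path. A sketch of that convention; the RETURN_UP definition shown here is inferred from how it is used, not quoted from the driver:

    #include <asm/semaphore.h>

    static DECLARE_MUTEX(ioctl_read_sem);      /* 2.6-era semaphore, initialised to 1 */

    /* One macro releases the lock on every return path. */
    #define RETURN_UP(rc) do { up(&ioctl_read_sem); return (rc); } while (0)

    static int example_ioctl(unsigned int cmd)
    {
        down(&ioctl_read_sem);                 /* serialize against the request path */
        if (cmd == 0)
            RETURN_UP(-EINVAL);                /* error exits unlock too */
        /* ... talk to the drive ... */
        RETURN_UP(0);
    }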
*/ + if (current_drive->audio_state!=audio_pausing) RETURN_UP(-EINVAL); + if (famL_drive) + i=cc_PlayAudio(current_drive->pos_audio_start, + current_drive->pos_audio_end); + else i=cc_Pause_Resume(3); + if (i<0) RETURN_UP(-EIO); + current_drive->audio_state=audio_playing; + RETURN_UP(0); + + case CDROMPLAYMSF: + msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n"); +#ifdef SAFE_MIXED + if (current_drive->has_data>1) RETURN_UP(-EBUSY); +#endif /* SAFE_MIXED */ + if (current_drive->audio_state==audio_playing) { - msg(DBG_INF,"sbp_data: CDi_status timeout (timed_out_data) (%02X).\n", j); - error_flag++; + i=cc_Pause_Resume(1); + if (i<0) RETURN_UP(-EIO); + i=cc_ReadSubQ(); + if (i<0) RETURN_UP(-EIO); + current_drive->pos_audio_start=current_drive->SubQ_run_tot; + i=cc_Seek(current_drive->pos_audio_start,1); } - if (try==0) + memcpy(&msf, (void *) arg, sizeof(struct cdrom_msf)); + /* values come as msf-bin */ + current_drive->pos_audio_start = (msf.cdmsf_min0<<16) | + (msf.cdmsf_sec0<<8) | + msf.cdmsf_frame0; + current_drive->pos_audio_end = (msf.cdmsf_min1<<16) | + (msf.cdmsf_sec1<<8) | + msf.cdmsf_frame1; + msg(DBG_IOX,"ioctl: CDROMPLAYMSF %08X %08X\n", + current_drive->pos_audio_start,current_drive->pos_audio_end); + i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end); + if (i<0) { - msg(DBG_INF,"sbp_data: CDi_status timeout (try=0) (%02X).\n", j); - error_flag++; + msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i); + DriveReset(); + current_drive->audio_state=0; + RETURN_UP(-EIO); } - if (!(j&s_not_result_ready)) + current_drive->audio_state=audio_playing; + RETURN_UP(0); + + case CDROMPLAYTRKIND: /* Play a track. This currently ignores index. */ + msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n"); +#ifdef SAFE_MIXED + if (current_drive->has_data>1) RETURN_UP(-EBUSY); +#endif /* SAFE_MIXED */ + if (current_drive->audio_state==audio_playing) { - msg(DBG_INF, "sbp_data: RESULT_READY where DATA_READY awaited (%02X).\n", j); - response_count=20; - j=ResponseInfo(); - j=inb(CDi_status); + msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n"); +#if 1 + RETURN_UP(0); /* just let us play on */ +#else + RETURN_UP(-EINVAL); /* play on, but say "error" */ +#endif } - if (j&s_not_data_ready) + memcpy(&ti,(void *) arg,sizeof(struct cdrom_ti)); + msg(DBG_IOX,"ioctl: trk0: %d, ind0: %d, trk1:%d, ind1:%d\n", + ti.cdti_trk0,ti.cdti_ind0,ti.cdti_trk1,ti.cdti_ind1); + if (ti.cdti_trk0<current_drive->n_first_track) RETURN_UP(-EINVAL); + if (ti.cdti_trk0>current_drive->n_last_track) RETURN_UP(-EINVAL); + if (ti.cdti_trk1<ti.cdti_trk0) ti.cdti_trk1=ti.cdti_trk0; + if (ti.cdti_trk1>current_drive->n_last_track) ti.cdti_trk1=current_drive->n_last_track; + current_drive->pos_audio_start=current_drive->TocBuffer[ti.cdti_trk0].address; + current_drive->pos_audio_end=current_drive->TocBuffer[ti.cdti_trk1+1].address; + i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end); + if (i<0) { - if ((current_drive->ored_ctl_adr&0x40)==0) - msg(DBG_INF, "CD contains no data tracks.\n"); - else msg(DBG_INF, "sbp_data: DATA_READY timeout (%02X).\n", j); - error_flag++; + msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i); + DriveReset(); + current_drive->audio_state=0; + RETURN_UP(-EIO); } - SBPCD_STI; - if (error_flag) break; + current_drive->audio_state=audio_playing; + RETURN_UP(0); + + case CDROMREADTOCHDR: /* Read the table of contents header */ + msg(DBG_IOC,"ioctl: CDROMREADTOCHDR entered.\n"); + tochdr.cdth_trk0=current_drive->n_first_track; + tochdr.cdth_trk1=current_drive->n_last_track; + memcpy((void *) arg, &tochdr, sizeof(struct cdrom_tochdr)); +
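CDROMPLAYMSF above folds the three MSF bytes into a single integer, 0x00MMSSFF, which is the representation pos_audio_start and pos_audio_end use throughout the driver. A small sketch of that packing and of the corresponding block conversion; msf2blk() is assumed to do the equivalent of the second helper:

    /* Pack binary minute/second/frame into the driver's 0x00MMSSFF form. */
    static unsigned int msf_pack(unsigned int min, unsigned int sec, unsigned int frame)
    {
        return (min << 16) | (sec << 8) | frame;
    }

    /* Packed MSF back to a logical block number. */
    static int msf2blk_sketch(unsigned int msf)
    {
        unsigned int min   = (msf >> 16) & 0xff;
        unsigned int sec   = (msf >>  8) & 0xff;
        unsigned int frame =  msf        & 0xff;

        return (min * 60 + sec) * 75 + frame - 150;    /* drop the lead-in */
    }

For example, 2 minutes 0 seconds 0 frames packs to 0x020000 and maps back to block 8850.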
RETURN_UP(0); + + case CDROMREADTOCENTRY: /* Read an entry in the table of contents */ + msg(DBG_IOC,"ioctl: CDROMREADTOCENTRY entered.\n"); + memcpy(&tocentry, (void *) arg, sizeof(struct cdrom_tocentry)); + i=tocentry.cdte_track; + if (i==CDROM_LEADOUT) i=current_drive->n_last_track+1; + else if (i<current_drive->n_first_track||i>current_drive->n_last_track) + RETURN_UP(-EINVAL); + tocentry.cdte_adr=current_drive->TocBuffer[i].ctl_adr&0x0F; + tocentry.cdte_ctrl=(current_drive->TocBuffer[i].ctl_adr>>4)&0x0F; + tocentry.cdte_datamode=current_drive->TocBuffer[i].format; + if (tocentry.cdte_format==CDROM_MSF) /* MSF-bin required */ + { + tocentry.cdte_addr.msf.minute=(current_drive->TocBuffer[i].address>>16)&0x00FF; + tocentry.cdte_addr.msf.second=(current_drive->TocBuffer[i].address>>8)&0x00FF; + tocentry.cdte_addr.msf.frame=current_drive->TocBuffer[i].address&0x00FF; + } + else if (tocentry.cdte_format==CDROM_LBA) /* blk required */ + tocentry.cdte_addr.lba=msf2blk(current_drive->TocBuffer[i].address); + else RETURN_UP(-EINVAL); + memcpy((void *) arg, &tocentry, sizeof(struct cdrom_tocentry)); + RETURN_UP(0); + + case CDROMSTOP: /* Spin down the drive */ + msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n"); +#ifdef SAFE_MIXED + if (current_drive->has_data>1) RETURN_UP(-EBUSY); +#endif /* SAFE_MIXED */ + i=cc_Pause_Resume(1); + current_drive->audio_state=0; +#if 0 + cc_DriveReset(); +#endif + RETURN_UP(i); + + case CDROMSTART: /* Spin up the drive */ + msg(DBG_IOC,"ioctl: CDROMSTART entered.\n"); + cc_SpinUp(); + current_drive->audio_state=0; + RETURN_UP(0); + + case CDROMVOLCTRL: /* Volume control */ + msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n"); + memcpy(&volctrl,(char *) arg,sizeof(volctrl)); + current_drive->vol_chan0=0; + current_drive->vol_ctrl0=volctrl.channel0; + current_drive->vol_chan1=1; + current_drive->vol_ctrl1=volctrl.channel1; + i=cc_SetVolume(); + RETURN_UP(0); + + case CDROMVOLREAD: /* read Volume settings from drive */ + msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n"); + st=cc_GetVolume(); + if (st<0) RETURN_UP(st); + volctrl.channel0=current_drive->vol_ctrl0; + volctrl.channel1=current_drive->vol_ctrl1; + volctrl.channel2=0; + volctrl.channel3=0; + memcpy((void *)arg,&volctrl,sizeof(volctrl)); + RETURN_UP(0); - msg(DBG_000, "sbp_data: beginning to read.\n"); - p = current_drive->sbp_buf + frame * CD_FRAMESIZE; - if (sbpro_type==1) OUT(CDo_sel_i_d,1); - if (cmd_type==READ_M2) { - if (do_16bit) insw(CDi_data, xa_head_buf, CD_XA_HEAD>>1); - else insb(CDi_data, xa_head_buf, CD_XA_HEAD); + case CDROMSUBCHNL: /* Get subchannel info */ + msg(DBG_IOS,"ioctl: CDROMSUBCHNL entered.\n"); + /* Bogus, I can do better than this! --AJK + if ((st_spinning)||(!subq_valid)) { + i=cc_ReadSubQ(); + if (i<0) RETURN_UP(-EIO); } - if (do_16bit) insw(CDi_data, p, CD_FRAMESIZE>>1); - else insb(CDi_data, p, CD_FRAMESIZE); - if (cmd_type==READ_M2) { - if (do_16bit) insw(CDi_data, xa_tail_buf, CD_XA_TAIL>>1); - else insb(CDi_data, xa_tail_buf, CD_XA_TAIL); + */ + i=cc_ReadSubQ(); + if (i<0) { + j=cc_ReadError(); /* clear out error status from drive */ + current_drive->audio_state=CDROM_AUDIO_NO_STATUS; + /* get and set the disk state here, + probably not the right place, but who cares! + It makes it work properly! --AJK */ + if (current_drive->CD_changed==0xFF) { + msg(DBG_000,"Disk changed detect\n"); + current_drive->diskstate_flags &= ~cd_size_bit; + } + RETURN_UP(-EIO); + } + if (current_drive->CD_changed==0xFF) { + /* reread the TOC because the disk has changed!
--AJK */ + msg(DBG_000,"Disk changed STILL detected, rereading TOC!\n"); + i=DiskInfo(); + if(i==0) { + current_drive->CD_changed=0x00; /* cd has changed, proceed, */ + RETURN_UP(-EIO); /* and get TOC, etc on next try! --AJK */ + } else { + RETURN_UP(-EIO); /* we weren't ready yet! --AJK */ + } + } + memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl)); + /* + This virtual crap is very bogus! + It doesn't detect when the cd is done playing audio! + Let's do this right with proper hardware register reading! + */ + cc_ReadStatus(); + i=ResponseStatus(); + msg(DBG_000,"Drive Status: door_locked =%d.\n", st_door_locked); + msg(DBG_000,"Drive Status: door_closed =%d.\n", st_door_closed); + msg(DBG_000,"Drive Status: caddy_in =%d.\n", st_caddy_in); + msg(DBG_000,"Drive Status: disk_ok =%d.\n", st_diskok); + msg(DBG_000,"Drive Status: spinning =%d.\n", st_spinning); + msg(DBG_000,"Drive Status: busy =%d.\n", st_busy); + /* st_busy indicates if it's _ACTUALLY_ playing audio */ + switch (current_drive->audio_state) + { + case audio_playing: + if(st_busy==0) { + /* CD has stopped playing audio --AJK */ + current_drive->audio_state=audio_completed; + SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED; + } else { + SC.cdsc_audiostatus=CDROM_AUDIO_PLAY; + } + break; + case audio_pausing: + SC.cdsc_audiostatus=CDROM_AUDIO_PAUSED; + break; + case audio_completed: + SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED; + break; + default: + SC.cdsc_audiostatus=CDROM_AUDIO_NO_STATUS; + break; } - current_drive->sbp_current++; - if (sbpro_type==1) OUT(CDo_sel_i_d,0); - if (cmd_type==READ_M2) + SC.cdsc_adr=current_drive->SubQ_ctl_adr; + SC.cdsc_ctrl=current_drive->SubQ_ctl_adr>>4; + SC.cdsc_trk=bcd2bin(current_drive->SubQ_trk); + SC.cdsc_ind=bcd2bin(current_drive->SubQ_pnt_idx); + if (SC.cdsc_format==CDROM_LBA) { - for (xa_count=0;xa_count<CD_XA_HEAD;xa_count++) - sprintf(&msgbuf[xa_count*3], " %02X", xa_head_buf[xa_count]); - msgbuf[xa_count*3]=0; - msg(DBG_XA1,"xa head:%s\n", msgbuf); + SC.cdsc_absaddr.lba=msf2blk(current_drive->SubQ_run_tot); + SC.cdsc_reladdr.lba=msf2blk(current_drive->SubQ_run_trk); } - data_retrying = 0; - data_tries++; - if (data_tries >= 1000) + else /* not only if (SC.cdsc_format==CDROM_MSF) */ { - msg(DBG_INF,"sbp_data() statistics: %d waits in %d frames.\n", data_waits, data_tries); - data_waits = data_tries = 0; + SC.cdsc_absaddr.msf.minute=(current_drive->SubQ_run_tot>>16)&0x00FF; + SC.cdsc_absaddr.msf.second=(current_drive->SubQ_run_tot>>8)&0x00FF; + SC.cdsc_absaddr.msf.frame=current_drive->SubQ_run_tot&0x00FF; + SC.cdsc_reladdr.msf.minute=(current_drive->SubQ_run_trk>>16)&0x00FF; + SC.cdsc_reladdr.msf.second=(current_drive->SubQ_run_trk>>8)&0x00FF; + SC.cdsc_reladdr.msf.frame=current_drive->SubQ_run_trk&0x00FF; } - } - duration=jiffies-duration; - msg(DBG_TEA,"time to read %d frames: %d jiffies .\n",frame,duration); - if (famT_drive) + memcpy((void *) arg, &SC, sizeof(struct cdrom_subchnl)); + msg(DBG_IOS,"CDROMSUBCHNL: %1X %02X %08X %08X %02X %02X %06X %06X\n", + SC.cdsc_format,SC.cdsc_audiostatus, + SC.cdsc_adr,SC.cdsc_ctrl, + SC.cdsc_trk,SC.cdsc_ind, + SC.cdsc_absaddr,SC.cdsc_reladdr); + RETURN_UP(0); + + default: + msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); + RETURN_UP(-EINVAL); + } /* end switch(cmd) */ +} +/*==========================================================================*/ +/* + * Take care of the different block sizes between cdrom and Linux.
+ */ +static void sbp_transfer(struct request *req) +{ + long offs; + + while ( (req->nr_sectors > 0) && + (req->sector/4 >= current_drive->sbp_first_frame) && + (req->sector/4 <= current_drive->sbp_last_frame) ) { - wait=8; - do - { - if (teac==2) - { - if ((i=CDi_stat_loop_T()) == -1) break; - } - else - { - sbp_sleep(1); - OUT(CDo_sel_i_d,0); - i=inb(CDi_status); - } - if (!(i&s_not_data_ready)) - { - OUT(CDo_sel_i_d,1); - j=0; - do - { - if (do_16bit) i=inw(CDi_data); - else i=inb(CDi_data); - j++; - i=inb(CDi_status); - } - while (!(i&s_not_data_ready)); - msg(DBG_TEA, "==========too much data (%d bytes/words)==============.\n", j); - } - if (!(i&s_not_result_ready)) - { - OUT(CDo_sel_i_d,0); - l=0; - do - { - infobuf[l++]=inb(CDi_info); - i=inb(CDi_status); - } - while (!(i&s_not_result_ready)); - if (infobuf[0]==0x00) success=1; -#if 1 - for (j=0;j<l;j++) sprintf(&msgbuf[j*3], " %02X", infobuf[j]); - msgbuf[j*3]=0; - msg(DBG_TEA,"sbp_data info response:%s\n", msgbuf); -#endif + offs = (req -> sector - current_drive->sbp_first_frame * 4) * 512; + memcpy(req->buffer, current_drive->sbp_buf + offs, 512); + req->nr_sectors--; + req->sector++; + req->buffer += 512; + } +} +/*==========================================================================*/ +/* + * special end_request for sbpcd to solve CURRENT==NULL bug. (GTL) + * GTL = Gonzalo Tornaria + * + * This is a kludge so we don't need to modify end_request. + * We put the req we take out after INIT_REQUEST in the requests list, + * so that end_request will discard it. + * + * The bug could be present in other block devices, perhaps we + * should modify INIT_REQUEST and end_request instead, and + * change every block device.. + * + * Could be a race here?? Could e.g. a timer interrupt schedule() us? + * If so, we should copy end_request here, and do it right.. (or + * modify end_request and the block devices). + * + * In any case, the race here would be much smaller than it was, and + * I couldn't reproduce.. + * + * The race could be: suppose CURRENT==NULL. We put our req in the list, + * and we are scheduled. Other process takes over, and gets into + * do_sbpcd_request. It sees CURRENT!=NULL (it is == to our req), so + * proceeds. It ends, so CURRENT is now NULL.. Now we awake somewhere in + * end_request, but now CURRENT==NULL... oops! + * + */ +#undef DEBUG_GTL + +/*==========================================================================*/ +/* + * I/O request routine, called from Linux kernel. + */ +static void do_sbpcd_request(request_queue_t * q) +{ + u_int block; + u_int nsect; + int status_tries, data_tries; + struct request *req; + struct sbpcd_drive *p; +#ifdef DEBUG_GTL + static int xx_nr=0; + int xnr; #endif - if (infobuf[0]==0x02) - { - error_flag++; - do - { - ++recursion; - if (recursion>1) msg(DBG_TEA,"cmd_out_T READ_ERR recursion (sbp_data): %d !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",recursion); - else msg(DBG_TEA,"sbp_data: CMDT_READ_ERR necessary.\n"); - clr_cmdbuf(); - drvcmd[0]=CMDT_READ_ERR; - j=cmd_out_T(); /* !!! recursive here !!!
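sbp_transfer() above bridges the two block sizes: the drive buffers whole 2048-byte CD frames, while the block layer asks for 512-byte sectors, so four sectors live in each frame. The arithmetic, pulled out as a standalone sketch (names invented for illustration):

    #define SECTOR_SIZE  512
    #define CD_FRAMESIZE 2048    /* one CD data frame = 4 Linux sectors */

    /* The frame that holds a given 512-byte sector: sector/4. */
    static unsigned long sector_to_frame(unsigned long sector)
    {
        return sector / (CD_FRAMESIZE / SECTOR_SIZE);
    }

    /* Byte offset of that sector inside a buffer starting at first_frame. */
    static long sector_to_buf_offset(unsigned long sector, unsigned long first_frame)
    {
        return (sector - first_frame * 4) * SECTOR_SIZE;
    }

So with frames 100..115 buffered, sector 403 lives in frame 100 (403/4) at byte offset (403 - 400) * 512 = 1536.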
*/ - --recursion; - sbp_sleep(1); - } - while (j<0); - current_drive->error_state=infobuf[2]; - current_drive->b3=infobuf[3]; - current_drive->b4=infobuf[4]; - } - break; - } - else - { -#if 0 - msg(DBG_TEA, "============= waiting for result=================.\n"); - sbp_sleep(1); + + request_loop: +#ifdef DEBUG_GTL + xnr=++xx_nr; + + req = elv_next_request(q); + + if (!req) + { + printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n", + xnr, current->pid, jiffies); + printk( "do_sbpcd_request[%do](NULL) end 0 (null), Time:%li\n", + xnr, jiffies); + return; + } + + printk(" do_sbpcd_request[%di](%p:%ld+%ld), Pid:%d, Time:%li\n", + xnr, req, req->sector, req->nr_sectors, current->pid, jiffies); #endif - } - } - while (wait--); + + req = elv_next_request(q); /* take out our request so no other */ + if (!req) + return; + + if (req -> sector == -1) + end_request(req, 0); + spin_unlock_irq(q->queue_lock); + + down(&ioctl_read_sem); + if (rq_data_dir(elv_next_request(q)) != READ) + { + msg(DBG_INF, "bad cmd %d\n", req->cmd[0]); + goto err_done; } + p = req->rq_disk->private_data; +#if OLD_BUSY + while (busy_audio) sbp_sleep(HZ); /* wait a bit */ + busy_data=1; +#endif /* OLD_BUSY */ + + if (p->audio_state==audio_playing) goto err_done; + if (p != current_drive) + switch_drive(p); - if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */ + block = req->sector; /* always numbered as 512-byte-pieces */ + nsect = req->nr_sectors; /* always counted as 512-byte-pieces */ + + msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect); +#if 0 + msg(DBG_MUL,"read LBA %d\n", block/4); +#endif + + sbp_transfer(req); + /* if we satisfied the request from the buffer, we're done. */ + if (req->nr_sectors == 0) { - msg(DBG_TEA, "================error flag: %d=================.\n", error_flag); - msg(DBG_INF,"sbp_data: read aborted by drive.\n"); -#if 1 - i=cc_DriveReset(); /* ugly fix to prevent a hang */ -#else - i=cc_ReadError(); +#ifdef DEBUG_GTL + printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 2, Time:%li\n", + xnr, req, req->sector, req->nr_sectors, jiffies); #endif - return (0); + up(&ioctl_read_sem); + spin_lock_irq(q->queue_lock); + end_request(req, 1); + goto request_loop; } + +#ifdef FUTURE + i=prepare(0,0); /* at moment not really a hassle check, but ... 
*/ + if (i!=0) + msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i); +#endif /* FUTURE */ - if (fam0LV_drive) + if (!st_spinning) cc_SpinUp(); + + for (data_tries=n_retries; data_tries > 0; data_tries--) { - SBPCD_CLI; - i=maxtim_data; - for (timeout=jiffies+HZ; time_before(jiffies, timeout); timeout--) + for (status_tries=3; status_tries > 0; status_tries--) { - for ( ;i!=0;i--) - { - j=inb(CDi_status); - if (!(j&s_not_data_ready)) break; - if (!(j&s_not_result_ready)) break; - if (j&s_attention) break; - } - if (i != 0 || time_after_eq(jiffies, timeout)) break; - sbp_sleep(0); - i = 1; + flags_cmd_out |= f_respo3; + cc_ReadStatus(); + if (sbp_status() != 0) break; + if (st_check) cc_ReadError(); + sbp_sleep(1); /* wait a bit, try again */ } - if (i==0) msg(DBG_INF,"status timeout after READ.\n"); - if (!(j&s_attention)) + if (status_tries == 0) { - msg(DBG_INF,"sbp_data: timeout waiting DRV_ATTN - retrying.\n"); - i=cc_DriveReset(); /* ugly fix to prevent a hang */ - SBPCD_STI; - return (0); + msg(DBG_INF,"sbp_status: failed after 3 tries in line %d\n", __LINE__); + break; } - SBPCD_STI; - } - -#if 0 - if (!success) -#endif - do + + sbp_read_cmd(req); + sbp_sleep(0); + if (sbp_data(req) != 0) { - if (fam0LV_drive) cc_ReadStatus(); -#if 1 - if (famT_drive) msg(DBG_TEA, "================before ResponseStatus=================.\n", i); -#endif - i=ResponseStatus(); /* builds status_bits, returns orig. status (old) or faked p_success (new) */ -#if 1 - if (famT_drive) msg(DBG_TEA, "================ResponseStatus: %d=================.\n", i); +#ifdef SAFE_MIXED + current_drive->has_data=2; /* is really a data disk */ +#endif /* SAFE_MIXED */ +#ifdef DEBUG_GTL + printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n", + xnr, req, req->sector, req->nr_sectors, jiffies); #endif - if (i<0) - { - msg(DBG_INF,"bad cc_ReadStatus after read: %02X\n", current_drive->status_bits); - return (0); - } + up(&ioctl_read_sem); + spin_lock_irq(q->queue_lock); + end_request(req, 1); + goto request_loop; } - while ((fam0LV_drive)&&(!st_check)&&(!(i&p_success))); - if (st_check) - { - i=cc_ReadError(); - msg(DBG_INF,"cc_ReadError was necessary after read: %d\n",i); - return (0); - } - if (fatal_err) - { - fatal_err=0; - current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */ - current_drive->sbp_current = 0; - msg(DBG_INF,"sbp_data: fatal_err - retrying.\n"); - return (0); } - current_drive->sbp_first_frame = req -> sector / 4; - current_drive->sbp_last_frame = current_drive->sbp_first_frame + current_drive->sbp_read_frames - 1; - sbp_transfer(req); - return (1); + err_done: +#if OLD_BUSY + busy_data=0; +#endif /* OLD_BUSY */ +#ifdef DEBUG_GTL + printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 4 (error), Time:%li\n", + xnr, req, req->sector, req->nr_sectors, jiffies); +#endif + up(&ioctl_read_sem); + sbp_sleep(0); /* wait a bit, try again */ + spin_lock_irq(q->queue_lock); + end_request(req, 0); + goto request_loop; } /*==========================================================================*/ - -static int sbpcd_block_open(struct inode *inode, struct file *file) -{ - struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; - return cdrom_open(p->sbpcd_infop, inode, file); -} - -static int sbpcd_block_release(struct inode *inode, struct file *file) -{ - struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; - return cdrom_release(p->sbpcd_infop, file); -} - -static int sbpcd_block_ioctl(struct inode *inode, struct file *file, - unsigned cmd, unsigned long arg) 
+/* + * build and send the READ command. + */ +static void sbp_read_cmd(struct request *req) { - struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; - struct cdrom_device_info *cdi = p->sbpcd_infop; - int ret, i; - - ret = cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg); - if (ret != -ENOSYS) - return ret; +#undef OLD - msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg); - if (p->drv_id==-1) { - msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name); - return (-ENXIO); /* no such drive */ - } - down(&ioctl_read_sem); - if (p != current_drive) - switch_drive(p); + int i; + int block; - msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd); - switch (cmd) /* Sun-compatible */ + current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */ + current_drive->sbp_current = 0; + block=req->sector/4; + if (block+current_drive->sbp_bufsiz <= current_drive->CDsize_frm) + current_drive->sbp_read_frames = current_drive->sbp_bufsiz; + else { - case DDIOCSDBG: /* DDI Debug */ - if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM); - i=sbpcd_dbg_ioctl(arg,1); - RETURN_UP(i); - case CDROMRESET: /* hard reset the drive */ - msg(DBG_IOC,"ioctl: CDROMRESET entered.\n"); - i=DriveReset(); - current_drive->audio_state=0; - RETURN_UP(i); - - case CDROMREADMODE1: - msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n"); -#ifdef SAFE_MIXED - if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ - cc_ModeSelect(CD_FRAMESIZE); - cc_ModeSense(); - current_drive->mode=READ_M1; - RETURN_UP(0); - - case CDROMREADMODE2: /* not usable at the moment */ - msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n"); -#ifdef SAFE_MIXED - if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ - cc_ModeSelect(CD_FRAMESIZE_RAW1); - cc_ModeSense(); - current_drive->mode=READ_M2; - RETURN_UP(0); - - case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */ - msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n"); - if (current_drive->sbp_audsiz>0) - vfree(current_drive->aud_buf); - current_drive->aud_buf=NULL; - current_drive->sbp_audsiz=arg; - - if (current_drive->sbp_audsiz>16) + current_drive->sbp_read_frames=current_drive->CDsize_frm-block; + /* avoid reading past end of data */ + if (current_drive->sbp_read_frames < 1) { - current_drive->sbp_audsiz = 0; - RETURN_UP(current_drive->sbp_audsiz); + msg(DBG_INF,"requested frame %d, CD size %d ???\n", + block, current_drive->CDsize_frm); + current_drive->sbp_read_frames=1; } + } - if (current_drive->sbp_audsiz>0) - { - current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW); - if (current_drive->aud_buf==NULL) - { - msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz); - current_drive->sbp_audsiz=0; - } - else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz); - } - RETURN_UP(current_drive->sbp_audsiz); - - case CDROMREADAUDIO: - { /* start of CDROMREADAUDIO */ - int i=0, j=0, frame, block=0; - u_int try=0; - u_long timeout; - u_char *p; - u_int data_tries = 0; - u_int data_waits = 0; - u_int data_retrying = 0; - int status_tries; - int error_flag; - - msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n"); - if (fam0_drive) RETURN_UP(-EINVAL); - if (famL_drive) RETURN_UP(-EINVAL); - if (famV_drive) RETURN_UP(-EINVAL); - if (famT_drive) RETURN_UP(-EINVAL); -#ifdef SAFE_MIXED - if (current_drive->has_data>1) RETURN_UP(-EBUSY); -#endif /* SAFE_MIXED */ - if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL); - if 
(copy_from_user(&read_audio, (void __user *)arg, - sizeof(struct cdrom_read_audio))) - RETURN_UP(-EFAULT); - if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL); - if (!access_ok(VERIFY_WRITE, read_audio.buf, - read_audio.nframes*CD_FRAMESIZE_RAW)) - RETURN_UP(-EFAULT); - - if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */ - block=msf2lba(&read_audio.addr.msf.minute); - else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */ - block=read_audio.addr.lba; - else RETURN_UP(-EINVAL); -#if 000 - i=cc_SetSpeed(speed_150,0,0); - if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i); -#endif - msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n", - block, blk2msf(block)); - msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n"); -#if OLD_BUSY - while (busy_data) sbp_sleep(HZ/10); /* wait a bit */ - busy_audio=1; -#endif /* OLD_BUSY */ - error_flag=0; - for (data_tries=5; data_tries>0; data_tries--) + flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; + clr_cmdbuf(); + if (famV_drive) + { + drvcmd[0]=CMDV_READ; + lba2msf(block,&drvcmd[1]); /* msf-bcd format required */ + bin2bcdx(&drvcmd[1]); + bin2bcdx(&drvcmd[2]); + bin2bcdx(&drvcmd[3]); + drvcmd[4]=current_drive->sbp_read_frames>>8; + drvcmd[5]=current_drive->sbp_read_frames&0xff; + drvcmd[6]=0x02; /* flag "msf-bcd" */ + } + else if (fam0L_drive) + { + flags_cmd_out |= f_lopsta | f_getsta | f_bit1; + if (current_drive->xa_byte==0x20) { - msg(DBG_AUD,"data_tries=%d ...\n", data_tries); - current_drive->mode=READ_AU; - cc_ModeSelect(CD_FRAMESIZE_RAW); - cc_ModeSense(); - for (status_tries=3; status_tries > 0; status_tries--) - { - flags_cmd_out |= f_respo3; - cc_ReadStatus(); - if (sbp_status() != 0) break; - if (st_check) cc_ReadError(); - sbp_sleep(1); /* wait a bit, try again */ - } - if (status_tries == 0) - { - msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__); - continue; - } - msg(DBG_AUD,"read_audio: sbp_status: ok.\n"); - - flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check; - if (fam0L_drive) + cmd_type=READ_M2; + drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */ + drvcmd[1]=(block>>16)&0x0ff; + drvcmd[2]=(block>>8)&0x0ff; + drvcmd[3]=block&0x0ff; + drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff; + drvcmd[5]=current_drive->sbp_read_frames&0x0ff; + } + else + { + drvcmd[0]=CMD0_READ; /* "read frames", old drives */ + if (current_drive->drv_type>=drv_201) { - flags_cmd_out |= f_lopsta | f_getsta | f_bit1; - cmd_type=READ_M2; - drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */ - drvcmd[1]=(block>>16)&0x000000ff; - drvcmd[2]=(block>>8)&0x000000ff; - drvcmd[3]=block&0x000000ff; - drvcmd[4]=0; - drvcmd[5]=read_audio.nframes; /* # of frames */ - drvcmd[6]=0; + lba2msf(block,&drvcmd[1]); /* msf-bcd format required */ + bin2bcdx(&drvcmd[1]); + bin2bcdx(&drvcmd[2]); + bin2bcdx(&drvcmd[3]); } - else if (fam1_drive) + else { - drvcmd[0]=CMD1_READ; /* "read frames", new drives */ - lba2msf(block,&drvcmd[1]); /* msf-bin format required */ - drvcmd[4]=0; - drvcmd[5]=0; - drvcmd[6]=read_audio.nframes; /* # of frames */ + drvcmd[1]=(block>>16)&0x0ff; + drvcmd[2]=(block>>8)&0x0ff; + drvcmd[3]=block&0x0ff; } - else if (fam2_drive) + drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff; + drvcmd[5]=current_drive->sbp_read_frames&0x0ff; + drvcmd[6]=(current_drive->drv_type<drv_201)?0:2; + } + } + else if (fam1_drive) + { + drvcmd[0]=CMD1_READ; + lba2msf(block,&drvcmd[1]); /* msf-bin format required */ + drvcmd[5]=(current_drive->sbp_read_frames>>8)&0x0ff; + drvcmd[6]=current_drive->sbp_read_frames&0x0ff; + } + else if
(fam2_drive) + { + drvcmd[0]=CMD2_READ; + lba2msf(block,&drvcmd[1]); /* msf-bin format required */ + drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff; + drvcmd[5]=current_drive->sbp_read_frames&0x0ff; + drvcmd[6]=0x02; + } + else if (famT_drive) + { + drvcmd[0]=CMDT_READ; + drvcmd[2]=(block>>24)&0x0ff; + drvcmd[3]=(block>>16)&0x0ff; + drvcmd[4]=(block>>8)&0x0ff; + drvcmd[5]=block&0x0ff; + drvcmd[7]=(current_drive->sbp_read_frames>>8)&0x0ff; + drvcmd[8]=current_drive->sbp_read_frames&0x0ff; + } + flags_cmd_out=f_putcmd; + response_count=0; + i=cmd_out(); + if (i<0) msg(DBG_INF,"error giving READ command: %0d\n", i); + return; +} +/*==========================================================================*/ +/* + * Check the completion of the read-data command. On success, read + * the current_drive->sbp_bufsiz * 2048 bytes of data from the disk into buffer. + */ +static int sbp_data(struct request *req) +{ + int i=0, j=0, l, frame; + u_int try=0; + u_long timeout; + u_char *p; + u_int data_tries = 0; + u_int data_waits = 0; + u_int data_retrying = 0; + int error_flag; + int xa_count; + int max_latency; + int success; + int wait; + int duration; + + error_flag=0; + success=0; +#if LONG_TIMING + max_latency=9*HZ; +#else + if (current_drive->f_multisession) max_latency=15*HZ; + else max_latency=5*HZ; +#endif + duration=jiffies; + for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++) + { + SBPCD_CLI; + + del_timer(&data_timer); + data_timer.expires=jiffies+max_latency; + timed_out_data=0; + add_timer(&data_timer); + while (!timed_out_data) + { + if (current_drive->f_multisession) try=maxtim_data*4; + else try=maxtim_data; + msg(DBG_000,"sbp_data: CDi_status loop: try=%d.\n",try); + for ( ; try!=0;try--) { - drvcmd[0]=CMD2_READ_XA2; - lba2msf(block,&drvcmd[1]); /* msf-bin format required */ - drvcmd[4]=0; - drvcmd[5]=read_audio.nframes; /* # of frames */ - drvcmd[6]=0x11; /* raw mode */ + j=inb(CDi_status); + if (!(j&s_not_data_ready)) break; + if (!(j&s_not_result_ready)) break; + if (fam0LV_drive) if (j&s_attention) break; } - else if (famT_drive) /* CD-55A: not tested yet */ + if (!(j&s_not_data_ready)) goto data_ready; + if (try==0) { + if (data_retrying == 0) data_waits++; + data_retrying = 1; + msg(DBG_000,"sbp_data: CDi_status loop: sleeping.\n"); + sbp_sleep(1); + try = 1; } - msg(DBG_AUD,"read_audio: before giving \"read\" command.\n"); - flags_cmd_out=f_putcmd; - response_count=0; - i=cmd_out(); - if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i); - sbp_sleep(0); - msg(DBG_AUD,"read_audio: after giving \"read\" command.\n"); - for (frame=1;frame<2 && !error_flag; frame++) - { - try=maxtim_data; - for (timeout=jiffies+9*HZ; ; ) - { - for ( ; try!=0;try--) - { - j=inb(CDi_status); - if (!(j&s_not_data_ready)) break; - if (!(j&s_not_result_ready)) break; - if (fam0L_drive) if (j&s_attention) break; - } - if (try != 0 || time_after_eq(jiffies, timeout)) break; - if (data_retrying == 0) data_waits++; - data_retrying = 1; - sbp_sleep(1); - try = 1; - } - if (try==0) - { - msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n"); - error_flag++; - break; - } - msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n"); - if (j&s_not_data_ready) - { - msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n"); - error_flag++; - break; - } - msg(DBG_AUD,"read_audio: before reading data.\n"); - error_flag=0; - p = current_drive->aud_buf; - if (sbpro_type==1) OUT(CDo_sel_i_d,1); - if (do_16bit) + } + msg(DBG_INF,"sbp_data: CDi_status loop expired.\n"); + data_ready: +
del_timer(&data_timer); + + if (timed_out_data) + { + msg(DBG_INF,"sbp_data: CDi_status timeout (timed_out_data) (%02X).\n", j); + error_flag++; + } + if (try==0) + { + msg(DBG_INF,"sbp_data: CDi_status timeout (try=0) (%02X).\n", j); + error_flag++; + } + if (!(j&s_not_result_ready)) + { + msg(DBG_INF, "sbp_data: RESULT_READY where DATA_READY awaited (%02X).\n", j); + response_count=20; + j=ResponseInfo(); + j=inb(CDi_status); + } + if (j&s_not_data_ready) + { + if ((current_drive->ored_ctl_adr&0x40)==0) + msg(DBG_INF, "CD contains no data tracks.\n"); + else msg(DBG_INF, "sbp_data: DATA_READY timeout (%02X).\n", j); + error_flag++; + } + SBPCD_STI; + if (error_flag) break; + + msg(DBG_000, "sbp_data: beginning to read.\n"); + p = current_drive->sbp_buf + frame * CD_FRAMESIZE; + if (sbpro_type==1) OUT(CDo_sel_i_d,1); + if (cmd_type==READ_M2) { + if (do_16bit) insw(CDi_data, xa_head_buf, CD_XA_HEAD>>1); + else insb(CDi_data, xa_head_buf, CD_XA_HEAD); + } + if (do_16bit) insw(CDi_data, p, CD_FRAMESIZE>>1); + else insb(CDi_data, p, CD_FRAMESIZE); + if (cmd_type==READ_M2) { + if (do_16bit) insw(CDi_data, xa_tail_buf, CD_XA_TAIL>>1); + else insb(CDi_data, xa_tail_buf, CD_XA_TAIL); + } + current_drive->sbp_current++; + if (sbpro_type==1) OUT(CDo_sel_i_d,0); + if (cmd_type==READ_M2) + { + for (xa_count=0;xa_count<CD_XA_HEAD;xa_count++) + sprintf(&msgbuf[xa_count*3], " %02X", xa_head_buf[xa_count]); + msgbuf[xa_count*3]=0; + msg(DBG_XA1,"xa head:%s\n", msgbuf); + } + data_retrying = 0; + data_tries++; + if (data_tries >= 1000) + { + msg(DBG_INF,"sbp_data() statistics: %d waits in %d frames.\n", data_waits, data_tries); + data_waits = data_tries = 0; + } + } + duration=jiffies-duration; + msg(DBG_TEA,"time to read %d frames: %d jiffies .\n",frame,duration); + if (famT_drive) + { + wait=8; + do + { + if (teac==2) + { + if ((i=CDi_stat_loop_T()) == -1) break; + } + else + { + sbp_sleep(1); + OUT(CDo_sel_i_d,0); + i=inb(CDi_status); + } + if (!(i&s_not_data_ready)) + { + OUT(CDo_sel_i_d,1); + j=0; + do { - u_short *p2 = (u_short *) p; - - for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) - { - if ((inb_p(CDi_status)&s_not_data_ready)) continue; - - /* get one sample */ - *p2++ = inw_p(CDi_data); - *p2++ = inw_p(CDi_data); - } - } else { - for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;) - { - if ((inb_p(CDi_status)&s_not_data_ready)) continue; - - /* get one sample */ - *p++ = inb_p(CDi_data); - *p++ = inb_p(CDi_data); - *p++ = inb_p(CDi_data); - *p++ = inb_p(CDi_data); - } + if (do_16bit) i=inw(CDi_data); + else i=inb(CDi_data); + j++; + i=inb(CDi_status); } - if (sbpro_type==1) OUT(CDo_sel_i_d,0); - data_retrying = 0; + while (!(i&s_not_data_ready)); + msg(DBG_TEA, "==========too much data (%d bytes/words)==============.\n", j); } - msg(DBG_AUD,"read_audio: after reading data.\n"); - if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */ + if (!(i&s_not_result_ready)) { - msg(DBG_AUD,"read_audio: read aborted by drive\n"); -#if 0000 - i=cc_DriveReset(); /* ugly fix to prevent a hang */ -#else - i=cc_ReadError(); + OUT(CDo_sel_i_d,0); + l=0; + do + { + infobuf[l++]=inb(CDi_info); + i=inb(CDi_status); + } + while (!(i&s_not_result_ready)); + if (infobuf[0]==0x00) success=1; +#if 1 + for (j=0;j<l;j++) sprintf(&msgbuf[j*3], " %02X", infobuf[j]); + msgbuf[j*3]=0; + msg(DBG_TEA,"sbp_data info response:%s\n", msgbuf); +#endif + if (infobuf[0]==0x02) + { + error_flag++; + do + { + ++recursion; + if (recursion>1) msg(DBG_TEA,"cmd_out_T READ_ERR recursion (sbp_data): %d !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",recursion); + else msg(DBG_TEA,"sbp_data: CMDT_READ_ERR necessary.\n"); + clr_cmdbuf(); + drvcmd[0]=CMDT_READ_ERR; + j=cmd_out_T(); /* !!! recursive here !!!
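The insw()/insb() pairs above are the same transfer done two ways: on a 16-bit interface one frame arrives as CD_FRAMESIZE>>1 port words, on an 8-bit interface as CD_FRAMESIZE single bytes. A sketch of the idea (function name invented; insb/insw are the port-I/O primitives from asm/io.h; CD_FRAMESIZE is 2048 as noted earlier):

    #include <asm/io.h>

    #define CD_FRAMESIZE 2048

    /* Drain one 2048-byte data frame from the interface's data port. */
    static void read_frame_sketch(unsigned long data_port, unsigned char *dst, int do_16bit)
    {
        if (do_16bit)
            insw(data_port, dst, CD_FRAMESIZE >> 1);    /* 1024 words */
        else
            insb(data_port, dst, CD_FRAMESIZE);         /* 2048 bytes */
    }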
*/ + --recursion; + sbp_sleep(1); } - if (i != 0 || time_after_eq(jiffies, timeout)) break; - sbp_sleep(0); - i = 1; - } - if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ"); - if (!(j&s_attention)) - { - msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n"); - i=cc_DriveReset(); /* ugly fix to prevent a hang */ - continue; + while (j<0); + current_drive->error_state=infobuf[2]; + current_drive->b3=infobuf[3]; + current_drive->b4=infobuf[4]; } + break; } - do + else { - if (fam0L_drive) cc_ReadStatus(); - i=ResponseStatus(); /* builds status_bits, returns orig. status (old) or faked p_success (new) */ - if (i<0) { msg(DBG_AUD, - "read_audio: cc_ReadStatus error after read: %02X\n", - current_drive->status_bits); - continue; /* FIXME */ - } +#if 0 + msg(DBG_TEA, "============= waiting for result=================.\n"); + sbp_sleep(1); +#endif } - while ((fam0L_drive)&&(!st_check)&&(!(i&p_success))); - if (st_check) + } + while (wait--); + } + + if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */ + { + msg(DBG_TEA, "================error flag: %d=================.\n", error_flag); + msg(DBG_INF,"sbp_data: read aborted by drive.\n"); +#if 1 + i=cc_DriveReset(); /* ugly fix to prevent a hang */ +#else + i=cc_ReadError(); +#endif + return (0); + } + + if (fam0LV_drive) + { + SBPCD_CLI; + i=maxtim_data; + for (timeout=jiffies+HZ; time_before(jiffies, timeout); timeout--) + { + for ( ;i!=0;i--) { - i=cc_ReadError(); - msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i); - continue; + j=inb(CDi_status); + if (!(j&s_not_data_ready)) break; + if (!(j&s_not_result_ready)) break; + if (j&s_attention) break; } - if (copy_to_user(read_audio.buf, - current_drive->aud_buf, - read_audio.nframes * CD_FRAMESIZE_RAW)) - RETURN_UP(-EFAULT); - msg(DBG_AUD,"read_audio: copy_to_user done.\n"); - break; + if (i != 0 || time_after_eq(jiffies, timeout)) break; + sbp_sleep(0); + i = 1; } - cc_ModeSelect(CD_FRAMESIZE); - cc_ModeSense(); - current_drive->mode=READ_M1; -#if OLD_BUSY - busy_audio=0; -#endif /* OLD_BUSY */ - if (data_tries == 0) + if (i==0) msg(DBG_INF,"status timeout after READ.\n"); + if (!(j&s_attention)) { - msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__); - RETURN_UP(-EIO); + msg(DBG_INF,"sbp_data: timeout waiting DRV_ATTN - retrying.\n"); + i=cc_DriveReset(); /* ugly fix to prevent a hang */ + SBPCD_STI; + return (0); } - msg(DBG_AUD,"read_audio: successful return.\n"); - RETURN_UP(0); - } /* end of CDROMREADAUDIO */ + SBPCD_STI; + } + +#if 0 + if (!success) +#endif + do + { + if (fam0LV_drive) cc_ReadStatus(); +#if 1 + if (famT_drive) msg(DBG_TEA, "================before ResponseStatus=================.\n", i); +#endif + i=ResponseStatus(); /* builds status_bits, returns orig. 
status (old) or faked p_success (new) */ +#if 1 + if (famT_drive) msg(DBG_TEA, "================ResponseStatus: %d=================.\n", i); +#endif + if (i<0) + { + msg(DBG_INF,"bad cc_ReadStatus after read: %02X\n", current_drive->status_bits); + return (0); + } + } + while ((fam0LV_drive)&&(!st_check)&&(!(i&p_success))); + if (st_check) + { + i=cc_ReadError(); + msg(DBG_INF,"cc_ReadError was necessary after read: %d\n",i); + return (0); + } + if (fatal_err) + { + fatal_err=0; + current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1; /* purge buffer */ + current_drive->sbp_current = 0; + msg(DBG_INF,"sbp_data: fatal_err - retrying.\n"); + return (0); + } + + current_drive->sbp_first_frame = req -> sector / 4; + current_drive->sbp_last_frame = current_drive->sbp_first_frame + current_drive->sbp_read_frames - 1; + sbp_transfer(req); + return (1); +} +/*==========================================================================*/ - default: - msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd); - RETURN_UP(-EINVAL); - } /* end switch(cmd) */ +static int sbpcd_block_open(struct inode *inode, struct file *file) +{ + struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; + return cdrom_open(p->sbpcd_infop, inode, file); +} + +static int sbpcd_block_release(struct inode *inode, struct file *file) +{ + struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; + return cdrom_release(p->sbpcd_infop, file); +} + +static int sbpcd_block_ioctl(struct inode *inode, struct file *file, + unsigned cmd, unsigned long arg) +{ + struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data; + return cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg); } static int sbpcd_block_media_changed(struct gendisk *disk) @@ -5471,9 +5478,10 @@ static struct cdrom_device_ops sbpcd_dops = { .get_mcn = sbpcd_get_mcn, .reset = sbpcd_reset, .audio_ioctl = sbpcd_audio_ioctl, + .dev_ioctl = sbpcd_dev_ioctl, .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | - CDC_MCN | CDC_PLAY_AUDIO, + CDC_MCN | CDC_PLAY_AUDIO | CDC_IOCTLS, .n_minors = 1, }; diff --git a/trunk/drivers/cdrom/viocd.c b/trunk/drivers/cdrom/viocd.c index c0f817ba7adb..e27617259552 100644 --- a/trunk/drivers/cdrom/viocd.c +++ b/trunk/drivers/cdrom/viocd.c @@ -627,7 +627,7 @@ static struct cdrom_device_ops viocd_dops = { .media_changed = viocd_media_changed, .lock_door = viocd_lock_door, .generic_packet = viocd_packet, - .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM + .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM }; static int __init find_capability(const char *type) diff --git a/trunk/drivers/char/amiserial.c b/trunk/drivers/char/amiserial.c index 6602b3156df5..7ac365b5d9ec 100644 --- a/trunk/drivers/char/amiserial.c +++ b/trunk/drivers/char/amiserial.c @@ -46,6 +46,8 @@ /* Sanity checks */ +#define SERIAL_INLINE + #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ tty->name, (info->flags), serial_driver->refcount,info->count,tty->count,s) @@ 
-93,6 +95,10 @@ static char *serial_version = "4.30"; #include #include +#ifdef SERIAL_INLINE +#define _INLINE_ inline +#endif + #define custom amiga_custom static char *serial_name = "Amiga-builtin serial driver"; @@ -247,14 +253,14 @@ static void rs_start(struct tty_struct *tty) * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. */ -static void rs_sched_event(struct async_struct *info, - int event) +static _INLINE_ void rs_sched_event(struct async_struct *info, + int event) { info->event |= 1 << event; tasklet_schedule(&info->tlet); } -static void receive_chars(struct async_struct *info) +static _INLINE_ void receive_chars(struct async_struct *info) { int status; int serdatr; @@ -343,7 +349,7 @@ static void receive_chars(struct async_struct *info) return; } -static void transmit_chars(struct async_struct *info) +static _INLINE_ void transmit_chars(struct async_struct *info) { custom.intreq = IF_TBE; mb(); @@ -383,7 +389,7 @@ static void transmit_chars(struct async_struct *info) } } -static void check_modem_status(struct async_struct *info) +static _INLINE_ void check_modem_status(struct async_struct *info) { unsigned char status = ciab.pra & (SER_DCD | SER_CTS | SER_DSR); unsigned char dstatus; @@ -1953,7 +1959,7 @@ static int rs_read_proc(char *page, char **start, off_t off, int count, * number, and identifies which options were configured into this * driver. */ -static void show_serial_version(void) +static _INLINE_ void show_serial_version(void) { printk(KERN_INFO "%s version %s\n", serial_name, serial_version); } diff --git a/trunk/drivers/char/generic_serial.c b/trunk/drivers/char/generic_serial.c index 5e59c0b42731..e38a5f0e07bb 100644 --- a/trunk/drivers/char/generic_serial.c +++ b/trunk/drivers/char/generic_serial.c @@ -48,8 +48,8 @@ static int gs_debug; #define NEW_WRITE_LOCKING 1 #if NEW_WRITE_LOCKING #define DECL /* Nothing */ -#define LOCKIT mutex_lock(& port->port_write_mutex); -#define RELEASEIT mutex_unlock(&port->port_write_mutex); +#define LOCKIT down (& port->port_write_sem); +#define RELEASEIT up (&port->port_write_sem); #else #define DECL unsigned long flags; #define LOCKIT save_flags (flags);cli () @@ -124,14 +124,14 @@ int gs_write(struct tty_struct * tty, /* get exclusive "write" access to this port (problem 3) */ /* This is not a spinlock because we can have a disk access (page fault) in copy_from_user */ - mutex_lock(& port->port_write_mutex); + down (& port->port_write_sem); while (1) { c = count; /* This is safe because we "OWN" the "head". Noone else can - change the "head": we own the port_write_mutex. */ + change the "head": we own the port_write_sem. */ /* Don't overrun the end of the buffer */ t = SERIAL_XMIT_SIZE - port->xmit_head; if (t < c) c = t; @@ -153,7 +153,7 @@ int gs_write(struct tty_struct * tty, count -= c; total += c; } - mutex_unlock(& port->port_write_mutex); + up (& port->port_write_sem); gs_dprintk (GS_DEBUG_WRITE, "write: interrupts are %s\n", (port->flags & GS_TX_INTEN)?"enabled": "disabled"); @@ -214,7 +214,7 @@ int gs_write(struct tty_struct * tty, c = count; /* This is safe because we "OWN" the "head". Noone else can - change the "head": we own the port_write_mutex. */ + change the "head": we own the port_write_sem. 
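The read_chan() hunk above shows the usual 2.6 semaphore idiom for honouring O_NONBLOCK at the locking step itself: try the lock and fail with -EAGAIN instead of sleeping. A sketch of the pattern as this patch restores it (helper name invented):

    #include <asm/semaphore.h>

    /* Fail fast for non-blocking callers; sleep interruptibly otherwise. */
    static int grab_read_lock(struct semaphore *sem, int nonblock)
    {
        if (nonblock) {
            if (down_trylock(sem))
                return -EAGAIN;        /* would have had to wait */
        } else {
            if (down_interruptible(sem))
                return -ERESTARTSYS;   /* interrupted by a signal */
        }
        return 0;                      /* caller must up(sem) later */
    }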
*/ /* Don't overrun the end of the buffer */ t = SERIAL_XMIT_SIZE - port->xmit_head; if (t < c) c = t; @@ -888,7 +888,7 @@ int gs_init_port(struct gs_port *port) spin_lock_irqsave (&port->driver_lock, flags); if (port->tty) clear_bit(TTY_IO_ERROR, &port->tty->flags); - mutex_init(&port->port_write_mutex); + init_MUTEX(&port->port_write_sem); port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; spin_unlock_irqrestore(&port->driver_lock, flags); gs_set_termios(port->tty, NULL); diff --git a/trunk/drivers/char/istallion.c b/trunk/drivers/char/istallion.c index ede128356af2..28c5a3193b81 100644 --- a/trunk/drivers/char/istallion.c +++ b/trunk/drivers/char/istallion.c @@ -181,6 +181,7 @@ static struct tty_driver *stli_serial; * is already swapping a shared buffer won't make things any worse. */ static char *stli_tmpwritebuf; +static DECLARE_MUTEX(stli_tmpwritesem); #define STLI_TXBUFSIZE 4096 diff --git a/trunk/drivers/char/n_tty.c b/trunk/drivers/char/n_tty.c index ede365d05387..ccad7ae94541 100644 --- a/trunk/drivers/char/n_tty.c +++ b/trunk/drivers/char/n_tty.c @@ -132,7 +132,7 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty) * We test the TTY_THROTTLED bit first so that it always * indicates the current state. The decision about whether * it is worth allowing more input has been taken by the caller. - * Can sleep, may be called under the atomic_read_lock mutex but + * Can sleep, may be called under the atomic_read semaphore but * this is not guaranteed. */ @@ -1132,7 +1132,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt) * buffer, and once to drain the space from the (physical) beginning of * the buffer to head pointer. * - * Called under the tty->atomic_read_lock sem and with TTY_DONT_FLIP set + * Called under the tty->atomic_read sem and with TTY_DONT_FLIP set * */ @@ -1262,11 +1262,11 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file, * Internal serialization of reads. */ if (file->f_flags & O_NONBLOCK) { - if (!mutex_trylock(&tty->atomic_read_lock)) + if (down_trylock(&tty->atomic_read)) return -EAGAIN; } else { - if (mutex_lock_interruptible(&tty->atomic_read_lock)) + if (down_interruptible(&tty->atomic_read)) return -ERESTARTSYS; } @@ -1393,7 +1393,7 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file, timeout = time; } clear_bit(TTY_DONT_FLIP, &tty->flags); - mutex_unlock(&tty->atomic_read_lock); + up(&tty->atomic_read); remove_wait_queue(&tty->read_wait, &wait); if (!waitqueue_active(&tty->read_wait)) diff --git a/trunk/drivers/char/nwflash.c b/trunk/drivers/char/nwflash.c index 8865387d3448..ca41d62b1d9d 100644 --- a/trunk/drivers/char/nwflash.c +++ b/trunk/drivers/char/nwflash.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -57,7 +56,7 @@ static int gbWriteEnable; static int gbWriteBase64Enable; static volatile unsigned char *FLASH_BASE; static int gbFlashSize = KFLASH_SIZE; -static DEFINE_MUTEX(nwflash_mutex); +static DECLARE_MUTEX(nwflash_sem); extern spinlock_t gpio_lock; @@ -141,7 +140,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size, /* * We now lock against reads and writes. 
--rmk */ - if (mutex_lock_interruptible(&nwflash_mutex)) + if (down_interruptible(&nwflash_sem)) return -ERESTARTSYS; ret = copy_to_user(buf, (void *)(FLASH_BASE + p), count); @@ -150,7 +149,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t size, *ppos += count; } else ret = -EFAULT; - mutex_unlock(&nwflash_mutex); + up(&nwflash_sem); } return ret; } @@ -189,7 +188,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf, /* * We now lock against reads and writes. --rmk */ - if (mutex_lock_interruptible(&nwflash_mutex)) + if (down_interruptible(&nwflash_sem)) return -ERESTARTSYS; written = 0; @@ -278,7 +277,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf, */ leds_event(led_release); - mutex_unlock(&nwflash_mutex); + up(&nwflash_sem); return written; } diff --git a/trunk/drivers/char/raw.c b/trunk/drivers/char/raw.c index 15a7b4086524..30e4cbe16bb0 100644 --- a/trunk/drivers/char/raw.c +++ b/trunk/drivers/char/raw.c @@ -19,7 +19,6 @@ #include #include #include -#include #include @@ -30,7 +29,7 @@ struct raw_device_data { static struct class *raw_class; static struct raw_device_data raw_devices[MAX_RAW_MINORS]; -static DEFINE_MUTEX(raw_mutex); +static DECLARE_MUTEX(raw_mutex); static struct file_operations raw_ctl_fops; /* forward declaration */ /* @@ -54,7 +53,7 @@ static int raw_open(struct inode *inode, struct file *filp) return 0; } - mutex_lock(&raw_mutex); + down(&raw_mutex); /* * All we need to do on open is check that the device is bound. @@ -79,7 +78,7 @@ static int raw_open(struct inode *inode, struct file *filp) filp->f_dentry->d_inode->i_mapping = bdev->bd_inode->i_mapping; filp->private_data = bdev; - mutex_unlock(&raw_mutex); + up(&raw_mutex); return 0; out2: @@ -87,7 +86,7 @@ static int raw_open(struct inode *inode, struct file *filp) out1: blkdev_put(bdev); out: - mutex_unlock(&raw_mutex); + up(&raw_mutex); return err; } @@ -100,14 +99,14 @@ static int raw_release(struct inode *inode, struct file *filp) const int minor= iminor(inode); struct block_device *bdev; - mutex_lock(&raw_mutex); + down(&raw_mutex); bdev = raw_devices[minor].binding; if (--raw_devices[minor].inuse == 0) { /* Here inode->i_mapping == bdev->bd_inode->i_mapping */ inode->i_mapping = &inode->i_data; inode->i_mapping->backing_dev_info = &default_backing_dev_info; } - mutex_unlock(&raw_mutex); + up(&raw_mutex); bd_release(bdev); blkdev_put(bdev); @@ -188,9 +187,9 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, goto out; } - mutex_lock(&raw_mutex); + down(&raw_mutex); if (rawdev->inuse) { - mutex_unlock(&raw_mutex); + up(&raw_mutex); err = -EBUSY; goto out; } @@ -212,11 +211,11 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, bind_device(&rq); } } - mutex_unlock(&raw_mutex); + up(&raw_mutex); } else { struct block_device *bdev; - mutex_lock(&raw_mutex); + down(&raw_mutex); bdev = rawdev->binding; if (bdev) { rq.block_major = MAJOR(bdev->bd_dev); @@ -224,7 +223,7 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp, } else { rq.block_major = rq.block_minor = 0; } - mutex_unlock(&raw_mutex); + up(&raw_mutex); if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) { err = -EFAULT; goto out; diff --git a/trunk/drivers/char/ser_a2232.c b/trunk/drivers/char/ser_a2232.c index 510bd3e0e88b..fee68cc895f8 100644 --- a/trunk/drivers/char/ser_a2232.c +++ b/trunk/drivers/char/ser_a2232.c @@ -97,7 +97,7 @@ #include #include #include -#include +#include #include @@ -654,7 +654,7 @@ static void 
a2232_init_portstructs(void) port->gs.closing_wait = 30 * HZ; port->gs.rd = &a2232_real_driver; #ifdef NEW_WRITE_LOCKING - init_MUTEX(&(port->gs.port_write_mutex)); + init_MUTEX(&(port->gs.port_write_sem)); #endif init_waitqueue_head(&port->gs.open_wait); init_waitqueue_head(&port->gs.close_wait); diff --git a/trunk/drivers/char/snsc.c b/trunk/drivers/char/snsc.c index b543821d8cb4..0e7d216e7eb0 100644 --- a/trunk/drivers/char/snsc.c +++ b/trunk/drivers/char/snsc.c @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2004, 2006 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. */ /* @@ -77,7 +77,7 @@ scdrv_open(struct inode *inode, struct file *file) scd = container_of(inode->i_cdev, struct sysctl_data_s, scd_cdev); /* allocate memory for subchannel data */ - sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); + sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL); if (sd == NULL) { printk("%s: couldn't allocate subchannel data\n", __FUNCTION__); @@ -85,6 +85,7 @@ scdrv_open(struct inode *inode, struct file *file) } /* initialize subch_data_s fields */ + memset(sd, 0, sizeof (struct subch_data_s)); sd->sd_nasid = scd->scd_nasid; sd->sd_subch = ia64_sn_irtr_open(scd->scd_nasid); @@ -393,7 +394,7 @@ scdrv_init(void) sprintf(devnamep, "#%d", geo_slab(geoid)); /* allocate sysctl device data */ - scd = kzalloc(sizeof (struct sysctl_data_s), + scd = kmalloc(sizeof (struct sysctl_data_s), GFP_KERNEL); if (!scd) { printk("%s: failed to allocate device info" @@ -401,6 +402,7 @@ scdrv_init(void) SYSCTL_BASENAME, devname); continue; } + memset(scd, 0, sizeof (struct sysctl_data_s)); /* initialize sysctl device data fields */ scd->scd_nasid = cnodeid_to_nasid(cnode); diff --git a/trunk/drivers/char/snsc_event.c b/trunk/drivers/char/snsc_event.c index e234d50e142a..a4fa507eed9e 100644 --- a/trunk/drivers/char/snsc_event.c +++ b/trunk/drivers/char/snsc_event.c @@ -287,7 +287,7 @@ scdrv_event_init(struct sysctl_data_s *scd) { int rv; - event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); + event_sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL); if (event_sd == NULL) { printk(KERN_WARNING "%s: couldn't allocate subchannel info" " for event monitoring\n", __FUNCTION__); @@ -295,6 +295,7 @@ scdrv_event_init(struct sysctl_data_s *scd) } /* initialize subch_data_s fields */ + memset(event_sd, 0, sizeof (struct subch_data_s)); event_sd->sd_nasid = scd->scd_nasid; spin_lock_init(&event_sd->sd_rlock); @@ -320,3 +321,5 @@ scdrv_event_init(struct sysctl_data_s *scd) return; } } + + diff --git a/trunk/drivers/char/stallion.c b/trunk/drivers/char/stallion.c index 3f5d6077f39c..bdaab6992109 100644 --- a/trunk/drivers/char/stallion.c +++ b/trunk/drivers/char/stallion.c @@ -148,6 +148,7 @@ static struct tty_driver *stl_serial; * is already swapping a shared buffer won't make things any worse. */ static char *stl_tmpwritebuf; +static DECLARE_MUTEX(stl_tmpwritesem); /* * Define a local default termios struct. 
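Most of the hunks in these character-driver files are the same mechanical substitution: the 2.6.16 mutex API is swapped back to a counting semaphore used as a mutex, one call for one call. Side by side, as a sketch assuming a file-scope lock named foo:

    #include <linux/mutex.h>
    #include <asm/semaphore.h>

    static DEFINE_MUTEX(foo_mutex);    /* the form being removed */
    static DECLARE_MUTEX(foo_sem);     /* the form this patch restores */

    static int with_mutex(void)
    {
        if (mutex_lock_interruptible(&foo_mutex))
            return -ERESTARTSYS;
        /* critical section */
        mutex_unlock(&foo_mutex);
        return 0;
    }

    static int with_semaphore(void)
    {
        if (down_interruptible(&foo_sem))
            return -ERESTARTSYS;
        /* critical section */
        up(&foo_sem);
        return 0;
    }

Note that mutex_trylock() returns nonzero on success while down_trylock() returns nonzero on failure, which is why the n_tty.c hunk inverts the test when converting.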
All ports will be created diff --git a/trunk/drivers/char/sx.c b/trunk/drivers/char/sx.c index 3b4747230270..a6b4f02bdceb 100644 --- a/trunk/drivers/char/sx.c +++ b/trunk/drivers/char/sx.c @@ -2318,7 +2318,7 @@ static int sx_init_portstructs (int nboards, int nports) port->board = board; port->gs.rd = &sx_real_driver; #ifdef NEW_WRITE_LOCKING - port->gs.port_write_mutex = MUTEX; + port->gs.port_write_sem = MUTEX; #endif port->gs.driver_lock = SPIN_LOCK_UNLOCKED; /* diff --git a/trunk/drivers/char/tty_io.c b/trunk/drivers/char/tty_io.c index 76592ee1fb38..53d3d066554e 100644 --- a/trunk/drivers/char/tty_io.c +++ b/trunk/drivers/char/tty_io.c @@ -130,7 +130,7 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ /* Semaphore to protect creating and releasing a tty. This is shared with vt.c for deeply disgusting hack reasons */ -DEFINE_MUTEX(tty_mutex); +DECLARE_MUTEX(tty_sem); #ifdef CONFIG_UNIX98_PTYS extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ @@ -1188,11 +1188,11 @@ void disassociate_ctty(int on_exit) lock_kernel(); - mutex_lock(&tty_mutex); + down(&tty_sem); tty = current->signal->tty; if (tty) { tty_pgrp = tty->pgrp; - mutex_unlock(&tty_mutex); + up(&tty_sem); if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) tty_vhangup(tty); } else { @@ -1200,7 +1200,7 @@ void disassociate_ctty(int on_exit) kill_pg(current->signal->tty_old_pgrp, SIGHUP, on_exit); kill_pg(current->signal->tty_old_pgrp, SIGCONT, on_exit); } - mutex_unlock(&tty_mutex); + up(&tty_sem); unlock_kernel(); return; } @@ -1211,7 +1211,7 @@ void disassociate_ctty(int on_exit) } /* Must lock changes to tty_old_pgrp */ - mutex_lock(&tty_mutex); + down(&tty_sem); current->signal->tty_old_pgrp = 0; tty->session = 0; tty->pgrp = -1; @@ -1222,7 +1222,7 @@ void disassociate_ctty(int on_exit) p->signal->tty = NULL; } while_each_task_pid(current->signal->session, PIDTYPE_SID, p); read_unlock(&tasklist_lock); - mutex_unlock(&tty_mutex); + up(&tty_sem); unlock_kernel(); } @@ -1306,7 +1306,7 @@ static inline ssize_t do_tty_write( ssize_t ret = 0, written = 0; unsigned int chunk; - if (mutex_lock_interruptible(&tty->atomic_write_lock)) { + if (down_interruptible(&tty->atomic_write)) { return -ERESTARTSYS; } @@ -1329,7 +1329,7 @@ static inline ssize_t do_tty_write( if (count < chunk) chunk = count; - /* write_buf/write_cnt is protected by the atomic_write_lock mutex */ + /* write_buf/write_cnt is protected by the atomic_write semaphore */ if (tty->write_cnt < chunk) { unsigned char *buf; @@ -1338,7 +1338,7 @@ static inline ssize_t do_tty_write( buf = kmalloc(chunk, GFP_KERNEL); if (!buf) { - mutex_unlock(&tty->atomic_write_lock); + up(&tty->atomic_write); return -ENOMEM; } kfree(tty->write_buf); @@ -1374,7 +1374,7 @@ static inline ssize_t do_tty_write( inode->i_mtime = current_fs_time(inode->i_sb); ret = written; } - mutex_unlock(&tty->atomic_write_lock); + up(&tty->atomic_write); return ret; } @@ -1442,8 +1442,8 @@ static inline void tty_line_name(struct tty_driver *driver, int index, char *p) /* * WSH 06/09/97: Rewritten to remove races and properly clean up after a - * failed open. The new code protects the open with a mutex, so it's - * really quite straightforward. The mutex locking can probably be + * failed open. The new code protects the open with a semaphore, so it's + * really quite straightforward. The semaphore locking can probably be * relaxed for the (most common) case of reopening a tty. 
*/ static int init_dev(struct tty_driver *driver, int idx, @@ -1640,7 +1640,7 @@ static int init_dev(struct tty_driver *driver, int idx, success: *ret_tty = tty; - /* All paths come through here to release the mutex */ + /* All paths come through here to release the semaphore */ end_init: return retval; @@ -1837,7 +1837,7 @@ static void release_dev(struct file * filp) /* Guard against races with tty->count changes elsewhere and opens on /dev/tty */ - mutex_lock(&tty_mutex); + down(&tty_sem); tty_closing = tty->count <= 1; o_tty_closing = o_tty && (o_tty->count <= (pty_master ? 1 : 0)); @@ -1868,7 +1868,7 @@ static void release_dev(struct file * filp) printk(KERN_WARNING "release_dev: %s: read/write wait queue " "active!\n", tty_name(tty, buf)); - mutex_unlock(&tty_mutex); + up(&tty_sem); schedule(); } @@ -1934,7 +1934,7 @@ static void release_dev(struct file * filp) read_unlock(&tasklist_lock); } - mutex_unlock(&tty_mutex); + up(&tty_sem); /* check whether both sides are closing ... */ if (!tty_closing || (o_tty && !o_tty_closing)) @@ -2040,11 +2040,11 @@ static int tty_open(struct inode * inode, struct file * filp) index = -1; retval = 0; - mutex_lock(&tty_mutex); + down(&tty_sem); if (device == MKDEV(TTYAUX_MAJOR,0)) { if (!current->signal->tty) { - mutex_unlock(&tty_mutex); + up(&tty_sem); return -ENXIO; } driver = current->signal->tty->driver; @@ -2070,18 +2070,18 @@ static int tty_open(struct inode * inode, struct file * filp) noctty = 1; goto got_driver; } - mutex_unlock(&tty_mutex); + up(&tty_sem); return -ENODEV; } driver = get_tty_driver(device, &index); if (!driver) { - mutex_unlock(&tty_mutex); + up(&tty_sem); return -ENODEV; } got_driver: retval = init_dev(driver, index, &tty); - mutex_unlock(&tty_mutex); + up(&tty_sem); if (retval) return retval; @@ -2167,9 +2167,9 @@ static int ptmx_open(struct inode * inode, struct file * filp) } up(&allocated_ptys_lock); - mutex_lock(&tty_mutex); + down(&tty_sem); retval = init_dev(ptm_driver, index, &tty); - mutex_unlock(&tty_mutex); + up(&tty_sem); if (retval) goto out; @@ -2915,8 +2915,8 @@ static void initialize_tty_struct(struct tty_struct *tty) init_waitqueue_head(&tty->write_wait); init_waitqueue_head(&tty->read_wait); INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); - mutex_init(&tty->atomic_read_lock); - mutex_init(&tty->atomic_write_lock); + sema_init(&tty->atomic_read, 1); + sema_init(&tty->atomic_write, 1); spin_lock_init(&tty->read_lock); INIT_LIST_HEAD(&tty->tty_files); INIT_WORK(&tty->SAK_work, NULL, NULL); diff --git a/trunk/drivers/char/vme_scc.c b/trunk/drivers/char/vme_scc.c index fd00822ac145..d9325281e482 100644 --- a/trunk/drivers/char/vme_scc.c +++ b/trunk/drivers/char/vme_scc.c @@ -184,7 +184,7 @@ static void scc_init_portstructs(void) port->gs.closing_wait = 30 * HZ; port->gs.rd = &scc_real_driver; #ifdef NEW_WRITE_LOCKING - port->gs.port_write_mutex = MUTEX; + port->gs.port_write_sem = MUTEX; #endif init_waitqueue_head(&port->gs.open_wait); init_waitqueue_head(&port->gs.close_wait); diff --git a/trunk/drivers/char/vt.c b/trunk/drivers/char/vt.c index ca4844c527da..0900d1dbee59 100644 --- a/trunk/drivers/char/vt.c +++ b/trunk/drivers/char/vt.c @@ -2489,7 +2489,7 @@ static int con_open(struct tty_struct *tty, struct file *filp) } /* - * We take tty_mutex in here to prevent another thread from coming in via init_dev + * We take tty_sem in here to prevent another thread from coming in via init_dev * and taking a ref against the tty while we're in the process of forgetting * about it and cleaning things up. 
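/*
 * initialize_tty_struct() above rebuilds its per-tty locks with
 * sema_init(..., 1), the runtime equivalent of a static DECLARE_MUTEX.
 * A sketch of initializing an embedded semaphore at object-creation time;
 * struct demo_tty is a hypothetical stand-in.
 */

#include <asm/semaphore.h>

struct demo_tty {
	struct semaphore atomic_write;	/* serializes writers, as above */
};

static void demo_tty_init(struct demo_tty *t)
{
	sema_init(&t->atomic_write, 1);	/* count of 1 == mutual exclusion */
}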
* @@ -2497,7 +2497,7 @@ static int con_open(struct tty_struct *tty, struct file *filp) */ static void con_close(struct tty_struct *tty, struct file *filp) { - mutex_lock(&tty_mutex); + down(&tty_sem); acquire_console_sem(); if (tty && tty->count == 1) { struct vc_data *vc = tty->driver_data; @@ -2507,15 +2507,15 @@ static void con_close(struct tty_struct *tty, struct file *filp) tty->driver_data = NULL; release_console_sem(); vcs_remove_devfs(tty); - mutex_unlock(&tty_mutex); + up(&tty_sem); /* - * tty_mutex is released, but we still hold BKL, so there is + * tty_sem is released, but we still hold BKL, so there is * still exclusion against init_dev() */ return; } release_console_sem(); - mutex_unlock(&tty_mutex); + up(&tty_sem); } static void vc_init(struct vc_data *vc, unsigned int rows, @@ -2869,9 +2869,9 @@ void unblank_screen(void) } /* - * We defer the timer blanking to work queue so it can take the console mutex + * We defer the timer blanking to work queue so it can take the console semaphore * (console operations can still happen at irq time, but only from printk which - * has the console mutex. Not perfect yet, but better than no locking + * has the console semaphore. Not perfect yet, but better than no locking */ static void blank_screen_t(unsigned long dummy) { @@ -3234,14 +3234,6 @@ void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org) } } -int is_console_suspend_safe(void) -{ - /* It is unsafe to suspend devices while X has control of the - * hardware. Make sure we are running on a kernel-controlled console. - */ - return vc_cons[fg_console].d->vc_mode == KD_TEXT; -} - /* * Visible symbols for modules */ diff --git a/trunk/drivers/char/watchdog/pcwd_usb.c b/trunk/drivers/char/watchdog/pcwd_usb.c index 2700c5c45b8a..1533f56baa42 100644 --- a/trunk/drivers/char/watchdog/pcwd_usb.c +++ b/trunk/drivers/char/watchdog/pcwd_usb.c @@ -42,7 +42,6 @@ #include #include #include -#include #ifdef CONFIG_USB_DEBUG @@ -144,7 +143,7 @@ struct usb_pcwd_private { static struct usb_pcwd_private *usb_pcwd_device; /* prevent races between open() and disconnect() */ -static DEFINE_MUTEX(disconnect_mutex); +static DECLARE_MUTEX (disconnect_sem); /* local function prototypes */ static int usb_pcwd_probe (struct usb_interface *interface, const struct usb_device_id *id); @@ -724,7 +723,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface) struct usb_pcwd_private *usb_pcwd; /* prevent races with open() */ - mutex_lock(&disconnect_mutex); + down (&disconnect_sem); usb_pcwd = usb_get_intfdata (interface); usb_set_intfdata (interface, NULL); @@ -750,7 +749,7 @@ static void usb_pcwd_disconnect(struct usb_interface *interface) cards_found--; - mutex_unlock(&disconnect_mutex); + up (&disconnect_sem); printk(KERN_INFO PFX "USB PC Watchdog disconnected\n"); } diff --git a/trunk/drivers/connector/connector.c b/trunk/drivers/connector/connector.c index 35897079a78d..d7125f4d9113 100644 --- a/trunk/drivers/connector/connector.c +++ b/trunk/drivers/connector/connector.c @@ -26,7 +26,6 @@ #include #include #include -#include #include @@ -42,7 +41,7 @@ module_param(cn_val, uint, 0); MODULE_PARM_DESC(cn_idx, "Connector's main device idx."); MODULE_PARM_DESC(cn_val, "Connector's main device val."); -static DEFINE_MUTEX(notify_lock); +static DECLARE_MUTEX(notify_lock); static LIST_HEAD(notify_list); static struct cn_dev cdev; @@ -261,7 +260,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) { struct cn_ctl_entry *ent; - mutex_lock(&notify_lock); + down(&notify_lock); list_for_each_entry(ent,
&notify_list, notify_entry) { int i; struct cn_notify_req *req; @@ -294,7 +293,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event) cn_netlink_send(&m, ctl->group, GFP_KERNEL); } } - mutex_unlock(&notify_lock); + up(&notify_lock); } /* @@ -408,14 +407,14 @@ static void cn_callback(void *data) if (ctl->group == 0) { struct cn_ctl_entry *n; - mutex_lock(&notify_lock); + down(&notify_lock); list_for_each_entry_safe(ent, n, &notify_list, notify_entry) { if (cn_ctl_msg_equals(ent->msg, ctl)) { list_del(&ent->notify_entry); kfree(ent); } } - mutex_unlock(&notify_lock); + up(&notify_lock); return; } @@ -430,9 +429,9 @@ static void cn_callback(void *data) memcpy(ent->msg, ctl, size - sizeof(*ent)); - mutex_lock(&notify_lock); + down(&notify_lock); list_add(&ent->notify_entry, &notify_list); - mutex_unlock(&notify_lock); + up(&notify_lock); } static int __init cn_init(void) diff --git a/trunk/drivers/firmware/dcdbas.c b/trunk/drivers/firmware/dcdbas.c index d6543fc4a923..3a4e5c5b4e1f 100644 --- a/trunk/drivers/firmware/dcdbas.c +++ b/trunk/drivers/firmware/dcdbas.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include @@ -49,7 +48,7 @@ static u8 *smi_data_buf; static dma_addr_t smi_data_buf_handle; static unsigned long smi_data_buf_size; static u32 smi_data_buf_phys_addr; -static DEFINE_MUTEX(smi_data_lock); +static DECLARE_MUTEX(smi_data_lock); static unsigned int host_control_action; static unsigned int host_control_smi_type; @@ -140,9 +139,9 @@ static ssize_t smi_data_buf_size_store(struct device *dev, buf_size = simple_strtoul(buf, NULL, 10); /* make sure SMI data buffer is at least buf_size */ - mutex_lock(&smi_data_lock); + down(&smi_data_lock); ret = smi_data_buf_realloc(buf_size); - mutex_unlock(&smi_data_lock); + up(&smi_data_lock); if (ret) return ret; @@ -155,7 +154,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos, size_t max_read; ssize_t ret; - mutex_lock(&smi_data_lock); + down(&smi_data_lock); if (pos >= smi_data_buf_size) { ret = 0; @@ -166,7 +165,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos, ret = min(max_read, count); memcpy(buf, smi_data_buf + pos, ret); out: - mutex_unlock(&smi_data_lock); + up(&smi_data_lock); return ret; } @@ -175,7 +174,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos, { ssize_t ret; - mutex_lock(&smi_data_lock); + down(&smi_data_lock); ret = smi_data_buf_realloc(pos + count); if (ret) @@ -184,7 +183,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos, memcpy(smi_data_buf + pos, buf, count); ret = count; out: - mutex_unlock(&smi_data_lock); + up(&smi_data_lock); return ret; } @@ -202,9 +201,9 @@ static ssize_t host_control_action_store(struct device *dev, ssize_t ret; /* make sure buffer is available for host control command */ - mutex_lock(&smi_data_lock); + down(&smi_data_lock); ret = smi_data_buf_realloc(sizeof(struct apm_cmd)); - mutex_unlock(&smi_data_lock); + up(&smi_data_lock); if (ret) return ret; @@ -303,7 +302,7 @@ static ssize_t smi_request_store(struct device *dev, unsigned long val = simple_strtoul(buf, NULL, 10); ssize_t ret; - mutex_lock(&smi_data_lock); + down(&smi_data_lock); if (smi_data_buf_size < sizeof(struct smi_cmd)) { ret = -ENODEV; @@ -335,7 +334,7 @@ static ssize_t smi_request_store(struct device *dev, } out: - mutex_unlock(&smi_data_lock); + up(&smi_data_lock); return ret; } diff --git a/trunk/drivers/ide/ide-cd.c b/trunk/drivers/ide/ide-cd.c index c7671e188017..3325660f7248 100644 --- a/trunk/drivers/ide/ide-cd.c +++
b/trunk/drivers/ide/ide-cd.c @@ -313,7 +313,6 @@ #include #include #include -#include #include /* For SCSI -> ATAPI command conversion */ @@ -325,7 +324,7 @@ #include "ide-cd.h" -static DEFINE_MUTEX(idecd_ref_mutex); +static DECLARE_MUTEX(idecd_ref_sem); #define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref) @@ -336,11 +335,11 @@ static struct cdrom_info *ide_cd_get(struct gendisk *disk) { struct cdrom_info *cd = NULL; - mutex_lock(&idecd_ref_mutex); + down(&idecd_ref_sem); cd = ide_cd_g(disk); if (cd) kref_get(&cd->kref); - mutex_unlock(&idecd_ref_mutex); + up(&idecd_ref_sem); return cd; } @@ -348,9 +347,9 @@ static void ide_cd_release(struct kref *); static void ide_cd_put(struct cdrom_info *cd) { - mutex_lock(&idecd_ref_mutex); + down(&idecd_ref_sem); kref_put(&cd->kref, ide_cd_release); - mutex_unlock(&idecd_ref_mutex); + up(&idecd_ref_sem); } /**************************************************************************** @@ -2471,6 +2470,52 @@ static int ide_cdrom_packet(struct cdrom_device_info *cdi, return cgc->stat; } +static +int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi, + unsigned int cmd, unsigned long arg) +{ + struct packet_command cgc; + char buffer[16]; + int stat; + + init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); + + /* These will be moved into the Uniform layer shortly... */ + switch (cmd) { + case CDROMSETSPINDOWN: { + char spindown; + + if (copy_from_user(&spindown, (void __user *) arg, sizeof(char))) + return -EFAULT; + + if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0))) + return stat; + + buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f); + + return cdrom_mode_select(cdi, &cgc); + } + + case CDROMGETSPINDOWN: { + char spindown; + + if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0))) + return stat; + + spindown = buffer[11] & 0x0f; + + if (copy_to_user((void __user *) arg, &spindown, sizeof (char))) + return -EFAULT; + + return 0; + } + + default: + return -EINVAL; + } + +} + static int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi, unsigned int cmd, void *arg) @@ -2807,11 +2852,12 @@ static struct cdrom_device_ops ide_cdrom_dops = { .get_mcn = ide_cdrom_get_mcn, .reset = ide_cdrom_reset, .audio_ioctl = ide_cdrom_audio_ioctl, + .dev_ioctl = ide_cdrom_dev_ioctl, .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | - CDC_DRIVE_STATUS | CDC_CD_R | + CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM | CDC_GENERIC_PACKET | CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM, @@ -3321,45 +3367,6 @@ static int idecd_release(struct inode * inode, struct file * file) return 0; } -static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg) -{ - struct packet_command cgc; - char buffer[16]; - int stat; - char spindown; - - if (copy_from_user(&spindown, (void __user *)arg, sizeof(char))) - return -EFAULT; - - init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); - - stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); - if (stat) - return stat; - - buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f); - return cdrom_mode_select(cdi, &cgc); -} - -static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg) -{ - struct packet_command cgc; - char buffer[16]; - int stat; - char spindown; - - init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); - - stat = cdrom_mode_sense(cdi, &cgc, 
GPMODE_CDROM_PAGE, 0); - if (stat) - return stat; - - spindown = buffer[11] & 0x0f; - if (copy_to_user((void __user *)arg, &spindown, sizeof (char))) - return -EFAULT; - return 0; -} - static int idecd_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { @@ -3367,16 +3374,7 @@ static int idecd_ioctl (struct inode *inode, struct file *file, struct cdrom_info *info = ide_cd_g(bdev->bd_disk); int err; - switch (cmd) { - case CDROMSETSPINDOWN: - return idecd_set_spindown(&info->devinfo, arg); - case CDROMGETSPINDOWN: - return idecd_get_spindown(&info->devinfo, arg); - default: - break; - } - - err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); + err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg); if (err == -EINVAL) err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg); diff --git a/trunk/drivers/ide/ide-disk.c b/trunk/drivers/ide/ide-disk.c index e238b7da824b..09086b8b6486 100644 --- a/trunk/drivers/ide/ide-disk.c +++ b/trunk/drivers/ide/ide-disk.c @@ -60,7 +60,6 @@ #include #include #include -#include #define _IDE_DISK @@ -79,7 +78,7 @@ struct ide_disk_obj { struct kref kref; }; -static DEFINE_MUTEX(idedisk_ref_mutex); +static DECLARE_MUTEX(idedisk_ref_sem); #define to_ide_disk(obj) container_of(obj, struct ide_disk_obj, kref) @@ -90,11 +89,11 @@ static struct ide_disk_obj *ide_disk_get(struct gendisk *disk) { struct ide_disk_obj *idkp = NULL; - mutex_lock(&idedisk_ref_mutex); + down(&idedisk_ref_sem); idkp = ide_disk_g(disk); if (idkp) kref_get(&idkp->kref); - mutex_unlock(&idedisk_ref_mutex); + up(&idedisk_ref_sem); return idkp; } @@ -102,9 +101,9 @@ static void ide_disk_release(struct kref *); static void ide_disk_put(struct ide_disk_obj *idkp) { - mutex_lock(&idedisk_ref_mutex); + down(&idedisk_ref_sem); kref_put(&idkp->kref, ide_disk_release); - mutex_unlock(&idedisk_ref_mutex); + up(&idedisk_ref_sem); } /* diff --git a/trunk/drivers/ide/ide-floppy.c b/trunk/drivers/ide/ide-floppy.c index a53e3ce4a142..1f8db9ac05d1 100644 --- a/trunk/drivers/ide/ide-floppy.c +++ b/trunk/drivers/ide/ide-floppy.c @@ -98,7 +98,6 @@ #include #include #include -#include #include #include @@ -518,7 +517,7 @@ typedef struct { u8 reserved[4]; } idefloppy_mode_parameter_header_t; -static DEFINE_MUTEX(idefloppy_ref_mutex); +static DECLARE_MUTEX(idefloppy_ref_sem); #define to_ide_floppy(obj) container_of(obj, struct ide_floppy_obj, kref) @@ -529,11 +528,11 @@ static struct ide_floppy_obj *ide_floppy_get(struct gendisk *disk) { struct ide_floppy_obj *floppy = NULL; - mutex_lock(&idefloppy_ref_mutex); + down(&idefloppy_ref_sem); floppy = ide_floppy_g(disk); if (floppy) kref_get(&floppy->kref); - mutex_unlock(&idefloppy_ref_mutex); + up(&idefloppy_ref_sem); return floppy; } @@ -541,9 +540,9 @@ static void ide_floppy_release(struct kref *); static void ide_floppy_put(struct ide_floppy_obj *floppy) { - mutex_lock(&idefloppy_ref_mutex); + down(&idefloppy_ref_sem); kref_put(&floppy->kref, ide_floppy_release); - mutex_unlock(&idefloppy_ref_mutex); + up(&idefloppy_ref_sem); } /* diff --git a/trunk/drivers/ide/ide-tape.c b/trunk/drivers/ide/ide-tape.c index ebc59064b475..0101d0def7c5 100644 --- a/trunk/drivers/ide/ide-tape.c +++ b/trunk/drivers/ide/ide-tape.c @@ -443,7 +443,6 @@ #include #include #include -#include #include #include @@ -1012,7 +1011,7 @@ typedef struct ide_tape_obj { int debug_level; } idetape_tape_t; -static DEFINE_MUTEX(idetape_ref_mutex); +static DECLARE_MUTEX(idetape_ref_sem); static struct class *idetape_sysfs_class; @@ -1025,11 +1024,11 @@ static 
struct ide_tape_obj *ide_tape_get(struct gendisk *disk) { struct ide_tape_obj *tape = NULL; - mutex_lock(&idetape_ref_mutex); + down(&idetape_ref_sem); tape = ide_tape_g(disk); if (tape) kref_get(&tape->kref); - mutex_unlock(&idetape_ref_mutex); + up(&idetape_ref_sem); return tape; } @@ -1037,9 +1036,9 @@ static void ide_tape_release(struct kref *); static void ide_tape_put(struct ide_tape_obj *tape) { - mutex_lock(&idetape_ref_mutex); + down(&idetape_ref_sem); kref_put(&tape->kref, ide_tape_release); - mutex_unlock(&idetape_ref_mutex); + up(&idetape_ref_sem); } /* @@ -1291,11 +1290,11 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i) { struct ide_tape_obj *tape = NULL; - mutex_lock(&idetape_ref_mutex); + down(&idetape_ref_sem); tape = idetape_devs[i]; if (tape) kref_get(&tape->kref); - mutex_unlock(&idetape_ref_mutex); + up(&idetape_ref_sem); return tape; } @@ -4871,11 +4870,11 @@ static int ide_tape_probe(ide_drive_t *drive) drive->driver_data = tape; - mutex_lock(&idetape_ref_mutex); + down(&idetape_ref_sem); for (minor = 0; idetape_devs[minor]; minor++) ; idetape_devs[minor] = tape; - mutex_unlock(&idetape_ref_mutex); + up(&idetape_ref_sem); idetape_setup(drive, tape, minor); diff --git a/trunk/drivers/isdn/capi/kcapi.c b/trunk/drivers/isdn/capi/kcapi.c index 8c4fcb9027b3..feec40cf5900 100644 --- a/trunk/drivers/isdn/capi/kcapi.c +++ b/trunk/drivers/isdn/capi/kcapi.c @@ -32,7 +32,6 @@ #ifdef CONFIG_AVMB1_COMPAT #include #endif -#include static char *revision = "$Revision: 1.1.2.8 $"; @@ -67,7 +66,7 @@ LIST_HEAD(capi_drivers); DEFINE_RWLOCK(capi_drivers_list_lock); static DEFINE_RWLOCK(application_lock); -static DEFINE_MUTEX(controller_mutex); +static DECLARE_MUTEX(controller_sem); struct capi20_appl *capi_applications[CAPI_MAXAPPL]; struct capi_ctr *capi_cards[CAPI_MAXCONTR]; @@ -396,20 +395,20 @@ attach_capi_ctr(struct capi_ctr *card) { int i; - mutex_lock(&controller_mutex); + down(&controller_sem); for (i = 0; i < CAPI_MAXCONTR; i++) { if (capi_cards[i] == NULL) break; } if (i == CAPI_MAXCONTR) { - mutex_unlock(&controller_mutex); + up(&controller_sem); printk(KERN_ERR "kcapi: out of controller slots\n"); return -EBUSY; } capi_cards[i] = card; - mutex_unlock(&controller_mutex); + up(&controller_sem); card->nrecvctlpkt = 0; card->nrecvdatapkt = 0; @@ -532,13 +531,13 @@ u16 capi20_register(struct capi20_appl *ap) write_unlock_irqrestore(&application_lock, flags); - mutex_lock(&controller_mutex); + down(&controller_sem); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) continue; register_appl(capi_cards[i], applid, &ap->rparam); } - mutex_unlock(&controller_mutex); + up(&controller_sem); if (showcapimsgs & 1) { printk(KERN_DEBUG "kcapi: appl %d up\n", applid); @@ -561,13 +560,13 @@ u16 capi20_release(struct capi20_appl *ap) capi_applications[ap->applid - 1] = NULL; write_unlock_irqrestore(&application_lock, flags); - mutex_lock(&controller_mutex); + down(&controller_sem); for (i = 0; i < CAPI_MAXCONTR; i++) { if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING) continue; release_appl(capi_cards[i], ap->applid); } - mutex_unlock(&controller_mutex); + up(&controller_sem); flush_scheduled_work(); skb_queue_purge(&ap->recv_queue); diff --git a/trunk/drivers/isdn/hisax/config.c b/trunk/drivers/isdn/hisax/config.c index 27332506f9f7..df9d65201819 100644 --- a/trunk/drivers/isdn/hisax/config.c +++ b/trunk/drivers/isdn/hisax/config.c @@ -25,6 +25,7 @@ #include #include #define HISAX_STATUS_BUFSIZE 4096 +#define 
INCLUDE_INLINE_FUNCS /* * This structure array contains one entry per card. An entry looks diff --git a/trunk/drivers/isdn/hisax/elsa.c b/trunk/drivers/isdn/hisax/elsa.c index f8ca4b323331..110e9fd669c5 100644 --- a/trunk/drivers/isdn/hisax/elsa.c +++ b/trunk/drivers/isdn/hisax/elsa.c @@ -108,6 +108,7 @@ static const char *ITACVer[] = #define ELSA_ASSIGN 4 #define RS_ISR_PASS_LIMIT 256 +#define _INLINE_ inline #define FLG_MODEM_ACTIVE 1 /* IPAC AUX */ #define ELSA_IPAC_LINE_LED 0x40 /* Bit 6 Gelbe LED */ diff --git a/trunk/drivers/net/loopback.c b/trunk/drivers/net/loopback.c index 0c13795dca38..690a1aae0b34 100644 --- a/trunk/drivers/net/loopback.c +++ b/trunk/drivers/net/loopback.c @@ -172,9 +172,11 @@ static struct net_device_stats *get_stats(struct net_device *dev) memset(stats, 0, sizeof(struct net_device_stats)); - for_each_cpu(i) { + for (i=0; i < NR_CPUS; i++) { struct net_device_stats *lb_stats; + if (!cpu_possible(i)) + continue; lb_stats = &per_cpu(loopback_stats, i); stats->rx_bytes += lb_stats->rx_bytes; stats->tx_bytes += lb_stats->tx_bytes; diff --git a/trunk/drivers/net/ppp_generic.c b/trunk/drivers/net/ppp_generic.c index b2073fce8216..f608c12e3e8b 100644 --- a/trunk/drivers/net/ppp_generic.c +++ b/trunk/drivers/net/ppp_generic.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include @@ -199,11 +198,11 @@ static unsigned int cardmap_find_first_free(struct cardmap *map); static void cardmap_destroy(struct cardmap **map); /* - * all_ppp_mutex protects the all_ppp_units mapping. + * all_ppp_sem protects the all_ppp_units mapping. * It also ensures that finding a ppp unit in the all_ppp_units map * and updating its file.refcnt field is atomic. */ -static DEFINE_MUTEX(all_ppp_mutex); +static DECLARE_MUTEX(all_ppp_sem); static struct cardmap *all_ppp_units; static atomic_t ppp_unit_count = ATOMIC_INIT(0); @@ -805,7 +804,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, /* Attach to an existing ppp unit */ if (get_user(unit, p)) break; - mutex_lock(&all_ppp_mutex); + down(&all_ppp_sem); err = -ENXIO; ppp = ppp_find_unit(unit); if (ppp != 0) { @@ -813,7 +812,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file, file->private_data = &ppp->file; err = 0; } - mutex_unlock(&all_ppp_mutex); + up(&all_ppp_sem); break; case PPPIOCATTCHAN: @@ -2447,7 +2446,7 @@ ppp_create_interface(int unit, int *retp) dev->do_ioctl = ppp_net_ioctl; ret = -EEXIST; - mutex_lock(&all_ppp_mutex); + down(&all_ppp_sem); if (unit < 0) unit = cardmap_find_first_free(all_ppp_units); else if (cardmap_get(all_ppp_units, unit) != NULL) @@ -2466,12 +2465,12 @@ ppp_create_interface(int unit, int *retp) atomic_inc(&ppp_unit_count); cardmap_set(&all_ppp_units, unit, ppp); - mutex_unlock(&all_ppp_mutex); + up(&all_ppp_sem); *retp = 0; return ppp; out2: - mutex_unlock(&all_ppp_mutex); + up(&all_ppp_sem); free_netdev(dev); out1: kfree(ppp); @@ -2501,7 +2500,7 @@ static void ppp_shutdown_interface(struct ppp *ppp) { struct net_device *dev; - mutex_lock(&all_ppp_mutex); + down(&all_ppp_sem); ppp_lock(ppp); dev = ppp->dev; ppp->dev = NULL; @@ -2515,7 +2514,7 @@ static void ppp_shutdown_interface(struct ppp *ppp) ppp->file.dead = 1; ppp->owner = NULL; wake_up_interruptible(&ppp->file.rwait); - mutex_unlock(&all_ppp_mutex); + up(&all_ppp_sem); } /* @@ -2557,7 +2556,7 @@ static void ppp_destroy_interface(struct ppp *ppp) /* * Locate an existing ppp unit. - * The caller should have locked the all_ppp_mutex. + * The caller should have locked the all_ppp_sem. 
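/*
 * The comment above states the invariant the ppp_generic.c hunks restore:
 * finding a unit in the map and bumping its refcount must happen under one
 * lock, or the unit could be freed in between.  A self-contained sketch;
 * demo_unit, demo_table and demo_unit_get() are hypothetical names.
 */

#include <asm/semaphore.h>

struct demo_unit {
	int refcnt;
};

static DECLARE_MUTEX(demo_table_sem);	/* plays the role of all_ppp_sem */
static struct demo_unit *demo_table[16];

static struct demo_unit *demo_unit_get(int n)
{
	struct demo_unit *u = NULL;

	down(&demo_table_sem);
	if (n >= 0 && n < 16)
		u = demo_table[n];
	if (u)
		u->refcnt++;		/* bump before dropping the lock */
	up(&demo_table_sem);
	return u;
}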
*/ static struct ppp * ppp_find_unit(int unit) @@ -2602,7 +2601,7 @@ ppp_connect_channel(struct channel *pch, int unit) int ret = -ENXIO; int hdrlen; - mutex_lock(&all_ppp_mutex); + down(&all_ppp_sem); ppp = ppp_find_unit(unit); if (ppp == 0) goto out; @@ -2627,7 +2626,7 @@ ppp_connect_channel(struct channel *pch, int unit) outl: write_unlock_bh(&pch->upl); out: - mutex_unlock(&all_ppp_mutex); + up(&all_ppp_sem); return ret; } diff --git a/trunk/drivers/oprofile/cpu_buffer.c b/trunk/drivers/oprofile/cpu_buffer.c index 330d3869b41e..78193e4bbdb5 100644 --- a/trunk/drivers/oprofile/cpu_buffer.c +++ b/trunk/drivers/oprofile/cpu_buffer.c @@ -38,8 +38,9 @@ void free_cpu_buffers(void) { int i; - for_each_online_cpu(i) + for_each_online_cpu(i) { vfree(cpu_buffer[i].buffer); + } } int alloc_cpu_buffers(void) diff --git a/trunk/drivers/pnp/pnpbios/rsparser.c b/trunk/drivers/pnp/pnpbios/rsparser.c index c89c98a2cca8..5e38cd7335f7 100644 --- a/trunk/drivers/pnp/pnpbios/rsparser.c +++ b/trunk/drivers/pnp/pnpbios/rsparser.c @@ -448,7 +448,11 @@ pnpbios_parse_resource_option_data(unsigned char * p, unsigned char * end, struc break; case SMALL_TAG_END: - return p + 2; + if (option_independent != option) + printk(KERN_WARNING "PnPBIOS: Missing SMALL_TAG_ENDDEP tag\n"); + p = p + 2; + return (unsigned char *)p; + break; default: /* an unkown tag */ len_err: diff --git a/trunk/drivers/s390/block/dasd_ioctl.c b/trunk/drivers/s390/block/dasd_ioctl.c index f9930552ab54..fafeeae52675 100644 --- a/trunk/drivers/s390/block/dasd_ioctl.c +++ b/trunk/drivers/s390/block/dasd_ioctl.c @@ -151,9 +151,9 @@ dasd_ioctl_enable(struct block_device *bdev, int no, long args) return -ENODEV; dasd_enable_device(device); /* Formatting the dasd device can change the capacity. */ - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return 0; } @@ -184,9 +184,9 @@ dasd_ioctl_disable(struct block_device *bdev, int no, long args) * Set i_size to zero, since read, write, etc. check against this * value. 
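/*
 * Both dasd_ioctl.c hunks serialize capacity changes on the block device
 * with bd_sem, so that i_size_write() never races with a concurrent reader
 * of i_size.  A sketch of that pattern, assuming the pre-mutex
 * struct block_device with a bd_sem member; demo_set_capacity() is a
 * hypothetical helper.
 */

#include <linux/fs.h>
#include <asm/semaphore.h>

static void demo_set_capacity(struct block_device *bdev, loff_t bytes)
{
	down(&bdev->bd_sem);		/* serialize i_size updates */
	i_size_write(bdev->bd_inode, bytes);
	up(&bdev->bd_sem);
}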
*/ - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); i_size_write(bdev->bd_inode, 0); - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); return 0; } diff --git a/trunk/drivers/scsi/ide-scsi.c b/trunk/drivers/scsi/ide-scsi.c index 39b760a24241..0cf0e4c7ac0c 100644 --- a/trunk/drivers/scsi/ide-scsi.c +++ b/trunk/drivers/scsi/ide-scsi.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include @@ -110,7 +109,7 @@ typedef struct ide_scsi_obj { unsigned long log; /* log flags */ } idescsi_scsi_t; -static DEFINE_MUTEX(idescsi_ref_mutex); +static DECLARE_MUTEX(idescsi_ref_sem); #define ide_scsi_g(disk) \ container_of((disk)->private_data, struct ide_scsi_obj, driver) @@ -119,19 +118,19 @@ static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk) { struct ide_scsi_obj *scsi = NULL; - mutex_lock(&idescsi_ref_mutex); + down(&idescsi_ref_sem); scsi = ide_scsi_g(disk); if (scsi) scsi_host_get(scsi->host); - mutex_unlock(&idescsi_ref_mutex); + up(&idescsi_ref_sem); return scsi; } static void ide_scsi_put(struct ide_scsi_obj *scsi) { - mutex_lock(&idescsi_ref_mutex); + down(&idescsi_ref_sem); scsi_host_put(scsi->host); - mutex_unlock(&idescsi_ref_mutex); + up(&idescsi_ref_sem); } static inline idescsi_scsi_t *scsihost_to_idescsi(struct Scsi_Host *host) diff --git a/trunk/drivers/scsi/sr.c b/trunk/drivers/scsi/sr.c index 7c80711e18ed..f9c1192dc15e 100644 --- a/trunk/drivers/scsi/sr.c +++ b/trunk/drivers/scsi/sr.c @@ -71,7 +71,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_CDROM_MAJOR); #define SR_CAPABILITIES \ (CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \ CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \ - CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \ + CDC_PLAY_AUDIO|CDC_RESET|CDC_IOCTLS|CDC_DRIVE_STATUS| \ CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ CDC_MRW|CDC_MRW_W|CDC_RAM) @@ -118,6 +118,7 @@ static struct cdrom_device_ops sr_dops = { .get_mcn = sr_get_mcn, .reset = sr_reset, .audio_ioctl = sr_audio_ioctl, + .dev_ioctl = sr_dev_ioctl, .capability = SR_CAPABILITIES, .generic_packet = sr_packet, }; @@ -455,33 +456,17 @@ static int sr_block_ioctl(struct inode *inode, struct file *file, unsigned cmd, { struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk); struct scsi_device *sdev = cd->device; - void __user *argp = (void __user *)arg; - int ret; - /* - * Send SCSI addressing ioctls directly to mid level, send other - * ioctls to cdrom/block level. - */ - switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - return scsi_ioctl(sdev, cmd, argp); + /* + * Send SCSI addressing ioctls directly to mid level, send other + * ioctls to cdrom/block level. + */ + switch (cmd) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + return scsi_ioctl(sdev, cmd, (void __user *)arg); } - - ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); - if (ret != ENOSYS) - return ret; - - /* - * ENODEV means that we didn't recognise the ioctl, or that we - * cannot execute it in the current device state. 
In either - * case fall through to scsi_ioctl, which will return ENDOEV again - * if it doesn't recognise the ioctl - */ - ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL); - if (ret != -ENODEV) - return ret; - return scsi_ioctl(sdev, cmd, argp); + return cdrom_ioctl(file, &cd->cdi, inode, cmd, arg); } static int sr_block_media_changed(struct gendisk *disk) diff --git a/trunk/drivers/scsi/sr.h b/trunk/drivers/scsi/sr.h index d65de9621b27..d2bcd99c272f 100644 --- a/trunk/drivers/scsi/sr.h +++ b/trunk/drivers/scsi/sr.h @@ -55,6 +55,7 @@ int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *); int sr_reset(struct cdrom_device_info *); int sr_select_speed(struct cdrom_device_info *cdi, int speed); int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); +int sr_dev_ioctl(struct cdrom_device_info *, unsigned int, unsigned long); int sr_is_xa(Scsi_CD *); diff --git a/trunk/drivers/scsi/sr_ioctl.c b/trunk/drivers/scsi/sr_ioctl.c index d1268cb46837..b65462f76484 100644 --- a/trunk/drivers/scsi/sr_ioctl.c +++ b/trunk/drivers/scsi/sr_ioctl.c @@ -562,3 +562,22 @@ int sr_is_xa(Scsi_CD *cd) #endif return is_xa; } + +int sr_dev_ioctl(struct cdrom_device_info *cdi, + unsigned int cmd, unsigned long arg) +{ + Scsi_CD *cd = cdi->handle; + int ret; + + ret = scsi_nonblockable_ioctl(cd->device, cmd, + (void __user *)arg, NULL); + /* + * ENODEV means that we didn't recognise the ioctl, or that we + * cannot execute it in the current device state. In either + * case fall through to scsi_ioctl, which will return ENDOEV again + * if it doesn't recognise the ioctl + */ + if (ret != -ENODEV) + return ret; + return scsi_ioctl(cd->device, cmd, (void __user *)arg); +} diff --git a/trunk/drivers/serial/68328serial.c b/trunk/drivers/serial/68328serial.c index b88a7c1158af..7f0f35a05dca 100644 --- a/trunk/drivers/serial/68328serial.c +++ b/trunk/drivers/serial/68328serial.c @@ -101,6 +101,8 @@ struct tty_driver *serial_driver; #define RS_ISR_PASS_LIMIT 256 +#define _INLINE_ inline + static void change_speed(struct m68k_serial *info); /* @@ -260,7 +262,7 @@ static void batten_down_hatches(void) /* Drop into the debugger */ } -static void status_handle(struct m68k_serial *info, unsigned short status) +static _INLINE_ void status_handle(struct m68k_serial *info, unsigned short status) { #if 0 if(status & DCD) { @@ -287,8 +289,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status) return; } -static void receive_chars(struct m68k_serial *info, struct pt_regs *regs, - unsigned short rx) +static _INLINE_ void receive_chars(struct m68k_serial *info, struct pt_regs *regs, unsigned short rx) { struct tty_struct *tty = info->tty; m68328_uart *uart = &uart_addr[info->line]; @@ -358,7 +359,7 @@ static void receive_chars(struct m68k_serial *info, struct pt_regs *regs, return; } -static void transmit_chars(struct m68k_serial *info) +static _INLINE_ void transmit_chars(struct m68k_serial *info) { m68328_uart *uart = &uart_addr[info->line]; diff --git a/trunk/drivers/serial/au1x00_uart.c b/trunk/drivers/serial/au1x00_uart.c index 948880ac5878..29f94bbb79be 100644 --- a/trunk/drivers/serial/au1x00_uart.c +++ b/trunk/drivers/serial/au1x00_uart.c @@ -133,12 +133,13 @@ static const struct serial_uart_config uart_config[PORT_MAX_8250+1] = { { "AU1X00_UART",16, UART_CLEAR_FIFO | UART_USE_FIFO }, }; -static unsigned int serial_in(struct uart_8250_port *up, int offset) +static _INLINE_ unsigned int serial_in(struct uart_8250_port *up, int offset) { return au_readl((unsigned long)up->port.membase 
+ offset); } -static void serial_out(struct uart_8250_port *up, int offset, int value) +static _INLINE_ void +serial_out(struct uart_8250_port *up, int offset, int value) { au_writel(value, (unsigned long)up->port.membase + offset); } @@ -236,7 +237,7 @@ static void serial8250_enable_ms(struct uart_port *port) serial_out(up, UART_IER, up->ier); } -static void +static _INLINE_ void receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs) { struct tty_struct *tty = up->port.info->tty; @@ -311,7 +312,7 @@ receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs) spin_lock(&up->port.lock); } -static void transmit_chars(struct uart_8250_port *up) +static _INLINE_ void transmit_chars(struct uart_8250_port *up) { struct circ_buf *xmit = &up->port.info->xmit; int count; @@ -345,7 +346,7 @@ static void transmit_chars(struct uart_8250_port *up) serial8250_stop_tx(&up->port); } -static void check_modem_status(struct uart_8250_port *up) +static _INLINE_ void check_modem_status(struct uart_8250_port *up) { int status; diff --git a/trunk/drivers/serial/crisv10.c b/trunk/drivers/serial/crisv10.c index 89700141f87e..be12623d8544 100644 --- a/trunk/drivers/serial/crisv10.c +++ b/trunk/drivers/serial/crisv10.c @@ -481,6 +481,8 @@ static char *serial_version = "$Revision: 1.25 $"; #include "serial_compat.h" #endif +#define _INLINE_ inline + struct tty_driver *serial_driver; /* serial subtype definitions */ @@ -589,6 +591,8 @@ static void rs_throttle(struct tty_struct * tty); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); static int rs_write(struct tty_struct * tty, int from_user, const unsigned char *buf, int count); +extern _INLINE_ int rs_raw_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count); #ifdef CONFIG_ETRAX_RS485 static int e100_write_rs485(struct tty_struct * tty, int from_user, const unsigned char *buf, int count); @@ -1534,7 +1538,8 @@ e100_enable_rxdma_irq(struct e100_serial *info) /* the tx DMA uses only dma_descr interrupt */ -static void e100_disable_txdma_irq(struct e100_serial *info) +static _INLINE_ void +e100_disable_txdma_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("txdma_irq(%d): 0\n",info->line); @@ -1543,7 +1548,8 @@ static void e100_disable_txdma_irq(struct e100_serial *info) *R_IRQ_MASK2_CLR = info->irq; } -static void e100_enable_txdma_irq(struct e100_serial *info) +static _INLINE_ void +e100_enable_txdma_irq(struct e100_serial *info) { #ifdef SERIAL_DEBUG_INTR printk("txdma_irq(%d): 1\n",info->line); @@ -1552,7 +1558,8 @@ static void e100_enable_txdma_irq(struct e100_serial *info) *R_IRQ_MASK2_SET = info->irq; } -static void e100_disable_txdma_channel(struct e100_serial *info) +static _INLINE_ void +e100_disable_txdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1592,7 +1599,8 @@ static void e100_disable_txdma_channel(struct e100_serial *info) } -static void e100_enable_txdma_channel(struct e100_serial *info) +static _INLINE_ void +e100_enable_txdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1617,7 +1625,8 @@ static void e100_enable_txdma_channel(struct e100_serial *info) restore_flags(flags); } -static void e100_disable_rxdma_channel(struct e100_serial *info) +static _INLINE_ void +e100_disable_rxdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1656,7 +1665,8 @@ static void e100_disable_rxdma_channel(struct e100_serial *info) } -static void e100_enable_rxdma_channel(struct e100_serial *info) +static _INLINE_ void 
+e100_enable_rxdma_channel(struct e100_serial *info) { unsigned long flags; @@ -1903,7 +1913,9 @@ rs_start(struct tty_struct *tty) * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. */ -static void rs_sched_event(struct e100_serial *info, int event) +static _INLINE_ void +rs_sched_event(struct e100_serial *info, + int event) { if (info->event & (1 << event)) return; @@ -2143,9 +2155,8 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl return 1; } -static unsigned int handle_descr_data(struct e100_serial *info, - struct etrax_dma_descr *descr, - unsigned int recvl) +extern _INLINE_ unsigned int +handle_descr_data(struct e100_serial *info, struct etrax_dma_descr *descr, unsigned int recvl) { struct etrax_recv_buffer *buffer = phys_to_virt(descr->buf) - sizeof *buffer; @@ -2171,7 +2182,8 @@ static unsigned int handle_descr_data(struct e100_serial *info, return recvl; } -static unsigned int handle_all_descr_data(struct e100_serial *info) +static _INLINE_ unsigned int +handle_all_descr_data(struct e100_serial *info) { struct etrax_dma_descr *descr; unsigned int recvl; @@ -2218,7 +2230,8 @@ static unsigned int handle_all_descr_data(struct e100_serial *info) return ret; } -static void receive_chars_dma(struct e100_serial *info) +static _INLINE_ void +receive_chars_dma(struct e100_serial *info) { struct tty_struct *tty; unsigned char rstat; @@ -2279,7 +2292,8 @@ static void receive_chars_dma(struct e100_serial *info) *info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart); } -static int start_recv_dma(struct e100_serial *info) +static _INLINE_ int +start_recv_dma(struct e100_serial *info) { struct etrax_dma_descr *descr = info->rec_descr; struct etrax_recv_buffer *buffer; @@ -2334,6 +2348,11 @@ start_receive(struct e100_serial *info) } +static _INLINE_ void +status_handle(struct e100_serial *info, unsigned short status) +{ +} + /* the bits in the MASK2 register are laid out like this: DMAI_EOP DMAI_DESCR DMAO_EOP DMAO_DESCR where I is the input channel and O is the output channel for the port. @@ -2435,7 +2454,8 @@ rec_interrupt(int irq, void *dev_id, struct pt_regs * regs) return IRQ_RETVAL(handled); } /* rec_interrupt */ -static int force_eop_if_needed(struct e100_serial *info) +static _INLINE_ int +force_eop_if_needed(struct e100_serial *info) { /* We check data_avail bit to determine if data has * arrived since last time @@ -2479,7 +2499,8 @@ static int force_eop_if_needed(struct e100_serial *info) return 1; } -static void flush_to_flip_buffer(struct e100_serial *info) +extern _INLINE_ void +flush_to_flip_buffer(struct e100_serial *info) { struct tty_struct *tty; struct etrax_recv_buffer *buffer; @@ -2590,7 +2611,8 @@ static void flush_to_flip_buffer(struct e100_serial *info) tty_flip_buffer_push(tty); } -static void check_flush_timeout(struct e100_serial *info) +static _INLINE_ void +check_flush_timeout(struct e100_serial *info) { /* Flip what we've got (if we can) */ flush_to_flip_buffer(info); @@ -2719,7 +2741,7 @@ TODO: The break will be delayed until an F or V character is received. 
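/*
 * The serial-driver hunks in this patch restore the old _INLINE_
 * convention: a per-driver macro that expands to the inline keyword, so
 * hot helpers can be marked without repeating the keyword at each site.
 * A sketch of the convention; demo_reg_read() is a hypothetical accessor
 * in the style of serial_in().
 */

#define _INLINE_ inline

static _INLINE_ unsigned int demo_reg_read(volatile unsigned int *base,
					   int offset)
{
	return base[offset];	/* inlining is a hint, not a promise */
}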
*/ -static +extern _INLINE_ struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info) { unsigned long data_read; @@ -2853,7 +2875,8 @@ struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info) return info; } -static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) +extern _INLINE_ +struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) { unsigned char rstat; @@ -2972,7 +2995,7 @@ static struct e100_serial* handle_ser_rx_interrupt(struct e100_serial *info) return info; } /* handle_ser_rx_interrupt */ -static void handle_ser_tx_interrupt(struct e100_serial *info) +extern _INLINE_ void handle_ser_tx_interrupt(struct e100_serial *info) { unsigned long flags; @@ -3598,8 +3621,9 @@ rs_flush_chars(struct tty_struct *tty) restore_flags(flags); } -static int rs_raw_write(struct tty_struct * tty, int from_user, - const unsigned char *buf, int count) +extern _INLINE_ int +rs_raw_write(struct tty_struct * tty, int from_user, + const unsigned char *buf, int count) { int c, ret = 0; struct e100_serial *info = (struct e100_serial *)tty->driver_data; @@ -4686,7 +4710,7 @@ rs_open(struct tty_struct *tty, struct file * filp) * /proc fs routines.... */ -static int line_info(char *buf, struct e100_serial *info) +extern _INLINE_ int line_info(char *buf, struct e100_serial *info) { char stat_buf[30]; int ret; diff --git a/trunk/drivers/serial/m32r_sio.c b/trunk/drivers/serial/m32r_sio.c index e9c10c0a30fc..876bc5e027bb 100644 --- a/trunk/drivers/serial/m32r_sio.c +++ b/trunk/drivers/serial/m32r_sio.c @@ -248,17 +248,17 @@ static void sio_error(int *status) #endif /* CONFIG_SERIAL_M32R_PLDSIO */ -static unsigned int sio_in(struct uart_sio_port *up, int offset) +static _INLINE_ unsigned int sio_in(struct uart_sio_port *up, int offset) { return __sio_in(up->port.iobase + offset); } -static void sio_out(struct uart_sio_port *up, int offset, int value) +static _INLINE_ void sio_out(struct uart_sio_port *up, int offset, int value) { __sio_out(value, up->port.iobase + offset); } -static unsigned int serial_in(struct uart_sio_port *up, int offset) +static _INLINE_ unsigned int serial_in(struct uart_sio_port *up, int offset) { if (!offset) return 0; @@ -266,7 +266,8 @@ static unsigned int serial_in(struct uart_sio_port *up, int offset) return __sio_in(offset); } -static void serial_out(struct uart_sio_port *up, int offset, int value) +static _INLINE_ void +serial_out(struct uart_sio_port *up, int offset, int value) { if (!offset) return; @@ -325,8 +326,8 @@ static void m32r_sio_enable_ms(struct uart_port *port) serial_out(up, UART_IER, up->ier); } -static void receive_chars(struct uart_sio_port *up, int *status, - struct pt_regs *regs) +static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status, + struct pt_regs *regs) { struct tty_struct *tty = up->port.info->tty; unsigned char ch; @@ -399,7 +400,7 @@ static void receive_chars(struct uart_sio_port *up, int *status, tty_flip_buffer_push(tty); } -static void transmit_chars(struct uart_sio_port *up) +static _INLINE_ void transmit_chars(struct uart_sio_port *up) { struct circ_buf *xmit = &up->port.info->xmit; int count; diff --git a/trunk/drivers/serial/sunsu.c b/trunk/drivers/serial/sunsu.c index 9fe2283d91e5..7fc3d3b41d18 100644 --- a/trunk/drivers/serial/sunsu.c +++ b/trunk/drivers/serial/sunsu.c @@ -102,7 +102,9 @@ struct uart_sunsu_port { #endif }; -static unsigned int serial_in(struct uart_sunsu_port *up, int offset) +#define _INLINE_ + +static _INLINE_ unsigned int 
serial_in(struct uart_sunsu_port *up, int offset) { offset <<= up->port.regshift; @@ -119,7 +121,8 @@ static unsigned int serial_in(struct uart_sunsu_port *up, int offset) } } -static void serial_out(struct uart_sunsu_port *up, int offset, int value) +static _INLINE_ void +serial_out(struct uart_sunsu_port *up, int offset, int value) { #ifndef CONFIG_SPARC64 /* @@ -313,7 +316,7 @@ static void sunsu_enable_ms(struct uart_port *port) spin_unlock_irqrestore(&up->port.lock, flags); } -static struct tty_struct * +static _INLINE_ struct tty_struct * receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs *regs) { struct tty_struct *tty = up->port.info->tty; @@ -392,7 +395,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs return tty; } -static void transmit_chars(struct uart_sunsu_port *up) +static _INLINE_ void transmit_chars(struct uart_sunsu_port *up) { struct circ_buf *xmit = &up->port.info->xmit; int count; @@ -428,7 +431,7 @@ static void transmit_chars(struct uart_sunsu_port *up) __stop_tx(up); } -static void check_modem_status(struct uart_sunsu_port *up) +static _INLINE_ void check_modem_status(struct uart_sunsu_port *up) { int status; diff --git a/trunk/drivers/tc/zs.c b/trunk/drivers/tc/zs.c index 2dffa8e303b2..6756d0fab6fe 100644 --- a/trunk/drivers/tc/zs.c +++ b/trunk/drivers/tc/zs.c @@ -186,6 +186,8 @@ static struct tty_driver *serial_driver; #define RS_STROBE_TIME 10 #define RS_ISR_PASS_LIMIT 256 +#define _INLINE_ inline + static void probe_sccs(void); static void change_speed(struct dec_serial *info); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); @@ -342,13 +344,14 @@ static inline void rs_recv_clear(struct dec_zschannel *zsc) * This routine is used by the interrupt handler to schedule * processing in the software interrupt portion of the driver. 
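/*
 * The rs_sched_event() hunk that follows marks a helper whose whole job is
 * to record an event bit in the interrupt handler and defer the heavy
 * lifting to a tasklet.  A self-contained sketch of that split;
 * struct demo_port and demo_sched_event() are hypothetical names.
 */

#include <linux/interrupt.h>

struct demo_port {
	unsigned long event;
	struct tasklet_struct tlet;
};

static void demo_sched_event(struct demo_port *p, int event)
{
	p->event |= 1 << event;		/* note what happened, cheaply */
	tasklet_schedule(&p->tlet);	/* do the real work in softirq */
}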
*/ -static void rs_sched_event(struct dec_serial *info, int event) +static _INLINE_ void rs_sched_event(struct dec_serial *info, int event) { info->event |= 1 << event; tasklet_schedule(&info->tlet); } -static void receive_chars(struct dec_serial *info, struct pt_regs *regs) +static _INLINE_ void receive_chars(struct dec_serial *info, + struct pt_regs *regs) { struct tty_struct *tty = info->tty; unsigned char ch, stat, flag; @@ -438,7 +441,7 @@ static void transmit_chars(struct dec_serial *info) rs_sched_event(info, RS_EVENT_WRITE_WAKEUP); } -static void status_handle(struct dec_serial *info) +static _INLINE_ void status_handle(struct dec_serial *info) { unsigned char stat; diff --git a/trunk/fs/9p/mux.c b/trunk/fs/9p/mux.c index 8e8356c1c229..ea1134eb47c8 100644 --- a/trunk/fs/9p/mux.c +++ b/trunk/fs/9p/mux.c @@ -31,7 +31,6 @@ #include #include #include -#include #include "debug.h" #include "v9fs.h" @@ -111,7 +110,7 @@ static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16); -static DEFINE_MUTEX(v9fs_mux_task_lock); +static DECLARE_MUTEX(v9fs_mux_task_lock); static struct workqueue_struct *v9fs_mux_wq; static int v9fs_mux_num; @@ -167,7 +166,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num, v9fs_mux_poll_task_num); - mutex_lock(&v9fs_mux_task_lock); + up(&v9fs_mux_task_lock); n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1); if (n > v9fs_mux_poll_task_num) { @@ -226,7 +225,7 @@ static int v9fs_mux_poll_start(struct v9fs_mux_data *m) } v9fs_mux_num++; - mutex_unlock(&v9fs_mux_task_lock); + down(&v9fs_mux_task_lock); return 0; } @@ -236,7 +235,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) int i; struct v9fs_mux_poll_task *vpt; - mutex_lock(&v9fs_mux_task_lock); + up(&v9fs_mux_task_lock); vpt = m->poll_task; list_del(&m->mux_list); for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { @@ -253,7 +252,7 @@ static void v9fs_mux_poll_stop(struct v9fs_mux_data *m) v9fs_mux_poll_task_num--; } v9fs_mux_num--; - mutex_unlock(&v9fs_mux_task_lock); + down(&v9fs_mux_task_lock); } /** diff --git a/trunk/fs/adfs/file.c b/trunk/fs/adfs/file.c index 6af10885f9d6..afebbfde6968 100644 --- a/trunk/fs/adfs/file.c +++ b/trunk/fs/adfs/file.c @@ -19,7 +19,11 @@ * * adfs regular file handling primitives */ +#include #include +#include +#include +#include #include /* for file_fsync() */ #include diff --git a/trunk/fs/autofs4/autofs_i.h b/trunk/fs/autofs4/autofs_i.h index f54c5b21f876..385bed09b0d8 100644 --- a/trunk/fs/autofs4/autofs_i.h +++ b/trunk/fs/autofs4/autofs_i.h @@ -13,7 +13,6 @@ /* Internal header file for autofs */ #include -#include #include /* This is the range of ioctl() numbers we claim as ours */ @@ -103,7 +102,7 @@ struct autofs_sb_info { int reghost_enabled; int needs_reghost; struct super_block *sb; - struct mutex wq_mutex; + struct semaphore wq_sem; spinlock_t fs_lock; struct autofs_wait_queue *queues; /* Wait queue pointer */ }; diff --git a/trunk/fs/autofs4/inode.c b/trunk/fs/autofs4/inode.c index 1ad98d48e550..2d3082854a29 100644 --- a/trunk/fs/autofs4/inode.c +++ b/trunk/fs/autofs4/inode.c @@ -269,7 +269,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) sbi->sb = s; sbi->version = 0; sbi->sub_version = 0; - mutex_init(&sbi->wq_mutex); + init_MUTEX(&sbi->wq_sem); spin_lock_init(&sbi->fs_lock); sbi->queues = NULL; s->s_blocksize = 1024; diff --git 
a/trunk/fs/autofs4/waitq.c b/trunk/fs/autofs4/waitq.c index be78e9378c03..394ff36ef8f1 100644 --- a/trunk/fs/autofs4/waitq.c +++ b/trunk/fs/autofs4/waitq.c @@ -178,7 +178,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, return -ENOENT; } - if (mutex_lock_interruptible(&sbi->wq_mutex)) { + if (down_interruptible(&sbi->wq_sem)) { kfree(name); return -EINTR; } @@ -194,7 +194,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, /* Can't wait for an expire if there's no mount */ if (notify == NFY_NONE && !d_mountpoint(dentry)) { kfree(name); - mutex_unlock(&sbi->wq_mutex); + up(&sbi->wq_sem); return -ENOENT; } @@ -202,7 +202,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); if ( !wq ) { kfree(name); - mutex_unlock(&sbi->wq_mutex); + up(&sbi->wq_sem); return -ENOMEM; } @@ -218,10 +218,10 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, wq->status = -EINTR; /* Status return if interrupted */ atomic_set(&wq->wait_ctr, 2); atomic_set(&wq->notified, 1); - mutex_unlock(&sbi->wq_mutex); + up(&sbi->wq_sem); } else { atomic_inc(&wq->wait_ctr); - mutex_unlock(&sbi->wq_mutex); + up(&sbi->wq_sem); kfree(name); DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); @@ -282,19 +282,19 @@ int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_tok { struct autofs_wait_queue *wq, **wql; - mutex_lock(&sbi->wq_mutex); + down(&sbi->wq_sem); for ( wql = &sbi->queues ; (wq = *wql) != 0 ; wql = &wq->next ) { if ( wq->wait_queue_token == wait_queue_token ) break; } if ( !wq ) { - mutex_unlock(&sbi->wq_mutex); + up(&sbi->wq_sem); return -EINVAL; } *wql = wq->next; /* Unlink from chain */ - mutex_unlock(&sbi->wq_mutex); + up(&sbi->wq_sem); kfree(wq->name); wq->name = NULL; /* Do not wait on this queue */ diff --git a/trunk/fs/bio.c b/trunk/fs/bio.c index 8f1d2e815c96..1f3bb501c262 100644 --- a/trunk/fs/bio.c +++ b/trunk/fs/bio.c @@ -1243,11 +1243,11 @@ static int __init init_bio(void) scale = 4; /* - * Limit number of entries reserved -- mempools are only used when - * the system is completely unable to allocate memory, so we only - * need enough to make progress. 
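/*
 * The replacement code below sizes the reserved bvec pool from installed
 * memory and clamps it to a fixed ceiling.  A standalone sketch of that
 * computation; demo_pool_entries() is a hypothetical name and megabytes
 * is the machine's RAM in MB.
 */

static unsigned int demo_pool_entries(unsigned int megabytes)
{
	unsigned int entries = megabytes * 2;	/* scale with system RAM */

	if (entries > 256)			/* hard upper bound */
		entries = 256;
	return entries;
}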
+ * scale number of entries */ - bvec_pool_entries = 1 + scale; + bvec_pool_entries = megabytes * 2; + if (bvec_pool_entries > 256) + bvec_pool_entries = 256; fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale); if (!fs_bio_set) diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 44d05e6e34db..6e50346fb1ee 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -265,8 +265,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) SLAB_CTOR_CONSTRUCTOR) { memset(bdev, 0, sizeof(*bdev)); - mutex_init(&bdev->bd_mutex); - mutex_init(&bdev->bd_mount_mutex); + sema_init(&bdev->bd_sem, 1); + sema_init(&bdev->bd_mount_sem, 1); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); inode_init_once(&ei->vfs_inode); @@ -574,7 +574,7 @@ static int do_open(struct block_device *bdev, struct file *file) } owner = disk->fops->owner; - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); if (!bdev->bd_openers) { bdev->bd_disk = disk; bdev->bd_contains = bdev; @@ -605,21 +605,21 @@ static int do_open(struct block_device *bdev, struct file *file) if (ret) goto out_first; bdev->bd_contains = whole; - mutex_lock(&whole->bd_mutex); + down(&whole->bd_sem); whole->bd_part_count++; p = disk->part[part - 1]; bdev->bd_inode->i_data.backing_dev_info = whole->bd_inode->i_data.backing_dev_info; if (!(disk->flags & GENHD_FL_UP) || !p || !p->nr_sects) { whole->bd_part_count--; - mutex_unlock(&whole->bd_mutex); + up(&whole->bd_sem); ret = -ENXIO; goto out_first; } kobject_get(&p->kobj); bdev->bd_part = p; bd_set_size(bdev, (loff_t) p->nr_sects << 9); - mutex_unlock(&whole->bd_mutex); + up(&whole->bd_sem); } } else { put_disk(disk); @@ -633,13 +633,13 @@ static int do_open(struct block_device *bdev, struct file *file) if (bdev->bd_invalidated) rescan_partitions(bdev->bd_disk, bdev); } else { - mutex_lock(&bdev->bd_contains->bd_mutex); + down(&bdev->bd_contains->bd_sem); bdev->bd_contains->bd_part_count++; - mutex_unlock(&bdev->bd_contains->bd_mutex); + up(&bdev->bd_contains->bd_sem); } } bdev->bd_openers++; - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); unlock_kernel(); return 0; @@ -652,7 +652,7 @@ static int do_open(struct block_device *bdev, struct file *file) put_disk(disk); module_put(owner); out: - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); unlock_kernel(); if (ret) bdput(bdev); @@ -714,7 +714,7 @@ int blkdev_put(struct block_device *bdev) struct inode *bd_inode = bdev->bd_inode; struct gendisk *disk = bdev->bd_disk; - mutex_lock(&bdev->bd_mutex); + down(&bdev->bd_sem); lock_kernel(); if (!--bdev->bd_openers) { sync_blockdev(bdev); @@ -724,9 +724,9 @@ int blkdev_put(struct block_device *bdev) if (disk->fops->release) ret = disk->fops->release(bd_inode, NULL); } else { - mutex_lock(&bdev->bd_contains->bd_mutex); + down(&bdev->bd_contains->bd_sem); bdev->bd_contains->bd_part_count--; - mutex_unlock(&bdev->bd_contains->bd_mutex); + up(&bdev->bd_contains->bd_sem); } if (!bdev->bd_openers) { struct module *owner = disk->fops->owner; @@ -746,7 +746,7 @@ int blkdev_put(struct block_device *bdev) bdev->bd_contains = NULL; } unlock_kernel(); - mutex_unlock(&bdev->bd_mutex); + up(&bdev->bd_sem); bdput(bdev); return ret; } diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c index 0d6ca7bac6c8..1d3683d496f8 100644 --- a/trunk/fs/buffer.c +++ b/trunk/fs/buffer.c @@ -201,7 +201,7 @@ int fsync_bdev(struct block_device *bdev) * freeze_bdev -- lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * - * This takes the 
block device bd_mount_mutex to make sure no new mounts + * This takes the block device bd_mount_sem to make sure no new mounts * happen on bdev until thaw_bdev() is called. * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. @@ -210,7 +210,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) { struct super_block *sb; - mutex_lock(&bdev->bd_mount_mutex); + down(&bdev->bd_mount_sem); sb = get_super(bdev); if (sb && !(sb->s_flags & MS_RDONLY)) { sb->s_frozen = SB_FREEZE_WRITE; @@ -264,7 +264,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb) drop_super(sb); } - mutex_unlock(&bdev->bd_mount_mutex); + up(&bdev->bd_mount_sem); } EXPORT_SYMBOL(thaw_bdev); diff --git a/trunk/fs/cifs/dir.c b/trunk/fs/cifs/dir.c index 632561dd9c50..fed55e3c53df 100644 --- a/trunk/fs/cifs/dir.c +++ b/trunk/fs/cifs/dir.c @@ -138,9 +138,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); + down(&direntry->d_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); + up(&direntry->d_sb->s_vfs_rename_sem); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -317,9 +317,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); + down(&direntry->d_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); + up(&direntry->d_sb->s_vfs_rename_sem); if(full_path == NULL) rc = -ENOMEM; else if (pTcon->ses->capabilities & CAP_UNIX) { diff --git a/trunk/fs/cifs/fcntl.c b/trunk/fs/cifs/fcntl.c index ec4dfe9bf5ef..a7a47bb36bf3 100644 --- a/trunk/fs/cifs/fcntl.c +++ b/trunk/fs/cifs/fcntl.c @@ -86,9 +86,9 @@ int cifs_dir_notify(struct file * file, unsigned long arg) cifs_sb = CIFS_SB(file->f_dentry->d_sb); pTcon = cifs_sb->tcon; - mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); + down(&file->f_dentry->d_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(file->f_dentry); - mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); + up(&file->f_dentry->d_sb->s_vfs_rename_sem); if(full_path == NULL) { rc = -ENOMEM; diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c index 165d67426381..675bd2568297 100644 --- a/trunk/fs/cifs/file.c +++ b/trunk/fs/cifs/file.c @@ -203,9 +203,9 @@ int cifs_open(struct inode *inode, struct file *file) } } - mutex_lock(&inode->i_sb->s_vfs_rename_mutex); + down(&inode->i_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(file->f_dentry); - mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); + up(&inode->i_sb->s_vfs_rename_sem); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/trunk/fs/cifs/inode.c b/trunk/fs/cifs/inode.c index ff93a9f81d1c..59359911f481 100644 --- a/trunk/fs/cifs/inode.c +++ b/trunk/fs/cifs/inode.c @@ -574,9 +574,9 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry) /* Unlink can be called from rename so we can not grab the sem here since we deadlock otherwise */ -/* mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/ +/* down(&direntry->d_sb->s_vfs_rename_sem);*/ full_path = build_path_from_dentry(direntry); -/* mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/ +/* up(&direntry->d_sb->s_vfs_rename_sem);*/ if (full_path == NULL) { 
FreeXid(xid); return -ENOMEM; @@ -718,9 +718,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - mutex_lock(&inode->i_sb->s_vfs_rename_mutex); + down(&inode->i_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); + up(&inode->i_sb->s_vfs_rename_sem); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -803,9 +803,9 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - mutex_lock(&inode->i_sb->s_vfs_rename_mutex); + down(&inode->i_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); + up(&inode->i_sb->s_vfs_rename_sem); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -1137,9 +1137,9 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs) rc = 0; } - mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); + down(&direntry->d_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); + up(&direntry->d_sb->s_vfs_rename_sem); if (full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/trunk/fs/cifs/link.c b/trunk/fs/cifs/link.c index 8d0da7c87c7b..0f99aae33162 100644 --- a/trunk/fs/cifs/link.c +++ b/trunk/fs/cifs/link.c @@ -48,10 +48,10 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, /* No need to check for cross device links since server will do that BB note DFS case in future though (when we may have to check) */ - mutex_lock(&inode->i_sb->s_vfs_rename_mutex); + down(&inode->i_sb->s_vfs_rename_sem); fromName = build_path_from_dentry(old_file); toName = build_path_from_dentry(direntry); - mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); + up(&inode->i_sb->s_vfs_rename_sem); if((fromName == NULL) || (toName == NULL)) { rc = -ENOMEM; goto cifs_hl_exit; @@ -103,9 +103,9 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd) xid = GetXid(); - mutex_lock(&direntry->d_sb->s_vfs_rename_mutex); + down(&direntry->d_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex); + up(&direntry->d_sb->s_vfs_rename_sem); if (!full_path) goto out_no_free; @@ -164,9 +164,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) cifs_sb = CIFS_SB(inode->i_sb); pTcon = cifs_sb->tcon; - mutex_lock(&inode->i_sb->s_vfs_rename_mutex); + down(&inode->i_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&inode->i_sb->s_vfs_rename_mutex); + up(&inode->i_sb->s_vfs_rename_sem); if(full_path == NULL) { FreeXid(xid); @@ -232,9 +232,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen) /* BB would it be safe against deadlock to grab this sem even though rename itself grabs the sem and calls lookup? 
*/ -/* mutex_lock(&inode->i_sb->s_vfs_rename_mutex);*/ +/* down(&inode->i_sb->s_vfs_rename_sem);*/ full_path = build_path_from_dentry(direntry); -/* mutex_unlock(&inode->i_sb->s_vfs_rename_mutex);*/ +/* up(&inode->i_sb->s_vfs_rename_sem);*/ if(full_path == NULL) { FreeXid(xid); diff --git a/trunk/fs/cifs/readdir.c b/trunk/fs/cifs/readdir.c index edb3b6eb34bc..288cc048d37f 100644 --- a/trunk/fs/cifs/readdir.c +++ b/trunk/fs/cifs/readdir.c @@ -404,9 +404,9 @@ static int initiate_cifs_search(const int xid, struct file *file) if(pTcon == NULL) return -EINVAL; - mutex_lock(&file->f_dentry->d_sb->s_vfs_rename_mutex); + down(&file->f_dentry->d_sb->s_vfs_rename_sem); full_path = build_path_from_dentry(file->f_dentry); - mutex_unlock(&file->f_dentry->d_sb->s_vfs_rename_mutex); + up(&file->f_dentry->d_sb->s_vfs_rename_sem); if(full_path == NULL) { return -ENOMEM; diff --git a/trunk/fs/cifs/xattr.c b/trunk/fs/cifs/xattr.c index 3938444d87b2..777e3363c2a4 100644 --- a/trunk/fs/cifs/xattr.c +++ b/trunk/fs/cifs/xattr.c @@ -62,9 +62,9 @@ int cifs_removexattr(struct dentry * direntry, const char * ea_name) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - mutex_lock(&sb->s_vfs_rename_mutex); + down(&sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&sb->s_vfs_rename_mutex); + up(&sb->s_vfs_rename_sem); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -116,9 +116,9 @@ int cifs_setxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - mutex_lock(&sb->s_vfs_rename_mutex); + down(&sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&sb->s_vfs_rename_mutex); + up(&sb->s_vfs_rename_sem); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -223,9 +223,9 @@ ssize_t cifs_getxattr(struct dentry * direntry, const char * ea_name, cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - mutex_lock(&sb->s_vfs_rename_mutex); + down(&sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&sb->s_vfs_rename_mutex); + up(&sb->s_vfs_rename_sem); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; @@ -341,9 +341,9 @@ ssize_t cifs_listxattr(struct dentry * direntry, char * data, size_t buf_size) cifs_sb = CIFS_SB(sb); pTcon = cifs_sb->tcon; - mutex_lock(&sb->s_vfs_rename_mutex); + down(&sb->s_vfs_rename_sem); full_path = build_path_from_dentry(direntry); - mutex_unlock(&sb->s_vfs_rename_mutex); + up(&sb->s_vfs_rename_sem); if(full_path == NULL) { FreeXid(xid); return -ENOMEM; diff --git a/trunk/fs/devpts/inode.c b/trunk/fs/devpts/inode.c index 14c5620b5cab..bfb8a230bac9 100644 --- a/trunk/fs/devpts/inode.c +++ b/trunk/fs/devpts/inode.c @@ -18,7 +18,6 @@ #include #include #include -#include #define DEVPTS_SUPER_MAGIC 0x1cd1 @@ -33,60 +32,39 @@ static struct { umode_t mode; } config = {.mode = 0600}; -enum { - Opt_uid, Opt_gid, Opt_mode, - Opt_err -}; - -static match_table_t tokens = { - {Opt_uid, "uid=%u"}, - {Opt_gid, "gid=%u"}, - {Opt_mode, "mode=%o"}, - {Opt_err, NULL} -}; - static int devpts_remount(struct super_block *sb, int *flags, char *data) { - char *p; - - config.setuid = 0; - config.setgid = 0; - config.uid = 0; - config.gid = 0; - config.mode = 0600; - - while ((p = strsep(&data, ",")) != NULL) { - substring_t args[MAX_OPT_ARGS]; - int token; - int option; - - if (!*p) + int setuid = 0; + int setgid = 0; + uid_t uid = 0; + gid_t gid = 0; + umode_t mode = 0600; + char *this_char; + + this_char = NULL; + while ((this_char = strsep(&data, ",")) != NULL) { + int n; + char 
dummy; + if (!*this_char) continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_uid: - if (match_int(&args[0], &option)) - return -EINVAL; - config.uid = option; - config.setuid = 1; - break; - case Opt_gid: - if (match_int(&args[0], &option)) - return -EINVAL; - config.gid = option; - config.setgid = 1; - break; - case Opt_mode: - if (match_octal(&args[0], &option)) - return -EINVAL; - config.mode = option & ~S_IFMT; - break; - default: - printk(KERN_ERR "devpts: called with bogus options\n"); + if (sscanf(this_char, "uid=%i%c", &n, &dummy) == 1) { + setuid = 1; + uid = n; + } else if (sscanf(this_char, "gid=%i%c", &n, &dummy) == 1) { + setgid = 1; + gid = n; + } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1) + mode = n & ~S_IFMT; + else { + printk("devpts: called with bogus options\n"); return -EINVAL; } } + config.setuid = setuid; + config.setgid = setgid; + config.uid = uid; + config.gid = gid; + config.mode = mode; return 0; } diff --git a/trunk/fs/dquot.c b/trunk/fs/dquot.c index acf07e581f8c..1966c890b48d 100644 --- a/trunk/fs/dquot.c +++ b/trunk/fs/dquot.c @@ -103,12 +103,12 @@ * (these locking rules also apply for S_NOQUOTA flag in the inode - note that * for altering the flag i_mutex is also needed). If operation is holding * reference to dquot in other way (e.g. quotactl ops) it must be guarded by - * dqonoff_mutex. + * dqonoff_sem. * This locking assures that: * a) update/access to dquot pointers in inode is serialized * b) everyone is guarded against invalidate_dquots() * - * Each dquot has its dq_lock mutex. Locked dquots might not be referenced + * Each dquot has its dq_lock semaphore. Locked dquots might not be referenced * from inodes (dquot_alloc_space() and such don't check the dq_lock). * Currently dquot is locked only when it is being read to memory (or space for * it is being allocated) on the first dqget() and when it is being released on @@ -118,9 +118,9 @@ * spinlock to internal buffers before writing. 
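/*
 * [Editor's note -- illustration only, not from the patch. The restored
 * devpts_remount() above relies on the sscanf("...%c", &n, &dummy) idiom:
 * a return value of exactly 1 means the number matched and nothing
 * followed it, so e.g. "mode=0620x" is rejected. A minimal sketch:]
 */
static int parse_mode_option(const char *opt, umode_t *mode)
{
	int n;
	char dummy;

	if (sscanf(opt, "mode=%o%c", &n, &dummy) == 1) {
		*mode = n & ~S_IFMT;	/* keep permission bits only */
		return 0;
	}
	return -EINVAL;			/* trailing junk or no match */
}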
* * Lock ordering (including related VFS locks) is the following: - * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > - * dqio_mutex - * i_mutex on quota files is special (it's below dqio_mutex) + * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem > + * > dquot->dq_lock > dqio_sem + * i_mutex on quota files is special (it's below dqio_sem) */ static DEFINE_SPINLOCK(dq_list_lock); @@ -281,8 +281,8 @@ static inline void remove_inuse(struct dquot *dquot) static void wait_on_dquot(struct dquot *dquot) { - mutex_lock(&dquot->dq_lock); - mutex_unlock(&dquot->dq_lock); + down(&dquot->dq_lock); + up(&dquot->dq_lock); } #define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot)) @@ -321,8 +321,8 @@ int dquot_acquire(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - mutex_lock(&dquot->dq_lock); - mutex_lock(&dqopt->dqio_mutex); + down(&dquot->dq_lock); + down(&dqopt->dqio_sem); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot); if (ret < 0) @@ -343,8 +343,8 @@ int dquot_acquire(struct dquot *dquot) } set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: - mutex_unlock(&dqopt->dqio_mutex); - mutex_unlock(&dquot->dq_lock); + up(&dqopt->dqio_sem); + up(&dquot->dq_lock); return ret; } @@ -356,7 +356,7 @@ int dquot_commit(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - mutex_lock(&dqopt->dqio_mutex); + down(&dqopt->dqio_sem); spin_lock(&dq_list_lock); if (!clear_dquot_dirty(dquot)) { spin_unlock(&dq_list_lock); @@ -373,7 +373,7 @@ int dquot_commit(struct dquot *dquot) ret = ret2; } out_sem: - mutex_unlock(&dqopt->dqio_mutex); + up(&dqopt->dqio_sem); return ret; } @@ -385,11 +385,11 @@ int dquot_release(struct dquot *dquot) int ret = 0, ret2 = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); - mutex_lock(&dquot->dq_lock); + down(&dquot->dq_lock); /* Check whether we are not racing with some other dqget() */ if (atomic_read(&dquot->dq_count) > 1) goto out_dqlock; - mutex_lock(&dqopt->dqio_mutex); + down(&dqopt->dqio_sem); if (dqopt->ops[dquot->dq_type]->release_dqblk) { ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot); /* Write the info */ @@ -399,57 +399,31 @@ int dquot_release(struct dquot *dquot) ret = ret2; } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); - mutex_unlock(&dqopt->dqio_mutex); + up(&dqopt->dqio_sem); out_dqlock: - mutex_unlock(&dquot->dq_lock); + up(&dquot->dq_lock); return ret; } /* Invalidate all dquots on the list. Note that this function is called after * quota is disabled and pointers from inodes removed so there cannot be new - * quota users. There can still be some users of quotas due to inodes being - * just deleted or pruned by prune_icache() (those are not attached to any - * list). We have to wait for such users. - */ + * quota users. Also because we hold dqonoff_sem there can be no quota users + * for this sb+type at all. */ static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot, *tmp; -restart: spin_lock(&dq_list_lock); list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { if (dquot->dq_sb != sb) continue; if (dquot->dq_type != type) continue; - /* Wait for dquot users */ - if (atomic_read(&dquot->dq_count)) { - DEFINE_WAIT(wait); - - atomic_inc(&dquot->dq_count); - prepare_to_wait(&dquot->dq_wait_unused, &wait, - TASK_UNINTERRUPTIBLE); - spin_unlock(&dq_list_lock); - /* Once dqput() wakes us up, we know it's time to free - * the dquot. 
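/*
 * [Editor's sketch -- not part of this patch. dquot_acquire() and
 * dquot_release() above nest the two semaphores in one fixed order,
 * matching the documented hierarchy dquot->dq_lock > dqio_sem:]
 */
static void dquot_io_example(struct dquot *dquot)
{
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	down(&dquot->dq_lock);		/* outer: this dquot's state */
	down(&dqopt->dqio_sem);		/* inner: quota-file I/O */
	/* ... read or write the on-disk dquot here ... */
	up(&dqopt->dqio_sem);
	up(&dquot->dq_lock);
}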
- * IMPORTANT: we rely on the fact that there is always - * at most one process waiting for dquot to free. - * Otherwise dq_count would be > 1 and we would never - * wake up. - */ - if (atomic_read(&dquot->dq_count) > 1) - schedule(); - finish_wait(&dquot->dq_wait_unused, &wait); - dqput(dquot); - /* At this moment dquot() need not exist (it could be - * reclaimed by prune_dqcache(). Hence we must - * restart. */ - goto restart; - } - /* - * Quota now has no users and it has been written on last - * dqput() - */ +#ifdef __DQUOT_PARANOIA + if (atomic_read(&dquot->dq_count)) + BUG(); +#endif + /* Quota now has no users and it has been written on last dqput() */ remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); @@ -465,7 +439,7 @@ int vfs_quota_sync(struct super_block *sb, int type) struct quota_info *dqopt = sb_dqopt(sb); int cnt; - mutex_lock(&dqopt->dqonoff_mutex); + down(&dqopt->dqonoff_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; @@ -500,7 +474,7 @@ int vfs_quota_sync(struct super_block *sb, int type) spin_lock(&dq_list_lock); dqstats.syncs++; spin_unlock(&dq_list_lock); - mutex_unlock(&dqopt->dqonoff_mutex); + up(&dqopt->dqonoff_sem); return 0; } @@ -541,7 +515,7 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) /* * Put reference to dquot * NOTE: If you change this function please check whether dqput_blocks() works right... - * MUST be called with either dqptr_sem or dqonoff_mutex held + * MUST be called with either dqptr_sem or dqonoff_sem held */ static void dqput(struct dquot *dquot) { @@ -566,10 +540,6 @@ static void dqput(struct dquot *dquot) if (atomic_read(&dquot->dq_count) > 1) { /* We have more than one user... nothing to do */ atomic_dec(&dquot->dq_count); - /* Releasing dquot during quotaoff phase? 
*/ - if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) && - atomic_read(&dquot->dq_count) == 1) - wake_up(&dquot->dq_wait_unused); spin_unlock(&dq_list_lock); return; } @@ -606,12 +576,11 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) return NODQUOT; memset((caddr_t)dquot, 0, sizeof(struct dquot)); - mutex_init(&dquot->dq_lock); + sema_init(&dquot->dq_lock, 1); INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_HLIST_NODE(&dquot->dq_hash); INIT_LIST_HEAD(&dquot->dq_dirty); - init_waitqueue_head(&dquot->dq_wait_unused); dquot->dq_sb = sb; dquot->dq_type = type; atomic_set(&dquot->dq_count, 1); @@ -621,7 +590,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type) /* * Get reference to dquot - * MUST be called with either dqptr_sem or dqonoff_mutex held + * MUST be called with either dqptr_sem or dqonoff_sem held */ static struct dquot *dqget(struct super_block *sb, unsigned int id, int type) { @@ -687,7 +656,7 @@ static int dqinit_needed(struct inode *inode, int type) return 0; } -/* This routine is guarded by dqonoff_mutex mutex */ +/* This routine is guarded by dqonoff_sem semaphore */ static void add_dquot_ref(struct super_block *sb, int type) { struct list_head *p; @@ -763,9 +732,13 @@ static void drop_dquot_ref(struct super_block *sb, int type) { LIST_HEAD(tofree_head); + /* We need to be guarded against prune_icache to reach all the + * inodes - otherwise some can be on the local list of prune_icache */ + down(&iprune_sem); down_write(&sb_dqopt(sb)->dqptr_sem); remove_dquot_ref(sb, type, &tofree_head); up_write(&sb_dqopt(sb)->dqptr_sem); + up(&iprune_sem); put_dquot_list(&tofree_head); } @@ -965,8 +938,8 @@ int dquot_initialize(struct inode *inode, int type) unsigned int id = 0; int cnt, ret = 0; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ + /* First test before acquiring semaphore - solves deadlocks when we + * re-enter the quota code and are already holding the semaphore */ if (IS_NOQUOTA(inode)) return 0; down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1029,8 +1002,8 @@ int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ + /* First test before acquiring semaphore - solves deadlocks when we + * re-enter the quota code and are already holding the semaphore */ if (IS_NOQUOTA(inode)) { out_add: inode_add_bytes(inode, number); @@ -1078,8 +1051,8 @@ int dquot_alloc_inode(const struct inode *inode, unsigned long number) int cnt, ret = NO_QUOTA; char warntype[MAXQUOTAS]; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ + /* First test before acquiring semaphore - solves deadlocks when we + * re-enter the quota code and are already holding the semaphore */ if (IS_NOQUOTA(inode)) return QUOTA_OK; for (cnt = 0; cnt < MAXQUOTAS; cnt++) @@ -1122,8 +1095,8 @@ int dquot_free_space(struct inode *inode, qsize_t number) { unsigned int cnt; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ + /* First test before acquiring semaphore - solves deadlocks when we + * re-enter the quota code and are already holding the semaphore */ if (IS_NOQUOTA(inode)) { out_sub: inode_sub_bytes(inode, 
number); @@ -1158,8 +1131,8 @@ int dquot_free_inode(const struct inode *inode, unsigned long number) { unsigned int cnt; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ + /* First test before acquiring semaphore - solves deadlocks when we + * re-enter the quota code and are already holding the semaphore */ if (IS_NOQUOTA(inode)) return QUOTA_OK; down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); @@ -1198,8 +1171,8 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid; char warntype[MAXQUOTAS]; - /* First test before acquiring mutex - solves deadlocks when we - * re-enter the quota code and are already holding the mutex */ + /* First test before acquiring semaphore - solves deadlocks when we + * re-enter the quota code and are already holding the semaphore */ if (IS_NOQUOTA(inode)) return QUOTA_OK; /* Clear the arrays */ @@ -1293,9 +1266,9 @@ int dquot_commit_info(struct super_block *sb, int type) int ret; struct quota_info *dqopt = sb_dqopt(sb); - mutex_lock(&dqopt->dqio_mutex); + down(&dqopt->dqio_sem); ret = dqopt->ops[type]->write_file_info(sb, type); - mutex_unlock(&dqopt->dqio_mutex); + up(&dqopt->dqio_sem); return ret; } @@ -1351,7 +1324,7 @@ int vfs_quota_off(struct super_block *sb, int type) struct inode *toputinode[MAXQUOTAS]; /* We need to serialize quota_off() for device */ - mutex_lock(&dqopt->dqonoff_mutex); + down(&dqopt->dqonoff_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { toputinode[cnt] = NULL; if (type != -1 && cnt != type) @@ -1380,7 +1353,7 @@ int vfs_quota_off(struct super_block *sb, int type) dqopt->info[cnt].dqi_bgrace = 0; dqopt->ops[cnt] = NULL; } - mutex_unlock(&dqopt->dqonoff_mutex); + up(&dqopt->dqonoff_sem); /* Sync the superblock so that buffers with quota data are written to * disk (and so userspace sees correct data afterwards). 
*/ if (sb->s_op->sync_fs) @@ -1393,7 +1366,7 @@ int vfs_quota_off(struct super_block *sb, int type) * changes done by userspace on the next quotaon() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (toputinode[cnt]) { - mutex_lock(&dqopt->dqonoff_mutex); + down(&dqopt->dqonoff_sem); /* If quota was reenabled in the meantime, we have * nothing to do */ if (!sb_has_quota_enabled(sb, cnt)) { @@ -1405,7 +1378,7 @@ int vfs_quota_off(struct super_block *sb, int type) mark_inode_dirty(toputinode[cnt]); iput(toputinode[cnt]); } - mutex_unlock(&dqopt->dqonoff_mutex); + up(&dqopt->dqonoff_sem); } if (sb->s_bdev) invalidate_bdev(sb->s_bdev, 0); @@ -1446,7 +1419,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) /* And now flush the block cache so that kernel sees the changes */ invalidate_bdev(sb->s_bdev, 0); mutex_lock(&inode->i_mutex); - mutex_lock(&dqopt->dqonoff_mutex); + down(&dqopt->dqonoff_sem); if (sb_has_quota_enabled(sb, type)) { error = -EBUSY; goto out_lock; @@ -1471,17 +1444,17 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) dqopt->ops[type] = fmt->qf_ops; dqopt->info[type].dqi_format = fmt; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); - mutex_lock(&dqopt->dqio_mutex); + down(&dqopt->dqio_sem); if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) { - mutex_unlock(&dqopt->dqio_mutex); + up(&dqopt->dqio_sem); goto out_file_init; } - mutex_unlock(&dqopt->dqio_mutex); + up(&dqopt->dqio_sem); mutex_unlock(&inode->i_mutex); set_enable_flags(dqopt, type); add_dquot_ref(sb, type); - mutex_unlock(&dqopt->dqonoff_mutex); + up(&dqopt->dqonoff_sem); return 0; @@ -1489,7 +1462,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id) dqopt->files[type] = NULL; iput(inode); out_lock: - mutex_unlock(&dqopt->dqonoff_mutex); + up(&dqopt->dqonoff_sem); if (oldflags != -1) { down_write(&dqopt->dqptr_sem); /* Set the flags back (in the case of accidental quotaon() @@ -1577,14 +1550,14 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + down(&sb_dqopt(sb)->dqonoff_sem); if (!(dquot = dqget(sb, id, type))) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return -ESRCH; } do_get_dqblk(dquot, di); dqput(dquot); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return 0; } @@ -1646,14 +1619,14 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d { struct dquot *dquot; - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + down(&sb_dqopt(sb)->dqonoff_sem); if (!(dquot = dqget(sb, id, type))) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return -ESRCH; } do_set_dqblk(dquot, di); dqput(dquot); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return 0; } @@ -1662,9 +1635,9 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + down(&sb_dqopt(sb)->dqonoff_sem); if (!sb_has_quota_enabled(sb, type)) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1674,7 +1647,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) ii->dqi_flags = mi->dqi_flags & DQF_MASK; ii->dqi_valid = IIF_ALL; spin_unlock(&dq_data_lock); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + 
up(&sb_dqopt(sb)->dqonoff_sem); return 0; } @@ -1683,9 +1656,9 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + down(&sb_dqopt(sb)->dqonoff_sem); if (!sb_has_quota_enabled(sb, type)) { - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return -ESRCH; } mi = sb_dqopt(sb)->info + type; @@ -1700,7 +1673,7 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mark_info_dirty(sb, type); /* Force write to disk */ sb->dq_op->write_info(sb, type); - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); return 0; } diff --git a/trunk/fs/eventpoll.c b/trunk/fs/eventpoll.c index 1c2b16fda13a..4284cd31eba6 100644 --- a/trunk/fs/eventpoll.c +++ b/trunk/fs/eventpoll.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -47,7 +46,7 @@ * LOCKING: * There are three level of locking required by epoll : * - * 1) epmutex (mutex) + * 1) epsem (semaphore) * 2) ep->sem (rw_semaphore) * 3) ep->lock (rw_lock) * @@ -68,9 +67,9 @@ * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). * It is possible to drop the "ep->sem" and to use the global - * semaphore "epmutex" (together with "ep->lock") to have it working, + * semaphore "epsem" (together with "ep->lock") to have it working, * but having "ep->sem" will make the interface more scalable. - * Events that require holding "epmutex" are very rare, while for + * Events that require holding "epsem" are very rare, while for * normal operations the epoll private "ep->sem" will guarantee * a greater scalability. */ @@ -275,7 +274,7 @@ static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type, /* * This semaphore is used to serialize ep_free() and eventpoll_release_file(). */ -static struct mutex epmutex; +static struct semaphore epsem; /* Safe wake up implementation */ static struct poll_safewake psw; @@ -452,6 +451,15 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq) } +/* Used to initialize the epoll bits inside the "struct file" */ +void eventpoll_init_file(struct file *file) +{ + + INIT_LIST_HEAD(&file->f_ep_links); + spin_lock_init(&file->f_ep_lock); +} + + /* * This is called from eventpoll_release() to unlink files from the eventpoll * interface. We need to have this facility to cleanup correctly files that are @@ -469,10 +477,10 @@ void eventpoll_release_file(struct file *file) * cleanup path, and this means that noone is using this file anymore. * The only hit might come from ep_free() but by holding the semaphore * will correctly serialize the operation. We do need to acquire - * "ep->sem" after "epmutex" because ep_remove() requires it when called + * "ep->sem" after "epsem" because ep_remove() requires it when called * from anywhere but ep_free(). */ - mutex_lock(&epmutex); + down(&epsem); while (!list_empty(lsthead)) { epi = list_entry(lsthead->next, struct epitem, fllink); @@ -484,7 +492,7 @@ void eventpoll_release_file(struct file *file) up_write(&ep->sem); } - mutex_unlock(&epmutex); + up(&epsem); } @@ -811,9 +819,9 @@ static void ep_free(struct eventpoll *ep) * We do not need to hold "ep->sem" here because the epoll file * is on the way to be removed and no one has references to it * anymore. The only hit might come from eventpoll_release_file() but - * holding "epmutex" is sufficent here. 
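/*
 * [Editor's note -- illustration only, not from the patch. "epsem" above
 * is a counting semaphore used as a mutex: initialized to a count of 1,
 * taken with down() and released with up(). The two era-appropriate
 * initialization styles:]
 */
static DECLARE_MUTEX(static_sem);	/* compile-time, count = 1 */

static void runtime_init(struct semaphore *sem)
{
	init_MUTEX(sem);		/* equivalent to sema_init(sem, 1) */
}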
+ * holding "epsem" is sufficent here. */ - mutex_lock(&epmutex); + down(&epsem); /* * Walks through the whole tree by unregistering poll callbacks. @@ -835,7 +843,7 @@ static void ep_free(struct eventpoll *ep) ep_remove(ep, epi); } - mutex_unlock(&epmutex); + up(&epsem); } @@ -1607,7 +1615,7 @@ static int __init eventpoll_init(void) { int error; - mutex_init(&epmutex); + init_MUTEX(&epsem); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_poll_safewake_init(&psw); diff --git a/trunk/fs/ext2/namei.c b/trunk/fs/ext2/namei.c index 4ca824985321..ad1432a2a62e 100644 --- a/trunk/fs/ext2/namei.c +++ b/trunk/fs/ext2/namei.c @@ -36,6 +36,22 @@ #include "acl.h" #include "xip.h" +/* + * Couple of helper functions - make the code slightly cleaner. + */ + +static inline void ext2_inc_count(struct inode *inode) +{ + inode->i_nlink++; + mark_inode_dirty(inode); +} + +static inline void ext2_dec_count(struct inode *inode) +{ + inode->i_nlink--; + mark_inode_dirty(inode); +} + static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ext2_add_link(dentry, inode); @@ -43,7 +59,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - inode_dec_link_count(inode); + ext2_dec_count(inode); iput(inode); return err; } @@ -185,7 +201,7 @@ static int ext2_symlink (struct inode * dir, struct dentry * dentry, return err; out_fail: - inode_dec_link_count(inode); + ext2_dec_count(inode); iput (inode); goto out; } @@ -199,7 +215,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inode_inc_link_count(inode); + ext2_inc_count(inode); atomic_inc(&inode->i_count); return ext2_add_nondir(dentry, inode); @@ -213,7 +229,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) if (dir->i_nlink >= EXT2_LINK_MAX) goto out; - inode_inc_link_count(dir); + ext2_inc_count(dir); inode = ext2_new_inode (dir, S_IFDIR | mode); err = PTR_ERR(inode); @@ -227,7 +243,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) else inode->i_mapping->a_ops = &ext2_aops; - inode_inc_link_count(inode); + ext2_inc_count(inode); err = ext2_make_empty(inode, dir); if (err) @@ -242,11 +258,11 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) return err; out_fail: - inode_dec_link_count(inode); - inode_dec_link_count(inode); + ext2_dec_count(inode); + ext2_dec_count(inode); iput(inode); out_dir: - inode_dec_link_count(dir); + ext2_dec_count(dir); goto out; } @@ -266,7 +282,7 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - inode_dec_link_count(inode); + ext2_dec_count(inode); err = 0; out: return err; @@ -281,8 +297,8 @@ static int ext2_rmdir (struct inode * dir, struct dentry *dentry) err = ext2_unlink(dir, dentry); if (!err) { inode->i_size = 0; - inode_dec_link_count(inode); - inode_dec_link_count(dir); + ext2_dec_count(inode); + ext2_dec_count(dir); } } return err; @@ -322,41 +338,41 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); + ext2_inc_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - inode_dec_link_count(new_inode); + ext2_dec_count(new_inode); } else { if 
(dir_de) { err = -EMLINK; if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); + ext2_inc_count(old_inode); err = ext2_add_link(new_dentry, old_inode); if (err) { - inode_dec_link_count(old_inode); + ext2_dec_count(old_inode); goto out_dir; } if (dir_de) - inode_inc_link_count(new_dir); + ext2_inc_count(new_dir); } /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. + * ext2_dec_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; ext2_delete_entry (old_de, old_page); - inode_dec_link_count(old_inode); + ext2_dec_count(old_inode); if (dir_de) { ext2_set_link(old_inode, dir_de, dir_page, new_dir); - inode_dec_link_count(old_dir); + ext2_dec_count(old_dir); } return 0; diff --git a/trunk/fs/ext3/dir.c b/trunk/fs/ext3/dir.c index 773459164bb2..832867aef3dc 100644 --- a/trunk/fs/ext3/dir.c +++ b/trunk/fs/ext3/dir.c @@ -95,10 +95,11 @@ static int ext3_readdir(struct file * filp, void * dirent, filldir_t filldir) { int error = 0; - unsigned long offset; - int i, stored; - struct ext3_dir_entry_2 *de; - struct super_block *sb; + unsigned long offset, blk; + int i, num, stored; + struct buffer_head * bh, * tmp, * bha[16]; + struct ext3_dir_entry_2 * de; + struct super_block * sb; int err; struct inode *inode = filp->f_dentry->d_inode; int ret = 0; @@ -123,29 +124,12 @@ static int ext3_readdir(struct file * filp, } #endif stored = 0; + bh = NULL; offset = filp->f_pos & (sb->s_blocksize - 1); while (!error && !stored && filp->f_pos < inode->i_size) { - unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb); - struct buffer_head map_bh; - struct buffer_head *bh = NULL; - - map_bh.b_state = 0; - err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); - if (!err) { - page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, - &filp->f_ra, - filp, - map_bh.b_blocknr >> - (PAGE_CACHE_SHIFT - inode->i_blkbits), - 1); - bh = ext3_bread(NULL, inode, blk, 0, &err); - } - - /* - * We ignore I/O errors on directories so users have a chance - * of recovering data when there's a bad sector - */ + blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb); + bh = ext3_bread(NULL, inode, blk, 0, &err); if (!bh) { ext3_error (sb, "ext3_readdir", "directory #%lu contains a hole at offset %lu", @@ -154,6 +138,26 @@ static int ext3_readdir(struct file * filp, continue; } + /* + * Do the readahead + */ + if (!offset) { + for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0; + i > 0; i--) { + tmp = ext3_getblk (NULL, inode, ++blk, 0, &err); + if (tmp && !buffer_uptodate(tmp) && + !buffer_locked(tmp)) + bha[num++] = tmp; + else + brelse (tmp); + } + if (num) { + ll_rw_block (READA, num, bha); + for (i = 0; i < num; i++) + brelse (bha[i]); + } + } + revalidate: /* If the dir block has changed since the last call to * readdir(2), then we might be pointing to an invalid diff --git a/trunk/fs/ext3/file.c b/trunk/fs/ext3/file.c index 59098ea56711..98e78345ead9 100644 --- a/trunk/fs/ext3/file.c +++ b/trunk/fs/ext3/file.c @@ -37,9 +37,9 @@ static int ext3_release_file (struct inode * inode, struct file * filp) if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1)) { - mutex_lock(&EXT3_I(inode)->truncate_mutex); + down(&EXT3_I(inode)->truncate_sem); ext3_discard_reservation(inode); - mutex_unlock(&EXT3_I(inode)->truncate_mutex); + up(&EXT3_I(inode)->truncate_sem); } if (is_dx(inode) && filp->private_data) ext3_htree_free_dir_info(filp->private_data); diff --git 
a/trunk/fs/ext3/inode.c b/trunk/fs/ext3/inode.c index 2c361377e0a5..0384e539b88f 100644 --- a/trunk/fs/ext3/inode.c +++ b/trunk/fs/ext3/inode.c @@ -671,7 +671,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, * The BKL may not be held on entry here. Be sure to take it early. */ -int +static int ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create, int extend_disksize) { @@ -702,7 +702,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, if (!create || err == -EIO) goto cleanup; - mutex_lock(&ei->truncate_mutex); + down(&ei->truncate_sem); /* * If the indirect block is missing while we are reading @@ -723,7 +723,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, } partial = ext3_get_branch(inode, depth, offsets, chain, &err); if (!partial) { - mutex_unlock(&ei->truncate_mutex); + up(&ei->truncate_sem); if (err) goto cleanup; clear_buffer_new(bh_result); @@ -759,13 +759,13 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, err = ext3_splice_branch(handle, inode, iblock, chain, partial, left); /* - * i_disksize growing is protected by truncate_mutex. Don't forget to + * i_disksize growing is protected by truncate_sem. Don't forget to * protect it if you're about to implement concurrent * ext3_get_block() -bzzz */ if (!err && extend_disksize && inode->i_size > ei->i_disksize) ei->i_disksize = inode->i_size; - mutex_unlock(&ei->truncate_mutex); + up(&ei->truncate_sem); if (err) goto cleanup; @@ -1227,7 +1227,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ... * * Same applies to ext3_get_block(). We will deadlock on various things like - * lock_journal and i_truncate_mutex. + * lock_journal and i_truncate_sem. * * Setting PF_MEMALLOC here doesn't work - too many internal memory * allocations fail. @@ -2161,7 +2161,7 @@ void ext3_truncate(struct inode * inode) * From here we block out all ext3_get_block() callers who want to * modify the block allocation tree. 
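/*
 * [Editor's sketch -- not from the patch. truncate_sem is the writer
 * lock for the block-allocation tree: ext3_get_block_handle() and
 * ext3_truncate() in the hunks around this point both bracket tree
 * changes with it, and i_disksize may only grow under it. Schematically:]
 */
static void ext3_alloc_example(struct inode *inode, loff_t new_size)
{
	struct ext3_inode_info *ei = EXT3_I(inode);

	down(&ei->truncate_sem);	/* exclude truncate vs. allocation */
	/* ... splice new blocks into the indirect tree ... */
	if (new_size > ei->i_disksize)
		ei->i_disksize = new_size;
	up(&ei->truncate_sem);
}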
*/ - mutex_lock(&ei->truncate_mutex); + down(&ei->truncate_sem); if (n == 1) { /* direct blocks */ ext3_free_data(handle, inode, NULL, i_data+offsets[0], @@ -2228,7 +2228,7 @@ void ext3_truncate(struct inode * inode) ext3_discard_reservation(inode); - mutex_unlock(&ei->truncate_mutex); + up(&ei->truncate_sem); inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); diff --git a/trunk/fs/ext3/ioctl.c b/trunk/fs/ext3/ioctl.c index aaf1da17b6d4..556cd5510078 100644 --- a/trunk/fs/ext3/ioctl.c +++ b/trunk/fs/ext3/ioctl.c @@ -182,7 +182,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, * need to allocate reservation structure for this inode * before set the window size */ - mutex_lock(&ei->truncate_mutex); + down(&ei->truncate_sem); if (!ei->i_block_alloc_info) ext3_init_block_alloc_info(inode); @@ -190,7 +190,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd, struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node; rsv->rsv_goal_size = rsv_window_size; } - mutex_unlock(&ei->truncate_mutex); + up(&ei->truncate_sem); return 0; } case EXT3_IOC_GROUP_EXTEND: { diff --git a/trunk/fs/ext3/super.c b/trunk/fs/ext3/super.c index efe5b20d7a5a..56bf76586019 100644 --- a/trunk/fs/ext3/super.c +++ b/trunk/fs/ext3/super.c @@ -472,7 +472,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif - mutex_init(&ei->truncate_mutex); + init_MUTEX(&ei->truncate_sem); inode_init_once(&ei->vfs_inode); } } @@ -2382,8 +2382,8 @@ static int ext3_statfs (struct super_block * sb, struct kstatfs * buf) * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() - * DQUOT_INIT() down(dqio_mutex) - * down(dqio_mutex) journal_start() + * DQUOT_INIT() down(dqio_sem) + * down(dqio_sem) journal_start() * */ diff --git a/trunk/fs/fat/fatent.c b/trunk/fs/fat/fatent.c index ab171ea8e869..a1a9e0451217 100644 --- a/trunk/fs/fat/fatent.c +++ b/trunk/fs/fat/fatent.c @@ -267,19 +267,19 @@ static struct fatent_operations fat32_ops = { static inline void lock_fat(struct msdos_sb_info *sbi) { - mutex_lock(&sbi->fat_lock); + down(&sbi->fat_lock); } static inline void unlock_fat(struct msdos_sb_info *sbi) { - mutex_unlock(&sbi->fat_lock); + up(&sbi->fat_lock); } void fat_ent_access_init(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); - mutex_init(&sbi->fat_lock); + init_MUTEX(&sbi->fat_lock); switch (sbi->fat_bits) { case 32: diff --git a/trunk/fs/fcntl.c b/trunk/fs/fcntl.c index 03c789560fb8..dc4a7007f4e7 100644 --- a/trunk/fs/fcntl.c +++ b/trunk/fs/fcntl.c @@ -73,8 +73,8 @@ static int locate_fd(struct files_struct *files, * orig_start..fdt->next_fd */ start = orig_start; - if (start < files->next_fd) - start = files->next_fd; + if (start < fdt->next_fd) + start = fdt->next_fd; newfd = start; if (start < fdt->max_fdset) { @@ -102,8 +102,9 @@ static int locate_fd(struct files_struct *files, * we reacquire the fdtable pointer and use it while holding * the lock, no one can free it during that time. 
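/*
 * [Editor's note -- illustrative caller, not from the patch. The
 * lock_fat()/unlock_fat() wrappers in the fatent.c hunk above exist so
 * the FAT table's locking primitive lives in exactly one place; callers
 * just write:]
 */
static int fat_ent_example(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	lock_fat(sbi);			/* serialize FAT chain updates */
	/* ... allocate or free cluster-chain entries ... */
	unlock_fat(sbi);
	return 0;
}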
*/ - if (start <= files->next_fd) - files->next_fd = newfd + 1; + fdt = files_fdtable(files); + if (start <= fdt->next_fd) + fdt->next_fd = newfd + 1; error = newfd; diff --git a/trunk/fs/file.c b/trunk/fs/file.c index bbc743314730..cea7cbea11d0 100644 --- a/trunk/fs/file.c +++ b/trunk/fs/file.c @@ -125,8 +125,7 @@ static void free_fdtable_rcu(struct rcu_head *rcu) kmem_cache_free(files_cachep, fdt->free_files); return; } - if (fdt->max_fdset <= EMBEDDED_FD_SET_SIZE && - fdt->max_fds <= NR_OPEN_DEFAULT) { + if (fdt->max_fdset <= __FD_SETSIZE && fdt->max_fds <= NR_OPEN_DEFAULT) { /* * The fdtable was embedded */ @@ -156,9 +155,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu) void free_fdtable(struct fdtable *fdt) { - if (fdt->free_files || - fdt->max_fdset > EMBEDDED_FD_SET_SIZE || - fdt->max_fds > NR_OPEN_DEFAULT) + if (fdt->free_files || fdt->max_fdset > __FD_SETSIZE || + fdt->max_fds > NR_OPEN_DEFAULT) call_rcu(&fdt->rcu, free_fdtable_rcu); } @@ -201,6 +199,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *fdt) (nfdt->max_fds - fdt->max_fds) * sizeof(struct file *)); } + nfdt->next_fd = fdt->next_fd; } /* @@ -221,9 +220,11 @@ fd_set * alloc_fdset(int num) void free_fdset(fd_set *array, int num) { - if (num <= EMBEDDED_FD_SET_SIZE) /* Don't free an embedded fdset */ + int size = num / 8; + + if (num <= __FD_SETSIZE) /* Don't free an embedded fdset */ return; - else if (num <= 8 * PAGE_SIZE) + else if (size <= PAGE_SIZE) kfree(array); else vfree(array); @@ -236,17 +237,22 @@ static struct fdtable *alloc_fdtable(int nr) fd_set *new_openset = NULL, *new_execset = NULL; struct file **new_fds; - fdt = kzalloc(sizeof(*fdt), GFP_KERNEL); + fdt = kmalloc(sizeof(*fdt), GFP_KERNEL); if (!fdt) goto out; + memset(fdt, 0, sizeof(*fdt)); - nfds = 8 * L1_CACHE_BYTES; + nfds = __FD_SETSIZE; /* Expand to the max in easy steps */ - while (nfds <= nr) { - nfds = nfds * 2; - if (nfds > NR_OPEN) - nfds = NR_OPEN; - } + do { + if (nfds < (PAGE_SIZE * 8)) + nfds = PAGE_SIZE * 8; + else { + nfds = nfds * 2; + if (nfds > NR_OPEN) + nfds = NR_OPEN; + } + } while (nfds <= nr); new_openset = alloc_fdset(nfds); new_execset = alloc_fdset(nfds); diff --git a/trunk/fs/file_table.c b/trunk/fs/file_table.c index bcea1998b4de..44fabeaa9415 100644 --- a/trunk/fs/file_table.c +++ b/trunk/fs/file_table.c @@ -88,7 +88,6 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp, */ struct file *get_empty_filp(void) { - struct task_struct *tsk; static int old_max; struct file * f; @@ -113,14 +112,13 @@ struct file *get_empty_filp(void) if (security_file_alloc(f)) goto fail_sec; - tsk = current; - INIT_LIST_HEAD(&f->f_u.fu_list); + eventpoll_init_file(f); atomic_set(&f->f_count, 1); + f->f_uid = current->fsuid; + f->f_gid = current->fsgid; rwlock_init(&f->f_owner.lock); - f->f_uid = tsk->fsuid; - f->f_gid = tsk->fsgid; - eventpoll_init_file(f); /* f->f_version: 0 */ + INIT_LIST_HEAD(&f->f_u.fu_list); return f; over: diff --git a/trunk/fs/hpfs/hpfs_fn.h b/trunk/fs/hpfs/hpfs_fn.h index 4c6473ab3b34..6628c3b352cb 100644 --- a/trunk/fs/hpfs/hpfs_fn.h +++ b/trunk/fs/hpfs/hpfs_fn.h @@ -9,7 +9,6 @@ //#define DBG //#define DEBUG_LOCKS -#include #include #include #include @@ -58,8 +57,8 @@ struct hpfs_inode_info { unsigned i_ea_uid : 1; /* file's uid is stored in ea */ unsigned i_ea_gid : 1; /* file's gid is stored in ea */ unsigned i_dirty : 1; - struct mutex i_mutex; - struct mutex i_parent_mutex; + struct semaphore i_sem; + struct semaphore i_parent; loff_t **i_rddir_off; struct inode vfs_inode; }; 
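/*
 * [Editor's sketch -- hypothetical helpers, not in the patch. The hpfs
 * fields above come with an implicit order, visible in hpfs_unlink()
 * and hpfs_rmdir() below: the victim's i_parent is taken before the
 * directory's i_sem, and they are released in reverse:]
 */
static void hpfs_lock_pair(struct inode *inode, struct inode *dir)
{
	down(&hpfs_i(inode)->i_parent);	/* 1st: parent-pointer lock */
	down(&hpfs_i(dir)->i_sem);	/* 2nd: directory-contents lock */
}

static void hpfs_unlock_pair(struct inode *inode, struct inode *dir)
{
	up(&hpfs_i(dir)->i_sem);
	up(&hpfs_i(inode)->i_parent);
}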
diff --git a/trunk/fs/hpfs/inode.c b/trunk/fs/hpfs/inode.c index 56f2c338c4d9..e3d17e9ea6c1 100644 --- a/trunk/fs/hpfs/inode.c +++ b/trunk/fs/hpfs/inode.c @@ -186,9 +186,9 @@ void hpfs_write_inode(struct inode *i) kfree(hpfs_inode->i_rddir_off); hpfs_inode->i_rddir_off = NULL; } - mutex_lock(&hpfs_inode->i_parent_mutex); + down(&hpfs_inode->i_parent); if (!i->i_nlink) { - mutex_unlock(&hpfs_inode->i_parent_mutex); + up(&hpfs_inode->i_parent); return; } parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir); @@ -199,14 +199,14 @@ void hpfs_write_inode(struct inode *i) hpfs_read_inode(parent); unlock_new_inode(parent); } - mutex_lock(&hpfs_inode->i_mutex); + down(&hpfs_inode->i_sem); hpfs_write_inode_nolock(i); - mutex_unlock(&hpfs_inode->i_mutex); + up(&hpfs_inode->i_sem); iput(parent); } else { mark_inode_dirty(i); } - mutex_unlock(&hpfs_inode->i_parent_mutex); + up(&hpfs_inode->i_parent); } void hpfs_write_inode_nolock(struct inode *i) diff --git a/trunk/fs/hpfs/namei.c b/trunk/fs/hpfs/namei.c index a03abb12c610..8ff8fc433fc1 100644 --- a/trunk/fs/hpfs/namei.c +++ b/trunk/fs/hpfs/namei.c @@ -60,7 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) if (dee.read_only) result->i_mode &= ~0222; - mutex_lock(&hpfs_i(dir)->i_mutex); + down(&hpfs_i(dir)->i_sem); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail3; @@ -101,11 +101,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); unlock_kernel(); return 0; bail3: - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); iput(result); bail2: hpfs_brelse4(&qbh0); @@ -168,7 +168,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = 0; - mutex_lock(&hpfs_i(dir)->i_mutex); + down(&hpfs_i(dir)->i_sem); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -193,12 +193,12 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc hpfs_write_inode_nolock(result); } d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); unlock_kernel(); return 0; bail2: - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); iput(result); bail1: brelse(bh); @@ -254,7 +254,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_blocks = 1; init_special_inode(result, mode, rdev); - mutex_lock(&hpfs_i(dir)->i_mutex); + down(&hpfs_i(dir)->i_sem); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -271,12 +271,12 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); brelse(bh); unlock_kernel(); return 0; bail2: - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); iput(result); bail1: brelse(bh); @@ -333,7 +333,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_op = &page_symlink_inode_operations; result->i_data.a_ops = &hpfs_symlink_aops; - mutex_lock(&hpfs_i(dir)->i_mutex); + down(&hpfs_i(dir)->i_sem); r = hpfs_add_dirent(dir, (char *)name, len, &dee, 0); if (r == 1) goto bail2; @@ -352,11 +352,11 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char 
*sy hpfs_write_inode_nolock(result); d_instantiate(dentry, result); - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); unlock_kernel(); return 0; bail2: - mutex_unlock(&hpfs_i(dir)->i_mutex); + up(&hpfs_i(dir)->i_sem); iput(result); bail1: brelse(bh); @@ -382,8 +382,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); hpfs_adjust_length((char *)name, &len); again: - mutex_lock(&hpfs_i(inode)->i_parent_mutex); - mutex_lock(&hpfs_i(dir)->i_mutex); + down(&hpfs_i(inode)->i_parent); + down(&hpfs_i(dir)->i_sem); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -410,8 +410,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) if (rep++) break; - mutex_unlock(&hpfs_i(dir)->i_mutex); - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); + up(&hpfs_i(dir)->i_sem); + up(&hpfs_i(inode)->i_parent); d_drop(dentry); spin_lock(&dentry->d_lock); if (atomic_read(&dentry->d_count) > 1 || @@ -442,8 +442,8 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: - mutex_unlock(&hpfs_i(dir)->i_mutex); - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); + up(&hpfs_i(dir)->i_sem); + up(&hpfs_i(inode)->i_parent); unlock_kernel(); return err; } @@ -463,8 +463,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) hpfs_adjust_length((char *)name, &len); lock_kernel(); - mutex_lock(&hpfs_i(inode)->i_parent_mutex); - mutex_lock(&hpfs_i(dir)->i_mutex); + down(&hpfs_i(inode)->i_parent); + down(&hpfs_i(dir)->i_sem); err = -ENOENT; de = map_dirent(dir, hpfs_i(dir)->i_dno, (char *)name, len, &dno, &qbh); if (!de) @@ -502,8 +502,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) out1: hpfs_brelse4(&qbh); out: - mutex_unlock(&hpfs_i(dir)->i_mutex); - mutex_unlock(&hpfs_i(inode)->i_parent_mutex); + up(&hpfs_i(dir)->i_sem); + up(&hpfs_i(inode)->i_parent); unlock_kernel(); return err; } @@ -565,12 +565,12 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, lock_kernel(); /* order doesn't matter, due to VFS exclusion */ - mutex_lock(&hpfs_i(i)->i_parent_mutex); + down(&hpfs_i(i)->i_parent); if (new_inode) - mutex_lock(&hpfs_i(new_inode)->i_parent_mutex); - mutex_lock(&hpfs_i(old_dir)->i_mutex); + down(&hpfs_i(new_inode)->i_parent); + down(&hpfs_i(old_dir)->i_sem); if (new_dir != old_dir) - mutex_lock(&hpfs_i(new_dir)->i_mutex); + down(&hpfs_i(new_dir)->i_sem); /* Erm? 
Moving over the empty non-busy directory is perfectly legal */ if (new_inode && S_ISDIR(new_inode->i_mode)) { @@ -650,11 +650,11 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, hpfs_decide_conv(i, (char *)new_name, new_len); end1: if (old_dir != new_dir) - mutex_unlock(&hpfs_i(new_dir)->i_mutex); - mutex_unlock(&hpfs_i(old_dir)->i_mutex); - mutex_unlock(&hpfs_i(i)->i_parent_mutex); + up(&hpfs_i(new_dir)->i_sem); + up(&hpfs_i(old_dir)->i_sem); + up(&hpfs_i(i)->i_parent); if (new_inode) - mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex); + up(&hpfs_i(new_inode)->i_parent); unlock_kernel(); return err; } diff --git a/trunk/fs/hpfs/super.c b/trunk/fs/hpfs/super.c index 9488a794076e..63e88d7e2c3b 100644 --- a/trunk/fs/hpfs/super.c +++ b/trunk/fs/hpfs/super.c @@ -181,8 +181,8 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - mutex_init(&ei->i_mutex); - mutex_init(&ei->i_parent_mutex); + init_MUTEX(&ei->i_sem); + init_MUTEX(&ei->i_parent); inode_init_once(&ei->vfs_inode); } } diff --git a/trunk/fs/inode.c b/trunk/fs/inode.c index 25967b67903d..d0be6159eb7f 100644 --- a/trunk/fs/inode.c +++ b/trunk/fs/inode.c @@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtable; DEFINE_SPINLOCK(inode_lock); /* - * iprune_mutex provides exclusion between the kswapd or try_to_free_pages + * iprune_sem provides exclusion between the kswapd or try_to_free_pages * icache shrinking path, and the umount path. Without this exclusion, * by the time prune_icache calls iput for the inode whose pages it has * been invalidating, or by the time it calls clear_inode & destroy_inode * from its final dispose_list, the struct super_block they refer to * (for inode->i_sb->s_op) may already have been freed and reused. */ -DEFINE_MUTEX(iprune_mutex); +DECLARE_MUTEX(iprune_sem); /* * Statistics gathering.. @@ -206,7 +206,7 @@ void inode_init_once(struct inode *inode) i_size_ordered_init(inode); #ifdef CONFIG_INOTIFY INIT_LIST_HEAD(&inode->inotify_watches); - mutex_init(&inode->inotify_mutex); + sema_init(&inode->inotify_sem, 1); #endif } @@ -319,7 +319,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) /* * We can reschedule here without worrying about the list's * consistency because the per-sb list of inodes must not - * change during umount anymore, and because iprune_mutex keeps + * change during umount anymore, and because iprune_sem keeps * shrink_icache_memory() away. */ cond_resched_lock(&inode_lock); @@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block * sb) int busy; LIST_HEAD(throw_away); - mutex_lock(&iprune_mutex); + down(&iprune_sem); spin_lock(&inode_lock); inotify_unmount_inodes(&sb->s_inodes); busy = invalidate_list(&sb->s_inodes, &throw_away); spin_unlock(&inode_lock); dispose_list(&throw_away); - mutex_unlock(&iprune_mutex); + up(&iprune_sem); return busy; } @@ -377,7 +377,7 @@ int __invalidate_device(struct block_device *bdev) if (sb) { /* * no need to lock the super, get_super holds the - * read mutex so the filesystem cannot go away + * read semaphore so the filesystem cannot go away * under us (->put_super runs with the write lock * hold). 
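/*
 * [Editor's sketch -- not from the patch. Both paths that iprune_sem
 * protects follow the same shape; invalidate_inodes() above and
 * prune_icache() below each do, in outline:]
 */
static void iprune_walk_example(void)
{
	LIST_HEAD(dispose);

	down(&iprune_sem);		/* shut out the other walker */
	spin_lock(&inode_lock);
	/* ... move dead or unused inodes onto "dispose" ... */
	spin_unlock(&inode_lock);
	dispose_list(&dispose);		/* may sleep; sem still held */
	up(&iprune_sem);
}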
*/ @@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan) int nr_scanned; unsigned long reap = 0; - mutex_lock(&iprune_mutex); + down(&iprune_sem); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; @@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode_lock); dispose_list(&freeable); - mutex_unlock(&iprune_mutex); + up(&iprune_sem); if (current_is_kswapd()) mod_page_state(kswapd_inodesteal, reap); diff --git a/trunk/fs/inotify.c b/trunk/fs/inotify.c index 0ee39ef591c6..3041503bde02 100644 --- a/trunk/fs/inotify.c +++ b/trunk/fs/inotify.c @@ -54,10 +54,10 @@ int inotify_max_queued_events; * Lock ordering: * * dentry->d_lock (used to keep d_move() away from dentry->d_parent) - * iprune_mutex (synchronize shrink_icache_memory()) + * iprune_sem (synchronize shrink_icache_memory()) * inode_lock (protects the super_block->s_inodes list) - * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) - * inotify_dev->mutex (protects inotify_device and watches->d_list) + * inode->inotify_sem (protects inode->inotify_watches and watches->i_list) + * inotify_dev->sem (protects inotify_device and watches->d_list) */ /* @@ -79,12 +79,12 @@ int inotify_max_queued_events; /* * struct inotify_device - represents an inotify instance * - * This structure is protected by the mutex 'mutex'. + * This structure is protected by the semaphore 'sem'. */ struct inotify_device { wait_queue_head_t wq; /* wait queue for i/o */ struct idr idr; /* idr mapping wd -> watch */ - struct mutex mutex; /* protects this bad boy */ + struct semaphore sem; /* protects this bad boy */ struct list_head events; /* list of queued events */ struct list_head watches; /* list of watches */ atomic_t count; /* reference count */ @@ -101,7 +101,7 @@ struct inotify_device { * device. In read(), this list is walked and all events that can fit in the * buffer are returned. * - * Protected by dev->mutex of the device in which we are queued. + * Protected by dev->sem of the device in which we are queued. */ struct inotify_kernel_event { struct inotify_event event; /* the user-space event */ @@ -112,8 +112,8 @@ struct inotify_kernel_event { /* * struct inotify_watch - represents a watch request on a specific inode * - * d_list is protected by dev->mutex of the associated watch->dev. - * i_list and mask are protected by inode->inotify_mutex of the associated inode. + * d_list is protected by dev->sem of the associated watch->dev. + * i_list and mask are protected by inode->inotify_sem of the associated inode. * dev, inode, and wd are never written to once the watch is created. */ struct inotify_watch { @@ -261,7 +261,7 @@ static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, /* * inotify_dev_get_event - return the next event in the given dev's queue * - * Caller must hold dev->mutex. + * Caller must hold dev->sem. */ static inline struct inotify_kernel_event * inotify_dev_get_event(struct inotify_device *dev) @@ -272,7 +272,7 @@ inotify_dev_get_event(struct inotify_device *dev) /* * inotify_dev_queue_event - add a new event to the given device * - * Caller must hold dev->mutex. Can sleep (calls kernel_event()). + * Caller must hold dev->sem. Can sleep (calls kernel_event()). 
*/ static void inotify_dev_queue_event(struct inotify_device *dev, struct inotify_watch *watch, u32 mask, @@ -315,7 +315,7 @@ static void inotify_dev_queue_event(struct inotify_device *dev, /* * remove_kevent - cleans up and ultimately frees the given kevent * - * Caller must hold dev->mutex. + * Caller must hold dev->sem. */ static void remove_kevent(struct inotify_device *dev, struct inotify_kernel_event *kevent) @@ -332,7 +332,7 @@ static void remove_kevent(struct inotify_device *dev, /* * inotify_dev_event_dequeue - destroy an event on the given device * - * Caller must hold dev->mutex. + * Caller must hold dev->sem. */ static void inotify_dev_event_dequeue(struct inotify_device *dev) { @@ -346,7 +346,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) /* * inotify_dev_get_wd - returns the next WD for use by the given dev * - * Callers must hold dev->mutex. This function can sleep. + * Callers must hold dev->sem. This function can sleep. */ static int inotify_dev_get_wd(struct inotify_device *dev, struct inotify_watch *watch) @@ -383,7 +383,7 @@ static int find_inode(const char __user *dirname, struct nameidata *nd, /* * create_watch - creates a watch on the given device. * - * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. + * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep. * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. */ static struct inotify_watch *create_watch(struct inotify_device *dev, @@ -434,7 +434,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev, /* * inotify_find_dev - find the watch associated with the given inode and dev * - * Callers must hold inode->inotify_mutex. + * Callers must hold inode->inotify_sem. */ static struct inotify_watch *inode_find_dev(struct inode *inode, struct inotify_device *dev) @@ -469,7 +469,7 @@ static void remove_watch_no_event(struct inotify_watch *watch, * the IN_IGNORED event to the given device signifying that the inode is no * longer watched. * - * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a + * Callers must hold both inode->inotify_sem and dev->sem. We drop a * reference to the inode before returning. * * The inode is not iput() so as to remain atomic. If the inode needs to be @@ -507,21 +507,21 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, if (!inotify_inode_watched(inode)) return; - mutex_lock(&inode->inotify_mutex); + down(&inode->inotify_sem); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { u32 watch_mask = watch->mask; if (watch_mask & mask) { struct inotify_device *dev = watch->dev; get_inotify_watch(watch); - mutex_lock(&dev->mutex); + down(&dev->sem); inotify_dev_queue_event(dev, watch, mask, cookie, name); if (watch_mask & IN_ONESHOT) remove_watch_no_event(watch, dev); - mutex_unlock(&dev->mutex); + up(&dev->sem); put_inotify_watch(watch); } } - mutex_unlock(&inode->inotify_mutex); + up(&inode->inotify_sem); } EXPORT_SYMBOL_GPL(inotify_inode_queue_event); @@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie); * @list: list of inodes being unmounted (sb->s_inodes) * * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. + * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay. * We temporarily drop inode_lock, however, and CAN block. 
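/*
 * [Editor's sketch -- hypothetical, not in the patch. Every two-lock
 * sequence in this file obeys the ordering stated in the comment block
 * above: the inode's inotify_sem first, then the device's sem:]
 */
static void inotify_two_lock_example(struct inode *inode,
				     struct inotify_device *dev)
{
	down(&inode->inotify_sem);	/* outer: inode's watch list */
	down(&dev->sem);		/* inner: device state and queue */
	/* ... add, remove, or queue an event for a watch ... */
	up(&dev->sem);
	up(&inode->inotify_sem);
}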
*/ void inotify_unmount_inodes(struct list_head *list) @@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_head *list) * We can safely drop inode_lock here because we hold * references on both inode and next_i. Also no new inodes * will be added since the umount has begun. Finally, - * iprune_mutex keeps shrink_icache_memory() away. + * iprune_sem keeps shrink_icache_memory() away. */ spin_unlock(&inode_lock); @@ -626,16 +626,16 @@ void inotify_unmount_inodes(struct list_head *list) iput(need_iput_tmp); /* for each watch, send IN_UNMOUNT and then remove it */ - mutex_lock(&inode->inotify_mutex); + down(&inode->inotify_sem); watches = &inode->inotify_watches; list_for_each_entry_safe(watch, next_w, watches, i_list) { struct inotify_device *dev = watch->dev; - mutex_lock(&dev->mutex); + down(&dev->sem); inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); remove_watch(watch, dev); - mutex_unlock(&dev->mutex); + up(&dev->sem); } - mutex_unlock(&inode->inotify_mutex); + up(&inode->inotify_sem); iput(inode); spin_lock(&inode_lock); @@ -651,14 +651,14 @@ void inotify_inode_is_dead(struct inode *inode) { struct inotify_watch *watch, *next; - mutex_lock(&inode->inotify_mutex); + down(&inode->inotify_sem); list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { struct inotify_device *dev = watch->dev; - mutex_lock(&dev->mutex); + down(&dev->sem); remove_watch(watch, dev); - mutex_unlock(&dev->mutex); + up(&dev->sem); } - mutex_unlock(&inode->inotify_mutex); + up(&inode->inotify_sem); } EXPORT_SYMBOL_GPL(inotify_inode_is_dead); @@ -670,10 +670,10 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait) int ret = 0; poll_wait(file, &dev->wq, wait); - mutex_lock(&dev->mutex); + down(&dev->sem); if (!list_empty(&dev->events)) ret = POLLIN | POLLRDNORM; - mutex_unlock(&dev->mutex); + up(&dev->sem); return ret; } @@ -695,9 +695,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); - mutex_lock(&dev->mutex); + down(&dev->sem); events = !list_empty(&dev->events); - mutex_unlock(&dev->mutex); + up(&dev->sem); if (events) { ret = 0; break; @@ -720,7 +720,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, if (ret) return ret; - mutex_lock(&dev->mutex); + down(&dev->sem); while (1) { struct inotify_kernel_event *kevent; @@ -750,7 +750,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf, remove_kevent(dev, kevent); } - mutex_unlock(&dev->mutex); + up(&dev->sem); return ret; } @@ -763,37 +763,37 @@ static int inotify_release(struct inode *ignored, struct file *file) * Destroy all of the watches on this device. Unfortunately, not very * pretty. We cannot do a simple iteration over the list, because we * do not know the inode until we iterate to the watch. But we need to - * hold inode->inotify_mutex before dev->mutex. The following works. + * hold inode->inotify_sem before dev->sem. The following works. 
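[Editor's sketch] The teardown loop that follows is worth restating: peek at the head of the device's watch list under dev->sem alone, pin the watch with a reference so it cannot be freed, drop dev->sem, and only then take both locks in the canonical order to do the removal. A self-contained userspace sketch of that shape (one list, plain integer refcounts, a single teardown thread, placeholder names):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct watch {
        struct watch *next;
        int refs;
};

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t dev_lock   = PTHREAD_MUTEX_INITIALIZER; /* inner */
static struct watch *watches;

static void release_all(void)
{
        for (;;) {
                struct watch *w;

                pthread_mutex_lock(&dev_lock);     /* inner lock only: peek */
                w = watches;
                if (!w) {
                        pthread_mutex_unlock(&dev_lock);
                        break;
                }
                w->refs++;                         /* pin so it can't be freed */
                pthread_mutex_unlock(&dev_lock);

                pthread_mutex_lock(&inode_lock);   /* retake both locks */
                pthread_mutex_lock(&dev_lock);     /* in the canonical order */
                watches = w->next;                 /* remove from the list */
                w->refs--;                         /* the list's reference is gone */
                pthread_mutex_unlock(&dev_lock);
                pthread_mutex_unlock(&inode_lock);

                if (--w->refs == 0)                /* drop the pin; free if last */
                        free(w);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct watch *w = calloc(1, sizeof(*w));

                if (!w)
                        return 1;
                w->refs = 1;                       /* the list's reference */
                w->next = watches;
                watches = w;
        }
        release_all();
        puts("all watches released");
        return 0;
}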
*/ while (1) { struct inotify_watch *watch; struct list_head *watches; struct inode *inode; - mutex_lock(&dev->mutex); + down(&dev->sem); watches = &dev->watches; if (list_empty(watches)) { - mutex_unlock(&dev->mutex); + up(&dev->sem); break; } watch = list_entry(watches->next, struct inotify_watch, d_list); get_inotify_watch(watch); - mutex_unlock(&dev->mutex); + up(&dev->sem); inode = watch->inode; - mutex_lock(&inode->inotify_mutex); - mutex_lock(&dev->mutex); + down(&inode->inotify_sem); + down(&dev->sem); remove_watch_no_event(watch, dev); - mutex_unlock(&dev->mutex); - mutex_unlock(&inode->inotify_mutex); + up(&dev->sem); + up(&inode->inotify_sem); put_inotify_watch(watch); } /* destroy all of the events on this device */ - mutex_lock(&dev->mutex); + down(&dev->sem); while (!list_empty(&dev->events)) inotify_dev_event_dequeue(dev); - mutex_unlock(&dev->mutex); + up(&dev->sem); /* free this device: the put matching the get in inotify_init() */ put_inotify_dev(dev); @@ -811,26 +811,26 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) struct inotify_watch *watch; struct inode *inode; - mutex_lock(&dev->mutex); + down(&dev->sem); watch = idr_find(&dev->idr, wd); if (unlikely(!watch)) { - mutex_unlock(&dev->mutex); + up(&dev->sem); return -EINVAL; } get_inotify_watch(watch); inode = watch->inode; - mutex_unlock(&dev->mutex); + up(&dev->sem); - mutex_lock(&inode->inotify_mutex); - mutex_lock(&dev->mutex); + down(&inode->inotify_sem); + down(&dev->sem); /* make sure that we did not race */ watch = idr_find(&dev->idr, wd); if (likely(watch)) remove_watch(watch, dev); - mutex_unlock(&dev->mutex); - mutex_unlock(&inode->inotify_mutex); + up(&dev->sem); + up(&inode->inotify_sem); put_inotify_watch(watch); return 0; @@ -905,7 +905,7 @@ asmlinkage long sys_inotify_init(void) INIT_LIST_HEAD(&dev->events); INIT_LIST_HEAD(&dev->watches); init_waitqueue_head(&dev->wq); - mutex_init(&dev->mutex); + sema_init(&dev->sem, 1); dev->event_count = 0; dev->queue_size = 0; dev->max_events = inotify_max_queued_events; @@ -960,8 +960,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) inode = nd.dentry->d_inode; dev = filp->private_data; - mutex_lock(&inode->inotify_mutex); - mutex_lock(&dev->mutex); + down(&inode->inotify_sem); + down(&dev->sem); if (mask & IN_MASK_ADD) mask_add = 1; @@ -998,8 +998,8 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) list_add(&watch->i_list, &inode->inotify_watches); ret = watch->wd; out: - mutex_unlock(&dev->mutex); - mutex_unlock(&inode->inotify_mutex); + up(&dev->sem); + up(&inode->inotify_sem); path_release(&nd); fput_and_out: fput_light(filp, fput_needed); diff --git a/trunk/fs/jbd/checkpoint.c b/trunk/fs/jbd/checkpoint.c index 3f5102b069db..543ed543d1e5 100644 --- a/trunk/fs/jbd/checkpoint.c +++ b/trunk/fs/jbd/checkpoint.c @@ -85,7 +85,7 @@ void __log_wait_for_space(journal_t *journal) if (journal->j_flags & JFS_ABORT) return; spin_unlock(&journal->j_state_lock); - mutex_lock(&journal->j_checkpoint_mutex); + down(&journal->j_checkpoint_sem); /* * Test again, another process may have checkpointed while we @@ -98,7 +98,7 @@ void __log_wait_for_space(journal_t *journal) log_do_checkpoint(journal); spin_lock(&journal->j_state_lock); } - mutex_unlock(&journal->j_checkpoint_mutex); + up(&journal->j_checkpoint_sem); } } diff --git a/trunk/fs/jbd/journal.c b/trunk/fs/jbd/journal.c index 95a628d8cac8..e4b516ac4989 100644 --- a/trunk/fs/jbd/journal.c +++ b/trunk/fs/jbd/journal.c @@ -659,8 +659,8 @@ 
static journal_t * journal_init_common (void) init_waitqueue_head(&journal->j_wait_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); - mutex_init(&journal->j_barrier); - mutex_init(&journal->j_checkpoint_mutex); + init_MUTEX(&journal->j_barrier); + init_MUTEX(&journal->j_checkpoint_sem); spin_lock_init(&journal->j_revoke_lock); spin_lock_init(&journal->j_list_lock); spin_lock_init(&journal->j_state_lock); diff --git a/trunk/fs/jbd/transaction.c b/trunk/fs/jbd/transaction.c index 5fc40888f4cf..ca917973c2c0 100644 --- a/trunk/fs/jbd/transaction.c +++ b/trunk/fs/jbd/transaction.c @@ -455,7 +455,7 @@ void journal_lock_updates(journal_t *journal) * to make sure that we serialise special journal-locked operations * too. */ - mutex_lock(&journal->j_barrier); + down(&journal->j_barrier); } /** @@ -470,7 +470,7 @@ void journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); - mutex_unlock(&journal->j_barrier); + up(&journal->j_barrier); spin_lock(&journal->j_state_lock); --journal->j_barrier_count; spin_unlock(&journal->j_state_lock); diff --git a/trunk/fs/jffs/inode-v23.c b/trunk/fs/jffs/inode-v23.c index 890d7ff7456d..fc3855a1aef3 100644 --- a/trunk/fs/jffs/inode-v23.c +++ b/trunk/fs/jffs/inode-v23.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include @@ -203,7 +203,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) fmc = c->fmc; D3(printk (KERN_NOTICE "notify_change(): down biglock\n")); - mutex_lock(&fmc->biglock); + down(&fmc->biglock); f = jffs_find_file(c, inode->i_ino); @@ -211,7 +211,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) printk("jffs_setattr(): Invalid inode number: %lu\n", inode->i_ino); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - mutex_unlock(&fmc->biglock); + up(&fmc->biglock); res = -EINVAL; goto out; }); @@ -232,7 +232,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) if (!(new_node = jffs_alloc_node())) { D(printk("jffs_setattr(): Allocation failed!\n")); D3(printk (KERN_NOTICE "notify_change(): up biglock\n")); - mutex_unlock(&fmc->biglock); + up(&fmc->biglock); res = -ENOMEM; goto out; } @@ -319,7 +319,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) D(printk("jffs_notify_change(): The write failed!\n")); jffs_free_node(new_node); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); goto out; } @@ -327,7 +327,7 @@ jffs_setattr(struct dentry *dentry, struct iattr *iattr) mark_inode_dirty(inode); D3(printk (KERN_NOTICE "n_c(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); out: unlock_kernel(); return res; @@ -461,7 +461,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, goto jffs_rename_end; } D3(printk (KERN_NOTICE "rename(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); /* Create a node and initialize as much as needed. 
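[Editor's sketch] Two details in the jbd hunks above are worth spelling out. First, init_MUTEX(&sem) in kernels of this vintage is shorthand for sema_init(&sem, 1): a semaphore born unlocked. Second, __log_wait_for_space() deliberately re-tests its condition after acquiring j_checkpoint_sem, since another process may have checkpointed while this one slept on the semaphore. A runnable userspace sketch of that test-again-after-waiting shape (the names and the fake log_space state are illustrative, not jbd's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t checkpoint_lock = PTHREAD_MUTEX_INITIALIZER;
static int log_space;          /* pretend free space in the journal */

static void do_checkpoint(void)
{
        log_space += 64;       /* reclaim some space */
}

static void wait_for_space(int needed)
{
        if (log_space >= needed)
                return;
        pthread_mutex_lock(&checkpoint_lock);
        /*
         * Test again: another thread may have checkpointed while we
         * were blocked acquiring the lock, making our work unnecessary.
         */
        if (log_space < needed)
                do_checkpoint();
        pthread_mutex_unlock(&checkpoint_lock);
}

int main(void)
{
        wait_for_space(32);
        printf("log_space = %d\n", log_space);
        return 0;
}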
*/ result = -ENOMEM; if (!(node = jffs_alloc_node())) { @@ -555,7 +555,7 @@ jffs_rename(struct inode *old_dir, struct dentry *old_dentry, jffs_rename_end: D3(printk (KERN_NOTICE "rename(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_rename() */ @@ -574,14 +574,14 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) int ddino; lock_kernel(); D3(printk (KERN_NOTICE "readdir(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); D2(printk("jffs_readdir(): inode: 0x%p, filp: 0x%p\n", inode, filp)); if (filp->f_pos == 0) { D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino)); if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -598,7 +598,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) D3(printk("jffs_readdir(): \"..\" %u\n", ddino)); if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -617,7 +617,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (filldir(dirent, f->name, f->nsize, filp->f_pos , f->ino, DT_UNKNOWN) < 0) { D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return 0; } @@ -627,7 +627,7 @@ jffs_readdir(struct file *filp, void *dirent, filldir_t filldir) } while(f && f->deleted); } D3(printk (KERN_NOTICE "readdir(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return filp->f_pos; } /* jffs_readdir() */ @@ -660,7 +660,7 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) }); D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); r = -ENAMETOOLONG; if (len > JFFS_MAX_NAME_LEN) { @@ -683,31 +683,31 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if ((len == 1) && (name[0] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->ino))) { D(printk("jffs_lookup(): . iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); } else if ((len == 2) && (name[0] == '.') && (name[1] == '.')) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, d->pino))) { D(printk("jffs_lookup(): .. iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); } else if ((f = jffs_find_child(d, name, len))) { D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); if (!(inode = iget(dir->i_sb, f->ino))) { D(printk("jffs_lookup(): iget() ==> NULL\n")); goto jffs_lookup_end_no_biglock; } D3(printk (KERN_NOTICE "lookup(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); } else { D3(printk("jffs_lookup(): Couldn't find the file. 
" "f = 0x%p, name = \"%s\", d = 0x%p, d->ino = %u\n", @@ -717,13 +717,13 @@ jffs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) d_add(dentry, inode); D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return NULL; jffs_lookup_end: D3(printk (KERN_NOTICE "lookup(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); jffs_lookup_end_no_biglock: unlock_kernel(); @@ -753,7 +753,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) ClearPageError(page); D3(printk (KERN_NOTICE "readpage(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); read_len = 0; result = 0; @@ -782,7 +782,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page) kunmap(page); D3(printk (KERN_NOTICE "readpage(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); if (result) { SetPageError(page); @@ -839,7 +839,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) c = dir_f->c; D3(printk (KERN_NOTICE "mkdir(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); dir_mode = S_IFDIR | (mode & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask); @@ -906,7 +906,7 @@ jffs_mkdir(struct inode *dir, struct dentry *dentry, int mode) result = 0; jffs_mkdir_end: D3(printk (KERN_NOTICE "mkdir(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mkdir() */ @@ -921,10 +921,10 @@ jffs_rmdir(struct inode *dir, struct dentry *dentry) D3(printk("***jffs_rmdir()\n")); D3(printk (KERN_NOTICE "rmdir(): down biglock\n")); lock_kernel(); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); ret = jffs_remove(dir, dentry, S_IFDIR); D3(printk (KERN_NOTICE "rmdir(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -940,10 +940,10 @@ jffs_unlink(struct inode *dir, struct dentry *dentry) lock_kernel(); D3(printk("***jffs_unlink()\n")); D3(printk (KERN_NOTICE "unlink(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); ret = jffs_remove(dir, dentry, 0); D3(printk (KERN_NOTICE "unlink(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return ret; } @@ -1086,7 +1086,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) c = dir_f->c; D3(printk (KERN_NOTICE "mknod(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); /* Create and initialize a new node. 
*/ if (!(node = jffs_alloc_node())) { @@ -1152,7 +1152,7 @@ jffs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) jffs_mknod_end: D3(printk (KERN_NOTICE "mknod(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return result; } /* jffs_mknod() */ @@ -1203,7 +1203,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) return -ENOMEM; } D3(printk (KERN_NOTICE "symlink(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1253,7 +1253,7 @@ jffs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) d_instantiate(dentry, inode); jffs_symlink_end: D3(printk (KERN_NOTICE "symlink(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_symlink() */ @@ -1306,7 +1306,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, return -ENOMEM; } D3(printk (KERN_NOTICE "create(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); node->data_offset = 0; node->removed_size = 0; @@ -1359,7 +1359,7 @@ jffs_create(struct inode *dir, struct dentry *dentry, int mode, d_instantiate(dentry, inode); jffs_create_end: D3(printk (KERN_NOTICE "create(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); unlock_kernel(); return err; } /* jffs_create() */ @@ -1423,7 +1423,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count); D3(printk (KERN_NOTICE "file_write(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); /* Urgh. POSIX says we can do short writes if we feel like it. * In practice, we can't. Nothing will cope. So we loop until @@ -1511,7 +1511,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count, } out: D3(printk (KERN_NOTICE "file_write(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); /* Fix things in the real inode. 
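[Editor's sketch] Nearly every biglock transition in these jffs hunks is bracketed by D3(printk(...)) trace statements. The D1..D3 macros are JFFS's compile-time debug levels: at or above a given verbosity a macro expands to its argument, below it the macro expands to nothing, so the tracing disappears entirely from non-debug builds. A hedged reconstruction of the idea (the D3 name matches JFFS, but the JFFS_VERBOSE knob and userspace printf are stand-ins for the real config option and printk):

#include <stdio.h>

#define JFFS_VERBOSE 3         /* stand-in for the kernel config option */

#if JFFS_VERBOSE >= 3
#define D3(x) x                /* verbose enough: emit the trace */
#else
#define D3(x)                  /* compiled out entirely */
#endif

int main(void)
{
        D3(printf("file_write(): down biglock\n"));
        /* ... critical section under the big lock ... */
        D3(printf("file_write(): up biglock\n"));
        return 0;
}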
*/ if (pos > inode->i_size) { @@ -1567,7 +1567,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, return -EIO; } D3(printk (KERN_NOTICE "ioctl(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); switch (cmd) { case JFFS_PRINT_HASH: @@ -1609,7 +1609,7 @@ jffs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, ret = -ENOTTY; } D3(printk (KERN_NOTICE "ioctl(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); return ret; } /* jffs_ioctl() */ @@ -1685,12 +1685,12 @@ jffs_read_inode(struct inode *inode) } c = (struct jffs_control *)inode->i_sb->s_fs_info; D3(printk (KERN_NOTICE "read_inode(): down biglock\n")); - mutex_lock(&c->fmc->biglock); + down(&c->fmc->biglock); if (!(f = jffs_find_file(c, inode->i_ino))) { D(printk("jffs_read_inode(): No such inode (%lu).\n", inode->i_ino)); D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); return; } inode->u.generic_ip = (void *)f; @@ -1732,7 +1732,7 @@ jffs_read_inode(struct inode *inode) } D3(printk (KERN_NOTICE "read_inode(): up biglock\n")); - mutex_unlock(&c->fmc->biglock); + up(&c->fmc->biglock); } diff --git a/trunk/fs/jffs/intrep.c b/trunk/fs/jffs/intrep.c index 0ef207dfaf6f..ce7b54b0b2b7 100644 --- a/trunk/fs/jffs/intrep.c +++ b/trunk/fs/jffs/intrep.c @@ -62,7 +62,7 @@ #include #include #include -#include +#include #include #include #include @@ -3416,7 +3416,7 @@ jffs_garbage_collect_thread(void *ptr) D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n")); D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n")); - mutex_lock(&fmc->biglock); + down(&fmc->biglock); D1(printk("***jffs_garbage_collect_thread(): round #%u, " "fmc->dirty_size = %u\n", i++, fmc->dirty_size)); @@ -3447,6 +3447,6 @@ jffs_garbage_collect_thread(void *ptr) gc_end: D3(printk (KERN_NOTICE "g_c_thread(): up biglock\n")); - mutex_unlock(&fmc->biglock); + up(&fmc->biglock); } /* for (;;) */ } /* jffs_garbage_collect_thread() */ diff --git a/trunk/fs/jffs/jffs_fm.c b/trunk/fs/jffs/jffs_fm.c index 7d8ca1aeace2..6da13b309bd1 100644 --- a/trunk/fs/jffs/jffs_fm.c +++ b/trunk/fs/jffs/jffs_fm.c @@ -139,7 +139,7 @@ jffs_build_begin(struct jffs_control *c, int unit) fmc->tail = NULL; fmc->head_extra = NULL; fmc->tail_extra = NULL; - mutex_init(&fmc->biglock); + init_MUTEX(&fmc->biglock); return fmc; } diff --git a/trunk/fs/jffs/jffs_fm.h b/trunk/fs/jffs/jffs_fm.h index c794d923df2a..f64151e74122 100644 --- a/trunk/fs/jffs/jffs_fm.h +++ b/trunk/fs/jffs/jffs_fm.h @@ -20,11 +20,10 @@ #ifndef __LINUX_JFFS_FM_H__ #define __LINUX_JFFS_FM_H__ -#include #include #include #include -#include +#include /* The alignment between two nodes in the flash memory. 
*/ #define JFFS_ALIGN_SIZE 4 @@ -98,7 +97,7 @@ struct jffs_fmcontrol struct jffs_fm *tail; struct jffs_fm *head_extra; struct jffs_fm *tail_extra; - struct mutex biglock; + struct semaphore biglock; }; /* Notice the two members head_extra and tail_extra in the jffs_control diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c index 4fdeaceb892c..71fd08fa4103 100644 --- a/trunk/fs/libfs.c +++ b/trunk/fs/libfs.c @@ -7,8 +7,6 @@ #include #include #include -#include - #include int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, @@ -532,7 +530,7 @@ struct simple_attr { char set_buf[24]; void *data; const char *fmt; /* format for read operation */ - struct mutex mutex; /* protects access to these buffers */ + struct semaphore sem; /* protects access to these buffers */ }; /* simple_attr_open is called by an actual attribute open file operation @@ -551,7 +549,7 @@ int simple_attr_open(struct inode *inode, struct file *file, attr->set = set; attr->data = inode->u.generic_ip; attr->fmt = fmt; - mutex_init(&attr->mutex); + init_MUTEX(&attr->sem); file->private_data = attr; @@ -577,7 +575,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, if (!attr->get) return -EACCES; - mutex_lock(&attr->mutex); + down(&attr->sem); if (*ppos) /* continued read */ size = strlen(attr->get_buf); else /* first read */ @@ -586,7 +584,7 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, (unsigned long long)attr->get(attr->data)); ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); - mutex_unlock(&attr->mutex); + up(&attr->sem); return ret; } @@ -604,7 +602,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, if (!attr->set) return -EACCES; - mutex_lock(&attr->mutex); + down(&attr->sem); ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) @@ -615,7 +613,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, val = simple_strtol(attr->set_buf, NULL, 0); attr->set(attr->data, val); out: - mutex_unlock(&attr->mutex); + up(&attr->sem); return ret; } diff --git a/trunk/fs/minix/namei.c b/trunk/fs/minix/namei.c index 5b6a4540a05b..b25bca5bdb57 100644 --- a/trunk/fs/minix/namei.c +++ b/trunk/fs/minix/namei.c @@ -6,6 +6,18 @@ #include "minix.h" +static inline void inc_count(struct inode *inode) +{ + inode->i_nlink++; + mark_inode_dirty(inode); +} + +static inline void dec_count(struct inode *inode) +{ + inode->i_nlink--; + mark_inode_dirty(inode); +} + static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = minix_add_link(dentry, inode); @@ -13,7 +25,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - inode_dec_link_count(inode); + dec_count(inode); iput(inode); return err; } @@ -113,7 +125,7 @@ static int minix_symlink(struct inode * dir, struct dentry *dentry, return err; out_fail: - inode_dec_link_count(inode); + dec_count(inode); iput(inode); goto out; } @@ -127,7 +139,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = CURRENT_TIME_SEC; - inode_inc_link_count(inode); + inc_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); } @@ -140,7 +152,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= minix_sb(dir->i_sb)->s_link_max) goto out; - inode_inc_link_count(dir); + inc_count(dir); inode = minix_new_inode(dir, &err); if (!inode) @@ -151,7 +163,7 @@ static int 
minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) inode->i_mode |= S_ISGID; minix_set_inode(inode, 0); - inode_inc_link_count(inode); + inc_count(inode); err = minix_make_empty(inode, dir); if (err) @@ -166,11 +178,11 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) return err; out_fail: - inode_dec_link_count(inode); - inode_dec_link_count(inode); + dec_count(inode); + dec_count(inode); iput(inode); out_dir: - inode_dec_link_count(dir); + dec_count(dir); goto out; } @@ -190,7 +202,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry) goto end_unlink; inode->i_ctime = dir->i_ctime; - inode_dec_link_count(inode); + dec_count(inode); end_unlink: return err; } @@ -203,8 +215,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) if (minix_empty_dir(inode)) { err = minix_unlink(dir, dentry); if (!err) { - inode_dec_link_count(dir); - inode_dec_link_count(inode); + dec_count(dir); + dec_count(inode); } } return err; @@ -245,34 +257,34 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); + inc_count(old_inode); minix_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - inode_dec_link_count(new_inode); + dec_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= info->s_link_max) goto out_dir; } - inode_inc_link_count(old_inode); + inc_count(old_inode); err = minix_add_link(new_dentry, old_inode); if (err) { - inode_dec_link_count(old_inode); + dec_count(old_inode); goto out_dir; } if (dir_de) - inode_inc_link_count(new_dir); + inc_count(new_dir); } minix_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + dec_count(old_inode); if (dir_de) { minix_set_link(dir_de, dir_page, new_dir); - inode_dec_link_count(old_dir); + dec_count(old_dir); } return 0; diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index c72b940797fc..8dc2b038d5d9 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -104,7 +104,7 @@ */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) - * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives + * implemented. Let's see if raised priority of ->s_vfs_rename_sem gives * any extra contention... */ @@ -1422,7 +1422,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) return NULL; } - mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); + down(&p1->d_inode->i_sb->s_vfs_rename_sem); for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { @@ -1450,7 +1450,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) mutex_unlock(&p1->d_inode->i_mutex); if (p1 != p2) { mutex_unlock(&p2->d_inode->i_mutex); - mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); + up(&p1->d_inode->i_sb->s_vfs_rename_sem); } } @@ -2277,17 +2277,17 @@ asmlinkage long sys_link(const char __user *oldname, const char __user *newname) * a) we can get into loop creation. Check is done in is_subdir(). * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on - * sb->s_vfs_rename_mutex. We might be more accurate, but that's another + * sb->s_vfs_rename_sem. We might be more accurate, but that's another * story. * c) we have to lock _three_ objects - parents and victim (if it exists). 
* And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change - * only under ->s_vfs_rename_mutex _and_ that parent of the object we + * only under ->s_vfs_rename_sem _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, - * lock child" and rename is under ->s_vfs_rename_mutex. + * lock child" and rename is under ->s_vfs_rename_sem. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. diff --git a/trunk/fs/ncpfs/file.c b/trunk/fs/ncpfs/file.c index ebdad8f6398f..973b444d6914 100644 --- a/trunk/fs/ncpfs/file.c +++ b/trunk/fs/ncpfs/file.c @@ -46,7 +46,7 @@ int ncp_make_open(struct inode *inode, int right) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum); error = -EACCES; - mutex_lock(&NCP_FINFO(inode)->open_mutex); + down(&NCP_FINFO(inode)->open_sem); if (!atomic_read(&NCP_FINFO(inode)->opened)) { struct ncp_entry_info finfo; int result; @@ -93,7 +93,7 @@ int ncp_make_open(struct inode *inode, int right) } out_unlock: - mutex_unlock(&NCP_FINFO(inode)->open_mutex); + up(&NCP_FINFO(inode)->open_sem); out: return error; } diff --git a/trunk/fs/ncpfs/inode.c b/trunk/fs/ncpfs/inode.c index 0b521d3d97ce..d277a58bd128 100644 --- a/trunk/fs/ncpfs/inode.c +++ b/trunk/fs/ncpfs/inode.c @@ -63,7 +63,7 @@ static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR) { - mutex_init(&ei->open_mutex); + init_MUTEX(&ei->open_sem); inode_init_once(&ei->vfs_inode); } } @@ -520,7 +520,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) } /* server->lock = 0; */ - mutex_init(&server->mutex); + init_MUTEX(&server->sem); server->packet = NULL; /* server->buffer_size = 0; */ /* server->conn_status = 0; */ @@ -557,7 +557,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server->dentry_ttl = 0; /* no caching */ INIT_LIST_HEAD(&server->tx.requests); - mutex_init(&server->rcv.creq_mutex); + init_MUTEX(&server->rcv.creq_sem); server->tx.creq = NULL; server->rcv.creq = NULL; server->data_ready = sock->sk->sk_data_ready; diff --git a/trunk/fs/ncpfs/ncplib_kernel.c b/trunk/fs/ncpfs/ncplib_kernel.c index d9ebf6439f59..c755e1848a42 100644 --- a/trunk/fs/ncpfs/ncplib_kernel.c +++ b/trunk/fs/ncpfs/ncplib_kernel.c @@ -291,7 +291,7 @@ ncp_make_closed(struct inode *inode) int err; err = 0; - mutex_lock(&NCP_FINFO(inode)->open_mutex); + down(&NCP_FINFO(inode)->open_sem); if (atomic_read(&NCP_FINFO(inode)->opened) == 1) { atomic_set(&NCP_FINFO(inode)->opened, 0); err = ncp_close_file(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle); @@ -301,7 +301,7 @@ ncp_make_closed(struct inode *inode) NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, err); } - mutex_unlock(&NCP_FINFO(inode)->open_mutex); + up(&NCP_FINFO(inode)->open_sem); return err; } diff --git a/trunk/fs/ncpfs/sock.c b/trunk/fs/ncpfs/sock.c index 8783eb7ec641..6593a5ca88ba 100644 --- a/trunk/fs/ncpfs/sock.c +++ b/trunk/fs/ncpfs/sock.c @@ -171,9 +171,9 @@ static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_req 
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) { - mutex_lock(&server->rcv.creq_mutex); + down(&server->rcv.creq_sem); __ncp_abort_request(server, req, err); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); } static inline void __ncptcp_abort(struct ncp_server *server) @@ -303,20 +303,20 @@ static inline void __ncp_start_request(struct ncp_server *server, struct ncp_req static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) { - mutex_lock(&server->rcv.creq_mutex); + down(&server->rcv.creq_sem); if (!ncp_conn_valid(server)) { - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); printk(KERN_ERR "ncpfs: tcp: Server died\n"); return -EIO; } if (server->tx.creq || server->rcv.creq) { req->status = RQ_QUEUED; list_add_tail(&req->req, &server->tx.requests); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); return 0; } __ncp_start_request(server, req); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); return 0; } @@ -400,7 +400,7 @@ void ncpdgram_rcv_proc(void *s) info_server(server, 0, server->unexpected_packet.data, result); continue; } - mutex_lock(&server->rcv.creq_mutex); + down(&server->rcv.creq_sem); req = server->rcv.creq; if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && server->connection == get_conn_number(&reply)))) { @@ -430,11 +430,11 @@ void ncpdgram_rcv_proc(void *s) server->rcv.creq = NULL; ncp_finish_request(req, result); __ncp_next_request(server); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); continue; } } - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); } drop:; _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); @@ -472,9 +472,9 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) void ncpdgram_timeout_proc(void *s) { struct ncp_server *server = s; - mutex_lock(&server->rcv.creq_mutex); + down(&server->rcv.creq_sem); __ncpdgram_timeout_proc(server); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); } static inline void ncp_init_req(struct ncp_request_reply* req) @@ -657,18 +657,18 @@ void ncp_tcp_rcv_proc(void *s) { struct ncp_server *server = s; - mutex_lock(&server->rcv.creq_mutex); + down(&server->rcv.creq_sem); __ncptcp_rcv_proc(server); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); } void ncp_tcp_tx_proc(void *s) { struct ncp_server *server = s; - mutex_lock(&server->rcv.creq_mutex); + down(&server->rcv.creq_sem); __ncptcp_try_send(server); - mutex_unlock(&server->rcv.creq_mutex); + up(&server->rcv.creq_sem); } static int do_ncp_rpc_call(struct ncp_server *server, int size, @@ -833,7 +833,7 @@ int ncp_disconnect(struct ncp_server *server) void ncp_lock_server(struct ncp_server *server) { - mutex_lock(&server->mutex); + down(&server->sem); if (server->lock) printk(KERN_WARNING "ncp_lock_server: was locked!\n"); server->lock = 1; @@ -846,5 +846,5 @@ void ncp_unlock_server(struct ncp_server *server) return; } server->lock = 0; - mutex_unlock(&server->mutex); + up(&server->sem); } diff --git a/trunk/fs/open.c b/trunk/fs/open.c index 1091dadd6c38..70e0230d8e77 100644 --- a/trunk/fs/open.c +++ b/trunk/fs/open.c @@ -973,7 +973,7 @@ int get_unused_fd(void) fdt = files_fdtable(files); fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fdset, - files->next_fd); + fdt->next_fd); /* * N.B. 
For clone tasks sharing a files structure, this test @@ -998,7 +998,7 @@ int get_unused_fd(void) FD_SET(fd, fdt->open_fds); FD_CLR(fd, fdt->close_on_exec); - files->next_fd = fd + 1; + fdt->next_fd = fd + 1; #if 1 /* Sanity check */ if (fdt->fd[fd] != NULL) { @@ -1019,8 +1019,8 @@ static void __put_unused_fd(struct files_struct *files, unsigned int fd) { struct fdtable *fdt = files_fdtable(files); __FD_CLR(fd, fdt->open_fds); - if (fd < files->next_fd) - files->next_fd = fd; + if (fd < fdt->next_fd) + fdt->next_fd = fd; } void fastcall put_unused_fd(unsigned int fd) diff --git a/trunk/fs/proc/proc_misc.c b/trunk/fs/proc/proc_misc.c index 826c131994c3..1d24fead51a6 100644 --- a/trunk/fs/proc/proc_misc.c +++ b/trunk/fs/proc/proc_misc.c @@ -312,7 +312,7 @@ static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) case BLK_HDR: info->state = BLK_LIST; (*pos)++; - /*fallthrough*/ + break; case BLK_LIST: if (get_blkdev_info(info->blkdev,&idummy,&ndummy)) { /* diff --git a/trunk/fs/qnx4/file.c b/trunk/fs/qnx4/file.c index c33963fded9e..b471315e24ef 100644 --- a/trunk/fs/qnx4/file.c +++ b/trunk/fs/qnx4/file.c @@ -12,7 +12,10 @@ * 27-06-1998 by Frank Denis : file overwriting. */ +#include +#include #include +#include #include /* diff --git a/trunk/fs/quota.c b/trunk/fs/quota.c index d6a2be826e29..ba9e0bf32f67 100644 --- a/trunk/fs/quota.c +++ b/trunk/fs/quota.c @@ -170,10 +170,10 @@ static void quota_sync_sb(struct super_block *sb, int type) /* Now when everything is written we can discard the pagecache so * that userspace sees the changes. We need i_mutex and so we could - * not do it inside dqonoff_mutex. Moreover we need to be carefull + * not do it inside dqonoff_sem. Moreover we need to be carefull * about races with quotaoff() (that is the reason why we have own * reference to inode). */ - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); + down(&sb_dqopt(sb)->dqonoff_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { discard[cnt] = NULL; if (type != -1 && cnt != type) @@ -182,7 +182,7 @@ static void quota_sync_sb(struct super_block *sb, int type) continue; discard[cnt] = igrab(sb_dqopt(sb)->files[cnt]); } - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); + up(&sb_dqopt(sb)->dqonoff_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (discard[cnt]) { mutex_lock(&discard[cnt]->i_mutex); diff --git a/trunk/fs/quota_v2.c b/trunk/fs/quota_v2.c index c519a583e681..b4199ec3ece4 100644 --- a/trunk/fs/quota_v2.c +++ b/trunk/fs/quota_v2.c @@ -394,7 +394,7 @@ static int v2_write_dquot(struct dquot *dquot) ssize_t ret; struct v2_disk_dqblk ddquot, empty; - /* dq_off is guarded by dqio_mutex */ + /* dq_off is guarded by dqio_sem */ if (!dquot->dq_off) if ((ret = dq_insert_tree(dquot)) < 0) { printk(KERN_ERR "VFS: Error %zd occurred while creating quota.\n", ret); diff --git a/trunk/fs/ramfs/file-mmu.c b/trunk/fs/ramfs/file-mmu.c index 6ada2095b9ac..2115383dcc8d 100644 --- a/trunk/fs/ramfs/file-mmu.c +++ b/trunk/fs/ramfs/file-mmu.c @@ -24,7 +24,18 @@ * caches is sufficient. 
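[Editor's sketch] The get_unused_fd() hunks above move the next_fd hint from files_struct into the fdtable, but the allocation strategy itself is unchanged: scan the open-fds bitmap for the first zero bit at or after next_fd, claim it, and advance the hint; freeing a descriptor pulls the hint back down, so the lowest free descriptor is always handed out. A compact userspace model of that strategy (fixed-size bitmap, no locking, placeholder names):

#include <stdio.h>

#define MAX_FDS 64

static unsigned long long open_fds;   /* bit i set => fd i in use */
static int next_fd;                   /* hint: no free fd below this */

static int get_unused_fd(void)
{
        for (int fd = next_fd; fd < MAX_FDS; fd++) {
                if (!(open_fds & (1ULL << fd))) {
                        open_fds |= 1ULL << fd;
                        next_fd = fd + 1;   /* everything below fd is in use */
                        return fd;
                }
        }
        return -1;   /* out of descriptors, roughly EMFILE */
}

static void put_unused_fd(int fd)
{
        open_fds &= ~(1ULL << fd);
        if (fd < next_fd)
                next_fd = fd;   /* keep the hint at the lowest hole */
}

int main(void)
{
        int a = get_unused_fd();   /* 0 */
        int b = get_unused_fd();   /* 1 */
        int c = get_unused_fd();   /* 2 */

        put_unused_fd(b);
        printf("a=%d b=%d c=%d reused=%d\n",
               a, b, c, get_unused_fd());   /* reused=1 */
        return 0;
}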
*/ +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "internal.h" struct address_space_operations ramfs_aops = { .readpage = simple_readpage, diff --git a/trunk/fs/seq_file.c b/trunk/fs/seq_file.c index 555b9ac04c25..7c40570b71dc 100644 --- a/trunk/fs/seq_file.c +++ b/trunk/fs/seq_file.c @@ -37,7 +37,7 @@ int seq_open(struct file *file, struct seq_operations *op) file->private_data = p; } memset(p, 0, sizeof(*p)); - mutex_init(&p->lock); + sema_init(&p->sem, 1); p->op = op; /* @@ -71,7 +71,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) void *p; int err = 0; - mutex_lock(&m->lock); + down(&m->sem); /* * seq_file->op->..m_start/m_stop/m_next may do special actions * or optimisations based on the file->f_version, so we want to @@ -164,7 +164,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) else *ppos += copied; file->f_version = m->version; - mutex_unlock(&m->lock); + up(&m->sem); return copied; Enomem: err = -ENOMEM; @@ -237,7 +237,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) struct seq_file *m = (struct seq_file *)file->private_data; long long retval = -EINVAL; - mutex_lock(&m->lock); + down(&m->sem); m->version = file->f_version; switch (origin) { case 1: @@ -260,7 +260,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin) } } } - mutex_unlock(&m->lock); + up(&m->sem); file->f_version = m->version; return retval; } diff --git a/trunk/fs/super.c b/trunk/fs/super.c index 425861cb1caa..e20b5580afd5 100644 --- a/trunk/fs/super.c +++ b/trunk/fs/super.c @@ -76,9 +76,9 @@ static struct super_block *alloc_super(void) down_write(&s->s_umount); s->s_count = S_BIAS; atomic_set(&s->s_active, 1); - mutex_init(&s->s_vfs_rename_mutex); - mutex_init(&s->s_dquot.dqio_mutex); - mutex_init(&s->s_dquot.dqonoff_mutex); + sema_init(&s->s_vfs_rename_sem,1); + sema_init(&s->s_dquot.dqio_sem, 1); + sema_init(&s->s_dquot.dqonoff_sem, 1); init_rwsem(&s->s_dquot.dqptr_sem); init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; @@ -693,9 +693,9 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type, * will protect the lockfs code from trying to start a snapshot * while we are mounting */ - mutex_lock(&bdev->bd_mount_mutex); + down(&bdev->bd_mount_sem); s = sget(fs_type, test_bdev_super, set_bdev_super, bdev); - mutex_unlock(&bdev->bd_mount_mutex); + up(&bdev->bd_mount_sem); if (IS_ERR(s)) goto out; diff --git a/trunk/fs/sysv/namei.c b/trunk/fs/sysv/namei.c index b8a73f716fbe..7f0e4b53085e 100644 --- a/trunk/fs/sysv/namei.c +++ b/trunk/fs/sysv/namei.c @@ -16,6 +16,18 @@ #include #include "sysv.h" +static inline void inc_count(struct inode *inode) +{ + inode->i_nlink++; + mark_inode_dirty(inode); +} + +static inline void dec_count(struct inode *inode) +{ + inode->i_nlink--; + mark_inode_dirty(inode); +} + static int add_nondir(struct dentry *dentry, struct inode *inode) { int err = sysv_add_link(dentry, inode); @@ -23,7 +35,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - inode_dec_link_count(inode); + dec_count(inode); iput(inode); return err; } @@ -112,7 +124,7 @@ static int sysv_symlink(struct inode * dir, struct dentry * dentry, return err; out_fail: - inode_dec_link_count(inode); + dec_count(inode); iput(inode); goto out; } @@ -126,7 +138,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir, return -EMLINK; inode->i_ctime = 
CURRENT_TIME_SEC; - inode_inc_link_count(inode); + inc_count(inode); atomic_inc(&inode->i_count); return add_nondir(dentry, inode); @@ -139,7 +151,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) if (dir->i_nlink >= SYSV_SB(dir->i_sb)->s_link_max) goto out; - inode_inc_link_count(dir); + inc_count(dir); inode = sysv_new_inode(dir, S_IFDIR|mode); err = PTR_ERR(inode); @@ -148,7 +160,7 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) sysv_set_inode(inode, 0); - inode_inc_link_count(inode); + inc_count(inode); err = sysv_make_empty(inode, dir); if (err) @@ -163,11 +175,11 @@ static int sysv_mkdir(struct inode * dir, struct dentry *dentry, int mode) return err; out_fail: - inode_dec_link_count(inode); - inode_dec_link_count(inode); + dec_count(inode); + dec_count(inode); iput(inode); out_dir: - inode_dec_link_count(dir); + dec_count(dir); goto out; } @@ -187,7 +199,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry) goto out; inode->i_ctime = dir->i_ctime; - inode_dec_link_count(inode); + dec_count(inode); out: return err; } @@ -201,8 +213,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry) err = sysv_unlink(dir, dentry); if (!err) { inode->i_size = 0; - inode_dec_link_count(inode); - inode_dec_link_count(dir); + dec_count(inode); + dec_count(dir); } } return err; @@ -246,34 +258,34 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, new_de = sysv_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); + inc_count(old_inode); sysv_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - inode_dec_link_count(new_inode); + dec_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) goto out_dir; } - inode_inc_link_count(old_inode); + inc_count(old_inode); err = sysv_add_link(new_dentry, old_inode); if (err) { - inode_dec_link_count(old_inode); + dec_count(old_inode); goto out_dir; } if (dir_de) - inode_inc_link_count(new_dir); + inc_count(new_dir); } sysv_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + dec_count(old_inode); if (dir_de) { sysv_set_link(dir_de, dir_page, new_dir); - inode_dec_link_count(old_dir); + dec_count(old_dir); } return 0; diff --git a/trunk/fs/udf/balloc.c b/trunk/fs/udf/balloc.c index ea521f846d97..201049ac8a96 100644 --- a/trunk/fs/udf/balloc.c +++ b/trunk/fs/udf/balloc.c @@ -152,7 +152,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb, int bitmap_nr; unsigned long overflow; - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); if (bloc.logicalBlockNum < 0 || (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { @@ -211,7 +211,7 @@ static void udf_bitmap_free_blocks(struct super_block * sb, sb->s_dirt = 1; if (UDF_SB_LVIDBH(sb)) mark_buffer_dirty(UDF_SB_LVIDBH(sb)); - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return; } @@ -226,7 +226,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb, int nr_groups, bitmap_nr; struct buffer_head *bh; - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition)) goto out; @@ -275,7 +275,7 @@ static int udf_bitmap_prealloc_blocks(struct super_block * sb, mark_buffer_dirty(UDF_SB_LVIDBH(sb)); } sb->s_dirt = 1; - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return alloc_count; 
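[Editor's sketch] The rename paths above (minix earlier, sysv here, ufs further down) all perform the same link-count choreography with their inc_count()/dec_count() helpers: old_inode is bumped before the new directory entry is pointed at it, so its count never transiently reaches zero, and dropped again once the old entry is deleted; a displaced new_inode loses one count (two when directories are involved, for the extra '..' reference). A small arithmetic model of the overwrite case for regular files (no real inodes, just the counts):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int old_nlink = 1;   /* old_inode: one directory entry points at it */
        int new_nlink = 1;   /* new_inode: about to be displaced */

        old_nlink++;         /* inc_count(old_inode): pin across the switch */
        /* sysv_set_link(): new_dentry now points at old_inode */
        new_nlink--;         /* dec_count(new_inode): its entry is gone */
        /* sysv_delete_entry(): the old directory entry is removed */
        old_nlink--;         /* dec_count(old_inode) */

        assert(old_nlink == 1);   /* still reachable via the new entry */
        assert(new_nlink == 0);   /* displaced inode may now be freed */
        printf("old=%d new=%d\n", old_nlink, new_nlink);
        return 0;
}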
} @@ -291,7 +291,7 @@ static int udf_bitmap_new_block(struct super_block * sb, int newblock = 0; *err = -ENOSPC; - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); repeat: if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) @@ -364,7 +364,7 @@ static int udf_bitmap_new_block(struct super_block * sb, } if (i >= (nr_groups*2)) { - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return newblock; } if (bit < sb->s_blocksize << 3) @@ -373,7 +373,7 @@ static int udf_bitmap_new_block(struct super_block * sb, bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3); if (bit >= sb->s_blocksize << 3) { - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return 0; } @@ -387,7 +387,7 @@ static int udf_bitmap_new_block(struct super_block * sb, */ if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); *err = -EDQUOT; return 0; } @@ -410,13 +410,13 @@ static int udf_bitmap_new_block(struct super_block * sb, mark_buffer_dirty(UDF_SB_LVIDBH(sb)); } sb->s_dirt = 1; - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); *err = 0; return newblock; error_return: *err = -EIO; - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return 0; } @@ -433,7 +433,7 @@ static void udf_table_free_blocks(struct super_block * sb, int8_t etype; int i; - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); if (bloc.logicalBlockNum < 0 || (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) { @@ -666,7 +666,7 @@ static void udf_table_free_blocks(struct super_block * sb, error_return: sb->s_dirt = 1; - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return; } @@ -692,7 +692,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, else return 0; - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); extoffset = sizeof(struct unallocSpaceEntry); bloc = UDF_I_LOCATION(table); @@ -736,7 +736,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb, mark_buffer_dirty(UDF_SB_LVIDBH(sb)); sb->s_dirt = 1; } - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return alloc_count; } @@ -761,7 +761,7 @@ static int udf_table_new_block(struct super_block * sb, else return newblock; - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition)) goal = 0; @@ -811,7 +811,7 @@ static int udf_table_new_block(struct super_block * sb, if (spread == 0xFFFFFFFF) { udf_release_data(goal_bh); - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); return 0; } @@ -827,7 +827,7 @@ static int udf_table_new_block(struct super_block * sb, if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) { udf_release_data(goal_bh); - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); *err = -EDQUOT; return 0; } @@ -846,7 +846,7 @@ static int udf_table_new_block(struct super_block * sb, } sb->s_dirt = 1; - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); *err = 0; return newblock; } diff --git a/trunk/fs/udf/ialloc.c b/trunk/fs/udf/ialloc.c index 3873c672cb4c..c9b707b470ca 100644 --- a/trunk/fs/udf/ialloc.c +++ b/trunk/fs/udf/ialloc.c @@ -42,7 +42,7 @@ void udf_free_inode(struct inode * inode) clear_inode(inode); - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); if (sbi->s_lvidbh) { if (S_ISDIR(inode->i_mode)) UDF_SB_LVIDIU(sb)->numDirs = @@ -53,7 +53,7 @@ void udf_free_inode(struct inode * inode) mark_buffer_dirty(sbi->s_lvidbh); } - mutex_unlock(&sbi->s_alloc_mutex); + 
up(&sbi->s_alloc_sem); udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1); } @@ -83,7 +83,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) return NULL; } - mutex_lock(&sbi->s_alloc_mutex); + down(&sbi->s_alloc_sem); UDF_I_UNIQUE(inode) = 0; UDF_I_LENEXTENTS(inode) = 0; UDF_I_NEXT_ALLOC_BLOCK(inode) = 0; @@ -148,7 +148,7 @@ struct inode * udf_new_inode (struct inode *dir, int mode, int * err) UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb); insert_inode_hash(inode); mark_inode_dirty(inode); - mutex_unlock(&sbi->s_alloc_mutex); + up(&sbi->s_alloc_sem); if (DQUOT_ALLOC_INODE(inode)) { diff --git a/trunk/fs/udf/super.c b/trunk/fs/udf/super.c index 9303c50c5d55..368d8f81fe54 100644 --- a/trunk/fs/udf/super.c +++ b/trunk/fs/udf/super.c @@ -1515,7 +1515,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) sb->s_fs_info = sbi; memset(UDF_SB(sb), 0x00, sizeof(struct udf_sb_info)); - mutex_init(&sbi->s_alloc_mutex); + init_MUTEX(&sbi->s_alloc_sem); if (!udf_parse_options((char *)options, &uopt)) goto error_out; diff --git a/trunk/fs/ufs/file.c b/trunk/fs/ufs/file.c index 62ad481810ef..ed69d7fe1b5d 100644 --- a/trunk/fs/ufs/file.c +++ b/trunk/fs/ufs/file.c @@ -23,8 +23,18 @@ * ext2 fs regular file handling primitives */ +#include +#include + +#include #include #include +#include +#include +#include +#include +#include +#include /* * We have mostly NULL's here: the current defaults are ok for diff --git a/trunk/fs/ufs/namei.c b/trunk/fs/ufs/namei.c index 8d5f98a01c74..2958cde7d3d6 100644 --- a/trunk/fs/ufs/namei.c +++ b/trunk/fs/ufs/namei.c @@ -43,6 +43,18 @@ #define UFSD(x) #endif +static inline void ufs_inc_count(struct inode *inode) +{ + inode->i_nlink++; + mark_inode_dirty(inode); +} + +static inline void ufs_dec_count(struct inode *inode) +{ + inode->i_nlink--; + mark_inode_dirty(inode); +} + static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = ufs_add_link(dentry, inode); @@ -50,7 +62,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode) d_instantiate(dentry, inode); return 0; } - inode_dec_link_count(inode); + ufs_dec_count(inode); iput(inode); return err; } @@ -161,7 +173,7 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, return err; out_fail: - inode_dec_link_count(inode); + ufs_dec_count(inode); iput(inode); goto out; } @@ -179,7 +191,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, } inode->i_ctime = CURRENT_TIME_SEC; - inode_inc_link_count(inode); + ufs_inc_count(inode); atomic_inc(&inode->i_count); error = ufs_add_nondir(dentry, inode); @@ -196,7 +208,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) goto out; lock_kernel(); - inode_inc_link_count(dir); + ufs_inc_count(dir); inode = ufs_new_inode(dir, S_IFDIR|mode); err = PTR_ERR(inode); @@ -206,7 +218,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) inode->i_op = &ufs_dir_inode_operations; inode->i_fop = &ufs_dir_operations; - inode_inc_link_count(inode); + ufs_inc_count(inode); err = ufs_make_empty(inode, dir); if (err) @@ -222,11 +234,11 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) return err; out_fail: - inode_dec_link_count(inode); - inode_dec_link_count(inode); + ufs_dec_count(inode); + ufs_dec_count(inode); iput (inode); out_dir: - inode_dec_link_count(dir); + ufs_dec_count(dir); unlock_kernel(); goto out; } @@ -248,7 +260,7 @@ static int ufs_unlink(struct inode * dir, 
struct dentry *dentry) goto out; inode->i_ctime = dir->i_ctime; - inode_dec_link_count(inode); + ufs_dec_count(inode); err = 0; out: unlock_kernel(); @@ -265,8 +277,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) err = ufs_unlink(dir, dentry); if (!err) { inode->i_size = 0; - inode_dec_link_count(inode); - inode_dec_link_count(dir); + ufs_dec_count(inode); + ufs_dec_count(dir); } } unlock_kernel(); @@ -307,35 +319,35 @@ static int ufs_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ufs_find_entry (new_dentry, &new_bh); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); + ufs_inc_count(old_inode); ufs_set_link(new_dir, new_de, new_bh, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) new_inode->i_nlink--; - inode_dec_link_count(new_inode); + ufs_dec_count(new_inode); } else { if (dir_de) { err = -EMLINK; if (new_dir->i_nlink >= UFS_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); + ufs_inc_count(old_inode); err = ufs_add_link(new_dentry, old_inode); if (err) { - inode_dec_link_count(old_inode); + ufs_dec_count(old_inode); goto out_dir; } if (dir_de) - inode_inc_link_count(new_dir); + ufs_inc_count(new_dir); } ufs_delete_entry (old_dir, old_de, old_bh); - inode_dec_link_count(old_inode); + ufs_dec_count(old_inode); if (dir_de) { ufs_set_link(old_inode, dir_de, dir_bh, new_dir); - inode_dec_link_count(old_dir); + ufs_dec_count(old_dir); } unlock_kernel(); return 0; diff --git a/trunk/fs/xfs/Makefile-linux-2.6 b/trunk/fs/xfs/Makefile-linux-2.6 index 5d73eaa1971f..97bd4743b461 100644 --- a/trunk/fs/xfs/Makefile-linux-2.6 +++ b/trunk/fs/xfs/Makefile-linux-2.6 @@ -1,19 +1,33 @@ # -# Copyright (c) 2000-2005 Silicon Graphics, Inc. -# All Rights Reserved. +# Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. # -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as +# This program is free software; you can redistribute it and/or modify it +# under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# This program is distributed in the hope that it would be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# Further, this software is distributed without any warranty that it is +# free of the rightful claim of any third person regarding infringement +# or the like. Any license provided herein, whether implied or +# otherwise, applies only to this software file. Patent licenses, if +# any, provided herein do not apply to combinations of this program with +# other software, or any other product whatsoever. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write the Free Software Foundation, Inc., 59 +# Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+# +# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, +# Mountain View, CA 94043, or: +# +# http://www.sgi.com +# +# For further information regarding this notice, see: +# +# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ # EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char @@ -22,7 +36,7 @@ XFS_LINUX := linux-2.6 ifeq ($(CONFIG_XFS_DEBUG),y) EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG - EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING + EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING endif ifeq ($(CONFIG_XFS_TRACE),y) EXTRA_CFLAGS += -DXFS_ALLOC_TRACE @@ -36,7 +50,7 @@ ifeq ($(CONFIG_XFS_TRACE),y) EXTRA_CFLAGS += -DXFS_ILOCK_TRACE EXTRA_CFLAGS += -DXFS_LOG_TRACE EXTRA_CFLAGS += -DXFS_RW_TRACE - EXTRA_CFLAGS += -DXFS_BUF_TRACE + EXTRA_CFLAGS += -DPAGEBUF_TRACE EXTRA_CFLAGS += -DXFS_VNODE_TRACE endif diff --git a/trunk/fs/xfs/linux-2.6/kmem.h b/trunk/fs/xfs/linux-2.6/kmem.h index f0268a84e6fd..c64a29cdfff3 100644 --- a/trunk/fs/xfs/linux-2.6/kmem.h +++ b/trunk/fs/xfs/linux-2.6/kmem.h @@ -23,8 +23,17 @@ #include /* - * Process flags handling + * memory management routines */ +#define KM_SLEEP 0x0001u +#define KM_NOSLEEP 0x0002u +#define KM_NOFS 0x0004u +#define KM_MAYFAIL 0x0008u + +#define kmem_zone kmem_cache +#define kmem_zone_t struct kmem_cache + +typedef unsigned long xfs_pflags_t; #define PFLAGS_TEST_NOIO() (current->flags & PF_NOIO) #define PFLAGS_TEST_FSTRANS() (current->flags & PF_FSTRANS) @@ -58,102 +67,74 @@ *(NSTATEP) = *(OSTATEP); \ } while (0) -/* - * General memory allocation interfaces - */ - -#define KM_SLEEP 0x0001u -#define KM_NOSLEEP 0x0002u -#define KM_NOFS 0x0004u -#define KM_MAYFAIL 0x0008u - -/* - * We use a special process flag to avoid recursive callbacks into - * the filesystem during transactions. We will also issue our own - * warnings, so we explicitly skip any generic ones (silly of us). 
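[Editor's sketch] The kmem_flags_convert() rework in the next hunk is mostly stylistic, but the mapping it implements is easy to state: KM_NOSLEEP callers get an atomic allocation, everyone else gets a normal sleeping allocation, and __GFP_FS is masked off when the caller is inside a filesystem transaction or passed KM_NOFS, preventing the allocator from recursing back into the filesystem. A standalone restatement with assertions (the KM_* values match the hunk; the gfp bits are demo constants, since in the real kernel GFP_KERNEL already includes __GFP_FS):

#include <assert.h>
#include <stdio.h>

#define KM_SLEEP     0x0001u
#define KM_NOSLEEP   0x0002u
#define KM_NOFS      0x0004u
#define KM_MAYFAIL   0x0008u

/* Demo stand-ins for the kernel gfp bits. */
#define GFP_ATOMIC   0x01u
#define GFP_KERNEL   0x02u
#define __GFP_FS     0x04u
#define __GFP_NOWARN 0x08u

static unsigned kmem_flags_convert(unsigned flags, int in_fs_transaction)
{
        unsigned lflags = __GFP_NOWARN;   /* we'll report problems ourselves */

        if (flags & KM_NOSLEEP) {
                lflags |= GFP_ATOMIC;
        } else {
                lflags |= GFP_KERNEL | __GFP_FS;
                /* avoid recursive callbacks into the fs during transactions */
                if (in_fs_transaction || (flags & KM_NOFS))
                        lflags &= ~__GFP_FS;
        }
        return lflags;
}

int main(void)
{
        assert(kmem_flags_convert(KM_NOSLEEP, 0) == (__GFP_NOWARN | GFP_ATOMIC));
        assert(kmem_flags_convert(KM_SLEEP, 0) ==
               (__GFP_NOWARN | GFP_KERNEL | __GFP_FS));
        assert(!(kmem_flags_convert(KM_NOFS, 0) & __GFP_FS));
        assert(!(kmem_flags_convert(KM_SLEEP, 1) & __GFP_FS));
        puts("flag mapping ok");
        return 0;
}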
- */ -static inline gfp_t -kmem_flags_convert(unsigned int __nocast flags) +static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags) { - gfp_t lflags; + gfp_t lflags = __GFP_NOWARN; /* we'll report problems, if need be */ - BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); +#ifdef DEBUG + if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { + printk(KERN_WARNING + "XFS: memory allocation with wrong flags (%x)\n", flags); + BUG(); + } +#endif if (flags & KM_NOSLEEP) { - lflags = GFP_ATOMIC | __GFP_NOWARN; + lflags |= GFP_ATOMIC; } else { - lflags = GFP_KERNEL | __GFP_NOWARN; + lflags |= GFP_KERNEL; + + /* avoid recursive callbacks to filesystem during transactions */ if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS)) lflags &= ~__GFP_FS; } - return lflags; -} -extern void *kmem_alloc(size_t, unsigned int __nocast); -extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); -extern void *kmem_zalloc(size_t, unsigned int __nocast); -extern void kmem_free(void *, size_t); - -/* - * Zone interfaces - */ - -#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN -#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT -#define KM_ZONE_SPREAD 0 - -#define kmem_zone kmem_cache -#define kmem_zone_t struct kmem_cache + return lflags; +} -static inline kmem_zone_t * +static __inline kmem_zone_t * kmem_zone_init(int size, char *zone_name) { return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); } -static inline kmem_zone_t * -kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, - void (*construct)(void *, kmem_zone_t *, unsigned long)) -{ - return kmem_cache_create(zone_name, size, 0, flags, construct, NULL); -} - -static inline void +static __inline void kmem_zone_free(kmem_zone_t *zone, void *ptr) { kmem_cache_free(zone, ptr); } -static inline void +static __inline void kmem_zone_destroy(kmem_zone_t *zone) { if (zone && kmem_cache_destroy(zone)) BUG(); } -extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); +extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); -/* - * Low memory cache shrinkers - */ +extern void *kmem_alloc(size_t, unsigned int __nocast); +extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); +extern void *kmem_zalloc(size_t, unsigned int __nocast); +extern void kmem_free(void *, size_t); typedef struct shrinker *kmem_shaker_t; typedef int (*kmem_shake_func_t)(int, gfp_t); -static inline kmem_shaker_t +static __inline kmem_shaker_t kmem_shake_register(kmem_shake_func_t sfunc) { return set_shrinker(DEFAULT_SEEKS, sfunc); } -static inline void +static __inline void kmem_shake_deregister(kmem_shaker_t shrinker) { remove_shrinker(shrinker); } -static inline int +static __inline int kmem_shake_allow(gfp_t gfp_mask) { return (gfp_mask & __GFP_WAIT); diff --git a/trunk/fs/xfs/linux-2.6/xfs_aops.c b/trunk/fs/xfs/linux-2.6/xfs_aops.c index 97fc056130eb..74d8be87f983 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_aops.c +++ b/trunk/fs/xfs/linux-2.6/xfs_aops.c @@ -43,29 +43,7 @@ #include #include -STATIC void -xfs_count_page_state( - struct page *page, - int *delalloc, - int *unmapped, - int *unwritten) -{ - struct buffer_head *bh, *head; - - *delalloc = *unmapped = *unwritten = 0; - - bh = head = page_buffers(page); - do { - if (buffer_uptodate(bh) && !buffer_mapped(bh)) - (*unmapped) = 1; - else if (buffer_unwritten(bh) && !buffer_delay(bh)) - clear_buffer_unwritten(bh); - else if (buffer_unwritten(bh)) - (*unwritten) = 1; - else if
(buffer_delay(bh)) - (*delalloc) = 1; - } while ((bh = bh->b_this_page) != head); -} +STATIC void xfs_count_page_state(struct page *, int *, int *, int *); #if defined(XFS_RW_TRACE) void @@ -76,7 +54,7 @@ xfs_page_trace( int mask) { xfs_inode_t *ip; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); loff_t isize = i_size_read(inode); loff_t offset = page_offset(page); int delalloc = -1, unmapped = -1, unwritten = -1; @@ -103,7 +81,7 @@ xfs_page_trace( (void *)((unsigned long)delalloc), (void *)((unsigned long)unmapped), (void *)((unsigned long)unwritten), - (void *)((unsigned long)current_pid()), + (void *)NULL, (void *)NULL); } #else @@ -214,7 +192,7 @@ xfs_alloc_ioend( ioend->io_uptodate = 1; /* cleared if any I/O fails */ ioend->io_list = NULL; ioend->io_type = type; - ioend->io_vnode = vn_from_inode(inode); + ioend->io_vnode = LINVFS_GET_VP(inode); ioend->io_buffer_head = NULL; ioend->io_buffer_tail = NULL; atomic_inc(&ioend->io_vnode->v_iocount); @@ -239,7 +217,7 @@ xfs_map_blocks( xfs_iomap_t *mapp, int flags) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error, nmaps = 1; VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error); @@ -483,26 +461,6 @@ xfs_add_to_ioend( ioend->io_size += bh->b_size; } -STATIC void -xfs_map_buffer( - struct buffer_head *bh, - xfs_iomap_t *mp, - xfs_off_t offset, - uint block_bits) -{ - sector_t bn; - - ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL); - - bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) + - ((offset - mp->iomap_offset) >> block_bits); - - ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME)); - - bh->b_blocknr = bn; - set_buffer_mapped(bh); -} - STATIC void xfs_map_at_offset( struct buffer_head *bh, @@ -510,11 +468,22 @@ xfs_map_at_offset( int block_bits, xfs_iomap_t *iomapp) { + xfs_daddr_t bn; + int sector_shift; + ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); + ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL); + + sector_shift = block_bits - BBSHIFT; + bn = (iomapp->iomap_bn >> sector_shift) + + ((offset - iomapp->iomap_offset) >> block_bits); + + ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME)); + ASSERT((bn << sector_shift) >= iomapp->iomap_bn); lock_buffer(bh); - xfs_map_buffer(bh, iomapp, offset, block_bits); + bh->b_blocknr = bn; bh->b_bdev = iomapp->iomap_target->bt_bdev; set_buffer_mapped(bh); clear_buffer_delay(bh); @@ -647,7 +616,7 @@ xfs_is_delayed_page( acceptable = (type == IOMAP_UNWRITTEN); else if (buffer_delay(bh)) acceptable = (type == IOMAP_DELAY); - else if (buffer_dirty(bh) && buffer_mapped(bh)) + else if (buffer_mapped(bh)) acceptable = (type == 0); else break; @@ -1071,159 +1040,8 @@ xfs_page_state_convert( return err; } -/* - * writepage: Called from one of two places: - * - * 1. we are flushing a delalloc buffer head. - * - * 2. we are writing out a dirty page. Typically the page dirty - * state is cleared before we get here. In this case is it - * conceivable we have no buffer heads. - * - * For delalloc space on the page we need to allocate space and - * flush it. For unmapped buffer heads on the page we should - * allocate space if the page is uptodate. For any other dirty - * buffer heads on the page we should flush them. - * - * If we detect that a transaction would be required to flush - * the page, we have to check the process flags first, if we - * are already in a transaction or disk I/O during allocations - * is off, we need to fail the writepage and redirty the page. 
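Back on the mapping arithmetic in xfs_map_at_offset() above: iomap_bn is expressed in 512-byte basic blocks (BBSHIFT is 9), so converting to a buffer_head block number takes two shifts. A worked sketch with an invented function name; for 4k blocks, block_bits = 12 and sector_shift = 3:

	/* Illustration of the bn computation above: convert the extent
	 * start from basic blocks to fs blocks, then add the fs-block
	 * distance of 'offset' into the mapping.
	 */
	static sector_t example_map_bn(xfs_daddr_t iomap_bn, xfs_off_t offset,
				       xfs_off_t iomap_offset, int block_bits)
	{
		int sector_shift = block_bits - BBSHIFT;

		return (iomap_bn >> sector_shift) +
		       ((offset - iomap_offset) >> block_bits);
	}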
- */ - -STATIC int -xfs_vm_writepage( - struct page *page, - struct writeback_control *wbc) -{ - int error; - int need_trans; - int delalloc, unmapped, unwritten; - struct inode *inode = page->mapping->host; - - xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); - - /* - * We need a transaction if: - * 1. There are delalloc buffers on the page - * 2. The page is uptodate and we have unmapped buffers - * 3. The page is uptodate and we have no buffers - * 4. There are unwritten buffers on the page - */ - - if (!page_has_buffers(page)) { - unmapped = 1; - need_trans = 1; - } else { - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - if (!PageUptodate(page)) - unmapped = 0; - need_trans = delalloc + unmapped + unwritten; - } - - /* - * If we need a transaction and the process flags say - * we are already in a transaction, or no IO is allowed - * then mark the page dirty again and leave the page - * as is. - */ - if (PFLAGS_TEST_FSTRANS() && need_trans) - goto out_fail; - - /* - * Delay hooking up buffer heads until we have - * made our go/no-go decision. - */ - if (!page_has_buffers(page)) - create_empty_buffers(page, 1 << inode->i_blkbits, 0); - - /* - * Convert delayed allocate, unwritten or unmapped space - * to real space and flush out to disk. - */ - error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); - if (error == -EAGAIN) - goto out_fail; - if (unlikely(error < 0)) - goto out_unlock; - - return 0; - -out_fail: - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; -out_unlock: - unlock_page(page); - return error; -} - -/* - * Called to move a page into cleanable state - and from there - * to be released. Possibly the page is already clean. We always - * have buffer heads in this call. - * - * Returns 0 if the page is ok to release, 1 otherwise. - * - * Possible scenarios are: - * - * 1. We are being called to release a page which has been written - * to via regular I/O. buffer heads will be dirty and possibly - * delalloc. If no delalloc buffer heads in this case then we - * can just return zero. - * - * 2. We are called to release a page which has been written via - * mmap, all we need to do is ensure there is no delalloc - * state in the buffer heads, if not we can let the caller - * free them and we should come back later via writepage. - */ STATIC int -xfs_vm_releasepage( - struct page *page, - gfp_t gfp_mask) -{ - struct inode *inode = page->mapping->host; - int dirty, delalloc, unmapped, unwritten; - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = 1, - }; - - xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); - - if (!page_has_buffers(page)) - return 0; - - xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); - if (!delalloc && !unwritten) - goto free_buffers; - - if (!(gfp_mask & __GFP_FS)) - return 0; - - /* If we are already inside a transaction or the thread cannot - * do I/O, we cannot release this page. - */ - if (PFLAGS_TEST_FSTRANS()) - return 0; - - /* - * Convert delalloc space to real space, do not flush the - * data out to disk, that will be done by the caller. - * Never need to allocate space here - we will always - * come back to writepage in that case. 
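The transaction test that xfs_vm_writepage() above applies (and that linvfs_writepage() reinstates further down) reduces to a small predicate; a sketch, assuming xfs_count_page_state() as defined in this file:

	/* Writepage go/no-go: delalloc, unwritten, or uptodate-but-unmapped
	 * buffers all require allocation, hence a transaction, which must
	 * be refused while one is already in progress (PF_FSTRANS).
	 */
	static int example_writepage_needs_trans(struct page *page)
	{
		int delalloc, unmapped, unwritten;

		if (!page_has_buffers(page))
			return 1;	/* no buffers at all: must allocate */
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;	/* stale unmapped buffers need no alloc */
		return delalloc || unmapped || unwritten;
	}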
- */ - dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); - if (dirty == 0 && !unwritten) - goto free_buffers; - return 0; - -free_buffers: - return try_to_free_buffers(page); -} - -STATIC int -__xfs_get_block( +__linvfs_get_block( struct inode *inode, sector_t iblock, unsigned long blocks, @@ -1232,7 +1050,7 @@ __xfs_get_block( int direct, bmapi_flags_t flags) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); xfs_iomap_t iomap; xfs_off_t offset; ssize_t size; @@ -1255,13 +1073,21 @@ __xfs_get_block( return 0; if (iomap.iomap_bn != IOMAP_DADDR_NULL) { - /* - * For unwritten extents do not report a disk address on + xfs_daddr_t bn; + xfs_off_t delta; + + /* For unwritten extents do not report a disk address on * the read case (treat as if we're reading into a hole). */ if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) { - xfs_map_buffer(bh_result, &iomap, offset, - inode->i_blkbits); + delta = offset - iomap.iomap_offset; + delta >>= inode->i_blkbits; + + bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT); + bn += delta; + BUG_ON(!bn && !(iomap.iomap_flags & IOMAP_REALTIME)); + bh_result->b_blocknr = bn; + set_buffer_mapped(bh_result); } if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) { if (direct) @@ -1304,30 +1130,30 @@ __xfs_get_block( } int -xfs_get_block( +linvfs_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - return __xfs_get_block(inode, iblock, 0, bh_result, + return __linvfs_get_block(inode, iblock, 0, bh_result, create, 0, BMAPI_WRITE); } STATIC int -xfs_get_blocks_direct( +linvfs_get_blocks_direct( struct inode *inode, sector_t iblock, unsigned long max_blocks, struct buffer_head *bh_result, int create) { - return __xfs_get_block(inode, iblock, max_blocks, bh_result, + return __linvfs_get_block(inode, iblock, max_blocks, bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); } STATIC void -xfs_end_io_direct( +linvfs_end_io_direct( struct kiocb *iocb, loff_t offset, ssize_t size, @@ -1365,7 +1191,7 @@ xfs_end_io_direct( } STATIC ssize_t -xfs_vm_direct_IO( +linvfs_direct_IO( int rw, struct kiocb *iocb, const struct iovec *iov, @@ -1374,7 +1200,7 @@ xfs_vm_direct_IO( { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); xfs_iomap_t iomap; int maps = 1; int error; @@ -1389,61 +1215,164 @@ xfs_vm_direct_IO( ret = blockdev_direct_IO_own_locking(rw, iocb, inode, iomap.iomap_target->bt_bdev, iov, offset, nr_segs, - xfs_get_blocks_direct, - xfs_end_io_direct); + linvfs_get_blocks_direct, + linvfs_end_io_direct); if (unlikely(ret <= 0 && iocb->private)) xfs_destroy_ioend(iocb->private); return ret; } -STATIC int -xfs_vm_prepare_write( - struct file *file, - struct page *page, - unsigned int from, - unsigned int to) -{ - return block_prepare_write(page, from, to, xfs_get_block); -} STATIC sector_t -xfs_vm_bmap( +linvfs_bmap( struct address_space *mapping, sector_t block) { struct inode *inode = (struct inode *)mapping->host; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error; - vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address); VOP_RWLOCK(vp, VRWLOCK_READ); VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); VOP_RWUNLOCK(vp, VRWLOCK_READ); - return generic_block_bmap(mapping, block, xfs_get_block); + return generic_block_bmap(mapping, block, linvfs_get_block); } STATIC int 
-xfs_vm_readpage( +linvfs_readpage( struct file *unused, struct page *page) { - return mpage_readpage(page, xfs_get_block); + return mpage_readpage(page, linvfs_get_block); } STATIC int -xfs_vm_readpages( +linvfs_readpages( struct file *unused, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { - return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); + return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block); +} + +STATIC void +xfs_count_page_state( + struct page *page, + int *delalloc, + int *unmapped, + int *unwritten) +{ + struct buffer_head *bh, *head; + + *delalloc = *unmapped = *unwritten = 0; + + bh = head = page_buffers(page); + do { + if (buffer_uptodate(bh) && !buffer_mapped(bh)) + (*unmapped) = 1; + else if (buffer_unwritten(bh) && !buffer_delay(bh)) + clear_buffer_unwritten(bh); + else if (buffer_unwritten(bh)) + (*unwritten) = 1; + else if (buffer_delay(bh)) + (*delalloc) = 1; + } while ((bh = bh->b_this_page) != head); } + +/* + * writepage: Called from one of two places: + * + * 1. we are flushing a delalloc buffer head. + * + * 2. we are writing out a dirty page. Typically the page dirty + * state is cleared before we get here. In this case is it + * conceivable we have no buffer heads. + * + * For delalloc space on the page we need to allocate space and + * flush it. For unmapped buffer heads on the page we should + * allocate space if the page is uptodate. For any other dirty + * buffer heads on the page we should flush them. + * + * If we detect that a transaction would be required to flush + * the page, we have to check the process flags first, if we + * are already in a transaction or disk I/O during allocations + * is off, we need to fail the writepage and redirty the page. + */ + STATIC int -xfs_vm_invalidatepage( +linvfs_writepage( + struct page *page, + struct writeback_control *wbc) +{ + int error; + int need_trans; + int delalloc, unmapped, unwritten; + struct inode *inode = page->mapping->host; + + xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0); + + /* + * We need a transaction if: + * 1. There are delalloc buffers on the page + * 2. The page is uptodate and we have unmapped buffers + * 3. The page is uptodate and we have no buffers + * 4. There are unwritten buffers on the page + */ + + if (!page_has_buffers(page)) { + unmapped = 1; + need_trans = 1; + } else { + xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!PageUptodate(page)) + unmapped = 0; + need_trans = delalloc + unmapped + unwritten; + } + + /* + * If we need a transaction and the process flags say + * we are already in a transaction, or no IO is allowed + * then mark the page dirty again and leave the page + * as is. + */ + if (PFLAGS_TEST_FSTRANS() && need_trans) + goto out_fail; + + /* + * Delay hooking up buffer heads until we have + * made our go/no-go decision. + */ + if (!page_has_buffers(page)) + create_empty_buffers(page, 1 << inode->i_blkbits, 0); + + /* + * Convert delayed allocate, unwritten or unmapped space + * to real space and flush out to disk. 
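xfs_count_page_state(), re-added above, walks the page's buffer_head ring: b_this_page links the buffers in a circle back to the head, so a do/while visits each exactly once. The same idiom in isolation, with an invented name:

	/* Circular buffer_head walk, as in xfs_count_page_state(). */
	static int example_page_has_delalloc(struct page *page)
	{
		struct buffer_head *bh, *head;

		bh = head = page_buffers(page);
		do {
			if (buffer_delay(bh))
				return 1;
		} while ((bh = bh->b_this_page) != head);
		return 0;
	}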
+ */ + error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); + if (error == -EAGAIN) + goto out_fail; + if (unlikely(error < 0)) + goto out_unlock; + + return 0; + +out_fail: + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; +out_unlock: + unlock_page(page); + return error; +} + +STATIC int +linvfs_invalidate_page( struct page *page, unsigned long offset) { @@ -1452,16 +1381,87 @@ xfs_vm_invalidatepage( return block_invalidatepage(page, offset); } -struct address_space_operations xfs_address_space_operations = { - .readpage = xfs_vm_readpage, - .readpages = xfs_vm_readpages, - .writepage = xfs_vm_writepage, +/* + * Called to move a page into cleanable state - and from there + * to be released. Possibly the page is already clean. We always + * have buffer heads in this call. + * + * Returns 0 if the page is ok to release, 1 otherwise. + * + * Possible scenarios are: + * + * 1. We are being called to release a page which has been written + * to via regular I/O. buffer heads will be dirty and possibly + * delalloc. If no delalloc buffer heads in this case then we + * can just return zero. + * + * 2. We are called to release a page which has been written via + * mmap, all we need to do is ensure there is no delalloc + * state in the buffer heads, if not we can let the caller + * free them and we should come back later via writepage. + */ +STATIC int +linvfs_release_page( + struct page *page, + gfp_t gfp_mask) +{ + struct inode *inode = page->mapping->host; + int dirty, delalloc, unmapped, unwritten; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + }; + + xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask); + + xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); + if (!delalloc && !unwritten) + goto free_buffers; + + if (!(gfp_mask & __GFP_FS)) + return 0; + + /* If we are already inside a transaction or the thread cannot + * do I/O, we cannot release this page. + */ + if (PFLAGS_TEST_FSTRANS()) + return 0; + + /* + * Convert delalloc space to real space, do not flush the + * data out to disk, that will be done by the caller. + * Never need to allocate space here - we will always + * come back to writepage in that case. 
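The release path being reinstated here follows the usual releasepage contract: hand the buffers to try_to_free_buffers() only once no filesystem work remains, and refuse when the caller's gfp_mask (or PF_FSTRANS) forbids recursing into the filesystem. Condensed, with one hypothetical helper:

	static int example_releasepage(struct page *page, gfp_t gfp_mask)
	{
		/* example_page_is_clean() is hypothetical shorthand for
		 * "no delalloc or unwritten buffers left on this page". */
		if (example_page_is_clean(page))
			return try_to_free_buffers(page);
		if (!(gfp_mask & __GFP_FS))
			return 0;	/* caller cannot tolerate fs recursion */
		return 0;		/* convert state first, free next time */
	}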
+ */ + dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); + if (dirty == 0 && !unwritten) + goto free_buffers; + return 0; + +free_buffers: + return try_to_free_buffers(page); +} + +STATIC int +linvfs_prepare_write( + struct file *file, + struct page *page, + unsigned int from, + unsigned int to) +{ + return block_prepare_write(page, from, to, linvfs_get_block); +} + +struct address_space_operations linvfs_aops = { + .readpage = linvfs_readpage, + .readpages = linvfs_readpages, + .writepage = linvfs_writepage, .sync_page = block_sync_page, - .releasepage = xfs_vm_releasepage, - .invalidatepage = xfs_vm_invalidatepage, - .prepare_write = xfs_vm_prepare_write, + .releasepage = linvfs_release_page, + .invalidatepage = linvfs_invalidate_page, + .prepare_write = linvfs_prepare_write, .commit_write = generic_commit_write, - .bmap = xfs_vm_bmap, - .direct_IO = xfs_vm_direct_IO, + .bmap = linvfs_bmap, + .direct_IO = linvfs_direct_IO, .migratepage = buffer_migrate_page, }; diff --git a/trunk/fs/xfs/linux-2.6/xfs_aops.h b/trunk/fs/xfs/linux-2.6/xfs_aops.h index 795699f121d2..55339dd5a30d 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_aops.h +++ b/trunk/fs/xfs/linux-2.6/xfs_aops.h @@ -40,7 +40,7 @@ typedef struct xfs_ioend { struct work_struct io_work; /* xfsdatad work queue */ } xfs_ioend_t; -extern struct address_space_operations xfs_address_space_operations; -extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int); +extern struct address_space_operations linvfs_aops; +extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int); #endif /* __XFS_IOPS_H__ */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_buf.c b/trunk/fs/xfs/linux-2.6/xfs_buf.c index 9fb0312665ca..8cdfa4151659 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_buf.c +++ b/trunk/fs/xfs/linux-2.6/xfs_buf.c @@ -1806,12 +1806,13 @@ xfs_flush_buftarg( int __init xfs_buf_init(void) { + int error = -ENOMEM; + #ifdef XFS_BUF_TRACE xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP); #endif - xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", - KM_ZONE_HWALIGN, NULL); + xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); if (!xfs_buf_zone) goto out_free_trace_buf; @@ -1839,7 +1840,7 @@ xfs_buf_init(void) #ifdef XFS_BUF_TRACE ktrace_free(xfs_buf_trace_buf); #endif - return -ENOMEM; + return error; } void diff --git a/trunk/fs/xfs/linux-2.6/xfs_export.c b/trunk/fs/xfs/linux-2.6/xfs_export.c index b768ea910bbe..80eb249f2fa0 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_export.c +++ b/trunk/fs/xfs/linux-2.6/xfs_export.c @@ -25,8 +25,6 @@ #include "xfs_mount.h" #include "xfs_export.h" -STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, }; - /* * XFS encodes and decodes the fileid portion of NFS filehandles * itself instead of letting the generic NFS code do it. 
This @@ -39,7 +37,7 @@ STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, }; */ STATIC struct dentry * -xfs_fs_decode_fh( +linvfs_decode_fh( struct super_block *sb, __u32 *fh, int fh_len, @@ -80,12 +78,12 @@ xfs_fs_decode_fh( } fh = (__u32 *)&ifid; - return sb->s_export_op->find_exported_dentry(sb, fh, parent, acceptable, context); + return find_exported_dentry(sb, fh, parent, acceptable, context); } STATIC int -xfs_fs_encode_fh( +linvfs_encode_fh( struct dentry *dentry, __u32 *fh, int *max_len, @@ -97,7 +95,7 @@ xfs_fs_encode_fh( int len; int is64 = 0; #if XFS_BIG_INUMS - vfs_t *vfs = vfs_from_sb(inode->i_sb); + vfs_t *vfs = LINVFS_GET_VFS(inode->i_sb); if (!(vfs->vfs_flag & VFS_32BITINODES)) { /* filesystem may contain 64bit inode numbers */ @@ -132,21 +130,21 @@ xfs_fs_encode_fh( } STATIC struct dentry * -xfs_fs_get_dentry( +linvfs_get_dentry( struct super_block *sb, void *data) { vnode_t *vp; struct inode *inode; struct dentry *result; - vfs_t *vfsp = vfs_from_sb(sb); + vfs_t *vfsp = LINVFS_GET_VFS(sb); int error; VFS_VGET(vfsp, &vp, (fid_t *)data, error); if (error || vp == NULL) return ERR_PTR(-ESTALE) ; - inode = vn_to_inode(vp); + inode = LINVFS_GET_IP(vp); result = d_alloc_anon(inode); if (!result) { iput(inode); @@ -156,20 +154,25 @@ xfs_fs_get_dentry( } STATIC struct dentry * -xfs_fs_get_parent( +linvfs_get_parent( struct dentry *child) { int error; vnode_t *vp, *cvp; struct dentry *parent; + struct dentry dotdot; + + dotdot.d_name.name = ".."; + dotdot.d_name.len = 2; + dotdot.d_inode = NULL; cvp = NULL; - vp = vn_from_inode(child->d_inode); + vp = LINVFS_GET_VP(child->d_inode); VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error); if (unlikely(error)) return ERR_PTR(-error); - parent = d_alloc_anon(vn_to_inode(cvp)); + parent = d_alloc_anon(LINVFS_GET_IP(cvp)); if (unlikely(!parent)) { VN_RELE(cvp); return ERR_PTR(-ENOMEM); @@ -177,9 +180,9 @@ xfs_fs_get_parent( return parent; } -struct export_operations xfs_export_operations = { - .decode_fh = xfs_fs_decode_fh, - .encode_fh = xfs_fs_encode_fh, - .get_parent = xfs_fs_get_parent, - .get_dentry = xfs_fs_get_dentry, +struct export_operations linvfs_export_ops = { + .decode_fh = linvfs_decode_fh, + .encode_fh = linvfs_encode_fh, + .get_parent = linvfs_get_parent, + .get_dentry = linvfs_get_dentry, }; diff --git a/trunk/fs/xfs/linux-2.6/xfs_file.c b/trunk/fs/xfs/linux-2.6/xfs_file.c index 185567a6a561..ced4404339c7 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_file.c +++ b/trunk/fs/xfs/linux-2.6/xfs_file.c @@ -43,13 +43,13 @@ #include #include -static struct vm_operations_struct xfs_file_vm_ops; +static struct vm_operations_struct linvfs_file_vm_ops; #ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct xfs_dmapi_file_vm_ops; +static struct vm_operations_struct linvfs_dmapi_file_vm_ops; #endif STATIC inline ssize_t -__xfs_file_read( +__linvfs_read( struct kiocb *iocb, char __user *buf, int ioflags, @@ -58,7 +58,7 @@ __xfs_file_read( { struct iovec iov = {buf, count}; struct file *file = iocb->ki_filp; - vnode_t *vp = vn_from_inode(file->f_dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(file->f_dentry->d_inode); ssize_t rval; BUG_ON(iocb->ki_pos != pos); @@ -71,28 +71,28 @@ __xfs_file_read( STATIC ssize_t -xfs_file_aio_read( +linvfs_aio_read( struct kiocb *iocb, char __user *buf, size_t count, loff_t pos) { - return __xfs_file_read(iocb, buf, IO_ISAIO, count, pos); + return __linvfs_read(iocb, buf, IO_ISAIO, count, pos); } STATIC ssize_t -xfs_file_aio_read_invis( +linvfs_aio_read_invis( struct kiocb *iocb, char 
__user *buf, size_t count, loff_t pos) { - return __xfs_file_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); + return __linvfs_read(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); } STATIC inline ssize_t -__xfs_file_write( +__linvfs_write( struct kiocb *iocb, const char __user *buf, int ioflags, @@ -102,7 +102,7 @@ __xfs_file_write( struct iovec iov = {(void __user *)buf, count}; struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); ssize_t rval; BUG_ON(iocb->ki_pos != pos); @@ -115,28 +115,28 @@ __xfs_file_write( STATIC ssize_t -xfs_file_aio_write( +linvfs_aio_write( struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) { - return __xfs_file_write(iocb, buf, IO_ISAIO, count, pos); + return __linvfs_write(iocb, buf, IO_ISAIO, count, pos); } STATIC ssize_t -xfs_file_aio_write_invis( +linvfs_aio_write_invis( struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos) { - return __xfs_file_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); + return __linvfs_write(iocb, buf, IO_ISAIO|IO_INVIS, count, pos); } STATIC inline ssize_t -__xfs_file_readv( +__linvfs_readv( struct file *file, const struct iovec *iov, int ioflags, @@ -144,8 +144,8 @@ __xfs_file_readv( loff_t *ppos) { struct inode *inode = file->f_mapping->host; - vnode_t *vp = vn_from_inode(inode); - struct kiocb kiocb; + vnode_t *vp = LINVFS_GET_VP(inode); + struct kiocb kiocb; ssize_t rval; init_sync_kiocb(&kiocb, file); @@ -160,28 +160,28 @@ __xfs_file_readv( } STATIC ssize_t -xfs_file_readv( +linvfs_readv( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __xfs_file_readv(file, iov, 0, nr_segs, ppos); + return __linvfs_readv(file, iov, 0, nr_segs, ppos); } STATIC ssize_t -xfs_file_readv_invis( +linvfs_readv_invis( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __xfs_file_readv(file, iov, IO_INVIS, nr_segs, ppos); + return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos); } STATIC inline ssize_t -__xfs_file_writev( +__linvfs_writev( struct file *file, const struct iovec *iov, int ioflags, @@ -189,8 +189,8 @@ __xfs_file_writev( loff_t *ppos) { struct inode *inode = file->f_mapping->host; - vnode_t *vp = vn_from_inode(inode); - struct kiocb kiocb; + vnode_t *vp = LINVFS_GET_VP(inode); + struct kiocb kiocb; ssize_t rval; init_sync_kiocb(&kiocb, file); @@ -206,34 +206,34 @@ __xfs_file_writev( STATIC ssize_t -xfs_file_writev( +linvfs_writev( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __xfs_file_writev(file, iov, 0, nr_segs, ppos); + return __linvfs_writev(file, iov, 0, nr_segs, ppos); } STATIC ssize_t -xfs_file_writev_invis( +linvfs_writev_invis( struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { - return __xfs_file_writev(file, iov, IO_INVIS, nr_segs, ppos); + return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos); } STATIC ssize_t -xfs_file_sendfile( +linvfs_sendfile( struct file *filp, loff_t *ppos, size_t count, read_actor_t actor, void *target) { - vnode_t *vp = vn_from_inode(filp->f_dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode); ssize_t rval; VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval); @@ -242,11 +242,11 @@ xfs_file_sendfile( STATIC int -xfs_file_open( +linvfs_open( struct inode *inode, struct file *filp) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error; 
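The __linvfs_readv()/__linvfs_writev() wrappers above use the standard trick for running synchronous I/O through an aio entry point: a kiocb is built on the stack with init_sync_kiocb(), and an -EIOCBQUEUED result is waited out. The shape of it, with a hypothetical aio callee standing in for the elided VOP call:

	static ssize_t example_sync_read(struct file *file, char __user *buf,
					 size_t count, loff_t *ppos)
	{
		struct kiocb kiocb;
		ssize_t ret;

		init_sync_kiocb(&kiocb, file);	/* marks the iocb synchronous */
		kiocb.ki_pos = *ppos;
		ret = example_aio_read(&kiocb, buf, count, kiocb.ki_pos);
		if (ret == -EIOCBQUEUED)
			ret = wait_on_sync_kiocb(&kiocb);
		*ppos = kiocb.ki_pos;
		return ret;
	}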
if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) @@ -259,11 +259,11 @@ xfs_file_open( STATIC int -xfs_file_release( +linvfs_release( struct inode *inode, struct file *filp) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error = 0; if (vp) @@ -273,13 +273,13 @@ xfs_file_release( STATIC int -xfs_file_fsync( +linvfs_fsync( struct file *filp, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error; int flags = FSYNC_WAIT; @@ -292,7 +292,7 @@ xfs_file_fsync( } /* - * xfs_file_readdir maps to VOP_READDIR(). + * linvfs_readdir maps to VOP_READDIR(). * We need to build a uio, cred, ... */ @@ -301,13 +301,13 @@ xfs_file_fsync( #ifdef CONFIG_XFS_DMAPI STATIC struct page * -xfs_vm_nopage( +linvfs_filemap_nopage( struct vm_area_struct *area, unsigned long address, int *type) { struct inode *inode = area->vm_file->f_dentry->d_inode; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); int error; @@ -324,7 +324,7 @@ xfs_vm_nopage( STATIC int -xfs_file_readdir( +linvfs_readdir( struct file *filp, void *dirent, filldir_t filldir) @@ -340,7 +340,7 @@ xfs_file_readdir( xfs_off_t start_offset, curr_offset; xfs_dirent_t *dbp = NULL; - vp = vn_from_inode(filp->f_dentry->d_inode); + vp = LINVFS_GET_VP(filp->f_dentry->d_inode); ASSERT(vp); /* Try fairly hard to get memory */ @@ -404,40 +404,39 @@ xfs_file_readdir( STATIC int -xfs_file_mmap( +linvfs_file_mmap( struct file *filp, struct vm_area_struct *vma) { struct inode *ip = filp->f_dentry->d_inode; - vnode_t *vp = vn_from_inode(ip); - vattr_t vattr; + vnode_t *vp = LINVFS_GET_VP(ip); + vattr_t va = { .va_mask = XFS_AT_UPDATIME }; int error; - vma->vm_ops = &xfs_file_vm_ops; + vma->vm_ops = &linvfs_file_vm_ops; #ifdef CONFIG_XFS_DMAPI if (vp->v_vfsp->vfs_flag & VFS_DMI) { - vma->vm_ops = &xfs_dmapi_file_vm_ops; + vma->vm_ops = &linvfs_dmapi_file_vm_ops; } #endif /* CONFIG_XFS_DMAPI */ - vattr.va_mask = XFS_AT_UPDATIME; - VOP_SETATTR(vp, &vattr, XFS_AT_UPDATIME, NULL, error); - if (likely(!error)) - __vn_revalidate(vp, &vattr); /* update flags */ + VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); + if (!error) + vn_revalidate(vp); /* update Linux inode flags */ return 0; } STATIC long -xfs_file_ioctl( +linvfs_ioctl( struct file *filp, unsigned int cmd, unsigned long arg) { int error; - struct inode *inode = filp->f_dentry->d_inode; - vnode_t *vp = vn_from_inode(inode); + struct inode *inode = filp->f_dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); VOP_IOCTL(vp, inode, filp, 0, cmd, (void __user *)arg, error); VMODIFY(vp); @@ -452,14 +451,14 @@ xfs_file_ioctl( } STATIC long -xfs_file_ioctl_invis( +linvfs_ioctl_invis( struct file *filp, unsigned int cmd, unsigned long arg) { int error; - struct inode *inode = filp->f_dentry->d_inode; - vnode_t *vp = vn_from_inode(inode); + struct inode *inode = filp->f_dentry->d_inode; + vnode_t *vp = LINVFS_GET_VP(inode); ASSERT(vp); VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, (void __user *)arg, error); @@ -477,11 +476,11 @@ xfs_file_ioctl_invis( #ifdef CONFIG_XFS_DMAPI #ifdef HAVE_VMOP_MPROTECT STATIC int -xfs_vm_mprotect( +linvfs_mprotect( struct vm_area_struct *vma, unsigned int newflags) { - vnode_t *vp = vn_from_inode(vma->vm_file->f_dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode); int error = 0; if (vp->v_vfsp->vfs_flag & VFS_DMI) { @@ -504,10 +503,10 @@ 
xfs_vm_mprotect( * it back online. */ STATIC int -xfs_file_open_exec( +linvfs_open_exec( struct inode *inode) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp); int error = 0; xfs_inode_t *ip; @@ -528,69 +527,69 @@ xfs_file_open_exec( } #endif /* HAVE_FOP_OPEN_EXEC */ -struct file_operations xfs_file_operations = { +struct file_operations linvfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, - .readv = xfs_file_readv, - .writev = xfs_file_writev, - .aio_read = xfs_file_aio_read, - .aio_write = xfs_file_aio_write, - .sendfile = xfs_file_sendfile, - .unlocked_ioctl = xfs_file_ioctl, + .readv = linvfs_readv, + .writev = linvfs_writev, + .aio_read = linvfs_aio_read, + .aio_write = linvfs_aio_write, + .sendfile = linvfs_sendfile, + .unlocked_ioctl = linvfs_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = xfs_file_compat_ioctl, + .compat_ioctl = linvfs_compat_ioctl, #endif - .mmap = xfs_file_mmap, - .open = xfs_file_open, - .release = xfs_file_release, - .fsync = xfs_file_fsync, + .mmap = linvfs_file_mmap, + .open = linvfs_open, + .release = linvfs_release, + .fsync = linvfs_fsync, #ifdef HAVE_FOP_OPEN_EXEC - .open_exec = xfs_file_open_exec, + .open_exec = linvfs_open_exec, #endif }; -struct file_operations xfs_invis_file_operations = { +struct file_operations linvfs_invis_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, .write = do_sync_write, - .readv = xfs_file_readv_invis, - .writev = xfs_file_writev_invis, - .aio_read = xfs_file_aio_read_invis, - .aio_write = xfs_file_aio_write_invis, - .sendfile = xfs_file_sendfile, - .unlocked_ioctl = xfs_file_ioctl_invis, + .readv = linvfs_readv_invis, + .writev = linvfs_writev_invis, + .aio_read = linvfs_aio_read_invis, + .aio_write = linvfs_aio_write_invis, + .sendfile = linvfs_sendfile, + .unlocked_ioctl = linvfs_ioctl_invis, #ifdef CONFIG_COMPAT - .compat_ioctl = xfs_file_compat_invis_ioctl, + .compat_ioctl = linvfs_compat_invis_ioctl, #endif - .mmap = xfs_file_mmap, - .open = xfs_file_open, - .release = xfs_file_release, - .fsync = xfs_file_fsync, + .mmap = linvfs_file_mmap, + .open = linvfs_open, + .release = linvfs_release, + .fsync = linvfs_fsync, }; -struct file_operations xfs_dir_file_operations = { +struct file_operations linvfs_dir_operations = { .read = generic_read_dir, - .readdir = xfs_file_readdir, - .unlocked_ioctl = xfs_file_ioctl, + .readdir = linvfs_readdir, + .unlocked_ioctl = linvfs_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = xfs_file_compat_ioctl, + .compat_ioctl = linvfs_compat_ioctl, #endif - .fsync = xfs_file_fsync, + .fsync = linvfs_fsync, }; -static struct vm_operations_struct xfs_file_vm_ops = { +static struct vm_operations_struct linvfs_file_vm_ops = { .nopage = filemap_nopage, .populate = filemap_populate, }; #ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct xfs_dmapi_file_vm_ops = { - .nopage = xfs_vm_nopage, +static struct vm_operations_struct linvfs_dmapi_file_vm_ops = { + .nopage = linvfs_filemap_nopage, .populate = filemap_populate, #ifdef HAVE_VMOP_MPROTECT - .mprotect = xfs_vm_mprotect, + .mprotect = linvfs_mprotect, #endif }; #endif /* CONFIG_XFS_DMAPI */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c b/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c index 575f2a790f31..4fa4b1a5187e 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/trunk/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -57,7 +57,7 @@ fs_tosspages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = 
vn_to_inode(vp); + struct inode *ip = LINVFS_GET_IP(vp); if (VN_CACHED(vp)) truncate_inode_pages(ip->i_mapping, first); @@ -76,7 +76,7 @@ fs_flushinval_pages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = vn_to_inode(vp); + struct inode *ip = LINVFS_GET_IP(vp); if (VN_CACHED(vp)) { filemap_write_and_wait(ip->i_mapping); @@ -98,7 +98,7 @@ fs_flush_pages( int fiopt) { vnode_t *vp = BHV_TO_VNODE(bdp); - struct inode *ip = vn_to_inode(vp); + struct inode *ip = LINVFS_GET_IP(vp); if (VN_CACHED(vp)) { filemap_fdatawrite(ip->i_mapping); diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl.c b/trunk/fs/xfs/linux-2.6/xfs_ioctl.c index 84478491609b..4db47790415c 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl.c @@ -138,7 +138,7 @@ xfs_find_handle( } /* we need the vnode */ - vp = vn_from_inode(inode); + vp = LINVFS_GET_VP(inode); /* now we can grab the fsid */ memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); @@ -256,7 +256,7 @@ xfs_vget_fsop_handlereq( } vpp = XFS_ITOV(ip); - inodep = vn_to_inode(vpp); + inodep = LINVFS_GET_IP(vpp); xfs_iunlock(ip, XFS_ILOCK_SHARED); *vp = vpp; @@ -344,7 +344,7 @@ xfs_open_by_handle( return -XFS_ERROR(-PTR_ERR(filp)); } if (inode->i_mode & S_IFREG) - filp->f_op = &xfs_invis_file_operations; + filp->f_op = &linvfs_invis_file_operations; fd_install(new_fd, filp); return new_fd; @@ -715,7 +715,7 @@ xfs_ioctl( xfs_inode_t *ip; xfs_mount_t *mp; - vp = vn_from_inode(inode); + vp = LINVFS_GET_VP(inode); vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address); @@ -1160,129 +1160,105 @@ xfs_ioc_xattr( void __user *arg) { struct fsxattr fa; - struct vattr *vattr; - int error = 0; + vattr_t va; + int error; int attr_flags; unsigned int flags; - vattr = kmalloc(sizeof(*vattr), GFP_KERNEL); - if (unlikely(!vattr)) - return -ENOMEM; - switch (cmd) { case XFS_IOC_FSGETXATTR: { - vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ - XFS_AT_NEXTENTS | XFS_AT_PROJID; - VOP_GETATTR(vp, vattr, 0, NULL, error); - if (unlikely(error)) { - error = -error; - break; - } + va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ + XFS_AT_NEXTENTS | XFS_AT_PROJID; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return -error; - fa.fsx_xflags = vattr->va_xflags; - fa.fsx_extsize = vattr->va_extsize; - fa.fsx_nextents = vattr->va_nextents; - fa.fsx_projid = vattr->va_projid; + fa.fsx_xflags = va.va_xflags; + fa.fsx_extsize = va.va_extsize; + fa.fsx_nextents = va.va_nextents; + fa.fsx_projid = va.va_projid; - if (copy_to_user(arg, &fa, sizeof(fa))) { - error = -EFAULT; - break; - } - break; + if (copy_to_user(arg, &fa, sizeof(fa))) + return -XFS_ERROR(EFAULT); + return 0; } case XFS_IOC_FSSETXATTR: { - if (copy_from_user(&fa, arg, sizeof(fa))) { - error = -EFAULT; - break; - } + if (copy_from_user(&fa, arg, sizeof(fa))) + return -XFS_ERROR(EFAULT); attr_flags = 0; if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= ATTR_NONBLOCK; - vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; - vattr->va_xflags = fa.fsx_xflags; - vattr->va_extsize = fa.fsx_extsize; - vattr->va_projid = fa.fsx_projid; + va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID; + va.va_xflags = fa.fsx_xflags; + va.va_extsize = fa.fsx_extsize; + va.va_projid = fa.fsx_projid; - VOP_SETATTR(vp, vattr, attr_flags, NULL, error); - if (likely(!error)) - __vn_revalidate(vp, vattr); /* update flags */ - error = -error; - break; + VOP_SETATTR(vp, &va, attr_flags, NULL, error); + if (!error) + vn_revalidate(vp); /* update Linux inode 
flags */ + return -error; } case XFS_IOC_FSGETXATTRA: { - vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ - XFS_AT_ANEXTENTS | XFS_AT_PROJID; - VOP_GETATTR(vp, vattr, 0, NULL, error); - if (unlikely(error)) { - error = -error; - break; - } + va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | \ + XFS_AT_ANEXTENTS | XFS_AT_PROJID; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (error) + return -error; - fa.fsx_xflags = vattr->va_xflags; - fa.fsx_extsize = vattr->va_extsize; - fa.fsx_nextents = vattr->va_anextents; - fa.fsx_projid = vattr->va_projid; + fa.fsx_xflags = va.va_xflags; + fa.fsx_extsize = va.va_extsize; + fa.fsx_nextents = va.va_anextents; + fa.fsx_projid = va.va_projid; - if (copy_to_user(arg, &fa, sizeof(fa))) { - error = -EFAULT; - break; - } - break; + if (copy_to_user(arg, &fa, sizeof(fa))) + return -XFS_ERROR(EFAULT); + return 0; } case XFS_IOC_GETXFLAGS: { flags = xfs_di2lxflags(ip->i_d.di_flags); if (copy_to_user(arg, &flags, sizeof(flags))) - error = -EFAULT; - break; + return -XFS_ERROR(EFAULT); + return 0; } case XFS_IOC_SETXFLAGS: { - if (copy_from_user(&flags, arg, sizeof(flags))) { - error = -EFAULT; - break; - } + if (copy_from_user(&flags, arg, sizeof(flags))) + return -XFS_ERROR(EFAULT); if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \ LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \ - LINUX_XFLAG_SYNC)) { - error = -EOPNOTSUPP; - break; - } + LINUX_XFLAG_SYNC)) + return -XFS_ERROR(EOPNOTSUPP); attr_flags = 0; if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= ATTR_NONBLOCK; - vattr->va_mask = XFS_AT_XFLAGS; - vattr->va_xflags = xfs_merge_ioc_xflags(flags, - xfs_ip2xflags(ip)); + va.va_mask = XFS_AT_XFLAGS; + va.va_xflags = xfs_merge_ioc_xflags(flags, + xfs_ip2xflags(ip)); - VOP_SETATTR(vp, vattr, attr_flags, NULL, error); - if (likely(!error)) - __vn_revalidate(vp, vattr); /* update flags */ - error = -error; - break; + VOP_SETATTR(vp, &va, attr_flags, NULL, error); + if (!error) + vn_revalidate(vp); /* update Linux inode flags */ + return -error; } case XFS_IOC_GETVERSION: { - flags = vn_to_inode(vp)->i_generation; + flags = LINVFS_GET_IP(vp)->i_generation; if (copy_to_user(arg, &flags, sizeof(flags))) - error = -EFAULT; - break; + return -XFS_ERROR(EFAULT); + return 0; } default: - error = -ENOTTY; - break; + return -ENOTTY; } - - kfree(vattr); - return error; } STATIC int diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c b/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c index b6321abd9a81..a7c9ba1a9f7b 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -107,11 +107,11 @@ xfs_ioctl32_bulkstat( #endif STATIC long -xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) +__linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) { int error; struct inode *inode = f->f_dentry->d_inode; - vnode_t *vp = vn_to_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); switch (cmd) { case XFS_IOC_DIOINFO: @@ -196,19 +196,19 @@ xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) } long -xfs_file_compat_ioctl( +linvfs_compat_ioctl( struct file *f, unsigned cmd, unsigned long arg) { - return xfs_compat_ioctl(0, f, cmd, arg); + return __linvfs_compat_ioctl(0, f, cmd, arg); } long -xfs_file_compat_invis_ioctl( +linvfs_compat_invis_ioctl( struct file *f, unsigned cmd, unsigned long arg) { - return xfs_compat_ioctl(IO_INVIS, f, cmd, arg); + return __linvfs_compat_ioctl(IO_INVIS, f, cmd, arg); } diff --git a/trunk/fs/xfs/linux-2.6/xfs_ioctl32.h 
b/trunk/fs/xfs/linux-2.6/xfs_ioctl32.h index 02de6e62ee37..011c273bec50 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/trunk/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -18,7 +18,7 @@ #ifndef __XFS_IOCTL32_H__ #define __XFS_IOCTL32_H__ -extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long); -extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long); +extern long linvfs_compat_ioctl(struct file *, unsigned, unsigned long); +extern long linvfs_compat_invis_ioctl(struct file *f, unsigned, unsigned long); #endif /* __XFS_IOCTL32_H__ */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_iops.c b/trunk/fs/xfs/linux-2.6/xfs_iops.c index af487437bd7e..d7f6f2d8ac8e 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_iops.c +++ b/trunk/fs/xfs/linux-2.6/xfs_iops.c @@ -106,7 +106,7 @@ xfs_ichgtime( xfs_inode_t *ip, int flags) { - struct inode *inode = vn_to_inode(XFS_ITOV(ip)); + struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); timespec_t tv; nanotime(&tv); @@ -198,22 +198,22 @@ xfs_ichgtime_fast( * Pull the link count and size up from the xfs inode to the linux inode */ STATIC void -xfs_validate_fields( - struct inode *ip, - struct vattr *vattr) +validate_fields( + struct inode *ip) { - vnode_t *vp = vn_from_inode(ip); + vnode_t *vp = LINVFS_GET_VP(ip); + vattr_t va; int error; - vattr->va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; - VOP_GETATTR(vp, vattr, ATTR_LAZY, NULL, error); - if (likely(!error)) { - ip->i_nlink = vattr->va_nlink; - ip->i_blocks = vattr->va_nblocks; + va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS; + VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); + if (likely(!error)) { + ip->i_nlink = va.va_nlink; + ip->i_blocks = va.va_nblocks; - /* we're under i_sem so i_size can't change under us */ - if (i_size_read(ip) != vattr->va_size) - i_size_write(ip, vattr->va_size); + /* we're under i_mutex so i_size can't change under us */ + if (i_size_read(ip) != va.va_size) + i_size_write(ip, va.va_size); } } @@ -224,11 +224,11 @@ xfs_validate_fields( * inode, of course, such that log replay can't cause these to be lost). */ STATIC int -xfs_init_security( +linvfs_init_security( struct vnode *vp, struct inode *dir) { - struct inode *ip = vn_to_inode(vp); + struct inode *ip = LINVFS_GET_IP(vp); size_t length; void *value; char *name; @@ -257,46 +257,46 @@ xfs_init_security( * XXX(hch): nfsd is broken, better fix it instead. */ STATIC inline int -xfs_has_fs_struct(struct task_struct *task) +has_fs_struct(struct task_struct *task) { return (task->fs != init_task.fs); } STATIC inline void -xfs_cleanup_inode( +cleanup_inode( vnode_t *dvp, vnode_t *vp, - struct dentry *dentry, + struct dentry *dentry, int mode) { struct dentry teardown = {}; - int error; + int err2; /* Oh, the horror. - * If we can't add the ACL or we fail in - * xfs_init_security we must back out. + * If we can't add the ACL or we fail in + * linvfs_init_security we must back out. * ENOSPC can hit here, among other things. 
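validate_fields() above is what keeps the Linux inode's nlink, size and block count coherent with XFS's own metadata after namespace operations. Stripped of the diff noise (names as in the hunk, wrapper name invented), the pull-up pattern is:

	static void example_validate_fields(struct inode *ip)
	{
		vnode_t *vp = LINVFS_GET_VP(ip);
		vattr_t va;
		int error;

		va.va_mask = XFS_AT_NLINK | XFS_AT_SIZE | XFS_AT_NBLOCKS;
		VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error);
		if (likely(!error)) {
			ip->i_nlink = va.va_nlink;
			ip->i_blocks = va.va_nblocks;
			/* i_mutex is held, so i_size cannot move under us */
			if (i_size_read(ip) != va.va_size)
				i_size_write(ip, va.va_size);
		}
	}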
*/ - teardown.d_inode = vn_to_inode(vp); + teardown.d_inode = LINVFS_GET_IP(vp); teardown.d_name = dentry->d_name; if (S_ISDIR(mode)) - VOP_RMDIR(dvp, &teardown, NULL, error); + VOP_RMDIR(dvp, &teardown, NULL, err2); else - VOP_REMOVE(dvp, &teardown, NULL, error); + VOP_REMOVE(dvp, &teardown, NULL, err2); VN_RELE(vp); } STATIC int -xfs_vn_mknod( +linvfs_mknod( struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct inode *ip; - vattr_t vattr = { 0 }; - vnode_t *vp = NULL, *dvp = vn_from_inode(dir); + vattr_t va; + vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir); xfs_acl_t *default_acl = NULL; attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS; int error; @@ -305,98 +305,99 @@ xfs_vn_mknod( * Irix uses Missed'em'V split, but doesn't want to see * the upper 5 bits of (14bit) major. */ - if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) + if (!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff) return -EINVAL; - if (unlikely(test_default_acl && test_default_acl(dvp))) { - if (!_ACL_ALLOC(default_acl)) { + if (test_default_acl && test_default_acl(dvp)) { + if (!_ACL_ALLOC(default_acl)) return -ENOMEM; - } if (!_ACL_GET_DEFAULT(dvp, default_acl)) { _ACL_FREE(default_acl); default_acl = NULL; } } - if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current)) + if (IS_POSIXACL(dir) && !default_acl && has_fs_struct(current)) mode &= ~current->fs->umask; - vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE; - vattr.va_mode = mode; + memset(&va, 0, sizeof(va)); + va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + va.va_mode = mode; switch (mode & S_IFMT) { case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: - vattr.va_rdev = sysv_encode_dev(rdev); - vattr.va_mask |= XFS_AT_RDEV; + va.va_rdev = sysv_encode_dev(rdev); + va.va_mask |= XFS_AT_RDEV; /*FALLTHROUGH*/ case S_IFREG: - VOP_CREATE(dvp, dentry, &vattr, &vp, NULL, error); + VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); break; case S_IFDIR: - VOP_MKDIR(dvp, dentry, &vattr, &vp, NULL, error); + VOP_MKDIR(dvp, dentry, &va, &vp, NULL, error); break; default: error = EINVAL; break; } - if (unlikely(!error)) { - error = xfs_init_security(vp, dir); + if (!error) + { + error = linvfs_init_security(vp, dir); if (error) - xfs_cleanup_inode(dvp, vp, dentry, mode); + cleanup_inode(dvp, vp, dentry, mode); } - if (unlikely(default_acl)) { + if (default_acl) { if (!error) { - error = _ACL_INHERIT(vp, &vattr, default_acl); - if (!error) + error = _ACL_INHERIT(vp, &va, default_acl); + if (!error) VMODIFY(vp); else - xfs_cleanup_inode(dvp, vp, dentry, mode); + cleanup_inode(dvp, vp, dentry, mode); } _ACL_FREE(default_acl); } - if (likely(!error)) { + if (!error) { ASSERT(vp); - ip = vn_to_inode(vp); + ip = LINVFS_GET_IP(vp); if (S_ISCHR(mode) || S_ISBLK(mode)) ip->i_rdev = rdev; else if (S_ISDIR(mode)) - xfs_validate_fields(ip, &vattr); + validate_fields(ip); d_instantiate(dentry, ip); - xfs_validate_fields(dir, &vattr); + validate_fields(dir); } return -error; } STATIC int -xfs_vn_create( +linvfs_create( struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { - return xfs_vn_mknod(dir, dentry, mode, 0); + return linvfs_mknod(dir, dentry, mode, 0); } STATIC int -xfs_vn_mkdir( +linvfs_mkdir( struct inode *dir, struct dentry *dentry, int mode) { - return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); + return linvfs_mknod(dir, dentry, mode|S_IFDIR, 0); } STATIC struct dentry * -xfs_vn_lookup( +linvfs_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - struct vnode *vp = vn_from_inode(dir), *cvp; + struct vnode 
*vp = LINVFS_GET_VP(dir), *cvp; int error; if (dentry->d_name.len >= MAXNAMELEN) @@ -410,11 +411,11 @@ xfs_vn_lookup( return NULL; } - return d_splice_alias(vn_to_inode(cvp), dentry); + return d_splice_alias(LINVFS_GET_IP(cvp), dentry); } STATIC int -xfs_vn_link( +linvfs_link( struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) @@ -422,102 +423,99 @@ xfs_vn_link( struct inode *ip; /* inode of guy being linked to */ vnode_t *tdvp; /* target directory for new name/link */ vnode_t *vp; /* vp of name being linked */ - vattr_t vattr; int error; ip = old_dentry->d_inode; /* inode being linked to */ if (S_ISDIR(ip->i_mode)) return -EPERM; - tdvp = vn_from_inode(dir); - vp = vn_from_inode(ip); + tdvp = LINVFS_GET_VP(dir); + vp = LINVFS_GET_VP(ip); VOP_LINK(tdvp, vp, dentry, NULL, error); - if (likely(!error)) { + if (!error) { VMODIFY(tdvp); VN_HOLD(vp); - xfs_validate_fields(ip, &vattr); + validate_fields(ip); d_instantiate(dentry, ip); } return -error; } STATIC int -xfs_vn_unlink( +linvfs_unlink( struct inode *dir, struct dentry *dentry) { struct inode *inode; vnode_t *dvp; /* directory containing name to remove */ - vattr_t vattr; int error; inode = dentry->d_inode; - dvp = vn_from_inode(dir); + dvp = LINVFS_GET_VP(dir); VOP_REMOVE(dvp, dentry, NULL, error); - if (likely(!error)) { - xfs_validate_fields(dir, &vattr); /* size needs update */ - xfs_validate_fields(inode, &vattr); + if (!error) { + validate_fields(dir); /* For size only */ + validate_fields(inode); } + return -error; } STATIC int -xfs_vn_symlink( +linvfs_symlink( struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *ip; - vattr_t vattr = { 0 }; + vattr_t va; vnode_t *dvp; /* directory containing name of symlink */ vnode_t *cvp; /* used to lookup symlink to put in dentry */ int error; - dvp = vn_from_inode(dir); + dvp = LINVFS_GET_VP(dir); cvp = NULL; - vattr.va_mode = S_IFLNK | + memset(&va, 0, sizeof(va)); + va.va_mode = S_IFLNK | (irix_symlink_mode ? 
0777 & ~current->fs->umask : S_IRWXUGO); - vattr.va_mask = XFS_AT_TYPE|XFS_AT_MODE; + va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; error = 0; - VOP_SYMLINK(dvp, dentry, &vattr, (char *)symname, &cvp, NULL, error); + VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); if (likely(!error && cvp)) { - error = xfs_init_security(cvp, dir); + error = linvfs_init_security(cvp, dir); if (likely(!error)) { - ip = vn_to_inode(cvp); + ip = LINVFS_GET_IP(cvp); d_instantiate(dentry, ip); - xfs_validate_fields(dir, &vattr); - xfs_validate_fields(ip, &vattr); - } else { - xfs_cleanup_inode(dvp, cvp, dentry, 0); + validate_fields(dir); + validate_fields(ip); } } return -error; } STATIC int -xfs_vn_rmdir( +linvfs_rmdir( struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; - vnode_t *dvp = vn_from_inode(dir); - vattr_t vattr; + vnode_t *dvp = LINVFS_GET_VP(dir); int error; VOP_RMDIR(dvp, dentry, NULL, error); - if (likely(!error)) { - xfs_validate_fields(inode, &vattr); - xfs_validate_fields(dir, &vattr); + if (!error) { + validate_fields(inode); + validate_fields(dir); } return -error; } STATIC int -xfs_vn_rename( +linvfs_rename( struct inode *odir, struct dentry *odentry, struct inode *ndir, @@ -526,21 +524,22 @@ xfs_vn_rename( struct inode *new_inode = ndentry->d_inode; vnode_t *fvp; /* from directory */ vnode_t *tvp; /* target directory */ - vattr_t vattr; int error; - fvp = vn_from_inode(odir); - tvp = vn_from_inode(ndir); + fvp = LINVFS_GET_VP(odir); + tvp = LINVFS_GET_VP(ndir); VOP_RENAME(fvp, odentry, tvp, ndentry, NULL, error); - if (likely(!error)) { - if (new_inode) - xfs_validate_fields(new_inode, &vattr); - xfs_validate_fields(odir, &vattr); - if (ndir != odir) - xfs_validate_fields(ndir, &vattr); - } - return -error; + if (error) + return -error; + + if (new_inode) + validate_fields(new_inode); + + validate_fields(odir); + if (ndir != odir) + validate_fields(ndir); + return 0; } /* @@ -549,7 +548,7 @@ xfs_vn_rename( * uio is kmalloced for this reason... 
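The comment above explains why the link buffer is kmalloced: with the 2.6 follow_link/put_link pairing, the target string must outlive follow_link and is freed only in put_link. The pattern in miniature, with a hypothetical readlink helper:

	static void *example_follow_link(struct dentry *dentry,
					 struct nameidata *nd)
	{
		char *link = kmalloc(MAXPATHLEN + 1, GFP_KERNEL);

		if (!link) {
			nd_set_link(nd, ERR_PTR(-ENOMEM));
			return NULL;
		}
		if (example_readlink(dentry, link, MAXPATHLEN) < 0) {
			kfree(link);
			nd_set_link(nd, ERR_PTR(-EIO));
			return NULL;
		}
		nd_set_link(nd, link);	/* freed later, in put_link */
		return NULL;
	}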
*/ STATIC void * -xfs_vn_follow_link( +linvfs_follow_link( struct dentry *dentry, struct nameidata *nd) { @@ -575,7 +574,7 @@ xfs_vn_follow_link( return NULL; } - vp = vn_from_inode(dentry->d_inode); + vp = LINVFS_GET_VP(dentry->d_inode); iov.iov_base = link; iov.iov_len = MAXPATHLEN; @@ -600,7 +599,7 @@ xfs_vn_follow_link( } STATIC void -xfs_vn_put_link( +linvfs_put_link( struct dentry *dentry, struct nameidata *nd, void *p) @@ -613,12 +612,12 @@ xfs_vn_put_link( #ifdef CONFIG_XFS_POSIX_ACL STATIC int -xfs_vn_permission( +linvfs_permission( struct inode *inode, int mode, struct nameidata *nd) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error; mode <<= 6; /* convert from linux to vnode access bits */ @@ -626,17 +625,17 @@ xfs_vn_permission( return -error; } #else -#define xfs_vn_permission NULL +#define linvfs_permission NULL #endif STATIC int -xfs_vn_getattr( +linvfs_getattr( struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error = 0; if (unlikely(vp->v_flag & VMODIFIED)) @@ -647,17 +646,18 @@ xfs_vn_getattr( } STATIC int -xfs_vn_setattr( +linvfs_setattr( struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; unsigned int ia_valid = attr->ia_valid; - vnode_t *vp = vn_from_inode(inode); - vattr_t vattr = { 0 }; + vnode_t *vp = LINVFS_GET_VP(inode); + vattr_t vattr; int flags = 0; int error; + memset(&vattr, 0, sizeof(vattr_t)); if (ia_valid & ATTR_UID) { vattr.va_mask |= XFS_AT_UID; vattr.va_uid = attr->ia_uid; @@ -699,27 +699,28 @@ xfs_vn_setattr( #endif VOP_SETATTR(vp, &vattr, flags, NULL, error); - if (likely(!error)) - __vn_revalidate(vp, &vattr); - return -error; + if (error) + return -error; + vn_revalidate(vp); + return error; } STATIC void -xfs_vn_truncate( +linvfs_truncate( struct inode *inode) { - block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_block); + block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block); } STATIC int -xfs_vn_setxattr( +linvfs_setxattr( struct dentry *dentry, const char *name, const void *data, size_t size, int flags) { - vnode_t *vp = vn_from_inode(dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -743,13 +744,13 @@ xfs_vn_setxattr( } STATIC ssize_t -xfs_vn_getxattr( +linvfs_getxattr( struct dentry *dentry, const char *name, void *data, size_t size) { - vnode_t *vp = vn_from_inode(dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -773,12 +774,12 @@ xfs_vn_getxattr( } STATIC ssize_t -xfs_vn_listxattr( +linvfs_listxattr( struct dentry *dentry, char *data, size_t size) { - vnode_t *vp = vn_from_inode(dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); int error, xflags = ATTR_KERNAMELS; ssize_t result; @@ -793,11 +794,11 @@ xfs_vn_listxattr( } STATIC int -xfs_vn_removexattr( +linvfs_removexattr( struct dentry *dentry, const char *name) { - vnode_t *vp = vn_from_inode(dentry->d_inode); + vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); char *attr = (char *)name; attrnames_t *namesp; int xflags = 0; @@ -815,45 +816,45 @@ xfs_vn_removexattr( } -struct inode_operations xfs_inode_operations = { - .permission = xfs_vn_permission, - .truncate = xfs_vn_truncate, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = xfs_vn_setxattr, - .getxattr = 
xfs_vn_getxattr, - .listxattr = xfs_vn_listxattr, - .removexattr = xfs_vn_removexattr, +struct inode_operations linvfs_file_inode_operations = { + .permission = linvfs_permission, + .truncate = linvfs_truncate, + .getattr = linvfs_getattr, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, }; -struct inode_operations xfs_dir_inode_operations = { - .create = xfs_vn_create, - .lookup = xfs_vn_lookup, - .link = xfs_vn_link, - .unlink = xfs_vn_unlink, - .symlink = xfs_vn_symlink, - .mkdir = xfs_vn_mkdir, - .rmdir = xfs_vn_rmdir, - .mknod = xfs_vn_mknod, - .rename = xfs_vn_rename, - .permission = xfs_vn_permission, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = xfs_vn_setxattr, - .getxattr = xfs_vn_getxattr, - .listxattr = xfs_vn_listxattr, - .removexattr = xfs_vn_removexattr, +struct inode_operations linvfs_dir_inode_operations = { + .create = linvfs_create, + .lookup = linvfs_lookup, + .link = linvfs_link, + .unlink = linvfs_unlink, + .symlink = linvfs_symlink, + .mkdir = linvfs_mkdir, + .rmdir = linvfs_rmdir, + .mknod = linvfs_mknod, + .rename = linvfs_rename, + .permission = linvfs_permission, + .getattr = linvfs_getattr, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, }; -struct inode_operations xfs_symlink_inode_operations = { +struct inode_operations linvfs_symlink_inode_operations = { .readlink = generic_readlink, - .follow_link = xfs_vn_follow_link, - .put_link = xfs_vn_put_link, - .permission = xfs_vn_permission, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = xfs_vn_setxattr, - .getxattr = xfs_vn_getxattr, - .listxattr = xfs_vn_listxattr, - .removexattr = xfs_vn_removexattr, + .follow_link = linvfs_follow_link, + .put_link = linvfs_put_link, + .permission = linvfs_permission, + .getattr = linvfs_getattr, + .setattr = linvfs_setattr, + .setxattr = linvfs_setxattr, + .getxattr = linvfs_getxattr, + .listxattr = linvfs_listxattr, + .removexattr = linvfs_removexattr, }; diff --git a/trunk/fs/xfs/linux-2.6/xfs_iops.h b/trunk/fs/xfs/linux-2.6/xfs_iops.h index a8417d7af5f9..6899a6b4a50a 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_iops.h +++ b/trunk/fs/xfs/linux-2.6/xfs_iops.h @@ -18,13 +18,13 @@ #ifndef __XFS_IOPS_H__ #define __XFS_IOPS_H__ -extern struct inode_operations xfs_inode_operations; -extern struct inode_operations xfs_dir_inode_operations; -extern struct inode_operations xfs_symlink_inode_operations; +extern struct inode_operations linvfs_file_inode_operations; +extern struct inode_operations linvfs_dir_inode_operations; +extern struct inode_operations linvfs_symlink_inode_operations; -extern struct file_operations xfs_file_operations; -extern struct file_operations xfs_dir_file_operations; -extern struct file_operations xfs_invis_file_operations; +extern struct file_operations linvfs_file_operations; +extern struct file_operations linvfs_invis_file_operations; +extern struct file_operations linvfs_dir_operations; extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *, int, unsigned int, void __user *); diff --git a/trunk/fs/xfs/linux-2.6/xfs_linux.h b/trunk/fs/xfs/linux-2.6/xfs_linux.h index 1fe09f2d6519..67389b745526 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_linux.h +++ b/trunk/fs/xfs/linux-2.6/xfs_linux.h @@ -73,9 +73,6 @@ #include #include #include -#include -#include -#include #include #include @@ 
-103,11 +100,6 @@ */ #undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */ #define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */ -#ifdef CONFIG_SMP -#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ -#else -#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ -#endif /* * State flag for unwritten extent buffers. @@ -234,7 +226,7 @@ BUFFER_FNS(PrivateStart, unwritten); #define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL) #define xfs_stack_trace() dump_stack() #define xfs_itruncate_data(ip, off) \ - (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) + (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) #define xfs_statvfs_fsid(statp, mp) \ ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ __kernel_fsid_t *fsid = &(statp)->f_fsid; \ diff --git a/trunk/fs/xfs/linux-2.6/xfs_lrw.c b/trunk/fs/xfs/linux-2.6/xfs_lrw.c index 0169360475c4..e0ab45fbfebd 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_lrw.c +++ b/trunk/fs/xfs/linux-2.6/xfs_lrw.c @@ -83,7 +83,7 @@ xfs_rw_enter_trace( (void *)((unsigned long)ioflags), (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), (void *)((unsigned long)(io->io_new_size & 0xffffffff)), - (void *)((unsigned long)current_pid()), + (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL, @@ -113,7 +113,7 @@ xfs_inval_cached_trace( (void *)((unsigned long)(first & 0xffffffff)), (void *)((unsigned long)((last >> 32) & 0xffffffff)), (void *)((unsigned long)(last & 0xffffffff)), - (void *)((unsigned long)current_pid()), + (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL, @@ -249,8 +249,9 @@ xfs_read( if (n < size) size = n; - if (XFS_FORCED_SHUTDOWN(mp)) + if (XFS_FORCED_SHUTDOWN(mp)) { return -EIO; + } if (unlikely(ioflags & IO_ISDIRECT)) mutex_lock(&inode->i_mutex); @@ -266,14 +267,10 @@ xfs_read( dmflags, &locktype); if (ret) { xfs_iunlock(ip, XFS_IOLOCK_SHARED); - goto unlock_mutex; + goto unlock_isem; } } - if (unlikely((ioflags & IO_ISDIRECT) && VN_CACHED(vp))) - VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(*offset)), - -1, FI_REMAPF_LOCKED); - xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, (void *)iovp, segs, *offset, ioflags); ret = __generic_file_aio_read(iocb, iovp, segs, offset); @@ -284,7 +281,7 @@ xfs_read( xfs_iunlock(ip, XFS_IOLOCK_SHARED); -unlock_mutex: +unlock_isem: if (unlikely(ioflags & IO_ISDIRECT)) mutex_unlock(&inode->i_mutex); return ret; @@ -435,7 +432,7 @@ xfs_zero_eof( xfs_fsize_t isize, /* current inode size */ xfs_fsize_t end_size) /* terminal inode size */ { - struct inode *ip = vn_to_inode(vp); + struct inode *ip = LINVFS_GET_IP(vp); xfs_fileoff_t start_zero_fsb; xfs_fileoff_t end_zero_fsb; xfs_fileoff_t zero_count_fsb; @@ -576,7 +573,7 @@ xfs_write( vrwlock_t locktype; size_t ocount = 0, count; loff_t pos; - int need_i_mutex = 1, need_flush = 0; + int need_isem = 1, need_flush = 0; XFS_STATS_INC(xs_write_calls); @@ -625,14 +622,14 @@ xfs_write( return XFS_ERROR(-EINVAL); if (!VN_CACHED(vp) && pos < i_size_read(inode)) - need_i_mutex = 0; + need_isem = 0; if (VN_CACHED(vp)) need_flush = 1; } relock: - if (need_i_mutex) { + if (need_isem) { iolock = XFS_IOLOCK_EXCL; locktype = VRWLOCK_WRITE; @@ -654,7 +651,7 @@ xfs_write( S_ISBLK(inode->i_mode)); if (error) { xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_mutex; + goto out_unlock_isem; } new_size = pos + count; @@ -666,7 +663,7 @@ xfs_write( loff_t savedsize = pos; int dmflags = FILP_DELAY_FLAG(file); - if (need_i_mutex) + if (need_isem) dmflags |= DM_FLAGS_IMUX; xfs_iunlock(xip, XFS_ILOCK_EXCL); @@ 
-675,7 +672,7 @@ xfs_write( dmflags, &locktype); if (error) { xfs_iunlock(xip, iolock); - goto out_unlock_mutex; + goto out_unlock_isem; } xfs_ilock(xip, XFS_ILOCK_EXCL); eventsent = 1; @@ -713,7 +710,7 @@ xfs_write( isize, pos + count); if (error) { xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_mutex; + goto out_unlock_isem; } } xfs_iunlock(xip, XFS_ILOCK_EXCL); @@ -734,7 +731,7 @@ xfs_write( error = -remove_suid(file->f_dentry); if (unlikely(error)) { xfs_iunlock(xip, iolock); - goto out_unlock_mutex; + goto out_unlock_isem; } } @@ -750,14 +747,14 @@ xfs_write( -1, FI_REMAPF_LOCKED); } - if (need_i_mutex) { + if (need_isem) { /* demote the lock now the cached pages are gone */ XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); mutex_unlock(&inode->i_mutex); iolock = XFS_IOLOCK_SHARED; locktype = VRWLOCK_WRITE_DIRECT; - need_i_mutex = 0; + need_isem = 0; } xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs, @@ -775,7 +772,7 @@ xfs_write( pos += ret; count -= ret; - need_i_mutex = 1; + need_isem = 1; ioflags &= ~IO_ISDIRECT; xfs_iunlock(xip, iolock); goto relock; @@ -797,14 +794,14 @@ xfs_write( !(ioflags & IO_INVIS)) { xfs_rwunlock(bdp, locktype); - if (need_i_mutex) + if (need_isem) mutex_unlock(&inode->i_mutex); error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp, DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ if (error) goto out_nounlocks; - if (need_i_mutex) + if (need_isem) mutex_lock(&inode->i_mutex); xfs_rwlock(bdp, locktype); pos = xip->i_d.di_size; @@ -908,9 +905,9 @@ xfs_write( if (error) goto out_unlock_internal; } - + xfs_rwunlock(bdp, locktype); - if (need_i_mutex) + if (need_isem) mutex_unlock(&inode->i_mutex); error = sync_page_range(inode, mapping, pos, ret); @@ -921,8 +918,8 @@ xfs_write( out_unlock_internal: xfs_rwunlock(bdp, locktype); - out_unlock_mutex: - if (need_i_mutex) + out_unlock_isem: + if (need_isem) mutex_unlock(&inode->i_mutex); out_nounlocks: return -error; diff --git a/trunk/fs/xfs/linux-2.6/xfs_stats.c b/trunk/fs/xfs/linux-2.6/xfs_stats.c index 713e6a7505d0..8955720a2c6b 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_stats.c +++ b/trunk/fs/xfs/linux-2.6/xfs_stats.c @@ -62,15 +62,18 @@ xfs_read_xfsstats( while (j < xstats[i].endpoint) { val = 0; /* sum over all cpus */ - for_each_cpu(c) + for (c = 0; c < NR_CPUS; c++) { + if (!cpu_possible(c)) continue; val += *(((__u32*)&per_cpu(xfsstats, c) + j)); + } len += sprintf(buffer + len, " %u", val); j++; } buffer[len++] = '\n'; } /* extra precision counters */ - for_each_cpu(i) { + for (i = 0; i < NR_CPUS; i++) { + if (!cpu_possible(i)) continue; xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.c b/trunk/fs/xfs/linux-2.6/xfs_super.c index 8355faf8ffde..f22e426d9e42 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.c +++ b/trunk/fs/xfs/linux-2.6/xfs_super.c @@ -59,8 +59,8 @@ #include #include -STATIC struct quotactl_ops xfs_quotactl_operations; -STATIC struct super_operations xfs_super_operations; +STATIC struct quotactl_ops linvfs_qops; +STATIC struct super_operations linvfs_sops; STATIC kmem_zone_t *xfs_vnode_zone; STATIC kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; @@ -76,6 +76,8 @@ xfs_args_allocate( strncpy(args->fsname, sb->s_id, MAXNAMELEN); /* Copy the already-parsed mount(2) flags we're interested in */ + if (sb->s_flags & MS_NOATIME) + args->flags |= XFSMNT_NOATIME; if 
(sb->s_flags & MS_DIRSYNC) args->flags |= XFSMNT_DIRSYNC; if (sb->s_flags & MS_SYNCHRONOUS) @@ -127,21 +129,21 @@ xfs_set_inodeops( { switch (inode->i_mode & S_IFMT) { case S_IFREG: - inode->i_op = &xfs_inode_operations; - inode->i_fop = &xfs_file_operations; - inode->i_mapping->a_ops = &xfs_address_space_operations; + inode->i_op = &linvfs_file_inode_operations; + inode->i_fop = &linvfs_file_operations; + inode->i_mapping->a_ops = &linvfs_aops; break; case S_IFDIR: - inode->i_op = &xfs_dir_inode_operations; - inode->i_fop = &xfs_dir_file_operations; + inode->i_op = &linvfs_dir_inode_operations; + inode->i_fop = &linvfs_dir_operations; break; case S_IFLNK: - inode->i_op = &xfs_symlink_inode_operations; + inode->i_op = &linvfs_symlink_inode_operations; if (inode->i_blocks) - inode->i_mapping->a_ops = &xfs_address_space_operations; + inode->i_mapping->a_ops = &linvfs_aops; break; default: - inode->i_op = &xfs_inode_operations; + inode->i_op = &linvfs_file_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } @@ -153,7 +155,7 @@ xfs_revalidate_inode( vnode_t *vp, xfs_inode_t *ip) { - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); inode->i_mode = ip->i_d.di_mode; inode->i_nlink = ip->i_d.di_nlink; @@ -210,7 +212,7 @@ xfs_initialize_vnode( int unlock) { xfs_inode_t *ip = XFS_BHVTOI(inode_bhv); - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); if (!inode_bhv->bd_vobj) { vp->v_vfsp = bhvtovfs(bdp); @@ -228,7 +230,7 @@ xfs_initialize_vnode( if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) { xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); xfs_set_inodeops(inode); - + ip->i_flags &= ~XFS_INEW; barrier(); @@ -332,42 +334,43 @@ xfs_blkdev_issue_flush( } STATIC struct inode * -xfs_fs_alloc_inode( +linvfs_alloc_inode( struct super_block *sb) { vnode_t *vp; - vp = kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); - if (unlikely(!vp)) + vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP)); + if (!vp) return NULL; - return vn_to_inode(vp); + return LINVFS_GET_IP(vp); } STATIC void -xfs_fs_destroy_inode( +linvfs_destroy_inode( struct inode *inode) { - kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode)); + kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode)); } STATIC void -xfs_fs_inode_init_once( - void *vnode, - kmem_zone_t *zonep, +linvfs_inode_init_once( + void *data, + kmem_cache_t *cachep, unsigned long flags) { + vnode_t *vp = (vnode_t *)data; + if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - inode_init_once(vn_to_inode((vnode_t *)vnode)); + SLAB_CTOR_CONSTRUCTOR) + inode_init_once(LINVFS_GET_IP(vp)); } STATIC int -xfs_init_zones(void) +linvfs_init_zones(void) { - xfs_vnode_zone = kmem_zone_init_flags(sizeof(vnode_t), "xfs_vnode_t", - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | - KM_ZONE_SPREAD, - xfs_fs_inode_init_once); + xfs_vnode_zone = kmem_cache_create("xfs_vnode", + sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT, + linvfs_inode_init_once, NULL); if (!xfs_vnode_zone) goto out; @@ -376,12 +379,14 @@ xfs_init_zones(void) goto out_destroy_vnode_zone; xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, - mempool_alloc_slab, mempool_free_slab, - xfs_ioend_zone); + mempool_alloc_slab, mempool_free_slab, + xfs_ioend_zone); if (!xfs_ioend_pool) goto out_free_ioend_zone; + return 0; + out_free_ioend_zone: kmem_zone_destroy(xfs_ioend_zone); out_destroy_vnode_zone: @@ -391,7 +396,7 @@ xfs_init_zones(void) } STATIC void -xfs_destroy_zones(void) 
+linvfs_destroy_zones(void) { mempool_destroy(xfs_ioend_pool); kmem_zone_destroy(xfs_vnode_zone); @@ -402,14 +407,14 @@ xfs_destroy_zones(void) * Attempt to flush the inode, this will actually fail * if the inode is pinned, but we dirty the inode again * at the point when it is unpinned after a log write, - * since this is when the inode itself becomes flushable. + * since this is when the inode itself becomes flushable. */ STATIC int -xfs_fs_write_inode( +linvfs_write_inode( struct inode *inode, int sync) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error = 0, flags = FLUSH_INODE; if (vp) { @@ -429,13 +434,13 @@ xfs_fs_write_inode( } STATIC void -xfs_fs_clear_inode( +linvfs_clear_inode( struct inode *inode) { - vnode_t *vp = vn_from_inode(inode); + vnode_t *vp = LINVFS_GET_VP(inode); int error, cache; - vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); + vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address); XFS_STATS_INC(vn_rele); XFS_STATS_INC(vn_remove); @@ -511,7 +516,7 @@ void xfs_flush_inode( xfs_inode_t *ip) { - struct inode *inode = vn_to_inode(XFS_ITOV(ip)); + struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); struct vfs *vfs = XFS_MTOVFS(ip->i_mount); igrab(inode); @@ -536,7 +541,7 @@ void xfs_flush_device( xfs_inode_t *ip) { - struct inode *inode = vn_to_inode(XFS_ITOV(ip)); + struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); struct vfs *vfs = XFS_MTOVFS(ip->i_mount); igrab(inode); @@ -545,7 +550,7 @@ xfs_flush_device( xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); } -#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR|SYNC_REFCACHE) +#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR) STATIC void vfs_sync_worker( vfs_t *vfsp, @@ -608,7 +613,7 @@ xfssyncd( } STATIC int -xfs_fs_start_syncd( +linvfs_start_syncd( vfs_t *vfsp) { vfsp->vfs_sync_work.w_syncer = vfs_sync_worker; @@ -620,20 +625,20 @@ xfs_fs_start_syncd( } STATIC void -xfs_fs_stop_syncd( +linvfs_stop_syncd( vfs_t *vfsp) { kthread_stop(vfsp->vfs_sync_task); } STATIC void -xfs_fs_put_super( +linvfs_put_super( struct super_block *sb) { - vfs_t *vfsp = vfs_from_sb(sb); + vfs_t *vfsp = LINVFS_GET_VFS(sb); int error; - xfs_fs_stop_syncd(vfsp); + linvfs_stop_syncd(vfsp); VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error); if (!error) VFS_UNMOUNT(vfsp, 0, NULL, error); @@ -647,10 +652,10 @@ xfs_fs_put_super( } STATIC void -xfs_fs_write_super( +linvfs_write_super( struct super_block *sb) { - vfs_t *vfsp = vfs_from_sb(sb); + vfs_t *vfsp = LINVFS_GET_VFS(sb); int error; if (sb->s_flags & MS_RDONLY) { @@ -663,11 +668,11 @@ xfs_fs_write_super( } STATIC int -xfs_fs_sync_super( +linvfs_sync_super( struct super_block *sb, int wait) { - vfs_t *vfsp = vfs_from_sb(sb); + vfs_t *vfsp = LINVFS_GET_VFS(sb); int error; int flags = SYNC_FSDATA; @@ -702,11 +707,11 @@ xfs_fs_sync_super( } STATIC int -xfs_fs_statfs( +linvfs_statfs( struct super_block *sb, struct kstatfs *statp) { - vfs_t *vfsp = vfs_from_sb(sb); + vfs_t *vfsp = LINVFS_GET_VFS(sb); int error; VFS_STATVFS(vfsp, statp, NULL, error); @@ -714,12 +719,12 @@ xfs_fs_statfs( } STATIC int -xfs_fs_remount( +linvfs_remount( struct super_block *sb, int *flags, char *options) { - vfs_t *vfsp = vfs_from_sb(sb); + vfs_t *vfsp = LINVFS_GET_VFS(sb); struct xfs_mount_args *args = xfs_args_allocate(sb); int error; @@ -731,18 +736,18 @@ xfs_fs_remount( } STATIC void -xfs_fs_lockfs( +linvfs_freeze_fs( struct super_block *sb) { - VFS_FREEZE(vfs_from_sb(sb)); + VFS_FREEZE(LINVFS_GET_VFS(sb)); } STATIC int 
-xfs_fs_show_options( +linvfs_show_options( struct seq_file *m, struct vfsmount *mnt) { - struct vfs *vfsp = vfs_from_sb(mnt->mnt_sb); + struct vfs *vfsp = LINVFS_GET_VFS(mnt->mnt_sb); int error; VFS_SHOWARGS(vfsp, m, error); @@ -750,11 +755,11 @@ xfs_fs_show_options( } STATIC int -xfs_fs_quotasync( +linvfs_quotasync( struct super_block *sb, int type) { - struct vfs *vfsp = vfs_from_sb(sb); + struct vfs *vfsp = LINVFS_GET_VFS(sb); int error; VFS_QUOTACTL(vfsp, Q_XQUOTASYNC, 0, (caddr_t)NULL, error); @@ -762,11 +767,11 @@ xfs_fs_quotasync( } STATIC int -xfs_fs_getxstate( +linvfs_getxstate( struct super_block *sb, struct fs_quota_stat *fqs) { - struct vfs *vfsp = vfs_from_sb(sb); + struct vfs *vfsp = LINVFS_GET_VFS(sb); int error; VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error); @@ -774,12 +779,12 @@ xfs_fs_getxstate( } STATIC int -xfs_fs_setxstate( +linvfs_setxstate( struct super_block *sb, unsigned int flags, int op) { - struct vfs *vfsp = vfs_from_sb(sb); + struct vfs *vfsp = LINVFS_GET_VFS(sb); int error; VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error); @@ -787,13 +792,13 @@ xfs_fs_setxstate( } STATIC int -xfs_fs_getxquota( +linvfs_getxquota( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { - struct vfs *vfsp = vfs_from_sb(sb); + struct vfs *vfsp = LINVFS_GET_VFS(sb); int error, getmode; getmode = (type == USRQUOTA) ? Q_XGETQUOTA : @@ -803,13 +808,13 @@ xfs_fs_getxquota( } STATIC int -xfs_fs_setxquota( +linvfs_setxquota( struct super_block *sb, int type, qid_t id, struct fs_disk_quota *fdq) { - struct vfs *vfsp = vfs_from_sb(sb); + struct vfs *vfsp = LINVFS_GET_VFS(sb); int error, setmode; setmode = (type == USRQUOTA) ? Q_XSETQLIM : @@ -819,17 +824,21 @@ xfs_fs_setxquota( } STATIC int -xfs_fs_fill_super( +linvfs_fill_super( struct super_block *sb, void *data, int silent) { vnode_t *rootvp; - struct vfs *vfsp = vfs_allocate(sb); + struct vfs *vfsp = vfs_allocate(); struct xfs_mount_args *args = xfs_args_allocate(sb); struct kstatfs statvfs; int error, error2; + vfsp->vfs_super = sb; + LINVFS_SET_VFS(sb, vfsp); + if (sb->s_flags & MS_RDONLY) + vfsp->vfs_flag |= VFS_RDONLY; bhv_insert_all_vfsops(vfsp); VFS_PARSEARGS(vfsp, (char *)data, args, 0, error); @@ -840,10 +849,10 @@ xfs_fs_fill_super( sb_min_blocksize(sb, BBSIZE); #ifdef CONFIG_XFS_EXPORT - sb->s_export_op = &xfs_export_operations; + sb->s_export_op = &linvfs_export_ops; #endif - sb->s_qcop = &xfs_quotactl_operations; - sb->s_op = &xfs_super_operations; + sb->s_qcop = &linvfs_qops; + sb->s_op = &linvfs_sops; VFS_MOUNT(vfsp, args, NULL, error); if (error) { @@ -867,7 +876,7 @@ xfs_fs_fill_super( if (error) goto fail_unmount; - sb->s_root = d_alloc_root(vn_to_inode(rootvp)); + sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp)); if (!sb->s_root) { error = ENOMEM; goto fail_vnrele; @@ -876,7 +885,7 @@ xfs_fs_fill_super( error = EINVAL; goto fail_vnrele; } - if ((error = xfs_fs_start_syncd(vfsp))) + if ((error = linvfs_start_syncd(vfsp))) goto fail_vnrele; vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address); @@ -901,41 +910,41 @@ xfs_fs_fill_super( } STATIC struct super_block * -xfs_fs_get_sb( +linvfs_get_sb( struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); -} - -STATIC struct super_operations xfs_super_operations = { - .alloc_inode = xfs_fs_alloc_inode, - .destroy_inode = xfs_fs_destroy_inode, - .write_inode = xfs_fs_write_inode, - .clear_inode = xfs_fs_clear_inode, - .put_super = 
xfs_fs_put_super, - .write_super = xfs_fs_write_super, - .sync_fs = xfs_fs_sync_super, - .write_super_lockfs = xfs_fs_lockfs, - .statfs = xfs_fs_statfs, - .remount_fs = xfs_fs_remount, - .show_options = xfs_fs_show_options, + return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super); +} + +STATIC struct super_operations linvfs_sops = { + .alloc_inode = linvfs_alloc_inode, + .destroy_inode = linvfs_destroy_inode, + .write_inode = linvfs_write_inode, + .clear_inode = linvfs_clear_inode, + .put_super = linvfs_put_super, + .write_super = linvfs_write_super, + .sync_fs = linvfs_sync_super, + .write_super_lockfs = linvfs_freeze_fs, + .statfs = linvfs_statfs, + .remount_fs = linvfs_remount, + .show_options = linvfs_show_options, }; -STATIC struct quotactl_ops xfs_quotactl_operations = { - .quota_sync = xfs_fs_quotasync, - .get_xstate = xfs_fs_getxstate, - .set_xstate = xfs_fs_setxstate, - .get_xquota = xfs_fs_getxquota, - .set_xquota = xfs_fs_setxquota, +STATIC struct quotactl_ops linvfs_qops = { + .quota_sync = linvfs_quotasync, + .get_xstate = linvfs_getxstate, + .set_xstate = linvfs_setxstate, + .get_xquota = linvfs_getxquota, + .set_xquota = linvfs_setxquota, }; STATIC struct file_system_type xfs_fs_type = { .owner = THIS_MODULE, .name = "xfs", - .get_sb = xfs_fs_get_sb, + .get_sb = linvfs_get_sb, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; @@ -956,7 +965,7 @@ init_xfs_fs( void ) ktrace_init(64); - error = xfs_init_zones(); + error = linvfs_init_zones(); if (error < 0) goto undo_zones; @@ -972,13 +981,14 @@ init_xfs_fs( void ) error = register_filesystem(&xfs_fs_type); if (error) goto undo_register; + XFS_DM_INIT(&xfs_fs_type); return 0; undo_register: xfs_buf_terminate(); undo_buffers: - xfs_destroy_zones(); + linvfs_destroy_zones(); undo_zones: return error; @@ -988,10 +998,11 @@ STATIC void __exit exit_xfs_fs( void ) { vfs_exitquota(); + XFS_DM_EXIT(&xfs_fs_type); unregister_filesystem(&xfs_fs_type); xfs_cleanup(); xfs_buf_terminate(); - xfs_destroy_zones(); + linvfs_destroy_zones(); ktrace_uninit(); } diff --git a/trunk/fs/xfs/linux-2.6/xfs_super.h b/trunk/fs/xfs/linux-2.6/xfs_super.h index 376b96cb513a..df59408dca06 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_super.h +++ b/trunk/fs/xfs/linux-2.6/xfs_super.h @@ -98,6 +98,11 @@ extern void xfs_qm_exit(void); XFS_DMAPI_STRING \ XFS_DBG_STRING /* DBG must be last */ +#define LINVFS_GET_VFS(s) \ + (vfs_t *)((s)->s_fs_info) +#define LINVFS_SET_VFS(s, vfsp) \ + ((s)->s_fs_info = vfsp) + struct xfs_inode; struct xfs_mount; struct xfs_buftarg; @@ -115,6 +120,6 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *, extern void xfs_blkdev_put(struct block_device *); extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); -extern struct export_operations xfs_export_operations; +extern struct export_operations linvfs_export_ops; #endif /* __XFS_SUPER_H__ */ diff --git a/trunk/fs/xfs/linux-2.6/xfs_sysctl.c b/trunk/fs/xfs/linux-2.6/xfs_sysctl.c index 7079cc837210..a02564972420 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/trunk/fs/xfs/linux-2.6/xfs_sysctl.c @@ -38,7 +38,8 @@ xfs_stats_clear_proc_handler( if (!ret && write && *valp) { printk("XFS Clearing xfsstats\n"); - for_each_cpu(c) { + for (c = 0; c < NR_CPUS; c++) { + if (!cpu_possible(c)) continue; preempt_disable(); /* save vn_active, it's a universal truth! 
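 * The reset below walks every possible CPU's copy of the per-CPU
 * xfsstats, but vn_active is read out before the memset and written
 * back afterwards: the active-vnode count is live state rather than a
 * statistic, so it has to survive a counter reset. A minimal sketch of
 * one iteration, illustrative only:
 *
 *     preempt_disable();
 *     vn_active = per_cpu(xfsstats, c).vn_active;
 *     memset(&per_cpu(xfsstats, c), 0, sizeof(struct xfsstats));
 *     per_cpu(xfsstats, c).vn_active = vn_active;
 *     preempt_enable();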
*/ vn_active = per_cpu(xfsstats, c).vn_active; diff --git a/trunk/fs/xfs/linux-2.6/xfs_vfs.c b/trunk/fs/xfs/linux-2.6/xfs_vfs.c index 6f7c9f7a8624..c855d62e5344 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_vfs.c +++ b/trunk/fs/xfs/linux-2.6/xfs_vfs.c @@ -227,8 +227,7 @@ vfs_freeze( } vfs_t * -vfs_allocate( - struct super_block *sb) +vfs_allocate( void ) { struct vfs *vfsp; @@ -237,23 +236,9 @@ vfs_allocate( INIT_LIST_HEAD(&vfsp->vfs_sync_list); spin_lock_init(&vfsp->vfs_sync_lock); init_waitqueue_head(&vfsp->vfs_wait_single_sync_task); - - vfsp->vfs_super = sb; - sb->s_fs_info = vfsp; - - if (sb->s_flags & MS_RDONLY) - vfsp->vfs_flag |= VFS_RDONLY; - return vfsp; } -vfs_t * -vfs_from_sb( - struct super_block *sb) -{ - return (vfs_t *)sb->s_fs_info; -} - void vfs_deallocate( struct vfs *vfsp) @@ -310,7 +295,7 @@ bhv_remove_all_vfsops( bhv_remove_vfsops(vfsp, VFS_POSITION_DM); if (!freebase) return; - mp = XFS_VFSTOM(vfsp); + mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops)); VFS_REMOVEBHV(vfsp, &mp->m_bhv); xfs_mount_free(mp, 0); } diff --git a/trunk/fs/xfs/linux-2.6/xfs_vfs.h b/trunk/fs/xfs/linux-2.6/xfs_vfs.h index 8fed356db055..57caf9eddee0 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_vfs.h +++ b/trunk/fs/xfs/linux-2.6/xfs_vfs.h @@ -193,8 +193,7 @@ typedef struct bhv_vfsops { #define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o)) #define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL ) -extern vfs_t *vfs_allocate(struct super_block *); -extern vfs_t *vfs_from_sb(struct super_block *); +extern vfs_t *vfs_allocate(void); extern void vfs_deallocate(vfs_t *); extern void vfs_insertops(vfs_t *, bhv_vfsops_t *); extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *); diff --git a/trunk/fs/xfs/linux-2.6/xfs_vnode.c b/trunk/fs/xfs/linux-2.6/xfs_vnode.c index d27c25b27ccd..260dd8415dd7 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_vnode.c +++ b/trunk/fs/xfs/linux-2.6/xfs_vnode.c @@ -58,7 +58,7 @@ struct vnode * vn_initialize( struct inode *inode) { - struct vnode *vp = vn_from_inode(inode); + struct vnode *vp = LINVFS_GET_VP(inode); XFS_STATS_INC(vn_active); XFS_STATS_INC(vn_alloc); @@ -83,7 +83,7 @@ vn_initialize( vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); #endif /* XFS_VNODE_TRACE */ - vn_trace_exit(vp, __FUNCTION__, (inst_t *)__return_address); + vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address); return vp; } @@ -97,7 +97,7 @@ vn_revalidate_core( struct vnode *vp, vattr_t *vap) { - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); inode->i_mode = vap->va_mode; inode->i_nlink = vap->va_nlink; @@ -129,31 +129,24 @@ vn_revalidate_core( * Revalidate the Linux inode from the vnode. */ int -__vn_revalidate( - struct vnode *vp, - struct vattr *vattr) +vn_revalidate( + struct vnode *vp) { + vattr_t va; int error; - vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); - vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS; - VOP_GETATTR(vp, vattr, 0, NULL, error); - if (likely(!error)) { - vn_revalidate_core(vp, vattr); + vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address); + ASSERT(vp->v_fbhv != NULL); + + va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS; + VOP_GETATTR(vp, &va, 0, NULL, error); + if (!error) { + vn_revalidate_core(vp, &va); VUNMODIFY(vp); } return -error; } -int -vn_revalidate( - struct vnode *vp) -{ - vattr_t vattr; - - return __vn_revalidate(vp, &vattr); -} - /* * Add a reference to a referenced vnode. 
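 * The reference count lives in the embedded Linux inode, so vn_hold
 * below simply igrab()s it. All of the vnode/inode conversions in this
 * patch reduce to pointer arithmetic over that embedding; a minimal
 * sketch of the two directions, illustrative only:
 *
 *     typedef struct vnode {
 *         ... XFS-side state ...
 *         struct inode v_inode;     embedded Linux inode
 *     } vnode_t;
 *
 *     LINVFS_GET_IP(vp) is &(vp)->v_inode, and LINVFS_GET_VP(ip) is
 *     list_entry(ip, vnode_t, v_inode), i.e. container_of: subtract
 *     offsetof(vnode_t, v_inode) from the inode pointer.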
*/ @@ -166,7 +159,7 @@ vn_hold( XFS_STATS_INC(vn_hold); VN_LOCK(vp); - inode = igrab(vn_to_inode(vp)); + inode = igrab(LINVFS_GET_IP(vp)); ASSERT(inode); VN_UNLOCK(vp, 0); diff --git a/trunk/fs/xfs/linux-2.6/xfs_vnode.h b/trunk/fs/xfs/linux-2.6/xfs_vnode.h index 06f5845e9568..0fe2419461d6 100644 --- a/trunk/fs/xfs/linux-2.6/xfs_vnode.h +++ b/trunk/fs/xfs/linux-2.6/xfs_vnode.h @@ -116,14 +116,8 @@ typedef enum { /* * Vnode to Linux inode mapping. */ -static inline struct vnode *vn_from_inode(struct inode *inode) -{ - return (vnode_t *)list_entry(inode, vnode_t, v_inode); -} -static inline struct inode *vn_to_inode(struct vnode *vnode) -{ - return &vnode->v_inode; -} +#define LINVFS_GET_VP(inode) ((vnode_t *)list_entry(inode, vnode_t, v_inode)) +#define LINVFS_GET_IP(vp) (&(vp)->v_inode) /* * Vnode flags. @@ -496,7 +490,6 @@ typedef struct vnode_map { (vmap).v_ino = (vp)->v_inode.i_ino; } extern int vn_revalidate(struct vnode *); -extern int __vn_revalidate(struct vnode *, vattr_t *); extern void vn_revalidate_core(struct vnode *, vattr_t *); extern void vn_iowait(struct vnode *vp); @@ -504,7 +497,7 @@ extern void vn_iowake(struct vnode *vp); static inline int vn_count(struct vnode *vp) { - return atomic_read(&vn_to_inode(vp)->i_count); + return atomic_read(&LINVFS_GET_IP(vp)->i_count); } /* @@ -518,16 +511,16 @@ extern vnode_t *vn_hold(struct vnode *); vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address)) #define VN_RELE(vp) \ (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \ - iput(vn_to_inode(vp))) + iput(LINVFS_GET_IP(vp))) #else #define VN_HOLD(vp) ((void)vn_hold(vp)) -#define VN_RELE(vp) (iput(vn_to_inode(vp))) +#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) #endif static inline struct vnode *vn_grab(struct vnode *vp) { - struct inode *inode = igrab(vn_to_inode(vp)); - return inode ? vn_from_inode(inode) : NULL; + struct inode *inode = igrab(LINVFS_GET_IP(vp)); + return inode ? LINVFS_GET_VP(inode) : NULL; } /* @@ -535,7 +528,7 @@ static inline struct vnode *vn_grab(struct vnode *vp) */ #define VNAME(dentry) ((char *) (dentry)->d_name.name) #define VNAMELEN(dentry) ((dentry)->d_name.len) -#define VNAME_TO_VNODE(dentry) (vn_from_inode((dentry)->d_inode)) +#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode)) /* * Vnode spinlock manipulation. @@ -564,12 +557,12 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag) */ static inline void vn_mark_bad(struct vnode *vp) { - make_bad_inode(vn_to_inode(vp)); + make_bad_inode(LINVFS_GET_IP(vp)); } static inline int VN_BAD(struct vnode *vp) { - return is_bad_inode(vn_to_inode(vp)); + return is_bad_inode(LINVFS_GET_IP(vp)); } /* @@ -594,9 +587,9 @@ static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt) /* * Some useful predicates. 
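 * Each predicate below delegates to the embedded inode's
 * address_space: mapped into some process, holding cached pages, or
 * holding pages tagged dirty in the page-cache radix tree,
 * respectively. With ip the Linux inode behind the vnode, the
 * equivalences are, roughly:
 *
 *     VN_MAPPED(vp): mapping_mapped(ip->i_mapping)
 *     VN_CACHED(vp): ip->i_mapping->nrpages != 0
 *     VN_DIRTY(vp):  mapping_tagged(ip->i_mapping, PAGECACHE_TAG_DIRTY)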
*/ -#define VN_MAPPED(vp) mapping_mapped(vn_to_inode(vp)->i_mapping) -#define VN_CACHED(vp) (vn_to_inode(vp)->i_mapping->nrpages) -#define VN_DIRTY(vp) mapping_tagged(vn_to_inode(vp)->i_mapping, \ +#define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping) +#define VN_CACHED(vp) (LINVFS_GET_IP(vp)->i_mapping->nrpages) +#define VN_DIRTY(vp) mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \ PAGECACHE_TAG_DIRTY) #define VMODIFY(vp) VN_FLAGSET(vp, VMODIFIED) #define VUNMODIFY(vp) VN_FLAGCLR(vp, VMODIFIED) diff --git a/trunk/fs/xfs/quota/xfs_dquot_item.c b/trunk/fs/xfs/quota/xfs_dquot_item.c index e4e5f05b841b..2ec6b441849c 100644 --- a/trunk/fs/xfs/quota/xfs_dquot_item.c +++ b/trunk/fs/xfs/quota/xfs_dquot_item.c @@ -79,11 +79,9 @@ xfs_qm_dquot_logitem_format( logvec->i_addr = (xfs_caddr_t)&logitem->qli_format; logvec->i_len = sizeof(xfs_dq_logformat_t); - XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT); logvec++; logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core; logvec->i_len = sizeof(xfs_disk_dquot_t); - XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT); ASSERT(2 == logitem->qli_item.li_desc->lid_size); logitem->qli_format.qlf_size = 2; diff --git a/trunk/fs/xfs/quota/xfs_qm.c b/trunk/fs/xfs/quota/xfs_qm.c index 1fb757ef3f41..7c0e39dc6189 100644 --- a/trunk/fs/xfs/quota/xfs_qm.c +++ b/trunk/fs/xfs/quota/xfs_qm.c @@ -1704,9 +1704,9 @@ xfs_qm_get_rtblks( xfs_qcnt_t *O_rtblks) { xfs_filblks_t rtblks; /* total rt blks */ - xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extent entries */ + xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ int error; @@ -1717,11 +1717,10 @@ xfs_qm_get_rtblks( return error; } rtblks = 0; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (idx = 0; idx < nextents; idx++) { - ep = xfs_iext_get_ext(ifp, idx); + nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; + for (ep = base; ep < &base[nextents]; ep++) rtblks += xfs_bmbt_get_blockcount(ep); - } *O_rtblks = (xfs_qcnt_t)rtblks; return 0; } @@ -2789,7 +2788,9 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql) xfs_qm_dqdestroy(dqp); dqp = nextdqp; } - mutex_unlock(&ql->qh_lock); + /* + * Don't bother about unlocking. 
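 * The teardown below runs with qh_lock held and destroys the mutex
 * immediately after emptying the list; unlocking first would only let
 * a waiter acquire a lock that guards freed memory. A minimal sketch
 * of the walk, illustrative only:
 *
 *     mutex_lock(&ql->qh_lock);
 *     for (dqp = ql->qh_next; dqp != (xfs_dquot_t *)ql; dqp = nextdqp) {
 *         xfs_dqlock(dqp);
 *         nextdqp = dqp->dq_flnext;
 *         XQM_FREELIST_REMOVE(dqp);
 *         xfs_dqunlock(dqp);
 *         xfs_qm_dqdestroy(dqp);
 *     }
 *     mutex_destroy(&ql->qh_lock);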
+ */ mutex_destroy(&ql->qh_lock); ASSERT(ql->qh_nelems == 0); diff --git a/trunk/fs/xfs/quota/xfs_qm_bhv.c b/trunk/fs/xfs/quota/xfs_qm_bhv.c index 6838b36d95a9..90402a1c3983 100644 --- a/trunk/fs/xfs/quota/xfs_qm_bhv.c +++ b/trunk/fs/xfs/quota/xfs_qm_bhv.c @@ -374,7 +374,7 @@ xfs_qm_exit(void) vfs_bhv_clr_custom(&xfs_qmops); xfs_qm_cleanup_procfs(); if (qm_dqzone) - kmem_zone_destroy(qm_dqzone); + kmem_cache_destroy(qm_dqzone); if (qm_dqtrxzone) - kmem_zone_destroy(qm_dqtrxzone); + kmem_cache_destroy(qm_dqtrxzone); } diff --git a/trunk/fs/xfs/support/ktrace.c b/trunk/fs/xfs/support/ktrace.c index addf5a7ea06c..841aa4c15b8a 100644 --- a/trunk/fs/xfs/support/ktrace.c +++ b/trunk/fs/xfs/support/ktrace.c @@ -39,8 +39,8 @@ ktrace_init(int zentries) void ktrace_uninit(void) { - kmem_zone_destroy(ktrace_hdr_zone); - kmem_zone_destroy(ktrace_ent_zone); + kmem_cache_destroy(ktrace_hdr_zone); + kmem_cache_destroy(ktrace_ent_zone); } /* diff --git a/trunk/fs/xfs/support/uuid.c b/trunk/fs/xfs/support/uuid.c index e157015c70ff..a3d565a67734 100644 --- a/trunk/fs/xfs/support/uuid.c +++ b/trunk/fs/xfs/support/uuid.c @@ -21,6 +21,13 @@ static mutex_t uuid_monitor; static int uuid_table_size; static uuid_t *uuid_table; +void +uuid_init(void) +{ + mutex_init(&uuid_monitor); +} + + /* IRIX interpretation of an uuid_t */ typedef struct { __be32 uu_timelow; @@ -43,7 +50,7 @@ uuid_getnodeuniq(uuid_t *uuid, int fsid [2]) fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) | be16_to_cpu(uup->uu_timemid); - fsid[1] = be32_to_cpu(uup->uu_timelow); + fsid[1] = be16_to_cpu(uup->uu_timelow); } void @@ -132,9 +139,3 @@ uuid_table_remove(uuid_t *uuid) ASSERT(i < uuid_table_size); mutex_unlock(&uuid_monitor); } - -void -uuid_init(void) -{ - mutex_init(&uuid_monitor); -} diff --git a/trunk/fs/xfs/xfs_acl.h b/trunk/fs/xfs/xfs_acl.h index 538d0d65b04c..f9315bc960cb 100644 --- a/trunk/fs/xfs/xfs_acl.h +++ b/trunk/fs/xfs/xfs_acl.h @@ -55,8 +55,8 @@ struct xfs_inode; extern struct kmem_zone *xfs_acl_zone; #define xfs_acl_zone_init(zone, name) \ - (zone) = kmem_zone_init(sizeof(xfs_acl_t), (name)) -#define xfs_acl_zone_destroy(zone) kmem_zone_destroy(zone) + (zone) = kmem_zone_init(sizeof(xfs_acl_t), name) +#define xfs_acl_zone_destroy(zone) kmem_cache_destroy(zone) extern int xfs_acl_inherit(struct vnode *, struct vattr *, xfs_acl_t *); extern int xfs_acl_iaccess(struct xfs_inode *, mode_t, cred_t *); diff --git a/trunk/fs/xfs/xfs_attr.c b/trunk/fs/xfs/xfs_attr.c index 093fac476bda..e5e91e9c7e89 100644 --- a/trunk/fs/xfs/xfs_attr.c +++ b/trunk/fs/xfs/xfs_attr.c @@ -1127,7 +1127,8 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context) return(error); ASSERT(bp != NULL); leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + != XFS_ATTR_LEAF_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); xfs_da_brelse(NULL, bp); @@ -1540,8 +1541,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) XFS_ATTR_FORK); if (error) goto out; - ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *) - bp->data)->hdr.info.magic) + ASSERT(INT_GET(((xfs_attr_leafblock_t *) + bp->data)->hdr.info.magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { @@ -1762,7 +1763,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(error); if (bp) { node = bp->data; - switch (be16_to_cpu(node->hdr.info.magic)) { + switch (INT_GET(node->hdr.info.magic, ARCH_CONVERT)) { case 
XFS_DA_NODE_MAGIC: xfs_attr_trace_l_cn("wrong blk", context, node); xfs_da_brelse(NULL, bp); @@ -1770,14 +1771,18 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) break; case XFS_ATTR_LEAF_MAGIC: leaf = bp->data; - if (cursor->hashval > be32_to_cpu(leaf->entries[ - be16_to_cpu(leaf->hdr.count)-1].hashval)) { + if (cursor->hashval > + INT_GET(leaf->entries[ + INT_GET(leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT)) { xfs_attr_trace_l_cl("wrong blk", context, leaf); xfs_da_brelse(NULL, bp); bp = NULL; } else if (cursor->hashval <= - be32_to_cpu(leaf->entries[0].hashval)) { + INT_GET(leaf->entries[0].hashval, + ARCH_CONVERT)) { xfs_attr_trace_l_cl("maybe wrong blk", context, leaf); xfs_da_brelse(NULL, bp); @@ -1812,10 +1817,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } node = bp->data; - if (be16_to_cpu(node->hdr.info.magic) + if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) break; - if (unlikely(be16_to_cpu(node->hdr.info.magic) + if (unlikely(INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, @@ -1825,17 +1830,19 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } btree = node->btree; - for (i = 0; i < be16_to_cpu(node->hdr.count); + for (i = 0; + i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { if (cursor->hashval - <= be32_to_cpu(btree->hashval)) { - cursor->blkno = be32_to_cpu(btree->before); + <= INT_GET(btree->hashval, + ARCH_CONVERT)) { + cursor->blkno = INT_GET(btree->before, ARCH_CONVERT); xfs_attr_trace_l_cb("descending", context, btree); break; } } - if (i == be16_to_cpu(node->hdr.count)) { + if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { xfs_da_brelse(NULL, bp); return(0); } @@ -1851,7 +1858,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) */ for (;;) { leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_ATTR_LEAF_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", XFS_ERRLEVEL_LOW, @@ -1862,7 +1869,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) error = xfs_attr_leaf_list_int(bp, context); if (error || !leaf->hdr.info.forw) break; /* not really an error, buffer full or EOF */ - cursor->blkno = be32_to_cpu(leaf->hdr.info.forw); + cursor->blkno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT); xfs_da_brelse(NULL, bp); error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1, &bp, XFS_ATTR_FORK); @@ -2225,10 +2232,9 @@ xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)be16_to_cpu(node->hdr.count), - (__psunsigned_t)be32_to_cpu(node->btree[0].hashval), - (__psunsigned_t)be32_to_cpu(node->btree[ - be16_to_cpu(node->hdr.count)-1].hashval)); + (__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); } /* @@ -2255,8 +2261,8 @@ xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)be32_to_cpu(btree->hashval), - (__psunsigned_t)be32_to_cpu(btree->before), + (__psunsigned_t)INT_GET(btree->hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(btree->before, ARCH_CONVERT), 
(__psunsigned_t)NULL); } @@ -2284,10 +2290,9 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context, : 0, (__psunsigned_t)context->dupcnt, (__psunsigned_t)context->flags, - (__psunsigned_t)be16_to_cpu(leaf->hdr.count), - (__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval), - (__psunsigned_t)be32_to_cpu(leaf->entries[ - be16_to_cpu(leaf->hdr.count)-1].hashval)); + (__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT), + (__psunsigned_t)INT_GET(leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); } /* @@ -2517,7 +2522,7 @@ attr_user_capable( struct vnode *vp, cred_t *cred) { - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; @@ -2535,7 +2540,7 @@ attr_trusted_capable( struct vnode *vp, cred_t *cred) { - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; diff --git a/trunk/fs/xfs/xfs_attr_leaf.c b/trunk/fs/xfs/xfs_attr_leaf.c index 717682747bd2..fe91eac4e2a7 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.c +++ b/trunk/fs/xfs/xfs_attr_leaf.c @@ -194,7 +194,7 @@ xfs_attr_shortform_create(xfs_da_args_t *args) xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; hdr->count = 0; - hdr->totsize = cpu_to_be16(sizeof(*hdr)); + INT_SET(hdr->totsize, ARCH_CONVERT, sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } @@ -224,7 +224,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); + sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { #ifdef DEBUG if (sfe->namelen != args->namelen) continue; @@ -247,13 +248,13 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset); sfe->namelen = args->namelen; - sfe->valuelen = args->valuelen; + INT_SET(sfe->valuelen, ARCH_CONVERT, args->valuelen); sfe->flags = (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : ((args->flags & ATTR_ROOT) ? 
XFS_ATTR_ROOT : 0); memcpy(sfe->nameval, args->name, args->namelen); memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen); - sf->hdr.count++; - be16_add(&sf->hdr.totsize, size); + INT_MOD(sf->hdr.count, ARCH_CONVERT, 1); + INT_MOD(sf->hdr.totsize, ARCH_CONVERT, size); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); xfs_sbversion_add_attr2(mp, args->trans); @@ -276,7 +277,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) base = sizeof(xfs_attr_sf_hdr_t); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; sfe = &sf->list[0]; - end = sf->hdr.count; + end = INT_GET(sf->hdr.count, ARCH_CONVERT); for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), base += size, i++) { size = XFS_ATTR_SF_ENTSIZE(sfe); @@ -299,11 +300,11 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) * Fix up the attribute fork data, covering the hole */ end = base + size; - totsize = be16_to_cpu(sf->hdr.totsize); + totsize = INT_GET(sf->hdr.totsize, ARCH_CONVERT); if (end != totsize) memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end); - sf->hdr.count--; - be16_add(&sf->hdr.totsize, -size); + INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); + INT_MOD(sf->hdr.totsize, ARCH_CONVERT, -size); /* * Fix up the start offset of the attribute fork @@ -359,7 +360,7 @@ xfs_attr_shortform_lookup(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < sf->hdr.count; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { if (sfe->namelen != args->namelen) continue; @@ -390,7 +391,7 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) ASSERT(args->dp->i_d.di_aformat == XFS_IFINLINE); sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data; sfe = &sf->list[0]; - for (i = 0; i < sf->hdr.count; + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { if (sfe->namelen != args->namelen) continue; @@ -403,14 +404,14 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) ((sfe->flags & XFS_ATTR_ROOT) != 0)) continue; if (args->flags & ATTR_KERNOVAL) { - args->valuelen = sfe->valuelen; + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); return(XFS_ERROR(EEXIST)); } - if (args->valuelen < sfe->valuelen) { - args->valuelen = sfe->valuelen; + if (args->valuelen < INT_GET(sfe->valuelen, ARCH_CONVERT)) { + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); return(XFS_ERROR(ERANGE)); } - args->valuelen = sfe->valuelen; + args->valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); memcpy(args->value, &sfe->nameval[args->namelen], args->valuelen); return(XFS_ERROR(EEXIST)); @@ -437,7 +438,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) dp = args->dp; ifp = dp->i_afp; sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; - size = be16_to_cpu(sf->hdr.totsize); + size = INT_GET(sf->hdr.totsize, ARCH_CONVERT); tmpbuffer = kmem_alloc(size, KM_SLEEP); ASSERT(tmpbuffer != NULL); memcpy(tmpbuffer, ifp->if_u1.if_data, size); @@ -480,11 +481,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) nargs.oknoent = 1; sfe = &sf->list[0]; - for (i = 0; i < sf->hdr.count; i++) { + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { nargs.name = (char *)sfe->nameval; nargs.namelen = sfe->namelen; nargs.value = (char *)&sfe->nameval[nargs.namelen]; - nargs.valuelen = sfe->valuelen; + nargs.valuelen = INT_GET(sfe->valuelen, ARCH_CONVERT); nargs.hashval = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); nargs.flags = (sfe->flags & 
XFS_ATTR_SECURE) ? ATTR_SECURE : @@ -513,9 +514,11 @@ xfs_attr_shortform_compare(const void *a, const void *b) sa = (xfs_attr_sf_sort_t *)a; sb = (xfs_attr_sf_sort_t *)b; - if (sa->hash < sb->hash) { + if (INT_GET(sa->hash, ARCH_CONVERT) + < INT_GET(sb->hash, ARCH_CONVERT)) { return(-1); - } else if (sa->hash > sb->hash) { + } else if (INT_GET(sa->hash, ARCH_CONVERT) + > INT_GET(sb->hash, ARCH_CONVERT)) { return(1); } else { return(sa->entno - sb->entno); @@ -557,8 +560,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * If the buffer is large enough, do not bother with sorting. * Note the generous fudge factor of 16 overhead bytes per entry. */ - if ((dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize) { - for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { + if ((dp->i_afp->if_bytes + INT_GET(sf->hdr.count, ARCH_CONVERT) * 16) + < context->bufsize) { + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { attrnames_t *namesp; if (((context->flags & ATTR_SECURE) != 0) != @@ -579,13 +584,14 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) if (context->flags & ATTR_KERNOVAL) { ASSERT(context->flags & ATTR_KERNAMELS); context->count += namesp->attr_namelen + - sfe->namelen + 1; + INT_GET(sfe->namelen, ARCH_CONVERT) + 1; } else { if (xfs_attr_put_listent(context, namesp, (char *)sfe->nameval, (int)sfe->namelen, - (int)sfe->valuelen)) + (int)INT_GET(sfe->valuelen, + ARCH_CONVERT))) break; } sfe = XFS_ATTR_SF_NEXTENTRY(sfe); @@ -597,7 +603,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) /* * It didn't all fit, so we have to sort everything on hashval. */ - sbsize = sf->hdr.count * sizeof(*sbuf); + sbsize = INT_GET(sf->hdr.count, ARCH_CONVERT) * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); /* @@ -605,7 +611,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * the relevant info from only those that match into a buffer. */ nsbuf = 0; - for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { @@ -629,7 +636,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) continue; } sbp->entno = i; - sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen); + INT_SET(sbp->hash, ARCH_CONVERT, + xfs_da_hashname((char *)sfe->nameval, sfe->namelen)); sbp->name = (char *)sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ @@ -652,12 +660,12 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { - if (sbp->hash == cursor->hashval) { + if (INT_GET(sbp->hash, ARCH_CONVERT) == cursor->hashval) { if (cursor->offset == count) { break; } count++; - } else if (sbp->hash > cursor->hashval) { + } else if (INT_GET(sbp->hash, ARCH_CONVERT) > cursor->hashval) { break; } } @@ -677,8 +685,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) ((sbp->flags & XFS_ATTR_ROOT) ? 
&attr_trusted : &attr_user); - if (cursor->hashval != sbp->hash) { - cursor->hashval = sbp->hash; + if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) { + cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT); cursor->offset = 0; } if (context->flags & ATTR_KERNOVAL) { @@ -688,7 +696,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) } else { if (xfs_attr_put_listent(context, namesp, sbp->name, sbp->namelen, - sbp->valuelen)) + INT_GET(sbp->valuelen, ARCH_CONVERT))) break; } cursor->offset++; @@ -712,11 +720,12 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) int bytes, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); entry = &leaf->entries[0]; bytes = sizeof(struct xfs_attr_sf_hdr); - for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!(entry->flags & XFS_ATTR_LOCAL)) @@ -724,11 +733,11 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i); if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); - if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) + if (INT_GET(name_loc->valuelen, ARCH_CONVERT) >= XFS_ATTR_SF_ENTSIZE_MAX) return(0); bytes += sizeof(struct xfs_attr_sf_entry)-1 + name_loc->namelen - + be16_to_cpu(name_loc->valuelen); + + INT_GET(name_loc->valuelen, ARCH_CONVERT); } if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) && (bytes == sizeof(struct xfs_attr_sf_hdr))) @@ -757,7 +766,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_attr_leafblock_t *)tmpbuffer; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -800,7 +810,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) nargs.trans = args->trans; nargs.oknoent = 1; entry = &leaf->entries[0]; - for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { if (entry->flags & XFS_ATTR_INCOMPLETE) continue; /* don't copy partial entries */ if (!entry->nameidx) @@ -810,8 +820,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) nargs.name = (char *)name_loc->nameval; nargs.namelen = name_loc->namelen; nargs.value = (char *)&name_loc->nameval[nargs.namelen]; - nargs.valuelen = be16_to_cpu(name_loc->valuelen); - nargs.hashval = be32_to_cpu(entry->hashval); + nargs.valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); + nargs.hashval = INT_GET(entry->hashval, ARCH_CONVERT); nargs.flags = (entry->flags & XFS_ATTR_SECURE) ? ATTR_SECURE : ((entry->flags & XFS_ATTR_ROOT) ? 
ATTR_ROOT : 0); xfs_attr_shortform_add(&nargs, forkoff); @@ -865,12 +875,13 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args) goto out; node = bp1->data; leaf = bp2->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); /* both on-disk, don't endian-flip twice */ node->btree[0].hashval = - leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval; - node->btree[0].before = cpu_to_be32(blkno); - node->hdr.count = cpu_to_be16(1); + leaf->entries[INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval; + INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 1); xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1); error = 0; out: @@ -909,16 +920,19 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) leaf = bp->data; memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); hdr = &leaf->hdr; - hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC); - hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount)); + INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_ATTR_LEAF_MAGIC); + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); if (!hdr->firstused) { - hdr->firstused = cpu_to_be16( + INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN); } - hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); - hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) - - sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr->firstused, ARCH_CONVERT) + - INT_GET(hdr->freemap[0].base, + ARCH_CONVERT)); xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1); @@ -990,9 +1004,10 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) int tablesize, entsize, sum, tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); ASSERT((args->index >= 0) - && (args->index <= be16_to_cpu(leaf->hdr.count))); + && (args->index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); hdr = &leaf->hdr; entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen, args->trans->t_mountp->m_sb.sb_blocksize, NULL); @@ -1001,25 +1016,26 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) * Search through freemap for first-fit on new name length. 
* (may need to figure in size of entry struct too) */ - tablesize = (be16_to_cpu(hdr->count) + 1) + tablesize = (INT_GET(hdr->count, ARCH_CONVERT) + 1) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1]; for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) { - if (tablesize > be16_to_cpu(hdr->firstused)) { - sum += be16_to_cpu(map->size); + if (tablesize > INT_GET(hdr->firstused, ARCH_CONVERT)) { + sum += INT_GET(map->size, ARCH_CONVERT); continue; } if (!map->size) continue; /* no space in this map */ tmp = entsize; - if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused)) + if (INT_GET(map->base, ARCH_CONVERT) + < INT_GET(hdr->firstused, ARCH_CONVERT)) tmp += sizeof(xfs_attr_leaf_entry_t); - if (be16_to_cpu(map->size) >= tmp) { + if (INT_GET(map->size, ARCH_CONVERT) >= tmp) { tmp = xfs_attr_leaf_add_work(bp, args, i); return(tmp); } - sum += be16_to_cpu(map->size); + sum += INT_GET(map->size, ARCH_CONVERT); } /* @@ -1040,7 +1056,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) * After compaction, the block is guaranteed to have only one * free region, in freemap[0]. If it is not big enough, give up. */ - if (be16_to_cpu(hdr->freemap[0].size) + if (INT_GET(hdr->freemap[0].size, ARCH_CONVERT) < (entsize + sizeof(xfs_attr_leaf_entry_t))) return(XFS_ERROR(ENOSPC)); @@ -1063,42 +1079,45 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) int tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); - ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count))); + ASSERT((args->index >= 0) + && (args->index <= INT_GET(hdr->count, ARCH_CONVERT))); /* * Force open some space in the entry array and fill it in. */ entry = &leaf->entries[args->index]; - if (args->index < be16_to_cpu(hdr->count)) { - tmp = be16_to_cpu(hdr->count) - args->index; + if (args->index < INT_GET(hdr->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr->count, ARCH_CONVERT) - args->index; tmp *= sizeof(xfs_attr_leaf_entry_t); memmove((char *)(entry+1), (char *)entry, tmp); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); } - be16_add(&hdr->count, 1); + INT_MOD(hdr->count, ARCH_CONVERT, 1); /* * Allocate space for the new string (at the end of the run). 
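 * The chosen freemap region is consumed from its high end: its size is
 * reduced by the new entry's on-disk size and the name is placed at the
 * new base+size boundary, so name/value data grows downward toward the
 * entry table at the front of the block. A minimal sketch of the
 * arithmetic, illustrative only and with the endian wrappers elided:
 *
 *     entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
 *                                        mp->m_sb.sb_blocksize, &local);
 *     map->size -= entsize;
 *     entry->nameidx = map->base + map->size;
 *     entry->hashval = args->hashval;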
*/ map = &hdr->freemap[mapindex]; mp = args->trans->t_mountp; - ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); - ASSERT((be16_to_cpu(map->base) & 0x3) == 0); - ASSERT(be16_to_cpu(map->size) >= + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT((INT_GET(map->base, ARCH_CONVERT) & 0x3) == 0); + ASSERT(INT_GET(map->size, ARCH_CONVERT) >= xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, NULL)); - ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); - ASSERT((be16_to_cpu(map->size) & 0x3) == 0); - be16_add(&map->size, + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT((INT_GET(map->size, ARCH_CONVERT) & 0x3) == 0); + INT_MOD(map->size, ARCH_CONVERT, -xfs_attr_leaf_newentsize(args->namelen, args->valuelen, mp->m_sb.sb_blocksize, &tmp)); - entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) + - be16_to_cpu(map->size)); - entry->hashval = cpu_to_be32(args->hashval); + INT_SET(entry->nameidx, ARCH_CONVERT, + INT_GET(map->base, ARCH_CONVERT) + + INT_GET(map->size, ARCH_CONVERT)); + INT_SET(entry->hashval, ARCH_CONVERT, args->hashval); entry->flags = tmp ? XFS_ATTR_LOCAL : 0; entry->flags |= (args->flags & ATTR_SECURE) ? XFS_ATTR_SECURE : ((args->flags & ATTR_ROOT) ? XFS_ATTR_ROOT : 0); @@ -1111,10 +1130,12 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) } xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry))); - ASSERT((args->index == 0) || - (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval))); - ASSERT((args->index == be16_to_cpu(hdr->count)-1) || - (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval))); + ASSERT((args->index == 0) || (INT_GET(entry->hashval, ARCH_CONVERT) + >= INT_GET((entry-1)->hashval, + ARCH_CONVERT))); + ASSERT((args->index == INT_GET(hdr->count, ARCH_CONVERT)-1) || + (INT_GET(entry->hashval, ARCH_CONVERT) + <= (INT_GET((entry+1)->hashval, ARCH_CONVERT)))); /* * Copy the attribute name and value into the new space. 
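[Editor's sketch, not part of the patch.] The hunks above all manipulate the same block layout: leaf entries grow upward from the header while the packed name/value bytes grow downward from the end of the block, with hdr.firstused tracking the low watermark of the name area and the freemap slots describing the largest holes in between. A minimal user-space sketch of the first-fit scan being converted here, with hypothetical types and helper names standing in for the on-disk structures, and ignoring the entry-table growth adjustment the real code makes:

struct free_region { unsigned short base, size; };	/* cf. xfs_attr_leaf_map_t */
#define MAPSIZE 3					/* XFS_ATTR_LEAF_MAPSIZE */

/*
 * Walk the freemap from the last slot back to the first and take the
 * first region large enough for the new entry; *sum collects the free
 * bytes seen, which tells the caller whether compacting the block
 * would create enough room if nothing fits.
 */
static int first_fit(const struct free_region map[MAPSIZE], int entsize, int *sum)
{
	int i;

	*sum = 0;
	for (i = MAPSIZE - 1; i >= 0; i--) {
		if (map[i].size >= entsize)
			return i;		/* use this hole */
		*sum += map[i].size;
	}
	return -1;				/* nothing fits; maybe compact */
}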
@@ -1128,10 +1149,10 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) if (entry->flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); name_loc->namelen = args->namelen; - name_loc->valuelen = cpu_to_be16(args->valuelen); + INT_SET(name_loc->valuelen, ARCH_CONVERT, args->valuelen); memcpy((char *)name_loc->nameval, args->name, args->namelen); memcpy((char *)&name_loc->nameval[args->namelen], args->value, - be16_to_cpu(name_loc->valuelen)); + INT_GET(name_loc->valuelen, ARCH_CONVERT)); } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); name_rmt->namelen = args->namelen; @@ -1150,23 +1171,28 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) /* * Update the control info for this leaf node */ - if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) { + if (INT_GET(entry->nameidx, ARCH_CONVERT) + < INT_GET(hdr->firstused, ARCH_CONVERT)) { /* both on-disk, don't endian-flip twice */ hdr->firstused = entry->nameidx; } - ASSERT(be16_to_cpu(hdr->firstused) >= - ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr))); - tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t) + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) + >= ((INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(*entry))+sizeof(*hdr))); + tmp = (INT_GET(hdr->count, ARCH_CONVERT)-1) + * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[0]; for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { - if (be16_to_cpu(map->base) == tmp) { - be16_add(&map->base, sizeof(xfs_attr_leaf_entry_t)); - be16_add(&map->size, - -((int)sizeof(xfs_attr_leaf_entry_t))); + if (INT_GET(map->base, ARCH_CONVERT) == tmp) { + INT_MOD(map->base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, + -sizeof(xfs_attr_leaf_entry_t)); } } - be16_add(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index)); + INT_MOD(hdr->usedbytes, ARCH_CONVERT, + xfs_attr_leaf_entsize(leaf, args->index)); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr))); return(0); @@ -1197,25 +1223,28 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp) hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; hdr_d->info = hdr_s->info; /* struct copy */ - hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp)); + INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp)); /* handle truncation gracefully */ if (!hdr_d->firstused) { - hdr_d->firstused = cpu_to_be16( + INT_SET(hdr_d->firstused, ARCH_CONVERT, XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN); } hdr_d->usedbytes = 0; hdr_d->count = 0; hdr_d->holes = 0; - hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); - hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) - - sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr_d->firstused, ARCH_CONVERT) + - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); /* * Copy all entry's in the same (sorted) order, * but allocate name/value pairs packed and in sequence. 
*/ xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0, - be16_to_cpu(hdr_s->count), mp); + (int)INT_GET(hdr_s->count, ARCH_CONVERT), mp); + xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1); kmem_free(tmpbuffer, XFS_LBSIZE(mp)); @@ -1250,8 +1279,10 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); args = state->args; /* @@ -1288,21 +1319,22 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move any entries required from leaf to leaf: */ - if (count < be16_to_cpu(hdr1->count)) { + if (count < INT_GET(hdr1->count, ARCH_CONVERT)) { /* * Figure the total bytes to be added to the destination leaf. */ /* number entries being moved */ - count = be16_to_cpu(hdr1->count) - count; - space = be16_to_cpu(hdr1->usedbytes) - totallen; + count = INT_GET(hdr1->count, ARCH_CONVERT) - count; + space = INT_GET(hdr1->usedbytes, ARCH_CONVERT) - totallen; space += count * sizeof(xfs_attr_leaf_entry_t); /* * leaf2 is the destination, compact it if it looks tight. */ - max = be16_to_cpu(hdr2->firstused) + max = INT_GET(hdr2->firstused, ARCH_CONVERT) - sizeof(xfs_attr_leaf_hdr_t); - max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t); + max -= INT_GET(hdr2->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); if (space > max) { xfs_attr_leaf_compact(args->trans, blk2->bp); } @@ -1310,12 +1342,13 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move high entries from leaf1 to low end of leaf2. */ - xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count, + xfs_attr_leaf_moveents(leaf1, + INT_GET(hdr1->count, ARCH_CONVERT)-count, leaf2, 0, count, state->mp); xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); - } else if (count > be16_to_cpu(hdr1->count)) { + } else if (count > INT_GET(hdr1->count, ARCH_CONVERT)) { /* * I assert that since all callers pass in an empty * second buffer, this code should never execute. @@ -1325,16 +1358,17 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Figure the total bytes to be added to the destination leaf. */ /* number entries being moved */ - count -= be16_to_cpu(hdr1->count); - space = totallen - be16_to_cpu(hdr1->usedbytes); + count -= INT_GET(hdr1->count, ARCH_CONVERT); + space = totallen - INT_GET(hdr1->usedbytes, ARCH_CONVERT); space += count * sizeof(xfs_attr_leaf_entry_t); /* * leaf1 is the destination, compact it if it looks tight. */ - max = be16_to_cpu(hdr1->firstused) + max = INT_GET(hdr1->firstused, ARCH_CONVERT) - sizeof(xfs_attr_leaf_hdr_t); - max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t); + max -= INT_GET(hdr1->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); if (space > max) { xfs_attr_leaf_compact(args->trans, blk1->bp); } @@ -1343,7 +1377,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Move low entries from leaf2 to high end of leaf1. 
*/ xfs_attr_leaf_moveents(leaf2, 0, leaf1, - be16_to_cpu(hdr1->count), count, state->mp); + (int)INT_GET(hdr1->count, ARCH_CONVERT), count, + state->mp); xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1); xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1); @@ -1352,10 +1387,12 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Copy out last hashval in each block for B-tree code. */ - blk1->hashval = be32_to_cpu( - leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval); - blk2->hashval = be32_to_cpu( - leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval); + blk1->hashval = + INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT); + blk2->hashval = + INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT); /* * Adjust the expected index for insertion. @@ -1369,12 +1406,13 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * inserting. The index/blkno fields refer to the "old" entry, * while the index2/blkno2 fields refer to the "new" entry. */ - if (blk1->index > be16_to_cpu(leaf1->hdr.count)) { + if (blk1->index > INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { ASSERT(state->inleaf == 0); - blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); + blk2->index = blk1->index + - INT_GET(leaf1->hdr.count, ARCH_CONVERT); args->index = args->index2 = blk2->index; args->blkno = args->blkno2 = blk2->blkno; - } else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) { + } else if (blk1->index == INT_GET(leaf1->hdr.count, ARCH_CONVERT)) { if (state->inleaf) { args->index = blk1->index; args->blkno = blk1->blkno; @@ -1382,7 +1420,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, args->blkno2 = blk2->blkno; } else { blk2->index = blk1->index - - be16_to_cpu(leaf1->hdr.count); + - INT_GET(leaf1->hdr.count, ARCH_CONVERT); args->index = args->index2 = blk2->index; args->blkno = args->blkno2 = blk2->blkno; } @@ -1426,14 +1464,15 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state, * Examine entries until we reduce the absolute difference in * byte usage between the two blocks to a minimum. */ - max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count); + max = INT_GET(hdr1->count, ARCH_CONVERT) + + INT_GET(hdr2->count, ARCH_CONVERT); half = (max+1) * sizeof(*entry); - half += be16_to_cpu(hdr1->usedbytes) + - be16_to_cpu(hdr2->usedbytes) + - xfs_attr_leaf_newentsize( - state->args->namelen, - state->args->valuelen, - state->blocksize, NULL); + half += INT_GET(hdr1->usedbytes, ARCH_CONVERT) + + INT_GET(hdr2->usedbytes, ARCH_CONVERT) + + xfs_attr_leaf_newentsize( + state->args->namelen, + state->args->valuelen, + state->blocksize, NULL); half /= 2; lastdelta = state->blocksize; entry = &leaf1->entries[0]; @@ -1459,7 +1498,7 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state, /* * Wrap around into the second block if necessary. 
*/ - if (count == be16_to_cpu(hdr1->count)) { + if (count == INT_GET(hdr1->count, ARCH_CONVERT)) { leaf1 = leaf2; entry = &leaf1->entries[0]; index = 0; @@ -1527,12 +1566,12 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); leaf = (xfs_attr_leafblock_t *)info; - count = be16_to_cpu(leaf->hdr.count); + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes = sizeof(xfs_attr_leaf_hdr_t) + count * sizeof(xfs_attr_leaf_entry_t) + - be16_to_cpu(leaf->hdr.usedbytes); + INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); if (bytes > (state->blocksize >> 1)) { *action = 0; /* blk over 50%, don't try to join */ return(0); @@ -1549,7 +1588,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = (info->forw != 0); + forward = info->forw; memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -1571,12 +1610,13 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) * to shrink an attribute list over time. */ /* start with smaller blk num */ - forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); + forward = (INT_GET(info->forw, ARCH_CONVERT) + < INT_GET(info->back, ARCH_CONVERT)); for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = be32_to_cpu(info->forw); + blkno = INT_GET(info->forw, ARCH_CONVERT); else - blkno = be32_to_cpu(info->back); + blkno = INT_GET(info->back, ARCH_CONVERT); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -1586,13 +1626,14 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) ASSERT(bp != NULL); leaf = (xfs_attr_leafblock_t *)info; - count = be16_to_cpu(leaf->hdr.count); + count = INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes = state->blocksize - (state->blocksize>>2); - bytes -= be16_to_cpu(leaf->hdr.usedbytes); + bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - count += be16_to_cpu(leaf->hdr.count); - bytes -= be16_to_cpu(leaf->hdr.usedbytes); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT); + bytes -= INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); bytes -= count * sizeof(xfs_attr_leaf_entry_t); bytes -= sizeof(xfs_attr_leaf_hdr_t); xfs_da_brelse(state->args->trans, bp); @@ -1644,18 +1685,21 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_mount_t *mp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); hdr = &leaf->hdr; mp = args->trans->t_mountp; - ASSERT((be16_to_cpu(hdr->count) > 0) - && (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8))); + ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) + && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); ASSERT((args->index >= 0) - && (args->index < be16_to_cpu(hdr->count))); - ASSERT(be16_to_cpu(hdr->firstused) >= - ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr))); + && (args->index < INT_GET(hdr->count, ARCH_CONVERT))); + ASSERT(INT_GET(hdr->firstused, ARCH_CONVERT) + >= ((INT_GET(hdr->count, ARCH_CONVERT) + * 
sizeof(*entry))+sizeof(*hdr))); entry = &leaf->entries[args->index]; - ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused)); - ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) < XFS_LBSIZE(mp)); /* * Scan through free region table: @@ -1663,30 +1707,33 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * find smallest free region in case we need to replace it, * adjust any map that borders the entry table, */ - tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t) + tablesize = INT_GET(hdr->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); map = &hdr->freemap[0]; - tmp = be16_to_cpu(map->size); + tmp = INT_GET(map->size, ARCH_CONVERT); before = after = -1; smallest = XFS_ATTR_LEAF_MAPSIZE - 1; entsize = xfs_attr_leaf_entsize(leaf, args->index); for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) { - ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp)); - ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp)); - if (be16_to_cpu(map->base) == tablesize) { - be16_add(&map->base, - -((int)sizeof(xfs_attr_leaf_entry_t))); - be16_add(&map->size, sizeof(xfs_attr_leaf_entry_t)); + ASSERT(INT_GET(map->base, ARCH_CONVERT) < XFS_LBSIZE(mp)); + ASSERT(INT_GET(map->size, ARCH_CONVERT) < XFS_LBSIZE(mp)); + if (INT_GET(map->base, ARCH_CONVERT) == tablesize) { + INT_MOD(map->base, ARCH_CONVERT, + -sizeof(xfs_attr_leaf_entry_t)); + INT_MOD(map->size, ARCH_CONVERT, + sizeof(xfs_attr_leaf_entry_t)); } - if ((be16_to_cpu(map->base) + be16_to_cpu(map->size)) - == be16_to_cpu(entry->nameidx)) { + if ((INT_GET(map->base, ARCH_CONVERT) + + INT_GET(map->size, ARCH_CONVERT)) + == INT_GET(entry->nameidx, ARCH_CONVERT)) { before = i; - } else if (be16_to_cpu(map->base) - == (be16_to_cpu(entry->nameidx) + entsize)) { + } else if (INT_GET(map->base, ARCH_CONVERT) + == (INT_GET(entry->nameidx, ARCH_CONVERT) + entsize)) { after = i; - } else if (be16_to_cpu(map->size) < tmp) { - tmp = be16_to_cpu(map->size); + } else if (INT_GET(map->size, ARCH_CONVERT) < tmp) { + tmp = INT_GET(map->size, ARCH_CONVERT); smallest = i; } } @@ -1698,35 +1745,38 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) if ((before >= 0) || (after >= 0)) { if ((before >= 0) && (after >= 0)) { map = &hdr->freemap[before]; - be16_add(&map->size, entsize); - be16_add(&map->size, - be16_to_cpu(hdr->freemap[after].size)); + INT_MOD(map->size, ARCH_CONVERT, entsize); + INT_MOD(map->size, ARCH_CONVERT, + INT_GET(hdr->freemap[after].size, + ARCH_CONVERT)); hdr->freemap[after].base = 0; hdr->freemap[after].size = 0; } else if (before >= 0) { map = &hdr->freemap[before]; - be16_add(&map->size, entsize); + INT_MOD(map->size, ARCH_CONVERT, entsize); } else { map = &hdr->freemap[after]; /* both on-disk, don't endian flip twice */ map->base = entry->nameidx; - be16_add(&map->size, entsize); + INT_MOD(map->size, ARCH_CONVERT, entsize); } } else { /* * Replace smallest region (if it is smaller than free'd entry) */ map = &hdr->freemap[smallest]; - if (be16_to_cpu(map->size) < entsize) { - map->base = cpu_to_be16(be16_to_cpu(entry->nameidx)); - map->size = cpu_to_be16(entsize); + if (INT_GET(map->size, ARCH_CONVERT) < entsize) { + INT_SET(map->base, ARCH_CONVERT, + INT_GET(entry->nameidx, ARCH_CONVERT)); + INT_SET(map->size, ARCH_CONVERT, entsize); } } /* * Did we remove the first entry? 
*/ - if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused)) + if (INT_GET(entry->nameidx, ARCH_CONVERT) + == INT_GET(hdr->firstused, ARCH_CONVERT)) smallest = 1; else smallest = 0; @@ -1735,18 +1785,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * Compress the remaining entries and zero out the removed stuff. */ memset(XFS_ATTR_LEAF_NAME(leaf, args->index), 0, entsize); - be16_add(&hdr->usedbytes, -entsize); + INT_MOD(hdr->usedbytes, ARCH_CONVERT, -entsize); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, XFS_ATTR_LEAF_NAME(leaf, args->index), entsize)); - tmp = (be16_to_cpu(hdr->count) - args->index) + tmp = (INT_GET(hdr->count, ARCH_CONVERT) - args->index) * sizeof(xfs_attr_leaf_entry_t); memmove((char *)entry, (char *)(entry+1), tmp); - be16_add(&hdr->count, -1); + INT_MOD(hdr->count, ARCH_CONVERT, -1); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry))); - entry = &leaf->entries[be16_to_cpu(hdr->count)]; + entry = &leaf->entries[INT_GET(hdr->count, ARCH_CONVERT)]; memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t)); /* @@ -1758,17 +1808,18 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) if (smallest) { tmp = XFS_LBSIZE(mp); entry = &leaf->entries[0]; - for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) { - ASSERT(be16_to_cpu(entry->nameidx) >= - be16_to_cpu(hdr->firstused)); - ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp)); - - if (be16_to_cpu(entry->nameidx) < tmp) - tmp = be16_to_cpu(entry->nameidx); + for (i = INT_GET(hdr->count, ARCH_CONVERT)-1; + i >= 0; entry++, i--) { + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + >= INT_GET(hdr->firstused, ARCH_CONVERT)); + ASSERT(INT_GET(entry->nameidx, ARCH_CONVERT) + < XFS_LBSIZE(mp)); + if (INT_GET(entry->nameidx, ARCH_CONVERT) < tmp) + tmp = INT_GET(entry->nameidx, ARCH_CONVERT); } - hdr->firstused = cpu_to_be16(tmp); + INT_SET(hdr->firstused, ARCH_CONVERT, tmp); if (!hdr->firstused) { - hdr->firstused = cpu_to_be16( + INT_SET(hdr->firstused, ARCH_CONVERT, tmp - XFS_ATTR_LEAF_NAME_ALIGN); } } else { @@ -1782,8 +1833,9 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) * "join" the leaf with a sibling if so. */ tmp = sizeof(xfs_attr_leaf_hdr_t); - tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t); - tmp += be16_to_cpu(leaf->hdr.usedbytes); + tmp += INT_GET(leaf->hdr.count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t); + tmp += INT_GET(leaf->hdr.usedbytes, ARCH_CONVERT); return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */ } @@ -1807,16 +1859,20 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; /* * Save last hashval from dying block for later Btree fixup. */ - drop_blk->hashval = be32_to_cpu( - drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval); + drop_blk->hashval = + INT_GET(drop_leaf->entries[INT_GET(drop_leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT); /* * Check if we need a temp buffer, or can we do it in place. 
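[Editor's sketch, not part of the patch.] The remove path converted above is a tiny free-space allocator in reverse: the freed name/value bytes are merged into a freemap region that ends where they start ("before"), into one that starts where they end ("after"), or, failing both, displace the smallest region if they are bigger than it. The same policy on plain types (hypothetical names; the real code also keeps the region bordering the entry table in sync):

struct free_region { unsigned short base, size; };

/* Return freed bytes [off, off+len) to a 3-slot free map. */
static void free_entry(struct free_region map[3], unsigned short off,
		       unsigned short len)
{
	int i, before = -1, after = -1, smallest = 0;

	for (i = 0; i < 3; i++) {
		if (map[i].base + map[i].size == off)
			before = i;		/* region ends at our start */
		else if (map[i].base == off + len)
			after = i;		/* region starts at our end */
		if (map[i].size < map[smallest].size)
			smallest = i;
	}
	if (before >= 0 && after >= 0) {	/* bridge two regions */
		map[before].size += len + map[after].size;
		map[after].base = 0;
		map[after].size = 0;
	} else if (before >= 0) {
		map[before].size += len;
	} else if (after >= 0) {
		map[after].base = off;
		map[after].size += len;
	} else if (map[smallest].size < len) {	/* remember the bigger hole */
		map[smallest].base = off;
		map[smallest].size = len;
	}
}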
@@ -1830,11 +1886,12 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, */ if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0, - be16_to_cpu(drop_hdr->count), mp); + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), mp); } else { xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, - be16_to_cpu(save_hdr->count), - be16_to_cpu(drop_hdr->count), mp); + INT_GET(save_hdr->count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); } } else { /* @@ -1848,24 +1905,28 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, tmp_hdr = &tmp_leaf->hdr; tmp_hdr->info = save_hdr->info; /* struct copy */ tmp_hdr->count = 0; - tmp_hdr->firstused = cpu_to_be16(state->blocksize); + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize); if (!tmp_hdr->firstused) { - tmp_hdr->firstused = cpu_to_be16( + INT_SET(tmp_hdr->firstused, ARCH_CONVERT, state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN); } tmp_hdr->usedbytes = 0; if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) { xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0, - be16_to_cpu(drop_hdr->count), mp); + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, - be16_to_cpu(tmp_leaf->hdr.count), - be16_to_cpu(save_hdr->count), mp); + INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(save_hdr->count, ARCH_CONVERT), + mp); } else { xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0, - be16_to_cpu(save_hdr->count), mp); + (int)INT_GET(save_hdr->count, ARCH_CONVERT), + mp); xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, - be16_to_cpu(tmp_leaf->hdr.count), - be16_to_cpu(drop_hdr->count), mp); + INT_GET(tmp_leaf->hdr.count, ARCH_CONVERT), + (int)INT_GET(drop_hdr->count, ARCH_CONVERT), + mp); } memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize); kmem_free(tmpbuffer, state->blocksize); @@ -1877,8 +1938,10 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, /* * Copy out last hashval in each block for B-tree code. */ - save_blk->hashval = be32_to_cpu( - save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval); + save_blk->hashval = + INT_GET(save_leaf->entries[INT_GET(save_leaf->hdr.count, + ARCH_CONVERT)-1].hashval, + ARCH_CONVERT); } /*======================================================================== @@ -1909,45 +1972,48 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf->hdr.count) + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); /* * Binary search. 
(note: small blocks will skip this loop) */ hashval = args->hashval; - probe = span = be16_to_cpu(leaf->hdr.count) / 2; + probe = span = INT_GET(leaf->hdr.count, ARCH_CONVERT) / 2; for (entry = &leaf->entries[probe]; span > 4; entry = &leaf->entries[probe]) { span /= 2; - if (be32_to_cpu(entry->hashval) < hashval) + if (INT_GET(entry->hashval, ARCH_CONVERT) < hashval) probe += span; - else if (be32_to_cpu(entry->hashval) > hashval) + else if (INT_GET(entry->hashval, ARCH_CONVERT) > hashval) probe -= span; else break; } ASSERT((probe >= 0) && (!leaf->hdr.count - || (probe < be16_to_cpu(leaf->hdr.count)))); - ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval)); + || (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)))); + ASSERT((span <= 4) || (INT_GET(entry->hashval, ARCH_CONVERT) + == hashval)); /* * Since we may have duplicate hashval's, find the first matching * hashval in the leaf. */ - while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) { + while ((probe > 0) && (INT_GET(entry->hashval, ARCH_CONVERT) + >= hashval)) { entry--; probe--; } - while ((probe < be16_to_cpu(leaf->hdr.count)) && - (be32_to_cpu(entry->hashval) < hashval)) { + while ((probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (INT_GET(entry->hashval, ARCH_CONVERT) < hashval)) { entry++; probe++; } - if ((probe == be16_to_cpu(leaf->hdr.count)) || - (be32_to_cpu(entry->hashval) != hashval)) { + if ((probe == INT_GET(leaf->hdr.count, ARCH_CONVERT)) + || (INT_GET(entry->hashval, ARCH_CONVERT) != hashval)) { args->index = probe; return(XFS_ERROR(ENOATTR)); } @@ -1955,8 +2021,8 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) /* * Duplicate keys may be present, so search all of them for a match. */ - for ( ; (probe < be16_to_cpu(leaf->hdr.count)) && - (be32_to_cpu(entry->hashval) == hashval); + for ( ; (probe < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + && (INT_GET(entry->hashval, ARCH_CONVERT) == hashval); entry++, probe++) { /* * GROT: Add code to remove incomplete entries. 
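[Editor's sketch, not part of the patch.] The lookup converted above is a binary search that deliberately stops while the span is still small and then walks linearly, because distinct attribute names may hash to the same value and the caller needs the first of the duplicates. The same shape on a plain sorted array, with hypothetical names:

/*
 * Coarse binary search on a sorted hash array, then rewind to the
 * first entry with the wanted hash.  Returns its index, or -1 if the
 * hash is absent (cf. the ENOATTR case above).
 */
static int find_first(const unsigned int *hash, int count, unsigned int want)
{
	int probe = count / 2, span = probe;

	while (span > 4) {			/* small blocks skip this loop */
		span /= 2;
		if (hash[probe] < want)
			probe += span;
		else if (hash[probe] > want)
			probe -= span;
		else
			break;
	}
	while (probe > 0 && hash[probe] >= want)	/* back over duplicates */
		probe--;
	while (probe < count && hash[probe] < want)	/* forward to first match */
		probe++;
	return (probe < count && hash[probe] == want) ? probe : -1;
}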
@@ -1998,9 +2064,11 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) ((entry->flags & XFS_ATTR_ROOT) != 0)) continue; args->index = probe; - args->rmtblkno = be32_to_cpu(name_rmt->valueblk); + args->rmtblkno + = INT_GET(name_rmt->valueblk, ARCH_CONVERT); args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, - be32_to_cpu(name_rmt->valuelen)); + INT_GET(name_rmt->valuelen, + ARCH_CONVERT)); return(XFS_ERROR(EEXIST)); } } @@ -2022,17 +2090,18 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_attr_leaf_name_remote_t *name_rmt; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf->hdr.count) + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); - ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); + ASSERT(args->index < ((int)INT_GET(leaf->hdr.count, ARCH_CONVERT))); entry = &leaf->entries[args->index]; if (entry->flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, args->index); ASSERT(name_loc->namelen == args->namelen); ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0); - valuelen = be16_to_cpu(name_loc->valuelen); + valuelen = INT_GET(name_loc->valuelen, ARCH_CONVERT); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; return(0); @@ -2047,8 +2116,8 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); ASSERT(name_rmt->namelen == args->namelen); ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0); - valuelen = be32_to_cpu(name_rmt->valuelen); - args->rmtblkno = be32_to_cpu(name_rmt->valueblk); + valuelen = INT_GET(name_rmt->valuelen, ARCH_CONVERT); + args->rmtblkno = INT_GET(name_rmt->valueblk, ARCH_CONVERT); args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen); if (args->flags & ATTR_KERNOVAL) { args->valuelen = valuelen; @@ -2090,29 +2159,32 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Set up environment. 
*/ - ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; - ASSERT((be16_to_cpu(hdr_s->count) > 0) && - (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8))); - ASSERT(be16_to_cpu(hdr_s->firstused) >= - ((be16_to_cpu(hdr_s->count) + ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) + && (INT_GET(hdr_s->count, ARCH_CONVERT) + < (XFS_LBSIZE(mp)/8))); + ASSERT(INT_GET(hdr_s->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_s->count, ARCH_CONVERT) * sizeof(*entry_s))+sizeof(*hdr_s))); - ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8)); - ASSERT(be16_to_cpu(hdr_d->firstused) >= - ((be16_to_cpu(hdr_d->count) + ASSERT(INT_GET(hdr_d->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8)); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= + ((INT_GET(hdr_d->count, ARCH_CONVERT) * sizeof(*entry_d))+sizeof(*hdr_d))); - ASSERT(start_s < be16_to_cpu(hdr_s->count)); - ASSERT(start_d <= be16_to_cpu(hdr_d->count)); - ASSERT(count <= be16_to_cpu(hdr_s->count)); + ASSERT(start_s < INT_GET(hdr_s->count, ARCH_CONVERT)); + ASSERT(start_d <= INT_GET(hdr_d->count, ARCH_CONVERT)); + ASSERT(count <= INT_GET(hdr_s->count, ARCH_CONVERT)); /* * Move the entries in the destination leaf up to make a hole? */ - if (start_d < be16_to_cpu(hdr_d->count)) { - tmp = be16_to_cpu(hdr_d->count) - start_d; + if (start_d < INT_GET(hdr_d->count, ARCH_CONVERT)) { + tmp = INT_GET(hdr_d->count, ARCH_CONVERT) - start_d; tmp *= sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_d->entries[start_d]; entry_d = &leaf_d->entries[start_d + count]; @@ -2127,8 +2199,8 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, entry_d = &leaf_d->entries[start_d]; desti = start_d; for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) { - ASSERT(be16_to_cpu(entry_s->nameidx) - >= be16_to_cpu(hdr_s->firstused)); + ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + >= INT_GET(hdr_s->firstused, ARCH_CONVERT)); tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i); #ifdef GROT /* @@ -2138,35 +2210,35 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, */ if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? 
*/ memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); - be16_add(&hdr_s->usedbytes, -tmp); - be16_add(&hdr_s->count, -1); + INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); + INT_MOD(hdr_s->count, ARCH_CONVERT, -1); entry_d--; /* to compensate for ++ in loop hdr */ desti--; if ((start_s + i) < offset) result++; /* insertion index adjustment */ } else { #endif /* GROT */ - be16_add(&hdr_d->firstused, -tmp); + INT_MOD(hdr_d->firstused, ARCH_CONVERT, -tmp); /* both on-disk, don't endian flip twice */ entry_d->hashval = entry_s->hashval; /* both on-disk, don't endian flip twice */ entry_d->nameidx = hdr_d->firstused; entry_d->flags = entry_s->flags; - ASSERT(be16_to_cpu(entry_d->nameidx) + tmp + ASSERT(INT_GET(entry_d->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp)); memmove(XFS_ATTR_LEAF_NAME(leaf_d, desti), XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), tmp); - ASSERT(be16_to_cpu(entry_s->nameidx) + tmp + ASSERT(INT_GET(entry_s->nameidx, ARCH_CONVERT) + tmp <= XFS_LBSIZE(mp)); memset(XFS_ATTR_LEAF_NAME(leaf_s, start_s + i), 0, tmp); - be16_add(&hdr_s->usedbytes, -tmp); - be16_add(&hdr_d->usedbytes, tmp); - be16_add(&hdr_s->count, -1); - be16_add(&hdr_d->count, 1); - tmp = be16_to_cpu(hdr_d->count) + INT_MOD(hdr_s->usedbytes, ARCH_CONVERT, -tmp); + INT_MOD(hdr_d->usedbytes, ARCH_CONVERT, tmp); + INT_MOD(hdr_s->count, ARCH_CONVERT, -1); + INT_MOD(hdr_d->count, ARCH_CONVERT, 1); + tmp = INT_GET(hdr_d->count, ARCH_CONVERT) * sizeof(xfs_attr_leaf_entry_t) + sizeof(xfs_attr_leaf_hdr_t); - ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp); + ASSERT(INT_GET(hdr_d->firstused, ARCH_CONVERT) >= tmp); #ifdef GROT } #endif /* GROT */ @@ -2175,7 +2247,7 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Zero out the entries we just copied. */ - if (start_s == be16_to_cpu(hdr_s->count)) { + if (start_s == INT_GET(hdr_s->count, ARCH_CONVERT)) { tmp = count * sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_s->entries[start_s]; ASSERT(((char *)entry_s + tmp) <= @@ -2186,14 +2258,15 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, * Move the remaining entries down to fill the hole, * then zero the entries at the top. 
*/ - tmp = be16_to_cpu(hdr_s->count) - count; + tmp = INT_GET(hdr_s->count, ARCH_CONVERT) - count; tmp *= sizeof(xfs_attr_leaf_entry_t); entry_s = &leaf_s->entries[start_s + count]; entry_d = &leaf_s->entries[start_s]; memmove((char *)entry_d, (char *)entry_s, tmp); tmp = count * sizeof(xfs_attr_leaf_entry_t); - entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)]; + entry_s = &leaf_s->entries[INT_GET(hdr_s->count, + ARCH_CONVERT)]; ASSERT(((char *)entry_s + tmp) <= ((char *)leaf_s + XFS_LBSIZE(mp))); memset((char *)entry_s, 0, tmp); @@ -2202,11 +2275,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Fill in the freemap information */ - hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t)); - be16_add(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) * - sizeof(xfs_attr_leaf_entry_t)); - hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) - - be16_to_cpu(hdr_d->freemap[0].base)); + INT_SET(hdr_d->freemap[0].base, ARCH_CONVERT, + sizeof(xfs_attr_leaf_hdr_t)); + INT_MOD(hdr_d->freemap[0].base, ARCH_CONVERT, + INT_GET(hdr_d->count, ARCH_CONVERT) + * sizeof(xfs_attr_leaf_entry_t)); + INT_SET(hdr_d->freemap[0].size, ARCH_CONVERT, + INT_GET(hdr_d->firstused, ARCH_CONVERT) + - INT_GET(hdr_d->freemap[0].base, ARCH_CONVERT)); hdr_d->freemap[1].base = 0; hdr_d->freemap[2].base = 0; hdr_d->freemap[1].size = 0; @@ -2225,16 +2301,18 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) && - (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC)); - if ((be16_to_cpu(leaf1->hdr.count) > 0) && - (be16_to_cpu(leaf2->hdr.count) > 0) && - ((be32_to_cpu(leaf2->entries[0].hashval) < - be32_to_cpu(leaf1->entries[0].hashval)) || - (be32_to_cpu(leaf2->entries[ - be16_to_cpu(leaf2->hdr.count)-1].hashval) < - be32_to_cpu(leaf1->entries[ - be16_to_cpu(leaf1->hdr.count)-1].hashval)))) { + ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) && + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC)); + if ( (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) + && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) + && ( (INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) + || (INT_GET(leaf2->entries[INT_GET(leaf2->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT) < + INT_GET(leaf1->entries[INT_GET(leaf1->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT))) ) { return(1); } return(0); @@ -2249,12 +2327,14 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_attr_leafblock_t *leaf; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); if (count) - *count = be16_to_cpu(leaf->hdr.count); + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); if (!leaf->hdr.count) return(0); - return be32_to_cpu(leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval); + return(INT_GET(leaf->entries[INT_GET(leaf->hdr.count, + ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); } /* @@ -2268,11 +2348,13 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) xfs_attr_leaf_name_remote_t *name_rmt; int size; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, index); 
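/*
 * (Editorial sketch, not part of the patch.)  The two entry-size
 * formulas computed here fall straight out of the on-disk structures
 * declared in xfs_attr_leaf.h further below: a "local" entry packs its
 * value next to the name, while a "remote" entry stores only the block
 * number and length of an out-of-line value.  Any rounding to
 * XFS_ATTR_LEAF_NAME_ALIGN is left out for brevity; helper names are
 * hypothetical.
 */
static int entsize_local(int namelen, int valuelen)
{
	/* 2-byte valuelen + 1-byte namelen, then name and value bytes */
	return 2 + 1 + namelen + valuelen;
}

static int entsize_remote(int namelen)
{
	/* 4-byte valueblk + 4-byte valuelen + 1-byte namelen + name bytes */
	return 4 + 4 + 1 + namelen;
}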
size = XFS_ATTR_LEAF_ENTSIZE_LOCAL(name_loc->namelen, - be16_to_cpu(name_loc->valuelen)); + INT_GET(name_loc->valuelen, + ARCH_CONVERT)); } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, index); size = XFS_ATTR_LEAF_ENTSIZE_REMOTE(name_rmt->namelen); @@ -2330,20 +2412,22 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) */ if (context->resynch) { entry = &leaf->entries[0]; - for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { - if (be32_to_cpu(entry->hashval) == cursor->hashval) { + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); + entry++, i++) { + if (INT_GET(entry->hashval, ARCH_CONVERT) + == cursor->hashval) { if (cursor->offset == context->dupcnt) { context->dupcnt = 0; break; } context->dupcnt++; - } else if (be32_to_cpu(entry->hashval) > - cursor->hashval) { + } else if (INT_GET(entry->hashval, ARCH_CONVERT) + > cursor->hashval) { context->dupcnt = 0; break; } } - if (i == be16_to_cpu(leaf->hdr.count)) { + if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { xfs_attr_trace_l_c("not found", context); return(0); } @@ -2357,12 +2441,12 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) * We have found our place, start copying out the new attributes. */ retval = 0; - for ( ; (i < be16_to_cpu(leaf->hdr.count)) + for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT)) && (retval == 0); entry++, i++) { attrnames_t *namesp; - if (be32_to_cpu(entry->hashval) != cursor->hashval) { - cursor->hashval = be32_to_cpu(entry->hashval); + if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) { + cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT); cursor->offset = 0; } @@ -2391,7 +2475,8 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = xfs_attr_put_listent(context, namesp, (char *)name_loc->nameval, (int)name_loc->namelen, - be16_to_cpu(name_loc->valuelen)); + (int)INT_GET(name_loc->valuelen, + ARCH_CONVERT)); } } else { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); @@ -2403,7 +2488,8 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) retval = xfs_attr_put_listent(context, namesp, (char *)name_rmt->name, (int)name_rmt->namelen, - be32_to_cpu(name_rmt->valuelen)); + (int)INT_GET(name_rmt->valuelen, + ARCH_CONVERT)); } } if (retval == 0) { @@ -2510,8 +2596,9 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; ASSERT(entry->flags & XFS_ATTR_INCOMPLETE); @@ -2526,7 +2613,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) namelen = name_rmt->namelen; name = (char *)name_rmt->name; } - ASSERT(be32_to_cpu(entry->hashval) == args->hashval); + ASSERT(INT_GET(entry->hashval, ARCH_CONVERT) == args->hashval); ASSERT(namelen == args->namelen); ASSERT(memcmp(name, args->name, namelen) == 0); #endif /* DEBUG */ @@ -2538,8 +2625,8 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) if (args->rmtblkno) { ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0); name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, args->index); - name_rmt->valueblk = cpu_to_be32(args->rmtblkno); - name_rmt->valuelen = cpu_to_be32(args->valuelen); + INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); + INT_SET(name_rmt->valuelen, ARCH_CONVERT, 
args->valuelen); xfs_da_log_buf(args->trans, bp, XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); } @@ -2576,8 +2663,9 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf->hdr.count, ARCH_CONVERT)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2648,14 +2736,16 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) } leaf1 = bp1->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index < be16_to_cpu(leaf1->hdr.count)); + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index < INT_GET(leaf1->hdr.count, ARCH_CONVERT)); ASSERT(args->index >= 0); entry1 = &leaf1->entries[ args->index ]; leaf2 = bp2->data; - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count)); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); + ASSERT(args->index2 < INT_GET(leaf2->hdr.count, ARCH_CONVERT)); ASSERT(args->index2 >= 0); entry2 = &leaf2->entries[ args->index2 ]; @@ -2678,7 +2768,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) namelen2 = name_rmt->namelen; name2 = (char *)name_rmt->name; } - ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval)); + ASSERT(INT_GET(entry1->hashval, ARCH_CONVERT) == INT_GET(entry2->hashval, ARCH_CONVERT)); ASSERT(namelen1 == namelen2); ASSERT(memcmp(name1, name2, namelen1) == 0); #endif /* DEBUG */ @@ -2692,8 +2782,8 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) if (args->rmtblkno) { ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0); name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf1, args->index); - name_rmt->valueblk = cpu_to_be32(args->rmtblkno); - name_rmt->valuelen = cpu_to_be32(args->valuelen); + INT_SET(name_rmt->valueblk, ARCH_CONVERT, args->rmtblkno); + INT_SET(name_rmt->valuelen, ARCH_CONVERT, args->valuelen); xfs_da_log_buf(args->trans, bp1, XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt))); } @@ -2752,9 +2842,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) * This is a depth-first traversal! */ info = bp->data; - if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { + if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { error = xfs_attr_node_inactive(trans, dp, bp, 1); - } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { + } else if (INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { error = xfs_attr_leaf_inactive(trans, dp, bp); } else { error = XFS_ERROR(EIO); @@ -2802,14 +2892,15 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, } node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) + == XFS_DA_NODE_MAGIC); parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ - count = be16_to_cpu(node->hdr.count); + count = INT_GET(node->hdr.count, ARCH_CONVERT); if (!count) { xfs_da_brelse(*trans, bp); return(0); } - child_fsb = be32_to_cpu(node->btree[0].before); + child_fsb = INT_GET(node->btree[0].before, ARCH_CONVERT); xfs_da_brelse(*trans, bp); /* no locks for later trans */ /* @@ -2836,10 +2927,12 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, * Invalidate the subtree, however we have to. 
*/ info = child_bp->data; - if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { + if (INT_GET(info->magic, ARCH_CONVERT) + == XFS_DA_NODE_MAGIC) { error = xfs_attr_node_inactive(trans, dp, child_bp, level+1); - } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { + } else if (INT_GET(info->magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC) { error = xfs_attr_leaf_inactive(trans, dp, child_bp); } else { @@ -2869,7 +2962,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, &bp, XFS_ATTR_FORK); if (error) return(error); - child_fsb = be32_to_cpu(node->btree[i+1].before); + child_fsb = INT_GET(node->btree[i+1].before, ARCH_CONVERT); xfs_da_brelse(*trans, bp); } /* @@ -2898,16 +2991,17 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) int error, count, size, tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) + == XFS_ATTR_LEAF_MAGIC); /* * Count the number of "remote" value extents. */ count = 0; entry = &leaf->entries[0]; - for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { - if (be16_to_cpu(entry->nameidx) && - ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if ( INT_GET(entry->nameidx, ARCH_CONVERT) + && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); if (name_rmt->valueblk) count++; @@ -2933,14 +3027,17 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) */ lp = list; entry = &leaf->entries[0]; - for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) { - if (be16_to_cpu(entry->nameidx) && - ((entry->flags & XFS_ATTR_LOCAL) == 0)) { + for (i = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); entry++, i++) { + if ( INT_GET(entry->nameidx, ARCH_CONVERT) + && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i); if (name_rmt->valueblk) { - lp->valueblk = be32_to_cpu(name_rmt->valueblk); - lp->valuelen = XFS_B_TO_FSB(dp->i_mount, - be32_to_cpu(name_rmt->valuelen)); + /* both on-disk, don't endian flip twice */ + lp->valueblk = name_rmt->valueblk; + INT_SET(lp->valuelen, ARCH_CONVERT, + XFS_B_TO_FSB(dp->i_mount, + INT_GET(name_rmt->valuelen, + ARCH_CONVERT))); lp++; } } @@ -2953,8 +3050,10 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) error = 0; for (lp = list, i = 0; i < count; i++, lp++) { tmp = xfs_attr_leaf_freextent(trans, dp, - lp->valueblk, lp->valuelen); - + INT_GET(lp->valueblk, + ARCH_CONVERT), + INT_GET(lp->valuelen, + ARCH_CONVERT)); if (error == 0) error = tmp; /* save only the 1st errno */ } diff --git a/trunk/fs/xfs/xfs_attr_leaf.h b/trunk/fs/xfs/xfs_attr_leaf.h index 51c3ee156b2f..541e34109bb9 100644 --- a/trunk/fs/xfs/xfs_attr_leaf.h +++ b/trunk/fs/xfs/xfs_attr_leaf.h @@ -73,39 +73,39 @@ struct xfs_trans; #define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */ typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */ - __be16 base; /* base of free region */ - __be16 size; /* length of free region */ + __uint16_t base; /* base of free region */ + __uint16_t size; /* length of free region */ } xfs_attr_leaf_map_t; typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */ xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ - __be16 count; /* count of active leaf_entry's */ - __be16 usedbytes; /* num bytes of names/values stored */ - __be16 firstused; /* first used byte in name area */ - __u8 holes; /* != 0 if blk needs compaction */ - __u8 pad1; + __uint16_t count; /* count of active leaf_entry's */ + __uint16_t usedbytes; /* num bytes of names/values stored */ + __uint16_t firstused; /* first used byte in name area */ + __uint8_t holes; /* != 0 if blk needs compaction */ + __uint8_t pad1; xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */ } xfs_attr_leaf_hdr_t; typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */ - __be32 hashval; /* hash value of name */ - __be16 nameidx; /* index into buffer of name/value */ - __u8 flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ - __u8 pad2; /* unused pad byte */ + xfs_dahash_t hashval; /* hash value of name */ + __uint16_t nameidx; /* index into buffer of name/value */ + __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */ + __uint8_t pad2; /* unused pad byte */ } xfs_attr_leaf_entry_t; typedef struct xfs_attr_leaf_name_local { - __be16 valuelen; /* number of bytes in value */ - __u8 namelen; /* length of name bytes */ - __u8 nameval[1]; /* name/value bytes */ + __uint16_t valuelen; /* number of bytes in value */ + __uint8_t namelen; /* length of name bytes */ + __uint8_t nameval[1]; /* name/value bytes */ } xfs_attr_leaf_name_local_t; typedef struct xfs_attr_leaf_name_remote { - __be32 valueblk; /* block number of value bytes */ - __be32 valuelen; /* number of bytes in value */ - __u8 namelen; /* length of name bytes */ - __u8 name[1]; /* name bytes */ + xfs_dablk_t valueblk; /* block number of value bytes */ + __uint32_t valuelen; /* number of bytes in value */ + __uint8_t namelen; /* length of name bytes */ + __uint8_t name[1]; /* name bytes */ } xfs_attr_leaf_name_remote_t; typedef struct xfs_attr_leafblock { @@ -143,8 +143,8 @@ typedef struct xfs_attr_leafblock { static inline xfs_attr_leaf_name_remote_t * xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) { - return (xfs_attr_leaf_name_remote_t *) - &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; + return (xfs_attr_leaf_name_remote_t *) &((char *) + (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; } #define XFS_ATTR_LEAF_NAME_LOCAL(leafp,idx) \ @@ -152,15 +152,16 @@ xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) static inline xfs_attr_leaf_name_local_t * xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) { - return (xfs_attr_leaf_name_local_t *) - &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; + return (xfs_attr_leaf_name_local_t *) &((char *) + (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]; } #define XFS_ATTR_LEAF_NAME(leafp,idx) \ xfs_attr_leaf_name(leafp,idx) static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) { - return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; + return (&((char *) + (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)]); } /* diff --git a/trunk/fs/xfs/xfs_attr_sf.h b/trunk/fs/xfs/xfs_attr_sf.h index f67f917803b1..ffed6ca81a52 100644 --- a/trunk/fs/xfs/xfs_attr_sf.h +++ b/trunk/fs/xfs/xfs_attr_sf.h @@ -32,8 +32,8 @@ struct xfs_inode; */ typedef struct xfs_attr_shortform { struct xfs_attr_sf_hdr { /* constant-structure header block */ - __be16 totsize; /* total bytes in shortform list */ - __u8 count; /* count of active entries */ + __uint16_t totsize; /* total bytes in shortform list */ + 
__uint8_t count; /* count of active entries */ } hdr; struct xfs_attr_sf_entry { __uint8_t namelen; /* actual length of name (no NULL) */ @@ -66,8 +66,8 @@ typedef struct xfs_attr_sf_sort { #define XFS_ATTR_SF_NEXTENTRY(sfep) /* next entry in struct */ \ ((xfs_attr_sf_entry_t *)((char *)(sfep) + XFS_ATTR_SF_ENTSIZE(sfep))) #define XFS_ATTR_SF_TOTSIZE(dp) /* total space in use */ \ - (be16_to_cpu(((xfs_attr_shortform_t *) \ - ((dp)->i_afp->if_u1.if_data))->hdr.totsize)) + (INT_GET(((xfs_attr_shortform_t *) \ + ((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT)) #if defined(XFS_ATTR_TRACE) /* diff --git a/trunk/fs/xfs/xfs_bmap.c b/trunk/fs/xfs/xfs_bmap.c index 2d702e4a74a3..70625e577c70 100644 --- a/trunk/fs/xfs/xfs_bmap.c +++ b/trunk/fs/xfs/xfs_bmap.c @@ -89,7 +89,7 @@ xfs_bmap_add_attrfork_local( int *flags); /* inode logging flags */ /* - * Called by xfs_bmapi to update file extent records and the btree + * Called by xfs_bmapi to update extent list structure and the btree * after allocating space (or doing a delayed allocation). */ STATIC int /* error */ @@ -97,7 +97,7 @@ xfs_bmap_add_extent( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ @@ -113,7 +113,7 @@ xfs_bmap_add_extent_delay_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ @@ -129,7 +129,7 @@ xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp,/* inode logging flags */ int rsvd); /* OK to allocate reserved blocks */ @@ -142,7 +142,7 @@ xfs_bmap_add_extent_hole_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp, /* inode logging flags */ int whichfork); /* data or attr fork */ @@ -155,7 +155,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp); /* inode logging flags */ /* @@ -169,7 +169,7 @@ xfs_bmap_alloc( /* * Transform a btree format file with only one leaf node, where the * extents list will fit in the inode, into an extents format file. 
- * Since the file extents are already in-core, all we have to do is + * Since the extent list is already in-core, all we have to do is * give up the space for the btree root and pitch the leaf block. */ STATIC int /* error */ @@ -191,7 +191,7 @@ xfs_bmap_check_extents( #endif /* - * Called by xfs_bmapi to update file extent records and the btree + * Called by xfs_bmapi to update extent list structure and the btree * after removing space (or undoing a delayed allocation). */ STATIC int /* error */ @@ -201,7 +201,7 @@ xfs_bmap_del_extent( xfs_extnum_t idx, /* extent number to update/insert */ xfs_bmap_free_t *flist, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp,/* inode logging flags */ int whichfork, /* data or attr fork */ int rsvd); /* OK to allocate reserved blocks */ @@ -216,6 +216,18 @@ xfs_bmap_del_free( xfs_bmap_free_item_t *prev, /* previous item on list, if any */ xfs_bmap_free_item_t *free); /* list item to be freed */ +/* + * Remove count entries from the extents array for inode "ip", starting + * at index "idx". Copies the remaining items down over the deleted ones, + * and gives back the excess memory. + */ +STATIC void +xfs_bmap_delete_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting delete index */ + xfs_extnum_t count, /* count of items to delete */ + int whichfork); /* data or attr fork */ + /* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. @@ -231,6 +243,18 @@ xfs_bmap_extents_to_btree( int *logflagsp, /* inode logging flags */ int whichfork); /* data or attr fork */ +/* + * Insert new item(s) in the extent list for inode "ip". + * Count new items are inserted at offset idx. + */ +STATIC void +xfs_bmap_insert_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new, /* items to insert */ + int whichfork); /* data or attr fork */ + /* * Convert a local file to an extents file. * This code is sort of bogus, since the file data needs to get @@ -292,7 +316,7 @@ xfs_bmap_trace_addentry( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to a call to xfs_iext_remove. + * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. */ STATIC void xfs_bmap_trace_delete( @@ -304,7 +328,7 @@ xfs_bmap_trace_delete( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to a call to xfs_iext_insert, or + * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or * reading in the extents list from the disk (in the btree). */ STATIC void @@ -319,7 +343,7 @@ xfs_bmap_trace_insert( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry after updating an extent record in place. + * Add bmap trace entry after updating an extent list entry in place. */ STATIC void xfs_bmap_trace_post_update( @@ -330,7 +354,7 @@ xfs_bmap_trace_post_update( int whichfork); /* data or attr fork */ /* - * Add bmap trace entry prior to updating an extent record in place. + * Add bmap trace entry prior to updating an extent list entry in place. 
*/ STATIC void xfs_bmap_trace_pre_update( @@ -389,24 +413,19 @@ STATIC int xfs_bmap_count_tree( xfs_mount_t *mp, xfs_trans_t *tp, - xfs_ifork_t *ifp, xfs_fsblock_t blockno, int levelin, int *count); STATIC int xfs_bmap_count_leaves( - xfs_ifork_t *ifp, - xfs_extnum_t idx, + xfs_bmbt_rec_t *frp, int numrecs, int *count); STATIC int xfs_bmap_disk_count_leaves( - xfs_ifork_t *ifp, - xfs_mount_t *mp, - xfs_extnum_t idx, - xfs_bmbt_block_t *block, + xfs_bmbt_rec_t *frp, int numrecs, int *count); @@ -518,7 +537,7 @@ xfs_bmap_add_attrfork_local( } /* - * Called by xfs_bmapi to update file extent records and the btree + * Called by xfs_bmapi to update extent list structure and the btree * after allocating space (or doing a delayed allocation). */ STATIC int /* error */ @@ -526,7 +545,7 @@ xfs_bmap_add_extent( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ @@ -559,7 +578,7 @@ xfs_bmap_add_extent( if (nextents == 0) { xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new, NULL, whichfork); - xfs_iext_insert(ifp, 0, 1, new); + xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork); ASSERT(cur == NULL); ifp->if_lastex = 0; if (!ISNULLSTARTBLOCK(new->br_startblock)) { @@ -595,7 +614,7 @@ xfs_bmap_add_extent( /* * Get the record referred to by idx. */ - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev); + xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev); /* * If it's a real allocation record, and the new allocation ends * after the start of the referred to record, then we're filling @@ -695,13 +714,14 @@ xfs_bmap_add_extent_delay_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ xfs_fsblock_t *first, /* pointer to firstblock variable */ xfs_bmap_free_t *flist, /* list of extents to be freed */ int *logflagsp, /* inode logging flags */ int rsvd) /* OK to use reserved data block allocation */ { + xfs_bmbt_rec_t *base; /* base of extent entry list */ xfs_btree_cur_t *cur; /* btree cursor */ int diff; /* temp value */ xfs_bmbt_rec_t *ep; /* extent entry for idx */ @@ -710,7 +730,6 @@ xfs_bmap_add_extent_delay_real( static char fname[] = "xfs_bmap_add_extent_delay_real"; #endif int i; /* temp state */ - xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_bmbt_irec_t r[3]; /* neighbor extent entries */ /* left is 0, right is 1, prev is 2 */ @@ -744,8 +763,8 @@ xfs_bmap_add_extent_delay_real( * Set up a bunch of variables to make the tests simpler. */ cur = *curp; - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); - ep = xfs_iext_get_ext(ifp, idx); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; xfs_bmbt_get_all(ep, &PREV); new_endoff = new->br_startoff + new->br_blockcount; ASSERT(PREV.br_startoff <= new->br_startoff); @@ -762,7 +781,7 @@ xfs_bmap_add_extent_delay_real( * Don't set contiguous if the combined extent would be too large. 
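The LEFT_CONTIG/RIGHT_CONTIG tests that follow all reduce to one check: a neighbor merges with the new extent only if the two butt up against each other both in file-offset space and on disk, and the merged length stays legal. A minimal stand-alone sketch, with decoded records and a MAXEXTLEN value assumed purely for illustration:

    struct irec {                        /* decoded extent, cf. xfs_bmbt_irec_t */
        unsigned long long startoff;     /* file offset, in blocks */
        unsigned long long startblock;   /* disk block number */
        unsigned long long blockcount;   /* length, in blocks */
    };

    #define MAXEXTLEN ((1ULL << 21) - 1) /* assumed length cap, illustration only */

    /* can "left" and "new" be merged into a single real extent? */
    static int left_contig(const struct irec *left, const struct irec *new)
    {
        return left->startoff + left->blockcount == new->startoff &&
               left->startblock + left->blockcount == new->startblock &&
               left->blockcount + new->blockcount <= MAXEXTLEN;
    }

For delayed extents the startblock comparison drops out, since their start blocks only encode the reserved indirect-block count.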
*/ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); + xfs_bmbt_get_all(ep - 1, &LEFT); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, @@ -779,7 +798,7 @@ xfs_bmap_add_extent_delay_real( if (STATE_SET_TEST(RIGHT_VALID, idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); + xfs_bmbt_get_all(ep + 1, &RIGHT); STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, @@ -806,14 +825,14 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx, 2); + xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents--; if (cur == NULL) @@ -848,14 +867,14 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, LEFT.br_blockcount + PREV.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx, 1); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); if (cur == NULL) rval = XFS_ILOG_DEXT; else { @@ -889,7 +908,7 @@ xfs_bmap_add_extent_delay_real( ip->i_df.if_lastex = idx; xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx + 1, 1); + xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); if (cur == NULL) rval = XFS_ILOG_DEXT; else { @@ -945,7 +964,7 @@ xfs_bmap_add_extent_delay_real( */ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); @@ -991,7 +1010,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_iext_insert(ifp, idx, 1, new); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -1020,7 +1039,8 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), STARTBLOCKVAL(PREV.br_startblock) - (cur ? 
cur->bc_private.b.allocated : 0)); - ep = xfs_iext_get_ext(ifp, idx + 1); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx + 1]; xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1, XFS_DATA_FORK); @@ -1038,8 +1058,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, XFS_DATA_FORK); xfs_bmbt_set_blockcount(ep, temp); - xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), - new->br_startoff, new->br_startblock, + xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, RIGHT.br_state); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, @@ -1079,7 +1098,7 @@ xfs_bmap_add_extent_delay_real( xfs_bmbt_set_blockcount(ep, temp); xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, new, NULL, XFS_DATA_FORK); - xfs_iext_insert(ifp, idx + 1, 1, new); + xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1108,7 +1127,8 @@ xfs_bmap_add_extent_delay_real( temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), STARTBLOCKVAL(PREV.br_startblock) - (cur ? cur->bc_private.b.allocated : 0)); - ep = xfs_iext_get_ext(ifp, idx); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); *dnew = temp; @@ -1129,7 +1149,7 @@ xfs_bmap_add_extent_delay_real( r[1].br_blockcount = temp2; xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], XFS_DATA_FORK); - xfs_iext_insert(ifp, idx + 1, 2, &r[0]); + xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1184,13 +1204,13 @@ xfs_bmap_add_extent_delay_real( } } } - ep = xfs_iext_get_ext(ifp, idx); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2, XFS_DATA_FORK); - xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), - NULLSTARTBLOCK((int)temp2)); + xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2)); xfs_bmap_trace_post_update(fname, "0", ip, idx + 2, XFS_DATA_FORK); *dnew = temp + temp2; @@ -1234,9 +1254,10 @@ xfs_bmap_add_extent_unwritten_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp) /* inode logging flags */ { + xfs_bmbt_rec_t *base; /* base of extent entry list */ xfs_btree_cur_t *cur; /* btree cursor */ xfs_bmbt_rec_t *ep; /* extent entry for idx */ int error; /* error return value */ @@ -1244,7 +1265,6 @@ xfs_bmap_add_extent_unwritten_real( static char fname[] = "xfs_bmap_add_extent_unwritten_real"; #endif int i; /* temp state */ - xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t new_endoff; /* end offset of new entry */ xfs_exntst_t newext; /* new extent state */ xfs_exntst_t oldext; /* old extent state */ @@ -1278,8 +1298,8 @@ xfs_bmap_add_extent_unwritten_real( */ error = 0; cur = *curp; - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); - ep = xfs_iext_get_ext(ifp, idx); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; xfs_bmbt_get_all(ep, &PREV); newext = new->br_state; oldext = (newext 
== XFS_EXT_UNWRITTEN) ? @@ -1300,7 +1320,7 @@ xfs_bmap_add_extent_unwritten_real( * Don't set contiguous if the combined extent would be too large. */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); + xfs_bmbt_get_all(ep - 1, &LEFT); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock)); } STATE_SET(LEFT_CONTIG, @@ -1317,7 +1337,7 @@ xfs_bmap_add_extent_unwritten_real( if (STATE_SET_TEST(RIGHT_VALID, idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); + xfs_bmbt_get_all(ep + 1, &RIGHT); STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock)); } STATE_SET(RIGHT_CONTIG, @@ -1343,14 +1363,14 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, LEFT.br_blockcount + PREV.br_blockcount + RIGHT.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx, 2); + xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; ip->i_d.di_nextents -= 2; if (cur == NULL) @@ -1389,14 +1409,14 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, LEFT.br_blockcount + PREV.br_blockcount); xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx, 1); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1436,7 +1456,7 @@ xfs_bmap_add_extent_unwritten_real( ip->i_df.if_lastex = idx; xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx + 1, 1); + xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK); ip->i_d.di_nextents--; if (cur == NULL) rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; @@ -1496,7 +1516,7 @@ xfs_bmap_add_extent_unwritten_real( */ xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, LEFT.br_blockcount + new->br_blockcount); xfs_bmbt_set_startoff(ep, PREV.br_startoff + new->br_blockcount); @@ -1551,7 +1571,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_iext_insert(ifp, idx, 1, new); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); ip->i_df.if_lastex = idx; ip->i_d.di_nextents++; if (cur == NULL) @@ -1589,8 +1609,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_blockcount - new->br_blockcount); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, XFS_DATA_FORK); - xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), - new->br_startoff, new->br_startblock, + xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock, new->br_blockcount + RIGHT.br_blockcount, newext); xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, XFS_DATA_FORK); @@ -1630,7 +1649,7 @@ xfs_bmap_add_extent_unwritten_real( xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); xfs_bmap_trace_insert(fname, "RF", ip, 
idx + 1, 1, new, NULL, XFS_DATA_FORK); - xfs_iext_insert(ifp, idx + 1, 1, new); + xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) @@ -1677,7 +1696,7 @@ xfs_bmap_add_extent_unwritten_real( r[1].br_state = oldext; xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], XFS_DATA_FORK); - xfs_iext_insert(ifp, idx + 1, 2, &r[0]); + xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents += 2; if (cur == NULL) @@ -1751,15 +1770,15 @@ xfs_bmap_add_extent_hole_delay( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp, /* inode logging flags */ int rsvd) /* OK to allocate reserved blocks */ { - xfs_bmbt_rec_t *ep; /* extent record for idx */ + xfs_bmbt_rec_t *base; /* base of extent entry list */ + xfs_bmbt_rec_t *ep; /* extent list entry for idx */ #ifdef XFS_BMAP_TRACE static char fname[] = "xfs_bmap_add_extent_hole_delay"; #endif - xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_irec_t left; /* left neighbor extent entry */ xfs_filblks_t newlen=0; /* new indirect size */ xfs_filblks_t oldlen=0; /* old indirect size */ @@ -1780,15 +1799,15 @@ xfs_bmap_add_extent_hole_delay( ((state &= ~MASK(b)), 0)) #define SWITCH_STATE (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); - ep = xfs_iext_get_ext(ifp, idx); + base = ip->i_df.if_u1.if_extents; + ep = &base[idx]; state = 0; ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); /* * Check and set flags if this segment has a left neighbor */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); + xfs_bmbt_get_all(ep - 1, &left); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* @@ -1825,24 +1844,23 @@ xfs_bmap_add_extent_hole_delay( /* * New allocation is contiguous with delayed allocations * on the left and on the right. - * Merge all three into a single extent record. + * Merge all three into a single extent list entry. 
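The MASK/STATE_SET machinery above funnels everything known about the neighbors into two bits, so the merge policy becomes a four-way switch. A sketch of just the dispatch; the bookkeeping inside each case is elided:

    #define MASK(b)     (1 << (b))
    #define MASK2(a,b)  (MASK(a) | MASK(b))
    enum { LEFT_CONTIG, RIGHT_CONTIG };  /* bit numbers, as in the file */

    static void merge_dispatch(int state)
    {
        switch (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG)) {
        case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
            /* new bridges both neighbors: fold all three into the
             * left entry and delete the now-redundant entries */
            break;
        case MASK(LEFT_CONTIG):
            /* extend the left neighbor over the new range */
            break;
        case MASK(RIGHT_CONTIG):
            /* pull the right neighbor back over the new range */
            break;
        case 0:
            /* no mergeable neighbor: insert a brand-new entry */
            break;
        }
    }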
*/ temp = left.br_blockcount + new->br_blockcount + right.br_blockcount; xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); + xfs_bmbt_set_blockcount(ep - 1, temp); oldlen = STARTBLOCKVAL(left.br_startblock) + STARTBLOCKVAL(new->br_startblock) + STARTBLOCKVAL(right.br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), - NULLSTARTBLOCK((int)newlen)); + xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, XFS_DATA_FORK); xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, XFS_DATA_FORK); - xfs_iext_remove(ifp, idx, 1); + xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; break; @@ -1855,12 +1873,11 @@ xfs_bmap_add_extent_hole_delay( temp = left.br_blockcount + new->br_blockcount; xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, XFS_DATA_FORK); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); + xfs_bmbt_set_blockcount(ep - 1, temp); oldlen = STARTBLOCKVAL(left.br_startblock) + STARTBLOCKVAL(new->br_startblock); newlen = xfs_bmap_worst_indlen(ip, temp); - xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), - NULLSTARTBLOCK((int)newlen)); + xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen)); xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, XFS_DATA_FORK); ip->i_df.if_lastex = idx - 1; @@ -1892,7 +1909,7 @@ xfs_bmap_add_extent_hole_delay( oldlen = newlen = 0; xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, XFS_DATA_FORK); - xfs_iext_insert(ifp, idx, 1, new); + xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK); ip->i_df.if_lastex = idx; break; } @@ -1923,7 +1940,7 @@ xfs_bmap_add_extent_hole_real( xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t idx, /* extent number to update/insert */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *new, /* new data to add to file extents */ + xfs_bmbt_irec_t *new, /* new data to put in extent list */ int *logflagsp, /* inode logging flags */ int whichfork) /* data or attr fork */ { @@ -1953,13 +1970,13 @@ xfs_bmap_add_extent_hole_real( ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); - ep = xfs_iext_get_ext(ifp, idx); + ep = &ifp->if_u1.if_extents[idx]; state = 0; /* * Check and set flags if this segment has a left neighbor. */ if (STATE_SET_TEST(LEFT_VALID, idx > 0)) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); + xfs_bmbt_get_all(ep - 1, &left); STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock)); } /* @@ -2002,18 +2019,18 @@ xfs_bmap_add_extent_hole_real( /* * New allocation is contiguous with real allocations on the * left and on the right. - * Merge all three into a single extent record. + * Merge all three into a single extent list entry. */ xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, whichfork); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, left.br_blockcount + new->br_blockcount + right.br_blockcount); xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, whichfork); xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, whichfork); - xfs_iext_remove(ifp, idx, 1); + xfs_bmap_delete_exlist(ip, idx, 1, whichfork); ifp->if_lastex = idx - 1; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) - 1); @@ -2045,7 +2062,7 @@ xfs_bmap_add_extent_hole_real( * Merge the new allocation with the left neighbor. 
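The oldlen/newlen arithmetic in these cases pools the indirect-block reservations hidden in the delayed extents' start blocks and re-derives one worst case for the merged length; any surplus can then go back to the free-space counters. A rough stand-in for what xfs_bmap_worst_indlen() computes, assuming a single maxrecs records-per-block figure (the real routine consults the per-level record limits):

    /* worst-case number of bmap btree blocks needed to map "len" blocks */
    static unsigned long worst_indlen(unsigned long len, unsigned int maxrecs)
    {
        unsigned long rval = 0;

        do {
            len = (len + maxrecs - 1) / maxrecs;  /* blocks at this level */
            rval += len;
        } while (len > 1);
        return rval;
    }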
*/ xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); - xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), + xfs_bmbt_set_blockcount(ep - 1, left.br_blockcount + new->br_blockcount); xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); ifp->if_lastex = idx - 1; @@ -2099,7 +2116,7 @@ xfs_bmap_add_extent_hole_real( */ xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, whichfork); - xfs_iext_insert(ifp, idx, 1, new); + xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork); ifp->if_lastex = idx; XFS_IFORK_NEXT_SET(ip, whichfork, XFS_IFORK_NEXTENTS(ip, whichfork) + 1); @@ -2294,15 +2311,25 @@ xfs_bmap_extsize_align( #define XFS_ALLOC_GAP_UNITS 4 +/* + * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. + * It figures out where to ask the underlying allocator to put the new extent. + */ STATIC int -xfs_bmap_adjacent( +xfs_bmap_alloc( xfs_bmalloca_t *ap) /* bmap alloc argument struct */ { xfs_fsblock_t adjust; /* adjustment to block numbers */ + xfs_alloctype_t atype=0; /* type for allocation routines */ + int error; /* error return value */ xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ xfs_mount_t *mp; /* mount point structure */ int nullfb; /* true if ap->firstblock isn't set */ int rt; /* true if inode is realtime */ + xfs_extlen_t prod = 0; /* product factor for allocators */ + xfs_extlen_t ralen = 0; /* realtime allocation length */ + xfs_extlen_t align; /* minimum allocation alignment */ + xfs_rtblock_t rtx; #define ISVALID(x,y) \ (rt ? \ @@ -2311,10 +2338,75 @@ xfs_bmap_adjacent( XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \ XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks) + /* + * Set up variables. + */ mp = ap->ip->i_mount; nullfb = ap->firstblock == NULLFSBLOCK; rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata; fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); + if (rt) { + align = ap->ip->i_d.di_extsize ? + ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; + /* Set prod to match the extent size */ + prod = align / mp->m_sb.sb_rextsize; + + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, + align, rt, ap->eof, 0, + ap->conv, &ap->off, &ap->alen); + if (error) + return error; + ASSERT(ap->alen); + ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); + + /* + * If the offset & length are not perfectly aligned + * then kill prod, it will just get us in trouble. + */ + if (do_mod(ap->off, align) || ap->alen % align) + prod = 1; + /* + * Set ralen to be the actual requested length in rtextents. + */ + ralen = ap->alen / mp->m_sb.sb_rextsize; + /* + * If the old value was close enough to MAXEXTLEN that + * we rounded up to it, cut it back so it's valid again. + * Note that if it's a really large request (bigger than + * MAXEXTLEN), we don't hear about that number, and can't + * adjust the starting point to match it. + */ + if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) + ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; + /* + * If it's an allocation to an empty file at offset 0, + * pick an extent that will space things out in the rt area. + */ + if (ap->eof && ap->off == 0) { + error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); + if (error) + return error; + ap->rval = rtx * mp->m_sb.sb_rextsize; + } else + ap->rval = 0; + } else { + align = (ap->userdata && ap->ip->i_d.di_extsize && + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? 
+ ap->ip->i_d.di_extsize : 0; + if (unlikely(align)) { + error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, + align, rt, + ap->eof, 0, ap->conv, + &ap->off, &ap->alen); + ASSERT(!error); + ASSERT(ap->alen); + } + if (nullfb) + ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); + else + ap->rval = ap->firstblock; + } + /* * If allocating at eof, and there's a previous real block, * try to use it's last block as our starting point. @@ -2439,384 +2531,287 @@ xfs_bmap_adjacent( else if (gotbno != NULLFSBLOCK) ap->rval = gotbno; } -#undef ISVALID - return 0; -} - -STATIC int -xfs_bmap_rtalloc( - xfs_bmalloca_t *ap) /* bmap alloc argument struct */ -{ - xfs_alloctype_t atype = 0; /* type for allocation routines */ - int error; /* error return value */ - xfs_mount_t *mp; /* mount point structure */ - xfs_extlen_t prod = 0; /* product factor for allocators */ - xfs_extlen_t ralen = 0; /* realtime allocation length */ - xfs_extlen_t align; /* minimum allocation alignment */ - xfs_rtblock_t rtx; /* realtime extent number */ - xfs_rtblock_t rtb; - - mp = ap->ip->i_mount; - align = ap->ip->i_d.di_extsize ? - ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; - prod = align / mp->m_sb.sb_rextsize; - error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, - align, 1, ap->eof, 0, - ap->conv, &ap->off, &ap->alen); - if (error) - return error; - ASSERT(ap->alen); - ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0); - - /* - * If the offset & length are not perfectly aligned - * then kill prod, it will just get us in trouble. - */ - if (do_mod(ap->off, align) || ap->alen % align) - prod = 1; - /* - * Set ralen to be the actual requested length in rtextents. - */ - ralen = ap->alen / mp->m_sb.sb_rextsize; - /* - * If the old value was close enough to MAXEXTLEN that - * we rounded up to it, cut it back so it's valid again. - * Note that if it's a really large request (bigger than - * MAXEXTLEN), we don't hear about that number, and can't - * adjust the starting point to match it. - */ - if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) - ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; - /* - * If it's an allocation to an empty file at offset 0, - * pick an extent that will space things out in the rt area. - */ - if (ap->eof && ap->off == 0) { - error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); - if (error) - return error; - ap->rval = rtx * mp->m_sb.sb_rextsize; - } else { - ap->rval = 0; - } - - xfs_bmap_adjacent(ap); - - /* - * Realtime allocation, done through xfs_rtallocate_extent. - */ - atype = ap->rval == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; - do_div(ap->rval, mp->m_sb.sb_rextsize); - rtb = ap->rval; - ap->alen = ralen; - if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, - &ralen, atype, ap->wasdel, prod, &rtb))) - return error; - if (rtb == NULLFSBLOCK && prod > 1 && - (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, - ap->alen, &ralen, atype, - ap->wasdel, 1, &rtb))) - return error; - ap->rval = rtb; - if (ap->rval != NULLFSBLOCK) { - ap->rval *= mp->m_sb.sb_rextsize; - ralen *= mp->m_sb.sb_rextsize; - ap->alen = ralen; - ap->ip->i_d.di_nblocks += ralen; - xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); - if (ap->wasdel) - ap->ip->i_delayed_blks -= ralen; - /* - * Adjust the disk quota also. This was reserved - * earlier. - */ - XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, - ap->wasdel ? 
XFS_TRANS_DQ_DELRTBCOUNT : - XFS_TRANS_DQ_RTBCOUNT, (long) ralen); - } else { - ap->alen = 0; - } - return 0; -} - -STATIC int -xfs_bmap_btalloc( - xfs_bmalloca_t *ap) /* bmap alloc argument struct */ -{ - xfs_mount_t *mp; /* mount point structure */ - xfs_alloctype_t atype = 0; /* type for allocation routines */ - xfs_extlen_t align; /* minimum allocation alignment */ - xfs_agnumber_t ag; - xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */ - xfs_agnumber_t startag; - xfs_alloc_arg_t args; - xfs_extlen_t blen; - xfs_extlen_t delta; - xfs_extlen_t longest; - xfs_extlen_t need; - xfs_extlen_t nextminlen = 0; - xfs_perag_t *pag; - int nullfb; /* true if ap->firstblock isn't set */ - int isaligned; - int notinit; - int tryagain; - int error; - - mp = ap->ip->i_mount; - align = (ap->userdata && ap->ip->i_d.di_extsize && - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? - ap->ip->i_d.di_extsize : 0; - if (unlikely(align)) { - error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, - align, 0, ap->eof, 0, ap->conv, - &ap->off, &ap->alen); - ASSERT(!error); - ASSERT(ap->alen); - } - nullfb = ap->firstblock == NULLFSBLOCK; - fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); - if (nullfb) - ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); - else - ap->rval = ap->firstblock; - - xfs_bmap_adjacent(ap); - /* * If allowed, use ap->rval; otherwise must use firstblock since * it's in the right allocation group. */ - if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) + if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno) ; else ap->rval = ap->firstblock; /* - * Normal allocation, done through xfs_alloc_vextent. + * Realtime allocation, done through xfs_rtallocate_extent. */ - tryagain = isaligned = 0; - args.tp = ap->tp; - args.mp = mp; - args.fsbno = ap->rval; - args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); - blen = 0; - if (nullfb) { - args.type = XFS_ALLOCTYPE_START_BNO; - args.total = ap->total; - /* - * Find the longest available space. - * We're going to try for the whole allocation at once. - */ - startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); - notinit = 0; - down_read(&mp->m_peraglock); - while (blen < ap->alen) { - pag = &mp->m_perag[ag]; - if (!pag->pagf_init && - (error = xfs_alloc_pagf_init(mp, args.tp, - ag, XFS_ALLOC_FLAG_TRYLOCK))) { - up_read(&mp->m_peraglock); - return error; - } + if (rt) { +#ifndef __KERNEL__ + ASSERT(0); +#else + xfs_rtblock_t rtb; + + atype = ap->rval == 0 ? + XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO; + do_div(ap->rval, mp->m_sb.sb_rextsize); + rtb = ap->rval; + ap->alen = ralen; + if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen, + &ralen, atype, ap->wasdel, prod, &rtb))) + return error; + if (rtb == NULLFSBLOCK && prod > 1 && + (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, + ap->alen, &ralen, atype, + ap->wasdel, 1, &rtb))) + return error; + ap->rval = rtb; + if (ap->rval != NULLFSBLOCK) { + ap->rval *= mp->m_sb.sb_rextsize; + ralen *= mp->m_sb.sb_rextsize; + ap->alen = ralen; + ap->ip->i_d.di_nblocks += ralen; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= ralen; /* - * See xfs_alloc_fix_freelist... + * Adjust the disk quota also. This was reserved + * earlier. */ - if (pag->pagf_init) { - need = XFS_MIN_FREELIST_PAG(pag, mp); - delta = need > pag->pagf_flcount ? - need - pag->pagf_flcount : 0; - longest = (pag->pagf_longest > delta) ? 
- (pag->pagf_longest - delta) : - (pag->pagf_flcount > 0 || - pag->pagf_longest > 0); - if (blen < longest) - blen = longest; - } else - notinit = 1; - if (++ag == mp->m_sb.sb_agcount) - ag = 0; - if (ag == startag) - break; - } - up_read(&mp->m_peraglock); - /* - * Since the above loop did a BUF_TRYLOCK, it is - * possible that there is space for this request. - */ - if (notinit || blen < ap->minlen) - args.minlen = ap->minlen; - /* - * If the best seen length is less than the request - * length, use the best as the minimum. - */ - else if (blen < ap->alen) - args.minlen = blen; - /* - * Otherwise we've seen an extent as big as alen, - * use that as the minimum. - */ - else - args.minlen = ap->alen; - } else if (ap->low) { - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.total = args.minlen = ap->minlen; - } else { - args.type = XFS_ALLOCTYPE_NEAR_BNO; - args.total = ap->total; - args.minlen = ap->minlen; - } - if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { - args.prod = ap->ip->i_d.di_extsize; - if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) - args.mod = (xfs_extlen_t)(args.prod - args.mod); - } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { - args.prod = 1; - args.mod = 0; - } else { - args.prod = NBPP >> mp->m_sb.sb_blocklog; - if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) - args.mod = (xfs_extlen_t)(args.prod - args.mod); + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT : + XFS_TRANS_DQ_RTBCOUNT, + (long) ralen); + } else + ap->alen = 0; +#endif /* __KERNEL__ */ } /* - * If we are not low on available data blocks, and the - * underlying logical volume manager is a stripe, and - * the file offset is zero then try to allocate data - * blocks on stripe unit boundary. - * NOTE: ap->aeof is only set if the allocation length - * is >= the stripe unit and the allocation offset is - * at the end of file. + * Normal allocation, done through xfs_alloc_vextent. */ - if (!ap->low && ap->aeof) { - if (!ap->off) { - args.alignment = mp->m_dalign; - atype = args.type; - isaligned = 1; + else { + xfs_agnumber_t ag; + xfs_alloc_arg_t args; + xfs_extlen_t blen; + xfs_extlen_t delta; + int isaligned; + xfs_extlen_t longest; + xfs_extlen_t need; + xfs_extlen_t nextminlen=0; + int notinit; + xfs_perag_t *pag; + xfs_agnumber_t startag; + int tryagain; + + tryagain = isaligned = 0; + args.tp = ap->tp; + args.mp = mp; + args.fsbno = ap->rval; + args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); + blen = 0; + if (nullfb) { + args.type = XFS_ALLOCTYPE_START_BNO; + args.total = ap->total; /* - * Adjust for alignment + * Find the longest available space. + * We're going to try for the whole allocation at once. */ - if (blen > args.alignment && blen <= ap->alen) - args.minlen = blen - args.alignment; - args.minalignslop = 0; - } else { + startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); + notinit = 0; + down_read(&mp->m_peraglock); + while (blen < ap->alen) { + pag = &mp->m_perag[ag]; + if (!pag->pagf_init && + (error = xfs_alloc_pagf_init(mp, args.tp, + ag, XFS_ALLOC_FLAG_TRYLOCK))) { + up_read(&mp->m_peraglock); + return error; + } + /* + * See xfs_alloc_fix_freelist... + */ + if (pag->pagf_init) { + need = XFS_MIN_FREELIST_PAG(pag, mp); + delta = need > pag->pagf_flcount ? + need - pag->pagf_flcount : 0; + longest = (pag->pagf_longest > delta) ? 
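The stripe-alignment path makes two attempts: an exact-bno allocation first, then a near/start-bno retry with alignment turned on. The minlen and slop for the first try are chosen so that minlen + alignment + slop cannot grow between the calls, i.e. the retry never needs more reservation than the first attempt implied. The same computation lifted into a small sketch (dalign is the stripe unit, blen the best free length seen, alen and minlen the requested and minimum lengths):

    static void set_retry_lengths(unsigned int dalign, unsigned int blen,
                                  unsigned int alen, unsigned int minlen,
                                  unsigned int *nextminlen,
                                  unsigned int *minalignslop)
    {
        /* minlen for the aligned retry */
        if (blen > dalign && blen <= alen)
            *nextminlen = blen - dalign;
        else
            *nextminlen = minlen;
        /* slop so minlen+alignment+slop doesn't go up between the calls */
        if (*nextminlen + dalign > minlen + 1)
            *minalignslop = *nextminlen + dalign - minlen - 1;
        else
            *minalignslop = 0;
    }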
+ (pag->pagf_longest - delta) : + (pag->pagf_flcount > 0 || + pag->pagf_longest > 0); + if (blen < longest) + blen = longest; + } else + notinit = 1; + if (++ag == mp->m_sb.sb_agcount) + ag = 0; + if (ag == startag) + break; + } + up_read(&mp->m_peraglock); /* - * First try an exact bno allocation. - * If it fails then do a near or start bno - * allocation with alignment turned on. + * Since the above loop did a BUF_TRYLOCK, it is + * possible that there is space for this request. */ - atype = args.type; - tryagain = 1; - args.type = XFS_ALLOCTYPE_THIS_BNO; - args.alignment = 1; + if (notinit || blen < ap->minlen) + args.minlen = ap->minlen; /* - * Compute the minlen+alignment for the - * next case. Set slop so that the value - * of minlen+alignment+slop doesn't go up - * between the calls. + * If the best seen length is less than the request + * length, use the best as the minimum. + */ + else if (blen < ap->alen) + args.minlen = blen; + /* + * Otherwise we've seen an extent as big as alen, + * use that as the minimum. */ - if (blen > mp->m_dalign && blen <= ap->alen) - nextminlen = blen - mp->m_dalign; - else - nextminlen = args.minlen; - if (nextminlen + mp->m_dalign > args.minlen + 1) - args.minalignslop = - nextminlen + mp->m_dalign - - args.minlen - 1; else - args.minalignslop = 0; + args.minlen = ap->alen; + } else if (ap->low) { + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = args.minlen = ap->minlen; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + args.total = ap->total; + args.minlen = ap->minlen; + } + if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && + (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { + args.prod = ap->ip->i_d.di_extsize; + if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); + } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) { + args.prod = 1; + args.mod = 0; + } else { + args.prod = NBPP >> mp->m_sb.sb_blocklog; + if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) + args.mod = (xfs_extlen_t)(args.prod - args.mod); } - } else { - args.alignment = 1; - args.minalignslop = 0; - } - args.minleft = ap->minleft; - args.wasdel = ap->wasdel; - args.isfl = 0; - args.userdata = ap->userdata; - if ((error = xfs_alloc_vextent(&args))) - return error; - if (tryagain && args.fsbno == NULLFSBLOCK) { - /* - * Exact allocation failed. Now try with alignment - * turned on. - */ - args.type = atype; - args.fsbno = ap->rval; - args.alignment = mp->m_dalign; - args.minlen = nextminlen; - args.minalignslop = 0; - isaligned = 1; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (isaligned && args.fsbno == NULLFSBLOCK) { /* - * allocation failed, so turn off alignment and - * try again. + * If we are not low on available data blocks, and the + * underlying logical volume manager is a stripe, and + * the file offset is zero then try to allocate data + * blocks on stripe unit boundary. + * NOTE: ap->aeof is only set if the allocation length + * is >= the stripe unit and the allocation offset is + * at the end of file. 
*/ - args.type = atype; - args.fsbno = ap->rval; - args.alignment = 0; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (args.fsbno == NULLFSBLOCK && nullfb && - args.minlen > ap->minlen) { - args.minlen = ap->minlen; - args.type = XFS_ALLOCTYPE_START_BNO; - args.fsbno = ap->rval; - if ((error = xfs_alloc_vextent(&args))) - return error; - } - if (args.fsbno == NULLFSBLOCK && nullfb) { - args.fsbno = 0; - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.total = ap->minlen; - args.minleft = 0; + if (!ap->low && ap->aeof) { + if (!ap->off) { + args.alignment = mp->m_dalign; + atype = args.type; + isaligned = 1; + /* + * Adjust for alignment + */ + if (blen > args.alignment && blen <= ap->alen) + args.minlen = blen - args.alignment; + args.minalignslop = 0; + } else { + /* + * First try an exact bno allocation. + * If it fails then do a near or start bno + * allocation with alignment turned on. + */ + atype = args.type; + tryagain = 1; + args.type = XFS_ALLOCTYPE_THIS_BNO; + args.alignment = 1; + /* + * Compute the minlen+alignment for the + * next case. Set slop so that the value + * of minlen+alignment+slop doesn't go up + * between the calls. + */ + if (blen > mp->m_dalign && blen <= ap->alen) + nextminlen = blen - mp->m_dalign; + else + nextminlen = args.minlen; + if (nextminlen + mp->m_dalign > args.minlen + 1) + args.minalignslop = + nextminlen + mp->m_dalign - + args.minlen - 1; + else + args.minalignslop = 0; + } + } else { + args.alignment = 1; + args.minalignslop = 0; + } + args.minleft = ap->minleft; + args.wasdel = ap->wasdel; + args.isfl = 0; + args.userdata = ap->userdata; if ((error = xfs_alloc_vextent(&args))) return error; - ap->low = 1; - } - if (args.fsbno != NULLFSBLOCK) { - ap->firstblock = ap->rval = args.fsbno; - ASSERT(nullfb || fb_agno == args.agno || - (ap->low && fb_agno < args.agno)); - ap->alen = args.len; - ap->ip->i_d.di_nblocks += args.len; - xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); - if (ap->wasdel) - ap->ip->i_delayed_blks -= args.len; - /* - * Adjust the disk quota also. This was reserved - * earlier. - */ - XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, - ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : - XFS_TRANS_DQ_BCOUNT, - (long) args.len); - } else { - ap->rval = NULLFSBLOCK; - ap->alen = 0; + if (tryagain && args.fsbno == NULLFSBLOCK) { + /* + * Exact allocation failed. Now try with alignment + * turned on. + */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = mp->m_dalign; + args.minlen = nextminlen; + args.minalignslop = 0; + isaligned = 1; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (isaligned && args.fsbno == NULLFSBLOCK) { + /* + * allocation failed, so turn off alignment and + * try again. 
+ */ + args.type = atype; + args.fsbno = ap->rval; + args.alignment = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb && + args.minlen > ap->minlen) { + args.minlen = ap->minlen; + args.type = XFS_ALLOCTYPE_START_BNO; + args.fsbno = ap->rval; + if ((error = xfs_alloc_vextent(&args))) + return error; + } + if (args.fsbno == NULLFSBLOCK && nullfb) { + args.fsbno = 0; + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.total = ap->minlen; + args.minleft = 0; + if ((error = xfs_alloc_vextent(&args))) + return error; + ap->low = 1; + } + if (args.fsbno != NULLFSBLOCK) { + ap->firstblock = ap->rval = args.fsbno; + ASSERT(nullfb || fb_agno == args.agno || + (ap->low && fb_agno < args.agno)); + ap->alen = args.len; + ap->ip->i_d.di_nblocks += args.len; + xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE); + if (ap->wasdel) + ap->ip->i_delayed_blks -= args.len; + /* + * Adjust the disk quota also. This was reserved + * earlier. + */ + XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip, + ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : + XFS_TRANS_DQ_BCOUNT, + (long) args.len); + } else { + ap->rval = NULLFSBLOCK; + ap->alen = 0; + } } return 0; -} - -/* - * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file. - * It figures out where to ask the underlying allocator to put the new extent. - */ -STATIC int -xfs_bmap_alloc( - xfs_bmalloca_t *ap) /* bmap alloc argument struct */ -{ - if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata) - return xfs_bmap_rtalloc(ap); - return xfs_bmap_btalloc(ap); +#undef ISVALID } /* * Transform a btree format file with only one leaf node, where the * extents list will fit in the inode, into an extents format file. - * Since the file extents are already in-core, all we have to do is + * Since the extent list is already in-core, all we have to do is * give up the space for the btree root and pitch the leaf block. */ STATIC int /* error */ @@ -2873,7 +2868,7 @@ xfs_bmap_btree_to_extents( } /* - * Called by xfs_bmapi to update file extent records and the btree + * Called by xfs_bmapi to update extent list structure and the btree * after removing space (or undoing a delayed allocation). 
*/ STATIC int /* error */ @@ -2883,7 +2878,7 @@ xfs_bmap_del_extent( xfs_extnum_t idx, /* extent number to update/delete */ xfs_bmap_free_t *flist, /* list of extents to be freed */ xfs_btree_cur_t *cur, /* if null, not a btree */ - xfs_bmbt_irec_t *del, /* data to remove from extents */ + xfs_bmbt_irec_t *del, /* data to remove from extent list */ int *logflagsp, /* inode logging flags */ int whichfork, /* data or attr fork */ int rsvd) /* OK to allocate reserved blocks */ @@ -2908,6 +2903,7 @@ xfs_bmap_del_extent( xfs_filblks_t nblks; /* quota/sb block count */ xfs_bmbt_irec_t new; /* new record to be inserted */ /* REFERENCED */ + xfs_extnum_t nextents; /* number of extents in list */ uint qfield; /* quota field to update */ xfs_filblks_t temp; /* for indirect length calculations */ xfs_filblks_t temp2; /* for indirect length calculations */ @@ -2915,10 +2911,10 @@ xfs_bmap_del_extent( XFS_STATS_INC(xs_del_exlist); mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT((idx >= 0) && (idx < ifp->if_bytes / - (uint)sizeof(xfs_bmbt_rec_t))); + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + ASSERT(idx >= 0 && idx < nextents); ASSERT(del->br_blockcount > 0); - ep = xfs_iext_get_ext(ifp, idx); + ep = &ifp->if_u1.if_extents[idx]; xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= del->br_startoff); del_endoff = del->br_startoff + del->br_blockcount; @@ -2994,7 +2990,7 @@ xfs_bmap_del_extent( * Matches the whole extent. Delete the entry. */ xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); - xfs_iext_remove(ifp, idx, 1); + xfs_bmap_delete_exlist(ip, idx, 1, whichfork); ifp->if_lastex = idx; if (delay) break; @@ -3164,7 +3160,7 @@ xfs_bmap_del_extent( xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, whichfork); - xfs_iext_insert(ifp, idx + 1, 1, &new); + xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork); ifp->if_lastex = idx + 1; break; } @@ -3216,6 +3212,31 @@ xfs_bmap_del_free( kmem_zone_free(xfs_bmap_free_item_zone, free); } +/* + * Remove count entries from the extents array for inode "ip", starting + * at index "idx". Copies the remaining items down over the deleted ones, + * and gives back the excess memory. + */ +STATIC void +xfs_bmap_delete_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting delete index */ + xfs_extnum_t count, /* count of items to delete */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extents in list after */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count; + memmove(&base[idx], &base[idx + count], + (nextents - idx) * sizeof(*base)); + xfs_iext_realloc(ip, -count, whichfork); +} + /* * Convert an extents-format file into a btree-format file. * The new file will have a root block (in the inode) and a single child block. 
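xfs_bmap_del_extent() above keys its work off how the deleted range lines up with the extent containing it, which collapses to two boolean tests. A sketch of the dispatch with decoded records; the trim and split bookkeeping of each case is elided:

    struct irec { unsigned long long startoff, blockcount; };

    static void del_extent_case(const struct irec *got, const struct irec *del)
    {
        unsigned long long got_end = got->startoff + got->blockcount;
        unsigned long long del_end = del->startoff + del->blockcount;

        switch (((got->startoff == del->startoff) << 1) |
                (got_end == del_end)) {
        case 3:  /* matches the whole extent: delete the entry */
            break;
        case 2:  /* matches the front: bump startoff, shrink blockcount */
            break;
        case 1:  /* matches the back: shrink blockcount */
            break;
        case 0:  /* middle of the extent: split it, inserting a new
                  * entry after idx for the tail piece */
            break;
        }
    }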
@@ -3237,13 +3258,13 @@ xfs_bmap_extents_to_btree( xfs_bmbt_rec_t *arp; /* child record pointer */ xfs_bmbt_block_t *block; /* btree root block */ xfs_btree_cur_t *cur; /* bmap btree cursor */ - xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_bmbt_rec_t *ep; /* extent list pointer */ int error; /* error return value */ - xfs_extnum_t i, cnt; /* extent record index */ + xfs_extnum_t i, cnt; /* extent list index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_bmbt_key_t *kp; /* root block key pointer */ xfs_mount_t *mp; /* mount structure */ - xfs_extnum_t nextents; /* number of file extents */ + xfs_extnum_t nextents; /* extent list size */ xfs_bmbt_ptr_t *pp; /* root block address pointer */ ifp = XFS_IFORK_PTR(ip, whichfork); @@ -3322,8 +3343,7 @@ xfs_bmap_extents_to_btree( ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (cnt = i = 0; i < nextents; i++) { - ep = xfs_iext_get_ext(ifp, i); + for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) { if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) { arp->l0 = INT_GET(ep->l0, ARCH_CONVERT); arp->l1 = INT_GET(ep->l1, ARCH_CONVERT); @@ -3352,6 +3372,34 @@ xfs_bmap_extents_to_btree( return 0; } +/* + * Insert new item(s) in the extent list for inode "ip". + * Count new items are inserted at offset idx. + */ +STATIC void +xfs_bmap_insert_exlist( + xfs_inode_t *ip, /* incore inode pointer */ + xfs_extnum_t idx, /* starting index of new items */ + xfs_extnum_t count, /* number of inserted items */ + xfs_bmbt_irec_t *new, /* items to insert */ + int whichfork) /* data or attr fork */ +{ + xfs_bmbt_rec_t *base; /* extent list base */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* extent list size */ + xfs_extnum_t to; /* extent list index */ + + ifp = XFS_IFORK_PTR(ip, whichfork); + ASSERT(ifp->if_flags & XFS_IFEXTENTS); + xfs_iext_realloc(ip, count, whichfork); + base = ifp->if_u1.if_extents; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + memmove(&base[idx + count], &base[idx], + (nextents - (idx + count)) * sizeof(*base)); + for (to = idx; to < idx + count; to++, new++) + xfs_bmbt_set_all(&base[to], new); +} + /* * Helper routine to reset inode di_forkoff field when switching * attribute fork from local to extent format - we reset it where @@ -3409,13 +3457,12 @@ xfs_bmap_local_to_extents( error = 0; if (ifp->if_bytes) { xfs_alloc_arg_t args; /* allocation arguments */ - xfs_buf_t *bp; /* buffer for extent block */ - xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_buf_t *bp; /* buffer for extent list block */ + xfs_bmbt_rec_t *ep; /* extent list pointer */ args.tp = tp; args.mp = ip->i_mount; - ASSERT((ifp->if_flags & - (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE); + ASSERT(ifp->if_flags & XFS_IFINLINE); /* * Allocate a block. We know we need only one, since the * file currently fits in an inode. 
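The two exlist helpers restored above are plain array surgery. A self-contained userspace sketch of the same semantics, with a heap array standing in for ifp->if_u1.if_extents and realloc() standing in for xfs_iext_realloc() (error handling elided; the real insert also encodes each incore record via xfs_bmbt_set_all() rather than copying raw):

    #include <stdlib.h>
    #include <string.h>

    typedef struct { unsigned long long l0, l1; } rec_t;  /* cf. xfs_bmbt_rec_t */

    struct exlist {
        rec_t *base;    /* cf. ifp->if_u1.if_extents */
        int   nextents;
    };

    /* cf. xfs_bmap_delete_exlist(): slide the tail down, then shrink */
    static void delete_exlist(struct exlist *x, int idx, int count)
    {
        memmove(&x->base[idx], &x->base[idx + count],
                (x->nextents - idx - count) * sizeof(rec_t));
        x->nextents -= count;
        x->base = realloc(x->base, x->nextents * sizeof(rec_t));
    }

    /* cf. xfs_bmap_insert_exlist(): grow, slide the tail up, copy in */
    static void insert_exlist(struct exlist *x, int idx, int count,
                              const rec_t *new)
    {
        x->base = realloc(x->base, (x->nextents + count) * sizeof(rec_t));
        memmove(&x->base[idx + count], &x->base[idx],
                (x->nextents - idx) * sizeof(rec_t));
        memcpy(&x->base[idx], new, count * sizeof(rec_t));
        x->nextents += count;
    }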
@@ -3445,8 +3492,8 @@ xfs_bmap_local_to_extents( xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); xfs_bmap_forkoff_reset(args.mp, ip, whichfork); xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); - xfs_iext_add(ifp, 0, 1); - ep = xfs_iext_get_ext(ifp, 0); + xfs_iext_realloc(ip, 1, whichfork); + ep = ifp->if_u1.if_extents; xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); XFS_IFORK_NEXT_SET(ip, whichfork, 1); @@ -3471,7 +3518,7 @@ xfs_bmbt_rec_t * /* pointer to found extent entry */ xfs_bmap_do_search_extents( xfs_bmbt_rec_t *base, /* base of extent list */ xfs_extnum_t lastx, /* last extent index used */ - xfs_extnum_t nextents, /* number of file extents */ + xfs_extnum_t nextents, /* extent list size */ xfs_fileoff_t bno, /* block number searched for */ int *eofp, /* out: end of file found */ xfs_extnum_t *lastxp, /* out: last extent index */ @@ -3522,9 +3569,9 @@ xfs_bmap_do_search_extents( got.br_blockcount = xfs_bmbt_get_blockcount(ep); *eofp = 0; } else { + /* binary search the extents array */ low = 0; high = nextents - 1; - /* binary search the extents array */ while (low <= high) { XFS_STATS_INC(xs_cmp_exlist); lastx = (low + high) >> 1; @@ -3574,57 +3621,6 @@ xfs_bmap_do_search_extents( return ep; } -/* - * Search the extent records for the entry containing block bno. - * If bno lies in a hole, point to the next entry. If bno lies - * past eof, *eofp will be set, and *prevp will contain the last - * entry (null if none). Else, *lastxp will be set to the index - * of the found entry; *gotp will contain the entry. - */ -xfs_bmbt_rec_t * /* pointer to found extent entry */ -xfs_bmap_search_multi_extents( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_fileoff_t bno, /* block number searched for */ - int *eofp, /* out: end of file found */ - xfs_extnum_t *lastxp, /* out: last extent index */ - xfs_bmbt_irec_t *gotp, /* out: extent entry found */ - xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ -{ - xfs_bmbt_rec_t *ep; /* extent record pointer */ - xfs_extnum_t lastx; /* last extent index */ - - /* - * Initialize the extent entry structure to catch access to - * uninitialized br_startblock field. - */ - gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL; - gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL; - gotp->br_state = XFS_EXT_INVALID; -#if XFS_BIG_BLKNOS - gotp->br_startblock = 0xffffa5a5a5a5a5a5LL; -#else - gotp->br_startblock = 0xffffa5a5; -#endif - prevp->br_startoff = NULLFILEOFF; - - ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); - if (lastx > 0) { - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp); - } - if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) { - xfs_bmbt_get_all(ep, gotp); - *eofp = 0; - } else { - if (lastx > 0) { - *gotp = *prevp; - } - *eofp = 1; - ep = NULL; - } - *lastxp = lastx; - return ep; -} - /* * Search the extents list for the inode, for the extent containing bno. * If bno lies in a hole, point to the next entry. 
If bno lies past eof, @@ -3643,14 +3639,20 @@ xfs_bmap_search_extents( xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */ { xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_extnum_t lastx; /* last extent index used */ + xfs_extnum_t nextents; /* extent list size */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ int rt; /* realtime flag */ XFS_STATS_INC(xs_look_exlist); ifp = XFS_IFORK_PTR(ip, whichfork); + lastx = ifp->if_lastex; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + base = &ifp->if_u1.if_extents[0]; - ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp); - + ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp, + lastxp, gotp, prevp); rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip); if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) { cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld " @@ -3730,7 +3732,7 @@ xfs_bmap_trace_addentry( } /* - * Add bmap trace entry prior to a call to xfs_iext_remove. + * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist. */ STATIC void xfs_bmap_trace_delete( @@ -3745,13 +3747,13 @@ xfs_bmap_trace_delete( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx, - cnt, xfs_iext_get_ext(ifp, idx), - cnt == 2 ? xfs_iext_get_ext(ifp, idx + 1) : NULL, + cnt, &ifp->if_u1.if_extents[idx], + cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL, whichfork); } /* - * Add bmap trace entry prior to a call to xfs_iext_insert, or + * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or * reading in the extents list from the disk (in the btree). */ STATIC void @@ -3781,7 +3783,7 @@ xfs_bmap_trace_insert( } /* - * Add bmap trace entry after updating an extent record in place. + * Add bmap trace entry after updating an extent list entry in place. */ STATIC void xfs_bmap_trace_post_update( @@ -3795,11 +3797,11 @@ xfs_bmap_trace_post_update( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx, - 1, xfs_iext_get_ext(ifp, idx), NULL, whichfork); + 1, &ifp->if_u1.if_extents[idx], NULL, whichfork); } /* - * Add bmap trace entry prior to updating an extent record in place. + * Add bmap trace entry prior to updating an extent list entry in place. 
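The search path above caches if_lastex as a hint and otherwise binary-searches the in-core array. A minimal sketch of that search with records already decoded (the real routine works on the packed xfs_bmbt_rec_t form and also reports the previous entry; this sketch assumes nextents > 0):

    struct irec { unsigned long long startoff, blockcount; };

    /* find the extent containing bno; in a hole, point at the next entry;
     * past eof, set *eofp and return the last index */
    static int search_extents(const struct irec *base, int nextents,
                              unsigned long long bno, int *eofp)
    {
        int low = 0, high = nextents - 1, mid;

        *eofp = 0;
        while (low <= high) {
            mid = (low + high) >> 1;
            if (bno < base[mid].startoff)
                high = mid - 1;
            else if (bno >= base[mid].startoff + base[mid].blockcount)
                low = mid + 1;
            else
                return mid;          /* bno lies inside this extent */
        }
        if (low < nextents)
            return low;              /* hole: next entry after bno */
        *eofp = 1;
        return nextents - 1;         /* past the last extent */
    }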
*/ STATIC void xfs_bmap_trace_pre_update( @@ -3813,7 +3815,7 @@ xfs_bmap_trace_pre_update( ifp = XFS_IFORK_PTR(ip, whichfork); xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1, - xfs_iext_get_ext(ifp, idx), NULL, whichfork); + &ifp->if_u1.if_extents[idx], NULL, whichfork); } #endif /* XFS_BMAP_TRACE */ @@ -3890,7 +3892,7 @@ xfs_bmap_add_attrfork( int rsvd) /* xact may use reserved blks */ { xfs_fsblock_t firstblock; /* 1st block/ag allocated */ - xfs_bmap_free_t flist; /* freed extent records */ + xfs_bmap_free_t flist; /* freed extent list */ xfs_mount_t *mp; /* mount structure */ xfs_trans_t *tp; /* transaction pointer */ unsigned long s; /* spinlock spl value */ @@ -4144,7 +4146,7 @@ xfs_bmap_finish( xfs_efd_log_item_t *efd; /* extent free data */ xfs_efi_log_item_t *efi; /* extent free intention */ int error; /* error return value */ - xfs_bmap_free_item_t *free; /* free extent item */ + xfs_bmap_free_item_t *free; /* free extent list item */ unsigned int logres; /* new log reservation */ unsigned int logcount; /* new log count */ xfs_mount_t *mp; /* filesystem mount structure */ @@ -4240,9 +4242,9 @@ xfs_bmap_first_unused( xfs_fileoff_t *first_unused, /* unused block */ int whichfork) /* data or attr fork */ { + xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to an extent entry */ int error; /* error return value */ - int idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_fileoff_t lastaddr; /* last block number seen */ xfs_fileoff_t lowest; /* lowest useful block */ @@ -4263,8 +4265,10 @@ xfs_bmap_first_unused( return error; lowest = *first_unused; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) { - ep = xfs_iext_get_ext(ifp, idx); + base = &ifp->if_u1.if_extents[0]; + for (lastaddr = 0, max = lowest, ep = base; + ep < &base[nextents]; + ep++) { off = xfs_bmbt_get_startoff(ep); /* * See if the hole before this extent will work. @@ -4283,8 +4287,8 @@ xfs_bmap_first_unused( /* * Returns the file-relative block number of the last block + 1 before * last_block (input value) in the file. - * This is not based on i_size, it is based on the extent records. - * Returns 0 for local files, as they do not have extent records. + * This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. */ int /* error */ xfs_bmap_last_before( @@ -4331,8 +4335,8 @@ xfs_bmap_last_before( /* * Returns the file-relative block number of the first block past eof in - * the file. This is not based on i_size, it is based on the extent records. - * Returns 0 for local files, as they do not have extent records. + * the file. This is not based on i_size, it is based on the extent list. + * Returns 0 for local files, as they do not have an extent list. 
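xfs_bmap_first_unused() above is one forward scan for a gap of the requested size, tracking the highest file offset mapped so far. The same loop in stand-alone form, assuming a sorted array of decoded extents:

    struct irec { unsigned long long startoff, blockcount; };

    /* first hole of at least "len" blocks at or after "lowest" */
    static unsigned long long first_unused(const struct irec *ext, int nextents,
                                           unsigned long long len,
                                           unsigned long long lowest)
    {
        unsigned long long lastaddr = 0;
        unsigned long long max = lowest;
        int i;

        for (i = 0; i < nextents; i++) {
            unsigned long long off = ext[i].startoff;

            /* does the hole before this extent work? */
            if (off >= lowest + len && off - max >= len)
                return max;
            lastaddr = off + ext[i].blockcount;
            if (max < lastaddr)
                max = lastaddr;
        }
        return max;                  /* hole after the last extent */
    }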
*/ int /* error */ xfs_bmap_last_offset( @@ -4341,6 +4345,7 @@ xfs_bmap_last_offset( xfs_fileoff_t *last_block, /* last block */ int whichfork) /* data or attr fork */ { + xfs_bmbt_rec_t *base; /* base of extent array */ xfs_bmbt_rec_t *ep; /* pointer to last extent */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ @@ -4363,7 +4368,9 @@ xfs_bmap_last_offset( *last_block = 0; return 0; } - ep = xfs_iext_get_ext(ifp, nextents - 1); + base = &ifp->if_u1.if_extents[0]; + ASSERT(base != NULL); + ep = &base[nextents - 1]; *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep); return 0; } @@ -4393,7 +4400,7 @@ xfs_bmap_one_block( return 0; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); - ep = xfs_iext_get_ext(ifp, 0); + ep = ifp->if_u1.if_extents; xfs_bmbt_get_all(ep, &s); rval = s.br_startoff == 0 && s.br_blockcount == 1; if (rval && whichfork == XFS_DATA_FORK) @@ -4428,6 +4435,7 @@ xfs_bmap_read_extents( xfs_bmbt_ptr_t *pp; /* pointer to block address */ /* REFERENCED */ xfs_extnum_t room; /* number of entries there's room for */ + xfs_bmbt_rec_t *trp; /* target record pointer */ bno = NULLFSBLOCK; mp = ip->i_mount; @@ -4470,16 +4478,16 @@ xfs_bmap_read_extents( /* * Here with bp and block set to the leftmost leaf node in the tree. */ - room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + room = ifp->if_bytes / (uint)sizeof(*trp); + trp = ifp->if_u1.if_extents; i = 0; /* - * Loop over all leaf nodes. Copy information to the extent records. + * Loop over all leaf nodes. Copy information to the extent list. */ for (;;) { - xfs_bmbt_rec_t *frp, *trp; + xfs_bmbt_rec_t *frp, *temp; xfs_fsblock_t nextbno; xfs_extnum_t num_recs; - xfs_extnum_t start; num_recs = be16_to_cpu(block->bb_numrecs); @@ -4503,13 +4511,12 @@ xfs_bmap_read_extents( if (nextbno != NULLFSBLOCK) xfs_btree_reada_bufl(mp, nextbno, 1); /* - * Copy records into the extent records. + * Copy records into the extent list. */ frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - start = i; - for (j = 0; j < num_recs; j++, i++, frp++) { - trp = xfs_iext_get_ext(ifp, i); + temp = trp; + for (j = 0; j < num_recs; j++, frp++, trp++) { trp->l0 = INT_GET(frp->l0, ARCH_CONVERT); trp->l1 = INT_GET(frp->l1, ARCH_CONVERT); } @@ -4519,14 +4526,14 @@ xfs_bmap_read_extents( * any "older" data bmap btree records for a * set bit in the "extent flag" position. */ - if (unlikely(xfs_check_nostate_extents(ifp, - start, num_recs))) { + if (unlikely(xfs_check_nostate_extents(temp, num_recs))) { XFS_ERROR_REPORT("xfs_bmap_read_extents(2)", XFS_ERRLEVEL_LOW, ip->i_mount); goto error0; } } + i += num_recs; xfs_trans_brelse(tp, bp); bno = nextbno; /* @@ -4539,7 +4546,7 @@ xfs_bmap_read_extents( return error; block = XFS_BUF_TO_BMBT_BLOCK(bp); } - ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); + ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp)); ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); xfs_bmap_trace_exlist(fname, ip, i, whichfork); return 0; @@ -4550,7 +4557,7 @@ xfs_bmap_read_extents( #ifdef XFS_BMAP_TRACE /* - * Add bmap trace insert entries for all the contents of the extent records. + * Add bmap trace insert entries for all the contents of the extent list. 
*/ void xfs_bmap_trace_exlist( @@ -4559,15 +4566,16 @@ xfs_bmap_trace_exlist( xfs_extnum_t cnt, /* count of entries in the list */ int whichfork) /* data or attr fork */ { - xfs_bmbt_rec_t *ep; /* current extent record */ - xfs_extnum_t idx; /* extent record index */ + xfs_bmbt_rec_t *base; /* base of extent list */ + xfs_bmbt_rec_t *ep; /* current entry in extent list */ + xfs_extnum_t idx; /* extent list entry number */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_irec_t s; /* file extent record */ + xfs_bmbt_irec_t s; /* extent list record */ ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); - for (idx = 0; idx < cnt; idx++) { - ep = xfs_iext_get_ext(ifp, idx); + ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base)); + base = ifp->if_u1.if_extents; + for (idx = 0, ep = base; idx < cnt; idx++, ep++) { xfs_bmbt_get_all(ep, &s); xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, whichfork); @@ -4653,10 +4661,14 @@ xfs_bmapi( xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */ xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_fileoff_t end; /* end of mapped file region */ - int eof; /* we've hit the end of extents */ - xfs_bmbt_rec_t *ep; /* extent record pointer */ + int eof; /* we've hit the end of extent list */ + char contig; /* allocation must be one extent */ + char delay; /* this request is for delayed alloc */ + char exact; /* don't do all of wasdelayed extent */ + char convert; /* unwritten extent I/O completion */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ int error; /* error return */ - xfs_bmbt_irec_t got; /* current file extent record */ + xfs_bmbt_irec_t got; /* current extent list record */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extlen_t indlen; /* indirect blocks length */ xfs_extnum_t lastx; /* last useful extent number */ @@ -4668,13 +4680,17 @@ xfs_bmapi( int nallocs; /* number of extents alloc\'d */ xfs_extnum_t nextents; /* number of extents in file */ xfs_fileoff_t obno; /* old block number (offset) */ - xfs_bmbt_irec_t prev; /* previous file extent record */ + xfs_bmbt_irec_t prev; /* previous extent list record */ int tmp_logflags; /* temp flags holder */ int whichfork; /* data or attr fork */ char inhole; /* current location is hole in file */ + char stateless; /* ignore state flag set */ + char trim; /* output trimmed to match range */ + char userdata; /* allocating non-metadata */ char wasdelay; /* old extent was delayed */ char wr; /* this is a write request */ char rt; /* this is a realtime file */ + char rsvd; /* OK to allocate reserved blocks */ #ifdef DEBUG xfs_fileoff_t orig_bno; /* original block number value */ int orig_flags; /* original flags arg value */ @@ -4711,8 +4727,15 @@ xfs_bmapi( XFS_STATS_INC(xs_blk_mapw); else XFS_STATS_INC(xs_blk_mapr); + delay = (flags & XFS_BMAPI_DELAY) != 0; + trim = (flags & XFS_BMAPI_ENTIRE) == 0; + userdata = (flags & XFS_BMAPI_METADATA) == 0; + convert = (flags & XFS_BMAPI_CONVERT) != 0; + exact = (flags & XFS_BMAPI_EXACT) != 0; + rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; + contig = (flags & XFS_BMAPI_CONTIG) != 0; /* - * IGSTATE flag is used to combine extents which + * stateless is used to combine extents which * differ only due to the state of the extents. * This technique is used from xfs_getbmap() * when the caller does not wish to see the @@ -4728,9 +4751,10 @@ xfs_bmapi( * xfs_strat_comp(), where the xfs_bmapi() call * is transactioned, and the extents combined. 
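
xfs_bmapi() above now decodes each XFS_BMAPI_* bit into a local char once at entry (delay, trim, userdata, convert, exact, rsvd, contig), so the long mapping loop tests a byte instead of re-masking the flags word at every use. The pattern, with made-up flag values (the real XFS_BMAPI_* constants differ):

/* Hypothetical flag bits for illustration only. */
#define BMAPI_DELAY	0x01	/* delayed-allocation request */
#define BMAPI_ENTIRE	0x02	/* return the whole extent    */
#define BMAPI_METADATA	0x04	/* allocation is for metadata */

static void map_blocks(int flags)
{
	char delay    = (flags & BMAPI_DELAY) != 0;	/* delalloc    */
	char trim     = (flags & BMAPI_ENTIRE) == 0;	/* trim output */
	char userdata = (flags & BMAPI_METADATA) == 0;	/* user data   */

	/* ... the mapping loop then branches on the chars only ... */
	(void)delay; (void)trim; (void)userdata;
}
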
*/ - if ((flags & XFS_BMAPI_IGSTATE) && wr) /* if writing unwritten space */ - wr = 0; /* no allocations are allowed */ - ASSERT(wr || !(flags & XFS_BMAPI_DELAY)); + stateless = (flags & XFS_BMAPI_IGSTATE) != 0; + if (stateless && wr) /* if writing unwritten space, no */ + wr = 0; /* allocations are allowed */ + ASSERT(wr || !delay); logflags = 0; nallocs = 0; cur = NULL; @@ -4765,7 +4789,7 @@ xfs_bmapi( if (eof && !wr) got.br_startoff = end; inhole = eof || got.br_startoff > bno; - wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) && + wasdelay = wr && !inhole && !delay && ISNULLSTARTBLOCK(got.br_startblock); /* * First, deal with the hole before the allocated space @@ -4777,11 +4801,11 @@ xfs_bmapi( * allocate the stuff asked for in this bmap call * but that wouldn't be as good. */ - if (wasdelay && !(flags & XFS_BMAPI_EXACT)) { + if (wasdelay && !exact) { alen = (xfs_extlen_t)got.br_blockcount; aoff = got.br_startoff; if (lastx != NULLEXTNUM && lastx) { - ep = xfs_iext_get_ext(ifp, lastx - 1); + ep = &ifp->if_u1.if_extents[lastx - 1]; xfs_bmbt_get_all(ep, &prev); } } else if (wasdelay) { @@ -4799,8 +4823,8 @@ xfs_bmapi( got.br_startoff - bno); aoff = bno; } - minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1; - if (flags & XFS_BMAPI_DELAY) { + minlen = contig ? alen : 1; + if (delay) { xfs_extlen_t extsz; /* Figure out the extent size, adjust alen */ @@ -4813,9 +4837,7 @@ xfs_bmapi( if (extsz) { error = xfs_bmap_extsize_align(mp, &got, &prev, extsz, - rt, eof, - flags&XFS_BMAPI_DELAY, - flags&XFS_BMAPI_CONVERT, + rt, eof, delay, convert, &aoff, &alen); ASSERT(!error); } @@ -4853,29 +4875,24 @@ xfs_bmapi( if (rt) { error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - -(extsz), (flags & - XFS_BMAPI_RSVBLOCKS)); + -(extsz), rsvd); } else { error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - -(alen), (flags & - XFS_BMAPI_RSVBLOCKS)); + -(alen), rsvd); } if (!error) { error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - -(indlen), (flags & - XFS_BMAPI_RSVBLOCKS)); + -(indlen), rsvd); if (error && rt) xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, - extsz, (flags & - XFS_BMAPI_RSVBLOCKS)); + extsz, rsvd); else if (error) xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, - alen, (flags & - XFS_BMAPI_RSVBLOCKS)); + alen, rsvd); } if (error) { @@ -4908,7 +4925,7 @@ xfs_bmapi( /* Indicate if this is the first user data * in the file, or just any user data. */ - if (!(flags & XFS_BMAPI_METADATA)) { + if (userdata) { bma.userdata = (aoff == 0) ? XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA; @@ -4920,7 +4937,7 @@ xfs_bmapi( bma.firstblock = *firstblock; bma.alen = alen; bma.off = aoff; - bma.conv = (flags & XFS_BMAPI_CONVERT); + bma.conv = convert; bma.wasdel = wasdelay; bma.minlen = minlen; bma.low = flist->xbf_low; @@ -4931,8 +4948,7 @@ xfs_bmapi( * is larger than a stripe unit. 
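
The delayed-allocation branch above reserves two counters in sequence (real-time extents or data blocks, then the indirect-block worth) and, when the second xfs_mod_incore_sb() call fails, puts the first reservation back before bailing out. The shape of that reserve-or-undo sequence, sketched with a hypothetical counter helper that fails by returning nonzero:

/*
 * Hypothetical stand-in for xfs_mod_incore_sb(): apply delta to a
 * counter, refusing to drive it negative.
 */
static int mod_counter(long long *counter, long long delta)
{
	if (*counter + delta < 0)
		return -1;
	*counter += delta;
	return 0;
}

/*
 * Reserve alen data blocks plus indlen indirect blocks, undoing the
 * first reservation if the second one fails.
 */
static int reserve_blocks(long long *fdblocks, long long alen,
			  long long indlen)
{
	int error;

	error = mod_counter(fdblocks, -alen);
	if (error)
		return error;
	error = mod_counter(fdblocks, -indlen);
	if (error)
		mod_counter(fdblocks, alen);	/* roll back step one */
	return error;
}
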
*/ if (mp->m_dalign && alen >= mp->m_dalign && - (!(flags & XFS_BMAPI_METADATA)) && - (whichfork == XFS_DATA_FORK)) { + userdata && whichfork == XFS_DATA_FORK) { if ((error = xfs_bmap_isaeof(ip, aoff, whichfork, &bma.aeof))) goto error0; @@ -4995,19 +5011,19 @@ xfs_bmapi( } error = xfs_bmap_add_extent(ip, lastx, &cur, &got, firstblock, flist, &tmp_logflags, whichfork, - (flags & XFS_BMAPI_RSVBLOCKS)); + rsvd); logflags |= tmp_logflags; if (error) goto error0; lastx = ifp->if_lastex; - ep = xfs_iext_get_ext(ifp, lastx); + ep = &ifp->if_u1.if_extents[lastx]; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(ep, &got); ASSERT(got.br_startoff <= aoff); ASSERT(got.br_startoff + got.br_blockcount >= aoff + alen); #ifdef DEBUG - if (flags & XFS_BMAPI_DELAY) { + if (delay) { ASSERT(ISNULLSTARTBLOCK(got.br_startblock)); ASSERT(STARTBLOCKVAL(got.br_startblock) > 0); } @@ -5036,15 +5052,14 @@ xfs_bmapi( * Then deal with the allocated space we found. */ ASSERT(ep != NULL); - if (!(flags & XFS_BMAPI_ENTIRE) && - (got.br_startoff + got.br_blockcount > obno)) { + if (trim && (got.br_startoff + got.br_blockcount > obno)) { if (obno > bno) bno = obno; ASSERT((bno >= obno) || (n == 0)); ASSERT(bno < end); mval->br_startoff = bno; if (ISNULLSTARTBLOCK(got.br_startblock)) { - ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); + ASSERT(!wr || delay); mval->br_startblock = DELAYSTARTBLOCK; } else mval->br_startblock = @@ -5066,7 +5081,7 @@ xfs_bmapi( } else { *mval = got; if (ISNULLSTARTBLOCK(mval->br_startblock)) { - ASSERT(!wr || (flags & XFS_BMAPI_DELAY)); + ASSERT(!wr || delay); mval->br_startblock = DELAYSTARTBLOCK; } } @@ -5092,12 +5107,12 @@ xfs_bmapi( mval->br_state = XFS_EXT_NORM; error = xfs_bmap_add_extent(ip, lastx, &cur, mval, firstblock, flist, &tmp_logflags, whichfork, - (flags & XFS_BMAPI_RSVBLOCKS)); + rsvd); logflags |= tmp_logflags; if (error) goto error0; lastx = ifp->if_lastex; - ep = xfs_iext_get_ext(ifp, lastx); + ep = &ifp->if_u1.if_extents[lastx]; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); xfs_bmbt_get_all(ep, &got); /* @@ -5109,10 +5124,9 @@ xfs_bmapi( continue; } - ASSERT((flags & XFS_BMAPI_ENTIRE) || + ASSERT(!trim || ((mval->br_startoff + mval->br_blockcount) <= end)); - ASSERT((flags & XFS_BMAPI_ENTIRE) || - (mval->br_blockcount <= len) || + ASSERT(!trim || (mval->br_blockcount <= len) || (mval->br_startoff < obno)); bno = mval->br_startoff + mval->br_blockcount; len = end - bno; @@ -5127,8 +5141,7 @@ xfs_bmapi( mval[-1].br_startblock != HOLESTARTBLOCK && mval->br_startblock == mval[-1].br_startblock + mval[-1].br_blockcount && - ((flags & XFS_BMAPI_IGSTATE) || - mval[-1].br_state == mval->br_state)) { + (stateless || mval[-1].br_state == mval->br_state)) { ASSERT(mval->br_startoff == mval[-1].br_startoff + mval[-1].br_blockcount); mval[-1].br_blockcount += mval->br_blockcount; @@ -5155,7 +5168,8 @@ xfs_bmapi( /* * Else go on to the next record. */ - ep = xfs_iext_get_ext(ifp, ++lastx); + ep++; + lastx++; if (lastx >= nextents) { eof = 1; prev = got; @@ -5185,7 +5199,7 @@ xfs_bmapi( error0: /* * Log everything. Do this after conversion, there's no point in - * logging the extent records if we've converted to btree format. + * logging the extent list if we've converted to btree format. */ if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) @@ -5238,12 +5252,12 @@ xfs_bmapi_single( xfs_fsblock_t *fsb, /* output: mapped block */ xfs_fileoff_t bno) /* starting file offs. 
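
Near the bottom of its loop, xfs_bmapi() coalesces the mapping it just produced with the previous entry in mval[] when the file offsets and disk blocks both abut and (unless IGSTATE asked for state-blind output) the extent states match; holes and delayed allocations are excluded by the surrounding checks, which are omitted here. The core of the merge test, over a simplified mapping record (irec is a stand-in for xfs_bmbt_irec_t):

typedef unsigned long long fileoff_t;

/* Simplified stand-in for xfs_bmbt_irec_t. */
struct irec {
	fileoff_t startoff;	/* file offset of mapping */
	fileoff_t startblock;	/* first disk block       */
	fileoff_t blockcount;	/* length in blocks       */
	int	  state;	/* written/unwritten      */
};

/* Fold cur into prev when contiguous; return 1 if merged. */
static int try_merge(struct irec *prev, const struct irec *cur,
		     int stateless)
{
	if (prev->startoff + prev->blockcount != cur->startoff)
		return 0;
	if (prev->startblock + prev->blockcount != cur->startblock)
		return 0;
	if (!stateless && prev->state != cur->state)
		return 0;
	prev->blockcount += cur->blockcount;
	return 1;
}
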
mapped */ { - int eof; /* we've hit the end of extents */ + int eof; /* we've hit the end of extent list */ int error; /* error return */ - xfs_bmbt_irec_t got; /* current file extent record */ + xfs_bmbt_irec_t got; /* current extent list record */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t lastx; /* last useful extent number */ - xfs_bmbt_irec_t prev; /* previous file extent record */ + xfs_bmbt_irec_t prev; /* previous extent list record */ ifp = XFS_IFORK_PTR(ip, whichfork); if (unlikely( @@ -5298,18 +5312,18 @@ xfs_bunmapi( xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bmbt_irec_t del; /* extent being deleted */ int eof; /* is deleting at eof */ - xfs_bmbt_rec_t *ep; /* extent record pointer */ + xfs_bmbt_rec_t *ep; /* extent list entry pointer */ int error; /* error return value */ xfs_extnum_t extno; /* extent number in list */ - xfs_bmbt_irec_t got; /* current extent record */ + xfs_bmbt_irec_t got; /* current extent list entry */ xfs_ifork_t *ifp; /* inode fork pointer */ int isrt; /* freeing in rt area */ xfs_extnum_t lastx; /* last extent index used */ int logflags; /* transaction logging flags */ xfs_extlen_t mod; /* rt extent offset */ xfs_mount_t *mp; /* mount structure */ - xfs_extnum_t nextents; /* number of file extents */ - xfs_bmbt_irec_t prev; /* previous extent record */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_irec_t prev; /* previous extent list entry */ xfs_fileoff_t start; /* first file offset deleted */ int tmp_logflags; /* partial logging flags */ int wasdel; /* was a delayed alloc extent */ @@ -5355,7 +5369,7 @@ xfs_bunmapi( * file, back up to the last block if so... */ if (eof) { - ep = xfs_iext_get_ext(ifp, --lastx); + ep = &ifp->if_u1.if_extents[--lastx]; xfs_bmbt_get_all(ep, &got); bno = got.br_startoff + got.br_blockcount - 1; } @@ -5379,7 +5393,7 @@ xfs_bunmapi( if (got.br_startoff > bno) { if (--lastx < 0) break; - ep = xfs_iext_get_ext(ifp, lastx); + ep--; xfs_bmbt_get_all(ep, &got); } /* @@ -5426,8 +5440,7 @@ xfs_bunmapi( del.br_blockcount : mod; if (bno < got.br_startoff) { if (--lastx >= 0) - xfs_bmbt_get_all(xfs_iext_get_ext( - ifp, lastx), &got); + xfs_bmbt_get_all(--ep, &got); } continue; } @@ -5487,8 +5500,7 @@ xfs_bunmapi( * try again. */ ASSERT(lastx > 0); - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, - lastx - 1), &prev); + xfs_bmbt_get_all(ep - 1, &prev); ASSERT(prev.br_state == XFS_EXT_NORM); ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock)); ASSERT(del.br_startblock == @@ -5575,12 +5587,12 @@ xfs_bunmapi( * If not done go on to the next (previous) record. * Reset ep in case the extents array was re-alloced. */ - ep = xfs_iext_get_ext(ifp, lastx); + ep = &ifp->if_u1.if_extents[lastx]; if (bno != (xfs_fileoff_t)-1 && bno >= start) { if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || xfs_bmbt_get_startoff(ep) > bno) { - if (--lastx >= 0) - ep = xfs_iext_get_ext(ifp, lastx); + lastx--; + ep--; } if (lastx >= 0) xfs_bmbt_get_all(ep, &got); @@ -5624,7 +5636,7 @@ xfs_bunmapi( error0: /* * Log everything. Do this after conversion, there's no point in - * logging the extent records if we've converted to btree format. + * logging the extent list if we've converted to btree format. 
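
xfs_bunmapi() above walks the extent list from the end of the range toward the front, and with ep now a raw array pointer the index and the pointer must be stepped together (the paired lastx--/ep-- sites in the hunks). A sketch of that backward cursor movement over a sorted extent array (stand-in types again):

typedef unsigned long long fileoff_t;

struct ext {
	fileoff_t startoff;	/* first file block of the extent */
	fileoff_t blockcount;	/* length of the extent in blocks */
};

/*
 * Step the cursor back until the current record starts at or before
 * bno, keeping lastx and ep in lockstep as the rewritten
 * xfs_bunmapi() loop must.
 */
static struct ext *
back_up_to(struct ext *base, int *lastx, fileoff_t bno)
{
	struct ext *ep = &base[*lastx];

	while (*lastx > 0 && ep->startoff > bno) {
		(*lastx)--;
		ep--;
	}
	return ep;
}
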
*/ if ((logflags & XFS_ILOG_FEXT(whichfork)) && XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) @@ -5880,9 +5892,9 @@ xfs_bmap_isaeof( { int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *lastrec; /* extent record pointer */ - xfs_extnum_t nextents; /* number of file extents */ - xfs_bmbt_irec_t s; /* expanded extent record */ + xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ + xfs_extnum_t nextents; /* size of extent list */ + xfs_bmbt_irec_t s; /* expanded extent list entry */ ASSERT(whichfork == XFS_DATA_FORK); ifp = XFS_IFORK_PTR(ip, whichfork); @@ -5897,7 +5909,7 @@ xfs_bmap_isaeof( /* * Go to the last extent */ - lastrec = xfs_iext_get_ext(ifp, nextents - 1); + lastrec = &ifp->if_u1.if_extents[nextents - 1]; xfs_bmbt_get_all(lastrec, &s); /* * Check we are allocating in the last extent (for delayed allocations) @@ -5924,8 +5936,8 @@ xfs_bmap_eof( xfs_fsblock_t blockcount; /* extent block count */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_bmbt_rec_t *lastrec; /* extent record pointer */ - xfs_extnum_t nextents; /* number of file extents */ + xfs_bmbt_rec_t *lastrec; /* extent list entry pointer */ + xfs_extnum_t nextents; /* size of extent list */ xfs_fileoff_t startoff; /* extent starting file offset */ ASSERT(whichfork == XFS_DATA_FORK); @@ -5941,7 +5953,7 @@ xfs_bmap_eof( /* * Go to the last extent */ - lastrec = xfs_iext_get_ext(ifp, nextents - 1); + lastrec = &ifp->if_u1.if_extents[nextents - 1]; startoff = xfs_bmbt_get_startoff(lastrec); blockcount = xfs_bmbt_get_blockcount(lastrec); *eof = endoff >= startoff + blockcount; @@ -5957,21 +5969,18 @@ xfs_bmap_check_extents( xfs_inode_t *ip, /* incore inode pointer */ int whichfork) /* data or attr fork */ { + xfs_bmbt_rec_t *base; /* base of extents list */ xfs_bmbt_rec_t *ep; /* current extent entry */ - xfs_extnum_t idx; /* extent record index */ xfs_ifork_t *ifp; /* inode fork pointer */ xfs_extnum_t nextents; /* number of extents in list */ - xfs_bmbt_rec_t *nextp; /* next extent entry */ ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); + base = ifp->if_u1.if_extents; nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ep = xfs_iext_get_ext(ifp, 0); - for (idx = 0; idx < nextents - 1; idx++) { - nextp = xfs_iext_get_ext(ifp, idx + 1); + for (ep = base; ep < &base[nextents - 1]; ep++) { xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, - (void *)(nextp)); - ep = nextp; + (void *)(ep + 1)); } } @@ -6110,14 +6119,12 @@ xfs_bmap_check_leaf_extents( xfs_fsblock_t bno; /* block # of "block" */ xfs_buf_t *bp; /* buffer for "block" */ int error; /* error return value */ - xfs_extnum_t i=0, j; /* index into the extents list */ + xfs_extnum_t i=0; /* index into the extents list */ xfs_ifork_t *ifp; /* fork structure */ int level; /* btree level, for checking */ xfs_mount_t *mp; /* file system mount structure */ xfs_bmbt_ptr_t *pp; /* pointer to block address */ - xfs_bmbt_rec_t *ep; /* pointer to current extent */ - xfs_bmbt_rec_t *lastp; /* pointer to previous extent */ - xfs_bmbt_rec_t *nextp; /* pointer to next extent */ + xfs_bmbt_rec_t *ep, *lastp; /* extent pointers in block entry */ int bp_release = 0; if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) { @@ -6187,6 +6194,7 @@ xfs_bmap_check_leaf_extents( */ lastp = NULL; for (;;) { + xfs_bmbt_rec_t *frp; xfs_fsblock_t nextbno; xfs_extnum_t num_recs; @@ -6205,20 +6213,18 @@ xfs_bmap_check_leaf_extents( * conform with the first entry in 
this one. */ - ep = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); - for (j = 1; j < num_recs; j++) { - nextp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, - block, j + 1, mp->m_bmap_dmxr[0]); + + for (ep = frp;ep < frp + (num_recs - 1); ep++) { if (lastp) { xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)lastp, (void *)ep); } xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep, - (void *)(nextp)); - lastp = ep; - ep = nextp; + (void *)(ep + 1)); } + lastp = frp + num_recs - 1; /* For the next iteration */ i += num_recs; if (bp_release) { @@ -6282,7 +6288,7 @@ xfs_bmap_count_blocks( mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) { - if (unlikely(xfs_bmap_count_leaves(ifp, 0, + if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents, ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t), count) < 0)) { XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)", @@ -6304,7 +6310,7 @@ xfs_bmap_count_blocks( ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks); bno = INT_GET(*pp, ARCH_CONVERT); - if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { + if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) { XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); @@ -6321,7 +6327,6 @@ int /* error */ xfs_bmap_count_tree( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ - xfs_ifork_t *ifp, /* inode fork pointer */ xfs_fsblock_t blockno, /* file system block number */ int levelin, /* level in btree */ int *count) /* Count of blocks */ @@ -6334,6 +6339,7 @@ xfs_bmap_count_tree( xfs_fsblock_t nextbno; xfs_bmbt_block_t *block, *nextblock; int numrecs; + xfs_bmbt_rec_t *frp; if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) return error; @@ -6358,7 +6364,7 @@ xfs_bmap_count_tree( xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); bno = INT_GET(*pp, ARCH_CONVERT); if (unlikely((error = - xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) { + xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) { xfs_trans_brelse(tp, bp); XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", XFS_ERRLEVEL_LOW, mp); @@ -6370,8 +6376,9 @@ xfs_bmap_count_tree( for (;;) { nextbno = be64_to_cpu(block->bb_rightsib); numrecs = be16_to_cpu(block->bb_numrecs); - if (unlikely(xfs_bmap_disk_count_leaves(ifp, mp, - 0, block, numrecs, count) < 0)) { + frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, + xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]); + if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) { xfs_trans_brelse(tp, bp); XFS_ERROR_REPORT("xfs_bmap_count_tree(2)", XFS_ERRLEVEL_LOW, mp); @@ -6392,45 +6399,33 @@ xfs_bmap_count_tree( } /* - * Count leaf blocks given a range of extent records. + * Count leaf blocks given a pointer to an extent list. */ int xfs_bmap_count_leaves( - xfs_ifork_t *ifp, - xfs_extnum_t idx, + xfs_bmbt_rec_t *frp, int numrecs, int *count) { int b; - xfs_bmbt_rec_t *frp; - for (b = 0; b < numrecs; b++) { - frp = xfs_iext_get_ext(ifp, idx + b); + for ( b = 1; b <= numrecs; b++, frp++) *count += xfs_bmbt_get_blockcount(frp); - } return 0; } /* - * Count leaf blocks given a range of extent records originally - * in btree format. + * Count leaf blocks given a pointer to an extent list originally in btree format. 
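
xfs_bmap_count_tree(), together with the leaf counters beside it, implements one traversal: descend the leftmost spine of the btree to the leaf level, then sweep right across the sibling leaves adding up every record's block count. An in-memory approximation of that shape (the kernel version reads and releases a buffer at each block and recurses once per interior level; tblock and its fields are invented for the sketch):

/* Hypothetical in-memory stand-in for a bmap btree block. */
struct tblock {
	int		 level;		/* 0 means leaf           */
	int		 numrecs;	/* records in this block  */
	struct tblock	*rightsib;	/* next block, same level */
	struct tblock  **children;	/* interior blocks only   */
	int		*blockcounts;	/* leaf blocks only       */
};

/* Sum every leaf record's block count across the whole tree. */
static void count_tree(const struct tblock *block, int *count)
{
	/* Descend the leftmost spine until we reach the leaves. */
	while (block->level > 0)
		block = block->children[0];
	/* Walk right across the leaf chain. */
	for (; block; block = block->rightsib) {
		int b;

		for (b = 0; b < block->numrecs; b++)
			*count += block->blockcounts[b];
	}
}
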
*/ int xfs_bmap_disk_count_leaves( - xfs_ifork_t *ifp, - xfs_mount_t *mp, - xfs_extnum_t idx, - xfs_bmbt_block_t *block, + xfs_bmbt_rec_t *frp, int numrecs, int *count) { int b; - xfs_bmbt_rec_t *frp; - for (b = 1; b <= numrecs; b++) { - frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, - xfs_bmbt, block, idx + b, mp->m_bmap_dmxr[0]); + for ( b = 1; b <= numrecs; b++, frp++) *count += xfs_bmbt_disk_get_blockcount(frp); - } return 0; } diff --git a/trunk/fs/xfs/xfs_bmap.h b/trunk/fs/xfs/xfs_bmap.h index 011ccaa9a1c0..12cc63dfc2c4 100644 --- a/trunk/fs/xfs/xfs_bmap.h +++ b/trunk/fs/xfs/xfs_bmap.h @@ -20,7 +20,6 @@ struct getbmap; struct xfs_bmbt_irec; -struct xfs_ifork; struct xfs_inode; struct xfs_mount; struct xfs_trans; @@ -348,28 +347,9 @@ xfs_bmap_count_blocks( */ int xfs_check_nostate_extents( - struct xfs_ifork *ifp, - xfs_extnum_t idx, + xfs_bmbt_rec_t *ep, xfs_extnum_t num); -/* - * Call xfs_bmap_do_search_extents() to search for the extent - * record containing block bno. If in multi-level in-core extent - * allocation mode, find and extract the target extent buffer, - * otherwise just use the direct extent list. - */ -xfs_bmbt_rec_t * -xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *, - xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); - -/* - * Search an extent list for the extent which includes block - * bno. - */ -xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *, - xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *, - xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); - #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_H__ */ diff --git a/trunk/fs/xfs/xfs_bmap_btree.c b/trunk/fs/xfs/xfs_bmap_btree.c index bea44709afbe..3f1383d160e8 100644 --- a/trunk/fs/xfs/xfs_bmap_btree.c +++ b/trunk/fs/xfs/xfs_bmap_btree.c @@ -2754,7 +2754,7 @@ xfs_bmbt_update( } /* - * Check extent records, which have just been read, for + * Check an extent list, which has just been read, for * any bit in the extent flag field. ASSERT on debug * kernels, as this condition should not occur. * Return an error condition (1) if any flags found, @@ -2763,14 +2763,10 @@ xfs_bmbt_update( int xfs_check_nostate_extents( - xfs_ifork_t *ifp, - xfs_extnum_t idx, + xfs_bmbt_rec_t *ep, xfs_extnum_t num) { - xfs_bmbt_rec_t *ep; - - for (; num > 0; num--, idx++) { - ep = xfs_iext_get_ext(ifp, idx); + for (; num > 0; num--, ep++) { if ((ep->l0 >> (64 - BMBT_EXNTFLAG_BITLEN)) != 0) { ASSERT(0); diff --git a/trunk/fs/xfs/xfs_bmap_btree.h b/trunk/fs/xfs/xfs_bmap_btree.h index 6478cfa0e539..e095a2d344ae 100644 --- a/trunk/fs/xfs/xfs_bmap_btree.h +++ b/trunk/fs/xfs/xfs_bmap_btree.h @@ -372,6 +372,14 @@ extern int xfs_bmbt_get_rec(struct xfs_btree_cur *, xfs_fileoff_t *, xfs_exntst_t *, int *); #endif +/* + * Search an extent list for the extent which includes block + * bno. 
+ */ +xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *, + xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *, + xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); + #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_BTREE_H__ */ diff --git a/trunk/fs/xfs/xfs_clnt.h b/trunk/fs/xfs/xfs_clnt.h index 022fff62085b..f57cc9ac875e 100644 --- a/trunk/fs/xfs/xfs_clnt.h +++ b/trunk/fs/xfs/xfs_clnt.h @@ -68,6 +68,8 @@ struct xfs_mount_args { * enforcement */ #define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit * enforcement */ +#define XFSMNT_NOATIME 0x00000100 /* don't modify access + * times on reads */ #define XFSMNT_NOALIGN 0x00000200 /* don't allocate at * stripe boundaries*/ #define XFSMNT_RETERR 0x00000400 /* return error to user */ diff --git a/trunk/fs/xfs/xfs_da_btree.c b/trunk/fs/xfs/xfs_da_btree.c index 4bae3a76c678..473671fa5c13 100644 --- a/trunk/fs/xfs/xfs_da_btree.c +++ b/trunk/fs/xfs/xfs_da_btree.c @@ -126,10 +126,10 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level, node = bp->data; node->hdr.info.forw = 0; node->hdr.info.back = 0; - node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC); + INT_SET(node->hdr.info.magic, ARCH_CONVERT, XFS_DA_NODE_MAGIC); node->hdr.info.pad = 0; node->hdr.count = 0; - node->hdr.level = cpu_to_be16(level); + INT_SET(node->hdr.level, ARCH_CONVERT, level); xfs_da_log_buf(tp, bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); @@ -290,28 +290,28 @@ xfs_da_split(xfs_da_state_t *state) node = oldblk->bp->data; if (node->hdr.info.forw) { - if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) { + if (INT_GET(node->hdr.info.forw, ARCH_CONVERT) == addblk->blkno) { bp = addblk->bp; } else { ASSERT(state->extravalid); bp = state->extrablk.bp; } node = bp->data; - node->hdr.info.back = cpu_to_be32(oldblk->blkno); + INT_SET(node->hdr.info.back, ARCH_CONVERT, oldblk->blkno); xfs_da_log_buf(state->args->trans, bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); } node = oldblk->bp->data; - if (node->hdr.info.back) { - if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) { + if (INT_GET(node->hdr.info.back, ARCH_CONVERT)) { + if (INT_GET(node->hdr.info.back, ARCH_CONVERT) == addblk->blkno) { bp = addblk->bp; } else { ASSERT(state->extravalid); bp = state->extrablk.bp; } node = bp->data; - node->hdr.info.forw = cpu_to_be32(oldblk->blkno); + INT_SET(node->hdr.info.forw, ARCH_CONVERT, oldblk->blkno); xfs_da_log_buf(state->args->trans, bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); @@ -359,14 +359,14 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(bp != NULL); node = bp->data; oldroot = blk1->bp->data; - if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) { - size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - + if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { + size = (int)((char *)&oldroot->btree[INT_GET(oldroot->hdr.count, ARCH_CONVERT)] - (char *)oldroot); } else { ASSERT(XFS_DIR_IS_V2(mp)); - ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); leaf = (xfs_dir2_leaf_t *)oldroot; - size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] - + size = (int)((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] - (char *)leaf); } memcpy(node, oldroot, size); @@ -381,18 +381,18 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, error = xfs_da_node_create(args, 
args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp) ? mp->m_dirleafblk : 0, - be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork); + INT_GET(node->hdr.level, ARCH_CONVERT) + 1, &bp, args->whichfork); if (error) return(error); node = bp->data; - node->btree[0].hashval = cpu_to_be32(blk1->hashval); - node->btree[0].before = cpu_to_be32(blk1->blkno); - node->btree[1].hashval = cpu_to_be32(blk2->hashval); - node->btree[1].before = cpu_to_be32(blk2->blkno); - node->hdr.count = cpu_to_be16(2); + INT_SET(node->btree[0].hashval, ARCH_CONVERT, blk1->hashval); + INT_SET(node->btree[0].before, ARCH_CONVERT, blk1->blkno); + INT_SET(node->btree[1].hashval, ARCH_CONVERT, blk2->hashval); + INT_SET(node->btree[1].before, ARCH_CONVERT, blk2->blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 2); #ifdef DEBUG - if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) { + if (INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { ASSERT(blk1->blkno >= mp->m_dirleafblk && blk1->blkno < mp->m_dirfreeblk); ASSERT(blk2->blkno >= mp->m_dirleafblk && @@ -424,7 +424,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int useextra; node = oldblk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); /* * With V2 the extra block is data or freespace. @@ -435,7 +435,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, /* * Do we have to split the node? */ - if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) { + if ((INT_GET(node->hdr.count, ARCH_CONVERT) + newcount) > state->node_ents) { /* * Allocate a new node, add to the doubly linked chain of * nodes, then move some of our excess entries into it. @@ -472,7 +472,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, * If we had double-split op below us, then add the extra block too. */ node = oldblk->bp->data; - if (oldblk->index <= be16_to_cpu(node->hdr.count)) { + if (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT)) { oldblk->index++; xfs_da_node_add(state, oldblk, addblk); if (useextra) { @@ -516,17 +516,17 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Figure out how many entries need to move, and in which direction. * Swap the nodes around if that makes it simpler. 
*/ - if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && - ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) || - (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) < - be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) { + if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { tmpnode = node1; node1 = node2; node2 = tmpnode; } - ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC); - count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2; + ASSERT(INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + count = (INT_GET(node1->hdr.count, ARCH_CONVERT) - INT_GET(node2->hdr.count, ARCH_CONVERT)) / 2; if (count == 0) return; tp = state->args->trans; @@ -537,7 +537,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, /* * Move elements in node2 up to make a hole. */ - if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) { + if ((tmp = INT_GET(node2->hdr.count, ARCH_CONVERT)) > 0) { tmp *= (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[0]; btree_d = &node2->btree[count]; @@ -548,12 +548,13 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, * Move the req'd B-tree elements from high in node1 to * low in node2. */ - be16_add(&node2->hdr.count, count); + INT_MOD(node2->hdr.count, ARCH_CONVERT, count); tmp = count * (uint)sizeof(xfs_da_node_entry_t); - btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count]; + btree_s = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT) - count]; btree_d = &node2->btree[0]; memcpy(btree_d, btree_s, tmp); - be16_add(&node1->hdr.count, -count); + INT_MOD(node1->hdr.count, ARCH_CONVERT, -(count)); + } else { /* * Move the req'd B-tree elements from low in node2 to @@ -562,21 +563,21 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, count = -count; tmp = count * (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[0]; - btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)]; + btree_d = &node1->btree[INT_GET(node1->hdr.count, ARCH_CONVERT)]; memcpy(btree_d, btree_s, tmp); - be16_add(&node1->hdr.count, count); + INT_MOD(node1->hdr.count, ARCH_CONVERT, count); xfs_da_log_buf(tp, blk1->bp, XFS_DA_LOGRANGE(node1, btree_d, tmp)); /* * Move elements in node2 down to fill the hole. 
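
xfs_da_node_rebalance() above levels two adjacent btree nodes: it takes half the difference in entry counts, then either opens a hole at the front of the emptier right node and fills it from the tail of the left, or appends the head of the right node onto the left and closes the gap. The same memmove/memcpy choreography on a pair of plain sorted arrays (node is a fixed-size stand-in; every key in n1 is assumed to sort before every key in n2):

#include <string.h>

/* Hypothetical fixed-size node of sorted keys. */
struct node {
	int	 count;
	unsigned keys[64];
};

static void rebalance(struct node *n1, struct node *n2)
{
	int count = (n1->count - n2->count) / 2;

	if (count > 0) {
		/* Open a hole at the front of n2 ... */
		memmove(&n2->keys[count], &n2->keys[0],
			n2->count * sizeof(n2->keys[0]));
		/* ... and fill it with the tail of n1. */
		memcpy(&n2->keys[0], &n1->keys[n1->count - count],
		       count * sizeof(n1->keys[0]));
		n1->count -= count;
		n2->count += count;
	} else if (count < 0) {
		count = -count;
		/* Append the head of n2 onto n1 ... */
		memcpy(&n1->keys[n1->count], &n2->keys[0],
		       count * sizeof(n2->keys[0]));
		/* ... and close the gap in n2. */
		memmove(&n2->keys[0], &n2->keys[count],
			(n2->count - count) * sizeof(n2->keys[0]));
		n1->count += count;
		n2->count -= count;
	}
}
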
*/ - tmp = be16_to_cpu(node2->hdr.count) - count; + tmp = INT_GET(node2->hdr.count, ARCH_CONVERT) - count; tmp *= (uint)sizeof(xfs_da_node_entry_t); btree_s = &node2->btree[count]; btree_d = &node2->btree[0]; memmove(btree_d, btree_s, tmp); - be16_add(&node2->hdr.count, -count); + INT_MOD(node2->hdr.count, ARCH_CONVERT, -(count)); } /* @@ -587,7 +588,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, xfs_da_log_buf(tp, blk2->bp, XFS_DA_LOGRANGE(node2, &node2->hdr, sizeof(node2->hdr) + - sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count))); + sizeof(node2->btree[0]) * INT_GET(node2->hdr.count, ARCH_CONVERT))); /* * Record the last hashval from each block for upward propagation. @@ -595,15 +596,15 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, */ node1 = blk1->bp->data; node2 = blk2->bp->data; - blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval); - blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval); + blk1->hashval = INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); + blk2->hashval = INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); /* * Adjust the expected index for insertion. */ - if (blk1->index >= be16_to_cpu(node1->hdr.count)) { - blk2->index = blk1->index - be16_to_cpu(node1->hdr.count); - blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */ + if (blk1->index >= INT_GET(node1->hdr.count, ARCH_CONVERT)) { + blk2->index = blk1->index - INT_GET(node1->hdr.count, ARCH_CONVERT); + blk1->index = INT_GET(node1->hdr.count, ARCH_CONVERT) + 1; /* make it invalid */ } } @@ -621,8 +622,8 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, node = oldblk->bp->data; mp = state->mp; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT((oldblk->index >= 0) && (oldblk->index <= INT_GET(node->hdr.count, ARCH_CONVERT))); ASSERT(newblk->blkno != 0); if (state->args->whichfork == XFS_DATA_FORK && XFS_DIR_IS_V2(mp)) ASSERT(newblk->blkno >= mp->m_dirleafblk && @@ -633,22 +634,22 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, */ tmp = 0; btree = &node->btree[ oldblk->index ]; - if (oldblk->index < be16_to_cpu(node->hdr.count)) { - tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree); + if (oldblk->index < INT_GET(node->hdr.count, ARCH_CONVERT)) { + tmp = (INT_GET(node->hdr.count, ARCH_CONVERT) - oldblk->index) * (uint)sizeof(*btree); memmove(btree + 1, btree, tmp); } - btree->hashval = cpu_to_be32(newblk->hashval); - btree->before = cpu_to_be32(newblk->blkno); + INT_SET(btree->hashval, ARCH_CONVERT, newblk->hashval); + INT_SET(btree->before, ARCH_CONVERT, newblk->blkno); xfs_da_log_buf(state->args->trans, oldblk->bp, XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree))); - be16_add(&node->hdr.count, 1); + INT_MOD(node->hdr.count, ARCH_CONVERT, +1); xfs_da_log_buf(state->args->trans, oldblk->bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); /* * Copy the last hash value from the oldblk to propagate upwards. 
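
Throughout these hunks the be16_to_cpu()/cpu_to_be16() helpers give way to the older INT_GET()/INT_SET() macros with ARCH_CONVERT: both spellings express the same thing, namely that on-disk fields are big-endian and must be swapped on little-endian hosts. Conceptually the 16-bit case reduces to the following (hypothetical helpers; the real macros live in the XFS arch headers and compile to no-ops on big-endian machines):

#include <stdint.h>

/* Read a big-endian on-disk 16-bit field into host order. */
static uint16_t get_be16(const uint16_t *field)
{
	const uint8_t *p = (const uint8_t *)field;

	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Store a host-order value into a big-endian on-disk field. */
static void set_be16(uint16_t *field, uint16_t val)
{
	uint8_t *p = (uint8_t *)field;

	p[0] = (uint8_t)(val >> 8);
	p[1] = (uint8_t)val;
}
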
*/ - oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval); + oldblk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); } /*======================================================================== @@ -767,21 +768,21 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(args != NULL); ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); oldroot = root_blk->bp->data; - ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(oldroot->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); ASSERT(!oldroot->hdr.info.forw); ASSERT(!oldroot->hdr.info.back); /* * If the root has more than one child, then don't do anything. */ - if (be16_to_cpu(oldroot->hdr.count) > 1) + if (INT_GET(oldroot->hdr.count, ARCH_CONVERT) > 1) return(0); /* * Read in the (only) child block, then copy those bytes into * the root block's buffer and free the original child block. */ - child = be32_to_cpu(oldroot->btree[0].before); + child = INT_GET(oldroot->btree[ 0 ].before, ARCH_CONVERT); ASSERT(child != 0); error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp, args->whichfork); @@ -789,11 +790,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) return(error); ASSERT(bp != NULL); blkinfo = bp->data; - if (be16_to_cpu(oldroot->hdr.level) == 1) { - ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || - be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC); + if (INT_GET(oldroot->hdr.level, ARCH_CONVERT) == 1) { + ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); } else { - ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(blkinfo->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); } ASSERT(!blkinfo->forw); ASSERT(!blkinfo->back); @@ -829,9 +830,9 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); node = (xfs_da_intnode_t *)info; - count = be16_to_cpu(node->hdr.count); + count = INT_GET(node->hdr.count, ARCH_CONVERT); if (count > (state->node_ents >> 1)) { *action = 0; /* blk over 50%, don't try to join */ return(0); /* blk over 50%, don't try to join */ @@ -848,7 +849,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = (info->forw != 0); + forward = info->forw; memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -870,12 +871,13 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) * to shrink a directory over time. 
*/ /* start with smaller blk num */ - forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); + forward = (INT_GET(info->forw, ARCH_CONVERT) + < INT_GET(info->back, ARCH_CONVERT)); for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = be32_to_cpu(info->forw); + blkno = INT_GET(info->forw, ARCH_CONVERT); else - blkno = be32_to_cpu(info->back); + blkno = INT_GET(info->back, ARCH_CONVERT); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -887,10 +889,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) node = (xfs_da_intnode_t *)info; count = state->node_ents; count -= state->node_ents >> 2; - count -= be16_to_cpu(node->hdr.count); + count -= INT_GET(node->hdr.count, ARCH_CONVERT); node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); - count -= be16_to_cpu(node->hdr.count); + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + count -= INT_GET(node->hdr.count, ARCH_CONVERT); xfs_da_brelse(state->args->trans, bp); if (count >= 0) break; /* fits with at least 25% to spare */ @@ -971,16 +973,16 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) } for (blk--, level--; level >= 0; blk--, level--) { node = blk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); btree = &node->btree[ blk->index ]; - if (be32_to_cpu(btree->hashval) == lasthash) + if (INT_GET(btree->hashval, ARCH_CONVERT) == lasthash) break; blk->hashval = lasthash; - btree->hashval = cpu_to_be32(lasthash); + INT_SET(btree->hashval, ARCH_CONVERT, lasthash); xfs_da_log_buf(state->args->trans, blk->bp, XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); - lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); + lasthash = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); } } @@ -995,25 +997,25 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) int tmp; node = drop_blk->bp->data; - ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count)); + ASSERT(drop_blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)); ASSERT(drop_blk->index >= 0); /* * Copy over the offending entry, or just zero it out. */ btree = &node->btree[drop_blk->index]; - if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) { - tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1; + if (drop_blk->index < (INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { + tmp = INT_GET(node->hdr.count, ARCH_CONVERT) - drop_blk->index - 1; tmp *= (uint)sizeof(xfs_da_node_entry_t); memmove(btree, btree + 1, tmp); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, btree, tmp)); - btree = &node->btree[be16_to_cpu(node->hdr.count)-1]; + btree = &node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ]; } memset((char *)btree, 0, sizeof(xfs_da_node_entry_t)); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, btree, sizeof(*btree))); - be16_add(&node->hdr.count, -1); + INT_MOD(node->hdr.count, ARCH_CONVERT, -1); xfs_da_log_buf(state->args->trans, drop_blk->bp, XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr))); @@ -1021,7 +1023,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk) * Copy the last hash value from the block to propagate upwards. 
*/ btree--; - drop_blk->hashval = be32_to_cpu(btree->hashval); + drop_blk->hashval = INT_GET(btree->hashval, ARCH_CONVERT); } /* @@ -1039,40 +1041,40 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, drop_node = drop_blk->bp->data; save_node = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(drop_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(save_node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); tp = state->args->trans; /* * If the dying block has lower hashvals, then move all the * elements in the remaining block up to make a hole. */ - if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) || - (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) < - be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval))) + if ((INT_GET(drop_node->btree[ 0 ].hashval, ARCH_CONVERT) < INT_GET(save_node->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(drop_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT))) { - btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)]; - tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); + btree = &save_node->btree[ INT_GET(drop_node->hdr.count, ARCH_CONVERT) ]; + tmp = INT_GET(save_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); memmove(btree, &save_node->btree[0], tmp); btree = &save_node->btree[0]; xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, btree, - (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) * + (INT_GET(save_node->hdr.count, ARCH_CONVERT) + INT_GET(drop_node->hdr.count, ARCH_CONVERT)) * sizeof(xfs_da_node_entry_t))); } else { - btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)]; + btree = &save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT) ]; xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, btree, - be16_to_cpu(drop_node->hdr.count) * + INT_GET(drop_node->hdr.count, ARCH_CONVERT) * sizeof(xfs_da_node_entry_t))); } /* * Move all the B-tree elements from drop_blk to save_blk. */ - tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t); + tmp = INT_GET(drop_node->hdr.count, ARCH_CONVERT) * (uint)sizeof(xfs_da_node_entry_t); memcpy(btree, &drop_node->btree[0], tmp); - be16_add(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count)); + INT_MOD(save_node->hdr.count, ARCH_CONVERT, INT_GET(drop_node->hdr.count, ARCH_CONVERT)); xfs_da_log_buf(tp, save_blk->bp, XFS_DA_LOGRANGE(save_node, &save_node->hdr, @@ -1081,7 +1083,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, /* * Save the last hashval in the remaining block for upward propagation. 
*/ - save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval); + save_blk->hashval = INT_GET(save_node->btree[ INT_GET(save_node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); } /*======================================================================== @@ -1136,46 +1138,46 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) return(error); } curr = blk->bp->data; - ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC || - be16_to_cpu(curr->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || - be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || + INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); /* * Search an intermediate node for a match. */ - blk->magic = be16_to_cpu(curr->magic); - if (blk->magic == XFS_DA_NODE_MAGIC) { + blk->magic = INT_GET(curr->magic, ARCH_CONVERT); + if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { node = blk->bp->data; - blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); + blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); /* * Binary search. (note: small blocks will skip loop) */ - max = be16_to_cpu(node->hdr.count); + max = INT_GET(node->hdr.count, ARCH_CONVERT); probe = span = max / 2; hashval = args->hashval; for (btree = &node->btree[probe]; span > 4; btree = &node->btree[probe]) { span /= 2; - if (be32_to_cpu(btree->hashval) < hashval) + if (INT_GET(btree->hashval, ARCH_CONVERT) < hashval) probe += span; - else if (be32_to_cpu(btree->hashval) > hashval) + else if (INT_GET(btree->hashval, ARCH_CONVERT) > hashval) probe -= span; else break; } ASSERT((probe >= 0) && (probe < max)); - ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval)); + ASSERT((span <= 4) || (INT_GET(btree->hashval, ARCH_CONVERT) == hashval)); /* * Since we may have duplicate hashval's, find the first * matching hashval in the node. 
*/ - while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) { + while ((probe > 0) && (INT_GET(btree->hashval, ARCH_CONVERT) >= hashval)) { btree--; probe--; } - while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) { + while ((probe < max) && (INT_GET(btree->hashval, ARCH_CONVERT) < hashval)) { btree++; probe++; } @@ -1185,21 +1187,21 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result) */ if (probe == max) { blk->index = max-1; - blkno = be32_to_cpu(node->btree[max-1].before); + blkno = INT_GET(node->btree[ max-1 ].before, ARCH_CONVERT); } else { blk->index = probe; - blkno = be32_to_cpu(btree->before); + blkno = INT_GET(btree->before, ARCH_CONVERT); } } - else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) { + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC) { blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL); break; } - else if (be16_to_cpu(curr->magic) == XFS_DIR_LEAF_MAGIC) { + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { blk->hashval = xfs_dir_leaf_lasthash(blk->bp, NULL); break; } - else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) { + else if (INT_GET(curr->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL); break; } @@ -1272,8 +1274,8 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC || old_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || old_blk->magic == XFS_ATTR_LEAF_MAGIC); - ASSERT(old_blk->magic == be16_to_cpu(old_info->magic)); - ASSERT(new_blk->magic == be16_to_cpu(new_info->magic)); + ASSERT(old_blk->magic == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(new_blk->magic == INT_GET(new_info->magic, ARCH_CONVERT)); ASSERT(old_blk->magic == new_blk->magic); switch (old_blk->magic) { @@ -1300,44 +1302,47 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, /* * Link new block in before existing block. */ - new_info->forw = cpu_to_be32(old_blk->blkno); - new_info->back = old_info->back; - if (old_info->back) { + INT_SET(new_info->forw, ARCH_CONVERT, old_blk->blkno); + new_info->back = old_info->back; /* INT_: direct copy */ + if (INT_GET(old_info->back, ARCH_CONVERT)) { error = xfs_da_read_buf(args->trans, args->dp, - be32_to_cpu(old_info->back), - -1, &bp, args->whichfork); + INT_GET(old_info->back, + ARCH_CONVERT), -1, &bp, + args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic)); - ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno); - tmp_info->forw = cpu_to_be32(new_blk->blkno); + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == old_blk->blkno); + INT_SET(tmp_info->forw, ARCH_CONVERT, new_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da_buf_done(bp); } - old_info->back = cpu_to_be32(new_blk->blkno); + INT_SET(old_info->back, ARCH_CONVERT, new_blk->blkno); } else { /* * Link new block in after existing block. 
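
The lookup above is a textbook binary search over a sorted array that may hold duplicate keys: probe by halving the span until it is small, then step linearly backwards past any duplicates and forwards past smaller entries, so the caller always lands on the first candidate. Extracted into a self-contained function (hashvals stands in for the node's btree[].hashval column):

/*
 * Return the index of the first entry with hashvals[i] >= target,
 * or max if every entry is smaller, using the probe/span scheme of
 * xfs_da_node_lookup_int().
 */
static int lookup_first(const unsigned *hashvals, int max,
			unsigned target)
{
	int probe = max / 2;
	int span = max / 2;

	while (span > 4) {
		span /= 2;
		if (hashvals[probe] < target)
			probe += span;
		else if (hashvals[probe] > target)
			probe -= span;
		else
			break;
	}
	/* Back up over duplicates, then skip entries below target. */
	while (probe > 0 && hashvals[probe] >= target)
		probe--;
	while (probe < max && hashvals[probe] < target)
		probe++;
	return probe;
}
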
*/ - new_info->forw = old_info->forw; - new_info->back = cpu_to_be32(old_blk->blkno); - if (old_info->forw) { + new_info->forw = old_info->forw; /* INT_: direct copy */ + INT_SET(new_info->back, ARCH_CONVERT, old_blk->blkno); + if (INT_GET(old_info->forw, ARCH_CONVERT)) { error = xfs_da_read_buf(args->trans, args->dp, - be32_to_cpu(old_info->forw), - -1, &bp, args->whichfork); + INT_GET(old_info->forw, ARCH_CONVERT), -1, &bp, + args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(tmp_info->magic == old_info->magic); - ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno); - tmp_info->back = cpu_to_be32(new_blk->blkno); + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) + == INT_GET(old_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) + == old_blk->blkno); + INT_SET(tmp_info->back, ARCH_CONVERT, new_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1); xfs_da_buf_done(bp); } - old_info->forw = cpu_to_be32(new_blk->blkno); + INT_SET(old_info->forw, ARCH_CONVERT, new_blk->blkno); } xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); @@ -1355,13 +1360,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) node1 = node1_bp->data; node2 = node2_bp->data; - ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) && - (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC)); - if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && - ((be32_to_cpu(node2->btree[0].hashval) < - be32_to_cpu(node1->btree[0].hashval)) || - (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) < - be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) { + ASSERT((INT_GET(node1->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) && + (INT_GET(node2->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC)); + if ((INT_GET(node1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(node2->hdr.count, ARCH_CONVERT) > 0) && + ((INT_GET(node2->btree[ 0 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ 0 ].hashval, ARCH_CONVERT)) || + (INT_GET(node2->btree[ INT_GET(node2->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT) < + INT_GET(node1->btree[ INT_GET(node1->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)))) { return(1); } return(0); @@ -1376,12 +1381,12 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) xfs_da_intnode_t *node; node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); if (count) - *count = be16_to_cpu(node->hdr.count); + *count = INT_GET(node->hdr.count, ARCH_CONVERT); if (!node->hdr.count) return(0); - return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); + return(INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); } /* @@ -1406,47 +1411,50 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC || save_blk->magic == XFS_DIRX_LEAF_MAGIC(state->mp) || save_blk->magic == XFS_ATTR_LEAF_MAGIC); - ASSERT(save_blk->magic == be16_to_cpu(save_info->magic)); - ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic)); + ASSERT(save_blk->magic == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(drop_blk->magic == INT_GET(drop_info->magic, ARCH_CONVERT)); ASSERT(save_blk->magic == drop_blk->magic); - ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) || - (be32_to_cpu(save_info->back) == drop_blk->blkno)); - 
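
xfs_da_blk_link() above splices a new block into the doubly linked chain of sibling blocks, and xfs_da_blk_unlink() below removes one; in both cases the far neighbour must be read in so its forw/back pointer can be patched too. The pointer discipline, reduced to an in-memory list (plain pointers stand in for on-disk block numbers plus the xfs_da_read_buf() round trip):

#include <stddef.h>

/* Hypothetical doubly linked chain of sibling blocks. */
struct blk {
	struct blk *forw;	/* next sibling     */
	struct blk *back;	/* previous sibling */
};

/* Insert new_blk after old, fixing all four affected links. */
static void link_after(struct blk *old, struct blk *new_blk)
{
	new_blk->forw = old->forw;
	new_blk->back = old;
	if (old->forw)
		old->forw->back = new_blk;
	old->forw = new_blk;
}

/* Drop dead from the chain, repointing both neighbours. */
static void unlink_blk(struct blk *dead)
{
	if (dead->back)
		dead->back->forw = dead->forw;
	if (dead->forw)
		dead->forw->back = dead->back;
	dead->forw = dead->back = NULL;
}
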
ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) || - (be32_to_cpu(drop_info->back) == save_blk->blkno)); + ASSERT((INT_GET(save_info->forw, ARCH_CONVERT) == drop_blk->blkno) || + (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno)); + ASSERT((INT_GET(drop_info->forw, ARCH_CONVERT) == save_blk->blkno) || + (INT_GET(drop_info->back, ARCH_CONVERT) == save_blk->blkno)); /* * Unlink the leaf block from the doubly linked chain of leaves. */ - if (be32_to_cpu(save_info->back) == drop_blk->blkno) { - save_info->back = drop_info->back; - if (drop_info->back) { + if (INT_GET(save_info->back, ARCH_CONVERT) == drop_blk->blkno) { + save_info->back = drop_info->back; /* INT_: direct copy */ + if (INT_GET(drop_info->back, ARCH_CONVERT)) { error = xfs_da_read_buf(args->trans, args->dp, - be32_to_cpu(drop_info->back), - -1, &bp, args->whichfork); + INT_GET(drop_info->back, + ARCH_CONVERT), -1, &bp, + args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(tmp_info->magic == save_info->magic); - ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno); - tmp_info->forw = cpu_to_be32(save_blk->blkno); + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->forw, ARCH_CONVERT) == drop_blk->blkno); + INT_SET(tmp_info->forw, ARCH_CONVERT, save_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1); xfs_da_buf_done(bp); } } else { - save_info->forw = drop_info->forw; - if (drop_info->forw) { + save_info->forw = drop_info->forw; /* INT_: direct copy */ + if (INT_GET(drop_info->forw, ARCH_CONVERT)) { error = xfs_da_read_buf(args->trans, args->dp, - be32_to_cpu(drop_info->forw), - -1, &bp, args->whichfork); + INT_GET(drop_info->forw, ARCH_CONVERT), -1, &bp, + args->whichfork); if (error) return(error); ASSERT(bp != NULL); tmp_info = bp->data; - ASSERT(tmp_info->magic == save_info->magic); - ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno); - tmp_info->back = cpu_to_be32(save_blk->blkno); + ASSERT(INT_GET(tmp_info->magic, ARCH_CONVERT) + == INT_GET(save_info->magic, ARCH_CONVERT)); + ASSERT(INT_GET(tmp_info->back, ARCH_CONVERT) + == drop_blk->blkno); + INT_SET(tmp_info->back, ARCH_CONVERT, save_blk->blkno); xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1); xfs_da_buf_done(bp); @@ -1489,14 +1497,14 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, for (blk = &path->blk[level]; level >= 0; blk--, level--) { ASSERT(blk->bp != NULL); node = blk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); - if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { + ASSERT(INT_GET(node->hdr.info.magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); + if (forward && (blk->index < INT_GET(node->hdr.count, ARCH_CONVERT)-1)) { blk->index++; - blkno = be32_to_cpu(node->btree[blk->index].before); + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); break; } else if (!forward && (blk->index > 0)) { blk->index--; - blkno = be32_to_cpu(node->btree[blk->index].before); + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); break; } } @@ -1528,18 +1536,18 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, return(error); ASSERT(blk->bp != NULL); info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC || - be16_to_cpu(info->magic) == XFS_DIRX_LEAF_MAGIC(state->mp) || - be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); - blk->magic = be16_to_cpu(info->magic); - if (blk->magic == 
XFS_DA_NODE_MAGIC) { + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC || + INT_GET(info->magic, ARCH_CONVERT) == XFS_DIRX_LEAF_MAGIC(state->mp) || + INT_GET(info->magic, ARCH_CONVERT) == XFS_ATTR_LEAF_MAGIC); + blk->magic = INT_GET(info->magic, ARCH_CONVERT); + if (INT_GET(info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC) { node = (xfs_da_intnode_t *)info; - blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval); + blk->hashval = INT_GET(node->btree[ INT_GET(node->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT); if (forward) blk->index = 0; else - blk->index = be16_to_cpu(node->hdr.count)-1; - blkno = be32_to_cpu(node->btree[blk->index].before); + blk->index = INT_GET(node->hdr.count, ARCH_CONVERT)-1; + blkno = INT_GET(node->btree[ blk->index ].before, ARCH_CONVERT); } else { ASSERT(level == path->active-1); blk->index = 0; @@ -1780,40 +1788,40 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Get values from the moved block. */ - if (be16_to_cpu(dead_info->magic) == XFS_DIR_LEAF_MAGIC) { + if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) { ASSERT(XFS_DIR_IS_V1(mp)); dead_leaf = (xfs_dir_leafblock_t *)dead_info; dead_level = 0; dead_hash = INT_GET(dead_leaf->entries[INT_GET(dead_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); - } else if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) { + } else if (INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC) { ASSERT(XFS_DIR_IS_V2(mp)); dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; dead_level = 0; - dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval); + dead_hash = INT_GET(dead_leaf2->ents[INT_GET(dead_leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); } else { - ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC); + ASSERT(INT_GET(dead_info->magic, ARCH_CONVERT) == XFS_DA_NODE_MAGIC); dead_node = (xfs_da_intnode_t *)dead_info; - dead_level = be16_to_cpu(dead_node->hdr.level); - dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval); + dead_level = INT_GET(dead_node->hdr.level, ARCH_CONVERT); + dead_hash = INT_GET(dead_node->btree[INT_GET(dead_node->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); } sib_buf = par_buf = NULL; /* * If the moved block has a left sibling, fix up the pointers. */ - if ((sib_blkno = be32_to_cpu(dead_info->back))) { + if ((sib_blkno = INT_GET(dead_info->back, ARCH_CONVERT))) { if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) goto done; sib_info = sib_buf->data; if (unlikely( - be32_to_cpu(sib_info->forw) != last_blkno || - sib_info->magic != dead_info->magic)) { + INT_GET(sib_info->forw, ARCH_CONVERT) != last_blkno || + INT_GET(sib_info->magic, ARCH_CONVERT) != INT_GET(dead_info->magic, ARCH_CONVERT))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - sib_info->forw = cpu_to_be32(dead_blkno); + INT_SET(sib_info->forw, ARCH_CONVERT, dead_blkno); xfs_da_log_buf(tp, sib_buf, XFS_DA_LOGRANGE(sib_info, &sib_info->forw, sizeof(sib_info->forw))); @@ -1823,19 +1831,20 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * If the moved block has a right sibling, fix up the pointers. 
*/ - if ((sib_blkno = be32_to_cpu(dead_info->forw))) { + if ((sib_blkno = INT_GET(dead_info->forw, ARCH_CONVERT))) { if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w))) goto done; sib_info = sib_buf->data; if (unlikely( - be32_to_cpu(sib_info->back) != last_blkno || - sib_info->magic != dead_info->magic)) { + INT_GET(sib_info->back, ARCH_CONVERT) != last_blkno + || INT_GET(sib_info->magic, ARCH_CONVERT) + != INT_GET(dead_info->magic, ARCH_CONVERT))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - sib_info->back = cpu_to_be32(dead_blkno); + INT_SET(sib_info->back, ARCH_CONVERT, dead_blkno); xfs_da_log_buf(tp, sib_buf, XFS_DA_LOGRANGE(sib_info, &sib_info->back, sizeof(sib_info->back))); @@ -1852,26 +1861,26 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, goto done; par_node = par_buf->data; if (unlikely( - be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC || - (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { + INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC || + (level >= 0 && level != INT_GET(par_node->hdr.level, ARCH_CONVERT) + 1))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - level = be16_to_cpu(par_node->hdr.level); + level = INT_GET(par_node->hdr.level, ARCH_CONVERT); for (entno = 0; - entno < be16_to_cpu(par_node->hdr.count) && - be32_to_cpu(par_node->btree[entno].hashval) < dead_hash; + entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && + INT_GET(par_node->btree[entno].hashval, ARCH_CONVERT) < dead_hash; entno++) continue; - if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) { + if (unlikely(entno == INT_GET(par_node->hdr.count, ARCH_CONVERT))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); goto done; } - par_blkno = be32_to_cpu(par_node->btree[entno].before); + par_blkno = INT_GET(par_node->btree[entno].before, ARCH_CONVERT); if (level == dead_level + 1) break; xfs_da_brelse(tp, par_buf); @@ -1883,13 +1892,13 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, */ for (;;) { for (; - entno < be16_to_cpu(par_node->hdr.count) && - be32_to_cpu(par_node->btree[entno].before) != last_blkno; + entno < INT_GET(par_node->hdr.count, ARCH_CONVERT) && + INT_GET(par_node->btree[entno].before, ARCH_CONVERT) != last_blkno; entno++) continue; - if (entno < be16_to_cpu(par_node->hdr.count)) + if (entno < INT_GET(par_node->hdr.count, ARCH_CONVERT)) break; - par_blkno = be32_to_cpu(par_node->hdr.info.forw); + par_blkno = INT_GET(par_node->hdr.info.forw, ARCH_CONVERT); xfs_da_brelse(tp, par_buf); par_buf = NULL; if (unlikely(par_blkno == 0)) { @@ -1902,8 +1911,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, goto done; par_node = par_buf->data; if (unlikely( - be16_to_cpu(par_node->hdr.level) != level || - be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { + INT_GET(par_node->hdr.level, ARCH_CONVERT) != level || + INT_GET(par_node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC)) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); @@ -1914,7 +1923,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Update the parent entry pointing to the moved block. 
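
[Note for review, not part of the patch: the two sibling hunks above fix up the forw/back links when xfs_da_swap_lastblock() moves the last block into the hole left by a freed one. Stripped of buffers, transactions, and on-disk byte order, the relinking is ordinary doubly-linked-list surgery; a hedged sketch with invented names:

	#include <stdint.h>

	struct sketch_blkinfo {
		uint32_t forw;	/* next block number, 0 if none */
		uint32_t back;	/* previous block number, 0 if none */
	};

	/* Repoint both neighbours of a block that moved from old_blk to
	 * new_blk; a NULL sibling means the moved block was at that end. */
	static void sketch_relink_siblings(struct sketch_blkinfo *left,
					   struct sketch_blkinfo *right,
					   uint32_t old_blk, uint32_t new_blk)
	{
		if (left && left->forw == old_blk)
			left->forw = new_blk;	/* left sibling now points at the copy */
		if (right && right->back == old_blk)
			right->back = new_blk;	/* and so does the right sibling */
	}

The real code additionally verifies the sibling's magic number and back-pointer first, returning EFSCORRUPTED on mismatch, as the hunks above show.]
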
*/ - par_node->btree[entno].before = cpu_to_be32(dead_blkno); + INT_SET(par_node->btree[entno].before, ARCH_CONVERT, dead_blkno); xfs_da_log_buf(tp, par_buf, XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before, sizeof(par_node->btree[entno].before))); @@ -2194,8 +2203,8 @@ xfs_da_do_buf( info = rbp->data; data = rbp->data; free = rbp->data; - magic = be16_to_cpu(info->magic); - magic1 = be32_to_cpu(data->hdr.magic); + magic = INT_GET(info->magic, ARCH_CONVERT); + magic1 = INT_GET(data->hdr.magic, ARCH_CONVERT); if (unlikely( XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && (magic != XFS_DIR_LEAF_MAGIC) && @@ -2204,7 +2213,7 @@ xfs_da_do_buf( (magic != XFS_DIR2_LEAFN_MAGIC) && (magic1 != XFS_DIR2_BLOCK_MAGIC) && (magic1 != XFS_DIR2_DATA_MAGIC) && - (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), + (INT_GET(free->hdr.magic, ARCH_CONVERT) != XFS_DIR2_FREE_MAGIC), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { xfs_buftrace("DA READ ERROR", rbp->bps[0]); diff --git a/trunk/fs/xfs/xfs_da_btree.h b/trunk/fs/xfs/xfs_da_btree.h index 243a730d5ec8..41352113721a 100644 --- a/trunk/fs/xfs/xfs_da_btree.h +++ b/trunk/fs/xfs/xfs_da_btree.h @@ -45,10 +45,10 @@ struct zone; (XFS_DIR_IS_V1(mp) ? XFS_DIR_LEAF_MAGIC : XFS_DIR2_LEAFN_MAGIC) typedef struct xfs_da_blkinfo { - __be32 forw; /* previous block in list */ - __be32 back; /* following block in list */ - __be16 magic; /* validity check on block */ - __be16 pad; /* unused */ + xfs_dablk_t forw; /* previous block in list */ + xfs_dablk_t back; /* following block in list */ + __uint16_t magic; /* validity check on block */ + __uint16_t pad; /* unused */ } xfs_da_blkinfo_t; /* @@ -65,12 +65,12 @@ typedef struct xfs_da_blkinfo { typedef struct xfs_da_intnode { struct xfs_da_node_hdr { /* constant-structure header block */ xfs_da_blkinfo_t info; /* block type, links, etc. 
*/ - __be16 count; /* count of active entries */ - __be16 level; /* level above leaves (leaf == 0) */ + __uint16_t count; /* count of active entries */ + __uint16_t level; /* level above leaves (leaf == 0) */ } hdr; struct xfs_da_node_entry { - __be32 hashval; /* hash value for this descendant */ - __be32 before; /* Btree block before this key */ + xfs_dahash_t hashval; /* hash value for this descendant */ + xfs_dablk_t before; /* Btree block before this key */ } btree[1]; /* variable sized array of keys */ } xfs_da_intnode_t; typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; diff --git a/trunk/fs/xfs/xfs_dfrag.c b/trunk/fs/xfs/xfs_dfrag.c index 4968a6358e61..c6191d00ad27 100644 --- a/trunk/fs/xfs/xfs_dfrag.c +++ b/trunk/fs/xfs/xfs_dfrag.c @@ -83,7 +83,7 @@ xfs_swapext( /* Pull information for the target fd */ if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) || - ((vp = vn_from_inode(fp->f_dentry->d_inode)) == NULL)) { + ((vp = LINVFS_GET_VP(fp->f_dentry->d_inode)) == NULL)) { error = XFS_ERROR(EINVAL); goto error0; } @@ -95,7 +95,7 @@ xfs_swapext( } if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || - ((tvp = vn_from_inode(tfp->f_dentry->d_inode)) == NULL)) { + ((tvp = LINVFS_GET_VP(tfp->f_dentry->d_inode)) == NULL)) { error = XFS_ERROR(EINVAL); goto error0; } diff --git a/trunk/fs/xfs/xfs_dir.c b/trunk/fs/xfs/xfs_dir.c index 9cc702a839a3..bb87d2a700a9 100644 --- a/trunk/fs/xfs/xfs_dir.c +++ b/trunk/fs/xfs/xfs_dir.c @@ -634,7 +634,7 @@ xfs_dir_leaf_removename(xfs_da_args_t *args, int *count, int *totallen) return(retval); ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); retval = xfs_dir_leaf_lookup_int(bp, args, &index); if (retval == EEXIST) { (void)xfs_dir_leaf_remove(args->trans, bp, index); @@ -912,7 +912,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, return(error); if (bp) leaf = bp->data; - if (bp && be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) { + if (bp && INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { xfs_dir_trace_g_dub("node: block not a leaf", dp, uio, bno); xfs_da_brelse(trans, bp); @@ -949,17 +949,17 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, if (bp == NULL) return(XFS_ERROR(EFSCORRUPTED)); node = bp->data; - if (be16_to_cpu(node->hdr.info.magic) != XFS_DA_NODE_MAGIC) + if (INT_GET(node->hdr.info.magic, ARCH_CONVERT) != XFS_DA_NODE_MAGIC) break; btree = &node->btree[0]; xfs_dir_trace_g_dun("node: node detail", dp, uio, node); - for (i = 0; i < be16_to_cpu(node->hdr.count); btree++, i++) { - if (be32_to_cpu(btree->hashval) >= cookhash) { - bno = be32_to_cpu(btree->before); + for (i = 0; i < INT_GET(node->hdr.count, ARCH_CONVERT); btree++, i++) { + if (INT_GET(btree->hashval, ARCH_CONVERT) >= cookhash) { + bno = INT_GET(btree->before, ARCH_CONVERT); break; } } - if (i == be16_to_cpu(node->hdr.count)) { + if (i == INT_GET(node->hdr.count, ARCH_CONVERT)) { xfs_da_brelse(trans, bp); xfs_dir_trace_g_du("node: hash beyond EOF", dp, uio); @@ -982,7 +982,7 @@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, */ for (;;) { leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC)) { + if (unlikely(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC)) { xfs_dir_trace_g_dul("node: not a leaf", dp, uio, leaf); xfs_da_brelse(trans, bp); XFS_CORRUPTION_ERROR("xfs_dir_node_getdents(1)", @@ -990,7 +990,7 
@@ xfs_dir_node_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, return XFS_ERROR(EFSCORRUPTED); } xfs_dir_trace_g_dul("node: leaf detail", dp, uio, leaf); - if ((nextbno = be32_to_cpu(leaf->hdr.info.forw))) { + if ((nextbno = INT_GET(leaf->hdr.info.forw, ARCH_CONVERT))) { nextda = xfs_da_reada_buf(trans, dp, nextbno, XFS_DATA_FORK); } else @@ -1118,20 +1118,21 @@ void xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio, xfs_da_intnode_t *node) { - int last = be16_to_cpu(node->hdr.count) - 1; + int last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1; xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where, (void *)dp, (void *)dp->i_mount, (void *)((unsigned long)(uio->uio_offset >> 32)), (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), (void *)(unsigned long)uio->uio_resid, - (void *)(unsigned long)be32_to_cpu(node->hdr.info.forw), (void *)(unsigned long) - be16_to_cpu(node->hdr.count), + INT_GET(node->hdr.info.forw, ARCH_CONVERT), (void *)(unsigned long) - be32_to_cpu(node->btree[0].hashval), + INT_GET(node->hdr.count, ARCH_CONVERT), (void *)(unsigned long) - be32_to_cpu(node->btree[last].hashval), + INT_GET(node->btree[0].hashval, ARCH_CONVERT), + (void *)(unsigned long) + INT_GET(node->btree[last].hashval, ARCH_CONVERT), NULL, NULL, NULL); } @@ -1149,7 +1150,8 @@ xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio, (void *)((unsigned long)(uio->uio_offset >> 32)), (void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)), (void *)(unsigned long)uio->uio_resid, - (void *)(unsigned long)be32_to_cpu(leaf->hdr.info.forw), + (void *)(unsigned long) + INT_GET(leaf->hdr.info.forw, ARCH_CONVERT), (void *)(unsigned long) INT_GET(leaf->hdr.count, ARCH_CONVERT), (void *)(unsigned long) diff --git a/trunk/fs/xfs/xfs_dir2.h b/trunk/fs/xfs/xfs_dir2.h index 7dd364b1e038..3158f5dc431f 100644 --- a/trunk/fs/xfs/xfs_dir2.h +++ b/trunk/fs/xfs/xfs_dir2.h @@ -55,16 +55,16 @@ typedef __uint32_t xfs_dir2_db_t; /* * Byte offset in a directory. */ -typedef xfs_off_t xfs_dir2_off_t; +typedef xfs_off_t xfs_dir2_off_t; /* * For getdents, argument struct for put routines. */ typedef int (*xfs_dir2_put_t)(struct xfs_dir2_put_args *pa); typedef struct xfs_dir2_put_args { - xfs_off_t cook; /* cookie of (next) entry */ + xfs_off_t cook; /* cookie of (next) entry */ xfs_intino_t ino; /* inode number */ - xfs_dirent_t *dbp; /* buffer pointer */ + struct xfs_dirent *dbp; /* buffer pointer */ char *name; /* directory entry name */ int namelen; /* length of name */ int done; /* output: set if value was stored */ @@ -75,13 +75,18 @@ typedef struct xfs_dir2_put_args { /* * Other interfaces used by the rest of the dir v2 code. 
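
[Note for review, not part of the patch: the xfs_dir_node_getdents() hunk above descends the da-btree by scanning a node's entries for the first child whose subtree can still contain the target hash (the getdents cookie). A simplified, native-endian sketch of that descent step, with hypothetical names:

	#include <stdint.h>

	struct sketch_node_entry {
		uint32_t hashval;	/* highest hash in the child's subtree */
		uint32_t before;	/* block number of that child */
	};

	/* Pick the first child whose hashval is >= cookhash; returns 0
	 * and the child block number, or -1 for "hash beyond EOF",
	 * mirroring the loop in xfs_dir_node_getdents() above. */
	static int sketch_pick_child(const struct sketch_node_entry *btree,
				     int count, uint32_t cookhash,
				     uint32_t *bno)
	{
		int i;

		for (i = 0; i < count; i++) {
			if (btree[i].hashval >= cookhash) {
				*bno = btree[i].before;
				return 0;
			}
		}
		return -1;
	}
]
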
*/ -extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, - xfs_dir2_db_t *dbp); -extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, - int *vp); -extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, - int *vp); -extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, - struct xfs_dabuf *bp); +extern int + xfs_dir2_grow_inode(struct xfs_da_args *args, int space, + xfs_dir2_db_t *dbp); + +extern int + xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); + +extern int + xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *vp); + +extern int + xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, + struct xfs_dabuf *bp); #endif /* __XFS_DIR2_H__ */ diff --git a/trunk/fs/xfs/xfs_dir2_block.c b/trunk/fs/xfs/xfs_dir2_block.c index bd5cee6aa51a..31bc99faa704 100644 --- a/trunk/fs/xfs/xfs_dir2_block.c +++ b/trunk/fs/xfs/xfs_dir2_block.c @@ -81,7 +81,7 @@ xfs_dir2_block_addname( xfs_mount_t *mp; /* filesystem mount point */ int needlog; /* need to log header */ int needscan; /* need to rescan freespace */ - __be16 *tagp; /* pointer to tag value */ + xfs_dir2_data_off_t *tagp; /* pointer to tag value */ xfs_trans_t *tp; /* transaction structure */ xfs_dir2_trace_args("block_addname", args); @@ -100,7 +100,8 @@ xfs_dir2_block_addname( /* * Check the magic number, corrupted if wrong. */ - if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) { + if (unlikely(INT_GET(block->hdr.magic, ARCH_CONVERT) + != XFS_DIR2_BLOCK_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", XFS_ERRLEVEL_LOW, mp, block); xfs_da_brelse(tp, bp); @@ -120,38 +121,38 @@ xfs_dir2_block_addname( /* * Tag just before the first leaf entry. */ - tagp = (__be16 *)blp - 1; + tagp = (xfs_dir2_data_off_t *)blp - 1; /* * Data object just before the first leaf entry. */ - enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); + enddup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); /* * If it's not free then can't do this add without cleaning up: * the space before the first leaf entry needs to be free so it * can be expanded to hold the pointer to the new entry. */ - if (be16_to_cpu(enddup->freetag) != XFS_DIR2_DATA_FREE_TAG) + if (INT_GET(enddup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) dup = enddup = NULL; /* * Check out the biggest freespace and see if it's the same one. */ else { dup = (xfs_dir2_data_unused_t *) - ((char *)block + be16_to_cpu(bf[0].offset)); + ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); if (dup == enddup) { /* * It is the biggest freespace, is it too small * to hold the new leaf too? */ - if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) { + if (INT_GET(dup->length, ARCH_CONVERT) < len + (uint)sizeof(*blp)) { /* * Yes, we use the second-largest * entry instead if it works. */ - if (be16_to_cpu(bf[1].length) >= len) + if (INT_GET(bf[1].length, ARCH_CONVERT) >= len) dup = (xfs_dir2_data_unused_t *) ((char *)block + - be16_to_cpu(bf[1].offset)); + INT_GET(bf[1].offset, ARCH_CONVERT)); else dup = NULL; } @@ -160,7 +161,7 @@ xfs_dir2_block_addname( * Not the same free entry, * just check its length. */ - if (be16_to_cpu(dup->length) < len) { + if (INT_GET(dup->length, ARCH_CONVERT) < len) { dup = NULL; } } @@ -171,9 +172,9 @@ xfs_dir2_block_addname( * If there are stale entries we'll use one for the leaf. * Is the biggest entry enough to avoid compaction? 
*/ - else if (be16_to_cpu(bf[0].length) >= len) { + else if (INT_GET(bf[0].length, ARCH_CONVERT) >= len) { dup = (xfs_dir2_data_unused_t *) - ((char *)block + be16_to_cpu(bf[0].offset)); + ((char *)block + INT_GET(bf[0].offset, ARCH_CONVERT)); compact = 0; } /* @@ -183,20 +184,20 @@ xfs_dir2_block_addname( /* * Tag just before the first leaf entry. */ - tagp = (__be16 *)blp - 1; + tagp = (xfs_dir2_data_off_t *)blp - 1; /* * Data object just before the first leaf entry. */ - dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); + dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); /* * If it's not free then the data will go where the * leaf data starts now, if it works at all. */ - if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - if (be16_to_cpu(dup->length) + (be32_to_cpu(btp->stale) - 1) * + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + if (INT_GET(dup->length, ARCH_CONVERT) + (INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) dup = NULL; - } else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len) + } else if ((INT_GET(btp->stale, ARCH_CONVERT) - 1) * (uint)sizeof(*blp) < len) dup = NULL; else dup = (xfs_dir2_data_unused_t *)blp; @@ -242,11 +243,11 @@ xfs_dir2_block_addname( int fromidx; /* source leaf index */ int toidx; /* target leaf index */ - for (fromidx = toidx = be32_to_cpu(btp->count) - 1, + for (fromidx = toidx = INT_GET(btp->count, ARCH_CONVERT) - 1, highstale = lfloghigh = -1; fromidx >= 0; fromidx--) { - if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) { + if (INT_GET(blp[fromidx].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { if (highstale == -1) highstale = toidx; else { @@ -259,15 +260,15 @@ xfs_dir2_block_addname( blp[toidx] = blp[fromidx]; toidx--; } - lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1); - lfloghigh -= be32_to_cpu(btp->stale) - 1; - be32_add(&btp->count, -(be32_to_cpu(btp->stale) - 1)); + lfloglow = toidx + 1 - (INT_GET(btp->stale, ARCH_CONVERT) - 1); + lfloghigh -= INT_GET(btp->stale, ARCH_CONVERT) - 1; + INT_MOD(btp->count, ARCH_CONVERT, -(INT_GET(btp->stale, ARCH_CONVERT) - 1)); xfs_dir2_data_make_free(tp, bp, (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), - (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), + (xfs_dir2_data_aoff_t)((INT_GET(btp->stale, ARCH_CONVERT) - 1) * sizeof(*blp)), &needlog, &needscan); - blp += be32_to_cpu(btp->stale) - 1; - btp->stale = cpu_to_be32(1); + blp += INT_GET(btp->stale, ARCH_CONVERT) - 1; + INT_SET(btp->stale, ARCH_CONVERT, 1); /* * If we now need to rebuild the bestfree map, do so. * This needs to happen before the next call to use_free. @@ -282,23 +283,23 @@ xfs_dir2_block_addname( * Set leaf logging boundaries to impossible state. * For the no-stale case they're set explicitly. */ - else if (btp->stale) { - lfloglow = be32_to_cpu(btp->count); + else if (INT_GET(btp->stale, ARCH_CONVERT)) { + lfloglow = INT_GET(btp->count, ARCH_CONVERT); lfloghigh = -1; } /* * Find the slot that's first lower than our hash value, -1 if none. 
*/ - for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) { + for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; low <= high; ) { mid = (low + high) >> 1; - if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) + if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) break; if (hash < args->hashval) low = mid + 1; else high = mid - 1; } - while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) { + while (mid >= 0 && INT_GET(blp[mid].hashval, ARCH_CONVERT) >= args->hashval) { mid--; } /* @@ -310,14 +311,14 @@ xfs_dir2_block_addname( */ xfs_dir2_data_use_free(tp, bp, enddup, (xfs_dir2_data_aoff_t) - ((char *)enddup - (char *)block + be16_to_cpu(enddup->length) - + ((char *)enddup - (char *)block + INT_GET(enddup->length, ARCH_CONVERT) - sizeof(*blp)), (xfs_dir2_data_aoff_t)sizeof(*blp), &needlog, &needscan); /* * Update the tail (entry count). */ - be32_add(&btp->count, 1); + INT_MOD(btp->count, ARCH_CONVERT, +1); /* * If we now need to rebuild the bestfree map, do so. * This needs to happen before the next call to use_free. @@ -345,12 +346,12 @@ xfs_dir2_block_addname( else { for (lowstale = mid; lowstale >= 0 && - be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR; + INT_GET(blp[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; for (highstale = mid + 1; - highstale < be32_to_cpu(btp->count) && - be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR && + highstale < INT_GET(btp->count, ARCH_CONVERT) && + INT_GET(blp[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || mid - lowstale > highstale - mid); highstale++) continue; @@ -358,7 +359,7 @@ xfs_dir2_block_addname( * Move entries toward the low-numbered stale entry. */ if (lowstale >= 0 && - (highstale == be32_to_cpu(btp->count) || + (highstale == INT_GET(btp->count, ARCH_CONVERT) || mid - lowstale <= highstale - mid)) { if (mid - lowstale) memmove(&blp[lowstale], &blp[lowstale + 1], @@ -370,7 +371,7 @@ xfs_dir2_block_addname( * Move entries toward the high-numbered stale entry. */ else { - ASSERT(highstale < be32_to_cpu(btp->count)); + ASSERT(highstale < INT_GET(btp->count, ARCH_CONVERT)); mid++; if (highstale - mid) memmove(&blp[mid + 1], &blp[mid], @@ -378,7 +379,7 @@ xfs_dir2_block_addname( lfloglow = MIN(mid, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - be32_add(&btp->stale, -1); + INT_MOD(btp->stale, ARCH_CONVERT, -1); } /* * Point to the new data entry. @@ -387,9 +388,8 @@ xfs_dir2_block_addname( /* * Fill in the leaf entry. */ - blp[mid].hashval = cpu_to_be32(args->hashval); - blp[mid].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, - (char *)dep - (char *)block)); + INT_SET(blp[mid].hashval, ARCH_CONVERT, args->hashval); + INT_SET(blp[mid].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); /* * Mark space for the data entry used. @@ -404,7 +404,7 @@ xfs_dir2_block_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, args->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); /* * Clean up the bestfree array and log the header, tail, and entry. */ @@ -485,8 +485,8 @@ xfs_dir2_block_getdents( /* * Unused, skip it. 
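
[Note for review, not part of the patch: both xfs_dir2_block_addname() and xfs_dir2_block_lookup_int() above use the same pattern: binary-search the hash-sorted leaf array, then walk backwards to the first entry carrying that hash, since different names may hash to the same value and duplicates sit adjacently. A simplified sketch over a plain array:

	#include <stdint.h>

	/* Return the index of the leftmost entry equal to hashval,
	 * or -1 if the hash is not present at all. */
	static int sketch_find_first_hash(const uint32_t *hashvals, int count,
					  uint32_t hashval)
	{
		int low = 0, high = count - 1, mid = -1;

		while (low <= high) {
			mid = (low + high) >> 1;
			if (hashvals[mid] == hashval)
				break;
			if (hashvals[mid] < hashval)
				low = mid + 1;
			else
				high = mid - 1;
		}
		if (low > high)
			return -1;		/* loop ended without a hit */
		while (mid > 0 && hashvals[mid - 1] == hashval)
			mid--;			/* back up to the first duplicate */
		return mid;
	}
]
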
*/ - if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - ptr += be16_to_cpu(dup->length); + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ptr += INT_GET(dup->length, ARCH_CONVERT); continue; } @@ -622,7 +622,7 @@ xfs_dir2_block_lookup( * Get the offset from the leaf entry, to point to the data. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); /* * Fill in inode number, release the block. */ @@ -674,10 +674,10 @@ xfs_dir2_block_lookup_int( * Loop doing a binary search for our hash value. * Find our entry, ENOENT if it's not there. */ - for (low = 0, high = be32_to_cpu(btp->count) - 1; ; ) { + for (low = 0, high = INT_GET(btp->count, ARCH_CONVERT) - 1; ; ) { ASSERT(low <= high); mid = (low + high) >> 1; - if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) + if ((hash = INT_GET(blp[mid].hashval, ARCH_CONVERT)) == args->hashval) break; if (hash < args->hashval) low = mid + 1; @@ -692,7 +692,7 @@ xfs_dir2_block_lookup_int( /* * Back up to the first one with the right hash value. */ - while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) { + while (mid > 0 && INT_GET(blp[mid - 1].hashval, ARCH_CONVERT) == args->hashval) { mid--; } /* @@ -700,7 +700,7 @@ xfs_dir2_block_lookup_int( * right hash value looking for our name. */ do { - if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR) + if ((addr = INT_GET(blp[mid].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) continue; /* * Get pointer to the entry from the leaf. @@ -717,7 +717,7 @@ xfs_dir2_block_lookup_int( *entno = mid; return 0; } - } while (++mid < be32_to_cpu(btp->count) && be32_to_cpu(blp[mid].hashval) == hash); + } while (++mid < INT_GET(btp->count, ARCH_CONVERT) && INT_GET(blp[mid].hashval, ARCH_CONVERT) == hash); /* * No match, release the buffer and return ENOENT. */ @@ -767,7 +767,7 @@ xfs_dir2_block_removename( * Point to the data entry using the leaf entry. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); /* * Mark the data entry's space free. */ @@ -778,12 +778,12 @@ xfs_dir2_block_removename( /* * Fix up the block tail. */ - be32_add(&btp->stale, 1); + INT_MOD(btp->stale, ARCH_CONVERT, +1); xfs_dir2_block_log_tail(tp, bp); /* * Remove the leaf entry by marking it stale. */ - blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); + INT_SET(blp[ent].address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); xfs_dir2_block_log_leaf(tp, bp, ent, ent); /* * Fix up bestfree, log the header if necessary. @@ -843,7 +843,7 @@ xfs_dir2_block_replace( * Point to the data entry we need to change. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); + ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(blp[ent].address, ARCH_CONVERT))); ASSERT(INT_GET(dep->inumber, ARCH_CONVERT) != args->inumber); /* * Change the inode number to the new value. @@ -868,8 +868,8 @@ xfs_dir2_block_sort( la = a; lb = b; - return be32_to_cpu(la->hashval) < be32_to_cpu(lb->hashval) ? -1 : - (be32_to_cpu(la->hashval) > be32_to_cpu(lb->hashval) ? 1 : 0); + return INT_GET(la->hashval, ARCH_CONVERT) < INT_GET(lb->hashval, ARCH_CONVERT) ? -1 : + (INT_GET(la->hashval, ARCH_CONVERT) > INT_GET(lb->hashval, ARCH_CONVERT) ? 
1 : 0); } /* @@ -881,7 +881,7 @@ xfs_dir2_leaf_to_block( xfs_dabuf_t *lbp, /* leaf buffer */ xfs_dabuf_t *dbp) /* data buffer */ { - __be16 *bestsp; /* leaf bests table */ + xfs_dir2_data_off_t *bestsp; /* leaf bests table */ xfs_dir2_block_t *block; /* block structure */ xfs_dir2_block_tail_t *btp; /* block tail */ xfs_inode_t *dp; /* incore directory inode */ @@ -896,7 +896,7 @@ xfs_dir2_leaf_to_block( int needscan; /* need to scan for bestfree */ xfs_dir2_sf_hdr_t sfh; /* shortform header */ int size; /* bytes used */ - __be16 *tagp; /* end of entry (tag) */ + xfs_dir2_data_off_t *tagp; /* end of entry (tag) */ int to; /* block/leaf to index */ xfs_trans_t *tp; /* transaction pointer */ @@ -905,7 +905,7 @@ xfs_dir2_leaf_to_block( tp = args->trans; mp = dp->i_mount; leaf = lbp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); /* * If there are data blocks other than the first one, take this @@ -915,11 +915,11 @@ xfs_dir2_leaf_to_block( */ while (dp->i_d.di_size > mp->m_dirblksize) { bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) == + if (INT_GET(bestsp[INT_GET(ltp->bestcount, ARCH_CONVERT) - 1], ARCH_CONVERT) == mp->m_dirblksize - (uint)sizeof(block->hdr)) { if ((error = xfs_dir2_leaf_trim_data(args, lbp, - (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1)))) + (xfs_dir2_db_t)(INT_GET(ltp->bestcount, ARCH_CONVERT) - 1)))) goto out; } else { error = 0; @@ -935,29 +935,28 @@ xfs_dir2_leaf_to_block( goto out; } block = dbp->data; - ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(INT_GET(block->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); /* * Size of the "leaf" area in the block. */ size = (uint)sizeof(block->tail) + - (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); + (uint)sizeof(*lep) * (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); /* * Look at the last data entry. */ - tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1; - dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); + tagp = (xfs_dir2_data_off_t *)((char *)block + mp->m_dirblksize) - 1; + dup = (xfs_dir2_data_unused_t *)((char *)block + INT_GET(*tagp, ARCH_CONVERT)); /* * If it's not free or is too short we can't do it. */ - if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG || - be16_to_cpu(dup->length) < size) { + if (INT_GET(dup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG || INT_GET(dup->length, ARCH_CONVERT) < size) { error = 0; goto out; } /* * Start converting it to block form. */ - block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); needlog = 1; needscan = 0; /* @@ -969,20 +968,20 @@ xfs_dir2_leaf_to_block( * Initialize the block tail. */ btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); - btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); + INT_SET(btp->count, ARCH_CONVERT, INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)); btp->stale = 0; xfs_dir2_block_log_tail(tp, dbp); /* * Initialize the block leaf area. We compact out stale entries. 
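
[Note for review, not part of the patch: the xfs_dir2_block_sort() comparator just completed above orders leaf entries by hash value for xfs_sort(). The same shape works with plain qsort(); note the explicit three-way compare rather than subtraction, which could wrap for 32-bit hashes. A hedged, native-endian stand-in:

	#include <stdint.h>
	#include <stdlib.h>

	struct sketch_leaf_entry {
		uint32_t hashval;
		uint32_t address;
	};

	static int sketch_cmp_hash(const void *a, const void *b)
	{
		const struct sketch_leaf_entry *la = a;
		const struct sketch_leaf_entry *lb = b;

		/* three-way compare; (la->hashval - lb->hashval) could overflow */
		return la->hashval < lb->hashval ? -1 :
		       (la->hashval > lb->hashval ? 1 : 0);
	}

	/* usage: qsort(blp, count, sizeof(*blp), sketch_cmp_hash); */
]
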
*/ lep = XFS_DIR2_BLOCK_LEAF_P(btp); - for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { - if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) + for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) continue; lep[to++] = leaf->ents[from]; } - ASSERT(to == be32_to_cpu(btp->count)); - xfs_dir2_block_log_leaf(tp, dbp, 0, be32_to_cpu(btp->count) - 1); + ASSERT(to == INT_GET(btp->count, ARCH_CONVERT)); + xfs_dir2_block_log_leaf(tp, dbp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); /* * Scan the bestfree if we need it and log the data block header. */ @@ -1044,7 +1043,7 @@ xfs_dir2_sf_to_block( int offset; /* target block offset */ xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ xfs_dir2_sf_t *sfp; /* shortform structure */ - __be16 *tagp; /* end of data entry */ + xfs_dir2_data_off_t *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_trace_args("sf_to_block", args); @@ -1096,12 +1095,12 @@ xfs_dir2_sf_to_block( return error; } block = bp->data; - block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_BLOCK_MAGIC); /* * Compute size of block "tail" area. */ i = (uint)sizeof(*btp) + - (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); + (INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); /* * The whole thing is initialized to free by the init routine. * Say we're using the leaf and tail area. @@ -1115,7 +1114,7 @@ xfs_dir2_sf_to_block( * Fill in the tail. */ btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); - btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */ + INT_SET(btp->count, ARCH_CONVERT, INT_GET(sfp->hdr.count, ARCH_CONVERT) + 2); /* ., .. */ btp->stale = 0; blp = XFS_DIR2_BLOCK_LEAF_P(btp); endoffset = (uint)((char *)blp - (char *)block); @@ -1124,7 +1123,7 @@ xfs_dir2_sf_to_block( */ xfs_dir2_data_use_free(tp, bp, dup, (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), - be16_to_cpu(dup->length), &needlog, &needscan); + INT_GET(dup->length, ARCH_CONVERT), &needlog, &needscan); /* * Create entry for . */ @@ -1134,11 +1133,10 @@ xfs_dir2_sf_to_block( dep->namelen = 1; dep->name[0] = '.'; tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); xfs_dir2_data_log_entry(tp, bp, dep); - blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); - blp[0].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, - (char *)dep - (char *)block)); + INT_SET(blp[0].hashval, ARCH_CONVERT, xfs_dir_hash_dot); + INT_SET(blp[0].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); /* * Create entry for .. */ @@ -1148,16 +1146,15 @@ xfs_dir2_sf_to_block( dep->namelen = 2; dep->name[0] = dep->name[1] = '.'; tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); xfs_dir2_data_log_entry(tp, bp, dep); - blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); - blp[1].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, - (char *)dep - (char *)block)); + INT_SET(blp[1].hashval, ARCH_CONVERT, xfs_dir_hash_dotdot); + INT_SET(blp[1].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); offset = XFS_DIR2_DATA_FIRST_OFFSET; /* * Loop over existing entries, stuff them in. 
*/ - if ((i = 0) == sfp->hdr.count) + if ((i = 0) == INT_GET(sfp->hdr.count, ARCH_CONVERT)) sfep = NULL; else sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); @@ -1179,14 +1176,15 @@ xfs_dir2_sf_to_block( if (offset < newoffset) { dup = (xfs_dir2_data_unused_t *) ((char *)block + offset); - dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); - dup->length = cpu_to_be16(newoffset - offset); - *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16( + INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(dup->length, ARCH_CONVERT, newoffset - offset); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, + (xfs_dir2_data_off_t) ((char *)dup - (char *)block)); xfs_dir2_data_log_unused(tp, bp, dup); (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, dup, &dummy); - offset += be16_to_cpu(dup->length); + offset += INT_GET(dup->length, ARCH_CONVERT); continue; } /* @@ -1198,14 +1196,13 @@ xfs_dir2_sf_to_block( dep->namelen = sfep->namelen; memcpy(dep->name, sfep->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)block)); xfs_dir2_data_log_entry(tp, bp, dep); - blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname( - (char *)sfep->name, sfep->namelen)); - blp[2 + i].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, + INT_SET(blp[2 + i].hashval, ARCH_CONVERT, xfs_da_hashname((char *)sfep->name, sfep->namelen)); + INT_SET(blp[2 + i].address, ARCH_CONVERT, XFS_DIR2_BYTE_TO_DATAPTR(mp, (char *)dep - (char *)block)); offset = (int)((char *)(tagp + 1) - (char *)block); - if (++i == sfp->hdr.count) + if (++i == INT_GET(sfp->hdr.count, ARCH_CONVERT)) sfep = NULL; else sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); @@ -1215,13 +1212,13 @@ xfs_dir2_sf_to_block( /* * Sort the leaf entries by hash value. */ - xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort); + xfs_sort(blp, INT_GET(btp->count, ARCH_CONVERT), sizeof(*blp), xfs_dir2_block_sort); /* * Log the leaf entry area and tail. * Already logged the header in data_init, ignore needlog. 
*/ ASSERT(needscan == 0); - xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1); + xfs_dir2_block_log_leaf(tp, bp, 0, INT_GET(btp->count, ARCH_CONVERT) - 1); xfs_dir2_block_log_tail(tp, bp); xfs_dir2_data_check(dp, bp); xfs_da_buf_done(bp); diff --git a/trunk/fs/xfs/xfs_dir2_block.h b/trunk/fs/xfs/xfs_dir2_block.h index 6722effd0b20..a2e5cb98a838 100644 --- a/trunk/fs/xfs/xfs_dir2_block.h +++ b/trunk/fs/xfs/xfs_dir2_block.h @@ -43,8 +43,8 @@ struct xfs_trans; #define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ typedef struct xfs_dir2_block_tail { - __be32 count; /* count of leaf entries */ - __be32 stale; /* count of stale lf entries */ + __uint32_t count; /* count of leaf entries */ + __uint32_t stale; /* count of stale lf entries */ } xfs_dir2_block_tail_t; /* @@ -75,7 +75,8 @@ xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block) static inline struct xfs_dir2_leaf_entry * xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) { - return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count); + return (((struct xfs_dir2_leaf_entry *) + (btp)) - INT_GET((btp)->count, ARCH_CONVERT)); } /* diff --git a/trunk/fs/xfs/xfs_dir2_data.c b/trunk/fs/xfs/xfs_dir2_data.c index bb3d03ff002b..5b7c47e2f14a 100644 --- a/trunk/fs/xfs/xfs_dir2_data.c +++ b/trunk/fs/xfs/xfs_dir2_data.c @@ -70,11 +70,11 @@ xfs_dir2_data_check( mp = dp->i_mount; d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); bf = d->hdr.bestfree; p = (char *)d->u; - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); lep = XFS_DIR2_BLOCK_LEAF_P(btp); endp = (char *)lep; @@ -96,8 +96,8 @@ xfs_dir2_data_check( ASSERT(!bf[2].offset); freeseen |= 1 << 2; } - ASSERT(be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length)); - ASSERT(be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length)); + ASSERT(INT_GET(bf[0].length, ARCH_CONVERT) >= INT_GET(bf[1].length, ARCH_CONVERT)); + ASSERT(INT_GET(bf[1].length, ARCH_CONVERT) >= INT_GET(bf[2].length, ARCH_CONVERT)); /* * Loop over the data/unused entries. */ @@ -108,20 +108,18 @@ xfs_dir2_data_check( * If we find it, account for that, else make sure it * doesn't need to be there. 
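
[Note for review, not part of the patch: the xfs_dir2_block_leaf_p() helper changed above encodes the single-block directory layout: the block ends with a small tail (count/stale), and the leaf-entry array sits immediately before it, so the array's start is found by stepping count entries backwards from the tail. A native-endian sketch of that pointer arithmetic:

	#include <stdint.h>

	struct sketch_leaf_entry {
		uint32_t hashval;
		uint32_t address;
	};

	struct sketch_block_tail {
		uint32_t count;		/* leaf entries in front of the tail */
		uint32_t stale;		/* how many of those are stale */
	};

	static inline struct sketch_leaf_entry *
	sketch_block_leaf_p(struct sketch_block_tail *btp)
	{
		/* the array ends exactly where the tail begins */
		return (struct sketch_leaf_entry *)btp - btp->count;
	}
]
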
*/ - if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { ASSERT(lastfree == 0); - ASSERT(be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)) == + ASSERT(INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT) == (char *)dup - (char *)d); dfp = xfs_dir2_data_freefind(d, dup); if (dfp) { i = (int)(dfp - bf); ASSERT((freeseen & (1 << i)) == 0); freeseen |= 1 << i; - } else { - ASSERT(be16_to_cpu(dup->length) <= - be16_to_cpu(bf[2].length)); - } - p += be16_to_cpu(dup->length); + } else + ASSERT(INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(bf[2].length, ARCH_CONVERT)); + p += INT_GET(dup->length, ARCH_CONVERT); lastfree = 1; continue; } @@ -134,21 +132,21 @@ xfs_dir2_data_check( dep = (xfs_dir2_data_entry_t *)p; ASSERT(dep->namelen != 0); ASSERT(xfs_dir_ino_validate(mp, INT_GET(dep->inumber, ARCH_CONVERT)) == 0); - ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) == + ASSERT(INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT) == (char *)dep - (char *)d); count++; lastfree = 0; - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) ((char *)dep - (char *)d)); hash = xfs_da_hashname((char *)dep->name, dep->namelen); - for (i = 0; i < be32_to_cpu(btp->count); i++) { - if (be32_to_cpu(lep[i].address) == addr && - be32_to_cpu(lep[i].hashval) == hash) + for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if (INT_GET(lep[i].address, ARCH_CONVERT) == addr && + INT_GET(lep[i].hashval, ARCH_CONVERT) == hash) break; } - ASSERT(i < be32_to_cpu(btp->count)); + ASSERT(i < INT_GET(btp->count, ARCH_CONVERT)); } p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); } @@ -156,15 +154,15 @@ xfs_dir2_data_check( * Need to have seen all the entries and all the bestfree slots. */ ASSERT(freeseen == 7); - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { - for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { - if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR) + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { + for (i = stale = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if (INT_GET(lep[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) stale++; if (i > 0) - ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); + ASSERT(INT_GET(lep[i].hashval, ARCH_CONVERT) >= INT_GET(lep[i - 1].hashval, ARCH_CONVERT)); } - ASSERT(count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)); - ASSERT(stale == be32_to_cpu(btp->stale)); + ASSERT(count == INT_GET(btp->count, ARCH_CONVERT) - INT_GET(btp->stale, ARCH_CONVERT)); + ASSERT(stale == INT_GET(btp->stale, ARCH_CONVERT)); } } #endif @@ -192,8 +190,8 @@ xfs_dir2_data_freefind( * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { @@ -203,24 +201,23 @@ xfs_dir2_data_freefind( continue; } ASSERT(seenzero == 0); - if (be16_to_cpu(dfp->offset) == off) { + if (INT_GET(dfp->offset, ARCH_CONVERT) == off) { matched = 1; - ASSERT(dfp->length == dup->length); - } else if (off < be16_to_cpu(dfp->offset)) - ASSERT(off + be16_to_cpu(dup->length) <= be16_to_cpu(dfp->offset)); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(dup->length, ARCH_CONVERT)); + } else if (off < INT_GET(dfp->offset, ARCH_CONVERT)) + ASSERT(off + INT_GET(dup->length, ARCH_CONVERT) <= INT_GET(dfp->offset, ARCH_CONVERT)); else - ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off); - ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) + INT_GET(dfp->length, ARCH_CONVERT) <= off); + ASSERT(matched || INT_GET(dfp->length, ARCH_CONVERT) >= INT_GET(dup->length, ARCH_CONVERT)); if (dfp > &d->hdr.bestfree[0]) - ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length)); + ASSERT(INT_GET(dfp[-1].length, ARCH_CONVERT) >= INT_GET(dfp[0].length, ARCH_CONVERT)); } #endif /* * If this is smaller than the smallest bestfree entry, * it can't be there since they're sorted. */ - if (be16_to_cpu(dup->length) < - be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length)) + if (INT_GET(dup->length, ARCH_CONVERT) < INT_GET(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length, ARCH_CONVERT)) return NULL; /* * Look at the three bestfree entries for our guy. @@ -230,7 +227,7 @@ xfs_dir2_data_freefind( dfp++) { if (!dfp->offset) return NULL; - if (be16_to_cpu(dfp->offset) == off) + if (INT_GET(dfp->offset, ARCH_CONVERT) == off) return dfp; } /* @@ -252,29 +249,29 @@ xfs_dir2_data_freeinsert( xfs_dir2_data_free_t new; /* new bestfree entry */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); #endif dfp = d->hdr.bestfree; - new.length = dup->length; - new.offset = cpu_to_be16((char *)dup - (char *)d); + INT_COPY(new.length, dup->length, ARCH_CONVERT); + INT_SET(new.offset, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dup - (char *)d)); /* * Insert at position 0, 1, or 2; or not at all. 
*/ - if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) { + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[0].length, ARCH_CONVERT)) { dfp[2] = dfp[1]; dfp[1] = dfp[0]; dfp[0] = new; *loghead = 1; return &dfp[0]; } - if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) { + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[1].length, ARCH_CONVERT)) { dfp[2] = dfp[1]; dfp[1] = new; *loghead = 1; return &dfp[1]; } - if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) { + if (INT_GET(new.length, ARCH_CONVERT) > INT_GET(dfp[2].length, ARCH_CONVERT)) { dfp[2] = new; *loghead = 1; return &dfp[2]; @@ -292,8 +289,8 @@ xfs_dir2_data_freeremove( int *loghead) /* out: log data header */ { #ifdef __KERNEL__ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); #endif /* * It's the first entry, slide the next 2 up. @@ -337,8 +334,8 @@ xfs_dir2_data_freescan( char *p; /* current entry pointer */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); #endif /* * Start by clearing the table. @@ -351,7 +348,7 @@ xfs_dir2_data_freescan( p = (char *)d->u; if (aendp) endp = aendp; - else if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + else if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC) { btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); } else @@ -364,11 +361,11 @@ xfs_dir2_data_freescan( /* * If it's a free entry, insert it. */ - if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { ASSERT((char *)dup - (char *)d == - be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); + INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); xfs_dir2_data_freeinsert(d, dup, loghead); - p += be16_to_cpu(dup->length); + p += INT_GET(dup->length, ARCH_CONVERT); } /* * For active entries, check their tags and skip them. @@ -376,7 +373,7 @@ xfs_dir2_data_freescan( else { dep = (xfs_dir2_data_entry_t *)p; ASSERT((char *)dep - (char *)d == - be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep))); + INT_GET(*XFS_DIR2_DATA_ENTRY_TAG_P(dep), ARCH_CONVERT)); p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); } } @@ -418,8 +415,8 @@ xfs_dir2_data_init( * Initialize the header. */ d = bp->data; - d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); - d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr)); + INT_SET(d->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); + INT_SET(d->hdr.bestfree[0].offset, ARCH_CONVERT, (xfs_dir2_data_off_t)sizeof(d->hdr)); for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { d->hdr.bestfree[i].length = 0; d->hdr.bestfree[i].offset = 0; @@ -428,12 +425,13 @@ xfs_dir2_data_init( * Set up an unused entry for the block's body. 
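
[Note for review, not part of the patch: xfs_dir2_data_freeinsert(), converted just above, maintains the data block's bestfree table: the three largest free regions, sorted by descending length. A candidate lands in slot 0, 1, or 2, sliding smaller entries down, or is dropped if it beats none of them. A hedged sketch with native-endian fields:

	#include <stddef.h>
	#include <stdint.h>

	struct sketch_data_free {
		uint16_t offset;	/* start of the free region */
		uint16_t length;	/* its length */
	};

	/* Insert cand into the descending top-3 table, or return NULL
	 * if it is too small to be worth tracking. */
	static struct sketch_data_free *
	sketch_freeinsert(struct sketch_data_free dfp[3],
			  struct sketch_data_free cand)
	{
		if (cand.length > dfp[0].length) {
			dfp[2] = dfp[1];
			dfp[1] = dfp[0];
			dfp[0] = cand;
			return &dfp[0];
		}
		if (cand.length > dfp[1].length) {
			dfp[2] = dfp[1];
			dfp[1] = cand;
			return &dfp[1];
		}
		if (cand.length > dfp[2].length) {
			dfp[2] = cand;
			return &dfp[2];
		}
		return NULL;
	}
]
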
*/ dup = &d->u[0].unused; - dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); + INT_SET(dup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); t=mp->m_dirblksize - (uint)sizeof(d->hdr); - d->hdr.bestfree[0].length = cpu_to_be16(t); - dup->length = cpu_to_be16(t); - *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16((char *)dup - (char *)d); + INT_SET(d->hdr.bestfree[0].length, ARCH_CONVERT, t); + INT_SET(dup->length, ARCH_CONVERT, t); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)dup - (char *)d)); /* * Log it and return it. */ @@ -455,8 +453,8 @@ xfs_dir2_data_log_entry( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - (char *)d - 1)); @@ -473,8 +471,8 @@ xfs_dir2_data_log_header( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), (uint)(sizeof(d->hdr) - 1)); } @@ -491,8 +489,8 @@ xfs_dir2_data_log_unused( xfs_dir2_data_t *d; /* data block pointer */ d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); /* * Log the first part of the unused entry. */ @@ -535,12 +533,12 @@ xfs_dir2_data_make_free( /* * Figure out where the end of the data area is. */ - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC) + if (INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC) endptr = (char *)d + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); } @@ -549,11 +547,11 @@ xfs_dir2_data_make_free( * the previous entry and see if it's free. */ if (offset > sizeof(d->hdr)) { - __be16 *tagp; /* tag just before us */ + xfs_dir2_data_off_t *tagp; /* tag just before us */ - tagp = (__be16 *)((char *)d + offset) - 1; - prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp)); - if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG) + tagp = (xfs_dir2_data_off_t *)((char *)d + offset) - 1; + prevdup = (xfs_dir2_data_unused_t *)((char *)d + INT_GET(*tagp, ARCH_CONVERT)); + if (INT_GET(prevdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) prevdup = NULL; } else prevdup = NULL; @@ -564,7 +562,7 @@ xfs_dir2_data_make_free( if ((char *)d + offset + len < endptr) { postdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG) + if (INT_GET(postdup->freetag, ARCH_CONVERT) != XFS_DIR2_DATA_FREE_TAG) postdup = NULL; } else postdup = NULL; @@ -588,13 +586,13 @@ xfs_dir2_data_make_free( * since the third bestfree is there, there might be more * entries. 
*/ - needscan = (d->hdr.bestfree[2].length != 0); + needscan = d->hdr.bestfree[2].length; /* * Fix up the new big freespace. */ - be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); - *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = - cpu_to_be16((char *)prevdup - (char *)d); + INT_MOD(prevdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, prevdup); if (!needscan) { /* @@ -616,7 +614,7 @@ xfs_dir2_data_make_free( */ dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); ASSERT(dfp == &d->hdr.bestfree[0]); - ASSERT(dfp->length == prevdup->length); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(prevdup->length, ARCH_CONVERT)); ASSERT(!dfp[1].length); ASSERT(!dfp[2].length); } @@ -626,9 +624,9 @@ xfs_dir2_data_make_free( */ else if (prevdup) { dfp = xfs_dir2_data_freefind(d, prevdup); - be16_add(&prevdup->length, len); - *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = - cpu_to_be16((char *)prevdup - (char *)d); + INT_MOD(prevdup->length, ARCH_CONVERT, len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(prevdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)prevdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, prevdup); /* * If the previous entry was in the table, the new entry @@ -642,10 +640,8 @@ xfs_dir2_data_make_free( /* * Otherwise we need a scan if the new entry is big enough. */ - else { - needscan = be16_to_cpu(prevdup->length) > - be16_to_cpu(d->hdr.bestfree[2].length); - } + else + needscan = INT_GET(prevdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); } /* * The following entry is free, merge with it. @@ -653,10 +649,10 @@ xfs_dir2_data_make_free( else if (postdup) { dfp = xfs_dir2_data_freefind(d, postdup); newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); - newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); - newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, len + INT_GET(postdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If the following entry was in the table, the new entry @@ -670,20 +666,18 @@ xfs_dir2_data_make_free( /* * Otherwise we need a scan if the new entry is big enough. */ - else { - needscan = be16_to_cpu(newdup->length) > - be16_to_cpu(d->hdr.bestfree[2].length); - } + else + needscan = INT_GET(newdup->length, ARCH_CONVERT) > INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT); } /* * Neither neighbor is free. Make a new entry. 
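
[Note for review, not part of the patch: the branches of xfs_dir2_data_make_free() above handle the four coalescing cases when a range turns free: previous neighbour free, following neighbour free, both, or neither. Reduced to lengths alone, the merging looks roughly like this (a sketch only; the real code also rewrites tags, rebuilds the bestfree table, and logs the buffer):

	struct sketch_extent {
		int offset;
		int length;
		int is_free;
	};

	/* Mark cur free and merge it with whichever neighbours are free;
	 * merged-away extents are left with length 0. */
	static void sketch_make_free(struct sketch_extent *prev,
				     struct sketch_extent *cur,
				     struct sketch_extent *next)
	{
		cur->is_free = 1;
		if (next && next->is_free) {	/* absorb the following extent */
			cur->length += next->length;
			next->length = 0;
		}
		if (prev && prev->is_free) {	/* fold into the previous one */
			prev->length += cur->length;
			cur->length = 0;
		}
	}
]
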
*/ else { newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); - newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); - newdup->length = cpu_to_be16(len); - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, newdup); (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); } @@ -713,18 +707,18 @@ xfs_dir2_data_use_free( int oldlen; /* old unused entry's length */ d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); + ASSERT(INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC || + INT_GET(d->hdr.magic, ARCH_CONVERT) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG); ASSERT(offset >= (char *)dup - (char *)d); - ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d); - ASSERT((char *)dup - (char *)d == be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); + ASSERT(offset + len <= (char *)dup + INT_GET(dup->length, ARCH_CONVERT) - (char *)d); + ASSERT((char *)dup - (char *)d == INT_GET(*XFS_DIR2_DATA_UNUSED_TAG_P(dup), ARCH_CONVERT)); /* * Look up the entry in the bestfree table. */ dfp = xfs_dir2_data_freefind(d, dup); - oldlen = be16_to_cpu(dup->length); - ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length)); + oldlen = INT_GET(dup->length, ARCH_CONVERT); + ASSERT(dfp || oldlen <= INT_GET(d->hdr.bestfree[2].length, ARCH_CONVERT)); /* * Check for alignment with front and back of the entry. */ @@ -738,7 +732,7 @@ xfs_dir2_data_use_free( */ if (matchfront && matchback) { if (dfp) { - needscan = (d->hdr.bestfree[2].offset != 0); + needscan = d->hdr.bestfree[2].offset; if (!needscan) xfs_dir2_data_freeremove(d, dfp, needlogp); } @@ -749,10 +743,10 @@ xfs_dir2_data_use_free( */ else if (matchfront) { newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); - newdup->length = cpu_to_be16(oldlen - len); - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + INT_SET(newdup->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup->length, ARCH_CONVERT, oldlen - len); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. 
@@ -761,8 +755,8 @@ xfs_dir2_data_use_free( xfs_dir2_data_freeremove(d, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); ASSERT(dfp != NULL); - ASSERT(dfp->length == newdup->length); - ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); /* * If we got inserted at the last slot, * that means we don't know if there was a better @@ -777,9 +771,10 @@ xfs_dir2_data_use_free( */ else if (matchback) { newdup = dup; - newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) + (((char *)d + offset) - (char *)newdup)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. @@ -788,8 +783,8 @@ xfs_dir2_data_use_free( xfs_dir2_data_freeremove(d, dfp, needlogp); dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); ASSERT(dfp != NULL); - ASSERT(dfp->length == newdup->length); - ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); + ASSERT(INT_GET(dfp->length, ARCH_CONVERT) == INT_GET(newdup->length, ARCH_CONVERT)); + ASSERT(INT_GET(dfp->offset, ARCH_CONVERT) == (char *)newdup - (char *)d); /* * If we got inserted at the last slot, * that means we don't know if there was a better @@ -804,15 +799,16 @@ xfs_dir2_data_use_free( */ else { newdup = dup; - newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + INT_SET(newdup->length, ARCH_CONVERT, (xfs_dir2_data_off_t) + (((char *)d + offset) - (char *)newdup)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup - (char *)d)); xfs_dir2_data_log_unused(tp, bp, newdup); newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); - newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); - newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup2) = - cpu_to_be16((char *)newdup2 - (char *)d); + INT_SET(newdup2->freetag, ARCH_CONVERT, XFS_DIR2_DATA_FREE_TAG); + INT_SET(newdup2->length, ARCH_CONVERT, oldlen - len - INT_GET(newdup->length, ARCH_CONVERT)); + INT_SET(*XFS_DIR2_DATA_UNUSED_TAG_P(newdup2), ARCH_CONVERT, + (xfs_dir2_data_off_t)((char *)newdup2 - (char *)d)); xfs_dir2_data_log_unused(tp, bp, newdup2); /* * If the old entry was in the table, we need to scan @@ -823,7 +819,7 @@ xfs_dir2_data_use_free( * the 2 new will work. */ if (dfp) { - needscan = (d->hdr.bestfree[2].length != 0); + needscan = d->hdr.bestfree[2].length; if (!needscan) { xfs_dir2_data_freeremove(d, dfp, needlogp); (void)xfs_dir2_data_freeinsert(d, newdup, diff --git a/trunk/fs/xfs/xfs_dir2_data.h b/trunk/fs/xfs/xfs_dir2_data.h index 0847cbb53e17..5e3a7f9ec735 100644 --- a/trunk/fs/xfs/xfs_dir2_data.h +++ b/trunk/fs/xfs/xfs_dir2_data.h @@ -65,8 +65,8 @@ struct xfs_trans; * The freespace will be formatted as a xfs_dir2_data_unused_t. 
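
[Note for review, not part of the patch: the matchfront/matchback logic running through xfs_dir2_data_use_free() above distinguishes how an allocation carves into one free region: consuming it exactly, trimming its front, trimming its back, or splitting it into two remainders. A small classifier makes the four cases explicit (hypothetical names, all bookkeeping omitted):

	enum sketch_use_case { USE_EXACT, USE_FRONT, USE_BACK, USE_SPLIT };

	static enum sketch_use_case
	sketch_classify_use(int free_off, int free_len, int offset, int len)
	{
		int matchfront = (offset == free_off);
		int matchback  = (offset + len == free_off + free_len);

		if (matchfront && matchback)
			return USE_EXACT;	/* whole free region consumed */
		if (matchfront)
			return USE_FRONT;	/* remainder follows the allocation */
		if (matchback)
			return USE_BACK;	/* remainder precedes it */
		return USE_SPLIT;		/* a remainder on each side */
	}

In the USE_SPLIT case the function above builds two new unused entries, which is why it may need a full bestfree rescan afterwards.]
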
*/ typedef struct xfs_dir2_data_free { - __be16 offset; /* start of freespace */ - __be16 length; /* length of freespace */ + xfs_dir2_data_off_t offset; /* start of freespace */ + xfs_dir2_data_off_t length; /* length of freespace */ } xfs_dir2_data_free_t; /* @@ -75,7 +75,7 @@ typedef struct xfs_dir2_data_free { * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. */ typedef struct xfs_dir2_data_hdr { - __be32 magic; /* XFS_DIR2_DATA_MAGIC */ + __uint32_t magic; /* XFS_DIR2_DATA_MAGIC */ /* or XFS_DIR2_BLOCK_MAGIC */ xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; } xfs_dir2_data_hdr_t; @@ -97,10 +97,10 @@ typedef struct xfs_dir2_data_entry { * Tag appears as the last 2 bytes. */ typedef struct xfs_dir2_data_unused { - __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */ - __be16 length; /* total free length */ + __uint16_t freetag; /* XFS_DIR2_DATA_FREE_TAG */ + xfs_dir2_data_off_t length; /* total free length */ /* variable offset */ - __be16 tag; /* starting offset of us */ + xfs_dir2_data_off_t tag; /* starting offset of us */ } xfs_dir2_data_unused_t; typedef union { @@ -134,11 +134,12 @@ static inline int xfs_dir2_data_entsize(int n) * Pointer to an entry's tag word. */ #define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) -static inline __be16 * +static inline xfs_dir2_data_off_t * xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) { - return (__be16 *)((char *)dep + - XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16)); + return (xfs_dir2_data_off_t *) \ + ((char *)(dep) + XFS_DIR2_DATA_ENTSIZE((dep)->namelen) - \ + (uint)sizeof(xfs_dir2_data_off_t)); } /* @@ -146,11 +147,12 @@ xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) */ #define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \ xfs_dir2_data_unused_tag_p(dup) -static inline __be16 * +static inline xfs_dir2_data_off_t * xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) { - return (__be16 *)((char *)dup + - be16_to_cpu(dup->length) - sizeof(__be16)); + return (xfs_dir2_data_off_t *) \ + ((char *)(dup) + INT_GET((dup)->length, ARCH_CONVERT) \ + - (uint)sizeof(xfs_dir2_data_off_t)); } /* diff --git a/trunk/fs/xfs/xfs_dir2_leaf.c b/trunk/fs/xfs/xfs_dir2_leaf.c index 08648b18265c..d342b6b55239 100644 --- a/trunk/fs/xfs/xfs_dir2_leaf.c +++ b/trunk/fs/xfs/xfs_dir2_leaf.c @@ -66,7 +66,7 @@ xfs_dir2_block_to_leaf( xfs_da_args_t *args, /* operation arguments */ xfs_dabuf_t *dbp) /* input block's buffer */ { - __be16 *bestsp; /* leaf's bestsp entries */ + xfs_dir2_data_off_t *bestsp; /* leaf's bestsp entries */ xfs_dablk_t blkno; /* leaf block's bno */ xfs_dir2_block_t *block; /* block structure */ xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ @@ -111,14 +111,14 @@ xfs_dir2_block_to_leaf( /* * Set the counts in the leaf header. */ - leaf->hdr.count = cpu_to_be16(be32_to_cpu(btp->count)); - leaf->hdr.stale = cpu_to_be16(be32_to_cpu(btp->stale)); + INT_COPY(leaf->hdr.count, btp->count, ARCH_CONVERT); /* INT_: type change */ + INT_COPY(leaf->hdr.stale, btp->stale, ARCH_CONVERT); /* INT_: type change */ /* * Could compact these but I think we always do the conversion * after squeezing out stale entries. 
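
The xfs_dir2_data.h hunk above also retypes the helpers that locate a record's trailing "tag" word, which stores the record's own starting offset in its last two bytes. A minimal sketch of that layout trick, using hypothetical demo types rather than the XFS ones:

	#include <stddef.h>
	#include <stdint.h>

	typedef uint16_t demo_off_t;	/* stand-in for xfs_dir2_data_off_t */

	/* a variable-length free-space record; the final two bytes hold
	 * the record's own starting offset so a block can be walked
	 * backwards from any record boundary */
	struct demo_unused {
		uint16_t	freetag;	/* free-space marker */
		demo_off_t	length;		/* total length, tag included */
		/* variable-size hole, then the tag word */
	};

	/* the tag always sits sizeof(demo_off_t) bytes before the record
	 * end; `len` is the length already converted to CPU byte order */
	static inline demo_off_t *
	demo_unused_tag_p(struct demo_unused *dup, size_t len)
	{
		return (demo_off_t *)((char *)dup + len - sizeof(demo_off_t));
	}
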
*/ - memcpy(leaf->ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t)); - xfs_dir2_leaf_log_ents(tp, lbp, 0, be16_to_cpu(leaf->hdr.count) - 1); + memcpy(leaf->ents, blp, INT_GET(btp->count, ARCH_CONVERT) * sizeof(xfs_dir2_leaf_entry_t)); + xfs_dir2_leaf_log_ents(tp, lbp, 0, INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1); needscan = 0; needlog = 1; /* @@ -133,7 +133,7 @@ xfs_dir2_block_to_leaf( /* * Fix up the block header, make it a data block. */ - block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); + INT_SET(block->hdr.magic, ARCH_CONVERT, XFS_DIR2_DATA_MAGIC); if (needscan) xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog, NULL); @@ -141,9 +141,9 @@ xfs_dir2_block_to_leaf( * Set up leaf tail and bests table. */ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - ltp->bestcount = cpu_to_be32(1); + INT_SET(ltp->bestcount, ARCH_CONVERT, 1); bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - bestsp[0] = block->hdr.bestfree[0].length; + INT_COPY(bestsp[0], block->hdr.bestfree[0].length, ARCH_CONVERT); /* * Log the data header and leaf bests table. */ @@ -163,7 +163,7 @@ int /* error */ xfs_dir2_leaf_addname( xfs_da_args_t *args) /* operation arguments */ { - __be16 *bestsp; /* freespace table in leaf */ + xfs_dir2_data_off_t *bestsp; /* freespace table in leaf */ int compact; /* need to compact leaves */ xfs_dir2_data_t *data; /* data block structure */ xfs_dabuf_t *dbp; /* data block buffer */ @@ -187,7 +187,7 @@ xfs_dir2_leaf_addname( int needbytes; /* leaf block bytes needed */ int needlog; /* need to log data header */ int needscan; /* need to rescan data free */ - __be16 *tagp; /* end of data entry */ + xfs_dir2_data_off_t *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_db_t use_block; /* data block number */ @@ -222,14 +222,14 @@ xfs_dir2_leaf_addname( * in a data block, improving the lookup of those entries. */ for (use_block = -1, lep = &leaf->ents[index]; - index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; index++, lep++) { - if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) continue; - i = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); - ASSERT(i < be32_to_cpu(ltp->bestcount)); - ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); - if (be16_to_cpu(bestsp[i]) >= length) { + i = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); + ASSERT(i < INT_GET(ltp->bestcount, ARCH_CONVERT)); + ASSERT(INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF); + if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { use_block = i; break; } @@ -238,13 +238,13 @@ xfs_dir2_leaf_addname( * Didn't find a block yet, linear search all the data blocks. */ if (use_block == -1) { - for (i = 0; i < be32_to_cpu(ltp->bestcount); i++) { + for (i = 0; i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++) { /* * Remember a block we see that's missing. */ - if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1) + if (INT_GET(bestsp[i], ARCH_CONVERT) == NULLDATAOFF && use_block == -1) use_block = i; - else if (be16_to_cpu(bestsp[i]) >= length) { + else if (INT_GET(bestsp[i], ARCH_CONVERT) >= length) { use_block = i; break; } @@ -260,21 +260,21 @@ xfs_dir2_leaf_addname( * Now kill use_block if it refers to a missing block, so we * can use it as an indication of allocation needed. 
*/ - if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF) + if (use_block != -1 && INT_GET(bestsp[use_block], ARCH_CONVERT) == NULLDATAOFF) use_block = -1; /* * If we don't have enough free bytes but we can make enough * by compacting out stale entries, we'll do that. */ - if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes && - be16_to_cpu(leaf->hdr.stale) > 1) { + if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes && + INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1) { compact = 1; } /* * Otherwise if we don't have enough free bytes we need to * convert to node form. */ - else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < + else if ((char *)bestsp - (char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] < needbytes) { /* * Just checking or no space reservation, give up. @@ -330,8 +330,8 @@ xfs_dir2_leaf_addname( * There are stale entries, so we'll need log-low and log-high * impossibly bad values later. */ - else if (be16_to_cpu(leaf->hdr.stale)) { - lfloglow = be16_to_cpu(leaf->hdr.count); + else if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) { + lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); lfloghigh = -1; } /* @@ -358,13 +358,13 @@ xfs_dir2_leaf_addname( * If we're adding a new data block on the end we need to * extend the bests table. Copy it up one entry. */ - if (use_block >= be32_to_cpu(ltp->bestcount)) { + if (use_block >= INT_GET(ltp->bestcount, ARCH_CONVERT)) { bestsp--; memmove(&bestsp[0], &bestsp[1], - be32_to_cpu(ltp->bestcount) * sizeof(bestsp[0])); - be32_add(&ltp->bestcount, 1); + INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(bestsp[0])); + INT_MOD(ltp->bestcount, ARCH_CONVERT, +1); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); } /* * If we're filling in a previously empty block just log it. @@ -372,7 +372,7 @@ xfs_dir2_leaf_addname( else xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); data = dbp->data; - bestsp[use_block] = data->hdr.bestfree[0].length; + INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); grown = 1; } /* @@ -394,8 +394,8 @@ xfs_dir2_leaf_addname( * Point to the biggest freespace in our data block. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); - ASSERT(be16_to_cpu(dup->length) >= length); + ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); + ASSERT(INT_GET(dup->length, ARCH_CONVERT) >= length); needscan = needlog = 0; /* * Mark the initial part of our freespace in use for the new entry. @@ -411,7 +411,7 @@ xfs_dir2_leaf_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - *tagp = cpu_to_be16((char *)dep - (char *)data); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); /* * Need to scan fix up the bestfree table. */ @@ -427,8 +427,8 @@ xfs_dir2_leaf_addname( * If the bests table needs to be changed, do it. * Log the change unless we've already done that.
*/ - if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) { - bestsp[use_block] = data->hdr.bestfree[0].length; + if (INT_GET(bestsp[use_block], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + INT_COPY(bestsp[use_block], data->hdr.bestfree[0].length, ARCH_CONVERT); if (!grown) xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); } @@ -440,15 +440,15 @@ xfs_dir2_leaf_addname( /* * lep is still good as the index leaf entry. */ - if (index < be16_to_cpu(leaf->hdr.count)) + if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) memmove(lep + 1, lep, - (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); /* * Record low and high logging indices for the leaf. */ lfloglow = index; - lfloghigh = be16_to_cpu(leaf->hdr.count); - be16_add(&leaf->hdr.count, 1); + lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); } /* * There are stale entries. @@ -468,7 +468,7 @@ xfs_dir2_leaf_addname( */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; @@ -478,8 +478,8 @@ xfs_dir2_leaf_addname( * lowstale entry would be better. */ for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale - 1 >= highstale - index); @@ -490,10 +490,10 @@ xfs_dir2_leaf_addname( * If the low one is better, use it. */ if (lowstale >= 0 && - (highstale == be16_to_cpu(leaf->hdr.count) || + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || index - lowstale - 1 < highstale - index)) { ASSERT(index - lowstale - 1 >= 0); - ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == + ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR); /* * Copy entries up to cover the stale entry @@ -512,7 +512,7 @@ xfs_dir2_leaf_addname( */ else { ASSERT(highstale - index >= 0); - ASSERT(be32_to_cpu(leaf->ents[highstale].address) == + ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR); /* * Copy entries down to copver the stale entry @@ -526,14 +526,13 @@ xfs_dir2_leaf_addname( lfloglow = MIN(index, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - be16_add(&leaf->hdr.stale, -1); + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); } /* * Fill in the new leaf entry. */ - lep->hashval = cpu_to_be32(args->hashval); - lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, - be16_to_cpu(*tagp))); + INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, INT_GET(*tagp, ARCH_CONVERT))); /* * Log the leaf fields and give up the buffers. */ @@ -564,30 +563,30 @@ xfs_dir2_leaf_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); /* * This value is not restrictive enough. * Should factor in the size of the bests table as well. * We can deduce a value for that from di_size. 
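
The addname hunks above open a hole in the packed leaf-entry array with memmove() and record the changed range in lfloglow/lfloghigh so a single contiguous span can be logged. The shape of that operation, reduced to plain arrays with hypothetical names:

	#include <stdint.h>
	#include <string.h>

	/* open a hole at `index` in a packed array of `count` entries,
	 * store the new value, and report the [low, high] range touched */
	static void demo_insert_ent(uint32_t *ents, int count, int index,
				    uint32_t val, int *loglow, int *loghigh)
	{
		if (index < count)
			memmove(&ents[index + 1], &ents[index],
				(count - index) * sizeof(*ents));
		ents[index] = val;
		*loglow = index;
		*loghigh = count;	/* position of the new last element */
	}
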
*/ - ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); /* * Leaves and bests don't overlap. */ - ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <= + ASSERT((char *)&leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT)] <= (char *)XFS_DIR2_LEAF_BESTS_P(ltp)); /* * Check hash value order, count stale entries. */ - for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { - if (i + 1 < be16_to_cpu(leaf->hdr.count)) - ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= - be32_to_cpu(leaf->ents[i + 1].hashval)); - if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { + if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) + ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= + INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); + if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) stale++; } - ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); } #endif /* DEBUG */ @@ -612,8 +611,8 @@ xfs_dir2_leaf_compact( /* * Compress out the stale entries in place. */ - for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) { - if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) + for (from = to = 0, loglow = -1; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { + if (INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) continue; /* * Only actually copy the entries that are different. @@ -628,8 +627,8 @@ xfs_dir2_leaf_compact( /* * Update and log the header, log the leaf entries. */ - ASSERT(be16_to_cpu(leaf->hdr.stale) == from - to); - be16_add(&leaf->hdr.count, -(be16_to_cpu(leaf->hdr.stale))); + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == from - to); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(INT_GET(leaf->hdr.stale, ARCH_CONVERT))); leaf->hdr.stale = 0; xfs_dir2_leaf_log_header(args->trans, bp); if (loglow != -1) @@ -663,14 +662,14 @@ xfs_dir2_leaf_compact_x1( int to; /* destination copy index */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.stale) > 1); + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1); index = *indexp; /* * Find the first stale entry before our index, if any. */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; /* @@ -678,8 +677,8 @@ xfs_dir2_leaf_compact_x1( * Stop if the answer would be worse than lowstale. */ for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale > highstale - index); highstale++) continue; @@ -687,7 +686,7 @@ xfs_dir2_leaf_compact_x1( * Pick the better of lowstale and highstale. */ if (lowstale >= 0 && - (highstale == be16_to_cpu(leaf->hdr.count) || + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || index - lowstale <= highstale - index)) keepstale = lowstale; else @@ -696,14 +695,14 @@ xfs_dir2_leaf_compact_x1( * Copy the entries in place, removing all the stale entries * except keepstale. 
*/ - for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { + for (from = to = 0; from < INT_GET(leaf->hdr.count, ARCH_CONVERT); from++) { /* * Notice the new value of index. */ if (index == from) newindex = to; if (from != keepstale && - be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) { + INT_GET(leaf->ents[from].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) { if (from == to) *lowlogp = to; continue; @@ -731,8 +730,8 @@ xfs_dir2_leaf_compact_x1( /* * Adjust the leaf header values. */ - be16_add(&leaf->hdr.count, -(from - to)); - leaf->hdr.stale = cpu_to_be16(1); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, -(from - to)); + INT_SET(leaf->hdr.stale, ARCH_CONVERT, 1); /* * Remember the low/high stale value only in the "right" * direction. @@ -740,8 +739,8 @@ xfs_dir2_leaf_compact_x1( if (lowstale >= newindex) lowstale = -1; else - highstale = be16_to_cpu(leaf->hdr.count); - *highlogp = be16_to_cpu(leaf->hdr.count) - 1; + highstale = INT_GET(leaf->hdr.count, ARCH_CONVERT); + *highlogp = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1; *lowstalep = lowstale; *highstalep = highstale; } @@ -767,7 +766,7 @@ xfs_dir2_leaf_getdents( xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ int eof; /* reached end of directory */ - int error = 0; /* error return value */ + int error=0; /* error return value */ int i; /* temporary loop index */ int j; /* temporary loop index */ int length; /* temporary length value */ @@ -779,8 +778,8 @@ xfs_dir2_leaf_getdents( xfs_mount_t *mp; /* filesystem mount point */ xfs_dir2_off_t newoff; /* new curoff after new blk */ int nmap; /* mappings to ask xfs_bmapi */ - xfs_dir2_put_args_t *p; /* formatting arg bundle */ - char *ptr = NULL; /* pointer to current data */ + xfs_dir2_put_args_t p; /* formatting arg bundle */ + char *ptr=NULL; /* pointer to current data */ int ra_current; /* number of read-ahead blks */ int ra_index; /* *map index for read-ahead */ int ra_offset; /* map entry offset for ra */ @@ -798,10 +797,9 @@ xfs_dir2_leaf_getdents( /* * Setup formatting arguments. */ - p = kmem_alloc(sizeof(*p), KM_SLEEP); - p->dbp = dbp; - p->put = put; - p->uio = uio; + p.dbp = dbp; + p.put = put; + p.uio = uio; /* * Set up to bmap a number of blocks based on the caller's * buffer size, the directory block size, and the filesystem @@ -1048,10 +1046,11 @@ xfs_dir2_leaf_getdents( while ((char *)ptr - (char *)data < byteoff) { dup = (xfs_dir2_data_unused_t *)ptr; - if (be16_to_cpu(dup->freetag) + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { - length = be16_to_cpu(dup->length); + length = INT_GET(dup->length, + ARCH_CONVERT); ptr += length; continue; } @@ -1080,8 +1079,9 @@ xfs_dir2_leaf_getdents( /* * No, it's unused, skip over it. 
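
The compact loops above squeeze stale entries out of the leaf in place, copying an entry only when source and destination have diverged. Reduced to a plain array with a sentinel value standing in for XFS_DIR2_NULL_DATAPTR, the pattern is:

	#include <stdint.h>

	/* drop sentinel entries in place and return the new count */
	static int demo_compact(uint32_t *ents, int count, uint32_t stale)
	{
		int from, to;

		for (from = to = 0; from < count; from++) {
			if (ents[from] == stale)
				continue;
			if (from > to)		/* copy only if shifted */
				ents[to] = ents[from];
			to++;
		}
		return to;
	}
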
*/ - if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - length = be16_to_cpu(dup->length); + if (INT_GET(dup->freetag, ARCH_CONVERT) + == XFS_DIR2_DATA_FREE_TAG) { + length = INT_GET(dup->length, ARCH_CONVERT); ptr += length; curoff += length; continue; @@ -1092,24 +1092,24 @@ xfs_dir2_leaf_getdents( */ dep = (xfs_dir2_data_entry_t *)ptr; - p->namelen = dep->namelen; + p.namelen = dep->namelen; - length = XFS_DIR2_DATA_ENTSIZE(p->namelen); + length = XFS_DIR2_DATA_ENTSIZE(p.namelen); - p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); + p.cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); - p->ino = INT_GET(dep->inumber, ARCH_CONVERT); + p.ino = INT_GET(dep->inumber, ARCH_CONVERT); #if XFS_BIG_INUMS - p->ino += mp->m_inoadd; + p.ino += mp->m_inoadd; #endif - p->name = (char *)dep->name; + p.name = (char *)dep->name; - error = p->put(p); + error = p.put(&p); /* * Won't fit. Return to caller. */ - if (!p->done) { + if (!p.done) { eof = 0; break; } @@ -1129,7 +1129,6 @@ xfs_dir2_leaf_getdents( else uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); kmem_free(map, map_size * sizeof(*map)); - kmem_free(p, sizeof(*p)); if (bp) xfs_da_brelse(tp, bp); return error; @@ -1172,7 +1171,7 @@ xfs_dir2_leaf_init( /* * Initialize the header. */ - leaf->hdr.info.magic = cpu_to_be16(magic); + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, magic); leaf->hdr.info.forw = 0; leaf->hdr.info.back = 0; leaf->hdr.count = 0; @@ -1202,13 +1201,13 @@ xfs_dir2_leaf_log_bests( int first, /* first entry to log */ int last) /* last entry to log */ { - __be16 *firstb; /* pointer to first entry */ - __be16 *lastb; /* pointer to last entry */ + xfs_dir2_data_off_t *firstb; /* pointer to first entry */ + xfs_dir2_data_off_t *lastb; /* pointer to last entry */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first; lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last; @@ -1231,8 +1230,8 @@ xfs_dir2_leaf_log_ents( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || - be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || + INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); firstlep = &leaf->ents[first]; lastlep = &leaf->ents[last]; xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), @@ -1250,8 +1249,8 @@ xfs_dir2_leaf_log_header( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || - be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC || + INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), (uint)(sizeof(leaf->hdr) - 1)); } @@ -1270,7 +1269,7 @@ xfs_dir2_leaf_log_tail( mp = tp->t_mountp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAF1_MAGIC); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), (uint)(mp->m_dirblksize - 1)); @@ -1315,7 
+1314,7 @@ xfs_dir2_leaf_lookup( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); /* * Return the found inode number. */ @@ -1374,17 +1373,17 @@ xfs_dir2_leaf_lookup_int( * looking to match the name. */ for (lep = &leaf->ents[index], dbp = NULL, curdb = -1; - index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; lep++, index++) { /* * Skip over stale leaf entries. */ - if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) continue; /* * Get the new data block number. */ - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); /* * If it's not the same as the old data block number, * need to pitch the old one and read the new one. @@ -1407,7 +1406,7 @@ xfs_dir2_leaf_lookup_int( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); /* * If it matches then return it. */ @@ -1436,7 +1435,7 @@ int /* error */ xfs_dir2_leaf_removename( xfs_da_args_t *args) /* operation arguments */ { - __be16 *bestsp; /* leaf block best freespace */ + xfs_dir2_data_off_t *bestsp; /* leaf block best freespace */ xfs_dir2_data_t *data; /* data block structure */ xfs_dir2_db_t db; /* data block number */ xfs_dabuf_t *dbp; /* data block buffer */ @@ -1472,14 +1471,14 @@ xfs_dir2_leaf_removename( * Point to the leaf entry, use that to point to the data entry. */ lep = &leaf->ents[index]; - db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); + db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); dep = (xfs_dir2_data_entry_t *) - ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); + ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); needscan = needlog = 0; - oldbest = be16_to_cpu(data->hdr.bestfree[0].length); + oldbest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - ASSERT(be16_to_cpu(bestsp[db]) == oldbest); + ASSERT(INT_GET(bestsp[db], ARCH_CONVERT) == oldbest); /* * Mark the former data entry unused. */ @@ -1489,9 +1488,9 @@ xfs_dir2_leaf_removename( /* * We just mark the leaf entry stale by putting a null in it. */ - be16_add(&leaf->hdr.stale, 1); + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); xfs_dir2_leaf_log_header(tp, lbp); - lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); xfs_dir2_leaf_log_ents(tp, lbp, index, index); /* * Scan the freespace in the data block again if necessary, @@ -1505,15 +1504,15 @@ xfs_dir2_leaf_removename( * If the longest freespace in the data block has changed, * put the new value in the bests table and log that. */ - if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) { - bestsp[db] = data->hdr.bestfree[0].length; + if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) != oldbest) { + INT_COPY(bestsp[db], data->hdr.bestfree[0].length, ARCH_CONVERT); xfs_dir2_leaf_log_bests(tp, lbp, db, db); } xfs_dir2_data_check(dp, dbp); /* * If the data block is now empty then get rid of the data block. 
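
The getdents hunks above also swap a kmem_alloc()ed xfs_dir2_put_args_t bundle for a plain automatic variable, which removes the matching kmem_free() on every exit path. Schematically, with hypothetical demo types:

	/* a small, strictly call-local argument bundle lives fine on the
	 * stack; there is nothing to allocate or free */
	struct demo_put_args {
		void	*uio;	/* caller's buffer descriptor */
		int	done;	/* set by the put callback */
	};

	static int demo_getdents(void *uio)
	{
		struct demo_put_args p;	/* was: p = kmem_alloc(sizeof(*p)) */

		p.uio = uio;
		p.done = 0;
		/* ... format entries through &p ... */
		return 0;		/* and no kmem_free() on the way out */
	}
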
*/ - if (be16_to_cpu(data->hdr.bestfree[0].length) == + if (INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == mp->m_dirblksize - (uint)sizeof(data->hdr)) { ASSERT(db != mp->m_dirdatablk); if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { @@ -1536,12 +1535,12 @@ xfs_dir2_leaf_removename( * If this is the last data block then compact the * bests table by getting rid of entries. */ - if (db == be32_to_cpu(ltp->bestcount) - 1) { + if (db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1) { /* * Look for the last active entry (i). */ for (i = db - 1; i > 0; i--) { - if (be16_to_cpu(bestsp[i]) != NULLDATAOFF) + if (INT_GET(bestsp[i], ARCH_CONVERT) != NULLDATAOFF) break; } /* @@ -1549,12 +1548,12 @@ xfs_dir2_leaf_removename( * end are removed. */ memmove(&bestsp[db - i], bestsp, - (be32_to_cpu(ltp->bestcount) - (db - i)) * sizeof(*bestsp)); - be32_add(&ltp->bestcount, -(db - i)); + (INT_GET(ltp->bestcount, ARCH_CONVERT) - (db - i)) * sizeof(*bestsp)); + INT_MOD(ltp->bestcount, ARCH_CONVERT, -(db - i)); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); } else - bestsp[db] = cpu_to_be16(NULLDATAOFF); + INT_SET(bestsp[db], ARCH_CONVERT, NULLDATAOFF); } /* * If the data block was not the first one, drop it. @@ -1605,7 +1604,7 @@ xfs_dir2_leaf_replace( */ dep = (xfs_dir2_data_entry_t *) ((char *)dbp->data + - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); + XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, INT_GET(lep->address, ARCH_CONVERT))); ASSERT(args->inumber != INT_GET(dep->inumber, ARCH_CONVERT)); /* * Put the new inode number in, log it. @@ -1646,11 +1645,11 @@ xfs_dir2_leaf_search_hash( * Note, the table cannot be empty, so we have to go through the loop. * Binary search the leaf entries looking for our hash value. */ - for (lep = leaf->ents, low = 0, high = be16_to_cpu(leaf->hdr.count) - 1, + for (lep = leaf->ents, low = 0, high = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1, hashwant = args->hashval; low <= high; ) { mid = (low + high) >> 1; - if ((hash = be32_to_cpu(lep[mid].hashval)) == hashwant) + if ((hash = INT_GET(lep[mid].hashval, ARCH_CONVERT)) == hashwant) break; if (hash < hashwant) low = mid + 1; @@ -1661,7 +1660,7 @@ xfs_dir2_leaf_search_hash( * Found one, back up through all the equal hash values. */ if (hash == hashwant) { - while (mid > 0 && be32_to_cpu(lep[mid - 1].hashval) == hashwant) { + while (mid > 0 && INT_GET(lep[mid - 1].hashval, ARCH_CONVERT) == hashwant) { mid--; } } @@ -1683,7 +1682,7 @@ xfs_dir2_leaf_trim_data( xfs_dabuf_t *lbp, /* leaf buffer */ xfs_dir2_db_t db) /* data block number */ { - __be16 *bestsp; /* leaf bests table */ + xfs_dir2_data_off_t *bestsp; /* leaf bests table */ #ifdef DEBUG xfs_dir2_data_t *data; /* data block structure */ #endif @@ -1707,7 +1706,7 @@ xfs_dir2_leaf_trim_data( } #ifdef DEBUG data = dbp->data; - ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); #endif /* this seems to be an error * data is only valid if DEBUG is defined?
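
The search_hash hunk above is a textbook binary search with one twist: on a hit it backs up over duplicate hash values so the caller always lands on the first entry of an equal-hash run. A self-contained sketch of that logic:

	#include <stdint.h>

	/* find `want`, then rewind to the first duplicate; if not found,
	 * `mid` ends up at a usable insertion point */
	static int demo_search_hash(const uint32_t *hashes, int count,
				    uint32_t want)
	{
		int low = 0, high = count - 1, mid = 0;
		uint32_t hash = 0;

		while (low <= high) {
			mid = (low + high) >> 1;
			if ((hash = hashes[mid]) == want)
				break;
			if (hash < want)
				low = mid + 1;
			else
				high = mid - 1;
		}
		if (hash == want)
			while (mid > 0 && hashes[mid - 1] == want)
				mid--;
		return mid;
	}
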
@@ -1716,9 +1715,9 @@ xfs_dir2_leaf_trim_data( leaf = lbp->data; ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) == + ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) == mp->m_dirblksize - (uint)sizeof(data->hdr)); - ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); + ASSERT(db == INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); /* * Get rid of the data block. */ @@ -1731,10 +1730,10 @@ xfs_dir2_leaf_trim_data( * Eliminate the last bests entry from the table. */ bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); - be32_add(&ltp->bestcount, -1); - memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); + INT_MOD(ltp->bestcount, ARCH_CONVERT, -1); + memmove(&bestsp[1], &bestsp[0], INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(*bestsp)); xfs_dir2_leaf_log_tail(tp, lbp); - xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); return 0; } @@ -1806,7 +1805,7 @@ xfs_dir2_node_to_leaf( return 0; lbp = state->path.blk[0].bp; leaf = lbp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); /* * Read the freespace block. */ @@ -1815,15 +1814,15 @@ xfs_dir2_node_to_leaf( return error; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); ASSERT(!free->hdr.firstdb); /* * Now see if the leafn and free data will fit in a leaf1. * If not, release the buffer and give up. */ if ((uint)sizeof(leaf->hdr) + - (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) + - be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) + + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT)) * (uint)sizeof(leaf->ents[0]) + + INT_GET(free->hdr.nvalid, ARCH_CONVERT) * (uint)sizeof(leaf->bests[0]) + (uint)sizeof(leaf->tail) > mp->m_dirblksize) { xfs_da_brelse(tp, fbp); @@ -1833,22 +1832,22 @@ xfs_dir2_node_to_leaf( * If the leaf has any stale entries in it, compress them out. * The compact routine will log the header. */ - if (be16_to_cpu(leaf->hdr.stale)) + if (INT_GET(leaf->hdr.stale, ARCH_CONVERT)) xfs_dir2_leaf_compact(args, lbp); else xfs_dir2_leaf_log_header(tp, lbp); - leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAF1_MAGIC); + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAF1_MAGIC); /* * Set up the leaf tail from the freespace block. */ ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); - ltp->bestcount = free->hdr.nvalid; + INT_COPY(ltp->bestcount, free->hdr.nvalid, ARCH_CONVERT); /* * Set up the leaf bests table.
*/ memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests, - be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0])); - xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); + INT_GET(ltp->bestcount, ARCH_CONVERT) * sizeof(leaf->bests[0])); + xfs_dir2_leaf_log_bests(tp, lbp, 0, INT_GET(ltp->bestcount, ARCH_CONVERT) - 1); xfs_dir2_leaf_log_tail(tp, lbp); xfs_dir2_leaf_check(dp, lbp); /* diff --git a/trunk/fs/xfs/xfs_dir2_leaf.h b/trunk/fs/xfs/xfs_dir2_leaf.h index f57ca1162412..1393993d61e9 100644 --- a/trunk/fs/xfs/xfs_dir2_leaf.h +++ b/trunk/fs/xfs/xfs_dir2_leaf.h @@ -46,23 +46,23 @@ typedef __uint32_t xfs_dir2_dataptr_t; */ typedef struct xfs_dir2_leaf_hdr { xfs_da_blkinfo_t info; /* header for da routines */ - __be16 count; /* count of entries */ - __be16 stale; /* count of stale entries */ + __uint16_t count; /* count of entries */ + __uint16_t stale; /* count of stale entries */ } xfs_dir2_leaf_hdr_t; /* * Leaf block entry. */ typedef struct xfs_dir2_leaf_entry { - __be32 hashval; /* hash value of name */ - __be32 address; /* address of data entry */ + xfs_dahash_t hashval; /* hash value of name */ + xfs_dir2_dataptr_t address; /* address of data entry */ } xfs_dir2_leaf_entry_t; /* * Leaf block tail. */ typedef struct xfs_dir2_leaf_tail { - __be32 bestcount; + __uint32_t bestcount; } xfs_dir2_leaf_tail_t; /* @@ -105,10 +105,11 @@ xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp) * Get address of the bests array in the single-leaf block. */ #define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp) -static inline __be16 * +static inline xfs_dir2_data_off_t * xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) { - return (__be16 *)ltp - be32_to_cpu(ltp->bestcount); + return (xfs_dir2_data_off_t *) + (ltp) - INT_GET((ltp)->bestcount, ARCH_CONVERT); } /* diff --git a/trunk/fs/xfs/xfs_dir2_node.c b/trunk/fs/xfs/xfs_dir2_node.c index af556f16a0c7..641f8633d254 100644 --- a/trunk/fs/xfs/xfs_dir2_node.c +++ b/trunk/fs/xfs/xfs_dir2_node.c @@ -76,7 +76,7 @@ xfs_dir2_free_log_bests( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&free->bests[first] - (char *)free), (uint)((char *)&free->bests[last] - (char *)free + @@ -94,7 +94,7 @@ xfs_dir2_free_log_header( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); } @@ -114,14 +114,14 @@ xfs_dir2_leaf_to_node( xfs_dabuf_t *fbp; /* freespace buffer */ xfs_dir2_db_t fdb; /* freespace block number */ xfs_dir2_free_t *free; /* freespace structure */ - __be16 *from; /* pointer to freespace entry */ + xfs_dir2_data_off_t *from; /* pointer to freespace entry */ int i; /* leaf freespace index */ xfs_dir2_leaf_t *leaf; /* leaf structure */ xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ xfs_mount_t *mp; /* filesystem mount point */ int n; /* count of live freespc ents */ xfs_dir2_data_off_t off; /* freespace entry value */ - __be16 *to; /* pointer to freespace entry */ + xfs_dir2_data_off_t *to; /* pointer to freespace entry */ xfs_trans_t *tp; /* transaction pointer */ xfs_dir2_trace_args_b("leaf_to_node", args, lbp); @@ -149,28 +149,28 @@ xfs_dir2_leaf_to_node( /* * Initialize the 
freespace block header. */ - free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); + INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); free->hdr.firstdb = 0; - ASSERT(be32_to_cpu(ltp->bestcount) <= (uint)dp->i_d.di_size / mp->m_dirblksize); - free->hdr.nvalid = ltp->bestcount; + ASSERT(INT_GET(ltp->bestcount, ARCH_CONVERT) <= (uint)dp->i_d.di_size / mp->m_dirblksize); + INT_COPY(free->hdr.nvalid, ltp->bestcount, ARCH_CONVERT); /* * Copy freespace entries from the leaf block to the new block. * Count active entries. */ for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests; - i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { - if ((off = be16_to_cpu(*from)) != NULLDATAOFF) + i < INT_GET(ltp->bestcount, ARCH_CONVERT); i++, from++, to++) { + if ((off = INT_GET(*from, ARCH_CONVERT)) != NULLDATAOFF) n++; - *to = cpu_to_be16(off); + INT_SET(*to, ARCH_CONVERT, off); } - free->hdr.nused = cpu_to_be32(n); - leaf->hdr.info.magic = cpu_to_be16(XFS_DIR2_LEAFN_MAGIC); + INT_SET(free->hdr.nused, ARCH_CONVERT, n); + INT_SET(leaf->hdr.info.magic, ARCH_CONVERT, XFS_DIR2_LEAFN_MAGIC); /* * Log everything. */ xfs_dir2_leaf_log_header(tp, lbp); xfs_dir2_free_log_header(tp, fbp); - xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1); + xfs_dir2_free_log_bests(tp, fbp, 0, INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1); xfs_da_buf_done(fbp); xfs_dir2_leafn_check(dp, lbp); return 0; @@ -217,15 +217,15 @@ xfs_dir2_leafn_add( * a compact. */ - if (be16_to_cpu(leaf->hdr.count) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { + if (INT_GET(leaf->hdr.count, ARCH_CONVERT) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { if (!leaf->hdr.stale) return XFS_ERROR(ENOSPC); - compact = be16_to_cpu(leaf->hdr.stale) > 1; + compact = INT_GET(leaf->hdr.stale, ARCH_CONVERT) > 1; } else compact = 0; - ASSERT(index == 0 || be32_to_cpu(leaf->ents[index - 1].hashval) <= args->hashval); - ASSERT(index == be16_to_cpu(leaf->hdr.count) || - be32_to_cpu(leaf->ents[index].hashval) >= args->hashval); + ASSERT(index == 0 || INT_GET(leaf->ents[index - 1].hashval, ARCH_CONVERT) <= args->hashval); + ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || + INT_GET(leaf->ents[index].hashval, ARCH_CONVERT) >= args->hashval); if (args->justcheck) return 0; @@ -242,7 +242,7 @@ xfs_dir2_leafn_add( * Set impossible logging indices for this case. */ else if (leaf->hdr.stale) { - lfloglow = be16_to_cpu(leaf->hdr.count); + lfloglow = INT_GET(leaf->hdr.count, ARCH_CONVERT); lfloghigh = -1; } /* @@ -250,12 +250,12 @@ xfs_dir2_leafn_add( */ if (!leaf->hdr.stale) { lep = &leaf->ents[index]; - if (index < be16_to_cpu(leaf->hdr.count)) + if (index < INT_GET(leaf->hdr.count, ARCH_CONVERT)) memmove(lep + 1, lep, - (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - index) * sizeof(*lep)); lfloglow = index; - lfloghigh = be16_to_cpu(leaf->hdr.count); - be16_add(&leaf->hdr.count, 1); + lfloghigh = INT_GET(leaf->hdr.count, ARCH_CONVERT); + INT_MOD(leaf->hdr.count, ARCH_CONVERT, +1); } /* * There are stale entries. We'll use one for the new entry. @@ -271,7 +271,7 @@ xfs_dir2_leafn_add( */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != + INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR; lowstale--) continue; @@ -281,8 +281,8 @@ xfs_dir2_leafn_add( * lowstale already found. 
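
The lowstale/highstale walk above (and its twin later in the rebalance path) looks outward from the insertion index for the nearest reusable stale slot, preferring the side that requires fewer entries to be shifted. The same scan over a plain array, with a sentinel standing in for the stale marker:

	#include <stdint.h>

	/* return the index of the stale slot cheapest to reuse around
	 * `index`, or -1 if the array holds no stale entry at all */
	static int demo_nearest_stale(const uint32_t *ents, int count,
				      int index, uint32_t stale)
	{
		int lo, hi;

		for (lo = index - 1; lo >= 0 && ents[lo] != stale; lo--)
			continue;
		for (hi = index;
		     hi < count && ents[hi] != stale &&
		     (lo < 0 || index - lo - 1 >= hi - index);
		     hi++)
			continue;
		if (lo >= 0 && (hi == count || index - lo - 1 < hi - index))
			return lo;
		return hi < count ? hi : -1;
	}
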
*/ for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != + highstale < INT_GET(leaf->hdr.count, ARCH_CONVERT) && + INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) != XFS_DIR2_NULL_DATAPTR && (lowstale < 0 || index - lowstale - 1 >= highstale - index); @@ -294,9 +294,9 @@ xfs_dir2_leafn_add( * Shift entries up toward the stale slot. */ if (lowstale >= 0 && - (highstale == be16_to_cpu(leaf->hdr.count) || + (highstale == INT_GET(leaf->hdr.count, ARCH_CONVERT) || index - lowstale - 1 < highstale - index)) { - ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == + ASSERT(INT_GET(leaf->ents[lowstale].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR); ASSERT(index - lowstale - 1 >= 0); if (index - lowstale - 1 > 0) @@ -312,7 +312,7 @@ xfs_dir2_leafn_add( * Shift entries down toward the stale slot. */ else { - ASSERT(be32_to_cpu(leaf->ents[highstale].address) == + ASSERT(INT_GET(leaf->ents[highstale].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR); ASSERT(highstale - index >= 0); if (highstale - index > 0) @@ -323,14 +323,13 @@ xfs_dir2_leafn_add( lfloglow = MIN(index, lfloglow); lfloghigh = MAX(highstale, lfloghigh); } - be16_add(&leaf->hdr.stale, -1); + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, -1); } /* * Insert the new entry, log everything. */ - lep->hashval = cpu_to_be32(args->hashval); - lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, - args->blkno, args->index)); + INT_SET(lep->hashval, ARCH_CONVERT, args->hashval); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_DB_OFF_TO_DATAPTR(mp, args->blkno, args->index)); xfs_dir2_leaf_log_header(tp, bp); xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); xfs_dir2_leafn_check(dp, bp); @@ -353,17 +352,17 @@ xfs_dir2_leafn_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); - for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { - if (i + 1 < be16_to_cpu(leaf->hdr.count)) { - ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= - be32_to_cpu(leaf->ents[i + 1].hashval)); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); + for (i = stale = 0; i < INT_GET(leaf->hdr.count, ARCH_CONVERT); i++) { + if (i + 1 < INT_GET(leaf->hdr.count, ARCH_CONVERT)) { + ASSERT(INT_GET(leaf->ents[i].hashval, ARCH_CONVERT) <= + INT_GET(leaf->ents[i + 1].hashval, ARCH_CONVERT)); } - if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (INT_GET(leaf->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) stale++; } - ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); + ASSERT(INT_GET(leaf->hdr.stale, ARCH_CONVERT) == stale); } #endif /* DEBUG */ @@ -379,12 +378,12 @@ xfs_dir2_leafn_lasthash( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); if (count) - *count = be16_to_cpu(leaf->hdr.count); + *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); if (!leaf->hdr.count) return 0; - return be32_to_cpu(leaf->ents[be16_to_cpu(leaf->hdr.count) - 1].hashval); + return INT_GET(leaf->ents[INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); } /* @@ -420,9 +419,9 @@ xfs_dir2_leafn_lookup_int( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == 
XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); #ifdef __KERNEL__ - ASSERT(be16_to_cpu(leaf->hdr.count) > 0); + ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) > 0); #endif xfs_dir2_leafn_check(dp, bp); /* @@ -444,7 +443,7 @@ xfs_dir2_leafn_lookup_int( curdb = -1; length = XFS_DIR2_DATA_ENTSIZE(args->namelen); if ((free = (curbp ? curbp->data : NULL))) - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); } /* * For others, it's a data block buffer, get the block number. @@ -457,17 +456,17 @@ xfs_dir2_leafn_lookup_int( * Loop over leaf entries with the right hash value. */ for (lep = &leaf->ents[index]; - index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval; + index < INT_GET(leaf->hdr.count, ARCH_CONVERT) && INT_GET(lep->hashval, ARCH_CONVERT) == args->hashval; lep++, index++) { /* * Skip stale leaf entries. */ - if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) + if (INT_GET(lep->address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) continue; /* * Pull the data block number from the entry. */ - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); + newdb = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); /* * For addname, we're looking for a place to put the new entry. * We want to use a data block with an entry of equal @@ -507,15 +506,15 @@ xfs_dir2_leafn_lookup_int( } curfdb = newfdb; free = curbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); - ASSERT((be32_to_cpu(free->hdr.firstdb) % + ASSERT((INT_GET(free->hdr.firstdb, ARCH_CONVERT) % XFS_DIR2_MAX_FREE_BESTS(mp)) == 0); - ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb); + ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) <= curdb); ASSERT(curdb < - be32_to_cpu(free->hdr.firstdb) + - be32_to_cpu(free->hdr.nvalid)); + INT_GET(free->hdr.firstdb, ARCH_CONVERT) + + INT_GET(free->hdr.nvalid, ARCH_CONVERT)); } /* * Get the index for our entry. @@ -524,12 +523,12 @@ xfs_dir2_leafn_lookup_int( /* * If it has room, return it. */ - if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { + if (unlikely(INT_GET(free->bests[fi], ARCH_CONVERT) == NULLDATAOFF)) { XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", XFS_ERRLEVEL_LOW, mp); return XFS_ERROR(EFSCORRUPTED); } - if (be16_to_cpu(free->bests[fi]) >= length) { + if (INT_GET(free->bests[fi], ARCH_CONVERT) >= length) { *indexp = index; state->extravalid = 1; state->extrablk.bp = curbp; @@ -573,7 +572,7 @@ xfs_dir2_leafn_lookup_int( */ dep = (xfs_dir2_data_entry_t *) ((char *)curbp->data + - XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); + XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT))); /* * Compare the entry, return it if it matches. */ @@ -620,7 +619,7 @@ xfs_dir2_leafn_lookup_int( * Return the final index, that will be the insertion point. */ *indexp = index; - ASSERT(index == be16_to_cpu(leaf->hdr.count) || args->oknoent); + ASSERT(index == INT_GET(leaf->hdr.count, ARCH_CONVERT) || args->oknoent); return XFS_ERROR(ENOENT); } @@ -658,12 +657,12 @@ xfs_dir2_leafn_moveents( * destination leaf entries, open up a hole in the destination * to hold the new entries. 
*/ - if (start_d < be16_to_cpu(leaf_d->hdr.count)) { + if (start_d < INT_GET(leaf_d->hdr.count, ARCH_CONVERT)) { memmove(&leaf_d->ents[start_d + count], &leaf_d->ents[start_d], - (be16_to_cpu(leaf_d->hdr.count) - start_d) * + (INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - start_d) * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_leaf_log_ents(tp, bp_d, start_d + count, - count + be16_to_cpu(leaf_d->hdr.count) - 1); + count + INT_GET(leaf_d->hdr.count, ARCH_CONVERT) - 1); } /* * If the source has stale leaves, count the ones in the copy range @@ -673,7 +672,7 @@ xfs_dir2_leafn_moveents( int i; /* temp leaf index */ for (i = start_s, stale = 0; i < start_s + count; i++) { - if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (INT_GET(leaf_s->ents[i].address, ARCH_CONVERT) == XFS_DIR2_NULL_DATAPTR) stale++; } } else @@ -688,7 +687,7 @@ xfs_dir2_leafn_moveents( * If there are source entries after the ones we copied, * delete the ones we copied by sliding the next ones down. */ - if (start_s + count < be16_to_cpu(leaf_s->hdr.count)) { + if (start_s + count < INT_GET(leaf_s->hdr.count, ARCH_CONVERT)) { memmove(&leaf_s->ents[start_s], &leaf_s->ents[start_s + count], count * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_leaf_log_ents(tp, bp_s, start_s, start_s + count - 1); @@ -696,10 +695,10 @@ xfs_dir2_leafn_moveents( /* * Update the headers and log them. */ - be16_add(&leaf_s->hdr.count, -(count)); - be16_add(&leaf_s->hdr.stale, -(stale)); - be16_add(&leaf_d->hdr.count, count); - be16_add(&leaf_d->hdr.stale, stale); + INT_MOD(leaf_s->hdr.count, ARCH_CONVERT, -(count)); + INT_MOD(leaf_s->hdr.stale, ARCH_CONVERT, -(stale)); + INT_MOD(leaf_d->hdr.count, ARCH_CONVERT, count); + INT_MOD(leaf_d->hdr.stale, ARCH_CONVERT, stale); xfs_dir2_leaf_log_header(tp, bp_s); xfs_dir2_leaf_log_header(tp, bp_d); xfs_dir2_leafn_check(args->dp, bp_s); @@ -720,13 +719,13 @@ xfs_dir2_leafn_order( leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - if (be16_to_cpu(leaf1->hdr.count) > 0 && - be16_to_cpu(leaf2->hdr.count) > 0 && - (be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) || - be32_to_cpu(leaf2->ents[be16_to_cpu(leaf2->hdr.count) - 1].hashval) < - be32_to_cpu(leaf1->ents[be16_to_cpu(leaf1->hdr.count) - 1].hashval))) + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0 && + INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0 && + (INT_GET(leaf2->ents[0].hashval, ARCH_CONVERT) < INT_GET(leaf1->ents[0].hashval, ARCH_CONVERT) || + INT_GET(leaf2->ents[INT_GET(leaf2->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT) < + INT_GET(leaf1->ents[INT_GET(leaf1->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT))) return 1; return 0; } @@ -769,9 +768,9 @@ xfs_dir2_leafn_rebalance( } leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count); + oldsum = INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT); #ifdef DEBUG - oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale); + oldstale = INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT); #endif mid = oldsum >> 1; /* @@ -781,10 +780,10 @@ xfs_dir2_leafn_rebalance( if (oldsum & 1) { xfs_dahash_t midhash; /* middle entry 
hash value */ - if (mid >= be16_to_cpu(leaf1->hdr.count)) - midhash = be32_to_cpu(leaf2->ents[mid - be16_to_cpu(leaf1->hdr.count)].hashval); + if (mid >= INT_GET(leaf1->hdr.count, ARCH_CONVERT)) + midhash = INT_GET(leaf2->ents[mid - INT_GET(leaf1->hdr.count, ARCH_CONVERT)].hashval, ARCH_CONVERT); else - midhash = be32_to_cpu(leaf1->ents[mid].hashval); + midhash = INT_GET(leaf1->ents[mid].hashval, ARCH_CONVERT); isleft = args->hashval <= midhash; } /* @@ -798,30 +797,30 @@ xfs_dir2_leafn_rebalance( * Calculate moved entry count. Positive means left-to-right, * negative means right-to-left. Then move the entries. */ - count = be16_to_cpu(leaf1->hdr.count) - mid + (isleft == 0); + count = INT_GET(leaf1->hdr.count, ARCH_CONVERT) - mid + (isleft == 0); if (count > 0) xfs_dir2_leafn_moveents(args, blk1->bp, - be16_to_cpu(leaf1->hdr.count) - count, blk2->bp, 0, count); + INT_GET(leaf1->hdr.count, ARCH_CONVERT) - count, blk2->bp, 0, count); else if (count < 0) xfs_dir2_leafn_moveents(args, blk2->bp, 0, blk1->bp, - be16_to_cpu(leaf1->hdr.count), count); - ASSERT(be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count) == oldsum); - ASSERT(be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale) == oldstale); + INT_GET(leaf1->hdr.count, ARCH_CONVERT), count); + ASSERT(INT_GET(leaf1->hdr.count, ARCH_CONVERT) + INT_GET(leaf2->hdr.count, ARCH_CONVERT) == oldsum); + ASSERT(INT_GET(leaf1->hdr.stale, ARCH_CONVERT) + INT_GET(leaf2->hdr.stale, ARCH_CONVERT) == oldstale); /* * Mark whether we're inserting into the old or new leaf. */ - if (be16_to_cpu(leaf1->hdr.count) < be16_to_cpu(leaf2->hdr.count)) + if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) < INT_GET(leaf2->hdr.count, ARCH_CONVERT)) state->inleaf = swap; - else if (be16_to_cpu(leaf1->hdr.count) > be16_to_cpu(leaf2->hdr.count)) + else if (INT_GET(leaf1->hdr.count, ARCH_CONVERT) > INT_GET(leaf2->hdr.count, ARCH_CONVERT)) state->inleaf = !swap; else state->inleaf = - swap ^ (blk1->index <= be16_to_cpu(leaf1->hdr.count)); + swap ^ (blk1->index <= INT_GET(leaf1->hdr.count, ARCH_CONVERT)); /* * Adjust the expected index for insertion. */ if (!state->inleaf) - blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count); + blk2->index = blk1->index - INT_GET(leaf1->hdr.count, ARCH_CONVERT); /* * Finally sanity check just to make sure we are not returning a negative index @@ -868,7 +867,7 @@ xfs_dir2_leafn_remove( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); /* * Point to the entry we're removing. */ @@ -876,17 +875,17 @@ xfs_dir2_leafn_remove( /* * Extract the data block and offset from the entry. */ - db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); + db = XFS_DIR2_DATAPTR_TO_DB(mp, INT_GET(lep->address, ARCH_CONVERT)); ASSERT(dblk->blkno == db); - off = XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)); + off = XFS_DIR2_DATAPTR_TO_OFF(mp, INT_GET(lep->address, ARCH_CONVERT)); ASSERT(dblk->index == off); /* * Kill the leaf entry by marking it stale. * Log the leaf block changes. */ - be16_add(&leaf->hdr.stale, 1); + INT_MOD(leaf->hdr.stale, ARCH_CONVERT, +1); xfs_dir2_leaf_log_header(tp, bp); - lep->address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); + INT_SET(lep->address, ARCH_CONVERT, XFS_DIR2_NULL_DATAPTR); xfs_dir2_leaf_log_ents(tp, bp, index, index); /* * Make the data entry free. 
Keep track of the longest freespace @@ -895,7 +894,7 @@ xfs_dir2_leafn_remove( dbp = dblk->bp; data = dbp->data; dep = (xfs_dir2_data_entry_t *)((char *)data + off); - longest = be16_to_cpu(data->hdr.bestfree[0].length); + longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); needlog = needscan = 0; xfs_dir2_data_make_free(tp, dbp, off, XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); @@ -912,7 +911,7 @@ xfs_dir2_leafn_remove( * If the longest data block freespace changes, need to update * the corresponding freeblock entry. */ - if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) { + if (longest < INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { int error; /* error return value */ xfs_dabuf_t *fbp; /* freeblock buffer */ xfs_dir2_db_t fdb; /* freeblock block number */ @@ -930,15 +929,15 @@ xfs_dir2_leafn_remove( return error; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); - ASSERT(be32_to_cpu(free->hdr.firstdb) == + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.firstdb, ARCH_CONVERT) == XFS_DIR2_MAX_FREE_BESTS(mp) * (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); /* * Calculate which entry we need to fix. */ findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); - longest = be16_to_cpu(data->hdr.bestfree[0].length); + longest = INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT); /* * If the data block is now empty we can get rid of it * (usually). @@ -970,7 +969,7 @@ xfs_dir2_leafn_remove( /* * One less used entry in the free table. */ - free->hdr.nused = cpu_to_be32(-1); + INT_MOD(free->hdr.nused, ARCH_CONVERT, -1); xfs_dir2_free_log_header(tp, fbp); /* * If this was the last entry in the table, we can @@ -978,21 +977,21 @@ xfs_dir2_leafn_remove( * entries at the end referring to non-existent * data blocks, get those too. */ - if (findex == be32_to_cpu(free->hdr.nvalid) - 1) { + if (findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT) - 1) { int i; /* free entry index */ for (i = findex - 1; - i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF; + i >= 0 && INT_GET(free->bests[i], ARCH_CONVERT) == NULLDATAOFF; i--) continue; - free->hdr.nvalid = cpu_to_be32(i + 1); + INT_SET(free->hdr.nvalid, ARCH_CONVERT, i + 1); logfree = 0; } /* * Not the last entry, just punch it out. */ else { - free->bests[findex] = cpu_to_be16(NULLDATAOFF); + INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); logfree = 1; } /* @@ -1018,7 +1017,7 @@ xfs_dir2_leafn_remove( * the new value. 
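
In the leafn_remove path above, when the last valid slot of a freespace table is punched out, hdr.nvalid is pulled back past any trailing "no free space" sentinels in the same pass. That trim, over a plain array:

	#include <stdint.h>

	/* slot `findex` was just invalidated; return the new valid length
	 * with all trailing sentinel slots dropped */
	static int demo_trim_nvalid(const uint16_t *bests, int findex,
				    uint16_t nulloff)
	{
		int i;

		for (i = findex - 1; i >= 0 && bests[i] == nulloff; i--)
			continue;
		return i + 1;
	}
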
*/ else { - free->bests[findex] = cpu_to_be16(longest); + INT_SET(free->bests[findex], ARCH_CONVERT, longest); logfree = 1; } /* @@ -1040,7 +1039,7 @@ xfs_dir2_leafn_remove( *rval = ((uint)sizeof(leaf->hdr) + (uint)sizeof(leaf->ents[0]) * - (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale))) < + (INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT))) < mp->m_dir_magicpct; return 0; } @@ -1139,9 +1138,9 @@ xfs_dir2_leafn_toosmall( */ blk = &state->path.blk[state->path.active - 1]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); leaf = (xfs_dir2_leaf_t *)info; - count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); + count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); if (bytes > (state->blocksize >> 1)) { /* @@ -1161,7 +1160,7 @@ xfs_dir2_leafn_toosmall( * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = (info->forw != 0); + forward = info->forw; memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &rval); @@ -1177,9 +1176,9 @@ xfs_dir2_leafn_toosmall( * We prefer coalescing with the lower numbered sibling so as * to shrink a directory over time. */ - forward = be32_to_cpu(info->forw) < be32_to_cpu(info->back); + forward = INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT); for (i = 0, bp = NULL; i < 2; forward = !forward, i++) { - blkno = forward ? be32_to_cpu(info->forw) : be32_to_cpu(info->back); + blkno = forward ?INT_GET( info->forw, ARCH_CONVERT) : INT_GET(info->back, ARCH_CONVERT); if (blkno == 0) continue; /* @@ -1195,11 +1194,11 @@ xfs_dir2_leafn_toosmall( * Count bytes in the two blocks combined. */ leaf = (xfs_dir2_leaf_t *)info; - count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); + count = INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); bytes = state->blocksize - (state->blocksize >> 2); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + count += INT_GET(leaf->hdr.count, ARCH_CONVERT) - INT_GET(leaf->hdr.stale, ARCH_CONVERT); bytes -= count * (uint)sizeof(leaf->ents[0]); /* * Fits with at least 25% to spare. @@ -1257,27 +1256,27 @@ xfs_dir2_leafn_unbalance( ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR2_LEAFN_MAGIC); /* * If there are any stale leaf entries, take this opportunity * to purge them. */ - if (drop_leaf->hdr.stale) + if (INT_GET(drop_leaf->hdr.stale, ARCH_CONVERT)) xfs_dir2_leaf_compact(args, drop_blk->bp); - if (save_leaf->hdr.stale) + if (INT_GET(save_leaf->hdr.stale, ARCH_CONVERT)) xfs_dir2_leaf_compact(args, save_blk->bp); /* * Move the entries from drop to the appropriate end of save. 
*/ - drop_blk->hashval = be32_to_cpu(drop_leaf->ents[be16_to_cpu(drop_leaf->hdr.count) - 1].hashval); + drop_blk->hashval = INT_GET(drop_leaf->ents[INT_GET(drop_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); if (xfs_dir2_leafn_order(save_blk->bp, drop_blk->bp)) xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, 0, - be16_to_cpu(drop_leaf->hdr.count)); + INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); else xfs_dir2_leafn_moveents(args, drop_blk->bp, 0, save_blk->bp, - be16_to_cpu(save_leaf->hdr.count), be16_to_cpu(drop_leaf->hdr.count)); - save_blk->hashval = be32_to_cpu(save_leaf->ents[be16_to_cpu(save_leaf->hdr.count) - 1].hashval); + INT_GET(save_leaf->hdr.count, ARCH_CONVERT), INT_GET(drop_leaf->hdr.count, ARCH_CONVERT)); + save_blk->hashval = INT_GET(save_leaf->ents[INT_GET(save_leaf->hdr.count, ARCH_CONVERT) - 1].hashval, ARCH_CONVERT); xfs_dir2_leafn_check(args->dp, save_blk->bp); } @@ -1379,7 +1378,7 @@ xfs_dir2_node_addname_int( xfs_mount_t *mp; /* filesystem mount point */ int needlog; /* need to log data header */ int needscan; /* need to rescan data frees */ - __be16 *tagp; /* data entry tag pointer */ + xfs_dir2_data_off_t *tagp; /* data entry tag pointer */ xfs_trans_t *tp; /* transaction pointer */ dp = args->dp; @@ -1398,7 +1397,7 @@ xfs_dir2_node_addname_int( */ ifbno = fblk->blkno; free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); findex = fblk->index; /* * This means the free entry showed that the data block had @@ -1406,10 +1405,10 @@ xfs_dir2_node_addname_int( * Use that data block. */ if (findex >= 0) { - ASSERT(findex < be32_to_cpu(free->hdr.nvalid)); - ASSERT(be16_to_cpu(free->bests[findex]) != NULLDATAOFF); - ASSERT(be16_to_cpu(free->bests[findex]) >= length); - dbno = be32_to_cpu(free->hdr.firstdb) + findex; + ASSERT(findex < INT_GET(free->hdr.nvalid, ARCH_CONVERT)); + ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF); + ASSERT(INT_GET(free->bests[findex], ARCH_CONVERT) >= length); + dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; } /* * The data block looked at didn't have enough room. @@ -1482,20 +1481,20 @@ xfs_dir2_node_addname_int( continue; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); findex = 0; } /* * Look at the current free entry. Is it good enough? */ - if (be16_to_cpu(free->bests[findex]) != NULLDATAOFF && - be16_to_cpu(free->bests[findex]) >= length) - dbno = be32_to_cpu(free->hdr.firstdb) + findex; + if (INT_GET(free->bests[findex], ARCH_CONVERT) != NULLDATAOFF && + INT_GET(free->bests[findex], ARCH_CONVERT) >= length) + dbno = INT_GET(free->hdr.firstdb, ARCH_CONVERT) + findex; else { /* * Are we done with the freeblock? */ - if (++findex == be32_to_cpu(free->hdr.nvalid)) { + if (++findex == INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { /* * Drop the block. */ @@ -1609,15 +1608,15 @@ xfs_dir2_node_addname_int( * its first slot as our empty slot. 
*/ free = fbp->data; - free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); - free->hdr.firstdb = cpu_to_be32( + INT_SET(free->hdr.magic, ARCH_CONVERT, XFS_DIR2_FREE_MAGIC); + INT_SET(free->hdr.firstdb, ARCH_CONVERT, (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * XFS_DIR2_MAX_FREE_BESTS(mp)); free->hdr.nvalid = 0; free->hdr.nused = 0; } else { free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); } /* @@ -1628,20 +1627,20 @@ xfs_dir2_node_addname_int( * If it's after the end of the current entries in the * freespace block, extend that table. */ - if (findex >= be32_to_cpu(free->hdr.nvalid)) { + if (findex >= INT_GET(free->hdr.nvalid, ARCH_CONVERT)) { ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); - free->hdr.nvalid = cpu_to_be32(findex + 1); + INT_SET(free->hdr.nvalid, ARCH_CONVERT, findex + 1); /* * Tag new entry so nused will go up. */ - free->bests[findex] = cpu_to_be16(NULLDATAOFF); + INT_SET(free->bests[findex], ARCH_CONVERT, NULLDATAOFF); } /* * If this entry was for an empty data block * (this should always be true) then update the header. */ - if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { - be32_add(&free->hdr.nused, 1); + if (INT_GET(free->bests[findex], ARCH_CONVERT) == NULLDATAOFF) { + INT_MOD(free->hdr.nused, ARCH_CONVERT, +1); xfs_dir2_free_log_header(tp, fbp); } /* @@ -1650,7 +1649,7 @@ xfs_dir2_node_addname_int( * change again. */ data = dbp->data; - free->bests[findex] = data->hdr.bestfree[0].length; + INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); logfree = 1; } /* @@ -1678,12 +1677,12 @@ xfs_dir2_node_addname_int( data = dbp->data; logfree = 0; } - ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length); + ASSERT(INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT) >= length); /* * Point to the existing unused space. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); + ((char *)data + INT_GET(data->hdr.bestfree[0].offset, ARCH_CONVERT)); needscan = needlog = 0; /* * Mark the first part of the unused space, inuse for us. @@ -1699,7 +1698,7 @@ xfs_dir2_node_addname_int( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); - *tagp = cpu_to_be16((char *)dep - (char *)data); + INT_SET(*tagp, ARCH_CONVERT, (xfs_dir2_data_off_t)((char *)dep - (char *)data)); xfs_dir2_data_log_entry(tp, dbp, dep); /* * Rescan the block for bestfree if needed. @@ -1714,8 +1713,8 @@ xfs_dir2_node_addname_int( /* * If the freespace entry is now wrong, update it. */ - if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) { - free->bests[findex] = data->hdr.bestfree[0].length; + if (INT_GET(free->bests[findex], ARCH_CONVERT) != INT_GET(data->hdr.bestfree[0].length, ARCH_CONVERT)) { + INT_COPY(free->bests[findex], data->hdr.bestfree[0].length, ARCH_CONVERT); logfree = 1; } /* @@ -1732,7 +1731,7 @@ xfs_dir2_node_addname_int( * Return the data block and offset in args, then drop the data block. */ args->blkno = (xfs_dablk_t)dbno; - args->index = be16_to_cpu(*tagp); + args->index = INT_GET(*tagp, ARCH_CONVERT); xfs_da_buf_done(dbp); return 0; } @@ -1901,10 +1900,10 @@ xfs_dir2_node_replace( * Point to the data entry. 
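The hunk above shows the freespace-table invariants: when bests[] is extended, the new slots are tagged NULLDATAOFF first so that the following check bumps nused exactly once when a slot is actually filled, while nvalid tracks the highest slot position in use. A self-contained sketch of that bookkeeping; the struct and limits are invented stand-ins:

    #include <assert.h>
    #include <stdint.h>

    #define MAXBESTS 8
    #define NULLOFF  ((uint16_t)-1)      /* plays the NULLDATAOFF role */

    struct freetab {
        uint16_t bests[MAXBESTS];
        int      nvalid;                 /* highest used slot + 1 */
        int      nused;                  /* slots not equal to NULLOFF */
    };

    static void freetab_set(struct freetab *ft, int idx, uint16_t best)
    {
        assert(idx >= 0 && idx < MAXBESTS);
        if (idx >= ft->nvalid) {         /* extend, tagging slots empty */
            for (int i = ft->nvalid; i <= idx; i++)
                ft->bests[i] = NULLOFF;
            ft->nvalid = idx + 1;
        }
        if (ft->bests[idx] == NULLOFF)   /* empty -> used: count it */
            ft->nused++;
        ft->bests[idx] = best;
    }

    static void freetab_clear(struct freetab *ft, int idx)
    {
        assert(idx >= 0 && idx < ft->nvalid);
        if (ft->bests[idx] != NULLOFF)
            ft->nused--;
        ft->bests[idx] = NULLOFF;
        while (ft->nvalid > 0 &&         /* trim trailing empty slots, */
               ft->bests[ft->nvalid - 1] == NULLOFF)
            ft->nvalid--;                /* as the remove path does    */
    }

    int main(void)
    {
        struct freetab ft = { { 0 }, 0, 0 };
        freetab_set(&ft, 3, 100);        /* slots 0..2 become NULLOFF */
        assert(ft.nvalid == 4 && ft.nused == 1);
        freetab_clear(&ft, 3);           /* trailing empties trimmed  */
        assert(ft.nvalid == 0 && ft.nused == 0);
        return 0;
    }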
*/ data = state->extrablk.bp->data; - ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(INT_GET(data->hdr.magic, ARCH_CONVERT) == XFS_DIR2_DATA_MAGIC); dep = (xfs_dir2_data_entry_t *) ((char *)data + - XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address))); + XFS_DIR2_DATAPTR_TO_OFF(state->mp, INT_GET(lep->address, ARCH_CONVERT))); ASSERT(inum != INT_GET(dep->inumber, ARCH_CONVERT)); /* * Fill in the new inode number and log the entry. @@ -1967,11 +1966,11 @@ xfs_dir2_node_trim_free( return 0; } free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(INT_GET(free->hdr.magic, ARCH_CONVERT) == XFS_DIR2_FREE_MAGIC); /* * If there are used entries, there's nothing to do. */ - if (be32_to_cpu(free->hdr.nused) > 0) { + if (INT_GET(free->hdr.nused, ARCH_CONVERT) > 0) { xfs_da_brelse(tp, bp); *rvalp = 0; return 0; diff --git a/trunk/fs/xfs/xfs_dir2_node.h b/trunk/fs/xfs/xfs_dir2_node.h index c7c870ee7857..0ab8fbd59512 100644 --- a/trunk/fs/xfs/xfs_dir2_node.h +++ b/trunk/fs/xfs/xfs_dir2_node.h @@ -41,15 +41,15 @@ struct xfs_trans; #define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ typedef struct xfs_dir2_free_hdr { - __be32 magic; /* XFS_DIR2_FREE_MAGIC */ - __be32 firstdb; /* db of first entry */ - __be32 nvalid; /* count of valid entries */ - __be32 nused; /* count of used entries */ + __uint32_t magic; /* XFS_DIR2_FREE_MAGIC */ + __int32_t firstdb; /* db of first entry */ + __int32_t nvalid; /* count of valid entries */ + __int32_t nused; /* count of used entries */ } xfs_dir2_free_hdr_t; typedef struct xfs_dir2_free { xfs_dir2_free_hdr_t hdr; /* block header */ - __be16 bests[1]; /* best free counts */ + xfs_dir2_data_off_t bests[1]; /* best free counts */ /* unused entries are -1 */ } xfs_dir2_free_t; diff --git a/trunk/fs/xfs/xfs_dir2_sf.c b/trunk/fs/xfs/xfs_dir2_sf.c index d98a41d1fe63..ec8e7476c8b7 100644 --- a/trunk/fs/xfs/xfs_dir2_sf.c +++ b/trunk/fs/xfs/xfs_dir2_sf.c @@ -98,8 +98,8 @@ xfs_dir2_block_sfsize( /* * Iterate over the block's data entries by using the leaf pointers. */ - for (i = 0; i < be32_to_cpu(btp->count); i++) { - if ((addr = be32_to_cpu(blp[i].address)) == XFS_DIR2_NULL_DATAPTR) + for (i = 0; i < INT_GET(btp->count, ARCH_CONVERT); i++) { + if ((addr = INT_GET(blp[i].address, ARCH_CONVERT)) == XFS_DIR2_NULL_DATAPTR) continue; /* * Calculate the pointer to the entry at hand. @@ -220,8 +220,8 @@ xfs_dir2_block_to_sf( * If it's unused, just skip over it. 
*/ dup = (xfs_dir2_data_unused_t *)ptr; - if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - ptr += be16_to_cpu(dup->length); + if (INT_GET(dup->freetag, ARCH_CONVERT) == XFS_DIR2_DATA_FREE_TAG) { + ptr += INT_GET(dup->length, ARCH_CONVERT); continue; } dep = (xfs_dir2_data_entry_t *)ptr; diff --git a/trunk/fs/xfs/xfs_dir_leaf.c b/trunk/fs/xfs/xfs_dir_leaf.c index ee88751c3be6..e83074016abb 100644 --- a/trunk/fs/xfs/xfs_dir_leaf.c +++ b/trunk/fs/xfs/xfs_dir_leaf.c @@ -176,7 +176,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args) ASSERT(dp->i_df.if_u1.if_data != NULL); sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; sfe = &sf->list[0]; - for (i = sf->hdr.count-1; i >= 0; i--) { + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { if (sfe->namelen == args->namelen && args->name[0] == sfe->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) @@ -193,7 +193,7 @@ xfs_dir_shortform_addname(xfs_da_args_t *args) XFS_DIR_SF_PUT_DIRINO(&args->inumber, &sfe->inumber); sfe->namelen = args->namelen; memcpy(sfe->name, args->name, sfe->namelen); - sf->hdr.count++; + INT_MOD(sf->hdr.count, ARCH_CONVERT, +1); dp->i_d.di_size += size; xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); @@ -227,7 +227,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args) base = sizeof(xfs_dir_sf_hdr_t); sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; sfe = &sf->list[0]; - for (i = sf->hdr.count-1; i >= 0; i--) { + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { size = XFS_DIR_SF_ENTSIZE_BYENTRY(sfe); if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && @@ -245,7 +245,7 @@ xfs_dir_shortform_removename(xfs_da_args_t *args) memmove(&((char *)sf)[base], &((char *)sf)[base+size], dp->i_d.di_size - (base+size)); } - sf->hdr.count--; + INT_MOD(sf->hdr.count, ARCH_CONVERT, -1); xfs_idata_realloc(dp, -size, XFS_DATA_FORK); dp->i_d.di_size -= size; @@ -288,7 +288,7 @@ xfs_dir_shortform_lookup(xfs_da_args_t *args) return(XFS_ERROR(EEXIST)); } sfe = &sf->list[0]; - for (i = sf->hdr.count-1; i >= 0; i--) { + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) { @@ -375,7 +375,7 @@ xfs_dir_shortform_to_leaf(xfs_da_args_t *iargs) goto out; sfe = &sf->list[0]; - for (i = 0; i < sf->hdr.count; i++) { + for (i = 0; i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { args.name = (char *)(sfe->name); args.namelen = sfe->namelen; args.hashval = xfs_da_hashname((char *)(sfe->name), @@ -428,7 +428,7 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, sf = (xfs_dir_shortform_t *)dp->i_df.if_u1.if_data; cookhash = XFS_DA_COOKIE_HASH(mp, uio->uio_offset); want_entno = XFS_DA_COOKIE_ENTRY(mp, uio->uio_offset); - nsbuf = sf->hdr.count + 2; + nsbuf = INT_GET(sf->hdr.count, ARCH_CONVERT) + 2; sbsize = (nsbuf + 1) * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); @@ -460,7 +460,8 @@ xfs_dir_shortform_getdents(xfs_inode_t *dp, uio_t *uio, int *eofp, /* * Scan the directory data for the rest of the entries. 
*/ - for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { + for (i = 0, sfe = &sf->list[0]; + i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) { if (unlikely( ((char *)sfe < (char *)sf) || @@ -599,7 +600,7 @@ xfs_dir_shortform_replace(xfs_da_args_t *args) } ASSERT(args->namelen != 1 || args->name[0] != '.'); sfe = &sf->list[0]; - for (i = sf->hdr.count-1; i >= 0; i--) { + for (i = INT_GET(sf->hdr.count, ARCH_CONVERT)-1; i >= 0; i--) { if (sfe->namelen == args->namelen && sfe->name[0] == args->name[0] && memcmp(args->name, sfe->name, args->namelen) == 0) { @@ -643,7 +644,7 @@ xfs_dir_leaf_to_shortform(xfs_da_args_t *iargs) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_dir_leafblock_t *)tmpbuffer; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -741,13 +742,11 @@ xfs_dir_leaf_to_node(xfs_da_args_t *args) } node = bp1->data; leaf = bp2->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); - node->btree[0].hashval = cpu_to_be32( - INT_GET(leaf->entries[ - INT_GET(leaf->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT)); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + INT_SET(node->btree[0].hashval, ARCH_CONVERT, INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT)); xfs_da_buf_done(bp2); - node->btree[0].before = cpu_to_be32(blkno); - node->hdr.count = cpu_to_be16(1); + INT_SET(node->btree[0].before, ARCH_CONVERT, blkno); + INT_SET(node->hdr.count, ARCH_CONVERT, 1); xfs_da_log_buf(args->trans, bp1, XFS_DA_LOGRANGE(node, &node->btree[0], sizeof(node->btree[0]))); xfs_da_buf_done(bp1); @@ -782,7 +781,7 @@ xfs_dir_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp) leaf = bp->data; memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount)); hdr = &leaf->hdr; - hdr->info.magic = cpu_to_be16(XFS_DIR_LEAF_MAGIC); + INT_SET(hdr->info.magic, ARCH_CONVERT, XFS_DIR_LEAF_MAGIC); INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount)); if (!hdr->firstused) INT_SET(hdr->firstused, ARCH_CONVERT, XFS_LBSIZE(dp->i_mount) - 1); @@ -861,7 +860,7 @@ xfs_dir_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index) int tablesize, entsize, sum, i, tmp, error; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); ASSERT((index >= 0) && (index <= INT_GET(leaf->hdr.count, ARCH_CONVERT))); hdr = &leaf->hdr; entsize = XFS_DIR_LEAF_ENTSIZE_BYNAME(args->namelen); @@ -941,7 +940,7 @@ xfs_dir_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int index, int tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_DIR_LEAF_MAPSIZE)); ASSERT((index >= 0) && (index <= INT_GET(hdr->count, ARCH_CONVERT))); @@ -1098,8 +1097,8 @@ xfs_dir_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_DIR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == 
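The xfs_dir_shortform_* loops above iterate sf->hdr.count entries whose sizes vary with the stored name, so the cursor advances by each entry's computed size (XFS_DIR_SF_ENTSIZE_BYENTRY) rather than by array indexing. A user-space sketch of walking such a packed variable-length record buffer; the layout is simplified and the names are invented:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct sf_entry {
        uint8_t namelen;
        char    name[1];                 /* really namelen bytes */
    };

    #define SF_ENTSIZE(len) (offsetof(struct sf_entry, name) + (len))

    static void walk(const uint8_t *buf, int count)
    {
        const uint8_t *p = buf;
        for (int i = 0; i < count; i++) {
            const struct sf_entry *sfe = (const struct sf_entry *)p;
            printf("entry %d: %.*s\n", i, sfe->namelen, sfe->name);
            p += SF_ENTSIZE(sfe->namelen);   /* variable-size step */
        }
    }

    int main(void)
    {
        uint8_t buf[32], *p = buf;
        const char *names[] = { "a", "bc" };
        for (int i = 0; i < 2; i++) {    /* pack two entries */
            struct sf_entry *sfe = (struct sf_entry *)p;
            sfe->namelen = (uint8_t)strlen(names[i]);
            memcpy(sfe->name, names[i], sfe->namelen);
            p += SF_ENTSIZE(sfe->namelen);
        }
        walk(buf, 2);
        return 0;
    }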
XFS_DIR_LEAF_MAGIC); /* * Check ordering of blocks, reverse if it makes things simpler. @@ -1326,7 +1325,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(info->magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); leaf = (xfs_dir_leafblock_t *)info; count = INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes = (uint)sizeof(xfs_dir_leaf_hdr_t) + @@ -1349,7 +1348,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) * Make altpath point to the block we want to keep and * path point to the block we want to drop (this one). */ - forward = (info->forw != 0); + forward = info->forw; memcpy(&state->altpath, &state->path, sizeof(state->path)); error = xfs_da_path_shift(state, &state->altpath, forward, 0, &retval); @@ -1370,12 +1369,12 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) * We prefer coalescing with the lower numbered sibling so as * to shrink a directory over time. */ - forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back)); /* start with smaller blk num */ + forward = (INT_GET(info->forw, ARCH_CONVERT) < INT_GET(info->back, ARCH_CONVERT)); /* start with smaller blk num */ for (i = 0; i < 2; forward = !forward, i++) { if (forward) - blkno = be32_to_cpu(info->forw); + blkno = INT_GET(info->forw, ARCH_CONVERT); else - blkno = be32_to_cpu(info->back); + blkno = INT_GET(info->back, ARCH_CONVERT); if (blkno == 0) continue; error = xfs_da_read_buf(state->args->trans, state->args->dp, @@ -1390,7 +1389,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action) bytes = state->blocksize - (state->blocksize>>2); bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); count += INT_GET(leaf->hdr.count, ARCH_CONVERT); bytes -= INT_GET(leaf->hdr.namebytes, ARCH_CONVERT); bytes -= count * ((uint)sizeof(xfs_dir_leaf_name_t) - 1); @@ -1448,7 +1447,7 @@ xfs_dir_leaf_remove(xfs_trans_t *trans, xfs_dabuf_t *bp, int index) xfs_mount_t *mp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); hdr = &leaf->hdr; mp = trans->t_mountp; ASSERT((INT_GET(hdr->count, ARCH_CONVERT) > 0) && (INT_GET(hdr->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); @@ -1600,8 +1599,8 @@ xfs_dir_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_DIR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(drop_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(save_leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; @@ -1696,7 +1695,7 @@ xfs_dir_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args, int *index) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); ASSERT(INT_GET(leaf->hdr.count, ARCH_CONVERT) < (XFS_LBSIZE(args->dp->i_mount)/8)); /* @@ -1783,8 +1782,8 @@ xfs_dir_leaf_moveents(xfs_dir_leafblock_t *leaf_s, int start_s, /* * Set up environment. 
*/ - ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_s->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf_d->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; ASSERT((INT_GET(hdr_s->count, ARCH_CONVERT) > 0) && (INT_GET(hdr_s->count, ARCH_CONVERT) < (XFS_LBSIZE(mp)/8))); @@ -1884,8 +1883,8 @@ xfs_dir_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR_LEAF_MAGIC) && - (be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR_LEAF_MAGIC)); + ASSERT((INT_GET(leaf1->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC) && + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC)); if ((INT_GET(leaf1->hdr.count, ARCH_CONVERT) > 0) && (INT_GET(leaf2->hdr.count, ARCH_CONVERT) > 0) && ((INT_GET(leaf2->entries[ 0 ].hashval, ARCH_CONVERT) < INT_GET(leaf1->entries[ 0 ].hashval, ARCH_CONVERT)) || @@ -1905,7 +1904,7 @@ xfs_dir_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_dir_leafblock_t *leaf; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR_LEAF_MAGIC); + ASSERT(INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) == XFS_DIR_LEAF_MAGIC); if (count) *count = INT_GET(leaf->hdr.count, ARCH_CONVERT); if (!leaf->hdr.count) @@ -1941,7 +1940,7 @@ xfs_dir_leaf_getdents_int( mp = dp->i_mount; leaf = bp->data; - if (be16_to_cpu(leaf->hdr.info.magic) != XFS_DIR_LEAF_MAGIC) { + if (INT_GET(leaf->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) { *eobp = 1; return XFS_ERROR(ENOENT); /* XXX wrong code */ } @@ -1993,7 +1992,7 @@ xfs_dir_leaf_getdents_int( if (i == INT_GET(leaf->hdr.count, ARCH_CONVERT)) { xfs_dir_trace_g_du("leaf: hash not found", dp, uio); - if (!leaf->hdr.info.forw) + if (!INT_GET(leaf->hdr.info.forw, ARCH_CONVERT)) uio->uio_offset = XFS_DA_MAKE_COOKIE(mp, 0, 0, XFS_DA_MAXHASH); /* @@ -2048,7 +2047,8 @@ xfs_dir_leaf_getdents_int( xfs_dir_trace_g_duc("leaf: middle cookie ", dp, uio, p.cook.o); - } else if ((thishash = be32_to_cpu(leaf->hdr.info.forw))) { + } else if ((thishash = INT_GET(leaf->hdr.info.forw, + ARCH_CONVERT))) { xfs_dabuf_t *bp2; xfs_dir_leafblock_t *leaf2; @@ -2064,9 +2064,9 @@ xfs_dir_leaf_getdents_int( leaf2 = bp2->data; if (unlikely( - (be16_to_cpu(leaf2->hdr.info.magic) + (INT_GET(leaf2->hdr.info.magic, ARCH_CONVERT) != XFS_DIR_LEAF_MAGIC) - || (be32_to_cpu(leaf2->hdr.info.back) + || (INT_GET(leaf2->hdr.info.back, ARCH_CONVERT) != bno))) { /* GROT */ XFS_CORRUPTION_ERROR("xfs_dir_leaf_getdents_int(3)", XFS_ERRLEVEL_LOW, mp, diff --git a/trunk/fs/xfs/xfs_dir_sf.h b/trunk/fs/xfs/xfs_dir_sf.h index 5b20b4d3f57d..fe44c6f4d560 100644 --- a/trunk/fs/xfs/xfs_dir_sf.h +++ b/trunk/fs/xfs/xfs_dir_sf.h @@ -35,21 +35,19 @@ typedef struct { __uint8_t i[sizeof(xfs_ino_t)]; } xfs_dir_ino_t; * and the elements much be memcpy'd out into a work area to get correct * alignment for the inode number fields. 
*/ -typedef struct xfs_dir_sf_hdr { /* constant-structure header block */ - xfs_dir_ino_t parent; /* parent dir inode number */ - __uint8_t count; /* count of active entries */ -} xfs_dir_sf_hdr_t; - -typedef struct xfs_dir_sf_entry { - xfs_dir_ino_t inumber; /* referenced inode number */ - __uint8_t namelen; /* actual length of name (no NULL) */ - __uint8_t name[1]; /* name */ -} xfs_dir_sf_entry_t; - typedef struct xfs_dir_shortform { - xfs_dir_sf_hdr_t hdr; - xfs_dir_sf_entry_t list[1]; /* variable sized array */ + struct xfs_dir_sf_hdr { /* constant-structure header block */ + xfs_dir_ino_t parent; /* parent dir inode number */ + __uint8_t count; /* count of active entries */ + } hdr; + struct xfs_dir_sf_entry { + xfs_dir_ino_t inumber; /* referenced inode number */ + __uint8_t namelen; /* actual length of name (no NULL) */ + __uint8_t name[1]; /* name */ + } list[1]; /* variable sized array */ } xfs_dir_shortform_t; +typedef struct xfs_dir_sf_hdr xfs_dir_sf_hdr_t; +typedef struct xfs_dir_sf_entry xfs_dir_sf_entry_t; /* * We generate this then sort it, so that readdirs are returned in diff --git a/trunk/fs/xfs/xfs_dmapi.h b/trunk/fs/xfs/xfs_dmapi.h index 00b1540f8108..b4c7f2bc55a0 100644 --- a/trunk/fs/xfs/xfs_dmapi.h +++ b/trunk/fs/xfs/xfs_dmapi.h @@ -191,4 +191,14 @@ typedef enum { extern struct bhv_vfsops xfs_dmops; +#ifdef CONFIG_XFS_DMAPI +void xfs_dm_init(struct file_system_type *); +void xfs_dm_exit(struct file_system_type *); +#define XFS_DM_INIT(fstype) xfs_dm_init(fstype) +#define XFS_DM_EXIT(fstype) xfs_dm_exit(fstype) +#else +#define XFS_DM_INIT(fstype) +#define XFS_DM_EXIT(fstype) +#endif + #endif /* __XFS_DMAPI_H__ */ diff --git a/trunk/fs/xfs/xfs_fsops.c b/trunk/fs/xfs/xfs_fsops.c index 56caa88713ab..b4d971b01588 100644 --- a/trunk/fs/xfs/xfs_fsops.c +++ b/trunk/fs/xfs/xfs_fsops.c @@ -462,7 +462,6 @@ xfs_fs_counts( { unsigned long s; - xfs_icsb_sync_counters_lazy(mp); s = XFS_SB_LOCK(mp); cnt->freedata = mp->m_sb.sb_fdblocks; cnt->freertx = mp->m_sb.sb_frextents; diff --git a/trunk/fs/xfs/xfs_ialloc.c b/trunk/fs/xfs/xfs_ialloc.c index 0024892841a3..8f3fae1aa98a 100644 --- a/trunk/fs/xfs/xfs_ialloc.c +++ b/trunk/fs/xfs/xfs_ialloc.c @@ -138,6 +138,8 @@ xfs_ialloc_ag_alloc( int version; /* inode version number to use */ int isaligned; /* inode allocation at stripe unit */ /* boundary */ + xfs_dinode_core_t dic; /* a dinode_core to copy to new */ + /* inodes */ args.tp = tp; args.mp = tp->t_mountp; @@ -248,6 +250,10 @@ xfs_ialloc_ag_alloc( else version = XFS_DINODE_VERSION_1; + memset(&dic, 0, sizeof(xfs_dinode_core_t)); + INT_SET(dic.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); + INT_SET(dic.di_version, ARCH_CONVERT, version); + for (j = 0; j < nbufs; j++) { /* * Get the block. @@ -260,13 +266,12 @@ xfs_ialloc_ag_alloc( ASSERT(fbuf); ASSERT(!XFS_BUF_GETERROR(fbuf)); /* - * Set initial values for the inodes in this buffer. + * Loop over the inodes in this buffer. 
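The xfs_dmapi.h hunk above restores a standard kernel idiom: with CONFIG_XFS_DMAPI off, XFS_DM_INIT()/XFS_DM_EXIT() expand to nothing, so callers need no #ifdef at each call site. A tiny sketch of the idiom; FEATURE_FOO and foo_init are made-up names:

    #include <stdio.h>

    #ifdef FEATURE_FOO                   /* stands in for CONFIG_XFS_DMAPI */
    static void foo_init(const char *name) { printf("foo: %s\n", name); }
    #define FOO_INIT(name) foo_init(name)
    #else
    #define FOO_INIT(name)               /* compiles away entirely */
    #endif

    int main(void)
    {
        FOO_INIT("demo");                /* real call only with -DFEATURE_FOO */
        return 0;
    }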
*/ - xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); + for (i = 0; i < ninodes; i++) { free = XFS_MAKE_IPTR(args.mp, fbuf, i); - INT_SET(free->di_core.di_magic, ARCH_CONVERT, XFS_DINODE_MAGIC); - INT_SET(free->di_core.di_version, ARCH_CONVERT, version); + memcpy(&(free->di_core), &dic, sizeof(xfs_dinode_core_t)); INT_SET(free->di_next_unlinked, ARCH_CONVERT, NULLAGINO); xfs_ialloc_log_di(tp, fbuf, i, XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); diff --git a/trunk/fs/xfs/xfs_iget.c b/trunk/fs/xfs/xfs_iget.c index 3ce35a6f700b..8e380a1fb79b 100644 --- a/trunk/fs/xfs/xfs_iget.c +++ b/trunk/fs/xfs/xfs_iget.c @@ -258,7 +258,7 @@ xfs_iget_core( goto finish_inode; } else if (vp != inode_vp) { - struct inode *inode = vn_to_inode(inode_vp); + struct inode *inode = LINVFS_GET_IP(inode_vp); /* The inode is being torn down, pause and * try again. @@ -495,7 +495,7 @@ xfs_iget( if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { xfs_inode_t *ip; - vp = vn_from_inode(inode); + vp = LINVFS_GET_VP(inode); if (inode->i_state & I_NEW) { vn_initialize(inode); error = xfs_iget_core(vp, mp, tp, ino, flags, @@ -617,7 +617,7 @@ xfs_iput_new(xfs_inode_t *ip, uint lock_flags) { vnode_t *vp = XFS_ITOV(ip); - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); diff --git a/trunk/fs/xfs/xfs_inode.c b/trunk/fs/xfs/xfs_inode.c index 88a517fad07b..1d7f5a7e063e 100644 --- a/trunk/fs/xfs/xfs_inode.c +++ b/trunk/fs/xfs/xfs_inode.c @@ -76,18 +76,16 @@ STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); */ STATIC void xfs_validate_extents( - xfs_ifork_t *ifp, + xfs_bmbt_rec_t *ep, int nrecs, int disk, xfs_exntfmt_t fmt) { - xfs_bmbt_rec_t *ep; xfs_bmbt_irec_t irec; xfs_bmbt_rec_t rec; int i; for (i = 0; i < nrecs; i++) { - ep = xfs_iext_get_ext(ifp, i); rec.l0 = get_unaligned((__uint64_t*)&ep->l0); rec.l1 = get_unaligned((__uint64_t*)&ep->l1); if (disk) @@ -96,10 +94,11 @@ xfs_validate_extents( xfs_bmbt_get_all(&rec, &irec); if (fmt == XFS_EXTFMT_NOSTATE) ASSERT(irec.br_state == XFS_EXT_NORM); + ep++; } } #else /* DEBUG */ -#define xfs_validate_extents(ifp, nrecs, disk, fmt) +#define xfs_validate_extents(ep, nrecs, disk, fmt) #endif /* DEBUG */ /* @@ -253,8 +252,7 @@ xfs_itobp( xfs_inode_t *ip, xfs_dinode_t **dipp, xfs_buf_t **bpp, - xfs_daddr_t bno, - uint imap_flags) + xfs_daddr_t bno) { xfs_buf_t *bp; int error; @@ -270,9 +268,10 @@ xfs_itobp( * inode on disk. */ imap.im_blkno = bno; - if ((error = xfs_imap(mp, tp, ip->i_ino, &imap, - XFS_IMAP_LOOKUP | imap_flags))) + error = xfs_imap(mp, tp, ip->i_ino, &imap, XFS_IMAP_LOOKUP); + if (error != 0) { return error; + } /* * If the inode number maps to a block outside the bounds @@ -336,10 +335,9 @@ xfs_itobp( * (if DEBUG kernel) or the first inode in the buffer, otherwise. */ #ifdef DEBUG - ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : - (BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog); + ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog; #else - ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 
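The xfs_ialloc.c change above builds one xfs_dinode_core_t template (dic) before the loop and memcpy()s it into every new inode, rather than zeroing the buffer and re-setting the same fields per inode. A sketch of the stamp-from-template pattern; the record layout is invented (0x494e echoes the "IN" dinode magic):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct core { uint16_t magic; uint8_t version; uint8_t pad[13]; };

    int main(void)
    {
        struct core tmpl, inodes[4];

        memset(&tmpl, 0, sizeof(tmpl));  /* build the template once */
        tmpl.magic = 0x494e;
        tmpl.version = 1;

        for (int i = 0; i < 4; i++)      /* stamp it into each slot */
            memcpy(&inodes[i], &tmpl, sizeof(tmpl));

        printf("magic %#x version %u\n",
               (unsigned)inodes[3].magic, (unsigned)inodes[3].version);
        return 0;
    }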
0 : 1; + ni = 1; #endif for (i = 0; i < ni; i++) { int di_ok; @@ -506,7 +504,7 @@ xfs_iformat( switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) { case XFS_DINODE_FMT_LOCAL: atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); - size = be16_to_cpu(atp->hdr.totsize); + size = (int)INT_GET(atp->hdr.totsize, ARCH_CONVERT); error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); break; case XFS_DINODE_FMT_EXTENTS: @@ -599,6 +597,7 @@ xfs_iformat_extents( xfs_bmbt_rec_t *ep, *dp; xfs_ifork_t *ifp; int nex; + int real_size; int size; int i; @@ -620,20 +619,23 @@ xfs_iformat_extents( return XFS_ERROR(EFSCORRUPTED); } - ifp->if_real_bytes = 0; + real_size = 0; if (nex == 0) ifp->if_u1.if_extents = NULL; else if (nex <= XFS_INLINE_EXTS) ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - else - xfs_iext_add(ifp, 0, nex); - + else { + ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_u1.if_extents != NULL); + real_size = size; + } ifp->if_bytes = size; + ifp->if_real_bytes = real_size; if (size) { dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); - xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip)); - for (i = 0; i < nex; i++, dp++) { - ep = xfs_iext_get_ext(ifp, i); + xfs_validate_extents(dp, nex, 1, XFS_EXTFMT_INODE(ip)); + ep = ifp->if_u1.if_extents; + for (i = 0; i < nex; i++, ep++, dp++) { ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0), ARCH_CONVERT); ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), @@ -644,7 +646,7 @@ xfs_iformat_extents( if (whichfork != XFS_DATA_FORK || XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) if (unlikely(xfs_check_nostate_extents( - ifp, 0, nex))) { + ifp->if_u1.if_extents, nex))) { XFS_ERROR_REPORT("xfs_iformat_extents(2)", XFS_ERRLEVEL_LOW, ip->i_mount); @@ -869,8 +871,9 @@ xfs_iread( * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will * know that this is a new incore inode. 
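xfs_iformat_extents above reads each on-disk extent word with get_unaligned() plus an endian conversion, since packed on-disk records need not be naturally aligned for the host. A portable user-space equivalent of the same idea, with memcpy() as the standard unaligned-safe load:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t load_be64_unaligned(const void *p)
    {
        uint8_t b[8];
        uint64_t v = 0;
        memcpy(b, p, 8);                 /* unaligned-safe fetch */
        for (int i = 0; i < 8; i++)
            v = (v << 8) | b[i];         /* big-endian to host order */
        return v;
    }

    int main(void)
    {
        /* 9 bytes so the 8-byte word at offset 1 is misaligned. */
        uint8_t raw[9] = { 0, 0, 0, 0, 0, 0, 0, 0x12, 0x34 };
        printf("%#llx\n",
               (unsigned long long)load_be64_unaligned(raw + 1)); /* 0x1234 */
        return 0;
    }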
*/ - error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0); - if (error) { + error = xfs_itobp(mp, tp, ip, &dip, &bp, bno); + + if (error != 0) { kmem_zone_free(xfs_inode_zone, ip); return error; } @@ -1012,7 +1015,6 @@ xfs_iread_extents( { int error; xfs_ifork_t *ifp; - xfs_extnum_t nextents; size_t size; if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { @@ -1020,24 +1022,26 @@ xfs_iread_extents( ip->i_mount); return XFS_ERROR(EFSCORRUPTED); } - nextents = XFS_IFORK_NEXTENTS(ip, whichfork); - size = nextents * sizeof(xfs_bmbt_rec_t); + size = XFS_IFORK_NEXTENTS(ip, whichfork) * (uint)sizeof(xfs_bmbt_rec_t); ifp = XFS_IFORK_PTR(ip, whichfork); - /* * We know that the size is valid (it's checked in iformat_btree) */ + ifp->if_u1.if_extents = kmem_alloc(size, KM_SLEEP); + ASSERT(ifp->if_u1.if_extents != NULL); ifp->if_lastex = NULLEXTNUM; - ifp->if_bytes = ifp->if_real_bytes = 0; + ifp->if_bytes = ifp->if_real_bytes = (int)size; ifp->if_flags |= XFS_IFEXTENTS; - xfs_iext_add(ifp, 0, nextents); error = xfs_bmap_read_extents(tp, ip, whichfork); if (error) { - xfs_iext_destroy(ifp); + kmem_free(ifp->if_u1.if_extents, size); + ifp->if_u1.if_extents = NULL; + ifp->if_bytes = ifp->if_real_bytes = 0; ifp->if_flags &= ~XFS_IFEXTENTS; return error; } - xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip)); + xfs_validate_extents((xfs_bmbt_rec_t *)ifp->if_u1.if_extents, + XFS_IFORK_NEXTENTS(ip, whichfork), 0, XFS_EXTFMT_INODE(ip)); return 0; } @@ -1372,10 +1376,10 @@ xfs_itrunc_trace( (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff), (void*)(unsigned long)(toss_finish & 0xffffffff), (void*)(unsigned long)current_cpu(), - (void*)(unsigned long)current_pid(), - (void*)NULL, - (void*)NULL, - (void*)NULL); + (void*)0, + (void*)0, + (void*)0, + (void*)0); } #else #define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish) @@ -1393,16 +1397,6 @@ xfs_itrunc_trace( * calling into the buffer/page cache code and we can't hold the * inode lock when we do so. * - * We need to wait for any direct I/Os in flight to complete before we - * proceed with the truncate. This is needed to prevent the extents - * being read or written by the direct I/Os from being removed while the - * I/O is in flight as there is no other method of synchronising - * direct I/O with the truncate operation. Also, because we hold - * the IOLOCK in exclusive mode, we prevent new direct I/Os from being - * started until the truncate completes and drops the lock. Essentially, - * the vn_iowait() call forms an I/O barrier that provides strict ordering - * between direct I/Os and the truncate operation. - * * The flags parameter can have either the value XFS_ITRUNC_DEFINITE * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used * in the case that the caller is locking things out of order and @@ -1430,9 +1424,6 @@ xfs_itruncate_start( mp = ip->i_mount; vp = XFS_ITOV(ip); - - vn_iowait(vp); /* wait for the completion of any pending DIOs */ - /* * Call VOP_TOSS_PAGES() or VOP_FLUSHINVAL_PAGES() to get rid of pages and buffers * overlapping the region being removed. We have to use @@ -1908,7 +1899,7 @@ xfs_iunlink( * Here we put the head pointer into our next pointer, * and then we fall through to point the head at us. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); if (error) { return error; } @@ -2017,7 +2008,7 @@ xfs_iunlink_remove( * of dealing with the buffer when there is no need to * change it. 
*/ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2079,7 +2070,7 @@ xfs_iunlink_remove( * Now last_ibp points to the buffer previous to us on * the unlinked list. Pull us from the list. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2484,6 +2475,92 @@ xfs_iroot_realloc( } +/* + * This is called when the amount of space needed for if_extents + * is increased or decreased. The change in size is indicated by + * the number of extents that need to be added or deleted in the + * ext_diff parameter. + * + * If the amount of space needed has decreased below the size of the + * inline buffer, then switch to using the inline buffer. Otherwise, + * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer + * to what is needed. + * + * ip -- the inode whose if_extents area is changing + * ext_diff -- the change in the number of extents, positive or negative, + * requested for the if_extents array. + */ +void +xfs_iext_realloc( + xfs_inode_t *ip, + int ext_diff, + int whichfork) +{ + int byte_diff; + xfs_ifork_t *ifp; + int new_size; + uint rnew_size; + + if (ext_diff == 0) { + return; + } + + ifp = XFS_IFORK_PTR(ip, whichfork); + byte_diff = ext_diff * (uint)sizeof(xfs_bmbt_rec_t); + new_size = (int)ifp->if_bytes + byte_diff; + ASSERT(new_size >= 0); + + if (new_size == 0) { + if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { + ASSERT(ifp->if_real_bytes != 0); + kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); + } + ifp->if_u1.if_extents = NULL; + rnew_size = 0; + } else if (new_size <= sizeof(ifp->if_u2.if_inline_ext)) { + /* + * If the valid extents can fit in if_inline_ext, + * copy them from the malloc'd vector and free it. + */ + if (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext) { + /* + * For now, empty files are format EXTENTS, + * so the if_extents pointer is null. + */ + if (ifp->if_u1.if_extents) { + memcpy(ifp->if_u2.if_inline_ext, + ifp->if_u1.if_extents, new_size); + kmem_free(ifp->if_u1.if_extents, + ifp->if_real_bytes); + } + ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; + } + rnew_size = 0; + } else { + rnew_size = new_size; + if ((rnew_size & (rnew_size - 1)) != 0) + rnew_size = xfs_iroundup(rnew_size); + /* + * Stuck with malloc/realloc. + */ + if (ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_alloc(rnew_size, KM_SLEEP); + memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, + sizeof(ifp->if_u2.if_inline_ext)); + } else if (rnew_size != ifp->if_real_bytes) { + ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) + kmem_realloc(ifp->if_u1.if_extents, + rnew_size, + ifp->if_real_bytes, + KM_NOFS); + } + } + ifp->if_real_bytes = rnew_size; + ifp->if_bytes = new_size; +} + + /* * This is called when the amount of space needed for if_data * is increased or decreased. 
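The restored xfs_iext_realloc keeps a small extent array in the fork's inline buffer, falls back to the allocator beyond that, and rounds heap sizes up to a power of two (via xfs_iroundup) so repeated small growths do not reallocate every time. A compact user-space sketch of that inline-or-heap pattern; sizes and names are arbitrary:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static uint32_t roundup_pow2(uint32_t n)   /* like xfs_iroundup */
    {
        uint32_t p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    struct buf {
        char   inline_buf[16];
        char  *data;                     /* inline_buf or heap */
        size_t bytes;                    /* bytes in use */
        size_t cap;                      /* 0 while inline */
    };

    static void buf_resize(struct buf *b, size_t new_bytes)
    {
        if (new_bytes <= sizeof(b->inline_buf)) {
            if (b->cap) {                /* heap -> back to inline */
                memcpy(b->inline_buf, b->data, new_bytes);
                free(b->data);
                b->cap = 0;
            }
            b->data = b->inline_buf;
        } else {
            size_t cap = roundup_pow2((uint32_t)new_bytes);
            if (b->cap == 0) {           /* inline -> heap */
                char *p = malloc(cap);
                memcpy(p, b->inline_buf, b->bytes);
                b->data = p;
            } else if (cap != b->cap) {  /* heap -> resized heap */
                b->data = realloc(b->data, cap);
            }
            b->cap = cap;
        }
        b->bytes = new_bytes;
    }

    int main(void)
    {
        struct buf b = { .data = b.inline_buf };
        buf_resize(&b, 8);
        memcpy(b.data, "inline!", 8);
        buf_resize(&b, 40);              /* capacity rounds up to 64 */
        printf("%s bytes=%zu cap=%zu\n", b.data, b.bytes, b.cap);
        if (b.cap)
            free(b.data);
        return 0;
    }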
The change in size is indicated by @@ -2643,11 +2720,12 @@ xfs_idestroy_fork( ifp->if_real_bytes = 0; } } else if ((ifp->if_flags & XFS_IFEXTENTS) && - ((ifp->if_flags & XFS_IFEXTIREC) || - ((ifp->if_u1.if_extents != NULL) && - (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) { + (ifp->if_u1.if_extents != NULL) && + (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)) { ASSERT(ifp->if_real_bytes != 0); - xfs_iext_destroy(ifp); + kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); + ifp->if_u1.if_extents = NULL; + ifp->if_real_bytes = 0; } ASSERT(ifp->if_u1.if_extents == NULL || ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext); @@ -2736,7 +2814,7 @@ xfs_iunpin( /* make sync come back and flush this inode */ if (vp) { - struct inode *inode = vn_to_inode(vp); + struct inode *inode = LINVFS_GET_IP(vp); if (!(inode->i_state & I_NEW)) mark_inode_dirty_sync(inode); @@ -2824,15 +2902,16 @@ xfs_iextents_copy( * the delayed ones. There must be at least one * non-delayed extent. */ + ep = ifp->if_u1.if_extents; dest_ep = buffer; copied = 0; for (i = 0; i < nrecs; i++) { - ep = xfs_iext_get_ext(ifp, i); start_block = xfs_bmbt_get_startblock(ep); if (ISNULLSTARTBLOCK(start_block)) { /* * It's a delayed allocation extent, so skip it. */ + ep++; continue; } @@ -2842,10 +2921,11 @@ xfs_iextents_copy( put_unaligned(INT_GET(ep->l1, ARCH_CONVERT), (__uint64_t*)&dest_ep->l1); dest_ep++; + ep++; copied++; } ASSERT(copied != 0); - xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip)); + xfs_validate_extents(buffer, copied, 1, XFS_EXTFMT_INODE(ip)); return (copied * (uint)sizeof(xfs_bmbt_rec_t)); } @@ -2915,10 +2995,8 @@ xfs_iflush_fork( case XFS_DINODE_FMT_EXTENTS: ASSERT((ifp->if_flags & XFS_IFEXTENTS) || !(iip->ili_format.ilf_fields & extflag[whichfork])); - ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || - (ifp->if_bytes == 0)); - ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || - (ifp->if_bytes > 0)); + ASSERT((ifp->if_u1.if_extents != NULL) || (ifp->if_bytes == 0)); + ASSERT((ifp->if_u1.if_extents == NULL) || (ifp->if_bytes > 0)); if ((iip->ili_format.ilf_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); @@ -3036,8 +3114,8 @@ xfs_iflush( /* * Get the buffer containing the on-disk inode. */ - error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0); - if (error) { + error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0); + if (error != 0) { xfs_ifunlock(ip); return error; } @@ -3532,7 +3610,7 @@ xfs_iaccess( { int error; mode_t orgmode = mode; - struct inode *inode = vn_to_inode(XFS_ITOV(ip)); + struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip)); if (mode & S_IWUSR) { umode_t imode = inode->i_mode; @@ -3626,1100 +3704,3 @@ xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL); } #endif - -/* - * Return a pointer to the extent record at file index idx. 
- */ -xfs_bmbt_rec_t * -xfs_iext_get_ext( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx) /* index of target extent */ -{ - ASSERT(idx >= 0); - if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { - return ifp->if_u1.if_ext_irec->er_extbuf; - } else if (ifp->if_flags & XFS_IFEXTIREC) { - xfs_ext_irec_t *erp; /* irec pointer */ - int erp_idx = 0; /* irec index */ - xfs_extnum_t page_idx = idx; /* ext index in target list */ - - erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); - return &erp->er_extbuf[page_idx]; - } else if (ifp->if_bytes) { - return &ifp->if_u1.if_extents[idx]; - } else { - return NULL; - } -} - -/* - * Insert new item(s) into the extent records for incore inode - * fork 'ifp'. 'count' new items are inserted at index 'idx'. - */ -void -xfs_iext_insert( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx, /* starting index of new items */ - xfs_extnum_t count, /* number of inserted items */ - xfs_bmbt_irec_t *new) /* items to insert */ -{ - xfs_bmbt_rec_t *ep; /* extent record pointer */ - xfs_extnum_t i; /* extent record index */ - - ASSERT(ifp->if_flags & XFS_IFEXTENTS); - xfs_iext_add(ifp, idx, count); - for (i = idx; i < idx + count; i++, new++) { - ep = xfs_iext_get_ext(ifp, i); - xfs_bmbt_set_all(ep, new); - } -} - -/* - * This is called when the amount of space required for incore file - * extents needs to be increased. The ext_diff parameter stores the - * number of new extents being added and the idx parameter contains - * the extent index where the new extents will be added. If the new - * extents are being appended, then we just need to (re)allocate and - * initialize the space. Otherwise, if the new extents are being - * inserted into the middle of the existing entries, a bit more work - * is required to make room for the new extents to be inserted. The - * caller is responsible for filling in the new extent entries upon - * return. - */ -void -xfs_iext_add( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx, /* index to begin adding exts */ - int ext_diff) /* nubmer of extents to add */ -{ - int byte_diff; /* new bytes being added */ - int new_size; /* size of extents after adding */ - xfs_extnum_t nextents; /* number of extents in file */ - - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ASSERT((idx >= 0) && (idx <= nextents)); - byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t); - new_size = ifp->if_bytes + byte_diff; - /* - * If the new number of extents (nextents + ext_diff) - * fits inside the inode, then continue to use the inline - * extent buffer. - */ - if (nextents + ext_diff <= XFS_INLINE_EXTS) { - if (idx < nextents) { - memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff], - &ifp->if_u2.if_inline_ext[idx], - (nextents - idx) * sizeof(xfs_bmbt_rec_t)); - memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff); - } - ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - ifp->if_real_bytes = 0; - ifp->if_lastex = nextents + ext_diff; - } - /* - * Otherwise use a linear (direct) extent list. - * If the extents are currently inside the inode, - * xfs_iext_realloc_direct will switch us from - * inline to direct extent allocation mode. 
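The restored xfs_iext_add opens a hole for inserted records by sliding the array tail upward with memmove() (which is overlap-safe) and zeroing the gap for the caller to fill in. A minimal sketch with ints standing in for extent records:

    #include <stdio.h>
    #include <string.h>

    static void open_gap(int *arr, int nused, int idx, int count)
    {
        if (idx < nused)                 /* slide the tail up */
            memmove(&arr[idx + count], &arr[idx],
                    (size_t)(nused - idx) * sizeof(arr[0]));
        memset(&arr[idx], 0,             /* zero the new hole */
               (size_t)count * sizeof(arr[0]));
    }

    int main(void)
    {
        int a[8] = { 1, 2, 3, 4 };
        open_gap(a, 4, 1, 2);            /* a: 1 0 0 2 3 4 */
        for (int i = 0; i < 6; i++)
            printf("%d ", a[i]);
        printf("\n");
        return 0;
    }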
- */ - else if (nextents + ext_diff <= XFS_LINEAR_EXTS) { - xfs_iext_realloc_direct(ifp, new_size); - if (idx < nextents) { - memmove(&ifp->if_u1.if_extents[idx + ext_diff], - &ifp->if_u1.if_extents[idx], - (nextents - idx) * sizeof(xfs_bmbt_rec_t)); - memset(&ifp->if_u1.if_extents[idx], 0, byte_diff); - } - } - /* Indirection array */ - else { - xfs_ext_irec_t *erp; - int erp_idx = 0; - int page_idx = idx; - - ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS); - if (ifp->if_flags & XFS_IFEXTIREC) { - erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1); - } else { - xfs_iext_irec_init(ifp); - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - erp = ifp->if_u1.if_ext_irec; - } - /* Extents fit in target extent page */ - if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) { - if (page_idx < erp->er_extcount) { - memmove(&erp->er_extbuf[page_idx + ext_diff], - &erp->er_extbuf[page_idx], - (erp->er_extcount - page_idx) * - sizeof(xfs_bmbt_rec_t)); - memset(&erp->er_extbuf[page_idx], 0, byte_diff); - } - erp->er_extcount += ext_diff; - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); - } - /* Insert a new extent page */ - else if (erp) { - xfs_iext_add_indirect_multi(ifp, - erp_idx, page_idx, ext_diff); - } - /* - * If extent(s) are being appended to the last page in - * the indirection array and the new extent(s) don't fit - * in the page, then erp is NULL and erp_idx is set to - * the next index needed in the indirection array. - */ - else { - int count = ext_diff; - - while (count) { - erp = xfs_iext_irec_new(ifp, erp_idx); - erp->er_extcount = count; - count -= MIN(count, (int)XFS_LINEAR_EXTS); - if (count) { - erp_idx++; - } - } - } - } - ifp->if_bytes = new_size; -} - -/* - * This is called when incore extents are being added to the indirection - * array and the new extents do not fit in the target extent list. The - * erp_idx parameter contains the irec index for the target extent list - * in the indirection array, and the idx parameter contains the extent - * index within the list. The number of extents being added is stored - * in the count parameter. 
- * - * |-------| |-------| - * | | | | idx - number of extents before idx - * | idx | | count | - * | | | | count - number of extents being inserted at idx - * |-------| |-------| - * | count | | nex2 | nex2 - number of extents after idx + count - * |-------| |-------| - */ -void -xfs_iext_add_indirect_multi( - xfs_ifork_t *ifp, /* inode fork pointer */ - int erp_idx, /* target extent irec index */ - xfs_extnum_t idx, /* index within target list */ - int count) /* new extents being added */ -{ - int byte_diff; /* new bytes being added */ - xfs_ext_irec_t *erp; /* pointer to irec entry */ - xfs_extnum_t ext_diff; /* number of extents to add */ - xfs_extnum_t ext_cnt; /* new extents still needed */ - xfs_extnum_t nex2; /* extents after idx + count */ - xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */ - int nlists; /* number of irec's (lists) */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - erp = &ifp->if_u1.if_ext_irec[erp_idx]; - nex2 = erp->er_extcount - idx; - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - - /* - * Save second part of target extent list - * (all extents past */ - if (nex2) { - byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); - nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP); - memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff); - erp->er_extcount -= nex2; - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2); - memset(&erp->er_extbuf[idx], 0, byte_diff); - } - - /* - * Add the new extents to the end of the target - * list, then allocate new irec record(s) and - * extent buffer(s) as needed to store the rest - * of the new extents. - */ - ext_cnt = count; - ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount); - if (ext_diff) { - erp->er_extcount += ext_diff; - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); - ext_cnt -= ext_diff; - } - while (ext_cnt) { - erp_idx++; - erp = xfs_iext_irec_new(ifp, erp_idx); - ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS); - erp->er_extcount = ext_diff; - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff); - ext_cnt -= ext_diff; - } - - /* Add nex2 extents back to indirection array */ - if (nex2) { - xfs_extnum_t ext_avail; - int i; - - byte_diff = nex2 * sizeof(xfs_bmbt_rec_t); - ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; - i = 0; - /* - * If nex2 extents fit in the current page, append - * nex2_ep after the new extents. - */ - if (nex2 <= ext_avail) { - i = erp->er_extcount; - } - /* - * Otherwise, check if space is available in the - * next page. - */ - else if ((erp_idx < nlists - 1) && - (nex2 <= (ext_avail = XFS_LINEAR_EXTS - - ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) { - erp_idx++; - erp++; - /* Create a hole for nex2 extents */ - memmove(&erp->er_extbuf[nex2], erp->er_extbuf, - erp->er_extcount * sizeof(xfs_bmbt_rec_t)); - } - /* - * Final choice, create a new extent page for - * nex2 extents. - */ - else { - erp_idx++; - erp = xfs_iext_irec_new(ifp, erp_idx); - } - memmove(&erp->er_extbuf[i], nex2_ep, byte_diff); - kmem_free(nex2_ep, byte_diff); - erp->er_extcount += nex2; - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2); - } -} - -/* - * This is called when the amount of space required for incore file - * extents needs to be decreased. The ext_diff parameter stores the - * number of extents to be removed and the idx parameter contains - * the extent index where the extents will be removed from. - * - * If the amount of space needed has decreased below the linear - * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous - * extent array. 
Otherwise, use kmem_realloc() to adjust the - * size to what is needed. - */ -void -xfs_iext_remove( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx, /* index to begin removing exts */ - int ext_diff) /* number of extents to remove */ -{ - xfs_extnum_t nextents; /* number of extents in file */ - int new_size; /* size of extents after removal */ - - ASSERT(ext_diff > 0); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t); - - if (new_size == 0) { - xfs_iext_destroy(ifp); - } else if (ifp->if_flags & XFS_IFEXTIREC) { - xfs_iext_remove_indirect(ifp, idx, ext_diff); - } else if (ifp->if_real_bytes) { - xfs_iext_remove_direct(ifp, idx, ext_diff); - } else { - xfs_iext_remove_inline(ifp, idx, ext_diff); - } - ifp->if_bytes = new_size; -} - -/* - * This removes ext_diff extents from the inline buffer, beginning - * at extent index idx. - */ -void -xfs_iext_remove_inline( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx, /* index to begin removing exts */ - int ext_diff) /* number of extents to remove */ -{ - int nextents; /* number of extents in file */ - - ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); - ASSERT(idx < XFS_INLINE_EXTS); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ASSERT(((nextents - ext_diff) > 0) && - (nextents - ext_diff) < XFS_INLINE_EXTS); - - if (idx + ext_diff < nextents) { - memmove(&ifp->if_u2.if_inline_ext[idx], - &ifp->if_u2.if_inline_ext[idx + ext_diff], - (nextents - (idx + ext_diff)) * - sizeof(xfs_bmbt_rec_t)); - memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff], - 0, ext_diff * sizeof(xfs_bmbt_rec_t)); - } else { - memset(&ifp->if_u2.if_inline_ext[idx], 0, - ext_diff * sizeof(xfs_bmbt_rec_t)); - } -} - -/* - * This removes ext_diff extents from a linear (direct) extent list, - * beginning at extent index idx. If the extents are being removed - * from the end of the list (ie. truncate) then we just need to re- - * allocate the list to remove the extra space. Otherwise, if the - * extents are being removed from the middle of the existing extent - * entries, then we first need to move the extent records beginning - * at idx + ext_diff up in the list to overwrite the records being - * removed, then remove the extra space via kmem_realloc. - */ -void -xfs_iext_remove_direct( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx, /* index to begin removing exts */ - int ext_diff) /* number of extents to remove */ -{ - xfs_extnum_t nextents; /* number of extents in file */ - int new_size; /* size of extents after removal */ - - ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); - new_size = ifp->if_bytes - - (ext_diff * sizeof(xfs_bmbt_rec_t)); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - - if (new_size == 0) { - xfs_iext_destroy(ifp); - return; - } - /* Move extents up in the list (if needed) */ - if (idx + ext_diff < nextents) { - memmove(&ifp->if_u1.if_extents[idx], - &ifp->if_u1.if_extents[idx + ext_diff], - (nextents - (idx + ext_diff)) * - sizeof(xfs_bmbt_rec_t)); - } - memset(&ifp->if_u1.if_extents[nextents - ext_diff], - 0, ext_diff * sizeof(xfs_bmbt_rec_t)); - /* - * Reallocate the direct extent list. If the extents - * will fit inside the inode then xfs_iext_realloc_direct - * will switch from direct to inline extent allocation - * mode for us. 
- */ - xfs_iext_realloc_direct(ifp, new_size); - ifp->if_bytes = new_size; -} - -/* - * This is called when incore extents are being removed from the - * indirection array and the extents being removed span multiple extent - * buffers. The idx parameter contains the file extent index where we - * want to begin removing extents, and the count parameter contains - * how many extents need to be removed. - * - * |-------| |-------| - * | nex1 | | | nex1 - number of extents before idx - * |-------| | count | - * | | | | count - number of extents being removed at idx - * | count | |-------| - * | | | nex2 | nex2 - number of extents after idx + count - * |-------| |-------| - */ -void -xfs_iext_remove_indirect( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t idx, /* index to begin removing extents */ - int count) /* number of extents to remove */ -{ - xfs_ext_irec_t *erp; /* indirection array pointer */ - int erp_idx = 0; /* indirection array index */ - xfs_extnum_t ext_cnt; /* extents left to remove */ - xfs_extnum_t ext_diff; /* extents to remove in current list */ - xfs_extnum_t nex1; /* number of extents before idx */ - xfs_extnum_t nex2; /* extents after idx + count */ - int nlists; /* entries in indirecton array */ - int page_idx = idx; /* index in target extent list */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0); - ASSERT(erp != NULL); - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - nex1 = page_idx; - ext_cnt = count; - while (ext_cnt) { - nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0); - ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1)); - /* - * Check for deletion of entire list; - * xfs_iext_irec_remove() updates extent offsets. - */ - if (ext_diff == erp->er_extcount) { - xfs_iext_irec_remove(ifp, erp_idx); - ext_cnt -= ext_diff; - nex1 = 0; - if (ext_cnt) { - ASSERT(erp_idx < ifp->if_real_bytes / - XFS_IEXT_BUFSZ); - erp = &ifp->if_u1.if_ext_irec[erp_idx]; - nex1 = 0; - continue; - } else { - break; - } - } - /* Move extents up (if needed) */ - if (nex2) { - memmove(&erp->er_extbuf[nex1], - &erp->er_extbuf[nex1 + ext_diff], - nex2 * sizeof(xfs_bmbt_rec_t)); - } - /* Zero out rest of page */ - memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ - - ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t)))); - /* Update remaining counters */ - erp->er_extcount -= ext_diff; - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff); - ext_cnt -= ext_diff; - nex1 = 0; - erp_idx++; - erp++; - } - ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t); - xfs_iext_irec_compact(ifp); -} - -/* - * Create, destroy, or resize a linear (direct) block of extents. 
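xfs_iext_remove_direct above is the mirror of the xfs_iext_add insertion path: memmove() slides the tail down over the deleted records, then the now-unused slots at the end are zeroed. Sketch under the same stand-in types:

    #include <stdio.h>
    #include <string.h>

    static void close_gap(int *arr, int nused, int idx, int count)
    {
        if (idx + count < nused)         /* slide the tail down */
            memmove(&arr[idx], &arr[idx + count],
                    (size_t)(nused - idx - count) * sizeof(arr[0]));
        memset(&arr[nused - count], 0,   /* zero the freed tail */
               (size_t)count * sizeof(arr[0]));
    }

    int main(void)
    {
        int a[6] = { 1, 0, 0, 2, 3, 4 };
        close_gap(a, 6, 1, 2);           /* a: 1 2 3 4 0 0 */
        for (int i = 0; i < 6; i++)
            printf("%d ", a[i]);
        printf("\n");
        return 0;
    }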
- */ -void -xfs_iext_realloc_direct( - xfs_ifork_t *ifp, /* inode fork pointer */ - int new_size) /* new size of extents */ -{ - int rnew_size; /* real new size of extents */ - - rnew_size = new_size; - - ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) || - ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) && - (new_size != ifp->if_real_bytes))); - - /* Free extent records */ - if (new_size == 0) { - xfs_iext_destroy(ifp); - } - /* Resize direct extent list and zero any new bytes */ - else if (ifp->if_real_bytes) { - /* Check if extents will fit inside the inode */ - if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) { - xfs_iext_direct_to_inline(ifp, new_size / - (uint)sizeof(xfs_bmbt_rec_t)); - ifp->if_bytes = new_size; - return; - } - if ((new_size & (new_size - 1)) != 0) { - rnew_size = xfs_iroundup(new_size); - } - if (rnew_size != ifp->if_real_bytes) { - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_realloc(ifp->if_u1.if_extents, - rnew_size, - ifp->if_real_bytes, - KM_SLEEP); - } - if (rnew_size > ifp->if_real_bytes) { - memset(&ifp->if_u1.if_extents[ifp->if_bytes / - (uint)sizeof(xfs_bmbt_rec_t)], 0, - rnew_size - ifp->if_real_bytes); - } - } - /* - * Switch from the inline extent buffer to a direct - * extent list. Be sure to include the inline extent - * bytes in new_size. - */ - else { - new_size += ifp->if_bytes; - if ((new_size & (new_size - 1)) != 0) { - rnew_size = xfs_iroundup(new_size); - } - xfs_iext_inline_to_direct(ifp, rnew_size); - } - ifp->if_real_bytes = rnew_size; - ifp->if_bytes = new_size; -} - -/* - * Switch from linear (direct) extent records to inline buffer. - */ -void -xfs_iext_direct_to_inline( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t nextents) /* number of extents in file */ -{ - ASSERT(ifp->if_flags & XFS_IFEXTENTS); - ASSERT(nextents <= XFS_INLINE_EXTS); - /* - * The inline buffer was zeroed when we switched - * from inline to direct extent allocation mode, - * so we don't need to clear it here. - */ - memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents, - nextents * sizeof(xfs_bmbt_rec_t)); - kmem_free(ifp->if_u1.if_extents, KM_SLEEP); - ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; - ifp->if_real_bytes = 0; -} - -/* - * Switch from inline buffer to linear (direct) extent records. - * new_size should already be rounded up to the next power of 2 - * by the caller (when appropriate), so use new_size as it is. - * However, since new_size may be rounded up, we can't update - * if_bytes here. It is the caller's responsibility to update - * if_bytes upon return. - */ -void -xfs_iext_inline_to_direct( - xfs_ifork_t *ifp, /* inode fork pointer */ - int new_size) /* number of extents in file */ -{ - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_alloc(new_size, KM_SLEEP); - memset(ifp->if_u1.if_extents, 0, new_size); - if (ifp->if_bytes) { - memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext, - ifp->if_bytes); - memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * - sizeof(xfs_bmbt_rec_t)); - } - ifp->if_real_bytes = new_size; -} - -/* - * Resize an extent indirection array to new_size bytes. 
- */ -void -xfs_iext_realloc_indirect( - xfs_ifork_t *ifp, /* inode fork pointer */ - int new_size) /* new indirection array size */ -{ - int nlists; /* number of irec's (ex lists) */ - int size; /* current indirection array size */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - size = nlists * sizeof(xfs_ext_irec_t); - ASSERT(ifp->if_real_bytes); - ASSERT((new_size >= 0) && (new_size != size)); - if (new_size == 0) { - xfs_iext_destroy(ifp); - } else { - ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *) - kmem_realloc(ifp->if_u1.if_ext_irec, - new_size, size, KM_SLEEP); - } -} - -/* - * Switch from indirection array to linear (direct) extent allocations. - */ -void -xfs_iext_indirect_to_direct( - xfs_ifork_t *ifp) /* inode fork pointer */ -{ - xfs_bmbt_rec_t *ep; /* extent record pointer */ - xfs_extnum_t nextents; /* number of extents in file */ - int size; /* size of file extents */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ASSERT(nextents <= XFS_LINEAR_EXTS); - size = nextents * sizeof(xfs_bmbt_rec_t); - - xfs_iext_irec_compact_full(ifp); - ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); - - ep = ifp->if_u1.if_ext_irec->er_extbuf; - kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t)); - ifp->if_flags &= ~XFS_IFEXTIREC; - ifp->if_u1.if_extents = ep; - ifp->if_bytes = size; - if (nextents < XFS_LINEAR_EXTS) { - xfs_iext_realloc_direct(ifp, size); - } -} - -/* - * Free incore file extents. - */ -void -xfs_iext_destroy( - xfs_ifork_t *ifp) /* inode fork pointer */ -{ - if (ifp->if_flags & XFS_IFEXTIREC) { - int erp_idx; - int nlists; - - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) { - xfs_iext_irec_remove(ifp, erp_idx); - } - ifp->if_flags &= ~XFS_IFEXTIREC; - } else if (ifp->if_real_bytes) { - kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes); - } else if (ifp->if_bytes) { - memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS * - sizeof(xfs_bmbt_rec_t)); - } - ifp->if_u1.if_extents = NULL; - ifp->if_real_bytes = 0; - ifp->if_bytes = 0; -} - -/* - * Return a pointer to the extent record for file system block bno. 
- */
-xfs_bmbt_rec_t *			/* pointer to found extent record */
-xfs_iext_bno_to_ext(
-	xfs_ifork_t	*ifp,		/* inode fork pointer */
-	xfs_fileoff_t	bno,		/* block number to search for */
-	xfs_extnum_t	*idxp)		/* index of target extent */
-{
-	xfs_bmbt_rec_t	*base;		/* pointer to first extent */
-	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
-	xfs_bmbt_rec_t	*ep = NULL;	/* pointer to target extent */
-	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
-	int		high;		/* upper boundary in search */
-	xfs_extnum_t	idx = 0;	/* index of target extent */
-	int		low;		/* lower boundary in search */
-	xfs_extnum_t	nextents;	/* number of file extents */
-	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
-
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-	if (nextents == 0) {
-		*idxp = 0;
-		return NULL;
-	}
-	low = 0;
-	if (ifp->if_flags & XFS_IFEXTIREC) {
-		/* Find target extent list */
-		int	erp_idx = 0;
-		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
-		base = erp->er_extbuf;
-		high = erp->er_extcount - 1;
-	} else {
-		base = ifp->if_u1.if_extents;
-		high = nextents - 1;
-	}
-	/* Binary search extent records */
-	while (low <= high) {
-		idx = (low + high) >> 1;
-		ep = base + idx;
-		startoff = xfs_bmbt_get_startoff(ep);
-		blockcount = xfs_bmbt_get_blockcount(ep);
-		if (bno < startoff) {
-			high = idx - 1;
-		} else if (bno >= startoff + blockcount) {
-			low = idx + 1;
-		} else {
-			/* Convert back to file-based extent index */
-			if (ifp->if_flags & XFS_IFEXTIREC) {
-				idx += erp->er_extoff;
-			}
-			*idxp = idx;
-			return ep;
-		}
-	}
-	/* Convert back to file-based extent index */
-	if (ifp->if_flags & XFS_IFEXTIREC) {
-		idx += erp->er_extoff;
-	}
-	if (bno >= startoff + blockcount) {
-		if (++idx == nextents) {
-			ep = NULL;
-		} else {
-			ep = xfs_iext_get_ext(ifp, idx);
-		}
-	}
-	*idxp = idx;
-	return ep;
-}
-
-/*
- * Return a pointer to the indirection array entry containing the
- * extent record for filesystem block bno. Store the index of the
- * target irec in *erp_idxp.
- */
-xfs_ext_irec_t *			/* pointer to found extent record */
-xfs_iext_bno_to_irec(
-	xfs_ifork_t	*ifp,		/* inode fork pointer */
-	xfs_fileoff_t	bno,		/* block number to search for */
-	int		*erp_idxp)	/* irec index of target ext list */
-{
-	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
-	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
-	int		erp_idx;	/* indirection array index */
-	int		nlists;		/* number of extent irec's (lists) */
-	int		high;		/* binary search upper limit */
-	int		low;		/* binary search lower limit */
-
-	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	erp_idx = 0;
-	low = 0;
-	high = nlists - 1;
-	while (low <= high) {
-		erp_idx = (low + high) >> 1;
-		erp = &ifp->if_u1.if_ext_irec[erp_idx];
-		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
-		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
-			high = erp_idx - 1;
-		} else if (erp_next && bno >=
-			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
-			low = erp_idx + 1;
-		} else {
-			break;
-		}
-	}
-	*erp_idxp = erp_idx;
-	return erp;
-}
-
-/*
- * Return a pointer to the indirection array entry containing the
- * extent record at file extent index *idxp. Store the index of the
- * target irec in *erp_idxp and store the page index of the target
- * extent record in *idxp.
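
The lookup just shown is a two-level binary search: xfs_iext_bno_to_irec() first bisects the indirection entries by each buffer's starting offset, then the loop above bisects the records inside the chosen buffer. The inner search in isolation, as a user-space sketch over [startoff, startoff + blockcount) ranges with illustrative names:

	#include <stddef.h>

	typedef struct {
		unsigned long long startoff;	/* first block covered */
		unsigned long long blockcount;	/* blocks in the extent */
	} ext_t;

	/*
	 * Return the extent containing 'bno', or NULL. '*idxp' gets the
	 * index of the match, or of the first extent past 'bno', as above.
	 */
	static ext_t *ext_bsearch(ext_t *base, int n, unsigned long long bno,
				  int *idxp)
	{
		int low = 0, high = n - 1, idx = 0;

		while (low <= high) {
			idx = (low + high) >> 1;
			if (bno < base[idx].startoff)
				high = idx - 1;
			else if (bno >= base[idx].startoff + base[idx].blockcount)
				low = idx + 1;
			else {
				*idxp = idx;
				return &base[idx];
			}
		}
		/* missed: step past the last probe if bno lies beyond it */
		if (n && bno >= base[idx].startoff + base[idx].blockcount)
			idx++;
		*idxp = idx;
		return idx < n ? &base[idx] : NULL;
	}
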
- */ -xfs_ext_irec_t * -xfs_iext_idx_to_irec( - xfs_ifork_t *ifp, /* inode fork pointer */ - xfs_extnum_t *idxp, /* extent index (file -> page) */ - int *erp_idxp, /* pointer to target irec */ - int realloc) /* new bytes were just added */ -{ - xfs_ext_irec_t *prev; /* pointer to previous irec */ - xfs_ext_irec_t *erp = NULL; /* pointer to current irec */ - int erp_idx; /* indirection array index */ - int nlists; /* number of irec's (ex lists) */ - int high; /* binary search upper limit */ - int low; /* binary search lower limit */ - xfs_extnum_t page_idx = *idxp; /* extent index in target list */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - ASSERT(page_idx >= 0 && page_idx <= - ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - erp_idx = 0; - low = 0; - high = nlists - 1; - - /* Binary search extent irec's */ - while (low <= high) { - erp_idx = (low + high) >> 1; - erp = &ifp->if_u1.if_ext_irec[erp_idx]; - prev = erp_idx > 0 ? erp - 1 : NULL; - if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff && - realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) { - high = erp_idx - 1; - } else if (page_idx > erp->er_extoff + erp->er_extcount || - (page_idx == erp->er_extoff + erp->er_extcount && - !realloc)) { - low = erp_idx + 1; - } else if (page_idx == erp->er_extoff + erp->er_extcount && - erp->er_extcount == XFS_LINEAR_EXTS) { - ASSERT(realloc); - page_idx = 0; - erp_idx++; - erp = erp_idx < nlists ? erp + 1 : NULL; - break; - } else { - page_idx -= erp->er_extoff; - break; - } - } - *idxp = page_idx; - *erp_idxp = erp_idx; - return(erp); -} - -/* - * Allocate and initialize an indirection array once the space needed - * for incore extents increases above XFS_IEXT_BUFSZ. - */ -void -xfs_iext_irec_init( - xfs_ifork_t *ifp) /* inode fork pointer */ -{ - xfs_ext_irec_t *erp; /* indirection array pointer */ - xfs_extnum_t nextents; /* number of extents in file */ - - ASSERT(!(ifp->if_flags & XFS_IFEXTIREC)); - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - ASSERT(nextents <= XFS_LINEAR_EXTS); - - erp = (xfs_ext_irec_t *) - kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP); - - if (nextents == 0) { - ifp->if_u1.if_extents = (xfs_bmbt_rec_t *) - kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); - } else if (!ifp->if_real_bytes) { - xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ); - } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) { - xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ); - } - erp->er_extbuf = ifp->if_u1.if_extents; - erp->er_extcount = nextents; - erp->er_extoff = 0; - - ifp->if_flags |= XFS_IFEXTIREC; - ifp->if_real_bytes = XFS_IEXT_BUFSZ; - ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t); - ifp->if_u1.if_ext_irec = erp; - - return; -} - -/* - * Allocate and initialize a new entry in the indirection array. - */ -xfs_ext_irec_t * -xfs_iext_irec_new( - xfs_ifork_t *ifp, /* inode fork pointer */ - int erp_idx) /* index for new irec */ -{ - xfs_ext_irec_t *erp; /* indirection array pointer */ - int i; /* loop counter */ - int nlists; /* number of irec's (ex lists) */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - - /* Resize indirection array */ - xfs_iext_realloc_indirect(ifp, ++nlists * - sizeof(xfs_ext_irec_t)); - /* - * Move records down in the array so the - * new page can use erp_idx. 
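
Opening the slot is a plain dense-array insertion: entries from erp_idx onward slide up one position before the new entry is filled in (the loop that follows does this as backwards one-element memmove()s; a single overlapping memmove() is equivalent). The same pattern in miniature, assuming the array has already been grown by one element:

	#include <string.h>

	typedef struct { void *buf; int off; int cnt; } irec_t;

	/* Open and zero a slot at 'idx'; 'n' is the entry count after growing. */
	static void irec_open_slot(irec_t *arr, int n, int idx)
	{
		memmove(&arr[idx + 1], &arr[idx], (n - 1 - idx) * sizeof(irec_t));
		memset(&arr[idx], 0, sizeof(irec_t));
		/* the new entry's offset continues the running total */
		arr[idx].off = idx > 0 ? arr[idx - 1].off + arr[idx - 1].cnt : 0;
	}
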
- */ - erp = ifp->if_u1.if_ext_irec; - for (i = nlists - 1; i > erp_idx; i--) { - memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t)); - } - ASSERT(i == erp_idx); - - /* Initialize new extent record */ - erp = ifp->if_u1.if_ext_irec; - erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *) - kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP); - ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; - memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ); - erp[erp_idx].er_extcount = 0; - erp[erp_idx].er_extoff = erp_idx > 0 ? - erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0; - return (&erp[erp_idx]); -} - -/* - * Remove a record from the indirection array. - */ -void -xfs_iext_irec_remove( - xfs_ifork_t *ifp, /* inode fork pointer */ - int erp_idx) /* irec index to remove */ -{ - xfs_ext_irec_t *erp; /* indirection array pointer */ - int i; /* loop counter */ - int nlists; /* number of irec's (ex lists) */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - erp = &ifp->if_u1.if_ext_irec[erp_idx]; - if (erp->er_extbuf) { - xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, - -erp->er_extcount); - kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ); - } - /* Compact extent records */ - erp = ifp->if_u1.if_ext_irec; - for (i = erp_idx; i < nlists - 1; i++) { - memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t)); - } - /* - * Manually free the last extent record from the indirection - * array. A call to xfs_iext_realloc_indirect() with a size - * of zero would result in a call to xfs_iext_destroy() which - * would in turn call this function again, creating a nasty - * infinite loop. - */ - if (--nlists) { - xfs_iext_realloc_indirect(ifp, - nlists * sizeof(xfs_ext_irec_t)); - } else { - kmem_free(ifp->if_u1.if_ext_irec, - sizeof(xfs_ext_irec_t)); - } - ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ; -} - -/* - * This is called to clean up large amounts of unused memory allocated - * by the indirection array. Before compacting anything though, verify - * that the indirection array is still needed and switch back to the - * linear extent list (or even the inline buffer) if possible. The - * compaction policy is as follows: - * - * Full Compaction: Extents fit into a single page (or inline buffer) - * Full Compaction: Extents occupy less than 10% of allocated space - * Partial Compaction: Extents occupy > 10% and < 50% of allocated space - * No Compaction: Extents occupy at least 50% of allocated space - */ -void -xfs_iext_irec_compact( - xfs_ifork_t *ifp) /* inode fork pointer */ -{ - xfs_extnum_t nextents; /* number of extents in file */ - int nlists; /* number of irec's (ex lists) */ - - ASSERT(ifp->if_flags & XFS_IFEXTIREC); - nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - - if (nextents == 0) { - xfs_iext_destroy(ifp); - } else if (nextents <= XFS_INLINE_EXTS) { - xfs_iext_indirect_to_direct(ifp); - xfs_iext_direct_to_inline(ifp, nextents); - } else if (nextents <= XFS_LINEAR_EXTS) { - xfs_iext_indirect_to_direct(ifp); - } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) { - xfs_iext_irec_compact_full(ifp); - } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { - xfs_iext_irec_compact_pages(ifp); - } -} - -/* - * Combine extents from neighboring extent pages. 
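
For reference, the "10%" in the policy above is 1/8 (12.5%) in the code — nextents < capacity >> 3 — and the partial-compaction bound is capacity >> 1. The dispatch, restated as a sketch with the thresholds written out and hypothetical helper names standing in for the routines above and below:

	enum action { DESTROY, TO_INLINE, TO_DIRECT, COMPACT_FULL,
		      COMPACT_PAGES, LEAVE_ALONE };

	/* capacity = nlists * records-per-buffer; bounds mirror the code */
	static enum action compact_policy(int nextents, int capacity,
					  int inline_max, int direct_max)
	{
		if (nextents == 0)
			return DESTROY;
		if (nextents <= inline_max)
			return TO_INLINE;	/* fits in the inode itself */
		if (nextents <= direct_max)
			return TO_DIRECT;	/* fits in one linear buffer */
		if (nextents < capacity >> 3)	/* under 12.5% used: repack fully */
			return COMPACT_FULL;
		if (nextents < capacity >> 1)	/* under 50% used: merge neighbors */
			return COMPACT_PAGES;
		return LEAVE_ALONE;
	}
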
- */
-void
-xfs_iext_irec_compact_pages(
-	xfs_ifork_t	*ifp)		/* inode fork pointer */
-{
-	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
-	int		erp_idx = 0;	/* indirection array index */
-	int		nlists;		/* number of irec's (ex lists) */
-
-	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	while (erp_idx < nlists - 1) {
-		erp = &ifp->if_u1.if_ext_irec[erp_idx];
-		erp_next = erp + 1;
-		if (erp_next->er_extcount <=
-		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
-			memmove(&erp->er_extbuf[erp->er_extcount],
-				erp_next->er_extbuf, erp_next->er_extcount *
-				sizeof(xfs_bmbt_rec_t));
-			erp->er_extcount += erp_next->er_extcount;
-			/*
-			 * Free page before removing extent record
-			 * so er_extoffs don't get modified in
-			 * xfs_iext_irec_remove.
-			 */
-			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
-			erp_next->er_extbuf = NULL;
-			xfs_iext_irec_remove(ifp, erp_idx + 1);
-			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-		} else {
-			erp_idx++;
-		}
-	}
-}
-
-/*
- * Fully compact the extent records managed by the indirection array.
- */
-void
-xfs_iext_irec_compact_full(
-	xfs_ifork_t	*ifp)			/* inode fork pointer */
-{
-	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
-	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
-	int		erp_idx = 0;		/* extent irec index */
-	int		ext_avail;		/* empty entries in ex list */
-	int		ext_diff;		/* number of exts to add */
-	int		nlists;			/* number of irec's (ex lists) */
-
-	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	erp = ifp->if_u1.if_ext_irec;
-	ep = &erp->er_extbuf[erp->er_extcount];
-	erp_next = erp + 1;
-	ep_next = erp_next->er_extbuf;
-	while (erp_idx < nlists - 1) {
-		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
-		ext_diff = MIN(ext_avail, erp_next->er_extcount);
-		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
-		erp->er_extcount += ext_diff;
-		erp_next->er_extcount -= ext_diff;
-		/* Remove next page */
-		if (erp_next->er_extcount == 0) {
-			/*
-			 * Free page before removing extent record
-			 * so er_extoffs don't get modified in
-			 * xfs_iext_irec_remove.
-			 */
-			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
-			erp_next->er_extbuf = NULL;
-			xfs_iext_irec_remove(ifp, erp_idx + 1);
-			erp = &ifp->if_u1.if_ext_irec[erp_idx];
-			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-		/* Update next page */
-		} else {
-			/* Move rest of page up to become next new page */
-			memmove(erp_next->er_extbuf, ep_next,
-				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
-			ep_next = erp_next->er_extbuf;
-			memset(&ep_next[erp_next->er_extcount], 0,
-				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
-				sizeof(xfs_bmbt_rec_t));
-		}
-		if (erp->er_extcount == XFS_LINEAR_EXTS) {
-			erp_idx++;
-			if (erp_idx < nlists)
-				erp = &ifp->if_u1.if_ext_irec[erp_idx];
-			else
-				break;
-		}
-		ep = &erp->er_extbuf[erp->er_extcount];
-		erp_next = erp + 1;
-		ep_next = erp_next->er_extbuf;
-	}
-}
-
-/*
- * This is called to update the er_extoff field in the indirection
- * array when extents have been added or removed from one of the
- * extent lists. erp_idx contains the irec index to begin updating
- * at and ext_diff contains the number of extents that were added
- * or removed.
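
Everything above preserves one invariant: er_extoff is a prefix sum — entry i's offset equals the total record count of entries 0..i-1 — which is what makes the index-based binary search possible. A small sketch of that invariant, and of the delta update that the function below performs:

	#include <assert.h>

	typedef struct { int off; int cnt; } irec_t;

	/* Offsets must be the prefix sums of the counts. */
	static void check_offsets(const irec_t *arr, int n)
	{
		int i, sum = 0;

		for (i = 0; i < n; i++) {
			assert(arr[i].off == sum);
			sum += arr[i].cnt;
		}
	}

	/* After adding/removing 'diff' records before entry 'from', shift
	 * every later offset by the same amount to restore the invariant. */
	static void update_offsets(irec_t *arr, int n, int from, int diff)
	{
		int i;

		for (i = from; i < n; i++)
			arr[i].off += diff;
	}
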
- */
-void
-xfs_iext_irec_update_extoffs(
-	xfs_ifork_t	*ifp,		/* inode fork pointer */
-	int		erp_idx,	/* irec index to update */
-	int		ext_diff)	/* number of new extents */
-{
-	int		i;		/* loop counter */
-	int		nlists;		/* number of irec's (ex lists) */
-
-	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	for (i = erp_idx; i < nlists; i++) {
-		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
-	}
-}
diff --git a/trunk/fs/xfs/xfs_inode.h b/trunk/fs/xfs/xfs_inode.h
index 39ef9c36ea55..1cfbcf18ce86 100644
--- a/trunk/fs/xfs/xfs_inode.h
+++ b/trunk/fs/xfs/xfs_inode.h
@@ -24,38 +24,11 @@
 #define	XFS_DATA_FORK	0
 #define	XFS_ATTR_FORK	1
 
-/*
- * The following xfs_ext_irec_t struct introduces a second (top) level
- * to the in-core extent allocation scheme. These structs are allocated
- * in a contiguous block, creating an indirection array where each entry
- * (irec) contains a pointer to a buffer of in-core extent records which
- * it manages. Each extent buffer is 4k in size, since 4k is the system
- * page size on Linux i386 and systems with larger page sizes don't seem
- * to gain much, if anything, by using their native page size as the
- * extent buffer size. Also, using 4k extent buffers everywhere provides
- * a consistent interface for CXFS across different platforms.
- *
- * There is currently no limit on the number of irec's (extent lists)
- * allowed, so heavily fragmented files may require an indirection array
- * which spans multiple system pages of memory. The number of extents
- * which would require this amount of contiguous memory is very large
- * and should not cause problems in the foreseeable future. However,
- * if the memory needed for the contiguous array ever becomes a problem,
- * it is possible that a third level of indirection may be required.
- */
-typedef struct xfs_ext_irec {
-	xfs_bmbt_rec_t	*er_extbuf;	/* block of extent records */
-	xfs_extnum_t	er_extoff;	/* extent offset in file */
-	xfs_extnum_t	er_extcount;	/* number of extents in page/block */
-} xfs_ext_irec_t;
-
 /*
  * File incore extent information, present for each of data & attr forks.
  */
-#define	XFS_IEXT_BUFSZ		4096
-#define	XFS_LINEAR_EXTS		(XFS_IEXT_BUFSZ / (uint)sizeof(xfs_bmbt_rec_t))
-#define	XFS_INLINE_EXTS		2
-#define	XFS_INLINE_DATA		32
+#define	XFS_INLINE_EXTS	2
+#define	XFS_INLINE_DATA	32
 typedef struct xfs_ifork {
 	int			if_bytes;	/* bytes in if_u1 */
 	int			if_real_bytes;	/* bytes allocated in if_u1 */
@@ -66,7 +39,6 @@ typedef struct xfs_ifork {
 	xfs_extnum_t		if_lastex;	/* last if_extents used */
 	union {
 		xfs_bmbt_rec_t	*if_extents;	/* linear map file exts */
-		xfs_ext_irec_t	*if_ext_irec;	/* irec map file exts */
 		char		*if_data;	/* inline file data */
 	} if_u1;
 	union {
@@ -89,16 +61,20 @@ typedef struct xfs_ifork {
 /*
  * Per-fork incore inode flags.
  */
-#define	XFS_IFINLINE	0x01	/* Inline data is read in */
-#define	XFS_IFEXTENTS	0x02	/* All extent pointers are read in */
-#define	XFS_IFBROOT	0x04	/* i_broot points to the bmap b-tree root */
-#define	XFS_IFEXTIREC	0x08	/* Indirection array of extent blocks */
+#define	XFS_IFINLINE	0x0001	/* Inline data is read in */
+#define	XFS_IFEXTENTS	0x0002	/* All extent pointers are read in */
+#define	XFS_IFBROOT	0x0004	/* i_broot points to the bmap b-tree root */
 
 /*
- * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate().
+ * Flags for xfs_imap() and xfs_dilocate().
  */
-#define	XFS_IMAP_LOOKUP		0x1
-#define	XFS_IMAP_BULKSTAT	0x2
+#define	XFS_IMAP_LOOKUP		0x1
+
+/*
+ * Maximum number of extent pointers in if_u1.if_extents.
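
For scale: each xfs_bmbt_rec_t packs an extent into two 64-bit words, so one 4k buffer holds 4096 / 16 = 256 in-core extents per indirection entry — the XFS_LINEAR_EXTS value being removed above. A stand-alone check of that arithmetic, with the record layout assumed here rather than taken from the headers:

	#include <assert.h>
	#include <stdint.h>

	/* assumed layout: two 64-bit words per on-disk bmap record */
	typedef struct { uint64_t l0, l1; } bmbt_rec_t;

	int main(void)
	{
		assert(sizeof(bmbt_rec_t) == 16);
		assert(4096 / sizeof(bmbt_rec_t) == 256);  /* extents per buffer */
		return 0;
	}
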
+ */ +#define XFS_MAX_INCORE_EXTENTS 32768 + #ifdef __KERNEL__ struct bhv_desc; @@ -422,7 +398,7 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int); */ int xfs_itobp(struct xfs_mount *, struct xfs_trans *, xfs_inode_t *, xfs_dinode_t **, struct xfs_buf **, - xfs_daddr_t, uint); + xfs_daddr_t); int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, xfs_inode_t **, xfs_daddr_t); int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); @@ -464,32 +440,6 @@ xfs_inode_t *xfs_vtoi(struct vnode *vp); void xfs_synchronize_atime(xfs_inode_t *); -xfs_bmbt_rec_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); -void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, - xfs_bmbt_irec_t *); -void xfs_iext_add(xfs_ifork_t *, xfs_extnum_t, int); -void xfs_iext_add_indirect_multi(xfs_ifork_t *, int, xfs_extnum_t, int); -void xfs_iext_remove(xfs_ifork_t *, xfs_extnum_t, int); -void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int); -void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int); -void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int); -void xfs_iext_realloc_direct(xfs_ifork_t *, int); -void xfs_iext_realloc_indirect(xfs_ifork_t *, int); -void xfs_iext_indirect_to_direct(xfs_ifork_t *); -void xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t); -void xfs_iext_inline_to_direct(xfs_ifork_t *, int); -void xfs_iext_destroy(xfs_ifork_t *); -xfs_bmbt_rec_t *xfs_iext_bno_to_ext(xfs_ifork_t *, xfs_fileoff_t, int *); -xfs_ext_irec_t *xfs_iext_bno_to_irec(xfs_ifork_t *, xfs_fileoff_t, int *); -xfs_ext_irec_t *xfs_iext_idx_to_irec(xfs_ifork_t *, xfs_extnum_t *, int *, int); -void xfs_iext_irec_init(xfs_ifork_t *); -xfs_ext_irec_t *xfs_iext_irec_new(xfs_ifork_t *, int); -void xfs_iext_irec_remove(xfs_ifork_t *, int); -void xfs_iext_irec_compact(xfs_ifork_t *); -void xfs_iext_irec_compact_pages(xfs_ifork_t *); -void xfs_iext_irec_compact_full(xfs_ifork_t *); -void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int); - #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) #ifdef DEBUG diff --git a/trunk/fs/xfs/xfs_iomap.c b/trunk/fs/xfs/xfs_iomap.c index d5dfedcb8922..788917f355c4 100644 --- a/trunk/fs/xfs/xfs_iomap.c +++ b/trunk/fs/xfs/xfs_iomap.c @@ -76,7 +76,7 @@ xfs_iomap_enter_trace( (void *)((unsigned long)count), (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), (void *)((unsigned long)(io->io_new_size & 0xffffffff)), - (void *)((unsigned long)current_pid()), + (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL, diff --git a/trunk/fs/xfs/xfs_itable.c b/trunk/fs/xfs/xfs_itable.c index 32247b6bfee7..c59450e1be40 100644 --- a/trunk/fs/xfs/xfs_itable.c +++ b/trunk/fs/xfs/xfs_itable.c @@ -562,8 +562,7 @@ xfs_bulkstat( if (bp) xfs_buf_relse(bp); error = xfs_itobp(mp, NULL, ip, - &dip, &bp, bno, - XFS_IMAP_BULKSTAT); + &dip, &bp, bno); if (!error) clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; kmem_zone_free(xfs_inode_zone, ip); @@ -571,8 +570,6 @@ xfs_bulkstat( mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, XFS_RANDOM_BULKSTAT_READ_CHUNK)) { bp = NULL; - ubleft = 0; - rval = error; break; } } diff --git a/trunk/fs/xfs/xfs_log_recover.c b/trunk/fs/xfs/xfs_log_recover.c index add13f507ed2..7d46cbd6a07a 100644 --- a/trunk/fs/xfs/xfs_log_recover.c +++ b/trunk/fs/xfs/xfs_log_recover.c @@ -3249,7 +3249,7 @@ xlog_recover_process_iunlinks( * next inode in the bucket. 
 */
 			error = xfs_itobp(mp, NULL, ip, &dip,
-					&ibp, 0, 0);
+					&ibp, 0);
 			ASSERT(error || (dip != NULL));
 		}
diff --git a/trunk/fs/xfs/xfs_mount.c b/trunk/fs/xfs/xfs_mount.c
index 20e8abc16d18..62188ea392c7 100644
--- a/trunk/fs/xfs/xfs_mount.c
+++ b/trunk/fs/xfs/xfs_mount.c
@@ -51,32 +51,11 @@ STATIC int	xfs_uuid_mount(xfs_mount_t *);
 STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void	xfs_unmountfs_wait(xfs_mount_t *);
 
-
-#ifdef HAVE_PERCPU_SB
-STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
-STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
-STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
-STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
-						int, int);
-STATIC int	xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
-						int, int);
-STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
-
-#else
-
-#define xfs_icsb_destroy_counters(mp)			do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
-#define xfs_icsb_sync_counters(mp)			do { } while (0)
-#define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)
-#define xfs_icsb_modify_counters_locked(mp, a, b, c)	do { } while (0)
-
-#endif
-
 static const struct {
-	short offset;
-	short type;	/* 0 = integer
-			 * 1 = binary / string (no translation)
-			 */
+	short offset;
+	short type;	/* 0 = integer
+			 * 1 = binary / string (no translation)
+			 */
 } xfs_sb_info[] = {
     { offsetof(xfs_sb_t, sb_magicnum),	0 },
     { offsetof(xfs_sb_t, sb_blocksize),	0 },
@@ -134,11 +113,7 @@ xfs_mount_init(void)
 {
 	xfs_mount_t *mp;
 
-	mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
-
-	if (xfs_icsb_init_counters(mp)) {
-		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
-	}
+	mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);
 
 	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
 	spinlock_init(&mp->m_sb_lock, "xfs_sb");
@@ -161,8 +136,8 @@ xfs_mount_init(void)
  */
 void
 xfs_mount_free(
-	xfs_mount_t	*mp,
-	int		remove_bhv)
+	xfs_mount_t *mp,
+	int	    remove_bhv)
 {
 	if (mp->m_ihash)
 		xfs_ihash_free(mp);
@@ -202,7 +177,6 @@ xfs_mount_free(
 		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
 	}
 
-	xfs_icsb_destroy_counters(mp);
 	kmem_free(mp, sizeof(xfs_mount_t));
 }
 
@@ -268,12 +242,9 @@ xfs_mount_validate_sb(
 	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
 	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
 	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
-	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
-	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
-	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
 	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
 	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
-	    (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) {
+	    sbp->sb_imax_pct > 100)) {
 		cmn_err(CE_WARN, "XFS: SB sanity check 1 failed");
 		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
 				XFS_ERRLEVEL_LOW, mp, sbp);
@@ -556,10 +527,6 @@ xfs_readsb(xfs_mount_t *mp)
 		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
 	}
 
-	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
-
 	mp->m_sb_bp = bp;
 	xfs_buf_relse(bp);
 	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
@@ -1187,9 +1154,6 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
 	sbp = xfs_getsb(mp, 0);
 	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
 		XFS_FORCED_SHUTDOWN(mp))) {
-
-		xfs_icsb_sync_counters(mp);
-
 		/*
 		 * mark shared-readonly if desired
 		 */
@@ -1263,6 +1227,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
 	xfs_trans_log_buf(tp, bp, first, last);
 }
 
+
 /*
  * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a
specified field in the in-core superblock. Simply @@ -1272,7 +1237,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) * * The SB_LOCK must be held when this routine is called. */ -int +STATIC int xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd) { @@ -1441,26 +1406,9 @@ xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd) unsigned long s; int status; - /* check for per-cpu counters */ - switch (field) { -#ifdef HAVE_PERCPU_SB - case XFS_SBS_ICOUNT: - case XFS_SBS_IFREE: - case XFS_SBS_FDBLOCKS: - if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { - status = xfs_icsb_modify_counters(mp, field, - delta, rsvd); - break; - } - /* FALLTHROUGH */ -#endif - default: - s = XFS_SB_LOCK(mp); - status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); - XFS_SB_UNLOCK(mp, s); - break; - } - + s = XFS_SB_LOCK(mp); + status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); + XFS_SB_UNLOCK(mp, s); return status; } @@ -1497,26 +1445,8 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) * from the loop so we'll fall into the undo loop * below. */ - switch (msbp->msb_field) { -#ifdef HAVE_PERCPU_SB - case XFS_SBS_ICOUNT: - case XFS_SBS_IFREE: - case XFS_SBS_FDBLOCKS: - if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { - status = xfs_icsb_modify_counters_locked(mp, - msbp->msb_field, - msbp->msb_delta, rsvd); - break; - } - /* FALLTHROUGH */ -#endif - default: - status = xfs_mod_incore_sb_unlocked(mp, - msbp->msb_field, - msbp->msb_delta, rsvd); - break; - } - + status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, + msbp->msb_delta, rsvd); if (status != 0) { break; } @@ -1533,28 +1463,8 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) if (status != 0) { msbp--; while (msbp >= msb) { - switch (msbp->msb_field) { -#ifdef HAVE_PERCPU_SB - case XFS_SBS_ICOUNT: - case XFS_SBS_IFREE: - case XFS_SBS_FDBLOCKS: - if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { - status = - xfs_icsb_modify_counters_locked(mp, - msbp->msb_field, - -(msbp->msb_delta), - rsvd); - break; - } - /* FALLTHROUGH */ -#endif - default: - status = xfs_mod_incore_sb_unlocked(mp, - msbp->msb_field, - -(msbp->msb_delta), - rsvd); - break; - } + status = xfs_mod_incore_sb_unlocked(mp, + msbp->msb_field, -(msbp->msb_delta), rsvd); ASSERT(status == 0); msbp--; } @@ -1667,525 +1577,3 @@ xfs_mount_log_sbunit( xfs_mod_sb(tp, fields); xfs_trans_commit(tp, 0, NULL); } - - -#ifdef HAVE_PERCPU_SB -/* - * Per-cpu incore superblock counters - * - * Simple concept, difficult implementation - * - * Basically, replace the incore superblock counters with a distributed per cpu - * counter for contended fields (e.g. free block count). - * - * Difficulties arise in that the incore sb is used for ENOSPC checking, and - * hence needs to be accurately read when we are running low on space. Hence - * there is a method to enable and disable the per-cpu counters based on how - * much "stuff" is available in them. - * - * Basically, a counter is enabled if there is enough free resource to justify - * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local - * ENOSPC), then we disable the counters to synchronise all callers and - * re-distribute the available resources. - * - * If, once we redistributed the available resources, we still get a failure, - * we disable the per-cpu counter and go through the slow path. - * - * The slow path is the current xfs_mod_incore_sb() function. 
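
A toy, single-threaded model of the control flow described above — per-CPU slots, a "disabled" flag, and a drain-to-global fallback. The real code wraps the fast path in a per-cpu lock and the slow path in the superblock lock, and tries a rebalance before giving up; all names here are illustrative:

	#define NCPUS 8

	static long pc[NCPUS];		/* per-cpu shares of the counter */
	static long global_count;	/* authoritative total */
	static int  disabled;		/* set when the shares run too low */

	static int mod_counter(int cpu, long delta)
	{
		if (!disabled && pc[cpu] + delta >= 0) {
			pc[cpu] += delta;	/* fast path: this slot only */
			return 0;
		}
		/* slow path: drain every share back to the global total */
		disabled = 1;
		for (int i = 0; i < NCPUS; i++) {
			global_count += pc[i];
			pc[i] = 0;
		}
		if (global_count + delta < 0)
			return -1;		/* a real ENOSPC */
		global_count += delta;
		return 0;
	}

Re-enabling — clearing the flag and redistributing global_count across the slots — is the balance operation sketched further below.
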
- * When we disable a per-cpu counter, we need to drain its resources back to
- * the global superblock. We do this after disabling the counter to prevent
- * more threads from queueing up on the counter.
- *
- * Essentially, this means that we still need a lock in the fast path to enable
- * synchronisation between the global counters and the per-cpu counters. This
- * is not a problem because the lock will be local to a CPU almost all the time
- * and have little contention except when we get to ENOSPC conditions.
- *
- * Basically, this lock becomes a barrier that enables us to lock out the fast
- * path while we do things like enabling and disabling counters and
- * synchronising the counters.
- *
- * Locking rules:
- *
- * 	1. XFS_SB_LOCK() before picking up per-cpu locks
- * 	2. per-cpu locks always picked up via for_each_online_cpu() order
- * 	3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
- * 	4. modifying per-cpu counters requires holding per-cpu lock
- * 	5. modifying global counters requires holding XFS_SB_LOCK
- * 	6. enabling or disabling a counter requires holding the XFS_SB_LOCK
- * 	   and _none_ of the per-cpu locks.
- *
- * Disabled counters are only ever re-enabled by a balance operation
- * that results in more free resources per CPU than a given threshold.
- * To ensure counters don't remain disabled, they are rebalanced when
- * the global resource goes above a higher threshold (i.e. some hysteresis
- * is present to prevent thrashing).
- */
-
-/*
- * hot-plug CPU notifier support.
- *
- * We cannot use the hotcpu_register() function because it does
- * not allow notifier instances. We need a notifier per filesystem
- * as we need to be able to identify the filesystem to balance
- * the counters out. This is achieved by having a notifier block
- * embedded in the xfs_mount_t and doing pointer magic to get the
- * mount pointer from the notifier block address.
- */
-STATIC int
-xfs_icsb_cpu_notify(
-	struct notifier_block *nfb,
-	unsigned long action,
-	void *hcpu)
-{
-	xfs_icsb_cnts_t *cntp;
-	xfs_mount_t	*mp;
-	int		s;
-
-	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
-	cntp = (xfs_icsb_cnts_t *)
-			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
-	switch (action) {
-	case CPU_UP_PREPARE:
-		/* Easy Case - initialize the area and locks, and
-		 * then rebalance when online does everything else for us. */
-		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
-		break;
-	case CPU_ONLINE:
-		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
-		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
-		break;
-	case CPU_DEAD:
-		/* Disable all the counters, then fold the dead cpu's
-		 * count into the total on the global superblock and
-		 * re-enable the counters.
*/ - s = XFS_SB_LOCK(mp); - xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); - xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); - xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); - - mp->m_sb.sb_icount += cntp->icsb_icount; - mp->m_sb.sb_ifree += cntp->icsb_ifree; - mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks; - - memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); - - xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED); - xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED); - xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED); - XFS_SB_UNLOCK(mp, s); - break; - } - - return NOTIFY_OK; -} - -int -xfs_icsb_init_counters( - xfs_mount_t *mp) -{ - xfs_icsb_cnts_t *cntp; - int i; - - mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t); - if (mp->m_sb_cnts == NULL) - return -ENOMEM; - - mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify; - mp->m_icsb_notifier.priority = 0; - register_cpu_notifier(&mp->m_icsb_notifier); - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); - } - /* - * start with all counters disabled so that the - * initial balance kicks us off correctly - */ - mp->m_icsb_counters = -1; - return 0; -} - -STATIC void -xfs_icsb_destroy_counters( - xfs_mount_t *mp) -{ - if (mp->m_sb_cnts) { - unregister_cpu_notifier(&mp->m_icsb_notifier); - free_percpu(mp->m_sb_cnts); - } -} - -STATIC inline void -xfs_icsb_lock_cntr( - xfs_icsb_cnts_t *icsbp) -{ - while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) { - ndelay(1000); - } -} - -STATIC inline void -xfs_icsb_unlock_cntr( - xfs_icsb_cnts_t *icsbp) -{ - clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags); -} - - -STATIC inline void -xfs_icsb_lock_all_counters( - xfs_mount_t *mp) -{ - xfs_icsb_cnts_t *cntp; - int i; - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - xfs_icsb_lock_cntr(cntp); - } -} - -STATIC inline void -xfs_icsb_unlock_all_counters( - xfs_mount_t *mp) -{ - xfs_icsb_cnts_t *cntp; - int i; - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - xfs_icsb_unlock_cntr(cntp); - } -} - -STATIC void -xfs_icsb_count( - xfs_mount_t *mp, - xfs_icsb_cnts_t *cnt, - int flags) -{ - xfs_icsb_cnts_t *cntp; - int i; - - memset(cnt, 0, sizeof(xfs_icsb_cnts_t)); - - if (!(flags & XFS_ICSB_LAZY_COUNT)) - xfs_icsb_lock_all_counters(mp); - - for_each_online_cpu(i) { - cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i); - cnt->icsb_icount += cntp->icsb_icount; - cnt->icsb_ifree += cntp->icsb_ifree; - cnt->icsb_fdblocks += cntp->icsb_fdblocks; - } - - if (!(flags & XFS_ICSB_LAZY_COUNT)) - xfs_icsb_unlock_all_counters(mp); -} - -STATIC int -xfs_icsb_counter_disabled( - xfs_mount_t *mp, - xfs_sb_field_t field) -{ - ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); - return test_bit(field, &mp->m_icsb_counters); -} - -STATIC int -xfs_icsb_disable_counter( - xfs_mount_t *mp, - xfs_sb_field_t field) -{ - xfs_icsb_cnts_t cnt; - - ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS)); - - xfs_icsb_lock_all_counters(mp); - if (!test_and_set_bit(field, &mp->m_icsb_counters)) { - /* drain back to superblock */ - - xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT); - switch(field) { - case XFS_SBS_ICOUNT: - mp->m_sb.sb_icount = cnt.icsb_icount; - break; - case XFS_SBS_IFREE: - mp->m_sb.sb_ifree = cnt.icsb_ifree; - break; - case XFS_SBS_FDBLOCKS: - mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; - break; - default: - BUG(); - } - } - - 
xfs_icsb_unlock_all_counters(mp);
-
-	return 0;
-}
-
-STATIC void
-xfs_icsb_enable_counter(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	uint64_t	count,
-	uint64_t	resid)
-{
-	xfs_icsb_cnts_t	*cntp;
-	int		i;
-
-	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
-
-	xfs_icsb_lock_all_counters(mp);
-	for_each_online_cpu(i) {
-		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
-		switch (field) {
-		case XFS_SBS_ICOUNT:
-			cntp->icsb_icount = count + resid;
-			break;
-		case XFS_SBS_IFREE:
-			cntp->icsb_ifree = count + resid;
-			break;
-		case XFS_SBS_FDBLOCKS:
-			cntp->icsb_fdblocks = count + resid;
-			break;
-		default:
-			BUG();
-			break;
-		}
-		resid = 0;
-	}
-	clear_bit(field, &mp->m_icsb_counters);
-	xfs_icsb_unlock_all_counters(mp);
-}
-
-STATIC void
-xfs_icsb_sync_counters_int(
-	xfs_mount_t	*mp,
-	int		flags)
-{
-	xfs_icsb_cnts_t	cnt;
-	int		s;
-
-	/* Step 1: lock all counters */
-	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		s = XFS_SB_LOCK(mp);
-
-	xfs_icsb_count(mp, &cnt, flags);
-
-	/* Step 2: update mp->m_sb fields */
-	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
-		mp->m_sb.sb_icount = cnt.icsb_icount;
-	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
-		mp->m_sb.sb_ifree = cnt.icsb_ifree;
-	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
-		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-
-	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-		XFS_SB_UNLOCK(mp, s);
-}
-
-/*
- * Accurate update of per-cpu counters to incore superblock
- */
-STATIC void
-xfs_icsb_sync_counters(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_sync_counters_int(mp, 0);
-}
-
-/*
- * lazy addition used for things like df, background sb syncs, etc
- */
-void
-xfs_icsb_sync_counters_lazy(
-	xfs_mount_t	*mp)
-{
-	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
-}
-
-/*
- * Balance and enable/disable counters as necessary.
- *
- * Thresholds for re-enabling counters are somewhat magic.  The inode
- * counts are chosen to match a single on-disk allocation chunk per CPU,
- * and the free block count is kept far enough from zero that we don't
- * thrash when we get near ENOSPC.
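
The redistribution itself divides the global value evenly, hands the remainder to the first CPU (mirroring the do_div() usage below), and refuses to re-enable when the per-CPU share would fall under the threshold. As a sketch:

	/*
	 * Split 'total' across n slots, slot 0 absorbing the remainder.
	 * Returns 0 (stay disabled) if each share would be below 'threshold'.
	 */
	static int balance(long long total, long long *slot, int n,
			   long long threshold)
	{
		long long share = total / n;
		long long resid = total % n;
		int i;

		if (share < threshold)
			return 0;
		for (i = 0; i < n; i++) {
			slot[i] = share + resid;
			resid = 0;	/* only the first slot gets it */
		}
		return 1;
	}
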
- */
-#define XFS_ICSB_INO_CNTR_REENABLE	64
-#define XFS_ICSB_FDBLK_CNTR_REENABLE	512
-STATIC void
-xfs_icsb_balance_counter(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		flags)
-{
-	uint64_t	count, resid = 0;
-	int		weight = num_online_cpus();
-	int		s;
-
-	if (!(flags & XFS_ICSB_SB_LOCKED))
-		s = XFS_SB_LOCK(mp);
-
-	/* disable counter and sync counter */
-	xfs_icsb_disable_counter(mp, field);
-
-	/* update counters - first CPU gets residual */
-	switch (field) {
-	case XFS_SBS_ICOUNT:
-		count = mp->m_sb.sb_icount;
-		resid = do_div(count, weight);
-		if (count < XFS_ICSB_INO_CNTR_REENABLE)
-			goto out;
-		break;
-	case XFS_SBS_IFREE:
-		count = mp->m_sb.sb_ifree;
-		resid = do_div(count, weight);
-		if (count < XFS_ICSB_INO_CNTR_REENABLE)
-			goto out;
-		break;
-	case XFS_SBS_FDBLOCKS:
-		count = mp->m_sb.sb_fdblocks;
-		resid = do_div(count, weight);
-		if (count < XFS_ICSB_FDBLK_CNTR_REENABLE)
-			goto out;
-		break;
-	default:
-		BUG();
-		break;
-	}
-
-	xfs_icsb_enable_counter(mp, field, count, resid);
-out:
-	if (!(flags & XFS_ICSB_SB_LOCKED))
-		XFS_SB_UNLOCK(mp, s);
-}
-
-STATIC int
-xfs_icsb_modify_counters_int(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd,
-	int		flags)
-{
-	xfs_icsb_cnts_t	*icsbp;
-	long long	lcounter;	/* long counter for 64 bit fields */
-	int		cpu, s, locked = 0;
-	int		ret = 0, balance_done = 0;
-
-again:
-	cpu = get_cpu();
-	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
-	xfs_icsb_lock_cntr(icsbp);
-	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
-		goto slow_path;
-
-	switch (field) {
-	case XFS_SBS_ICOUNT:
-		lcounter = icsbp->icsb_icount;
-		lcounter += delta;
-		if (unlikely(lcounter < 0))
-			goto slow_path;
-		icsbp->icsb_icount = lcounter;
-		break;
-
-	case XFS_SBS_IFREE:
-		lcounter = icsbp->icsb_ifree;
-		lcounter += delta;
-		if (unlikely(lcounter < 0))
-			goto slow_path;
-		icsbp->icsb_ifree = lcounter;
-		break;
-
-	case XFS_SBS_FDBLOCKS:
-		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
-
-		lcounter = icsbp->icsb_fdblocks;
-		lcounter += delta;
-		if (unlikely(lcounter < 0))
-			goto slow_path;
-		icsbp->icsb_fdblocks = lcounter;
-		break;
-	default:
-		BUG();
-		break;
-	}
-	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
-	if (locked)
-		XFS_SB_UNLOCK(mp, s);
-	return 0;
-
-	/*
-	 * The slow path needs to be run with the SBLOCK
-	 * held so that we prevent other threads from
-	 * attempting to run this path at the same time.
-	 * This provides exclusion for the balancing code,
-	 * and exclusive fallback if the balance does not
-	 * provide enough resources to continue in an unlocked
-	 * manner.
-	 */
-slow_path:
-	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
-
-	/* need to hold superblock in case we need
-	 * to disable a counter */
-	if (!(flags & XFS_ICSB_SB_LOCKED)) {
-		s = XFS_SB_LOCK(mp);
-		locked = 1;
-		flags |= XFS_ICSB_SB_LOCKED;
-	}
-	if (!balance_done) {
-		xfs_icsb_balance_counter(mp, field, flags);
-		balance_done = 1;
-		goto again;
-	} else {
-		/*
-		 * we might not have enough on this local
-		 * cpu to allocate for a bulk request.
-		 * We need to drain this field from all CPUs
-		 * and disable the counter fastpath
-		 */
-		xfs_icsb_disable_counter(mp, field);
-	}
-
-	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
-
-	if (locked)
-		XFS_SB_UNLOCK(mp, s);
-	return ret;
-}
-
-STATIC int
-xfs_icsb_modify_counters(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd)
-{
-	return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
-}
-
-/*
- * Called when superblock is already locked
- */
-STATIC int
-xfs_icsb_modify_counters_locked(
-	xfs_mount_t	*mp,
-	xfs_sb_field_t	field,
-	int		delta,
-	int		rsvd)
-{
-	return xfs_icsb_modify_counters_int(mp, field, delta,
-						rsvd, XFS_ICSB_SB_LOCKED);
-}
-#endif
diff --git a/trunk/fs/xfs/xfs_mount.h b/trunk/fs/xfs/xfs_mount.h
index ebd73960e9db..cd3cf9613a00 100644
--- a/trunk/fs/xfs/xfs_mount.h
+++ b/trunk/fs/xfs/xfs_mount.h
@@ -267,34 +267,6 @@ typedef struct xfs_ioops {
 #define XFS_IODONE(vfsp) \
 	(*(mp)->m_io_ops.xfs_iodone)(vfsp)
 
-#ifdef HAVE_PERCPU_SB
-
-/*
- * Valid per-cpu incore superblock counters. Note that if you add new counters,
- * you may need to define new counter disabled bit field descriptors as there
- * are more possible fields in the superblock than can fit in a bitfield on a
- * 32 bit platform. The XFS_SBS_* values for the current counters just fit.
- */
-typedef struct xfs_icsb_cnts {
-	uint64_t	icsb_fdblocks;
-	uint64_t	icsb_ifree;
-	uint64_t	icsb_icount;
-	unsigned long	icsb_flags;
-} xfs_icsb_cnts_t;
-
-#define XFS_ICSB_FLAG_LOCK	(1 << 0)	/* counter lock bit */
-
-#define XFS_ICSB_SB_LOCKED	(1 << 0)	/* sb already locked */
-#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */
-
-extern int	xfs_icsb_init_counters(struct xfs_mount *);
-extern void	xfs_icsb_sync_counters_lazy(struct xfs_mount *);
-
-#else
-#define xfs_icsb_init_counters(mp)	(0)
-#define xfs_icsb_sync_counters_lazy(mp)	do { } while (0)
-#endif
 
 typedef struct xfs_mount {
 	bhv_desc_t		m_bhv;		/* vfs xfs behavior */
@@ -400,11 +372,6 @@ typedef struct xfs_mount {
 	struct xfs_qmops	m_qm_ops;	/* vector of XQM ops */
 	struct xfs_ioops	m_io_ops;	/* vector of I/O ops */
 	atomic_t		m_active_trans;	/* number trans frozen */
-#ifdef HAVE_PERCPU_SB
-	xfs_icsb_cnts_t		*m_sb_cnts;	/* per-cpu superblock counters */
-	unsigned long		m_icsb_counters; /* disabled per-cpu counters */
-	struct notifier_block	m_icsb_notifier; /* hotplug cpu notifier */
-#endif
 } xfs_mount_t;
 
 /*
@@ -419,6 +386,8 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_FS_SHUTDOWN	(1ULL << 4)	/* atomic stop of all filesystem
						   operations, typically for
						   disk errors in metadata */
+#define XFS_MOUNT_NOATIME	(1ULL << 5)	/* don't modify inode access
						   times on reads */
 #define XFS_MOUNT_RETERR	(1ULL << 6)	/* return alignment errors to
						   user */
 #define XFS_MOUNT_NOALIGN	(1ULL << 7)	/* turn off stripe alignment
@@ -442,8 +411,6 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_DIRSYNC	(1ULL << 21)	/* synchronous directory ops */
 #define XFS_MOUNT_COMPAT_IOSIZE	(1ULL << 22)	/* don't report large preferred
						 * I/O size in stat() */
-#define XFS_MOUNT_NO_PERCPU_SB	(1ULL << 23)	/* don't use per-cpu superblock
						   counters */
 
 /*
@@ -505,6 +472,11 @@ xfs_preferred_iosize(xfs_mount_t *mp)
 #define XFS_CORRUPT_INCORE	0x8	/* Corrupt in-memory data structures */
 #define XFS_SHUTDOWN_REMOTE_REQ 0x10	/* Shutdown came from remote cell */
 
+/*
+ * xflags for xfs_syncsub
+ */
+#define XFS_XSYNC_RELOC		0x01
+
 /*
  * Flags for xfs_mountfs
  */
@@ -576,8 +548,6 @@ extern void	xfs_unmountfs_close(xfs_mount_t *, struct cred *);
 extern int
xfs_unmountfs_writesb(xfs_mount_t *); extern int xfs_unmount_flush(xfs_mount_t *, int); extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int); -extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t, - int, int); extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, uint, int); extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); diff --git a/trunk/fs/xfs/xfs_rw.h b/trunk/fs/xfs/xfs_rw.h index e63795644478..de85eefb7966 100644 --- a/trunk/fs/xfs/xfs_rw.h +++ b/trunk/fs/xfs/xfs_rw.h @@ -89,7 +89,6 @@ extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp, */ extern int xfs_rwlock(bhv_desc_t *bdp, vrwlock_t write_lock); extern void xfs_rwunlock(bhv_desc_t *bdp, vrwlock_t write_lock); -extern int xfs_setattr(bhv_desc_t *bdp, vattr_t *vap, int flags, cred_t *credp); extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf, xfs_off_t offset, cred_t *credp, int flags); extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state, diff --git a/trunk/fs/xfs/xfs_trans.c b/trunk/fs/xfs/xfs_trans.c index 2918956553a5..d3d714e6b32a 100644 --- a/trunk/fs/xfs/xfs_trans.c +++ b/trunk/fs/xfs/xfs_trans.c @@ -55,140 +55,9 @@ STATIC void xfs_trans_committed(xfs_trans_t *, int); STATIC void xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int); STATIC void xfs_trans_free(xfs_trans_t *); -kmem_zone_t *xfs_trans_zone; +kmem_zone_t *xfs_trans_zone; -/* - * Reservation functions here avoid a huge stack in xfs_trans_init - * due to register overflow from temporaries in the calculations. - */ - -STATIC uint -xfs_calc_write_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_itruncate_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_rename_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_link_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_remove_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_symlink_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_create_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_mkdir_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_ifree_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_ichange_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_growdata_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_GROWDATA_LOG_RES(mp); -} - -STATIC uint -xfs_calc_growrtalloc_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_GROWRTALLOC_LOG_RES(mp); -} - -STATIC uint -xfs_calc_growrtzero_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_GROWRTZERO_LOG_RES(mp); -} - -STATIC uint -xfs_calc_growrtfree_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_GROWRTFREE_LOG_RES(mp); -} - -STATIC uint -xfs_calc_swrite_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_SWRITE_LOG_RES(mp); -} - -STATIC uint -xfs_calc_writeid_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_WRITEID_LOG_RES(mp); -} - -STATIC uint 
-xfs_calc_addafork_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_attrinval_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_ATTRINVAL_LOG_RES(mp); -} - -STATIC uint -xfs_calc_attrset_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_attrrm_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); -} - -STATIC uint -xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp) -{ - return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp); -} - /* * Initialize the precomputed transaction reservation values * in the mount structure. @@ -200,27 +69,39 @@ xfs_trans_init( xfs_trans_reservations_t *resp; resp = &(mp->m_reservations); - resp->tr_write = xfs_calc_write_reservation(mp); - resp->tr_itruncate = xfs_calc_itruncate_reservation(mp); - resp->tr_rename = xfs_calc_rename_reservation(mp); - resp->tr_link = xfs_calc_link_reservation(mp); - resp->tr_remove = xfs_calc_remove_reservation(mp); - resp->tr_symlink = xfs_calc_symlink_reservation(mp); - resp->tr_create = xfs_calc_create_reservation(mp); - resp->tr_mkdir = xfs_calc_mkdir_reservation(mp); - resp->tr_ifree = xfs_calc_ifree_reservation(mp); - resp->tr_ichange = xfs_calc_ichange_reservation(mp); - resp->tr_growdata = xfs_calc_growdata_reservation(mp); - resp->tr_swrite = xfs_calc_swrite_reservation(mp); - resp->tr_writeid = xfs_calc_writeid_reservation(mp); - resp->tr_addafork = xfs_calc_addafork_reservation(mp); - resp->tr_attrinval = xfs_calc_attrinval_reservation(mp); - resp->tr_attrset = xfs_calc_attrset_reservation(mp); - resp->tr_attrrm = xfs_calc_attrrm_reservation(mp); - resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp); - resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp); - resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp); - resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp); + resp->tr_write = + (uint)(XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_itruncate = + (uint)(XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_rename = + (uint)(XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_link = (uint)XFS_CALC_LINK_LOG_RES(mp); + resp->tr_remove = + (uint)(XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_symlink = + (uint)(XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_create = + (uint)(XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_mkdir = + (uint)(XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_ifree = + (uint)(XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_ichange = + (uint)(XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_growdata = (uint)XFS_CALC_GROWDATA_LOG_RES(mp); + resp->tr_swrite = (uint)XFS_CALC_SWRITE_LOG_RES(mp); + resp->tr_writeid = (uint)XFS_CALC_WRITEID_LOG_RES(mp); + resp->tr_addafork = + (uint)(XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_attrinval = (uint)XFS_CALC_ATTRINVAL_LOG_RES(mp); + resp->tr_attrset = + (uint)(XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_attrrm = + (uint)(XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp)); + resp->tr_clearagi = (uint)XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp); + resp->tr_growrtalloc = (uint)XFS_CALC_GROWRTALLOC_LOG_RES(mp); + resp->tr_growrtzero = (uint)XFS_CALC_GROWRTZERO_LOG_RES(mp); + resp->tr_growrtfree = (uint)XFS_CALC_GROWRTFREE_LOG_RES(mp); } /* diff --git a/trunk/fs/xfs/xfs_trans.h 
b/trunk/fs/xfs/xfs_trans.h index e48befa4e337..d77901c07f63 100644 --- a/trunk/fs/xfs/xfs_trans.h +++ b/trunk/fs/xfs/xfs_trans.h @@ -380,7 +380,7 @@ typedef struct xfs_trans { xfs_trans_header_t t_header; /* header for in-log trans */ unsigned int t_busy_free; /* busy descs free */ xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ - unsigned long t_pflags; /* saved process flags state */ + xfs_pflags_t t_pflags; /* saved pflags state */ } xfs_trans_t; #endif /* __KERNEL__ */ diff --git a/trunk/fs/xfs/xfs_vfsops.c b/trunk/fs/xfs/xfs_vfsops.c index d4ec4dfaf19c..b6ad370fab3d 100644 --- a/trunk/fs/xfs/xfs_vfsops.c +++ b/trunk/fs/xfs/xfs_vfsops.c @@ -55,7 +55,7 @@ #include "xfs_clnt.h" #include "xfs_fsops.h" -STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); +STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); int xfs_init(void) @@ -77,12 +77,11 @@ xfs_init(void) "xfs_bmap_free_item"); xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), "xfs_btree_cur"); + xfs_inode_zone = kmem_zone_init(sizeof(xfs_inode_t), "xfs_inode"); xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); - xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); - xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); /* * The size of the zone allocated buf log item is the maximum @@ -94,30 +93,17 @@ xfs_init(void) (((XFS_MAX_BLOCKSIZE / XFS_BLI_CHUNK) / NBWORD) * sizeof(int))), "xfs_buf_item"); - xfs_efd_zone = - kmem_zone_init((sizeof(xfs_efd_log_item_t) + - ((XFS_EFD_MAX_FAST_EXTENTS - 1) * - sizeof(xfs_extent_t))), + xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), "xfs_efd_item"); - xfs_efi_zone = - kmem_zone_init((sizeof(xfs_efi_log_item_t) + - ((XFS_EFI_MAX_FAST_EXTENTS - 1) * - sizeof(xfs_extent_t))), + xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), "xfs_efi_item"); - - /* - * These zones warrant special memory allocator hints - */ - xfs_inode_zone = - kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | - KM_ZONE_SPREAD, NULL); - xfs_ili_zone = - kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", - KM_ZONE_SPREAD, NULL); - xfs_chashlist_zone = - kmem_zone_init_flags(sizeof(xfs_chashlist_t), "xfs_chashlist", - KM_ZONE_SPREAD, NULL); + xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); + xfs_ili_zone = kmem_zone_init(sizeof(xfs_inode_log_item_t), "xfs_ili"); + xfs_chashlist_zone = kmem_zone_init(sizeof(xfs_chashlist_t), + "xfs_chashlist"); + xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); /* * Allocate global trace buffers. 
@@ -190,18 +176,18 @@ xfs_cleanup(void)
 	ktrace_free(xfs_alloc_trace_buf);
 #endif
-	kmem_zone_destroy(xfs_bmap_free_item_zone);
-	kmem_zone_destroy(xfs_btree_cur_zone);
-	kmem_zone_destroy(xfs_inode_zone);
-	kmem_zone_destroy(xfs_trans_zone);
-	kmem_zone_destroy(xfs_da_state_zone);
-	kmem_zone_destroy(xfs_dabuf_zone);
-	kmem_zone_destroy(xfs_buf_item_zone);
-	kmem_zone_destroy(xfs_efd_zone);
-	kmem_zone_destroy(xfs_efi_zone);
-	kmem_zone_destroy(xfs_ifork_zone);
-	kmem_zone_destroy(xfs_ili_zone);
-	kmem_zone_destroy(xfs_chashlist_zone);
+	kmem_cache_destroy(xfs_bmap_free_item_zone);
+	kmem_cache_destroy(xfs_btree_cur_zone);
+	kmem_cache_destroy(xfs_inode_zone);
+	kmem_cache_destroy(xfs_trans_zone);
+	kmem_cache_destroy(xfs_da_state_zone);
+	kmem_cache_destroy(xfs_dabuf_zone);
+	kmem_cache_destroy(xfs_buf_item_zone);
+	kmem_cache_destroy(xfs_efd_zone);
+	kmem_cache_destroy(xfs_efi_zone);
+	kmem_cache_destroy(xfs_ifork_zone);
+	kmem_cache_destroy(xfs_ili_zone);
+	kmem_cache_destroy(xfs_chashlist_zone);
 }
 
 /*
@@ -272,6 +258,8 @@ xfs_start_flags(
 		mp->m_inoadd = XFS_INO64_OFFSET;
 	}
 #endif
+	if (ap->flags & XFSMNT_NOATIME)
+		mp->m_flags |= XFS_MOUNT_NOATIME;
 	if (ap->flags & XFSMNT_RETERR)
 		mp->m_flags |= XFS_MOUNT_RETERR;
 	if (ap->flags & XFSMNT_NOALIGN)
@@ -632,7 +620,7 @@ xfs_quiesce_fs(
 	xfs_mount_t	*mp)
 {
 	int	count = 0, pincount;
-
+	
 	xfs_refcache_purge_mp(mp);
 	xfs_flush_buftarg(mp->m_ddev_targp, 0);
 	xfs_finish_reclaim_all(mp, 0);
@@ -643,7 +631,7 @@ xfs_quiesce_fs(
 	 * meta data (typically directory updates).
 	 * Which then must be flushed and logged before
 	 * we can write the unmount record.
-	 */
+	 */ 
 	do {
 		xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL);
 		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
@@ -666,6 +654,11 @@ xfs_mntupdate(
 	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
 	int		error;
 
+	if (args->flags & XFSMNT_NOATIME)
+		mp->m_flags |= XFS_MOUNT_NOATIME;
+	else
+		mp->m_flags &= ~XFS_MOUNT_NOATIME;
+
 	if (args->flags & XFSMNT_BARRIER)
 		mp->m_flags |= XFS_MOUNT_BARRIER;
 	else
@@ -821,7 +814,6 @@ xfs_statvfs(
 
 	statp->f_type = XFS_SB_MAGIC;
 
-	xfs_icsb_sync_counters_lazy(mp);
 	s = XFS_SB_LOCK(mp);
 	statp->f_bsize = sbp->sb_blocksize;
 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
@@ -1229,7 +1221,7 @@ xfs_sync_inodes(
 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 			error = xfs_itobp(mp, NULL, ip,
-					  &dip, &bp, 0, 0);
+					  &dip, &bp, 0);
 			if (!error) {
 				xfs_buf_relse(bp);
 			} else {
@@ -1698,7 +1690,10 @@ xfs_parseargs(
 	int			iosize;
 
 	args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
-	args->flags |= XFSMNT_IDELETE;
+
+#if 0	/* XXX: off by default, until some remaining issues ironed out */
+	args->flags |= XFSMNT_IDELETE; /* default to on */
+#endif
 
 	if (!options)
 		goto done;
@@ -1908,6 +1903,7 @@ xfs_showargs(
 		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
 		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
 		{ XFS_MOUNT_OSYNCISOSYNC,	"," MNTOPT_OSYNCISOSYNC },
+		{ XFS_MOUNT_IDELETE,		"," MNTOPT_NOIKEEP },
 		{ 0, NULL }
 	};
 	struct proc_xfs_info	*xfs_infop;
@@ -1943,8 +1939,6 @@ xfs_showargs(
 		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 
-	if (!(mp->m_flags & XFS_MOUNT_IDELETE))
-		seq_printf(m, "," MNTOPT_IKEEP);
 	if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
 		seq_printf(m, "," MNTOPT_LARGEIO);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
diff --git a/trunk/fs/xfs/xfs_vnodeops.c b/trunk/fs/xfs/xfs_vnodeops.c
index a478f42e63ff..eaab355f5a89 100644
--- a/trunk/fs/xfs/xfs_vnodeops.c
+++ b/trunk/fs/xfs/xfs_vnodeops.c
@@ -615,7 +615,6 @@ xfs_setattr(
 			code = xfs_igrow_start(ip, vap->va_size, credp);
 		}
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		vn_iowait(vp); /* wait for the completion of any pending DIOs */
 		if (!code)
 			code = xfs_itruncate_data(ip, vap->va_size);
 		if (code) {
@@ -1557,7 +1556,7 @@ xfs_release(
 			if ((error = xfs_inactive_free_eofblocks(mp, ip)))
 				return error;
 			/* Update linux inode block count after free above */
-			vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
+			LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
 				ip->i_d.di_nblocks + ip->i_delayed_blks);
 		}
 	}
@@ -1638,7 +1637,7 @@ xfs_inactive(
 			if ((error = xfs_inactive_free_eofblocks(mp, ip)))
 				return VN_INACTIVE_CACHE;
 			/* Update linux inode block count after free above */
-			vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
+			LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
 				ip->i_d.di_nblocks + ip->i_delayed_blks);
 		}
 		goto out;
@@ -3187,7 +3186,7 @@ xfs_rmdir(
 	/* Fall through to std_return with error = 0 or the errno
 	 * from xfs_trans_commit. */
-	std_return:
+std_return:
 	if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
 		(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
 					dir_vp, DM_RIGHT_NULL,
@@ -3197,12 +3196,12 @@ xfs_rmdir(
 	}
 	return error;
 
- error1:
+error1:
 	xfs_bmap_cancel(&free_list);
 	cancel_flags |= XFS_TRANS_ABORT;
 	/* FALLTHROUGH */
- error_return:
+error_return:
 	xfs_trans_cancel(tp, cancel_flags);
 	goto std_return;
 }
@@ -4311,10 +4310,8 @@ xfs_free_file_space(
 	ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);
 	if (attr_flags & ATTR_NOLOCK)
 		need_iolock = 0;
-	if (need_iolock) {
+	if (need_iolock)
 		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-		vn_iowait(vp);	/* wait for the completion of any pending DIOs */
-	}
 
 	rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog),
 			(__uint8_t)NBPP);
diff --git a/trunk/include/asm-alpha/mmu_context.h b/trunk/include/asm-alpha/mmu_context.h
index 0c017fc181c1..6f92482cc96c 100644
--- a/trunk/include/asm-alpha/mmu_context.h
+++ b/trunk/include/asm-alpha/mmu_context.h
@@ -231,8 +231,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
 
-	for_each_online_cpu(i)
-		mm->context[i] = 0;
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i))
+			mm->context[i] = 0;
 	if (tsk != current)
 		task_thread_info(tsk)->pcb.ptbr
 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
diff --git a/trunk/include/asm-alpha/topology.h b/trunk/include/asm-alpha/topology.h
index 420ccde6b916..eb740e280d9c 100644
--- a/trunk/include/asm-alpha/topology.h
+++ b/trunk/include/asm-alpha/topology.h
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
 	cpumask_t node_cpu_mask = CPU_MASK_NONE;
 	int cpu;
 
-	for_each_online_cpu(cpu) {
-		if (cpu_to_node(cpu) == node)
+	for(cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
 			cpu_set(cpu, node_cpu_mask);
 	}
diff --git a/trunk/include/asm-generic/bug.h b/trunk/include/asm-generic/bug.h
index 1a565a9d2fa7..400c2b41896e 100644
--- a/trunk/include/asm-generic/bug.h
+++ b/trunk/include/asm-generic/bug.h
@@ -7,7 +7,7 @@
 #ifdef CONFIG_BUG
 #ifndef HAVE_ARCH_BUG
 #define BUG() do { \
-	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
+	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
 	panic("BUG!"); \
 } while (0)
 #endif
@@ -19,7 +19,7 @@
 #ifndef HAVE_ARCH_WARN_ON
 #define WARN_ON(condition) do { \
 	if (unlikely((condition)!=0)) { \
-		printk("BUG: warning at %s:%d/%s()\n", __FILE__, __LINE__, __FUNCTION__); \
+		printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
 		dump_stack(); \
 	} \
 } while (0)
diff --git a/trunk/include/asm-generic/percpu.h b/trunk/include/asm-generic/percpu.h
index 78cf45547e31..9044aeb37828 100644
--- a/trunk/include/asm-generic/percpu.h
+++ b/trunk/include/asm-generic/percpu.h
@@ -19,9 +19,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
-		memcpy((pcpudst)+__per_cpu_offset[__i],		\
-		       (src), (size));				\
+	for (__i = 0; __i < NR_CPUS; __i++)			\
+		if (cpu_possible(__i))				\
+			memcpy((pcpudst)+__per_cpu_offset[__i],	\
+			       (src), (size));			\
 } while (0)
 #else /* ! SMP */
diff --git a/trunk/include/asm-i386/alternative.h b/trunk/include/asm-i386/alternative.h
deleted file mode 100644
index e201decea0c9..000000000000
--- a/trunk/include/asm-i386/alternative.h
+++ /dev/null
@@ -1,129 +0,0 @@
-#ifndef _I386_ALTERNATIVE_H
-#define _I386_ALTERNATIVE_H
-
-#ifdef __KERNEL__
-
-struct alt_instr {
-	u8 *instr;		/* original instruction */
-	u8 *replacement;
-	u8  cpuid;		/* cpuid bit set for replacement */
-	u8  instrlen;		/* length of original instruction */
-	u8  replacementlen;	/* length of new instruction, <= instrlen */
-	u8  pad;
-};
-
-extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
-
-struct module;
-extern void alternatives_smp_module_add(struct module *mod, char *name,
-					void *locks, void *locks_end,
-					void *text, void *text_end);
-extern void alternatives_smp_module_del(struct module *mod);
-extern void alternatives_smp_switch(int smp);
-
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
-		      ".section .altinstructions,\"a\"\n"	\
-		      "  .align 4\n"				\
-		      "  .long 661b\n"            /* label */		\
-		      "  .long 663f\n"		  /* new instruction */	\
-		      "  .byte %c0\n"             /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
-		      ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Pecularities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
-		      ".section .altinstructions,\"a\"\n"	\
-		      "  .align 4\n"				\
-		      "  .long 661b\n"            /* label */		\
-		      "  .long 663f\n"		  /* new instruction */	\
-		      "  .byte %c0\n"             /* feature bit */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
-		      ".previous" :: "i" (feature), ##input)
-
-/*
- * Alternative inline assembly for SMP.
- *
- * alternative_smp() takes two versions (SMP first, UP second) and is
- * for more complex stuff such as spinlocks.
- *
- * The LOCK_PREFIX macro defined here replaces the LOCK and
- * LOCK_PREFIX macros used everywhere in the source tree.
- *
- * SMP alternatives use the same data structures as the other
- * alternatives and the X86_FEATURE_UP flag to indicate the case of a
- * UP system running a SMP kernel. The existing apply_alternatives()
- * works fine for patching a SMP kernel for UP.
- *
- * The SMP alternative tables can be kept after boot and contain both
- * UP and SMP versions of the instructions to allow switching back to
- * SMP at runtime, when hotplugging in a new CPU, which is especially
- * useful in virtualized environments.
- *
- * The very common lock prefix is handled as special case in a
- * separate table which is a pure address list without replacement ptr
- * and size information. That keeps the table sizes small.
- */
-
-#ifdef CONFIG_SMP
-#define alternative_smp(smpinstr, upinstr, args...)	\
-	asm volatile ("661:\n\t" smpinstr "\n662:\n"		\
-		      ".section .smp_altinstructions,\"a\"\n"	\
-		      "  .align 4\n"				\
-		      "  .long 661b\n"            /* label */		\
-		      "  .long 663f\n"		  /* new instruction */	\
-		      "  .byte 0x68\n"            /* X86_FEATURE_UP */	\
-		      "  .byte 662b-661b\n"       /* sourcelen */	\
-		      "  .byte 664f-663f\n"       /* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .smp_altinstr_replacement,\"awx\"\n"	\
-		      "663:\n\t" upinstr "\n"     /* replacement */	\
-		      "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
-		      ".previous" : args)
-
-#define LOCK_PREFIX \
-		".section .smp_locks,\"a\"\n"	\
-		"  .align 4\n"			\
-		"  .long 661f\n" /* address */	\
-		".previous\n"			\
-		"661:\n\tlock; "
-
-#else /* ! CONFIG_SMP */
-#define alternative_smp(smpinstr, upinstr, args...) \
-	asm volatile (upinstr : args)
-#define LOCK_PREFIX ""
-#endif
-
-#endif /* _I386_ALTERNATIVE_H */
diff --git a/trunk/include/asm-i386/arch_hooks.h b/trunk/include/asm-i386/arch_hooks.h
index 238cf4275b96..28b96a6fb9fa 100644
--- a/trunk/include/asm-i386/arch_hooks.h
+++ b/trunk/include/asm-i386/arch_hooks.h
@@ -24,7 +24,4 @@ extern void trap_init_hook(void);
 extern void time_init_hook(void);
 extern void mca_nmi_hook(void);
 
-extern int setup_early_printk(char *);
-extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
-
 #endif
diff --git a/trunk/include/asm-i386/atomic.h b/trunk/include/asm-i386/atomic.h
index 22d80ece95cb..de649d3aa2d4 100644
--- a/trunk/include/asm-i386/atomic.h
+++ b/trunk/include/asm-i386/atomic.h
@@ -10,6 +10,12 @@
  * resource counting etc..
  */
 
+#ifdef CONFIG_SMP
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
 /*
  * Make sure gcc doesn't try to be clever and move things around
  * on us. We need to use _exactly_ the address the user gave us,
@@ -46,7 +52,7 @@ typedef struct { volatile int counter; } atomic_t;
 static __inline__ void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
+		LOCK "addl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -61,7 +67,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
 static __inline__ void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
+		LOCK "subl %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
@@ -80,7 +86,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
+		LOCK "subl %2,%0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -95,7 +101,7 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
+		LOCK "incl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -109,7 +115,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 static __inline__ void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
+		LOCK "decl %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }
@@ -127,7 +133,7 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
+		LOCK "decl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -146,7 +152,7 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
+		LOCK "incl %0; sete %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"m" (v->counter) : "memory");
 	return c != 0;
@@ -166,7 +172,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
 	unsigned char c;
 
 	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
+		LOCK "addl %2,%0; sets %1"
 		:"=m" (v->counter), "=qm" (c)
 		:"ir" (i), "m" (v->counter) : "memory");
 	return c;
@@ -189,7 +195,7 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 	/* Modern 486+ processor */
 	__i = i;
 	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1;"
+		LOCK "xaddl %0, %1;"
 		:"=r"(i)
 		:"m"(v->counter), "0"(i));
 	return i + __i;
@@ -225,14 +231,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -242,11 +242,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
+__asm__ __volatile__(LOCK "andl %0,%1" \
 : : "r" (~(mask)),"m" (*addr) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
+__asm__ __volatile__(LOCK "orl %0,%1" \
 : : "r" (mask),"m" (*(addr)) : "memory")
 
 /* Atomic operations are already serializing on x86 */
diff --git a/trunk/include/asm-i386/bitops.h b/trunk/include/asm-i386/bitops.h
index 7d20b95edb3b..88e6ca248cd7 100644
--- a/trunk/include/asm-i386/bitops.h
+++ b/trunk/include/asm-i386/bitops.h
@@ -7,7 +7,6 @@
 
 #include
 #include
-#include
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -17,6 +16,12 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+
 #define ADDR (*(volatile long *) addr)
 
 /**
diff --git a/trunk/include/asm-i386/cache.h b/trunk/include/asm-i386/cache.h
index ca15c9c665cf..615911e5bd24 100644
--- a/trunk/include/asm-i386/cache.h
+++ b/trunk/include/asm-i386/cache.h
@@ -10,6 +10,4 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
 #endif
diff --git a/trunk/include/asm-i386/cpufeature.h b/trunk/include/asm-i386/cpufeature.h
index 5c0b5876b931..c4ec2a4d8fdf 100644
--- a/trunk/include/asm-i386/cpufeature.h
+++ b/trunk/include/asm-i386/cpufeature.h
@@ -70,7 +70,6 @@
 #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
 #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
 #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/trunk/include/asm-i386/mach-default/do_timer.h b/trunk/include/asm-i386/mach-default/do_timer.h
index 6312c3e79814..56211414fc95 100644
--- a/trunk/include/asm-i386/mach-default/do_timer.h
+++ b/trunk/include/asm-i386/mach-default/do_timer.h
@@ -18,7 +18,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
 {
 	do_timer(regs);
 #ifndef CONFIG_SMP
-	update_process_times(user_mode_vm(regs));
+	update_process_times(user_mode(regs));
 #endif
 /*
  * In the SMP case we use the local APIC timer interrupt to do the
diff --git a/trunk/include/asm-i386/mach-es7000/mach_mpparse.h b/trunk/include/asm-i386/mach-es7000/mach_mpparse.h
index 99f66be240be..4a0637a3e208 100644
--- a/trunk/include/asm-i386/mach-es7000/mach_mpparse.h
+++ b/trunk/include/asm-i386/mach-es7000/mach_mpparse.h
@@ -30,8 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
 	return 0;
 }
 
-#ifdef CONFIG_ACPI
-static inline int es7000_check_dsdt(void)
+static inline int es7000_check_dsdt()
 {
 	struct acpi_table_header *header = NULL;
 	if(!acpi_get_table_header_early(ACPI_DSDT, &header))
@@ -55,11 +54,6 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 	}
 	return 0;
 }
-#else
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-	return 0;
-}
-#endif
+
 #endif /* __ASM_MACH_MPPARSE_H */
diff --git a/trunk/include/asm-i386/mach-visws/do_timer.h b/trunk/include/asm-i386/mach-visws/do_timer.h
index 95568e6ca91c..92d638fc8b11 100644
--- a/trunk/include/asm-i386/mach-visws/do_timer.h
+++ b/trunk/include/asm-i386/mach-visws/do_timer.h
@@ -11,7 +11,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
 	do_timer(regs);
 #ifndef CONFIG_SMP
-	update_process_times(user_mode_vm(regs));
+	update_process_times(user_mode(regs));
 #endif
 /*
  * In the SMP case we use the local APIC timer interrupt to do the
diff --git a/trunk/include/asm-i386/mach-voyager/do_timer.h b/trunk/include/asm-i386/mach-voyager/do_timer.h
index eaf518098981..ae510e5d0d78 100644
--- a/trunk/include/asm-i386/mach-voyager/do_timer.h
+++ b/trunk/include/asm-i386/mach-voyager/do_timer.h
@@ -5,7 +5,7 @@ static inline void do_timer_interrupt_hook(struct pt_regs *regs)
 {
 	do_timer(regs);
 #ifndef CONFIG_SMP
-	update_process_times(user_mode_vm(regs));
+	update_process_times(user_mode(regs));
 #endif
 	voyager_timer_interrupt(regs);
diff --git a/trunk/include/asm-i386/mpspec.h b/trunk/include/asm-i386/mpspec.h
index 62113d3bfdc2..64a0b8e6afeb 100644
--- a/trunk/include/asm-i386/mpspec.h
+++ b/trunk/include/asm-i386/mpspec.h
@@ -22,6 +22,7 @@ extern int mp_bus_id_to_type [MAX_MP_BUSSES];
 extern int mp_irq_entries;
 extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
 extern int mpc_default_type;
+extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
 extern unsigned long mp_lapic_addr;
 extern int pic_mode;
 extern int using_apic_timer;
diff --git a/trunk/include/asm-i386/mtrr.h b/trunk/include/asm-i386/mtrr.h
index 64cf937c7e33..5b6ceda68c5f 100644
--- a/trunk/include/asm-i386/mtrr.h
+++ b/trunk/include/asm-i386/mtrr.h
@@ -25,7 +25,6 @@
 
 #include
 #include
-#include
 
 #define MTRR_IOCTL_BASE	'M'
diff --git a/trunk/include/asm-i386/mutex.h b/trunk/include/asm-i386/mutex.h
index 05a538531229..9b2199e829f3 100644
--- a/trunk/include/asm-i386/mutex.h
+++ b/trunk/include/asm-i386/mutex.h
@@ -9,8 +9,6 @@
 #ifndef _ASM_MUTEX_H
 #define _ASM_MUTEX_H
 
-#include "asm/alternative.h"
-
 /**
  * __mutex_fastpath_lock - try to take the lock by moving the count
  * from 1 to a 0 value
@@ -29,7 +27,7 @@ do {									\
 	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
 									\
 	__asm__ __volatile__(						\
-		LOCK_PREFIX "   decl (%%eax)    \n"			\
+		LOCK	"   decl (%%eax)	\n"			\
 			"   js 2f		\n"			\
 			"1:			\n"			\
 									\
@@ -85,7 +83,7 @@ do {									\
 	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
 									\
 	__asm__ __volatile__(						\
-		LOCK_PREFIX "   incl (%%eax)    \n"			\
+		LOCK	"   incl (%%eax)	\n"			\
 			"   jle 2f		\n"			\
 			"1:			\n"			\
 									\
diff --git a/trunk/include/asm-i386/pgtable-2level.h b/trunk/include/asm-i386/pgtable-2level.h
index 27bde973abc7..74ef721b534d 100644
--- a/trunk/include/asm-i386/pgtable-2level.h
+++ b/trunk/include/asm-i386/pgtable-2level.h
@@ -61,6 +61,4 @@ static inline int pte_exec_kernel(pte_t pte)
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
-void vmalloc_sync_all(void);
-
 #endif /* _I386_PGTABLE_2LEVEL_H */
diff --git a/trunk/include/asm-i386/pgtable-3level.h b/trunk/include/asm-i386/pgtable-3level.h
index 36a5aa63cbbf..f1a8b454920a 100644
--- a/trunk/include/asm-i386/pgtable-3level.h
+++ b/trunk/include/asm-i386/pgtable-3level.h
@@ -152,6 +152,4 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 
 #define __pmd_free_tlb(tlb, x)		do { } while (0)
 
-#define vmalloc_sync_all() ((void)0)
-
 #endif /* _I386_PGTABLE_3LEVEL_H */
diff --git a/trunk/include/asm-i386/rwlock.h b/trunk/include/asm-i386/rwlock.h
index 94f00195d543..b57cc7afdf7e 100644
--- a/trunk/include/asm-i386/rwlock.h
+++ b/trunk/include/asm-i386/rwlock.h
@@ -21,23 +21,21 @@
 #define RW_LOCK_BIAS_STR	"0x01000000"
 
 #define __build_read_lock_ptr(rw, helper)   \
-	alternative_smp("lock; subl $1,(%0)\n\t" \
-			"jns 1f\n" \
-			"call " helper "\n\t" \
-			"1:\n", \
-			"subl $1,(%0)\n\t", \
-			:"a" (rw) : "memory")
+	asm volatile(LOCK "subl $1,(%0)\n\t" \
+		     "jns 1f\n" \
+		     "call " helper "\n\t" \
+		     "1:\n" \
+		     ::"a" (rw) : "memory")
 
 #define __build_read_lock_const(rw, helper)   \
-	alternative_smp("lock; subl $1,%0\n\t" \
-			"jns 1f\n" \
-			"pushl %%eax\n\t" \
-			"leal %0,%%eax\n\t" \
-			"call " helper "\n\t" \
-			"popl %%eax\n\t" \
-			"1:\n", \
-			"subl $1,%0\n\t", \
-			"=m" (*(volatile int *)rw) : : "memory")
+	asm volatile(LOCK "subl $1,%0\n\t" \
+		     "jns 1f\n" \
+		     "pushl %%eax\n\t" \
+		     "leal %0,%%eax\n\t" \
+		     "call " helper "\n\t" \
+		     "popl %%eax\n\t" \
+		     "1:\n" \
+		     :"=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_read_lock(rw, helper)	do { \
						if (__builtin_constant_p(rw)) \
@@ -47,23 +45,21 @@
					} while (0)
 
 #define __build_write_lock_ptr(rw, helper) \
-	alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-			"jz 1f\n" \
-			"call " helper "\n\t" \
-			"1:\n", \
-			"subl $" RW_LOCK_BIAS_STR ",(%0)\n\t", \
-			:"a" (rw) : "memory")
+	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
+		     "jz 1f\n" \
+		     "call " helper "\n\t" \
+		     "1:\n" \
+		     ::"a" (rw) : "memory")
 
 #define __build_write_lock_const(rw, helper) \
-	alternative_smp("lock; subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
-			"jz 1f\n" \
-			"pushl %%eax\n\t" \
-			"leal %0,%%eax\n\t" \
-			"call " helper "\n\t" \
-			"popl %%eax\n\t" \
-			"1:\n", \
-			"subl $" RW_LOCK_BIAS_STR ",%0\n\t", \
-			"=m" (*(volatile int *)rw) : : "memory")
+	asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
+		     "jz 1f\n" \
+		     "pushl %%eax\n\t" \
+		     "leal %0,%%eax\n\t" \
+		     "call " helper "\n\t" \
+		     "popl %%eax\n\t" \
+		     "1:\n" \
+		     :"=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_write_lock(rw, helper)	do { \
						if (__builtin_constant_p(rw)) \
diff --git a/trunk/include/asm-i386/semaphore.h b/trunk/include/asm-i386/semaphore.h
index f7a0f310c524..6a42b2142fd6 100644
--- a/trunk/include/asm-i386/semaphore.h
+++ b/trunk/include/asm-i386/semaphore.h
@@ -99,7 +99,7 @@ static inline void down(struct semaphore * sem)
 	might_sleep();
 	__asm__ __volatile__(
 		"# atomic down operation\n\t"
-		LOCK_PREFIX "decl %0\n\t"     /* --sem->count */
+		LOCK "decl %0\n\t"     /* --sem->count */
 		"js 2f\n"
 		"1:\n"
 		LOCK_SECTION_START("")
@@ -123,7 +123,7 @@ static inline int down_interruptible(struct semaphore * sem)
 	might_sleep();
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
+		LOCK "decl %1\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -148,7 +148,7 @@ static inline int down_trylock(struct semaphore * sem)
 
 	__asm__ __volatile__(
 		"# atomic interruptible down operation\n\t"
-		LOCK_PREFIX "decl %1\n\t"     /* --sem->count */
+		LOCK "decl %1\n\t"     /* --sem->count */
 		"js 2f\n\t"
 		"xorl %0,%0\n"
 		"1:\n"
@@ -173,7 +173,7 @@ static inline void up(struct semaphore * sem)
 {
 	__asm__ __volatile__(
 		"# atomic up operation\n\t"
-		LOCK_PREFIX "incl %0\n\t"     /* ++sem->count */
+		LOCK "incl %0\n\t"     /* ++sem->count */
 		"jle 2f\n"
 		"1:\n"
 		LOCK_SECTION_START("")
diff --git a/trunk/include/asm-i386/spinlock.h b/trunk/include/asm-i386/spinlock.h
index d76b7693cf1d..23604350cdf4 100644
--- a/trunk/include/asm-i386/spinlock.h
+++ b/trunk/include/asm-i386/spinlock.h
@@ -35,41 +35,31 @@
 #define __raw_spin_lock_string_flags \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
-	"jns 5f\n" \
+	"jns 4f\n\t" \
 	"2:\t" \
 	"testl $0x200, %1\n\t" \
-	"jz 4f\n\t" \
-	"sti\n" \
+	"jz 3f\n\t" \
+	"sti\n\t" \
 	"3:\t" \
 	"rep;nop\n\t" \
 	"cmpb $0, %0\n\t" \
 	"jle 3b\n\t" \
 	"cli\n\t" \
 	"jmp 1b\n" \
-	"4:\t" \
-	"rep;nop\n\t" \
-	"cmpb $0, %0\n\t" \
-	"jg 1b\n\t" \
-	"jmp 4b\n" \
-	"5:\n\t"
-
-#define __raw_spin_lock_string_up \
-	"\n\tdecb %0"
+	"4:\n\t"
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
-	alternative_smp(
-		__raw_spin_lock_string,
-		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : : "memory");
+	__asm__ __volatile__(
+		__raw_spin_lock_string
+		:"=m" (lock->slock) : : "memory");
 }
 
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
-	alternative_smp(
-		__raw_spin_lock_string_flags,
-		__raw_spin_lock_string_up,
-		"=m" (lock->slock) : "r" (flags) : "memory");
+	__asm__ __volatile__(
+		__raw_spin_lock_string_flags
+		:"=m" (lock->slock) : "r" (flags) : "memory");
 }
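
[The hunks above replace the runtime-patched alternative_smp() sequences with a plain compile-time LOCK prefix. As a rough illustration of that convention — a minimal userspace C sketch, not kernel code; the LOCK macro, atomic_inc_int() and the counter are illustrative stand-ins — the prefix is simply pasted in front of the read-modify-write instruction on SMP builds and dropped on UP builds:]

	#include <stdio.h>

	#ifdef CONFIG_SMP
	#define LOCK "lock ; "	/* bus-lock the RMW instruction on SMP */
	#else
	#define LOCK ""		/* a single CPU cannot race with itself */
	#endif

	/* x86 GCC inline asm; emits "lock ; incl" when built with -DCONFIG_SMP */
	static inline void atomic_inc_int(volatile int *v)
	{
		__asm__ __volatile__(LOCK "incl %0" : "=m" (*v) : "m" (*v));
	}

	int main(void)
	{
		volatile int counter = 0;

		atomic_inc_int(&counter);
		printf("%d\n", counter);	/* prints 1 */
		return 0;
	}

[The design trade-off being reverted here: the deleted alternative_smp() machinery made this choice at boot time by patching the instruction stream, while the compile-time LOCK above fixes it when the kernel is built.]
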
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
@@ -188,12 +178,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
				 : "=m" (rw->lock) : : "memory");
 }
diff --git a/trunk/include/asm-i386/system.h b/trunk/include/asm-i386/system.h
index d0d8d7448d88..399145a247f2 100644
--- a/trunk/include/asm-i386/system.h
+++ b/trunk/include/asm-i386/system.h
@@ -352,6 +352,67 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 
 #endif
 
+#ifdef __KERNEL__
+struct alt_instr {
+	__u8 *instr;		/* original instruction */
+	__u8 *replacement;
+	__u8  cpuid;		/* cpuid bit set for replacement */
+	__u8  instrlen;		/* length of original instruction */
+	__u8  replacementlen;	/* length of new instruction, <= instrlen */
+	__u8  pad;
+};
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * length of oldinstr must be longer or equal the length of newinstr
+ * It can be padded with nops as needed.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature)	\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
+		      ".section .altinstructions,\"a\"\n"	\
+		      "  .align 4\n"				\
+		      "  .long 661b\n"            /* label */		\
+		      "  .long 663f\n"		  /* new instruction */	\
+		      "  .byte %c0\n"             /* feature bit */	\
+		      "  .byte 662b-661b\n"       /* sourcelen */	\
+		      "  .byte 664f-663f\n"       /* replacementlen */	\
+		      ".previous\n"					\
+		      ".section .altinstr_replacement,\"ax\"\n"		\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
+		      ".previous" :: "i" (feature) : "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Pecularities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement maake sure to pad to the worst case length.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...)	\
+	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
+		      ".section .altinstructions,\"a\"\n"	\
+		      "  .align 4\n"				\
+		      "  .long 661b\n"            /* label */		\
+		      "  .long 663f\n"		  /* new instruction */	\
+		      "  .byte %c0\n"             /* feature bit */	\
+		      "  .byte 662b-661b\n"       /* sourcelen */	\
+		      "  .byte 664f-663f\n"       /* replacementlen */	\
+		      ".previous\n"					\
+		      ".section .altinstr_replacement,\"ax\"\n"		\
+		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
+		      ".previous" :: "i" (feature), ##input)
+
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -497,6 +558,5 @@ static inline void sched_cacheflush(void)
 }
 
 extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 #endif
diff --git a/trunk/include/asm-i386/uaccess.h b/trunk/include/asm-i386/uaccess.h
index 371457b1ceb6..3f1337c34208 100644
--- a/trunk/include/asm-i386/uaccess.h
+++ b/trunk/include/asm-i386/uaccess.h
@@ -197,15 +197,13 @@ extern void __put_user_8(void);
 
 #define put_user(x,ptr)						\
 ({	int __ret_pu;						\
-	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
-	__pu_val = x;						\
 	switch(sizeof(*(ptr))) {				\
-	case 1: __put_user_1(__pu_val, ptr); break;		\
-	case 2: __put_user_2(__pu_val, ptr); break;		\
-	case 4: __put_user_4(__pu_val, ptr); break;		\
-	case 8: __put_user_8(__pu_val, ptr); break;		\
-	default:__put_user_X(__pu_val, ptr); break;		\
+	case 1: __put_user_1(x, ptr); break;			\
+	case 2: __put_user_2(x, ptr); break;			\
+	case 4: __put_user_4(x, ptr); break;			\
+	case 8: __put_user_8(x, ptr); break;			\
+	default:__put_user_X(x, ptr); break;			\
 	}							\
 	__ret_pu;						\
 })
diff --git a/trunk/include/asm-i386/unistd.h b/trunk/include/asm-i386/unistd.h
index d8afd0e3b81a..dc81a55dd94d 100644
--- a/trunk/include/asm-i386/unistd.h
+++ b/trunk/include/asm-i386/unistd.h
@@ -347,9 +347,9 @@ __syscall_return(type,__res); \
 type name(type1 arg1) \
 { \
 long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
+__asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
+	: "0" (__NR_##name),"b" ((long)(arg1)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -357,10 +357,9 @@ __syscall_return(type,__res); \
 type name(type1 arg1,type2 arg2) \
 { \
 long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
+__asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
-	: "memory"); \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -368,9 +367,9 @@ __syscall_return(type,__res); \
 type name(type1 arg1,type2 arg2,type3 arg3) \
 { \
 long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
+__asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
 		  "d" ((long)(arg3)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -379,9 +378,9 @@ __syscall_return(type,__res); \
 type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
 { \
 long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
+__asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
 	  "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -391,12 +390,10 @@ __syscall_return(type,__res); \
 type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
 { \
 long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
-                  "int $0x80 ; pop %%ebx" \
+__asm__ volatile ("int $0x80" \
 	: "=a" (__res) \
-	: "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-	: "memory"); \
+	: "0" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) : "memory"); \
 __syscall_return(type,__res); \
 }
 
@@ -405,14 +402,11 @@ __syscall_return(type,__res); \
 type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
 { \
 long __res; \
-	struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
-__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
-                  "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
-                  "pop %%ebx ; pop %%ebp" \
+__asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; int $0x80 ; pop %%ebp" \
 	: "=a" (__res) \
-	: "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
-	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-	: "memory"); \
+	: "i" (__NR_##name),"b" ((long)(arg1)),"c" ((long)(arg2)), \
+	  "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)), \
+	  "0" ((long)(arg6)) : "memory"); \
 __syscall_return(type,__res); \
 }
diff --git a/trunk/include/asm-ia64/atomic.h b/trunk/include/asm-ia64/atomic.h
index 569ec7574baf..d3e0dfa99e1f 100644
--- a/trunk/include/asm-ia64/atomic.h
+++ b/trunk/include/asm-ia64/atomic.h
@@ -95,14 +95,8 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/trunk/include/asm-ia64/cache.h b/trunk/include/asm-ia64/cache.h
index f0a104db8f20..40dd25195d65 100644
--- a/trunk/include/asm-ia64/cache.h
+++ b/trunk/include/asm-ia64/cache.h
@@ -25,6 +25,4 @@
 # define SMP_CACHE_BYTES	(1 << 3)
 #endif
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
 #endif /* _ASM_IA64_CACHE_H */
diff --git a/trunk/include/asm-m68k/atomic.h b/trunk/include/asm-m68k/atomic.h
index 732d696d31a6..862e497c2645 100644
--- a/trunk/include/asm-m68k/atomic.h
+++ b/trunk/include/asm-m68k/atomic.h
@@ -175,14 +175,8 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/trunk/include/asm-parisc/cache.h b/trunk/include/asm-parisc/cache.h
index ae50f8e12eed..93f179f13ce8 100644
--- a/trunk/include/asm-parisc/cache.h
+++ b/trunk/include/asm-parisc/cache.h
@@ -29,8 +29,6 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
 extern void flush_data_cache_local(void *);  /* flushes local data-cache only */
 extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
 #ifdef CONFIG_SMP
diff --git a/trunk/include/asm-powerpc/percpu.h b/trunk/include/asm-powerpc/percpu.h
index 464301cd0d03..e31922c50e53 100644
--- a/trunk/include/asm-powerpc/percpu.h
+++ b/trunk/include/asm-powerpc/percpu.h
@@ -27,9 +27,10 @@
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
-		memcpy((pcpudst)+__per_cpu_offset(__i),		\
-		       (src), (size));				\
+	for (__i = 0; __i < NR_CPUS; __i++)			\
+		if (cpu_possible(__i))				\
+			memcpy((pcpudst)+__per_cpu_offset(__i),	\
+			       (src), (size));			\
 } while (0)
 
 extern void setup_per_cpu_areas(void);
diff --git a/trunk/include/asm-s390/atomic.h b/trunk/include/asm-s390/atomic.h
index de1d9926aa60..be6fefe223d6 100644
--- a/trunk/include/asm-s390/atomic.h
+++ b/trunk/include/asm-s390/atomic.h
@@ -89,15 +89,10 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
+
 	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == u))
-			break;
-		old = atomic_cmpxchg(v, c, c + a);
-		if (likely(old == c))
-			break;
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
 		c = old;
-	}
 	return c != u;
 }
 
@@ -172,15 +167,10 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
					  long long a, long long u)
 {
 	long long c, old;
+
 	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == u))
-			break;
-		old = atomic64_cmpxchg(v, c, c + a);
-		if (likely(old == c))
-			break;
+	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
 		c = old;
-	}
 	return c != u;
 }
diff --git a/trunk/include/asm-s390/percpu.h b/trunk/include/asm-s390/percpu.h
index e10ed87094f0..123fcaca295e 100644
--- a/trunk/include/asm-s390/percpu.h
+++ b/trunk/include/asm-s390/percpu.h
@@ -46,9 +46,10 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
-		memcpy((pcpudst)+__per_cpu_offset[__i],		\
-		       (src), (size));				\
+	for (__i = 0; __i < NR_CPUS; __i++)			\
+		if (cpu_possible(__i))				\
+			memcpy((pcpudst)+__per_cpu_offset[__i],	\
+			       (src), (size));			\
 } while (0)
 #else /* ! SMP */
diff --git a/trunk/include/asm-sparc64/atomic.h b/trunk/include/asm-sparc64/atomic.h
index 468eb48d8142..25256bdc8aae 100644
--- a/trunk/include/asm-sparc64/atomic.h
+++ b/trunk/include/asm-sparc64/atomic.h
@@ -78,15 +78,9 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	}							\
-	likely(c != (u));					\
+	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/trunk/include/asm-sparc64/cache.h b/trunk/include/asm-sparc64/cache.h
index e9df17acedde..f7d35a2ae9b8 100644
--- a/trunk/include/asm-sparc64/cache.h
+++ b/trunk/include/asm-sparc64/cache.h
@@ -13,6 +13,4 @@
 #define SMP_CACHE_BYTES_SHIFT	6
 #define SMP_CACHE_BYTES		(1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
 #endif
diff --git a/trunk/include/asm-sparc64/percpu.h b/trunk/include/asm-sparc64/percpu.h
index 82032e159a76..aea4e51e7cd1 100644
--- a/trunk/include/asm-sparc64/percpu.h
+++ b/trunk/include/asm-sparc64/percpu.h
@@ -26,9 +26,10 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
-		memcpy((pcpudst)+__per_cpu_offset(__i),		\
-		       (src), (size));				\
+	for (__i = 0; __i < NR_CPUS; __i++)			\
+		if (cpu_possible(__i))				\
+			memcpy((pcpudst)+__per_cpu_offset(__i),	\
+			       (src), (size));			\
 } while (0)
 #else /* ! SMP */
diff --git a/trunk/include/asm-um/alternative.h b/trunk/include/asm-um/alternative.h
deleted file mode 100644
index b6434396bd42..000000000000
--- a/trunk/include/asm-um/alternative.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __UM_ALTERNATIVE_H
-#define __UM_ALTERNATIVE_H
-
-#include "asm/arch/alternative.h"
-
-#endif
diff --git a/trunk/include/asm-x86_64/atomic.h b/trunk/include/asm-x86_64/atomic.h
index cecbf7baa6aa..4b5cd553e772 100644
--- a/trunk/include/asm-x86_64/atomic.h
+++ b/trunk/include/asm-x86_64/atomic.h
@@ -405,14 +405,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 ({								\
 	int c, old;						\
 	c = atomic_read(v);					\
-	for (;;) {						\
-		if (unlikely(c == (u)))				\
-			break;					\
-		old = atomic_cmpxchg((v), c, c + (a));		\
-		if (likely(old == c))				\
-			break;					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
 		c = old;					\
-	}							\
 	c != (u);						\
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/trunk/include/asm-x86_64/cache.h b/trunk/include/asm-x86_64/cache.h
index c8043a16152e..263f0a211ed7 100644
--- a/trunk/include/asm-x86_64/cache.h
+++ b/trunk/include/asm-x86_64/cache.h
@@ -20,8 +20,6 @@
   __attribute__((__section__(".data.page_aligned")))
 #endif
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
-
 #endif
 
 #endif
diff --git a/trunk/include/asm-x86_64/percpu.h b/trunk/include/asm-x86_64/percpu.h
index 4405b4adeaba..29a6b0408f75 100644
--- a/trunk/include/asm-x86_64/percpu.h
+++ b/trunk/include/asm-x86_64/percpu.h
@@ -26,9 +26,10 @@
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
-		memcpy((pcpudst)+__per_cpu_offset(__i),		\
-		       (src), (size));				\
+	for (__i = 0; __i < NR_CPUS; __i++)			\
+		if (cpu_possible(__i))				\
+			memcpy((pcpudst)+__per_cpu_offset(__i),	\
+			       (src), (size));			\
 } while (0)
 
 extern void setup_per_cpu_areas(void);
diff --git a/trunk/include/linux/cache.h b/trunk/include/linux/cache.h
index cc4b3aafad9a..d22e632f41fb 100644
--- a/trunk/include/linux/cache.h
+++ b/trunk/include/linux/cache.h
@@ -13,7 +13,9 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
-#ifndef __read_mostly
+#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#else
 #define __read_mostly
 #endif
diff --git a/trunk/include/linux/cdrom.h b/trunk/include/linux/cdrom.h
index 3c9b0bc05123..b68fdf1f3156 100644
--- a/trunk/include/linux/cdrom.h
+++ b/trunk/include/linux/cdrom.h
@@ -378,6 +378,7 @@ struct cdrom_generic_command
 #define CDC_MEDIA_CHANGED	0x80	/* media changed */
 #define CDC_PLAY_AUDIO		0x100	/* audio functions */
 #define CDC_RESET		0x200	/* hard reset device */
+#define CDC_IOCTLS		0x400	/* driver has non-standard ioctls */
 #define CDC_DRIVE_STATUS	0x800	/* driver implements drive status */
 #define CDC_GENERIC_PACKET	0x1000	/* driver implements generic packets */
 #define CDC_CD_R		0x2000	/* drive is a CD-R */
@@ -973,7 +974,9 @@ struct cdrom_device_ops {
 	int (*reset) (struct cdrom_device_info *);
 	/* play stuff */
 	int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-
+	/* dev-specific */
+	int (*dev_ioctl) (struct cdrom_device_info *,
+			  unsigned int, unsigned long);
 	/* driver specifications */
 	const int capability;	/* capability flags */
 	int n_minors;		/* number of active minor devices */
diff --git a/trunk/include/linux/eventpoll.h b/trunk/include/linux/eventpoll.h
index 1e4bdfcf83a2..1289f0ec4c00 100644
--- a/trunk/include/linux/eventpoll.h
+++ b/trunk/include/linux/eventpoll.h
@@ -52,12 +52,7 @@ struct file;
 #ifdef CONFIG_EPOLL
 
 /* Used to initialize the epoll bits inside the "struct file" */
-static inline void eventpoll_init_file(struct file *file)
-{
-	INIT_LIST_HEAD(&file->f_ep_links);
-	spin_lock_init(&file->f_ep_lock);
-}
-
+void eventpoll_init_file(struct file *file);
 
 /* Used to release the epoll bits inside the "struct file" */
 void eventpoll_release_file(struct file *file);
@@ -90,6 +85,7 @@ static inline void eventpoll_release(struct file *file)
 	eventpoll_release_file(file);
 }
 
+
 #else
 
 static inline void eventpoll_init_file(struct file *file) {}
diff --git a/trunk/include/linux/ext3_fs.h b/trunk/include/linux/ext3_fs.h
index e7239f2f97a1..c0272d73ab20 100644
--- a/trunk/include/linux/ext3_fs.h
+++ b/trunk/include/linux/ext3_fs.h
@@ -772,12 +772,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
 
 /* inode.c */
-int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
-struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_block_handle(handle_t *handle, struct inode *inode,
-	sector_t iblock, struct buffer_head *bh_result, int create,
-	int extend_disksize);
+extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
 
 extern void ext3_read_inode (struct inode *);
 extern int ext3_write_inode (struct inode *, int);
diff --git a/trunk/include/linux/ext3_fs_i.h b/trunk/include/linux/ext3_fs_i.h
index 7abf90147180..e71dd98dbcae 100644
--- a/trunk/include/linux/ext3_fs_i.h
+++ b/trunk/include/linux/ext3_fs_i.h
@@ -19,7 +19,6 @@
 #include
 #include
 #include
-#include
 
 struct ext3_reserve_window {
 	__u32	_rsv_start;	/* First byte reserved */
@@ -123,16 +122,16 @@ struct ext3_inode_info {
 	__u16 i_extra_isize;
 
 	/*
-	 * truncate_mutex is for serialising ext3_truncate() against
+	 * truncate_sem is for serialising ext3_truncate() against
 	 * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
 	 * data tree are chopped off during truncate. We can't do that in
 	 * ext3 because whenever we perform intermediate commits during
 	 * truncate, the inode and all the metadata blocks *must* be in a
 	 * consistent state which allows truncation of the orphans to restart
 	 * during recovery.  Hence we must fix the get_block-vs-truncate race
-	 * by other means, so we have truncate_mutex.
+	 * by other means, so we have truncate_sem.
 	 */
-	struct mutex truncate_mutex;
+	struct semaphore truncate_sem;
 	struct inode vfs_inode;
 };
diff --git a/trunk/include/linux/file.h b/trunk/include/linux/file.h
index 9f7c2513866f..9901b850f2e4 100644
--- a/trunk/include/linux/file.h
+++ b/trunk/include/linux/file.h
@@ -10,7 +10,6 @@
 #include
 #include
 #include
-#include
 
 /*
  * The default fd array needs to be at least BITS_PER_LONG,
@@ -18,22 +17,10 @@
  */
 #define NR_OPEN_DEFAULT BITS_PER_LONG
 
-/*
- * The embedded_fd_set is a small fd_set,
- * suitable for most tasks (which open <= BITS_PER_LONG files)
- */
-struct embedded_fd_set {
-	unsigned long fds_bits[1];
-};
-
-/*
- * More than this number of fds: we use a separately allocated fd_set
- */
-#define EMBEDDED_FD_SET_SIZE	(BITS_PER_BYTE * sizeof(struct embedded_fd_set))
-
 struct fdtable {
 	unsigned int max_fds;
 	int max_fdset;
+	int next_fd;
 	struct file ** fd;	/* current fd array */
 	fd_set *close_on_exec;
 	fd_set *open_fds;
@@ -46,20 +33,13 @@ struct fdtable {
  * Open file table structure
  */
 struct files_struct {
-  /*
-   * read mostly part
-   */
	atomic_t count;
	struct fdtable *fdt;
	struct fdtable fdtab;
-  /*
-   * written part on a separate cache line in SMP
-   */
-	spinlock_t file_lock ____cacheline_aligned_in_smp;
-	int next_fd;
-	struct embedded_fd_set close_on_exec_init;
-	struct embedded_fd_set open_fds_init;
+	fd_set close_on_exec_init;
+	fd_set open_fds_init;
	struct file * fd_array[NR_OPEN_DEFAULT];
+	spinlock_t file_lock;	/* Protects concurrent writers.  Nests inside tsk->alloc_lock */
 };
 
 #define files_fdtable(files) (rcu_dereference((files)->fdt))
diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h
index f9c9dea636d0..128d0082522c 100644
--- a/trunk/include/linux/fs.h
+++ b/trunk/include/linux/fs.h
@@ -397,8 +397,8 @@ struct block_device {
	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
	struct inode *		bd_inode;	/* will die */
	int			bd_openers;
-	struct mutex		bd_mutex;	/* open/close mutex */
-	struct mutex		bd_mount_mutex;	/* mount mutex */
+	struct semaphore	bd_sem;	/* open/close mutex */
+	struct semaphore	bd_mount_sem;	/* mount mutex */
	struct list_head	bd_inodes;
	void *			bd_holder;
	int			bd_holders;
@@ -509,7 +509,7 @@ struct inode {
 
 #ifdef CONFIG_INOTIFY
	struct list_head	inotify_watches; /* watches on this inode */
-	struct mutex		inotify_mutex;	/* protects the watches list */
+	struct semaphore	inotify_sem;	/* protects the watches list */
 #endif
 
	unsigned long		i_state;
@@ -847,7 +847,7 @@ struct super_block {
	 * The next field is for VFS *only*. No filesystems have any business
	 * even looking at it. You had been warned.
	 */
-	struct mutex s_vfs_rename_mutex;	/* Kludge */
+	struct semaphore s_vfs_rename_sem;	/* Kludge */
 
	/* Granuality of c/m/atime in ns.
	   Cannot be worse than a second */
@@ -1115,18 +1115,6 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
	__mark_inode_dirty(inode, I_DIRTY_SYNC);
 }
 
-static inline void inode_inc_link_count(struct inode *inode)
-{
-	inode->i_nlink++;
-	mark_inode_dirty(inode);
-}
-
-static inline void inode_dec_link_count(struct inode *inode)
-{
-	inode->i_nlink--;
-	mark_inode_dirty(inode);
-}
-
 extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
 static inline void file_accessed(struct file *file)
 {
@@ -1546,7 +1534,7 @@ extern void destroy_inode(struct inode *);
 extern struct inode *new_inode(struct super_block *);
 extern int remove_suid(struct dentry *);
 extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-extern struct mutex iprune_mutex;
+extern struct semaphore iprune_sem;
 
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 extern void remove_inode_hash(struct inode *);
diff --git a/trunk/include/linux/generic_serial.h b/trunk/include/linux/generic_serial.h
index 652611a4bdcd..0abe9d9a0069 100644
--- a/trunk/include/linux/generic_serial.h
+++ b/trunk/include/linux/generic_serial.h
@@ -12,8 +12,6 @@
 #ifndef GENERIC_SERIAL_H
 #define GENERIC_SERIAL_H
 
-#include
-
 struct real_driver {
	void			(*disable_tx_interrupts) (void *);
	void			(*enable_tx_interrupts) (void *);
@@ -36,7 +34,7 @@ struct gs_port {
	int			xmit_head;
	int			xmit_tail;
	int			xmit_cnt;
-	struct mutex		port_write_mutex;
+	struct semaphore	port_write_sem;
	int			flags;
	wait_queue_head_t	open_wait;
	wait_queue_head_t	close_wait;
diff --git a/trunk/include/linux/genhd.h b/trunk/include/linux/genhd.h
index fd647fde5ec1..eef5ccdcd731 100644
--- a/trunk/include/linux/genhd.h
+++ b/trunk/include/linux/genhd.h
@@ -149,16 +149,22 @@ struct disk_attribute {
 ({									\
	typeof(gendiskp->dkstats->field) res = 0;			\
	int i;								\
-	for_each_cpu(i)							\
+	for (i=0; i < NR_CPUS; i++) {					\
+		if (!cpu_possible(i))					\
+			continue;					\
		res += per_cpu_ptr(gendiskp->dkstats, i)->field;	\
+	}								\
	res;								\
 })
 
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value)	{
	int i;
-	for_each_cpu(i)
-		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
-				sizeof (struct disk_stats));
+	for (i=0; i < NR_CPUS; i++) {
+		if (cpu_possible(i)) {
+			memset(per_cpu_ptr(gendiskp->dkstats, i), value,
+					sizeof (struct disk_stats));
+		}
+	}
 }
 
 #else
diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h
index 92146f3b7423..dcfd2ecccb5d 100644
--- a/trunk/include/linux/init_task.h
+++ b/trunk/include/linux/init_task.h
@@ -7,10 +7,11 @@
 #define INIT_FDTABLE \
 {							\
	.max_fds	= NR_OPEN_DEFAULT,		\
-	.max_fdset	= EMBEDDED_FD_SET_SIZE,		\
+	.max_fdset	= __FD_SETSIZE,			\
+	.next_fd	= 0,				\
	.fd		= &init_files.fd_array[0],	\
-	.close_on_exec	= (fd_set *)&init_files.close_on_exec_init, \
-	.open_fds	= (fd_set *)&init_files.open_fds_init,	\
+	.close_on_exec	= &init_files.close_on_exec_init, \
+	.open_fds	= &init_files.open_fds_init,	\
	.rcu		= RCU_HEAD_INIT,		\
	.free_files	= NULL,				\
	.next		= NULL,				\
@@ -19,10 +20,9 @@
 #define INIT_FILES \
 {							\
	.count		= ATOMIC_INIT(1),		\
+	.file_lock	= SPIN_LOCK_UNLOCKED,		\
	.fdt		= &init_files.fdtab,		\
	.fdtab		= INIT_FDTABLE,			\
-	.file_lock	= SPIN_LOCK_UNLOCKED,		\
-	.next_fd	= 0,				\
	.close_on_exec_init = { { 0, } },		\
	.open_fds_init	= { { 0, } },			\
	.fd_array	= { NULL, }			\
diff --git a/trunk/include/linux/jbd.h b/trunk/include/linux/jbd.h
index 2ccbfb6340ba..41ee79962bb2 100644
--- a/trunk/include/linux/jbd.h
+++ b/trunk/include/linux/jbd.h
@@ -28,7 +28,6 @@
 #include
 #include
 #include
-#include
 #include
 #endif
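
[The genhd.h hunk above reverts the for_each_cpu() helpers to open-coded NR_CPUS loops guarded by cpu_possible(). A minimal userspace sketch of that per-CPU statistics pattern — NR_CPUS, cpu_possible() and the sectors array are illustrative stand-ins for the kernel's versions, not the real API:]

	#include <stdio.h>

	#define NR_CPUS 4

	static long sectors[NR_CPUS];	/* one private slot per CPU; writers never share a slot */

	static int cpu_possible(int cpu)
	{
		return cpu >= 0 && cpu < NR_CPUS;	/* every CPU is possible in this toy setup */
	}

	static long disk_stat_read(void)
	{
		long res = 0;
		int i;

		/* Readers sum over all possible CPUs, mirroring disk_stat_add()'s loop. */
		for (i = 0; i < NR_CPUS; i++) {
			if (!cpu_possible(i))
				continue;
			res += sectors[i];
		}
		return res;
	}

	int main(void)
	{
		sectors[0] = 10;	/* as if CPU 0 accounted 10 sectors */
		sectors[2] = 5;		/* as if CPU 2 accounted 5 sectors */
		printf("%ld\n", disk_stat_read());	/* prints 15 */
		return 0;
	}

[The design point: each CPU updates only its own slot, so the hot path needs no lock; the cost is that readers must walk every possible CPU to aggregate.]
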
@@ -576,7 +575,7 @@ struct transaction_s * @j_wait_checkpoint: Wait queue to trigger checkpointing * @j_wait_commit: Wait queue to trigger commit * @j_wait_updates: Wait queue to wait for updates to complete - * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints + * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints * @j_head: Journal head - identifies the first unused block in the journal * @j_tail: Journal tail - identifies the oldest still-used block in the * journal. @@ -646,7 +645,7 @@ struct journal_s int j_barrier_count; /* The barrier lock itself */ - struct mutex j_barrier; + struct semaphore j_barrier; /* * Transactions: The current running transaction... @@ -688,7 +687,7 @@ struct journal_s wait_queue_head_t j_wait_updates; /* Semaphore for locking against concurrent checkpoints */ - struct mutex j_checkpoint_mutex; + struct semaphore j_checkpoint_sem; /* * Journal head: identifies the first unused block in the journal. diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index bb6e7ddee2fd..3b507bf05d09 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -91,9 +91,6 @@ extern struct notifier_block *panic_notifier_list; extern long (*panic_blink)(long time); NORET_TYPE void panic(const char * fmt, ...) __attribute__ ((NORET_AND format (printf, 1, 2))); -extern void oops_enter(void); -extern void oops_exit(void); -extern int oops_may_print(void); fastcall NORET_TYPE void do_exit(long error_code) ATTRIB_NORET; NORET_TYPE void complete_and_exit(struct completion *, long) diff --git a/trunk/include/linux/kprobes.h b/trunk/include/linux/kprobes.h index 778adc0fa640..669756bc20a2 100644 --- a/trunk/include/linux/kprobes.h +++ b/trunk/include/linux/kprobes.h @@ -36,7 +36,6 @@ #include #include #include -#include #ifdef CONFIG_KPROBES #include @@ -153,7 +152,7 @@ struct kretprobe_instance { }; extern spinlock_t kretprobe_lock; -extern struct mutex kprobe_mutex; +extern struct semaphore kprobe_mutex; extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); diff --git a/trunk/include/linux/loop.h b/trunk/include/linux/loop.h index e76c7611d6cc..f96506782ebe 100644 --- a/trunk/include/linux/loop.h +++ b/trunk/include/linux/loop.h @@ -17,7 +17,6 @@ #include #include #include -#include /* Possible states of device */ enum { @@ -61,7 +60,7 @@ struct loop_device { int lo_state; struct completion lo_done; struct completion lo_bh_done; - struct mutex lo_ctl_mutex; + struct semaphore lo_ctl_mutex; int lo_pending; request_queue_t *lo_queue; diff --git a/trunk/include/linux/msdos_fs.h b/trunk/include/linux/msdos_fs.h index 779e6a5744c7..8bcd9450d926 100644 --- a/trunk/include/linux/msdos_fs.h +++ b/trunk/include/linux/msdos_fs.h @@ -184,7 +184,6 @@ struct fat_slot_info { #include #include #include -#include struct fat_mount_options { uid_t fs_uid; @@ -227,7 +226,7 @@ struct msdos_sb_info { unsigned long max_cluster; /* maximum cluster number */ unsigned long root_cluster; /* first cluster of the root directory */ unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */ - struct mutex fat_lock; + struct semaphore fat_lock; unsigned int prev_free; /* previously allocated cluster number */ unsigned int free_clusters; /* -1 if undefined */ struct fat_mount_options options; diff --git a/trunk/include/linux/nbd.h b/trunk/include/linux/nbd.h index a6ce409ec6fc..f95d51fae733 100644 --- a/trunk/include/linux/nbd.h +++ 
b/trunk/include/linux/nbd.h @@ -38,7 +38,6 @@ enum { #ifdef __KERNEL__ #include -#include /* values for flags field */ #define NBD_READ_ONLY 0x0001 @@ -58,7 +57,7 @@ struct nbd_device { struct request *active_req; wait_queue_head_t active_wq; - struct mutex tx_lock; + struct semaphore tx_lock; struct gendisk *disk; int blksize; u64 bytesize; diff --git a/trunk/include/linux/ncp_fs_i.h b/trunk/include/linux/ncp_fs_i.h index bdb4c8ae6924..415be1ec6f98 100644 --- a/trunk/include/linux/ncp_fs_i.h +++ b/trunk/include/linux/ncp_fs_i.h @@ -19,7 +19,7 @@ struct ncp_inode_info { __le32 DosDirNum; __u8 volNumber; __le32 nwattr; - struct mutex open_mutex; + struct semaphore open_sem; atomic_t opened; int access; int flags; diff --git a/trunk/include/linux/ncp_fs_sb.h b/trunk/include/linux/ncp_fs_sb.h index b089d9506283..cf858eb80f0b 100644 --- a/trunk/include/linux/ncp_fs_sb.h +++ b/trunk/include/linux/ncp_fs_sb.h @@ -11,7 +11,6 @@ #include #include #include -#include #ifdef __KERNEL__ @@ -52,7 +51,7 @@ struct ncp_server { receive replies */ int lock; /* To prevent mismatch in protocols. */ - struct mutex mutex; + struct semaphore sem; int current_size; /* for packet preparation */ int has_subfunction; @@ -97,7 +96,7 @@ struct ncp_server { struct { struct work_struct tq; /* STREAM/DGRAM: data/error ready */ struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */ - struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */ + struct semaphore creq_sem; /* DGRAM only: lock accesses to rcv.creq */ unsigned int state; /* STREAM only: receiver state */ struct { diff --git a/trunk/include/linux/pm.h b/trunk/include/linux/pm.h index 6df2585c0169..5be87ba3b7ac 100644 --- a/trunk/include/linux/pm.h +++ b/trunk/include/linux/pm.h @@ -188,8 +188,6 @@ extern void device_power_up(void); extern void device_resume(void); #ifdef CONFIG_PM -extern suspend_disk_method_t pm_disk_mode; - extern int device_suspend(pm_message_t state); #define device_set_wakeup_enable(dev,val) \ @@ -217,6 +215,7 @@ static inline int dpm_runtime_suspend(struct device * dev, pm_message_t state) static inline void dpm_runtime_resume(struct device * dev) { + } #endif diff --git a/trunk/include/linux/profile.h b/trunk/include/linux/profile.h index 1f2fea6640a4..026969a5595c 100644 --- a/trunk/include/linux/profile.h +++ b/trunk/include/linux/profile.h @@ -14,7 +14,6 @@ struct proc_dir_entry; struct pt_regs; -struct notifier_block; /* init basic kernel profiler */ void __init profile_init(void); @@ -33,6 +32,7 @@ enum profile_type { #ifdef CONFIG_PROFILING +struct notifier_block; struct task_struct; struct mm_struct; diff --git a/trunk/include/linux/quota.h b/trunk/include/linux/quota.h index 8dc2d04a103f..f33aeb22c26a 100644 --- a/trunk/include/linux/quota.h +++ b/trunk/include/linux/quota.h @@ -38,7 +38,6 @@ #include #include #include -#include #define __DQUOT_VERSION__ "dquot_6.5.1" #define __DQUOT_NUM_VERSION__ 6*10000+5*100+1 @@ -216,7 +215,7 @@ struct dquot { struct list_head dq_inuse; /* List of all quotas */ struct list_head dq_free; /* Free list element */ struct list_head dq_dirty; /* List of dirty dquots */ - struct mutex dq_lock; /* dquot IO lock */ + struct semaphore dq_lock; /* dquot IO lock */ atomic_t dq_count; /* Use count */ wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */ struct super_block *dq_sb; /* superblock this applies to */ @@ -286,8 +285,8 @@ struct quota_format_type { struct quota_info { unsigned int flags; /* Flags for diskquotas on this device */ - 
struct mutex dqio_mutex; /* lock device while I/O in progress */ - struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ + struct semaphore dqio_sem; /* lock device while I/O in progress */ + struct semaphore dqonoff_sem; /* Serialize quotaon & quotaoff */ struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ diff --git a/trunk/include/linux/raid/raid1.h b/trunk/include/linux/raid/raid1.h index 3009c813d83d..9d5494aaac0f 100644 --- a/trunk/include/linux/raid/raid1.h +++ b/trunk/include/linux/raid/raid1.h @@ -130,6 +130,6 @@ struct r1bio_s { * with failure when last write completes (and all failed). * Record that bi_end_io was called with this flag... */ -#define R1BIO_Returned 6 +#define R1BIO_Returned 4 #endif diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index 5673008b61e1..c2ec6c77874e 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -113,6 +113,8 @@ struct rcu_data { DECLARE_PER_CPU(struct rcu_data, rcu_data); DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); +extern struct rcu_ctrlblk rcu_ctrlblk; +extern struct rcu_ctrlblk rcu_bh_ctrlblk; /* * Increment the quiescent state counter. diff --git a/trunk/include/linux/seq_file.h b/trunk/include/linux/seq_file.h index b95f6eb7254c..850a974ee505 100644 --- a/trunk/include/linux/seq_file.h +++ b/trunk/include/linux/seq_file.h @@ -4,7 +4,7 @@ #include #include -#include +#include struct seq_operations; struct file; @@ -19,7 +19,7 @@ struct seq_file { size_t count; loff_t index; loff_t version; - struct mutex lock; + struct semaphore sem; struct seq_operations *op; void *private; }; diff --git a/trunk/include/linux/swap.h b/trunk/include/linux/swap.h index 54eac8a39a4c..12415dd94451 100644 --- a/trunk/include/linux/swap.h +++ b/trunk/include/linux/swap.h @@ -234,15 +234,14 @@ extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *v /* linux/mm/swapfile.c */ extern long total_swap_pages; extern unsigned int nr_swapfiles; +extern struct swap_info_struct swap_info[]; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); -extern swp_entry_t get_swap_page_of_type(int); +extern swp_entry_t get_swap_page_of_type(int type); extern int swap_duplicate(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); extern void free_swap_and_cache(swp_entry_t); -extern int swap_type_of(dev_t); -extern unsigned int count_swap_pages(int, int); extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); extern struct swap_info_struct *get_swap_info_struct(unsigned); extern int can_share_swap_page(struct page *); diff --git a/trunk/include/linux/tty.h b/trunk/include/linux/tty.h index f13f49afe198..f45cd74e6f24 100644 --- a/trunk/include/linux/tty.h +++ b/trunk/include/linux/tty.h @@ -24,7 +24,6 @@ #include #include #include -#include #include @@ -232,8 +231,8 @@ struct tty_struct { int canon_data; unsigned long canon_head; unsigned int canon_column; - struct mutex atomic_read_lock; - struct mutex atomic_write_lock; + struct semaphore atomic_read; + struct semaphore atomic_write; unsigned char *write_buf; int write_cnt; spinlock_t read_lock; @@ -320,7 +319,8 @@ extern void tty_ldisc_put(int); extern void tty_wakeup(struct tty_struct *tty); extern void tty_ldisc_flush(struct tty_struct *tty); -extern 
struct mutex tty_mutex; +struct semaphore; +extern struct semaphore tty_sem; /* n_tty.c */ extern struct tty_ldisc tty_ldisc_N_TTY; diff --git a/trunk/include/linux/tty_flip.h b/trunk/include/linux/tty_flip.h index 0c6169fff366..222faf97d5f9 100644 --- a/trunk/include/linux/tty_flip.h +++ b/trunk/include/linux/tty_flip.h @@ -7,8 +7,14 @@ extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *c extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size); extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size); -static inline int tty_insert_flip_char(struct tty_struct *tty, - unsigned char ch, char flag) +#ifdef INCLUDE_INLINE_FUNCS +#define _INLINE_ extern +#else +#define _INLINE_ static __inline__ +#endif + +_INLINE_ int tty_insert_flip_char(struct tty_struct *tty, + unsigned char ch, char flag) { struct tty_buffer *tb = tty->buf.tail; if (tb && tb->active && tb->used < tb->size) { @@ -19,7 +25,7 @@ static inline int tty_insert_flip_char(struct tty_struct *tty, return tty_insert_flip_string_flags(tty, &ch, &flag, 1); } -static inline void tty_schedule_flip(struct tty_struct *tty) +_INLINE_ void tty_schedule_flip(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->buf.lock, flags); diff --git a/trunk/include/linux/udf_fs_sb.h b/trunk/include/linux/udf_fs_sb.h index 80ae9ef940dc..b15ff2e99c91 100644 --- a/trunk/include/linux/udf_fs_sb.h +++ b/trunk/include/linux/udf_fs_sb.h @@ -13,7 +13,7 @@ #ifndef _UDF_FS_SB_H #define _UDF_FS_SB_H 1 -#include +#include #pragma pack(1) @@ -111,7 +111,7 @@ struct udf_sb_info /* VAT inode */ struct inode *s_vat; - struct mutex s_alloc_mutex; + struct semaphore s_alloc_sem; }; #endif /* _UDF_FS_SB_H */ diff --git a/trunk/include/linux/vt_kern.h b/trunk/include/linux/vt_kern.h index 530ae3f4248c..fab5aed8ca31 100644 --- a/trunk/include/linux/vt_kern.h +++ b/trunk/include/linux/vt_kern.h @@ -73,11 +73,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); int vt_waitactive(int vt); void change_console(struct vc_data *new_vc); void reset_vc(struct vc_data *vc); -#ifdef CONFIG_VT -int is_console_suspend_safe(void); -#else -static inline int is_console_suspend_safe(void) { return 1; } -#endif /* * vc_screen.c shares this temporary buffer with the console write code so that diff --git a/trunk/init/do_mounts_initrd.c b/trunk/init/do_mounts_initrd.c index 405f9031af87..a05cabd0fd10 100644 --- a/trunk/init/do_mounts_initrd.c +++ b/trunk/init/do_mounts_initrd.c @@ -56,7 +56,6 @@ static void __init handle_initrd(void) sys_chroot("."); mount_devfs_fs (); - current->flags |= PF_NOFREEZE; pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD); if (pid > 0) { while (pid != sys_wait4(-1, NULL, 0, NULL)) diff --git a/trunk/init/main.c b/trunk/init/main.c index 2714e0e7cfec..4c194c47395f 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -325,7 +325,7 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } #else #ifdef __GENERIC_PER_CPU -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +unsigned long __per_cpu_offset[NR_CPUS]; EXPORT_SYMBOL(__per_cpu_offset); @@ -333,7 +333,6 @@ static void __init setup_per_cpu_areas(void) { unsigned long size, i; char *ptr; - unsigned long nr_possible_cpus = num_possible_cpus(); /* Copy section for each CPU (we discard the original) */ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); @@ -341,12 +340,12 @@ static void __init setup_per_cpu_areas(void) if 
(size < PERCPU_ENOUGH_ROOM) size = PERCPU_ENOUGH_ROOM; #endif - ptr = alloc_bootmem(size * nr_possible_cpus); - for_each_cpu(i) { + ptr = alloc_bootmem(size * NR_CPUS); + + for (i = 0; i < NR_CPUS; i++, ptr += size) { __per_cpu_offset[i] = ptr - __per_cpu_start; memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); - ptr += size; } } #endif /* !__GENERIC_PER_CPU */ @@ -439,15 +438,6 @@ void __init parse_early_param(void) * Activate the first processor. */ -static void __init boot_cpu_init(void) -{ - int cpu = smp_processor_id(); - /* Mark the boot cpu "present", "online" etc for SMP and UP case */ - cpu_set(cpu, cpu_online_map); - cpu_set(cpu, cpu_present_map); - cpu_set(cpu, cpu_possible_map); -} - asmlinkage void __init start_kernel(void) { char * command_line; @@ -457,13 +447,17 @@ asmlinkage void __init start_kernel(void) * enable them */ lock_kernel(); - boot_cpu_init(); page_address_init(); printk(KERN_NOTICE); printk(linux_banner); setup_arch(&command_line); setup_per_cpu_areas(); - smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + + /* + * Mark the boot cpu "online" so that it can call console drivers in + * printk() and can access its per-cpu storage. + */ + smp_prepare_boot_cpu(); /* * Set up the scheduler prior starting any interrupts (such as the diff --git a/trunk/kernel/cpuset.c b/trunk/kernel/cpuset.c index c86ee051b734..12815d3f1a05 100644 --- a/trunk/kernel/cpuset.c +++ b/trunk/kernel/cpuset.c @@ -53,7 +53,7 @@ #include #include -#include +#include #define CPUSET_SUPER_MAGIC 0x27e0eb @@ -168,57 +168,63 @@ static struct vfsmount *cpuset_mount; static struct super_block *cpuset_sb; /* - * We have two global cpuset mutexes below. They can nest. - * It is ok to first take manage_mutex, then nest callback_mutex. We also + * We have two global cpuset semaphores below. They can nest. + * It is ok to first take manage_sem, then nest callback_sem. We also * require taking task_lock() when dereferencing a tasks cpuset pointer. * See "The task_lock() exception", at the end of this comment. * - * A task must hold both mutexes to modify cpusets. If a task - * holds manage_mutex, then it blocks others wanting that mutex, - * ensuring that it is the only task able to also acquire callback_mutex + * A task must hold both semaphores to modify cpusets. If a task + * holds manage_sem, then it blocks others wanting that semaphore, + * ensuring that it is the only task able to also acquire callback_sem * and be able to modify cpusets. It can perform various checks on * the cpuset structure first, knowing nothing will change. It can - * also allocate memory while just holding manage_mutex. While it is + * also allocate memory while just holding manage_sem. While it is * performing these checks, various callback routines can briefly - * acquire callback_mutex to query cpusets. Once it is ready to make - * the changes, it takes callback_mutex, blocking everyone else. + * acquire callback_sem to query cpusets. Once it is ready to make + * the changes, it takes callback_sem, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding - * callback_mutex, as that would risk double tripping on callback_mutex + * callback_sem, as that would risk double tripping on callback_sem * from one of the callbacks into the cpuset code from within * __alloc_pages(). * - * If a task is only holding callback_mutex, then it has read-only + * If a task is only holding callback_sem, then it has read-only * access to cpusets. 
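For reference, the offset scheme that the restored setup_per_cpu_areas() loop above relies on can be sketched in isolation. The following is a user-space approximation, not kernel code: per_cpu_template, per_cpu_offset and setup_areas are illustrative stand-ins for the kernel's __per_cpu_start template, __per_cpu_offset[] and boot-time setup, and malloc() stands in for alloc_bootmem().

        #include <stdlib.h>
        #include <string.h>

        #define NR_CPUS 4

        static char per_cpu_template[64];               /* .data.percpu template stand-in */
        static unsigned long per_cpu_offset[NR_CPUS];   /* __per_cpu_offset[] stand-in */

        static void setup_areas(size_t size)            /* size >= sizeof(per_cpu_template) */
        {
                char *ptr = malloc(size * NR_CPUS);     /* one block, NR_CPUS copies */
                int i;

                for (i = 0; i < NR_CPUS; i++, ptr += size) {
                        per_cpu_offset[i] = ptr - per_cpu_template;
                        memcpy(ptr, per_cpu_template, sizeof(per_cpu_template));
                }
        }

        int main(void)
        {
                setup_areas(sizeof(per_cpu_template));
                return 0;
        }

A CPU's copy of a per-cpu variable then lives at the variable's template address plus per_cpu_offset[cpu], which is what the per_cpu() accessors compute.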
* * The task_struct fields mems_allowed and mems_generation may only * be accessed in the context of that task, so require no locks. * * Any task can increment and decrement the count field without lock. - * So in general, code holding manage_mutex or callback_mutex can't rely + * So in general, code holding manage_sem or callback_sem can't rely * on the count field not changing. However, if the count goes to - * zero, then only attach_task(), which holds both mutexes, can + * zero, then only attach_task(), which holds both semaphores, can * increment it again. Because a count of zero means that no tasks * are currently attached, therefore there is no way a task attached * to that cpuset can fork (the other way to increment the count). - * So code holding manage_mutex or callback_mutex can safely assume that + * So code holding manage_sem or callback_sem can safely assume that * if the count is zero, it will stay zero. Similarly, if a task - * holds manage_mutex or callback_mutex on a cpuset with zero count, it + * holds manage_sem or callback_sem on a cpuset with zero count, it * knows that the cpuset won't be removed, as cpuset_rmdir() needs - * both of those mutexes. + * both of those semaphores. + * + * A possible optimization to improve parallelism would be to make + * callback_sem a R/W semaphore (rwsem), allowing the callback routines + * to proceed in parallel, with read access, until the holder of + * manage_sem needed to take this rwsem for exclusive write access + * and modify some cpusets. * * The cpuset_common_file_write handler for operations that modify - * the cpuset hierarchy holds manage_mutex across the entire operation, + * the cpuset hierarchy holds manage_sem across the entire operation, * single threading all such cpuset modifications across the system. * - * The cpuset_common_file_read() handlers only hold callback_mutex across + * The cpuset_common_file_read() handlers only hold callback_sem across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. * * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't - * (usually) take either mutex. These are the two most performance + * (usually) take either semaphore. These are the two most performance * critical pieces of code here. The exception occurs on cpuset_exit(), - * when a task in a notify_on_release cpuset exits. Then manage_mutex + * when a task in a notify_on_release cpuset exits. Then manage_sem * is taken, and if the cpuset count is zero, a usermode call made * to /sbin/cpuset_release_agent with the name of the cpuset (path * relative to the root of cpuset file system) as the argument. @@ -236,9 +242,9 @@ static struct super_block *cpuset_sb; * * The need for this exception arises from the action of attach_task(), * which overwrites one tasks cpuset pointer with another. It does - * so using both mutexes, however there are several performance + * so using both semaphores, however there are several performance * critical places that need to reference task->cpuset without the - * expense of grabbing a system global mutex. Therefore except as + * expense of grabbing a system global semaphore. Therefore except as * noted below, when dereferencing or, as in attach_task(), modifying * a tasks cpuset pointer we use task_lock(), which acts on a spinlock * (task->alloc_lock) already in the task_struct routinely used for @@ -250,8 +256,8 @@ static struct super_block *cpuset_sb; * the routine cpuset_update_task_memory_state(). 
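The protocol laid out in the comment above condenses to a few lines. A minimal sketch using the 2.6-era semaphore API this patch restores (DECLARE_MUTEX() defines a struct semaphore initialized to 1, so down()/up() give mutual exclusion; the function name here is illustrative):

        static DECLARE_MUTEX(manage_sem);       /* outer lock: taken first, held long */
        static DECLARE_MUTEX(callback_sem);     /* inner lock: may nest under manage_sem */

        static void cpuset_modify_sketch(void)
        {
                down(&manage_sem);      /* blocks other modifiers */
                /* checks and kmalloc() are safe while only manage_sem is held */
                down(&callback_sem);    /* now the query callbacks are blocked too */
                /* publish the changes; no memory allocation in this window */
                up(&callback_sem);
                up(&manage_sem);
        }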
*/ -static DEFINE_MUTEX(manage_mutex); -static DEFINE_MUTEX(callback_mutex); +static DECLARE_MUTEX(manage_sem); +static DECLARE_MUTEX(callback_sem); /* * A couple of forward declarations required, due to cyclic reference loop: @@ -426,7 +432,7 @@ static inline struct cftype *__d_cft(struct dentry *dentry) } /* - * Call with manage_mutex held. Writes path of cpuset into buf. + * Call with manage_sem held. Writes path of cpuset into buf. * Returns 0 on success, -errno on error. */ @@ -478,11 +484,11 @@ static int cpuset_path(const struct cpuset *cs, char *buf, int buflen) * status of the /sbin/cpuset_release_agent task, so no sense holding * our caller up for that. * - * When we had only one cpuset mutex, we had to call this + * When we had only one cpuset semaphore, we had to call this * without holding it, to avoid deadlock when call_usermodehelper() * allocated memory. With two locks, we could now call this while - * holding manage_mutex, but we still don't, so as to minimize - * the time manage_mutex is held. + * holding manage_sem, but we still don't, so as to minimize + * the time manage_sem is held. */ static void cpuset_release_agent(const char *pathbuf) @@ -514,15 +520,15 @@ static void cpuset_release_agent(const char *pathbuf) * cs is notify_on_release() and now both the user count is zero and * the list of children is empty, prepare cpuset path in a kmalloc'd * buffer, to be returned via ppathbuf, so that the caller can invoke - * cpuset_release_agent() with it later on, once manage_mutex is dropped. - * Call here with manage_mutex held. + * cpuset_release_agent() with it later on, once manage_sem is dropped. + * Call here with manage_sem held. * * This check_for_release() routine is responsible for kmalloc'ing * pathbuf. The above cpuset_release_agent() is responsible for * kfree'ing pathbuf. The caller of these routines is responsible * for providing a pathbuf pointer, initialized to NULL, then - * calling check_for_release() with manage_mutex held and the address - * of the pathbuf pointer, then dropping manage_mutex, then calling + * calling check_for_release() with manage_sem held and the address + * of the pathbuf pointer, then dropping manage_sem, then calling * cpuset_release_agent() with pathbuf, as set by check_for_release(). */ @@ -553,7 +559,7 @@ static void check_for_release(struct cpuset *cs, char **ppathbuf) * One way or another, we guarantee to return some non-empty subset * of cpu_online_map. * - * Call with callback_mutex held. + * Call with callback_sem held. */ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) @@ -577,7 +583,7 @@ static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask) * One way or another, we guarantee to return some non-empty subset * of node_online_map. * - * Call with callback_mutex held. + * Call with callback_sem held. */ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) @@ -602,12 +608,12 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) * current->cpuset if a task has its memory placement changed. * Do not call this routine if in_interrupt(). * - * Call without callback_mutex or task_lock() held. May be called - * with or without manage_mutex held. Doesn't need task_lock to guard + * Call without callback_sem or task_lock() held. May be called + * with or without manage_sem held. 
Doesn't need task_lock to guard * against another task changing a non-NULL cpuset pointer to NULL, * as that is only done by a task on itself, and if the current task * is here, it is not simultaneously in the exit code NULL'ing its - * cpuset pointer. This routine also might acquire callback_mutex and + * cpuset pointer. This routine also might acquire callback_sem and * current->mm->mmap_sem during call. * * Reading current->cpuset->mems_generation doesn't need task_lock @@ -652,13 +658,13 @@ void cpuset_update_task_memory_state(void) } if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { - mutex_lock(&callback_mutex); + down(&callback_sem); task_lock(tsk); cs = tsk->cpuset; /* Maybe changed when task not locked */ guarantee_online_mems(cs, &tsk->mems_allowed); tsk->cpuset_mems_generation = cs->mems_generation; task_unlock(tsk); - mutex_unlock(&callback_mutex); + up(&callback_sem); mpol_rebind_task(tsk, &tsk->mems_allowed); } } @@ -668,7 +674,7 @@ void cpuset_update_task_memory_state(void) * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding manage_mutex. + * are only set if the other's are set. Call holding manage_sem. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -686,7 +692,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * manage_mutex held. + * manage_sem held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -740,7 +746,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) * exclusive child cpusets * Build these two partitions by calling partition_sched_domains * - * Call with manage_mutex held. May nest a call to the + * Call with manage_sem held. May nest a call to the * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. */ @@ -786,7 +792,7 @@ static void update_cpu_domains(struct cpuset *cur) } /* - * Call with manage_mutex held. May take callback_mutex during call. + * Call with manage_sem held. May take callback_sem during call. */ static int update_cpumask(struct cpuset *cs, char *buf) @@ -805,9 +811,9 @@ static int update_cpumask(struct cpuset *cs, char *buf) if (retval < 0) return retval; cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed); - mutex_lock(&callback_mutex); + down(&callback_sem); cs->cpus_allowed = trialcs.cpus_allowed; - mutex_unlock(&callback_mutex); + up(&callback_sem); if (is_cpu_exclusive(cs) && !cpus_unchanged) update_cpu_domains(cs); return 0; @@ -821,7 +827,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) * the cpuset is marked 'memory_migrate', migrate the tasks * pages to the new memory. * - * Call with manage_mutex held. May take callback_mutex during call. + * Call with manage_sem held. May take callback_sem during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_sem, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. 
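The is_cpuset_subset() rule quoted earlier in this hunk reduces to pairwise checks; restated as a sketch against the kernel's cpumask/nodemask helpers (an illustration of the rule, not necessarily the exact function body):

        static int is_cpuset_subset_sketch(const struct cpuset *p, const struct cpuset *q)
        {
                return cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
                       nodes_subset(p->mems_allowed, q->mems_allowed) &&
                       is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
                       is_mem_exclusive(p) <= is_mem_exclusive(q);
        }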
@@ -856,11 +862,11 @@ static int update_nodemask(struct cpuset *cs, char *buf) if (retval < 0) goto done; - mutex_lock(&callback_mutex); + down(&callback_sem); cs->mems_allowed = trialcs.mems_allowed; atomic_inc(&cpuset_mems_generation); cs->mems_generation = atomic_read(&cpuset_mems_generation); - mutex_unlock(&callback_mutex); + up(&callback_sem); set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */ @@ -916,7 +922,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) * tasklist_lock. Forks can happen again now - the mpol_copy() * cpuset_being_rebound check will catch such forks, and rebind * their vma mempolicies too. Because we still hold the global - * cpuset manage_mutex, we know that no other rebind effort will + * cpuset manage_sem, we know that no other rebind effort will * be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. @@ -942,7 +948,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) } /* - * Call with manage_mutex held. + * Call with manage_sem held. */ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) @@ -961,7 +967,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) * cs: the cpuset to update * buf: the buffer where we read the 0 or 1 * - * Call with manage_mutex held. + * Call with manage_sem held. */ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) @@ -983,12 +989,12 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) return err; cpu_exclusive_changed = (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs)); - mutex_lock(&callback_mutex); + down(&callback_sem); if (turning_on) set_bit(bit, &cs->flags); else clear_bit(bit, &cs->flags); - mutex_unlock(&callback_mutex); + up(&callback_sem); if (cpu_exclusive_changed) update_cpu_domains(cs); @@ -1098,7 +1104,7 @@ static int fmeter_getrate(struct fmeter *fmp) * writing the path of the old cpuset in 'ppathbuf' if it needs to be * notified on release. * - * Call holding manage_mutex. May take callback_mutex and task_lock of + * Call holding manage_sem. May take callback_sem and task_lock of * the task 'pid' during call. 
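The update_cpumask(), update_nodemask() and update_flag() hunks above share one shape: build a trial copy, validate it while holding manage_sem, then take callback_sem only around the brief write. Schematically (parsing and error paths elided; illustrative only):

        static int update_sketch(struct cpuset *cs, char *buf)
        {
                struct cpuset trialcs = *cs;            /* work on a trial copy */
                int retval;

                /* parse buf into trialcs here; manage_sem is already held */
                retval = validate_change(cs, &trialcs);
                if (retval < 0)
                        return retval;
                down(&callback_sem);                    /* brief window: block queries */
                cs->cpus_allowed = trialcs.cpus_allowed;
                up(&callback_sem);
                return 0;
        }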
*/ @@ -1138,13 +1144,13 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) get_task_struct(tsk); } - mutex_lock(&callback_mutex); + down(&callback_sem); task_lock(tsk); oldcs = tsk->cpuset; if (!oldcs) { task_unlock(tsk); - mutex_unlock(&callback_mutex); + up(&callback_sem); put_task_struct(tsk); return -ESRCH; } @@ -1158,7 +1164,7 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) from = oldcs->mems_allowed; to = cs->mems_allowed; - mutex_unlock(&callback_mutex); + up(&callback_sem); mm = get_task_mm(tsk); if (mm) { @@ -1215,7 +1221,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us } buffer[nbytes] = 0; /* nul-terminate */ - mutex_lock(&manage_mutex); + down(&manage_sem); if (is_removed(cs)) { retval = -ENODEV; @@ -1258,7 +1264,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us if (retval == 0) retval = nbytes; out2: - mutex_unlock(&manage_mutex); + up(&manage_sem); cpuset_release_agent(pathbuf); out1: kfree(buffer); @@ -1298,9 +1304,9 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) { cpumask_t mask; - mutex_lock(&callback_mutex); + down(&callback_sem); mask = cs->cpus_allowed; - mutex_unlock(&callback_mutex); + up(&callback_sem); return cpulist_scnprintf(page, PAGE_SIZE, mask); } @@ -1309,9 +1315,9 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) { nodemask_t mask; - mutex_lock(&callback_mutex); + down(&callback_sem); mask = cs->mems_allowed; - mutex_unlock(&callback_mutex); + up(&callback_sem); return nodelist_scnprintf(page, PAGE_SIZE, mask); } @@ -1592,7 +1598,7 @@ static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) * Handle an open on 'tasks' file. Prepare a buffer listing the * process id's of tasks currently attached to the cpuset being opened. * - * Does not require any specific cpuset mutexes, and does not take any. + * Does not require any specific cpuset semaphores, and does not take any. */ static int cpuset_tasks_open(struct inode *unused, struct file *file) { @@ -1748,7 +1754,7 @@ static int cpuset_populate_dir(struct dentry *cs_dentry) * name: name of the new cpuset. Will be strcpy'ed. * mode: mode to set on new inode * - * Must be called with the mutex on the parent inode held + * Must be called with the semaphore on the parent inode held */ static long cpuset_create(struct cpuset *parent, const char *name, int mode) @@ -1760,7 +1766,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode) if (!cs) return -ENOMEM; - mutex_lock(&manage_mutex); + down(&manage_sem); cpuset_update_task_memory_state(); cs->flags = 0; if (notify_on_release(parent)) @@ -1776,28 +1782,28 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode) cs->parent = parent; - mutex_lock(&callback_mutex); + down(&callback_sem); list_add(&cs->sibling, &cs->parent->children); number_of_cpusets++; - mutex_unlock(&callback_mutex); + up(&callback_sem); err = cpuset_create_dir(cs, name, mode); if (err < 0) goto err; /* - * Release manage_mutex before cpuset_populate_dir() because it + * Release manage_sem before cpuset_populate_dir() because it * will down() this new directory's i_mutex and if we race with * another mkdir, we might deadlock. 
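The cpuset_common_file_write() hunk above follows the usual write-handler idiom; reduced to a skeleton (dispatch elided, the function name is illustrative, the rest mirrors the surrounding code):

        static ssize_t write_sketch(const char __user *userbuf, size_t nbytes)
        {
                char *buffer, *pathbuf = NULL;
                ssize_t retval;

                buffer = kmalloc(nbytes + 1, GFP_KERNEL);
                if (!buffer)
                        return -ENOMEM;
                if (copy_from_user(buffer, userbuf, nbytes)) {
                        kfree(buffer);
                        return -EFAULT;
                }
                buffer[nbytes] = 0;             /* always nul-terminate before parsing */

                down(&manage_sem);              /* single-thread all modifications */
                /* ... dispatch on the file being written, update the cpuset ... */
                retval = nbytes;
                up(&manage_sem);
                cpuset_release_agent(pathbuf);  /* only after manage_sem is dropped */
                kfree(buffer);
                return retval;
        }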
 */
-	mutex_unlock(&manage_mutex);
+	up(&manage_sem);
 	err = cpuset_populate_dir(cs->dentry);
 	/* If err < 0, we have a half-filled directory - oh well ;) */
 	return 0;
 err:
 	list_del(&cs->sibling);
-	mutex_unlock(&manage_mutex);
+	up(&manage_sem);
 	kfree(cs);
 	return err;
 }
@@ -1819,18 +1825,18 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	/* the vfs holds both inode->i_mutex already */
-	mutex_lock(&manage_mutex);
+	down(&manage_sem);
 	cpuset_update_task_memory_state();
 	if (atomic_read(&cs->count) > 0) {
-		mutex_unlock(&manage_mutex);
+		up(&manage_sem);
 		return -EBUSY;
 	}
 	if (!list_empty(&cs->children)) {
-		mutex_unlock(&manage_mutex);
+		up(&manage_sem);
 		return -EBUSY;
 	}
 	parent = cs->parent;
-	mutex_lock(&callback_mutex);
+	down(&callback_sem);
 	set_bit(CS_REMOVED, &cs->flags);
 	if (is_cpu_exclusive(cs))
 		update_cpu_domains(cs);
@@ -1842,10 +1848,10 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	cpuset_d_remove_dir(d);
 	dput(d);
 	number_of_cpusets--;
-	mutex_unlock(&callback_mutex);
+	up(&callback_sem);
 	if (list_empty(&parent->children))
 		check_for_release(parent, &pathbuf);
-	mutex_unlock(&manage_mutex);
+	up(&manage_sem);
 	cpuset_release_agent(pathbuf);
 	return 0;
 }
@@ -1954,19 +1960,19 @@ void cpuset_fork(struct task_struct *child)
  * Description: Detach cpuset from @tsk and release it.
  *
  * Note that cpusets marked notify_on_release force every task in
- * them to take the global manage_mutex mutex when exiting.
+ * them to take the global manage_sem semaphore when exiting.
  * This could impact scaling on very large systems. Be reluctant to
  * use notify_on_release cpusets where very high task exit scaling
  * is required on large systems.
  *
  * Don't even think about dereferencing 'cs' after the cpuset use count
- * goes to zero, except inside a critical section guarded by manage_mutex
- * or callback_mutex. Otherwise a zero cpuset use count is a license to
+ * goes to zero, except inside a critical section guarded by manage_sem
+ * or callback_sem. Otherwise a zero cpuset use count is a license to
  * any other task to nuke the cpuset immediately, via cpuset_rmdir().
  *
- * This routine has to take manage_mutex, not callback_mutex, because
- * it is holding that mutex while calling check_for_release(),
- * which calls kmalloc(), so can't be called holding callback_mutex().
+ * This routine has to take manage_sem, not callback_sem, because
+ * it is holding that semaphore while calling check_for_release(),
+ * which calls kmalloc(), so can't be called holding callback_sem().
* * We don't need to task_lock() this reference to tsk->cpuset, * because tsk is already marked PF_EXITING, so attach_task() won't @@ -2016,10 +2022,10 @@ void cpuset_exit(struct task_struct *tsk) if (notify_on_release(cs)) { char *pathbuf = NULL; - mutex_lock(&manage_mutex); + down(&manage_sem); if (atomic_dec_and_test(&cs->count)) check_for_release(cs, &pathbuf); - mutex_unlock(&manage_mutex); + up(&manage_sem); cpuset_release_agent(pathbuf); } else { atomic_dec(&cs->count); @@ -2040,11 +2046,11 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) { cpumask_t mask; - mutex_lock(&callback_mutex); + down(&callback_sem); task_lock(tsk); guarantee_online_cpus(tsk->cpuset, &mask); task_unlock(tsk); - mutex_unlock(&callback_mutex); + up(&callback_sem); return mask; } @@ -2068,11 +2074,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) { nodemask_t mask; - mutex_lock(&callback_mutex); + down(&callback_sem); task_lock(tsk); guarantee_online_mems(tsk->cpuset, &mask); task_unlock(tsk); - mutex_unlock(&callback_mutex); + up(&callback_sem); return mask; } @@ -2098,7 +2104,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) /* * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive - * ancestor to the specified cpuset. Call holding callback_mutex. + * ancestor to the specified cpuset. Call holding callback_sem. * If no ancestor is mem_exclusive (an unusual configuration), then * returns the root cpuset. */ @@ -2125,12 +2131,12 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) * GFP_KERNEL allocations are not so marked, so can escape to the * nearest mem_exclusive ancestor cpuset. * - * Scanning up parent cpusets requires callback_mutex. The __alloc_pages() + * Scanning up parent cpusets requires callback_sem. The __alloc_pages() * routine only calls here with __GFP_HARDWALL bit _not_ set if * it's a GFP_KERNEL allocation, and all nodes in the current tasks * mems_allowed came up empty on the first pass over the zonelist. * So only GFP_KERNEL allocations, if all nodes in the cpuset are - * short of memory, might require taking the callback_mutex mutex. + * short of memory, might require taking the callback_sem semaphore. * * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing @@ -2165,31 +2171,31 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) return 1; /* Not hardwall and node outside mems_allowed: scan up cpusets */ - mutex_lock(&callback_mutex); + down(&callback_sem); task_lock(current); cs = nearest_exclusive_ancestor(current->cpuset); task_unlock(current); allowed = node_isset(node, cs->mems_allowed); - mutex_unlock(&callback_mutex); + up(&callback_sem); return allowed; } /** * cpuset_lock - lock out any changes to cpuset structures * - * The out of memory (oom) code needs to mutex_lock cpusets + * The out of memory (oom) code needs to lock down cpusets * from being changed while it scans the tasklist looking for a - * task in an overlapping cpuset. Expose callback_mutex via this + * task in an overlapping cpuset. Expose callback_sem via this * cpuset_lock() routine, so the oom code can lock it, before * locking the task list. The tasklist_lock is a spinlock, so - * must be taken inside callback_mutex. + * must be taken inside callback_sem. 
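The ordering constraint described above (the tasklist_lock spinlock must be taken inside, never outside, the semaphore) makes the oom path look schematically like this (illustrative sketch; the scan itself is elided):

        void oom_scan_sketch(void)
        {
                cpuset_lock();                  /* takes callback_sem */
                read_lock(&tasklist_lock);      /* spinlock: must nest inside */
                /* ... scan the task list for a task in an overlapping cpuset ... */
                read_unlock(&tasklist_lock);
                cpuset_unlock();                /* drops callback_sem */
        }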
*/ void cpuset_lock(void) { - mutex_lock(&callback_mutex); + down(&callback_sem); } /** @@ -2200,7 +2206,7 @@ void cpuset_lock(void) void cpuset_unlock(void) { - mutex_unlock(&callback_mutex); + up(&callback_sem); } /** @@ -2212,7 +2218,7 @@ void cpuset_unlock(void) * determine if task @p's memory usage might impact the memory * available to the current task. * - * Call while holding callback_mutex. + * Call while holding callback_sem. **/ int cpuset_excl_nodes_overlap(const struct task_struct *p) @@ -2283,7 +2289,7 @@ void __cpuset_memory_pressure_bump(void) * - Used for /proc//cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, - * and we take manage_mutex, keeping attach_task() from changing it + * and we take manage_sem, keeping attach_task() from changing it * anyway. */ @@ -2299,7 +2305,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v) return -ENOMEM; tsk = m->private; - mutex_lock(&manage_mutex); + down(&manage_sem); cs = tsk->cpuset; if (!cs) { retval = -EINVAL; @@ -2312,7 +2318,7 @@ static int proc_cpuset_show(struct seq_file *m, void *v) seq_puts(m, buf); seq_putc(m, '\n'); out: - mutex_unlock(&manage_mutex); + up(&manage_sem); kfree(buf); return retval; } diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index 8037405e136e..d1e8d500a7e1 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -345,9 +345,9 @@ void daemonize(const char *name, ...) exit_mm(current); set_special_pids(1, 1); - mutex_lock(&tty_mutex); + down(&tty_sem); current->signal->tty = NULL; - mutex_unlock(&tty_mutex); + up(&tty_sem); /* Block and flush all signals */ sigfillset(&blocked); diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index c79ae0b19a49..9bd7b65ee418 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -607,12 +607,12 @@ static struct files_struct *alloc_files(void) atomic_set(&newf->count, 1); spin_lock_init(&newf->file_lock); - newf->next_fd = 0; fdt = &newf->fdtab; + fdt->next_fd = 0; fdt->max_fds = NR_OPEN_DEFAULT; - fdt->max_fdset = EMBEDDED_FD_SET_SIZE; - fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init; - fdt->open_fds = (fd_set *)&newf->open_fds_init; + fdt->max_fdset = __FD_SETSIZE; + fdt->close_on_exec = &newf->close_on_exec_init; + fdt->open_fds = &newf->open_fds_init; fdt->fd = &newf->fd_array[0]; INIT_RCU_HEAD(&fdt->rcu); fdt->free_files = NULL; diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c index 1fb9f753ef60..fef1af8a73ce 100644 --- a/trunk/kernel/kprobes.c +++ b/trunk/kernel/kprobes.c @@ -48,7 +48,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; -DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ +DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; @@ -460,7 +460,7 @@ static int __kprobes __register_kprobe(struct kprobe *p, } p->nmissed = 0; - mutex_lock(&kprobe_mutex); + down(&kprobe_mutex); old_p = get_kprobe(p->addr); if (old_p) { ret = register_aggr_kprobe(old_p, p); @@ -477,7 +477,7 @@ static int __kprobes __register_kprobe(struct kprobe *p, arch_arm_kprobe(p); out: - mutex_unlock(&kprobe_mutex); + up(&kprobe_mutex); if (ret && probed_mod) module_put(probed_mod); @@ -496,10 +496,10 @@ void __kprobes unregister_kprobe(struct kprobe *p) struct kprobe *old_p, *list_p; int cleanup_p; - mutex_lock(&kprobe_mutex); 
+	down(&kprobe_mutex);
 	old_p = get_kprobe(p->addr);
 	if (unlikely(!old_p)) {
-		mutex_unlock(&kprobe_mutex);
+		up(&kprobe_mutex);
 		return;
 	}
 	if (p != old_p) {
@@ -507,7 +507,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 			if (list_p == p)
 			/* kprobe p is a valid probe */
 				goto valid_p;
-		mutex_unlock(&kprobe_mutex);
+		up(&kprobe_mutex);
 		return;
 	}
 valid_p:
@@ -523,7 +523,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		cleanup_p = 0;
 	}
-	mutex_unlock(&kprobe_mutex);
+	up(&kprobe_mutex);
 	synchronize_sched();
 	if (p->mod_refcounted &&
diff --git a/trunk/kernel/kthread.c b/trunk/kernel/kthread.c
index 6a5373868a98..e75950a1092c 100644
--- a/trunk/kernel/kthread.c
+++ b/trunk/kernel/kthread.c
@@ -12,7 +12,6 @@
 #include
 #include
 #include
-#include
 #include
 /*
@@ -42,7 +41,7 @@ struct kthread_stop_info
 /* Thread stopping is done by setting this var: lock serializes
  * multiple kthread_stop calls. */
-static DEFINE_MUTEX(kthread_stop_lock);
+static DECLARE_MUTEX(kthread_stop_lock);
 static struct kthread_stop_info kthread_stop_info;
 int kthread_should_stop(void)
@@ -174,7 +173,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
 {
 	int ret;
-	mutex_lock(&kthread_stop_lock);
+	down(&kthread_stop_lock);
 	/* It could exit after stop_info.k set, but before wake_up_process. */
 	get_task_struct(k);
@@ -195,7 +194,7 @@ int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
 	wait_for_completion(&kthread_stop_info.done);
 	kthread_stop_info.k = NULL;
 	ret = kthread_stop_info.err;
-	mutex_unlock(&kthread_stop_lock);
+	up(&kthread_stop_lock);
 	return ret;
 }
diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c
index fb404299082e..77764f22f021 100644
--- a/trunk/kernel/module.c
+++ b/trunk/kernel/module.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -61,18 +60,18 @@ static DEFINE_SPINLOCK(modlist_lock);
 /* List of modules, protected by module_mutex AND modlist_lock */
-static DEFINE_MUTEX(module_mutex);
+static DECLARE_MUTEX(module_mutex);
 static LIST_HEAD(modules);
-static DEFINE_MUTEX(notify_mutex);
+static DECLARE_MUTEX(notify_mutex);
 static struct notifier_block * module_notify_list;
 int register_module_notifier(struct notifier_block * nb)
 {
 	int err;
-	mutex_lock(&notify_mutex);
+	down(&notify_mutex);
 	err = notifier_chain_register(&module_notify_list, nb);
-	mutex_unlock(&notify_mutex);
+	up(&notify_mutex);
 	return err;
 }
 EXPORT_SYMBOL(register_module_notifier);
@@ -80,9 +79,9 @@ EXPORT_SYMBOL(register_module_notifier);
 int unregister_module_notifier(struct notifier_block * nb)
 {
 	int err;
-	mutex_lock(&notify_mutex);
+	down(&notify_mutex);
 	err = notifier_chain_unregister(&module_notify_list, nb);
-	mutex_unlock(&notify_mutex);
+	up(&notify_mutex);
 	return err;
 }
 EXPORT_SYMBOL(unregister_module_notifier);
@@ -602,7 +601,7 @@ static void free_module(struct module *mod);
 static void wait_for_zero_refcount(struct module *mod)
 {
 	/* Since we might sleep for some time, drop the semaphore first */
-	mutex_unlock(&module_mutex);
+	up(&module_mutex);
 	for (;;) {
 		DEBUGP("Looking at refcount...\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -611,7 +610,7 @@ static void wait_for_zero_refcount(struct module *mod)
 		schedule();
 	}
 	current->state = TASK_RUNNING;
-	mutex_lock(&module_mutex);
+	down(&module_mutex);
 }
 asmlinkage long
@@ -628,7 +627,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
-	if (mutex_lock_interruptible(&module_mutex) != 0)
+	if (down_interruptible(&module_mutex) != 0)
 		return -EINTR;
 	mod = find_module(name);
@@ -677,14 +676,14 @@
 	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL) {
-		mutex_unlock(&module_mutex);
+		up(&module_mutex);
 		mod->exit();
-		mutex_lock(&module_mutex);
+		down(&module_mutex);
 	}
 	free_module(mod);
 out:
-	mutex_unlock(&module_mutex);
+	up(&module_mutex);
 	return ret;
 }
@@ -1973,13 +1972,13 @@ sys_init_module(void __user *umod,
 		return -EPERM;
 	/* Only one module load at a time, please */
-	if (mutex_lock_interruptible(&module_mutex) != 0)
+	if (down_interruptible(&module_mutex) != 0)
 		return -EINTR;
 	/* Do all the hard work */
 	mod = load_module(umod, len, uargs);
 	if (IS_ERR(mod)) {
-		mutex_unlock(&module_mutex);
+		up(&module_mutex);
 		return PTR_ERR(mod);
 	}
@@ -1988,11 +1987,11 @@ sys_init_module(void __user *umod,
 	stop_machine_run(__link_module, mod, NR_CPUS);
 	/* Drop lock so they can recurse */
-	mutex_unlock(&module_mutex);
+	up(&module_mutex);
-	mutex_lock(&notify_mutex);
+	down(&notify_mutex);
 	notifier_call_chain(&module_notify_list, MODULE_STATE_COMING, mod);
-	mutex_unlock(&notify_mutex);
+	up(&notify_mutex);
 	/* Start the module */
 	if (mod->init != NULL)
@@ -2007,15 +2006,15 @@ sys_init_module(void __user *umod,
 			mod->name);
 	else {
 		module_put(mod);
-		mutex_lock(&module_mutex);
+		down(&module_mutex);
 		free_module(mod);
-		mutex_unlock(&module_mutex);
+		up(&module_mutex);
 	}
 	return ret;
 }
 	/* Now it's a first class citizen! */
-	mutex_lock(&module_mutex);
+	down(&module_mutex);
 	mod->state = MODULE_STATE_LIVE;
 	/* Drop initial reference. */
 	module_put(mod);
@@ -2023,7 +2022,7 @@ sys_init_module(void __user *umod,
 	mod->module_init = NULL;
 	mod->init_size = 0;
 	mod->init_text_size = 0;
-	mutex_unlock(&module_mutex);
+	up(&module_mutex);
 	return 0;
 }
@@ -2113,7 +2112,7 @@ struct module *module_get_kallsym(unsigned int symnum,
 {
 	struct module *mod;
-	mutex_lock(&module_mutex);
+	down(&module_mutex);
 	list_for_each_entry(mod, &modules, list) {
 		if (symnum < mod->num_symtab) {
 			*value = mod->symtab[symnum].st_value;
@@ -2121,12 +2120,12 @@ struct module *module_get_kallsym(unsigned int symnum,
 			strncpy(namebuf,
 				mod->strtab + mod->symtab[symnum].st_name,
 				127);
-			mutex_unlock(&module_mutex);
+			up(&module_mutex);
 			return mod;
 		}
 		symnum -= mod->num_symtab;
 	}
-	mutex_unlock(&module_mutex);
+	up(&module_mutex);
 	return NULL;
 }
@@ -2169,7 +2168,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	struct list_head *i;
 	loff_t n = 0;
-	mutex_lock(&module_mutex);
+	down(&module_mutex);
 	list_for_each(i, &modules) {
 		if (n++ == *pos)
 			break;
@@ -2190,7 +2189,7 @@ static void *m_next(struct seq_file *m, void *p, loff_t *pos)
 static void m_stop(struct seq_file *m, void *p)
 {
-	mutex_unlock(&module_mutex);
+	up(&module_mutex);
 }
 static int m_show(struct seq_file *m, void *p)
diff --git a/trunk/kernel/panic.c b/trunk/kernel/panic.c
index acd95adddb93..126dc43f1c74 100644
--- a/trunk/kernel/panic.c
+++ b/trunk/kernel/panic.c
@@ -20,13 +20,10 @@
 #include
 #include
+int panic_timeout;
 int panic_on_oops;
 int tainted;
-static int pause_on_oops;
-static int pause_on_oops_flag;
-static DEFINE_SPINLOCK(pause_on_oops_lock);
-int panic_timeout;
 EXPORT_SYMBOL(panic_timeout);
 struct notifier_block *panic_notifier_list;
@@ -177,95 +174,3 @@ void add_taint(unsigned flag)
 {
 	tainted |= flag;
 }
 EXPORT_SYMBOL(add_taint);
-
-static int __init pause_on_oops_setup(char *str)
-{
-	pause_on_oops = simple_strtoul(str, NULL, 0);
-	return 1;
-}
-__setup("pause_on_oops=", pause_on_oops_setup);
-
-static void spin_msec(int msecs)
-{
-	int i;
-
for (i = 0; i < msecs; i++) { - touch_nmi_watchdog(); - mdelay(1); - } -} - -/* - * It just happens that oops_enter() and oops_exit() are identically - * implemented... - */ -static void do_oops_enter_exit(void) -{ - unsigned long flags; - static int spin_counter; - - if (!pause_on_oops) - return; - - spin_lock_irqsave(&pause_on_oops_lock, flags); - if (pause_on_oops_flag == 0) { - /* This CPU may now print the oops message */ - pause_on_oops_flag = 1; - } else { - /* We need to stall this CPU */ - if (!spin_counter) { - /* This CPU gets to do the counting */ - spin_counter = pause_on_oops; - do { - spin_unlock(&pause_on_oops_lock); - spin_msec(MSEC_PER_SEC); - spin_lock(&pause_on_oops_lock); - } while (--spin_counter); - pause_on_oops_flag = 0; - } else { - /* This CPU waits for a different one */ - while (spin_counter) { - spin_unlock(&pause_on_oops_lock); - spin_msec(1); - spin_lock(&pause_on_oops_lock); - } - } - } - spin_unlock_irqrestore(&pause_on_oops_lock, flags); -} - -/* - * Return true if the calling CPU is allowed to print oops-related info. This - * is a bit racy.. - */ -int oops_may_print(void) -{ - return pause_on_oops_flag == 0; -} - -/* - * Called when the architecture enters its oops handler, before it prints - * anything. If this is the first CPU to oops, and it's oopsing the first time - * then let it proceed. - * - * This is all enabled by the pause_on_oops kernel boot option. We do all this - * to ensure that oopses don't scroll off the screen. It has the side-effect - * of preventing later-oopsing CPUs from mucking up the display, too. - * - * It turns out that the CPU which is allowed to print ends up pausing for the - * right duration, whereas all the other CPUs pause for twice as long: once in - * oops_enter(), once in oops_exit(). - */ -void oops_enter(void) -{ - do_oops_enter_exit(); -} - -/* - * Called when the architecture exits its oops handler, after printing - * everything. 
- */ -void oops_exit(void) -{ - do_oops_enter_exit(); -} diff --git a/trunk/kernel/posix-timers.c b/trunk/kernel/posix-timers.c index 9944379360b5..fa895fc2ecf5 100644 --- a/trunk/kernel/posix-timers.c +++ b/trunk/kernel/posix-timers.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include diff --git a/trunk/kernel/power/Makefile b/trunk/kernel/power/Makefile index 8d0af3d37a4b..04be7d0d96a7 100644 --- a/trunk/kernel/power/Makefile +++ b/trunk/kernel/power/Makefile @@ -5,7 +5,7 @@ endif obj-y := main.o process.o console.o obj-$(CONFIG_PM_LEGACY) += pm.o -obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o swap.o user.o +obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o obj-$(CONFIG_SUSPEND_SMP) += smp.o diff --git a/trunk/kernel/power/disk.c b/trunk/kernel/power/disk.c index 81d4d982f3f0..0b43847dc980 100644 --- a/trunk/kernel/power/disk.c +++ b/trunk/kernel/power/disk.c @@ -22,6 +22,17 @@ #include "power.h" +extern suspend_disk_method_t pm_disk_mode; + +extern int swsusp_shrink_memory(void); +extern int swsusp_suspend(void); +extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages); +extern int swsusp_check(void); +extern int swsusp_read(struct pbe **pblist_ptr); +extern void swsusp_close(void); +extern int swsusp_resume(void); + + static int noresume = 0; char resume_file[256] = CONFIG_PM_STD_PARTITION; dev_t swsusp_resume_device; @@ -59,6 +70,10 @@ static void power_down(suspend_disk_method_t mode) while(1); } + +static int in_suspend __nosavedata = 0; + + static inline void platform_finish(void) { if (pm_disk_mode == PM_DISK_PLATFORM) { @@ -72,6 +87,7 @@ static int prepare_processes(void) int error; pm_prepare_console(); + sys_sync(); disable_nonboot_cpus(); if (freeze_processes()) { @@ -129,7 +145,7 @@ int pm_suspend_disk(void) if (in_suspend) { device_resume(); pr_debug("PM: writing image.\n"); - error = swsusp_write(); + error = swsusp_write(pagedir_nosave, nr_copy_pages); if (!error) power_down(pm_disk_mode); else { @@ -200,7 +216,7 @@ static int software_resume(void) pr_debug("PM: Reading swsusp image.\n"); - if ((error = swsusp_read())) { + if ((error = swsusp_read(&pagedir_nosave))) { swsusp_free(); goto Thaw; } diff --git a/trunk/kernel/power/main.c b/trunk/kernel/power/main.c index ee371f50ccaa..9cb235cba4a9 100644 --- a/trunk/kernel/power/main.c +++ b/trunk/kernel/power/main.c @@ -103,7 +103,7 @@ static int suspend_prepare(suspend_state_t state) } -int suspend_enter(suspend_state_t state) +static int suspend_enter(suspend_state_t state) { int error = 0; unsigned long flags; diff --git a/trunk/kernel/power/pm.c b/trunk/kernel/power/pm.c index 0f6908cce1dd..33c508e857dd 100644 --- a/trunk/kernel/power/pm.c +++ b/trunk/kernel/power/pm.c @@ -25,7 +25,6 @@ #include #include #include -#include int pm_active; @@ -41,7 +40,7 @@ int pm_active; * until a resume but that will be fine. 
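Taken together, the disk.c hunks above put pm_suspend_disk() into roughly the following order of operations (a rough sketch; error unwinding, thawing, and the platform_finish()/PM_DISK_PLATFORM cases are elided):

        static int suspend_to_disk_sketch(void)
        {
                int error;

                pm_prepare_console();
                sys_sync();                     /* now done inside prepare_processes() */
                disable_nonboot_cpus();
                if (freeze_processes())
                        return -EBUSY;          /* some tasks would not stop */

                error = swsusp_suspend();       /* take the atomic snapshot */
                if (error)
                        return error;
                if (in_suspend) {
                        device_resume();
                        error = swsusp_write(pagedir_nosave, nr_copy_pages);
                        if (!error)
                                power_down(pm_disk_mode);
                }
                return error;
        }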
*/ -static DEFINE_MUTEX(pm_devs_lock); +static DECLARE_MUTEX(pm_devs_lock); static LIST_HEAD(pm_devs); /** @@ -68,9 +67,9 @@ struct pm_dev *pm_register(pm_dev_t type, dev->id = id; dev->callback = callback; - mutex_lock(&pm_devs_lock); + down(&pm_devs_lock); list_add(&dev->entry, &pm_devs); - mutex_unlock(&pm_devs_lock); + up(&pm_devs_lock); } return dev; } @@ -86,9 +85,9 @@ struct pm_dev *pm_register(pm_dev_t type, void pm_unregister(struct pm_dev *dev) { if (dev) { - mutex_lock(&pm_devs_lock); + down(&pm_devs_lock); list_del(&dev->entry); - mutex_unlock(&pm_devs_lock); + up(&pm_devs_lock); kfree(dev); } @@ -119,7 +118,7 @@ void pm_unregister_all(pm_callback callback) if (!callback) return; - mutex_lock(&pm_devs_lock); + down(&pm_devs_lock); entry = pm_devs.next; while (entry != &pm_devs) { struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); @@ -127,7 +126,7 @@ void pm_unregister_all(pm_callback callback) if (dev->callback == callback) __pm_unregister(dev); } - mutex_unlock(&pm_devs_lock); + up(&pm_devs_lock); } /** @@ -235,7 +234,7 @@ int pm_send_all(pm_request_t rqst, void *data) { struct list_head *entry; - mutex_lock(&pm_devs_lock); + down(&pm_devs_lock); entry = pm_devs.next; while (entry != &pm_devs) { struct pm_dev *dev = list_entry(entry, struct pm_dev, entry); @@ -247,13 +246,13 @@ int pm_send_all(pm_request_t rqst, void *data) */ if (rqst == PM_SUSPEND) pm_undo_all(dev); - mutex_unlock(&pm_devs_lock); + up(&pm_devs_lock); return status; } } entry = entry->next; } - mutex_unlock(&pm_devs_lock); + up(&pm_devs_lock); return 0; } diff --git a/trunk/kernel/power/power.h b/trunk/kernel/power/power.h index f06f12f21767..388dba680841 100644 --- a/trunk/kernel/power/power.h +++ b/trunk/kernel/power/power.h @@ -8,7 +8,6 @@ struct swsusp_info { int cpus; unsigned long image_pages; unsigned long pages; - unsigned long size; } __attribute__((aligned(PAGE_SIZE))); @@ -38,79 +37,21 @@ extern struct subsystem power_subsys; /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; +extern unsigned int nr_copy_pages; extern struct pbe *pagedir_nosave; /* Preferred image size in bytes (default 500 MB) */ extern unsigned long image_size; -extern int in_suspend; -extern dev_t swsusp_resume_device; extern asmlinkage int swsusp_arch_suspend(void); extern asmlinkage int swsusp_arch_resume(void); extern unsigned int count_data_pages(void); - -struct snapshot_handle { - loff_t offset; - unsigned int page; - unsigned int page_offset; - unsigned int prev; - struct pbe *pbe; - void *buffer; - unsigned int buf_offset; -}; - -#define data_of(handle) ((handle).buffer + (handle).buf_offset) - -extern int snapshot_read_next(struct snapshot_handle *handle, size_t count); -extern int snapshot_write_next(struct snapshot_handle *handle, size_t count); -int snapshot_image_loaded(struct snapshot_handle *handle); - -#define SNAPSHOT_IOC_MAGIC '3' -#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1) -#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2) -#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *) -#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4) -#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5) -#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long) -#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *) -#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *) -#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9) -#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int) -#define 
SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11) -#define SNAPSHOT_IOC_MAXNR 11 - -/** - * The bitmap is used for tracing allocated swap pages - * - * The entire bitmap consists of a number of bitmap_page - * structures linked with the help of the .next member. - * Thus each page can be allocated individually, so we only - * need to make 0-order memory allocations to create - * the bitmap. - */ - -#define BITMAP_PAGE_SIZE (PAGE_SIZE - sizeof(void *)) -#define BITMAP_PAGE_CHUNKS (BITMAP_PAGE_SIZE / sizeof(long)) -#define BITS_PER_CHUNK (sizeof(long) * 8) -#define BITMAP_PAGE_BITS (BITMAP_PAGE_CHUNKS * BITS_PER_CHUNK) - -struct bitmap_page { - unsigned long chunks[BITMAP_PAGE_CHUNKS]; - struct bitmap_page *next; -}; - -extern void free_bitmap(struct bitmap_page *bitmap); -extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits); -extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap); -extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap); - -extern int swsusp_check(void); -extern int swsusp_shrink_memory(void); +extern void free_pagedir(struct pbe *pblist); +extern void release_eaten_pages(void); +extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed); extern void swsusp_free(void); -extern int swsusp_suspend(void); -extern int swsusp_resume(void); -extern int swsusp_read(void); -extern int swsusp_write(void); -extern void swsusp_close(void); -extern int suspend_enter(suspend_state_t state); +extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed); +extern unsigned int snapshot_nr_pages(void); +extern struct pbe *snapshot_pblist(void); +extern void snapshot_pblist_set(struct pbe *pblist); diff --git a/trunk/kernel/power/process.c b/trunk/kernel/power/process.c index 8ac7c35fad77..28de118f7a0b 100644 --- a/trunk/kernel/power/process.c +++ b/trunk/kernel/power/process.c @@ -12,12 +12,11 @@ #include #include #include -#include /* * Timeout for stopping processes */ -#define TIMEOUT (20 * HZ) +#define TIMEOUT (6 * HZ) static inline int freezeable(struct task_struct * p) @@ -55,62 +54,38 @@ void refrigerator(void) current->state = save; } -static inline void freeze_process(struct task_struct *p) -{ - unsigned long flags; - - if (!freezing(p)) { - freeze(p); - spin_lock_irqsave(&p->sighand->siglock, flags); - signal_wake_up(p, 0); - spin_unlock_irqrestore(&p->sighand->siglock, flags); - } -} - /* 0 = success, else # of processes that we failed to stop */ int freeze_processes(void) { - int todo, nr_user, user_frozen; + int todo; unsigned long start_time; struct task_struct *g, *p; unsigned long flags; printk( "Stopping tasks: " ); start_time = jiffies; - user_frozen = 0; do { - nr_user = todo = 0; + todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { if (!freezeable(p)) continue; if (frozen(p)) continue; - if (p->mm && !(p->flags & PF_BORROWED_MM)) { - /* The task is a user-space one. 
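The freeze_processes() rewrite that starts above and continues below is a bounded retry loop; its skeleton (task iteration condensed to a comment; illustrative only):

        int freeze_sketch(void)
        {
                unsigned long start_time = jiffies;
                int todo;

                do {
                        todo = 0;
                        read_lock(&tasklist_lock);
                        /* for every freezeable, not-yet-frozen task p:
                         *   freeze(p); signal_wake_up(p, 0); todo++;  */
                        read_unlock(&tasklist_lock);
                        yield();                /* yield is okay here */
                        if (todo && time_after(jiffies, start_time + TIMEOUT))
                                break;          /* TIMEOUT is 6*HZ after this patch */
                } while (todo);
                return todo;                    /* 0 = success */
        }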
- * Freeze it unless there's a vfork completion - * pending - */ - if (!p->vfork_done) - freeze_process(p); - nr_user++; - } else { - /* Freeze only if the user space is frozen */ - if (user_frozen) - freeze_process(p); - todo++; - } + + freeze(p); + spin_lock_irqsave(&p->sighand->siglock, flags); + signal_wake_up(p, 0); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); - todo += nr_user; - if (!user_frozen && !nr_user) { - sys_sync(); - start_time = jiffies; - } - user_frozen = !nr_user; yield(); /* Yield is okay here */ - if (todo && time_after(jiffies, start_time + TIMEOUT)) + if (todo && time_after(jiffies, start_time + TIMEOUT)) { + printk( "\n" ); + printk(KERN_ERR " stopping tasks failed (%d tasks remaining)\n", todo ); break; + } } while(todo); /* This does not unfreeze processes that are already frozen @@ -119,14 +94,8 @@ int freeze_processes(void) * but it cleans up leftover PF_FREEZE requests. */ if (todo) { - printk( "\n" ); - printk(KERN_ERR " stopping tasks timed out " - "after %d seconds (%d tasks remaining):\n", - TIMEOUT / HZ, todo); read_lock(&tasklist_lock); - do_each_thread(g, p) { - if (freezeable(p) && !frozen(p)) - printk(KERN_ERR " %s\n", p->comm); + do_each_thread(g, p) if (freezing(p)) { pr_debug(" clean up: %s\n", p->comm); p->flags &= ~PF_FREEZE; @@ -134,7 +103,7 @@ int freeze_processes(void) recalc_sigpending_tsk(p); spin_unlock_irqrestore(&p->sighand->siglock, flags); } - } while_each_thread(g, p); + while_each_thread(g, p); read_unlock(&tasklist_lock); return todo; } diff --git a/trunk/kernel/power/snapshot.c b/trunk/kernel/power/snapshot.c index c5863d02c89e..8d5a5986d621 100644 --- a/trunk/kernel/power/snapshot.c +++ b/trunk/kernel/power/snapshot.c @@ -10,7 +10,6 @@ */ -#include #include #include #include @@ -35,9 +34,7 @@ #include "power.h" struct pbe *pagedir_nosave; -static unsigned int nr_copy_pages; -static unsigned int nr_meta_pages; -static unsigned long *buffer; +unsigned int nr_copy_pages; #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void) @@ -83,7 +80,7 @@ static int save_highmem_zone(struct zone *zone) void *kaddr; unsigned long pfn = zone_pfn + zone->zone_start_pfn; - if (!(pfn%10000)) + if (!(pfn%1000)) printk("."); if (!pfn_valid(pfn)) continue; @@ -122,15 +119,13 @@ int save_highmem(void) struct zone *zone; int res = 0; - pr_debug("swsusp: Saving Highmem"); - drain_local_pages(); + pr_debug("swsusp: Saving Highmem\n"); for_each_zone (zone) { if (is_highmem(zone)) res = save_highmem_zone(zone); if (res) return res; } - printk("\n"); return 0; } @@ -240,7 +235,7 @@ static void copy_data_pages(struct pbe *pblist) * free_pagedir - free pages allocated with alloc_pagedir() */ -static void free_pagedir(struct pbe *pblist) +void free_pagedir(struct pbe *pblist) { struct pbe *pbe; @@ -306,7 +301,7 @@ struct eaten_page { static struct eaten_page *eaten_pages = NULL; -static void release_eaten_pages(void) +void release_eaten_pages(void) { struct eaten_page *p, *q; @@ -381,6 +376,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed if (!nr_pages) return NULL; + pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages); pblist = alloc_image_page(gfp_mask, safe_needed); /* FIXME: rewrite this ugly loop */ for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages; @@ -392,7 +388,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed free_pagedir(pblist); pblist = NULL; } else - create_pbe_list(pblist, nr_pages); 
+ create_pbe_list(pblist, nr_pages); return pblist; } @@ -418,10 +414,6 @@ void swsusp_free(void) } } } - nr_copy_pages = 0; - nr_meta_pages = 0; - pagedir_nosave = NULL; - buffer = NULL; } @@ -445,7 +437,7 @@ static int enough_free_mem(unsigned int nr_pages) (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); } -static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed) +int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed) { struct pbe *p; @@ -512,318 +504,7 @@ asmlinkage int swsusp_save(void) */ nr_copy_pages = nr_pages; - nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT; printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages); return 0; } - -static void init_header(struct swsusp_info *info) -{ - memset(info, 0, sizeof(struct swsusp_info)); - info->version_code = LINUX_VERSION_CODE; - info->num_physpages = num_physpages; - memcpy(&info->uts, &system_utsname, sizeof(system_utsname)); - info->cpus = num_online_cpus(); - info->image_pages = nr_copy_pages; - info->pages = nr_copy_pages + nr_meta_pages + 1; - info->size = info->pages; - info->size <<= PAGE_SHIFT; -} - -/** - * pack_orig_addresses - the .orig_address fields of the PBEs from the - * list starting at @pbe are stored in the array @buf[] (1 page) - */ - -static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe) -{ - int j; - - for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { - buf[j] = pbe->orig_address; - pbe = pbe->next; - } - if (!pbe) - for (; j < PAGE_SIZE / sizeof(long); j++) - buf[j] = 0; - return pbe; -} - -/** - * snapshot_read_next - used for reading the system memory snapshot. - * - * On the first call to it @handle should point to a zeroed - * snapshot_handle structure. The structure gets updated and a pointer - * to it should be passed to this function every next time. - * - * The @count parameter should contain the number of bytes the caller - * wants to read from the snapshot. It must not be zero. - * - * On success the function returns a positive number. Then, the caller - * is allowed to read up to the returned number of bytes from the memory - * location computed by the data_of() macro. The number returned - * may be smaller than @count, but this only happens if the read would - * cross a page boundary otherwise. - * - * The function returns 0 to indicate the end of data stream condition, - * and a negative number is returned on error. In such cases the - * structure pointed to by @handle is not updated and should not be used - * any more. 
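The "never cross a page boundary" rule stated in the removed documentation above comes down to a few lines of offset arithmetic (the same lines are visible in the removed snapshot_read_next() below); in isolation:

        static size_t clip_count_sketch(struct snapshot_handle *handle, size_t count)
        {
                if (handle->page_offset + count >= PAGE_SIZE) {
                        count = PAGE_SIZE - handle->page_offset; /* stop at page end */
                        handle->page_offset = 0;
                        handle->page++;                          /* advance a page */
                } else {
                        handle->page_offset += count;
                }
                handle->offset += count;        /* total bytes handed to the caller */
                return count;
        }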
- */ - -int snapshot_read_next(struct snapshot_handle *handle, size_t count) -{ - if (handle->page > nr_meta_pages + nr_copy_pages) - return 0; - if (!buffer) { - /* This makes the buffer be freed by swsusp_free() */ - buffer = alloc_image_page(GFP_ATOMIC, 0); - if (!buffer) - return -ENOMEM; - } - if (!handle->offset) { - init_header((struct swsusp_info *)buffer); - handle->buffer = buffer; - handle->pbe = pagedir_nosave; - } - if (handle->prev < handle->page) { - if (handle->page <= nr_meta_pages) { - handle->pbe = pack_orig_addresses(buffer, handle->pbe); - if (!handle->pbe) - handle->pbe = pagedir_nosave; - } else { - handle->buffer = (void *)handle->pbe->address; - handle->pbe = handle->pbe->next; - } - handle->prev = handle->page; - } - handle->buf_offset = handle->page_offset; - if (handle->page_offset + count >= PAGE_SIZE) { - count = PAGE_SIZE - handle->page_offset; - handle->page_offset = 0; - handle->page++; - } else { - handle->page_offset += count; - } - handle->offset += count; - return count; -} - -/** - * mark_unsafe_pages - mark the pages that cannot be used for storing - * the image during resume, because they conflict with the pages that - * had been used before suspend - */ - -static int mark_unsafe_pages(struct pbe *pblist) -{ - struct zone *zone; - unsigned long zone_pfn; - struct pbe *p; - - if (!pblist) /* a sanity check */ - return -EINVAL; - - /* Clear page flags */ - for_each_zone (zone) { - for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) - if (pfn_valid(zone_pfn + zone->zone_start_pfn)) - ClearPageNosaveFree(pfn_to_page(zone_pfn + - zone->zone_start_pfn)); - } - - /* Mark orig addresses */ - for_each_pbe (p, pblist) { - if (virt_addr_valid(p->orig_address)) - SetPageNosaveFree(virt_to_page(p->orig_address)); - else - return -EFAULT; - } - - return 0; -} - -static void copy_page_backup_list(struct pbe *dst, struct pbe *src) -{ - /* We assume both lists contain the same number of elements */ - while (src) { - dst->orig_address = src->orig_address; - dst = dst->next; - src = src->next; - } -} - -static int check_header(struct swsusp_info *info) -{ - char *reason = NULL; - - if (info->version_code != LINUX_VERSION_CODE) - reason = "kernel version"; - if (info->num_physpages != num_physpages) - reason = "memory size"; - if (strcmp(info->uts.sysname,system_utsname.sysname)) - reason = "system type"; - if (strcmp(info->uts.release,system_utsname.release)) - reason = "kernel release"; - if (strcmp(info->uts.version,system_utsname.version)) - reason = "version"; - if (strcmp(info->uts.machine,system_utsname.machine)) - reason = "machine"; - if (reason) { - printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); - return -EPERM; - } - return 0; -} - -/** - * load header - check the image header and copy data from it - */ - -static int load_header(struct snapshot_handle *handle, - struct swsusp_info *info) -{ - int error; - struct pbe *pblist; - - error = check_header(info); - if (!error) { - pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0); - if (!pblist) - return -ENOMEM; - pagedir_nosave = pblist; - handle->pbe = pblist; - nr_copy_pages = info->image_pages; - nr_meta_pages = info->pages - info->image_pages - 1; - } - return error; -} - -/** - * unpack_orig_addresses - copy the elements of @buf[] (1 page) to - * the PBEs in the list starting at @pbe - */ - -static inline struct pbe *unpack_orig_addresses(unsigned long *buf, - struct pbe *pbe) -{ - int j; - - for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { - pbe->orig_address = buf[j]; - 
pbe = pbe->next; - } - return pbe; -} - -/** - * create_image - use metadata contained in the PBE list - * pointed to by pagedir_nosave to mark the pages that will - * be overwritten in the process of restoring the system - * memory state from the image and allocate memory for - * the image avoiding these pages - */ - -static int create_image(struct snapshot_handle *handle) -{ - int error = 0; - struct pbe *p, *pblist; - - p = pagedir_nosave; - error = mark_unsafe_pages(p); - if (!error) { - pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1); - if (pblist) - copy_page_backup_list(pblist, p); - free_pagedir(p); - if (!pblist) - error = -ENOMEM; - } - if (!error) - error = alloc_data_pages(pblist, GFP_ATOMIC, 1); - if (!error) { - release_eaten_pages(); - pagedir_nosave = pblist; - } else { - pagedir_nosave = NULL; - handle->pbe = NULL; - nr_copy_pages = 0; - nr_meta_pages = 0; - } - return error; -} - -/** - * snapshot_write_next - used for writing the system memory snapshot. - * - * On the first call to it @handle should point to a zeroed - * snapshot_handle structure. The structure gets updated and a pointer - * to it should be passed to this function every next time. - * - * The @count parameter should contain the number of bytes the caller - * wants to write to the image. It must not be zero. - * - * On success the function returns a positive number. Then, the caller - * is allowed to write up to the returned number of bytes to the memory - * location computed by the data_of() macro. The number returned - * may be smaller than @count, but this only happens if the write would - * cross a page boundary otherwise. - * - * The function returns 0 to indicate the "end of file" condition, - * and a negative number is returned on error. In such cases the - * structure pointed to by @handle is not updated and should not be used - * any more. 
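The write side mirrors the read side; a sketch under the same assumptions, with produce() standing in for whatever supplies the data (compare load_image() in the deleted swap.c below):

    /* Sketch: feed image data back through snapshot_write_next().
     * A positive return value is the number of bytes writable at
     * data_of(*handle); 0 means "end of file", negative is an error. */
    static int feed_snapshot(struct snapshot_handle *handle)
    {
            int ret;

            memset(handle, 0, sizeof(*handle));     /* zeroed on first call */
            while ((ret = snapshot_write_next(handle, PAGE_SIZE)) > 0)
                    produce(data_of(*handle), ret); /* hypothetical source */
            return ret;
    }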
- */ - -int snapshot_write_next(struct snapshot_handle *handle, size_t count) -{ - int error = 0; - - if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages) - return 0; - if (!buffer) { - /* This makes the buffer be freed by swsusp_free() */ - buffer = alloc_image_page(GFP_ATOMIC, 0); - if (!buffer) - return -ENOMEM; - } - if (!handle->offset) - handle->buffer = buffer; - if (handle->prev < handle->page) { - if (!handle->prev) { - error = load_header(handle, (struct swsusp_info *)buffer); - if (error) - return error; - } else if (handle->prev <= nr_meta_pages) { - handle->pbe = unpack_orig_addresses(buffer, handle->pbe); - if (!handle->pbe) { - error = create_image(handle); - if (error) - return error; - handle->pbe = pagedir_nosave; - handle->buffer = (void *)handle->pbe->address; - } - } else { - handle->pbe = handle->pbe->next; - handle->buffer = (void *)handle->pbe->address; - } - handle->prev = handle->page; - } - handle->buf_offset = handle->page_offset; - if (handle->page_offset + count >= PAGE_SIZE) { - count = PAGE_SIZE - handle->page_offset; - handle->page_offset = 0; - handle->page++; - } else { - handle->page_offset += count; - } - handle->offset += count; - return count; -} - -int snapshot_image_loaded(struct snapshot_handle *handle) -{ - return !(!handle->pbe || handle->pbe->next || !nr_copy_pages || - handle->page <= nr_meta_pages + nr_copy_pages); -} diff --git a/trunk/kernel/power/swap.c b/trunk/kernel/power/swap.c deleted file mode 100644 index 9177f3f73a6c..000000000000 --- a/trunk/kernel/power/swap.c +++ /dev/null @@ -1,544 +0,0 @@ -/* - * linux/kernel/power/swap.c - * - * This file provides functions for reading the suspend image from - * and writing it to a swap partition. - * - * Copyright (C) 1998,2001-2005 Pavel Machek - * Copyright (C) 2006 Rafael J. Wysocki - * - * This file is released under the GPLv2. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "power.h" - -extern char resume_file[]; - -#define SWSUSP_SIG "S1SUSPEND" - -static struct swsusp_header { - char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)]; - swp_entry_t image; - char orig_sig[10]; - char sig[10]; -} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; - -/* - * Saving part... - */ - -static unsigned short root_swap = 0xffff; - -static int mark_swapfiles(swp_entry_t start) -{ - int error; - - rw_swap_page_sync(READ, - swp_entry(root_swap, 0), - virt_to_page((unsigned long)&swsusp_header)); - if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || - !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { - memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); - memcpy(swsusp_header.sig,SWSUSP_SIG, 10); - swsusp_header.image = start; - error = rw_swap_page_sync(WRITE, - swp_entry(root_swap, 0), - virt_to_page((unsigned long) - &swsusp_header)); - } else { - pr_debug("swsusp: Partition is not swap space.\n"); - error = -ENODEV; - } - return error; -} - -/** - * swsusp_swap_check - check if the resume device is a swap device - * and get its index (if so) - */ - -static int swsusp_swap_check(void) /* This is called before saving image */ -{ - int res = swap_type_of(swsusp_resume_device); - - if (res >= 0) { - root_swap = res; - return 0; - } - return res; -} - -/** - * write_page - Write one page to given swap location. - * @buf: Address we're writing. - * @offset: Offset of the swap page we're writing to. 
- */ - -static int write_page(void *buf, unsigned long offset) -{ - swp_entry_t entry; - int error = -ENOSPC; - - if (offset) { - entry = swp_entry(root_swap, offset); - error = rw_swap_page_sync(WRITE, entry, virt_to_page(buf)); - } - return error; -} - -/* - * The swap map is a data structure used for keeping track of each page - * written to a swap partition. It consists of many swap_map_page - * structures that contain each an array of MAP_PAGE_SIZE swap entries. - * These structures are stored on the swap and linked together with the - * help of the .next_swap member. - * - * The swap map is created during suspend. The swap map pages are - * allocated and populated one at a time, so we only need one memory - * page to set up the entire structure. - * - * During resume we also only need to use one swap_map_page structure - * at a time. - */ - -#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(long) - 1) - -struct swap_map_page { - unsigned long entries[MAP_PAGE_ENTRIES]; - unsigned long next_swap; -}; - -/** - * The swap_map_handle structure is used for handling swap in - * a file-alike way - */ - -struct swap_map_handle { - struct swap_map_page *cur; - unsigned long cur_swap; - struct bitmap_page *bitmap; - unsigned int k; -}; - -static void release_swap_writer(struct swap_map_handle *handle) -{ - if (handle->cur) - free_page((unsigned long)handle->cur); - handle->cur = NULL; - if (handle->bitmap) - free_bitmap(handle->bitmap); - handle->bitmap = NULL; -} - -static int get_swap_writer(struct swap_map_handle *handle) -{ - handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); - if (!handle->cur) - return -ENOMEM; - handle->bitmap = alloc_bitmap(count_swap_pages(root_swap, 0)); - if (!handle->bitmap) { - release_swap_writer(handle); - return -ENOMEM; - } - handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap); - if (!handle->cur_swap) { - release_swap_writer(handle); - return -ENOSPC; - } - handle->k = 0; - return 0; -} - -static int swap_write_page(struct swap_map_handle *handle, void *buf) -{ - int error; - unsigned long offset; - - if (!handle->cur) - return -EINVAL; - offset = alloc_swap_page(root_swap, handle->bitmap); - error = write_page(buf, offset); - if (error) - return error; - handle->cur->entries[handle->k++] = offset; - if (handle->k >= MAP_PAGE_ENTRIES) { - offset = alloc_swap_page(root_swap, handle->bitmap); - if (!offset) - return -ENOSPC; - handle->cur->next_swap = offset; - error = write_page(handle->cur, handle->cur_swap); - if (error) - return error; - memset(handle->cur, 0, PAGE_SIZE); - handle->cur_swap = offset; - handle->k = 0; - } - return 0; -} - -static int flush_swap_writer(struct swap_map_handle *handle) -{ - if (handle->cur && handle->cur_swap) - return write_page(handle->cur, handle->cur_swap); - else - return -EINVAL; -} - -/** - * save_image - save the suspend image data - */ - -static int save_image(struct swap_map_handle *handle, - struct snapshot_handle *snapshot, - unsigned int nr_pages) -{ - unsigned int m; - int ret; - int error = 0; - - printk("Saving image data pages (%u pages) ... 
", nr_pages); - m = nr_pages / 100; - if (!m) - m = 1; - nr_pages = 0; - do { - ret = snapshot_read_next(snapshot, PAGE_SIZE); - if (ret > 0) { - error = swap_write_page(handle, data_of(*snapshot)); - if (error) - break; - if (!(nr_pages % m)) - printk("\b\b\b\b%3d%%", nr_pages / m); - nr_pages++; - } - } while (ret > 0); - if (!error) - printk("\b\b\b\bdone\n"); - return error; -} - -/** - * enough_swap - Make sure we have enough swap to save the image. - * - * Returns TRUE or FALSE after checking the total amount of swap - * space avaiable from the resume partition. - */ - -static int enough_swap(unsigned int nr_pages) -{ - unsigned int free_swap = count_swap_pages(root_swap, 1); - - pr_debug("swsusp: free swap pages: %u\n", free_swap); - return free_swap > (nr_pages + PAGES_FOR_IO + - (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); -} - -/** - * swsusp_write - Write entire image and metadata. - * - * It is important _NOT_ to umount filesystems at this point. We want - * them synced (in case something goes wrong) but we DO not want to mark - * filesystem clean: it is not. (And it does not matter, if we resume - * correctly, we'll mark system clean, anyway.) - */ - -int swsusp_write(void) -{ - struct swap_map_handle handle; - struct snapshot_handle snapshot; - struct swsusp_info *header; - unsigned long start; - int error; - - if ((error = swsusp_swap_check())) { - printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n"); - return error; - } - memset(&snapshot, 0, sizeof(struct snapshot_handle)); - error = snapshot_read_next(&snapshot, PAGE_SIZE); - if (error < PAGE_SIZE) - return error < 0 ? error : -EFAULT; - header = (struct swsusp_info *)data_of(snapshot); - if (!enough_swap(header->pages)) { - printk(KERN_ERR "swsusp: Not enough free swap\n"); - return -ENOSPC; - } - error = get_swap_writer(&handle); - if (!error) { - start = handle.cur_swap; - error = swap_write_page(&handle, header); - } - if (!error) - error = save_image(&handle, &snapshot, header->pages - 1); - if (!error) { - flush_swap_writer(&handle); - printk("S"); - error = mark_swapfiles(swp_entry(root_swap, start)); - printk("|\n"); - } - if (error) - free_all_swap_pages(root_swap, handle.bitmap); - release_swap_writer(&handle); - return error; -} - -/* - * Using bio to read from swap. - * This code requires a bit more work than just using buffer heads - * but, it is the recommended way for 2.5/2.6. - * The following are to signal the beginning and end of I/O. Bios - * finish asynchronously, while we want them to happen synchronously. - * A simple atomic_t, and a wait loop take care of this problem. - */ - -static atomic_t io_done = ATOMIC_INIT(0); - -static int end_io(struct bio *bio, unsigned int num, int err) -{ - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - panic("I/O error reading memory image"); - atomic_set(&io_done, 0); - return 0; -} - -static struct block_device *resume_bdev; - -/** - * submit - submit BIO request. - * @rw: READ or WRITE. - * @off physical offset of page. - * @page: page we're reading or writing. - * - * Straight from the textbook - allocate and initialize the bio. - * If we're writing, make sure the page is marked as dirty. - * Then submit it and wait. 
- */ - -static int submit(int rw, pgoff_t page_off, void *page) -{ - int error = 0; - struct bio *bio; - - bio = bio_alloc(GFP_ATOMIC, 1); - if (!bio) - return -ENOMEM; - bio->bi_sector = page_off * (PAGE_SIZE >> 9); - bio->bi_bdev = resume_bdev; - bio->bi_end_io = end_io; - - if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) { - printk("swsusp: ERROR: adding page to bio at %ld\n",page_off); - error = -EFAULT; - goto Done; - } - - atomic_set(&io_done, 1); - submit_bio(rw | (1 << BIO_RW_SYNC), bio); - while (atomic_read(&io_done)) - yield(); - if (rw == READ) - bio_set_pages_dirty(bio); - Done: - bio_put(bio); - return error; -} - -static int bio_read_page(pgoff_t page_off, void *page) -{ - return submit(READ, page_off, page); -} - -static int bio_write_page(pgoff_t page_off, void *page) -{ - return submit(WRITE, page_off, page); -} - -/** - * The following functions allow us to read data using a swap map - * in a file-alike way - */ - -static void release_swap_reader(struct swap_map_handle *handle) -{ - if (handle->cur) - free_page((unsigned long)handle->cur); - handle->cur = NULL; -} - -static int get_swap_reader(struct swap_map_handle *handle, - swp_entry_t start) -{ - int error; - - if (!swp_offset(start)) - return -EINVAL; - handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); - if (!handle->cur) - return -ENOMEM; - error = bio_read_page(swp_offset(start), handle->cur); - if (error) { - release_swap_reader(handle); - return error; - } - handle->k = 0; - return 0; -} - -static int swap_read_page(struct swap_map_handle *handle, void *buf) -{ - unsigned long offset; - int error; - - if (!handle->cur) - return -EINVAL; - offset = handle->cur->entries[handle->k]; - if (!offset) - return -EFAULT; - error = bio_read_page(offset, buf); - if (error) - return error; - if (++handle->k >= MAP_PAGE_ENTRIES) { - handle->k = 0; - offset = handle->cur->next_swap; - if (!offset) - release_swap_reader(handle); - else - error = bio_read_page(offset, handle->cur); - } - return error; -} - -/** - * load_image - load the image using the swap map handle - * @handle and the snapshot handle @snapshot - * (assume there are @nr_pages pages to load) - */ - -static int load_image(struct swap_map_handle *handle, - struct snapshot_handle *snapshot, - unsigned int nr_pages) -{ - unsigned int m; - int ret; - int error = 0; - - printk("Loading image data pages (%u pages) ... ", nr_pages); - m = nr_pages / 100; - if (!m) - m = 1; - nr_pages = 0; - do { - ret = snapshot_write_next(snapshot, PAGE_SIZE); - if (ret > 0) { - error = swap_read_page(handle, data_of(*snapshot)); - if (error) - break; - if (!(nr_pages % m)) - printk("\b\b\b\b%3d%%", nr_pages / m); - nr_pages++; - } - } while (ret > 0); - if (!error) - printk("\b\b\b\bdone\n"); - if (!snapshot_image_loaded(snapshot)) - error = -ENODATA; - return error; -} - -int swsusp_read(void) -{ - int error; - struct swap_map_handle handle; - struct snapshot_handle snapshot; - struct swsusp_info *header; - - if (IS_ERR(resume_bdev)) { - pr_debug("swsusp: block device not initialised\n"); - return PTR_ERR(resume_bdev); - } - - memset(&snapshot, 0, sizeof(struct snapshot_handle)); - error = snapshot_write_next(&snapshot, PAGE_SIZE); - if (error < PAGE_SIZE) - return error < 0 ? 
error : -EFAULT; - header = (struct swsusp_info *)data_of(snapshot); - error = get_swap_reader(&handle, swsusp_header.image); - if (!error) - error = swap_read_page(&handle, header); - if (!error) - error = load_image(&handle, &snapshot, header->pages - 1); - release_swap_reader(&handle); - - blkdev_put(resume_bdev); - - if (!error) - pr_debug("swsusp: Reading resume file was successful\n"); - else - pr_debug("swsusp: Error %d resuming\n", error); - return error; -} - -/** - * swsusp_check - Check for swsusp signature in the resume device - */ - -int swsusp_check(void) -{ - int error; - - resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); - if (!IS_ERR(resume_bdev)) { - set_blocksize(resume_bdev, PAGE_SIZE); - memset(&swsusp_header, 0, sizeof(swsusp_header)); - if ((error = bio_read_page(0, &swsusp_header))) - return error; - if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { - memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); - /* Reset swap signature now */ - error = bio_write_page(0, &swsusp_header); - } else { - return -EINVAL; - } - if (error) - blkdev_put(resume_bdev); - else - pr_debug("swsusp: Signature found, resuming\n"); - } else { - error = PTR_ERR(resume_bdev); - } - - if (error) - pr_debug("swsusp: Error %d check for resume file\n", error); - - return error; -} - -/** - * swsusp_close - close swap device. - */ - -void swsusp_close(void) -{ - if (IS_ERR(resume_bdev)) { - pr_debug("swsusp: block device not initialised\n"); - return; - } - - blkdev_put(resume_bdev); -} diff --git a/trunk/kernel/power/swsusp.c b/trunk/kernel/power/swsusp.c index c4016cbbd3e0..2d9d08f72f76 100644 --- a/trunk/kernel/power/swsusp.c +++ b/trunk/kernel/power/swsusp.c @@ -31,24 +31,41 @@ * Fixed runaway init * * Rafael J. Wysocki - * Reworked the freeing of memory and the handling of swap + * Added the swap map data structure and reworked the handling of swap * * More state savers are welcome. Especially for the scsi layer... * * For TODOs,FIXMEs also look in Documentation/power/swsusp.txt */ +#include #include #include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include #include +#include +#include #include #include #include #include +#include + +#include +#include +#include +#include +#include #include "power.h" @@ -60,8 +77,6 @@ */ unsigned long image_size = 500 * 1024 * 1024; -int in_suspend __nosavedata = 0; - #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void); int save_highmem(void); @@ -72,97 +87,471 @@ static int restore_highmem(void) { return 0; } static unsigned int count_highmem_pages(void) { return 0; } #endif +extern char resume_file[]; + +#define SWSUSP_SIG "S1SUSPEND" + +static struct swsusp_header { + char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)]; + swp_entry_t image; + char orig_sig[10]; + char sig[10]; +} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header; + +static struct swsusp_info swsusp_info; + +/* + * Saving part... 
+ */ + +static unsigned short root_swap = 0xffff; + +static int mark_swapfiles(swp_entry_t start) +{ + int error; + + rw_swap_page_sync(READ, + swp_entry(root_swap, 0), + virt_to_page((unsigned long)&swsusp_header)); + if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || + !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { + memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); + memcpy(swsusp_header.sig,SWSUSP_SIG, 10); + swsusp_header.image = start; + error = rw_swap_page_sync(WRITE, + swp_entry(root_swap, 0), + virt_to_page((unsigned long) + &swsusp_header)); + } else { + pr_debug("swsusp: Partition is not swap space.\n"); + error = -ENODEV; + } + return error; +} + +/* + * Check whether the swap device is the specified resume + * device, irrespective of whether they are specified by + * identical names. + * + * (Thus, device inode aliasing is allowed. You can say /dev/hda4 + * instead of /dev/ide/host0/bus0/target0/lun0/part4 [if using devfs] + * and they'll be considered the same device. This is *necessary* for + * devfs, since the resume code can only recognize the form /dev/hda4, + * but the suspend code would see the long name.) + */ +static inline int is_resume_device(const struct swap_info_struct *swap_info) +{ + struct file *file = swap_info->swap_file; + struct inode *inode = file->f_dentry->d_inode; + + return S_ISBLK(inode->i_mode) && + swsusp_resume_device == MKDEV(imajor(inode), iminor(inode)); +} + +static int swsusp_swap_check(void) /* This is called before saving image */ +{ + int i; + + spin_lock(&swap_lock); + for (i = 0; i < MAX_SWAPFILES; i++) { + if (!(swap_info[i].flags & SWP_WRITEOK)) + continue; + if (!swsusp_resume_device || is_resume_device(swap_info + i)) { + spin_unlock(&swap_lock); + root_swap = i; + return 0; + } + } + spin_unlock(&swap_lock); + return -ENODEV; +} + +/** + * write_page - Write one page to a fresh swap location. + * @addr: Address we're writing. + * @loc: Place to store the entry we used. + * + * Allocate a new swap entry and 'sync' it. Note we discard -EIO + * errors. That is an artifact left over from swsusp. It did not + * check the return of rw_swap_page_sync() at all, since most pages + * written back to swap would return -EIO. + * This is a partial improvement, since we will at least return other + * errors, though we need to eventually fix the damn code. + */ +static int write_page(unsigned long addr, swp_entry_t *loc) +{ + swp_entry_t entry; + int error = -ENOSPC; + + entry = get_swap_page_of_type(root_swap); + if (swp_offset(entry)) { + error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr)); + if (!error || error == -EIO) + *loc = entry; + } + return error; +} + /** - * The following functions are used for tracing the allocated - * swap pages, so that they can be freed in case of an error. + * Swap map-handling functions + * + * The swap map is a data structure used for keeping track of each page + * written to the swap. It consists of many swap_map_page structures + * that contain each an array of MAP_PAGE_SIZE swap entries. + * These structures are linked together with the help of either the + * .next (in memory) or the .next_swap (in swap) member. * - * The functions operate on a linked bitmap structure defined - * in power.h + * The swap map is created during suspend. At that time we need to keep + * it in memory, because we have to free all of the allocated swap + * entries if an error occurs. The memory needed is preallocated + * so that we know in advance if there's enough of it. 
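To make the layout concrete: with the definitions that follow, the swap entry recorded for data page i sits in map page i / MAP_PAGE_SIZE at slot i % MAP_PAGE_SIZE. A sketch of the lookup (illustration only, not part of the patch):

    /* Sketch: locate the entry for data page i by walking the
     * in-memory ->next chain of the preallocated swap map. */
    static swp_entry_t entry_of(struct swap_map_page *map, unsigned int i)
    {
            unsigned int n;

            for (n = 0; n < i / MAP_PAGE_SIZE; n++)
                    map = map->next;
            return map->entries[i % MAP_PAGE_SIZE];
    }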
+ *
+ * The first swap_map_page structure is filled with the swap entries that
+ * correspond to the first MAP_PAGE_SIZE data pages written to swap and
+ * so on. After all of the data pages have been written, the order
+ * of the swap_map_page structures in the map is reversed so that they
+ * can be read from swap in the original order. This causes the data
+ * pages to be loaded in exactly the same order in which they have been
+ * saved.
+ *
+ * During resume we only need to use one swap_map_page structure
+ * at a time, which means that we only need to use two memory pages for
+ * reading the image - one for reading the swap_map_page structures
+ * and the second for reading the data pages from swap.
  */
 
-void free_bitmap(struct bitmap_page *bitmap)
+#define MAP_PAGE_SIZE	((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
+			/ sizeof(swp_entry_t))
+
+struct swap_map_page {
+	swp_entry_t entries[MAP_PAGE_SIZE];
+	swp_entry_t next_swap;
+	struct swap_map_page *next;
+};
+
+static inline void free_swap_map(struct swap_map_page *swap_map)
 {
-	struct bitmap_page *bp;
+	struct swap_map_page *swp;
 
-	while (bitmap) {
-		bp = bitmap->next;
-		free_page((unsigned long)bitmap);
-		bitmap = bp;
+	while (swap_map) {
+		swp = swap_map->next;
+		free_page((unsigned long)swap_map);
+		swap_map = swp;
 	}
 }
 
-struct bitmap_page *alloc_bitmap(unsigned int nr_bits)
+static struct swap_map_page *alloc_swap_map(unsigned int nr_pages)
 {
-	struct bitmap_page *bitmap, *bp;
-	unsigned int n;
+	struct swap_map_page *swap_map, *swp;
+	unsigned n = 0;
 
-	if (!nr_bits)
+	if (!nr_pages)
 		return NULL;
-	bitmap = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
-	bp = bitmap;
-	for (n = BITMAP_PAGE_BITS; n < nr_bits; n += BITMAP_PAGE_BITS) {
-		bp->next = (struct bitmap_page *)get_zeroed_page(GFP_KERNEL);
-		bp = bp->next;
-		if (!bp) {
-			free_bitmap(bitmap);
+	pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages);
+	swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+	swp = swap_map;
+	for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) {
+		swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+		swp = swp->next;
+		if (!swp) {
+			free_swap_map(swap_map);
 			return NULL;
 		}
 	}
-	return bitmap;
+	return swap_map;
 }
 
-static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
+/**
+ * reverse_swap_map - reverse the order of pages in the swap map
+ * @swap_map
+ */
+
+static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map)
 {
-	unsigned int n;
+	struct swap_map_page *prev, *next;
 
-	n = BITMAP_PAGE_BITS;
-	while (bitmap && n <= bit) {
-		n += BITMAP_PAGE_BITS;
-		bitmap = bitmap->next;
+	prev = NULL;
+	while (swap_map) {
+		next = swap_map->next;
+		swap_map->next = prev;
+		prev = swap_map;
+		swap_map = next;
 	}
-	if (!bitmap)
-		return -EINVAL;
-	n -= BITMAP_PAGE_BITS;
-	bit -= n;
-	n = 0;
-	while (bit >= BITS_PER_CHUNK) {
-		bit -= BITS_PER_CHUNK;
-		n++;
+	return prev;
+}
+
+/**
+ * free_swap_map_entries - free the swap entries allocated to store
+ * the swap map @swap_map (this is only called in case of an error)
+ */
+static inline void free_swap_map_entries(struct swap_map_page *swap_map)
+{
+	while (swap_map) {
+		if (swap_map->next_swap.val)
+			swap_free(swap_map->next_swap);
+		swap_map = swap_map->next;
 	}
-	bitmap->chunks[n] |= (1UL << bit);
+}
+
+/**
+ * save_swap_map - save the swap map used for tracing the data pages
+ * stored in the swap
+ */
+
+static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start)
+{
+	swp_entry_t entry =
(swp_entry_t){0}; + int error; + + while (swap_map) { + swap_map->next_swap = entry; + if ((error = write_page((unsigned long)swap_map, &entry))) + return error; + swap_map = swap_map->next; + } + *start = entry; return 0; } -unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap) +/** + * free_image_entries - free the swap entries allocated to store + * the image data pages (this is only called in case of an error) + */ + +static inline void free_image_entries(struct swap_map_page *swp) { - unsigned long offset; + unsigned k; - offset = swp_offset(get_swap_page_of_type(swap)); - if (offset) { - if (bitmap_set(bitmap, offset)) { - swap_free(swp_entry(swap, offset)); - offset = 0; - } + while (swp) { + for (k = 0; k < MAP_PAGE_SIZE; k++) + if (swp->entries[k].val) + swap_free(swp->entries[k]); + swp = swp->next; + } +} + +/** + * The swap_map_handle structure is used for handling the swap map in + * a file-alike way + */ + +struct swap_map_handle { + struct swap_map_page *cur; + unsigned int k; +}; + +static inline void init_swap_map_handle(struct swap_map_handle *handle, + struct swap_map_page *map) +{ + handle->cur = map; + handle->k = 0; +} + +static inline int swap_map_write_page(struct swap_map_handle *handle, + unsigned long addr) +{ + int error; + + error = write_page(addr, handle->cur->entries + handle->k); + if (error) + return error; + if (++handle->k >= MAP_PAGE_SIZE) { + handle->cur = handle->cur->next; + handle->k = 0; + } + return 0; +} + +/** + * save_image_data - save the data pages pointed to by the PBEs + * from the list @pblist using the swap map handle @handle + * (assume there are @nr_pages data pages to save) + */ + +static int save_image_data(struct pbe *pblist, + struct swap_map_handle *handle, + unsigned int nr_pages) +{ + unsigned int m; + struct pbe *p; + int error = 0; + + printk("Saving image data pages (%u pages) ... 
", nr_pages); + m = nr_pages / 100; + if (!m) + m = 1; + nr_pages = 0; + for_each_pbe (p, pblist) { + error = swap_map_write_page(handle, p->address); + if (error) + break; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + if (!error) + printk("\b\b\b\bdone\n"); + return error; +} + +static void dump_info(void) +{ + pr_debug(" swsusp: Version: %u\n",swsusp_info.version_code); + pr_debug(" swsusp: Num Pages: %ld\n",swsusp_info.num_physpages); + pr_debug(" swsusp: UTS Sys: %s\n",swsusp_info.uts.sysname); + pr_debug(" swsusp: UTS Node: %s\n",swsusp_info.uts.nodename); + pr_debug(" swsusp: UTS Release: %s\n",swsusp_info.uts.release); + pr_debug(" swsusp: UTS Version: %s\n",swsusp_info.uts.version); + pr_debug(" swsusp: UTS Machine: %s\n",swsusp_info.uts.machine); + pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname); + pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus); + pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages); + pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages); +} + +static void init_header(unsigned int nr_pages) +{ + memset(&swsusp_info, 0, sizeof(swsusp_info)); + swsusp_info.version_code = LINUX_VERSION_CODE; + swsusp_info.num_physpages = num_physpages; + memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname)); + + swsusp_info.cpus = num_online_cpus(); + swsusp_info.image_pages = nr_pages; + swsusp_info.pages = nr_pages + + ((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; +} + +/** + * pack_orig_addresses - the .orig_address fields of the PBEs from the + * list starting at @pbe are stored in the array @buf[] (1 page) + */ + +static inline struct pbe *pack_orig_addresses(unsigned long *buf, + struct pbe *pbe) +{ + int j; + + for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { + buf[j] = pbe->orig_address; + pbe = pbe->next; } - return offset; + if (!pbe) + for (; j < PAGE_SIZE / sizeof(long); j++) + buf[j] = 0; + return pbe; } -void free_all_swap_pages(int swap, struct bitmap_page *bitmap) +/** + * save_image_metadata - save the .orig_address fields of the PBEs + * from the list @pblist using the swap map handle @handle + */ + +static int save_image_metadata(struct pbe *pblist, + struct swap_map_handle *handle) { - unsigned int bit, n; - unsigned long test; + unsigned long *buf; + unsigned int n = 0; + struct pbe *p; + int error = 0; - bit = 0; - while (bitmap) { - for (n = 0; n < BITMAP_PAGE_CHUNKS; n++) - for (test = 1UL; test; test <<= 1) { - if (bitmap->chunks[n] & test) - swap_free(swp_entry(swap, bit)); - bit++; - } - bitmap = bitmap->next; + printk("Saving image metadata ... "); + buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC); + if (!buf) + return -ENOMEM; + p = pblist; + while (p) { + p = pack_orig_addresses(buf, p); + error = swap_map_write_page(handle, (unsigned long)buf); + if (error) + break; + n++; } + free_page((unsigned long)buf); + if (!error) + printk("done (%u pages saved)\n", n); + return error; +} + +/** + * enough_swap - Make sure we have enough swap to save the image. + * + * Returns TRUE or FALSE after checking the total amount of swap + * space avaiable from the resume partition. + */ + +static int enough_swap(unsigned int nr_pages) +{ + unsigned int free_swap = swap_info[root_swap].pages - + swap_info[root_swap].inuse_pages; + + pr_debug("swsusp: free swap pages: %u\n", free_swap); + return free_swap > (nr_pages + PAGES_FOR_IO + + (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); +} + +/** + * swsusp_write - Write entire image and metadata. 
+ * + * It is important _NOT_ to umount filesystems at this point. We want + * them synced (in case something goes wrong) but we DO not want to mark + * filesystem clean: it is not. (And it does not matter, if we resume + * correctly, we'll mark system clean, anyway.) + */ + +int swsusp_write(struct pbe *pblist, unsigned int nr_pages) +{ + struct swap_map_page *swap_map; + struct swap_map_handle handle; + swp_entry_t start; + int error; + + if ((error = swsusp_swap_check())) { + printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n"); + return error; + } + if (!enough_swap(nr_pages)) { + printk(KERN_ERR "swsusp: Not enough free swap\n"); + return -ENOSPC; + } + + init_header(nr_pages); + swap_map = alloc_swap_map(swsusp_info.pages); + if (!swap_map) + return -ENOMEM; + init_swap_map_handle(&handle, swap_map); + + error = swap_map_write_page(&handle, (unsigned long)&swsusp_info); + if (!error) + error = save_image_metadata(pblist, &handle); + if (!error) + error = save_image_data(pblist, &handle, nr_pages); + if (error) + goto Free_image_entries; + + swap_map = reverse_swap_map(swap_map); + error = save_swap_map(swap_map, &start); + if (error) + goto Free_map_entries; + + dump_info(); + printk( "S" ); + error = mark_swapfiles(start); + printk( "|\n" ); + if (error) + goto Free_map_entries; + +Free_swap_map: + free_swap_map(swap_map); + return error; + +Free_map_entries: + free_swap_map_entries(swap_map); +Free_image_entries: + free_image_entries(swap_map); + goto Free_swap_map; } /** @@ -271,3 +660,379 @@ int swsusp_resume(void) local_irq_enable(); return error; } + +/** + * mark_unsafe_pages - mark the pages that cannot be used for storing + * the image during resume, because they conflict with the pages that + * had been used before suspend + */ + +static void mark_unsafe_pages(struct pbe *pblist) +{ + struct zone *zone; + unsigned long zone_pfn; + struct pbe *p; + + if (!pblist) /* a sanity check */ + return; + + /* Clear page flags */ + for_each_zone (zone) { + for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) + if (pfn_valid(zone_pfn + zone->zone_start_pfn)) + ClearPageNosaveFree(pfn_to_page(zone_pfn + + zone->zone_start_pfn)); + } + + /* Mark orig addresses */ + for_each_pbe (p, pblist) + SetPageNosaveFree(virt_to_page(p->orig_address)); + +} + +static void copy_page_backup_list(struct pbe *dst, struct pbe *src) +{ + /* We assume both lists contain the same number of elements */ + while (src) { + dst->orig_address = src->orig_address; + dst = dst->next; + src = src->next; + } +} + +/* + * Using bio to read from swap. + * This code requires a bit more work than just using buffer heads + * but, it is the recommended way for 2.5/2.6. + * The following are to signal the beginning and end of I/O. Bios + * finish asynchronously, while we want them to happen synchronously. + * A simple atomic_t, and a wait loop take care of this problem. + */ + +static atomic_t io_done = ATOMIC_INIT(0); + +static int end_io(struct bio *bio, unsigned int num, int err) +{ + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + panic("I/O error reading memory image"); + atomic_set(&io_done, 0); + return 0; +} + +static struct block_device *resume_bdev; + +/** + * submit - submit BIO request. + * @rw: READ or WRITE. + * @off physical offset of page. + * @page: page we're reading or writing. + * + * Straight from the textbook - allocate and initialize the bio. + * If we're writing, make sure the page is marked as dirty. + * Then submit it and wait. 
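The atomic_t + yield() busy-wait used by this code is one way to force the asynchronous bio to complete synchronously; a completion would give the same shape, sketched here purely for comparison (hypothetical alternative, not what this patch does):

    /* Sketch: synchronous bio completion via a struct completion
     * instead of the atomic_t flag polled with yield(). */
    static DECLARE_COMPLETION(io_complete);

    static int end_io_sketch(struct bio *bio, unsigned int num, int err)
    {
            if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                    panic("I/O error reading memory image");
            complete(&io_complete);
            return 0;
    }

    /* after submit_bio(rw | (1 << BIO_RW_SYNC), bio): */
    /* wait_for_completion(&io_complete); */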
+ */ + +static int submit(int rw, pgoff_t page_off, void *page) +{ + int error = 0; + struct bio *bio; + + bio = bio_alloc(GFP_ATOMIC, 1); + if (!bio) + return -ENOMEM; + bio->bi_sector = page_off * (PAGE_SIZE >> 9); + bio->bi_bdev = resume_bdev; + bio->bi_end_io = end_io; + + if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) { + printk("swsusp: ERROR: adding page to bio at %ld\n",page_off); + error = -EFAULT; + goto Done; + } + + + atomic_set(&io_done, 1); + submit_bio(rw | (1 << BIO_RW_SYNC), bio); + while (atomic_read(&io_done)) + yield(); + if (rw == READ) + bio_set_pages_dirty(bio); + Done: + bio_put(bio); + return error; +} + +static int bio_read_page(pgoff_t page_off, void *page) +{ + return submit(READ, page_off, page); +} + +static int bio_write_page(pgoff_t page_off, void *page) +{ + return submit(WRITE, page_off, page); +} + +/** + * The following functions allow us to read data using a swap map + * in a file-alike way + */ + +static inline void release_swap_map_reader(struct swap_map_handle *handle) +{ + if (handle->cur) + free_page((unsigned long)handle->cur); + handle->cur = NULL; +} + +static inline int get_swap_map_reader(struct swap_map_handle *handle, + swp_entry_t start) +{ + int error; + + if (!swp_offset(start)) + return -EINVAL; + handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); + if (!handle->cur) + return -ENOMEM; + error = bio_read_page(swp_offset(start), handle->cur); + if (error) { + release_swap_map_reader(handle); + return error; + } + handle->k = 0; + return 0; +} + +static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf) +{ + unsigned long offset; + int error; + + if (!handle->cur) + return -EINVAL; + offset = swp_offset(handle->cur->entries[handle->k]); + if (!offset) + return -EINVAL; + error = bio_read_page(offset, buf); + if (error) + return error; + if (++handle->k >= MAP_PAGE_SIZE) { + handle->k = 0; + offset = swp_offset(handle->cur->next_swap); + if (!offset) + release_swap_map_reader(handle); + else + error = bio_read_page(offset, handle->cur); + } + return error; +} + +static int check_header(void) +{ + char *reason = NULL; + + dump_info(); + if (swsusp_info.version_code != LINUX_VERSION_CODE) + reason = "kernel version"; + if (swsusp_info.num_physpages != num_physpages) + reason = "memory size"; + if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname)) + reason = "system type"; + if (strcmp(swsusp_info.uts.release,system_utsname.release)) + reason = "kernel release"; + if (strcmp(swsusp_info.uts.version,system_utsname.version)) + reason = "version"; + if (strcmp(swsusp_info.uts.machine,system_utsname.machine)) + reason = "machine"; + if (reason) { + printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); + return -EPERM; + } + return 0; +} + +/** + * load_image_data - load the image data using the swap map handle + * @handle and store them using the page backup list @pblist + * (assume there are @nr_pages pages to load) + */ + +static int load_image_data(struct pbe *pblist, + struct swap_map_handle *handle, + unsigned int nr_pages) +{ + int error; + unsigned int m; + struct pbe *p; + + if (!pblist) + return -EINVAL; + printk("Loading image data pages (%u pages) ... 
", nr_pages); + m = nr_pages / 100; + if (!m) + m = 1; + nr_pages = 0; + p = pblist; + while (p) { + error = swap_map_read_page(handle, (void *)p->address); + if (error) + break; + p = p->next; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + if (!error) + printk("\b\b\b\bdone\n"); + return error; +} + +/** + * unpack_orig_addresses - copy the elements of @buf[] (1 page) to + * the PBEs in the list starting at @pbe + */ + +static inline struct pbe *unpack_orig_addresses(unsigned long *buf, + struct pbe *pbe) +{ + int j; + + for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { + pbe->orig_address = buf[j]; + pbe = pbe->next; + } + return pbe; +} + +/** + * load_image_metadata - load the image metadata using the swap map + * handle @handle and put them into the PBEs in the list @pblist + */ + +static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle) +{ + struct pbe *p; + unsigned long *buf; + unsigned int n = 0; + int error = 0; + + printk("Loading image metadata ... "); + buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC); + if (!buf) + return -ENOMEM; + p = pblist; + while (p) { + error = swap_map_read_page(handle, buf); + if (error) + break; + p = unpack_orig_addresses(buf, p); + n++; + } + free_page((unsigned long)buf); + if (!error) + printk("done (%u pages loaded)\n", n); + return error; +} + +int swsusp_read(struct pbe **pblist_ptr) +{ + int error; + struct pbe *p, *pblist; + struct swap_map_handle handle; + unsigned int nr_pages; + + if (IS_ERR(resume_bdev)) { + pr_debug("swsusp: block device not initialised\n"); + return PTR_ERR(resume_bdev); + } + + error = get_swap_map_reader(&handle, swsusp_header.image); + if (!error) + error = swap_map_read_page(&handle, &swsusp_info); + if (!error) + error = check_header(); + if (error) + return error; + nr_pages = swsusp_info.image_pages; + p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0); + if (!p) + return -ENOMEM; + error = load_image_metadata(p, &handle); + if (!error) { + mark_unsafe_pages(p); + pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1); + if (pblist) + copy_page_backup_list(pblist, p); + free_pagedir(p); + if (!pblist) + error = -ENOMEM; + + /* Allocate memory for the image and read the data from swap */ + if (!error) + error = alloc_data_pages(pblist, GFP_ATOMIC, 1); + if (!error) { + release_eaten_pages(); + error = load_image_data(pblist, &handle, nr_pages); + } + if (!error) + *pblist_ptr = pblist; + } + release_swap_map_reader(&handle); + + blkdev_put(resume_bdev); + + if (!error) + pr_debug("swsusp: Reading resume file was successful\n"); + else + pr_debug("swsusp: Error %d resuming\n", error); + return error; +} + +/** + * swsusp_check - Check for swsusp signature in the resume device + */ + +int swsusp_check(void) +{ + int error; + + resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ); + if (!IS_ERR(resume_bdev)) { + set_blocksize(resume_bdev, PAGE_SIZE); + memset(&swsusp_header, 0, sizeof(swsusp_header)); + if ((error = bio_read_page(0, &swsusp_header))) + return error; + if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { + memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); + /* Reset swap signature now */ + error = bio_write_page(0, &swsusp_header); + } else { + return -EINVAL; + } + if (error) + blkdev_put(resume_bdev); + else + pr_debug("swsusp: Signature found, resuming\n"); + } else { + error = PTR_ERR(resume_bdev); + } + + if (error) + pr_debug("swsusp: Error %d check for resume file\n", error); + + return error; +} + +/** + * swsusp_close - 
close swap device. + */ + +void swsusp_close(void) +{ + if (IS_ERR(resume_bdev)) { + pr_debug("swsusp: block device not initialised\n"); + return; + } + + blkdev_put(resume_bdev); +} diff --git a/trunk/kernel/power/user.c b/trunk/kernel/power/user.c deleted file mode 100644 index 3f1539fbe48a..000000000000 --- a/trunk/kernel/power/user.c +++ /dev/null @@ -1,333 +0,0 @@ -/* - * linux/kernel/power/user.c - * - * This file provides the user space interface for software suspend/resume. - * - * Copyright (C) 2006 Rafael J. Wysocki - * - * This file is released under the GPLv2. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "power.h" - -#define SNAPSHOT_MINOR 231 - -static struct snapshot_data { - struct snapshot_handle handle; - int swap; - struct bitmap_page *bitmap; - int mode; - char frozen; - char ready; -} snapshot_state; - -static atomic_t device_available = ATOMIC_INIT(1); - -static int snapshot_open(struct inode *inode, struct file *filp) -{ - struct snapshot_data *data; - - if (!atomic_add_unless(&device_available, -1, 0)) - return -EBUSY; - - if ((filp->f_flags & O_ACCMODE) == O_RDWR) - return -ENOSYS; - - nonseekable_open(inode, filp); - data = &snapshot_state; - filp->private_data = data; - memset(&data->handle, 0, sizeof(struct snapshot_handle)); - if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { - data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device) : -1; - data->mode = O_RDONLY; - } else { - data->swap = -1; - data->mode = O_WRONLY; - } - data->bitmap = NULL; - data->frozen = 0; - data->ready = 0; - - return 0; -} - -static int snapshot_release(struct inode *inode, struct file *filp) -{ - struct snapshot_data *data; - - swsusp_free(); - data = filp->private_data; - free_all_swap_pages(data->swap, data->bitmap); - free_bitmap(data->bitmap); - if (data->frozen) { - down(&pm_sem); - thaw_processes(); - enable_nonboot_cpus(); - up(&pm_sem); - } - atomic_inc(&device_available); - return 0; -} - -static ssize_t snapshot_read(struct file *filp, char __user *buf, - size_t count, loff_t *offp) -{ - struct snapshot_data *data; - ssize_t res; - - data = filp->private_data; - res = snapshot_read_next(&data->handle, count); - if (res > 0) { - if (copy_to_user(buf, data_of(data->handle), res)) - res = -EFAULT; - else - *offp = data->handle.offset; - } - return res; -} - -static ssize_t snapshot_write(struct file *filp, const char __user *buf, - size_t count, loff_t *offp) -{ - struct snapshot_data *data; - ssize_t res; - - data = filp->private_data; - res = snapshot_write_next(&data->handle, count); - if (res > 0) { - if (copy_from_user(data_of(data->handle), buf, res)) - res = -EFAULT; - else - *offp = data->handle.offset; - } - return res; -} - -static int snapshot_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - int error = 0; - struct snapshot_data *data; - loff_t offset, avail; - - if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) - return -ENOTTY; - if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) - return -ENOTTY; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - data = filp->private_data; - - switch (cmd) { - - case SNAPSHOT_FREEZE: - if (data->frozen) - break; - down(&pm_sem); - disable_nonboot_cpus(); - if (freeze_processes()) { - thaw_processes(); - enable_nonboot_cpus(); - error = -EBUSY; - } - up(&pm_sem); - if (!error) - data->frozen = 1; - break; - - case SNAPSHOT_UNFREEZE: - if (!data->frozen) - break; - down(&pm_sem); - thaw_processes(); - 
enable_nonboot_cpus(); - up(&pm_sem); - data->frozen = 0; - break; - - case SNAPSHOT_ATOMIC_SNAPSHOT: - if (data->mode != O_RDONLY || !data->frozen || data->ready) { - error = -EPERM; - break; - } - down(&pm_sem); - /* Free memory before shutting down devices. */ - error = swsusp_shrink_memory(); - if (!error) { - error = device_suspend(PMSG_FREEZE); - if (!error) { - in_suspend = 1; - error = swsusp_suspend(); - device_resume(); - } - } - up(&pm_sem); - if (!error) - error = put_user(in_suspend, (unsigned int __user *)arg); - if (!error) - data->ready = 1; - break; - - case SNAPSHOT_ATOMIC_RESTORE: - if (data->mode != O_WRONLY || !data->frozen || - !snapshot_image_loaded(&data->handle)) { - error = -EPERM; - break; - } - down(&pm_sem); - pm_prepare_console(); - error = device_suspend(PMSG_FREEZE); - if (!error) { - error = swsusp_resume(); - device_resume(); - } - pm_restore_console(); - up(&pm_sem); - break; - - case SNAPSHOT_FREE: - swsusp_free(); - memset(&data->handle, 0, sizeof(struct snapshot_handle)); - data->ready = 0; - break; - - case SNAPSHOT_SET_IMAGE_SIZE: - image_size = arg; - break; - - case SNAPSHOT_AVAIL_SWAP: - avail = count_swap_pages(data->swap, 1); - avail <<= PAGE_SHIFT; - error = put_user(avail, (loff_t __user *)arg); - break; - - case SNAPSHOT_GET_SWAP_PAGE: - if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { - error = -ENODEV; - break; - } - if (!data->bitmap) { - data->bitmap = alloc_bitmap(count_swap_pages(data->swap, 0)); - if (!data->bitmap) { - error = -ENOMEM; - break; - } - } - offset = alloc_swap_page(data->swap, data->bitmap); - if (offset) { - offset <<= PAGE_SHIFT; - error = put_user(offset, (loff_t __user *)arg); - } else { - error = -ENOSPC; - } - break; - - case SNAPSHOT_FREE_SWAP_PAGES: - if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { - error = -ENODEV; - break; - } - free_all_swap_pages(data->swap, data->bitmap); - free_bitmap(data->bitmap); - data->bitmap = NULL; - break; - - case SNAPSHOT_SET_SWAP_FILE: - if (!data->bitmap) { - /* - * User space encodes device types as two-byte values, - * so we need to recode them - */ - if (old_decode_dev(arg)) { - data->swap = swap_type_of(old_decode_dev(arg)); - if (data->swap < 0) - error = -ENODEV; - } else { - data->swap = -1; - error = -EINVAL; - } - } else { - error = -EPERM; - } - break; - - case SNAPSHOT_S2RAM: - if (!data->frozen) { - error = -EPERM; - break; - } - - if (down_trylock(&pm_sem)) { - error = -EBUSY; - break; - } - - if (pm_ops->prepare) { - error = pm_ops->prepare(PM_SUSPEND_MEM); - if (error) - goto OutS3; - } - - /* Put devices to sleep */ - error = device_suspend(PMSG_SUSPEND); - if (error) { - printk(KERN_ERR "Failed to suspend some devices.\n"); - } else { - /* Enter S3, system is already frozen */ - suspend_enter(PM_SUSPEND_MEM); - - /* Wake up devices */ - device_resume(); - } - - if (pm_ops->finish) - pm_ops->finish(PM_SUSPEND_MEM); - -OutS3: - up(&pm_sem); - break; - - default: - error = -ENOTTY; - - } - - return error; -} - -static struct file_operations snapshot_fops = { - .open = snapshot_open, - .release = snapshot_release, - .read = snapshot_read, - .write = snapshot_write, - .llseek = no_llseek, - .ioctl = snapshot_ioctl, -}; - -static struct miscdevice snapshot_device = { - .minor = SNAPSHOT_MINOR, - .name = "snapshot", - .fops = &snapshot_fops, -}; - -static int __init snapshot_device_init(void) -{ - return misc_register(&snapshot_device); -}; - -device_initcall(snapshot_device_init); diff --git a/trunk/kernel/profile.c b/trunk/kernel/profile.c index 
ad81f799a9b4..f89248e6d704 100644 --- a/trunk/kernel/profile.c +++ b/trunk/kernel/profile.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -45,7 +44,7 @@ static cpumask_t prof_cpu_mask = CPU_MASK_ALL; #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); static DEFINE_PER_CPU(int, cpu_profile_flip); -static DEFINE_MUTEX(profile_flip_mutex); +static DECLARE_MUTEX(profile_flip_mutex); #endif /* CONFIG_SMP */ static int __init profile_setup(char * str) @@ -244,7 +243,7 @@ static void profile_flip_buffers(void) { int i, j, cpu; - mutex_lock(&profile_flip_mutex); + down(&profile_flip_mutex); j = per_cpu(cpu_profile_flip, get_cpu()); put_cpu(); on_each_cpu(__profile_flip_buffers, NULL, 0, 1); @@ -260,14 +259,14 @@ static void profile_flip_buffers(void) hits[i].hits = hits[i].pc = 0; } } - mutex_unlock(&profile_flip_mutex); + up(&profile_flip_mutex); } static void profile_discard_flip_buffers(void) { int i, cpu; - mutex_lock(&profile_flip_mutex); + down(&profile_flip_mutex); i = per_cpu(cpu_profile_flip, get_cpu()); put_cpu(); on_each_cpu(__profile_flip_buffers, NULL, 0, 1); @@ -275,7 +274,7 @@ static void profile_discard_flip_buffers(void) struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit)); } - mutex_unlock(&profile_flip_mutex); + up(&profile_flip_mutex); } void profile_hit(int type, void *__pc) diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c index 6df1559b1c02..fedf5e369755 100644 --- a/trunk/kernel/rcupdate.c +++ b/trunk/kernel/rcupdate.c @@ -47,16 +47,15 @@ #include #include #include -#include /* Definition for rcupdate control block. */ -static struct rcu_ctrlblk rcu_ctrlblk = { +struct rcu_ctrlblk rcu_ctrlblk = { .cur = -300, .completed = -300, .lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE, }; -static struct rcu_ctrlblk rcu_bh_ctrlblk = { +struct rcu_ctrlblk rcu_bh_ctrlblk = { .cur = -300, .completed = -300, .lock = SPIN_LOCK_UNLOCKED, @@ -76,7 +75,7 @@ static int rsinterval = 1000; #endif static atomic_t rcu_barrier_cpu_count; -static DEFINE_MUTEX(rcu_barrier_mutex); +static struct semaphore rcu_barrier_sema; static struct completion rcu_barrier_completion; #ifdef CONFIG_SMP @@ -208,13 +207,13 @@ static void rcu_barrier_func(void *notused) void rcu_barrier(void) { BUG_ON(in_interrupt()); - /* Take cpucontrol mutex to protect against CPU hotplug */ - mutex_lock(&rcu_barrier_mutex); + /* Take cpucontrol semaphore to protect against CPU hotplug */ + down(&rcu_barrier_sema); init_completion(&rcu_barrier_completion); atomic_set(&rcu_barrier_cpu_count, 0); on_each_cpu(rcu_barrier_func, NULL, 0, 1); wait_for_completion(&rcu_barrier_completion); - mutex_unlock(&rcu_barrier_mutex); + up(&rcu_barrier_sema); } EXPORT_SYMBOL_GPL(rcu_barrier); @@ -550,6 +549,7 @@ static struct notifier_block __devinitdata rcu_nb = { */ void __init rcu_init(void) { + sema_init(&rcu_barrier_sema, 1); rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)smp_processor_id()); /* Register notifier for non-boot CPUs */ diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index 7ffaabd64f89..6b6e0d70eb30 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -237,7 +237,6 @@ struct runqueue { task_t *migration_thread; struct list_head migration_queue; - int cpu; #endif #ifdef CONFIG_SCHEDSTATS @@ -1655,9 +1654,6 @@ unsigned long nr_iowait(void) /* * double_rq_lock - safely lock two runqueues * - * We must take them in cpu order to match code in - * 
dependent_sleeper and wake_dependent_sleeper. - * * Note this does not disable interrupts like task_rq_lock, * you need to do so manually before calling. */ @@ -1669,7 +1665,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2) spin_lock(&rq1->lock); __acquire(rq2->lock); /* Fake it out ;) */ } else { - if (rq1->cpu < rq2->cpu) { + if (rq1 < rq2) { spin_lock(&rq1->lock); spin_lock(&rq2->lock); } else { @@ -1705,7 +1701,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest) __acquires(this_rq->lock) { if (unlikely(!spin_trylock(&busiest->lock))) { - if (busiest->cpu < this_rq->cpu) { + if (busiest < this_rq) { spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); spin_lock(&this_rq->lock); @@ -2873,7 +2869,7 @@ asmlinkage void __sched schedule(void) */ if (likely(!current->exit_state)) { if (unlikely(in_atomic())) { - printk(KERN_ERR "BUG: scheduling while atomic: " + printk(KERN_ERR "scheduling while atomic: " "%s/0x%08x/%d\n", current->comm, preempt_count(), current->pid); dump_stack(); @@ -6033,7 +6029,6 @@ void __init sched_init(void) rq->push_cpu = 0; rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); - rq->cpu = i; #endif atomic_set(&rq->nr_iowait, 0); @@ -6074,7 +6069,7 @@ void __might_sleep(char *file, int line) if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; prev_jiffy = jiffies; - printk(KERN_ERR "BUG: sleeping function called from invalid" + printk(KERN_ERR "Debug: sleeping function called from invalid" " context at %s:%d\n", file, line); printk("in_atomic():%d, irqs_disabled():%d\n", in_atomic(), irqs_disabled()); diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index 75f7341b0c39..ea154104a00b 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -1922,8 +1922,6 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, sigset_t *mask = ¤t->blocked; int signr = 0; - try_to_freeze(); - relock: spin_lock_irq(¤t->sighand->siglock); for (;;) { @@ -2101,11 +2099,10 @@ long do_no_restart_syscall(struct restart_block *param) int sigprocmask(int how, sigset_t *set, sigset_t *oldset) { int error; + sigset_t old_block; spin_lock_irq(¤t->sighand->siglock); - if (oldset) - *oldset = current->blocked; - + old_block = current->blocked; error = 0; switch (how) { case SIG_BLOCK: @@ -2122,7 +2119,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) } recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); - + if (oldset) + *oldset = old_block; return error; } @@ -2309,6 +2307,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, timeout = schedule_timeout_interruptible(timeout); + try_to_freeze(); spin_lock_irq(¤t->sighand->siglock); sig = dequeue_signal(current, &these, &info); current->blocked = current->real_blocked; diff --git a/trunk/kernel/spinlock.c b/trunk/kernel/spinlock.c index d1b810782bc4..0375fcd5921d 100644 --- a/trunk/kernel/spinlock.c +++ b/trunk/kernel/spinlock.c @@ -179,16 +179,16 @@ EXPORT_SYMBOL(_write_lock); #define BUILD_LOCK_OPS(op, locktype) \ void __lockfunc _##op##_lock(locktype##_t *lock) \ { \ + preempt_disable(); \ for (;;) { \ - preempt_disable(); \ if (likely(_raw_##op##_trylock(lock))) \ break; \ preempt_enable(); \ - \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ cpu_relax(); \ + preempt_disable(); \ } \ (lock)->break_lock = 0; \ } \ @@ -199,18 +199,19 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ { \ unsigned long flags; \ \ + preempt_disable(); \ for 
(;;) { \ - preempt_disable(); \ local_irq_save(flags); \ if (likely(_raw_##op##_trylock(lock))) \ break; \ local_irq_restore(flags); \ - preempt_enable(); \ \ + preempt_enable(); \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ cpu_relax(); \ + preempt_disable(); \ } \ (lock)->break_lock = 0; \ return flags; \ diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c index c0fcad9f826c..f91218a5463e 100644 --- a/trunk/kernel/sys.c +++ b/trunk/kernel/sys.c @@ -1227,7 +1227,7 @@ asmlinkage long sys_setsid(void) struct pid *pid; int err = -EPERM; - mutex_lock(&tty_mutex); + down(&tty_sem); write_lock_irq(&tasklist_lock); pid = find_pid(PIDTYPE_PGID, group_leader->pid); @@ -1241,7 +1241,7 @@ asmlinkage long sys_setsid(void) err = process_group(group_leader); out: write_unlock_irq(&tasklist_lock); - mutex_unlock(&tty_mutex); + up(&tty_sem); return err; } @@ -1677,6 +1677,9 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) * a lot simpler! (Which we're not doing right now because we're not * measuring them yet). * + * This expects to be called with tasklist_lock read-locked or better, + * and the siglock not locked. It may momentarily take the siglock. + * * When sampling multiple threads for RUSAGE_SELF, under SMP we might have * races with threads incrementing their own counters. But since word * reads are atomic, we either get new values or old values and we don't @@ -1684,25 +1687,6 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) * the c* fields from p->signal from races with exit.c updating those * fields when reaping, so a sample either gets all the additions of a * given child after it's reaped, or none so this sample is before reaping. - * - * tasklist_lock locking optimisation: - * If we are current and single threaded, we do not need to take the tasklist - * lock or the siglock. No one else can take our signal_struct away, - * no one else can reap the children to update signal->c* counters, and - * no one else can race with the signal-> fields. - * If we do not take the tasklist_lock, the signal-> fields could be read - * out of order while another thread was just exiting. So we place a - * read memory barrier when we avoid the lock. On the writer side, - * write memory barrier is implied in __exit_signal as __exit_signal releases - * the siglock spinlock after updating the signal-> fields. - * - * We don't really need the siglock when we access the non c* fields - * of the signal_struct (for RUSAGE_SELF) even in multithreaded - * case, since we take the tasklist lock for read and the non c* signal-> - * fields are updated only in __exit_signal, which is called with - * tasklist_lock taken for write, hence these two threads cannot execute - * concurrently. 
- * */ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) @@ -1710,23 +1694,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) struct task_struct *t; unsigned long flags; cputime_t utime, stime; - int need_lock = 0; memset((char *) r, 0, sizeof *r); - utime = stime = cputime_zero; - if (p != current || !thread_group_empty(p)) - need_lock = 1; + if (unlikely(!p->signal)) + return; - if (need_lock) { - read_lock(&tasklist_lock); - if (unlikely(!p->signal)) { - read_unlock(&tasklist_lock); - return; - } - } else - /* See locking comments above */ - smp_rmb(); + utime = stime = cputime_zero; switch (who) { case RUSAGE_BOTH: @@ -1766,8 +1740,6 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) BUG(); } - if (need_lock) - read_unlock(&tasklist_lock); cputime_to_timeval(utime, &r->ru_utime); cputime_to_timeval(stime, &r->ru_stime); } @@ -1775,7 +1747,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) int getrusage(struct task_struct *p, int who, struct rusage __user *ru) { struct rusage r; + read_lock(&tasklist_lock); k_getrusage(p, who, &r); + read_unlock(&tasklist_lock); return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; } diff --git a/trunk/lib/reed_solomon/reed_solomon.c b/trunk/lib/reed_solomon/reed_solomon.c index f8ac9fa95de1..f5fef948a415 100644 --- a/trunk/lib/reed_solomon/reed_solomon.c +++ b/trunk/lib/reed_solomon/reed_solomon.c @@ -44,13 +44,12 @@ #include #include #include -#include #include /* This list holds all currently allocated rs control structures */ static LIST_HEAD (rslist); /* Protection for the list */ -static DEFINE_MUTEX(rslistlock); +static DECLARE_MUTEX(rslistlock); /** * rs_init - Initialize a Reed-Solomon codec @@ -162,7 +161,7 @@ static struct rs_control *rs_init(int symsize, int gfpoly, int fcr, */ void free_rs(struct rs_control *rs) { - mutex_lock(&rslistlock); + down(&rslistlock); rs->users--; if(!rs->users) { list_del(&rs->list); @@ -171,7 +170,7 @@ void free_rs(struct rs_control *rs) kfree(rs->genpoly); kfree(rs); } - mutex_unlock(&rslistlock); + up(&rslistlock); } /** @@ -202,7 +201,7 @@ struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim, if (nroots < 0 || nroots >= (1<list, &rslist); } out: - mutex_unlock(&rslistlock); + up(&rslistlock); return rs; } diff --git a/trunk/mm/readahead.c b/trunk/mm/readahead.c index 0f142a40984b..301b36c4a0ce 100644 --- a/trunk/mm/readahead.c +++ b/trunk/mm/readahead.c @@ -555,7 +555,6 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra, out: return ra->prev_page + 1; } -EXPORT_SYMBOL_GPL(page_cache_readahead); /* * handle_ra_miss() is called when it is known that a page which should have diff --git a/trunk/mm/swapfile.c b/trunk/mm/swapfile.c index 39aa9d129612..365ed6ff182d 100644 --- a/trunk/mm/swapfile.c +++ b/trunk/mm/swapfile.c @@ -45,7 +45,7 @@ static const char Unused_offset[] = "Unused swap offset entry "; struct swap_list_t swap_list = {-1, -1}; -static struct swap_info_struct swap_info[MAX_SWAPFILES]; +struct swap_info_struct swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); @@ -417,61 +417,6 @@ void free_swap_and_cache(swp_entry_t entry) } } -#ifdef CONFIG_SOFTWARE_SUSPEND -/* - * Find the swap type that corresponds to given device (if any) - * - * This is needed for software suspend and is done in such a way that inode - * aliasing is allowed. 
- */ -int swap_type_of(dev_t device) -{ - int i; - - spin_lock(&swap_lock); - for (i = 0; i < nr_swapfiles; i++) { - struct inode *inode; - - if (!(swap_info[i].flags & SWP_WRITEOK)) - continue; - if (!device) { - spin_unlock(&swap_lock); - return i; - } - inode = swap_info->swap_file->f_dentry->d_inode; - if (S_ISBLK(inode->i_mode) && - device == MKDEV(imajor(inode), iminor(inode))) { - spin_unlock(&swap_lock); - return i; - } - } - spin_unlock(&swap_lock); - return -ENODEV; -} - -/* - * Return either the total number of swap pages of given type, or the number - * of free pages of that type (depending on @free) - * - * This is needed for software suspend - */ -unsigned int count_swap_pages(int type, int free) -{ - unsigned int n = 0; - - if (type < nr_swapfiles) { - spin_lock(&swap_lock); - if (swap_info[type].flags & SWP_WRITEOK) { - n = swap_info[type].pages; - if (free) - n -= swap_info[type].inuse_pages; - } - spin_unlock(&swap_lock); - } - return n; -} -#endif - /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index 8eedaedba743..c23e9c06ee23 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -106,6 +106,9 @@ * * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan * 050103 + * + * MPLS support by Steven Whitehouse + * */ #include #include @@ -154,7 +157,7 @@ #include /* do_div */ #include -#define VERSION "pktgen v2.66: Packet Generator for packet performance testing.\n" +#define VERSION "pktgen v2.67: Packet Generator for packet performance testing.\n" /* #define PG_DEBUG(a) a */ #define PG_DEBUG(a) @@ -162,6 +165,8 @@ /* The buckets are exponential in 'width' */ #define LAT_BUCKETS_MAX 32 #define IP_NAME_SZ 32 +#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ +#define MPLS_STACK_BOTTOM __constant_htonl(0x00000100) /* Device flag bits */ #define F_IPSRC_RND (1<<0) /* IP-Src Random */ @@ -172,6 +177,7 @@ #define F_MACDST_RND (1<<5) /* MAC-Dst Random */ #define F_TXSIZE_RND (1<<6) /* Transmit size is random */ #define F_IPV6 (1<<7) /* Interface in IPV6 Mode */ +#define F_MPLS_RND (1<<8) /* Random MPLS labels */ /* Thread control flag bits */ #define T_TERMINATE (1<<0) @@ -278,6 +284,10 @@ struct pktgen_dev { __u16 udp_dst_min; /* inclusive, dest UDP port */ __u16 udp_dst_max; /* exclusive, dest UDP port */ + /* MPLS */ + unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ + __be32 labels[MAX_MPLS_LABELS]; + __u32 src_mac_count; /* How many MACs to iterate through */ __u32 dst_mac_count; /* How many MACs to iterate through */ @@ -623,9 +633,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v) pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); seq_printf(seq, - " src_mac_count: %d dst_mac_count: %d \n Flags: ", + " src_mac_count: %d dst_mac_count: %d\n", pkt_dev->src_mac_count, pkt_dev->dst_mac_count); + if (pkt_dev->nr_labels) { + unsigned i; + seq_printf(seq, " mpls: "); + for(i = 0; i < pkt_dev->nr_labels; i++) + seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), + i == pkt_dev->nr_labels-1 ? 
"\n" : ", "); + } + + seq_printf(seq, " Flags: "); + if (pkt_dev->flags & F_IPV6) seq_printf(seq, "IPV6 "); @@ -644,6 +664,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_UDPDST_RND) seq_printf(seq, "UDPDST_RND "); + if (pkt_dev->flags & F_MPLS_RND) + seq_printf(seq, "MPLS_RND "); + if (pkt_dev->flags & F_MACSRC_RND) seq_printf(seq, "MACSRC_RND "); @@ -691,6 +714,29 @@ static int pktgen_if_show(struct seq_file *seq, void *v) return 0; } + +static int hex32_arg(const char __user *user_buffer, __u32 *num) +{ + int i = 0; + *num = 0; + + for(; i < 8; i++) { + char c; + *num <<= 4; + if (get_user(c, &user_buffer[i])) + return -EFAULT; + if ((c >= '0') && (c <= '9')) + *num |= c - '0'; + else if ((c >= 'a') && (c <= 'f')) + *num |= c - 'a' + 10; + else if ((c >= 'A') && (c <= 'F')) + *num |= c - 'A' + 10; + else + break; + } + return i; +} + static int count_trail_chars(const char __user * user_buffer, unsigned int maxlen) { @@ -759,6 +805,35 @@ static int strn_len(const char __user * user_buffer, unsigned int maxlen) return i; } +static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) +{ + unsigned n = 0; + char c; + ssize_t i = 0; + int len; + + pkt_dev->nr_labels = 0; + do { + __u32 tmp; + len = hex32_arg(&buffer[i], &tmp); + if (len <= 0) + return len; + pkt_dev->labels[n] = htonl(tmp); + if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) + pkt_dev->flags |= F_MPLS_RND; + i += len; + if (get_user(c, &buffer[i])) + return -EFAULT; + i++; + n++; + if (n >= MAX_MPLS_LABELS) + return -E2BIG; + } while(c == ','); + + pkt_dev->nr_labels = n; + return i; +} + static ssize_t pktgen_if_write(struct file *file, const char __user * user_buffer, size_t count, loff_t * offset) @@ -1059,6 +1134,12 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "!MACDST_RND") == 0) pkt_dev->flags &= ~F_MACDST_RND; + else if (strcmp(f, "MPLS_RND") == 0) + pkt_dev->flags |= F_MPLS_RND; + + else if (strcmp(f, "!MPLS_RND") == 0) + pkt_dev->flags &= ~F_MPLS_RND; + else { sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", @@ -1354,6 +1435,19 @@ static ssize_t pktgen_if_write(struct file *file, return count; } + if (!strcmp(name, "mpls")) { + unsigned n, offset; + len = get_labels(&user_buffer[i], pkt_dev); + if (len < 0) { return len; } + i += len; + offset = sprintf(pg_result, "OK: mpls="); + for(n = 0; n < pkt_dev->nr_labels; n++) + offset += sprintf(pg_result + offset, + "%08x%s", ntohl(pkt_dev->labels[n]), + n == pkt_dev->nr_labels-1 ? 
"" : ","); + return count; + } + sprintf(pkt_dev->result, "No such parameter \"%s\"", name); return -EINVAL; } @@ -1846,6 +1940,15 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) pkt_dev->hh[1] = tmp; } + if (pkt_dev->flags & F_MPLS_RND) { + unsigned i; + for(i = 0; i < pkt_dev->nr_labels; i++) + if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) + pkt_dev->labels[i] = MPLS_STACK_BOTTOM | + (pktgen_random() & + htonl(0x000fffff)); + } + if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { if (pkt_dev->flags & F_UDPSRC_RND) pkt_dev->cur_udp_src = @@ -1968,6 +2071,16 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) pkt_dev->flows[flow].count++; } +static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) +{ + unsigned i; + for(i = 0; i < pkt_dev->nr_labels; i++) { + *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; + } + mpls--; + *mpls |= MPLS_STACK_BOTTOM; +} + static struct sk_buff *fill_packet_ipv4(struct net_device *odev, struct pktgen_dev *pkt_dev) { @@ -1977,6 +2090,11 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, int datalen, iplen; struct iphdr *iph; struct pktgen_hdr *pgh = NULL; + __be16 protocol = __constant_htons(ETH_P_IP); + __be32 *mpls; + + if (pkt_dev->nr_labels) + protocol = __constant_htons(ETH_P_MPLS_UC); /* Update any of the values, used when we're incrementing various * fields. @@ -1984,7 +2102,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, mod_cur_headers(pkt_dev); datalen = (odev->hard_header_len + 16) & ~0xf; - skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen, GFP_ATOMIC); + skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + + pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -1994,13 +2113,18 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, /* Reserve for ethernet and IP header */ eth = (__u8 *) skb_push(skb, 14); + mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); + if (pkt_dev->nr_labels) + mpls_push(mpls, pkt_dev); iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)); udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); memcpy(eth, pkt_dev->hh, 12); - *(u16 *) & eth[12] = __constant_htons(ETH_P_IP); + *(u16 *) & eth[12] = protocol; - datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8; /* Eth + IPh + UDPh */ + /* Eth + IPh + UDPh + mpls */ + datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - + pkt_dev->nr_labels*sizeof(u32); if (datalen < sizeof(struct pktgen_hdr)) datalen = sizeof(struct pktgen_hdr); @@ -2021,8 +2145,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, iph->tot_len = htons(iplen); iph->check = 0; iph->check = ip_fast_csum((void *)iph, iph->ihl); - skb->protocol = __constant_htons(ETH_P_IP); - skb->mac.raw = ((u8 *) iph) - 14; + skb->protocol = protocol; + skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); skb->dev = odev; skb->pkt_type = PACKET_HOST; @@ -2274,13 +2398,19 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, int datalen; struct ipv6hdr *iph; struct pktgen_hdr *pgh = NULL; + __be16 protocol = __constant_htons(ETH_P_IPV6); + __be32 *mpls; + + if (pkt_dev->nr_labels) + protocol = __constant_htons(ETH_P_MPLS_UC); /* Update any of the values, used when we're incrementing various * fields. 
*/ mod_cur_headers(pkt_dev); - skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16, GFP_ATOMIC); + skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + + pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); if (!skb) { sprintf(pkt_dev->result, "No memory"); return NULL; @@ -2290,13 +2420,19 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, /* Reserve for ethernet and IP header */ eth = (__u8 *) skb_push(skb, 14); + mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); + if (pkt_dev->nr_labels) + mpls_push(mpls, pkt_dev); iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr)); udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); memcpy(eth, pkt_dev->hh, 12); *(u16 *) & eth[12] = __constant_htons(ETH_P_IPV6); - datalen = pkt_dev->cur_pkt_size - 14 - sizeof(struct ipv6hdr) - sizeof(struct udphdr); /* Eth + IPh + UDPh */ + /* Eth + IPh + UDPh + mpls */ + datalen = pkt_dev->cur_pkt_size - 14 - + sizeof(struct ipv6hdr) - sizeof(struct udphdr) - + pkt_dev->nr_labels*sizeof(u32); if (datalen < sizeof(struct pktgen_hdr)) { datalen = sizeof(struct pktgen_hdr); @@ -2320,8 +2456,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); - skb->mac.raw = ((u8 *) iph) - 14; - skb->protocol = __constant_htons(ETH_P_IPV6); + skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); + skb->protocol = protocol; skb->dev = odev; skb->pkt_type = PACKET_HOST; diff --git a/trunk/security/seclvl.c b/trunk/security/seclvl.c index 441beaf1bbc1..8529ea6f7aa8 100644 --- a/trunk/security/seclvl.c +++ b/trunk/security/seclvl.c @@ -8,7 +8,6 @@ * Copyright (c) 2001 WireX Communications, Inc * Copyright (c) 2001 Greg Kroah-Hartman * Copyright (c) 2002 International Business Machines - * Copyright (c) 2006 Davi E. M. Arnaut * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,7 +31,6 @@ #include #include #include -#include #include #include @@ -196,27 +194,35 @@ static unsigned char hashedPassword[SHA1_DIGEST_SIZE]; * people... */ static int -plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len) +plaintext_to_sha1(unsigned char *hash, const char *plaintext, int len) { + char *pgVirtAddr; struct crypto_tfm *tfm; - struct scatterlist sg; + struct scatterlist sg[1]; if (len > PAGE_SIZE) { seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d " "characters). Largest possible is %lu " "bytes.\n", len, PAGE_SIZE); - return -EINVAL; + return -ENOMEM; } tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); if (tfm == NULL) { seclvl_printk(0, KERN_ERR, "Failed to load transform for SHA1\n"); - return -EINVAL; + return -ENOSYS; } - sg_init_one(&sg, (u8 *)plaintext, len); + // Just get a new page; don't play around with page boundaries + // and scatterlists. 
+ pgVirtAddr = (char *)__get_free_page(GFP_KERNEL); + sg[0].page = virt_to_page(pgVirtAddr); + sg[0].offset = 0; + sg[0].length = len; + strncpy(pgVirtAddr, plaintext, len); crypto_digest_init(tfm); - crypto_digest_update(tfm, &sg, 1); + crypto_digest_update(tfm, sg, 1); crypto_digest_final(tfm, hash); crypto_free_tfm(tfm); + free_page((unsigned long)pgVirtAddr); return 0; } @@ -228,9 +234,11 @@ static ssize_t passwd_write_file(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { - char *p; - int len; + int i; unsigned char tmp[SHA1_DIGEST_SIZE]; + char *page; + int rc; + int len; if (!*passwd && !*sha1_passwd) { seclvl_printk(0, KERN_ERR, "Attempt to password-unlock the " @@ -243,39 +251,38 @@ passwd_write_file(struct file * file, const char __user * buf, return -EINVAL; } - if (count >= PAGE_SIZE) + if (count < 0 || count >= PAGE_SIZE) return -EINVAL; if (*ppos != 0) return -EINVAL; - p = kmalloc(count, GFP_KERNEL); - if (!p) + page = (char *)get_zeroed_page(GFP_KERNEL); + if (!page) return -ENOMEM; len = -EFAULT; - if (copy_from_user(p, buf, count)) + if (copy_from_user(page, buf, count)) goto out; - len = count; + len = strlen(page); /* ``echo "secret" > seclvl/passwd'' includes a newline */ - if (p[len - 1] == '\n') + if (page[len - 1] == '\n') len--; /* Hash the password, then compare the hashed values */ - if ((len = plaintext_to_sha1(tmp, p, len))) { + if ((rc = plaintext_to_sha1(tmp, page, len))) { seclvl_printk(0, KERN_ERR, "Error hashing password: rc = " - "[%d]\n", len); - goto out; + "[%d]\n", rc); + return rc; + } + for (i = 0; i < SHA1_DIGEST_SIZE; i++) { + if (hashedPassword[i] != tmp[i]) + return -EPERM; } - - len = -EPERM; - if (memcmp(hashedPassword, tmp, SHA1_DIGEST_SIZE)) - goto out; - seclvl_printk(0, KERN_INFO, "Password accepted; seclvl reduced to 0.\n"); seclvl = 0; len = count; out: - kfree (p); + free_page((unsigned long)page); return len; } @@ -288,11 +295,13 @@ static struct file_operations passwd_file_ops = { */ static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child) { - if (seclvl >= 0 && child->pid == 1) { - seclvl_printk(1, KERN_WARNING, "Attempt to ptrace " - "the init process dissallowed in " - "secure level %d\n", seclvl); - return -EPERM; + if (seclvl >= 0) { + if (child->pid == 1) { + seclvl_printk(1, KERN_WARNING, "Attempt to ptrace " + "the init process dissallowed in " + "secure level %d\n", seclvl); + return -EPERM; + } } return 0; } @@ -303,54 +312,55 @@ static int seclvl_ptrace(struct task_struct *parent, struct task_struct *child) */ static int seclvl_capable(struct task_struct *tsk, int cap) { - int rc = 0; - /* init can do anything it wants */ if (tsk->pid == 1) return 0; - if (seclvl > 0) { - rc = -EPERM; - - if (cap == CAP_LINUX_IMMUTABLE) + switch (seclvl) { + case 2: + /* fall through */ + case 1: + if (cap == CAP_LINUX_IMMUTABLE) { seclvl_printk(1, KERN_WARNING, "Attempt to modify " "the IMMUTABLE and/or APPEND extended " "attribute on a file with the IMMUTABLE " "and/or APPEND extended attribute set " "denied in seclvl [%d]\n", seclvl); - else if (cap == CAP_SYS_RAWIO) + return -EPERM; + } else if (cap == CAP_SYS_RAWIO) { // Somewhat broad... 
seclvl_printk(1, KERN_WARNING, "Attempt to perform " "raw I/O while in secure level [%d] " "denied\n", seclvl); - else if (cap == CAP_NET_ADMIN) + return -EPERM; + } else if (cap == CAP_NET_ADMIN) { seclvl_printk(1, KERN_WARNING, "Attempt to perform " "network administrative task while " "in secure level [%d] denied\n", seclvl); - else if (cap == CAP_SETUID) + return -EPERM; + } else if (cap == CAP_SETUID) { seclvl_printk(1, KERN_WARNING, "Attempt to setuid " "while in secure level [%d] denied\n", seclvl); - else if (cap == CAP_SETGID) + return -EPERM; + } else if (cap == CAP_SETGID) { seclvl_printk(1, KERN_WARNING, "Attempt to setgid " "while in secure level [%d] denied\n", seclvl); - else if (cap == CAP_SYS_MODULE) + } else if (cap == CAP_SYS_MODULE) { seclvl_printk(1, KERN_WARNING, "Attempt to perform " "a module operation while in secure " "level [%d] denied\n", seclvl); - else - rc = 0; - } - - if (!rc) { - if (!(cap_is_fs_cap(cap) ? tsk->fsuid == 0 : tsk->euid == 0)) - rc = -EPERM; + return -EPERM; + } + break; + default: + break; } - - if (rc) - seclvl_printk(1, KERN_WARNING, "Capability denied\n"); - - return rc; + /* from dummy.c */ + if (cap_is_fs_cap(cap) ? tsk->fsuid == 0 : tsk->euid == 0) + return 0; /* capability granted */ + seclvl_printk(1, KERN_WARNING, "Capability denied\n"); + return -EPERM; /* capability denied */ } /** @@ -456,9 +466,12 @@ static int seclvl_inode_setattr(struct dentry *dentry, struct iattr *iattr) static void seclvl_file_free_security(struct file *filp) { struct dentry *dentry = filp->f_dentry; + struct inode *inode = NULL; - if (dentry) - seclvl_bd_release(dentry->d_inode); + if (dentry) { + inode = dentry->d_inode; + seclvl_bd_release(inode); + } } /** @@ -466,7 +479,9 @@ static void seclvl_file_free_security(struct file *filp) */ static int seclvl_umount(struct vfsmount *mnt, int flags) { - if (current->pid != 1 && seclvl == 2) { + if (current->pid == 1) + return 0; + if (seclvl == 2) { seclvl_printk(1, KERN_WARNING, "Attempt to unmount in secure " "level %d\n", seclvl); return -EPERM; @@ -490,9 +505,8 @@ static struct security_operations seclvl_ops = { static int processPassword(void) { int rc = 0; + hashedPassword[0] = '\0'; if (*passwd) { - char *p; - if (*sha1_passwd) { seclvl_printk(0, KERN_ERR, "Error: Both " "passwd and sha1_passwd " @@ -500,16 +514,12 @@ static int processPassword(void) "exclusive.\n"); return -EINVAL; } - - p = kstrdup(passwd, GFP_KERNEL); - if (p == NULL) - return -ENOMEM; - - if ((rc = plaintext_to_sha1(hashedPassword, p, strlen(p)))) + if ((rc = plaintext_to_sha1(hashedPassword, passwd, + strlen(passwd)))) { seclvl_printk(0, KERN_ERR, "Error: SHA1 support not " "in kernel\n"); - - kfree (p); + return rc; + } /* All static data goes to the BSS, which zero's the * plaintext password out for us. 
*/ } else if (*sha1_passwd) { // Base 16 @@ -532,7 +542,7 @@ static int processPassword(void) sha1_passwd[i + 2] = tmp; } } - return rc; + return 0; } /** @@ -542,46 +552,28 @@ struct dentry *dir_ino, *seclvl_ino, *passwd_ino; static int seclvlfs_register(void) { - int rc = 0; - dir_ino = securityfs_create_dir("seclvl", NULL); - - if (IS_ERR(dir_ino)) - return PTR_ERR(dir_ino); + if (!dir_ino) + return -EFAULT; seclvl_ino = securityfs_create_file("seclvl", S_IRUGO | S_IWUSR, dir_ino, &seclvl, &seclvl_file_ops); - if (IS_ERR(seclvl_ino)) { - rc = PTR_ERR(seclvl_ino); + if (!seclvl_ino) goto out_deldir; - } if (*passwd || *sha1_passwd) { passwd_ino = securityfs_create_file("passwd", S_IRUGO | S_IWUSR, dir_ino, NULL, &passwd_file_ops); - if (IS_ERR(passwd_ino)) { - rc = PTR_ERR(passwd_ino); + if (!passwd_ino) goto out_delf; - } } - return rc; - -out_delf: - securityfs_remove(seclvl_ino); + return 0; out_deldir: securityfs_remove(dir_ino); - - return rc; -} - -static void seclvlfs_unregister(void) -{ +out_delf: securityfs_remove(seclvl_ino); - if (*passwd || *sha1_passwd) - securityfs_remove(passwd_ino); - - securityfs_remove(dir_ino); + return -EFAULT; } /** @@ -590,8 +582,6 @@ static void seclvlfs_unregister(void) static int __init seclvl_init(void) { int rc = 0; - static char once; - if (verbosity < 0 || verbosity > 1) { printk(KERN_ERR "Error: bad verbosity [%d]; only 0 or 1 " "are valid values\n", verbosity); @@ -610,11 +600,6 @@ static int __init seclvl_init(void) "module parameter(s): rc = [%d]\n", rc); goto exit; } - - if ((rc = seclvlfs_register())) { - seclvl_printk(0, KERN_ERR, "Error registering with sysfs\n"); - goto exit; - } /* register ourselves with the security framework */ if (register_security(&seclvl_ops)) { seclvl_printk(0, KERN_ERR, @@ -626,24 +611,20 @@ static int __init seclvl_init(void) seclvl_printk(0, KERN_ERR, "seclvl: Failure " "registering with primary security " "module.\n"); - seclvlfs_unregister(); goto exit; } /* if primary module registered */ secondary = 1; } /* if we registered ourselves with the security framework */ - - seclvl_printk(0, KERN_INFO, "seclvl: Successfully initialized.\n"); - - if (once) { - once = 1; - seclvl_printk(0, KERN_INFO, "seclvl is going away. It has been " - "buggy for ages. 
Also, be warned that " - "Securelevels are useless."); + if ((rc = seclvlfs_register())) { + seclvl_printk(0, KERN_ERR, "Error registering with sysfs\n"); + goto exit; } + seclvl_printk(0, KERN_INFO, "seclvl: Successfully initialized.\n"); exit: - if (rc) + if (rc) { printk(KERN_ERR "seclvl: Error during initialization: rc = " "[%d]\n", rc); + } return rc; } @@ -652,14 +633,17 @@ static int __init seclvl_init(void) */ static void __exit seclvl_exit(void) { - seclvlfs_unregister(); - - if (secondary) + securityfs_remove(seclvl_ino); + if (*passwd || *sha1_passwd) + securityfs_remove(passwd_ino); + securityfs_remove(dir_ino); + if (secondary == 1) { mod_unreg_security(MY_NAME, &seclvl_ops); - else if (unregister_security(&seclvl_ops)) + } else if (unregister_security(&seclvl_ops)) { seclvl_printk(0, KERN_INFO, "seclvl: Failure unregistering with the " "kernel\n"); + } } module_init(seclvl_init); diff --git a/trunk/sound/oss/ac97_codec.c b/trunk/sound/oss/ac97_codec.c index 972327c97644..fd25aca25120 100644 --- a/trunk/sound/oss/ac97_codec.c +++ b/trunk/sound/oss/ac97_codec.c @@ -55,7 +55,7 @@ #include #include #include -#include +#include #define CODEC_ID_BUFSZ 14 @@ -304,7 +304,7 @@ static const unsigned int ac97_oss_rm[] = { static LIST_HEAD(codecs); static LIST_HEAD(codec_drivers); -static DEFINE_MUTEX(codec_mutex); +static DECLARE_MUTEX(codec_sem); /* reads the given OSS mixer from the ac97 the caller must have insured that the ac97 knows about that given mixer, and should be holding a spinlock for the card */ @@ -769,9 +769,9 @@ void ac97_release_codec(struct ac97_codec *codec) { /* Remove from the list first, we don't want to be "rediscovered" */ - mutex_lock(&codec_mutex); + down(&codec_sem); list_del(&codec->list); - mutex_unlock(&codec_mutex); + up(&codec_sem); /* * The driver needs to deal with internal * locking to avoid accidents here. @@ -889,7 +889,7 @@ int ac97_probe_codec(struct ac97_codec *codec) * callbacks. 
*/ - mutex_lock(&codec_mutex); + down(&codec_sem); list_add(&codec->list, &codecs); list_for_each(l, &codec_drivers) { @@ -903,7 +903,7 @@ int ac97_probe_codec(struct ac97_codec *codec) } } - mutex_unlock(&codec_mutex); + up(&codec_sem); return 1; } @@ -1439,7 +1439,7 @@ int ac97_register_driver(struct ac97_driver *driver) struct list_head *l; struct ac97_codec *c; - mutex_lock(&codec_mutex); + down(&codec_sem); INIT_LIST_HEAD(&driver->list); list_add(&driver->list, &codec_drivers); @@ -1452,7 +1452,7 @@ int ac97_register_driver(struct ac97_driver *driver) continue; c->driver = driver; } - mutex_unlock(&codec_mutex); + up(&codec_sem); return 0; } @@ -1471,7 +1471,7 @@ void ac97_unregister_driver(struct ac97_driver *driver) struct list_head *l; struct ac97_codec *c; - mutex_lock(&codec_mutex); + down(&codec_sem); list_del_init(&driver->list); list_for_each(l, &codecs) @@ -1483,7 +1483,7 @@ void ac97_unregister_driver(struct ac97_driver *driver) } } - mutex_unlock(&codec_mutex); + up(&codec_sem); } EXPORT_SYMBOL_GPL(ac97_unregister_driver); @@ -1494,14 +1494,14 @@ static int swap_headphone(int remove_master) struct ac97_codec *c; if (remove_master) { - mutex_lock(&codec_mutex); + down(&codec_sem); list_for_each(l, &codecs) { c = list_entry(l, struct ac97_codec, list); if (supported_mixer(c, SOUND_MIXER_PHONEOUT)) c->supported_mixers &= ~SOUND_MASK_PHONEOUT; } - mutex_unlock(&codec_mutex); + up(&codec_sem); } else ac97_hw[SOUND_MIXER_PHONEOUT].offset = AC97_MASTER_VOL_STEREO; diff --git a/trunk/sound/oss/aci.c b/trunk/sound/oss/aci.c index 3bfac375dbdb..3928c2802cc4 100644 --- a/trunk/sound/oss/aci.c +++ b/trunk/sound/oss/aci.c @@ -56,8 +56,7 @@ #include #include #include -#include - +#include #include #include #include "sound_config.h" @@ -80,7 +79,7 @@ static int aci_micpreamp=3; /* microphone preamp-level that can't be * * checked with ACI versions prior to 0xb0 */ static int mixer_device; -static struct mutex aci_mutex; +static struct semaphore aci_sem; #ifdef MODULE static int reset; @@ -213,7 +212,7 @@ int aci_rw_cmd(int write1, int write2, int write3) int write[] = {write1, write2, write3}; int read = -EINTR, i; - if (mutex_lock_interruptible(&aci_mutex)) + if (down_interruptible(&aci_sem)) goto out; for (i=0; i<3; i++) { @@ -228,7 +227,7 @@ int aci_rw_cmd(int write1, int write2, int write3) } read = aci_rawread(); -out_up: mutex_unlock(&aci_mutex); +out_up: up(&aci_sem); out: return read; } @@ -604,7 +603,7 @@ static int __init attach_aci(void) char *boardname; int i, rc = -EBUSY; - mutex_init(&aci_mutex); + init_MUTEX(&aci_sem); outb(0xE3, 0xf8f); /* Write MAD16 password */ aci_port = (inb(0xf90) & 0x10) ? 
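[Editor's note: the sound/oss hunks above and below all revert the same 2.6.16 mutex conversion: DEFINE_MUTEX/mutex_lock/mutex_unlock go back to DECLARE_MUTEX/down/up, and mutex_lock_interruptible() back to down_interruptible(). A minimal sketch of the restored semaphore-as-mutex idiom follows; the list and names are illustrative, not taken from the patch.

	#include <linux/list.h>
	#include <asm/semaphore.h>

	static LIST_HEAD(example_list);
	static DECLARE_MUTEX(example_sem);	/* counting semaphore, initial count 1 */

	static void example_add(struct list_head *entry)
	{
		down(&example_sem);		/* sleeps uninterruptibly until free */
		list_add(entry, &example_list);
		up(&example_sem);
	}

	static int example_add_interruptible(struct list_head *entry)
	{
		if (down_interruptible(&example_sem))
			return -EINTR;		/* a signal interrupted the wait */
		list_add(entry, &example_list);
		up(&example_sem);
		return 0;
	}

Unlike struct mutex, a semaphore used this way carries no single-owner guarantee, which is one reason later kernels moved these call sites to the stricter mutex API.]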
diff --git a/trunk/sound/oss/ad1889.c b/trunk/sound/oss/ad1889.c index 54dabf862802..a0d73f343100 100644 --- a/trunk/sound/oss/ad1889.c +++ b/trunk/sound/oss/ad1889.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include @@ -239,7 +238,7 @@ static ad1889_dev_t *ad1889_alloc_dev(struct pci_dev *pci) for (i = 0; i < AD_MAX_STATES; i++) { dev->state[i].card = dev; - mutex_init(&dev->state[i].mutex); + init_MUTEX(&dev->state[i].sem); init_waitqueue_head(&dev->state[i].dmabuf.wait); } @@ -462,7 +461,7 @@ static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t ssize_t ret = 0; DECLARE_WAITQUEUE(wait, current); - mutex_lock(&state->mutex); + down(&state->sem); #if 0 if (dmabuf->mapped) { ret = -ENXIO; @@ -547,7 +546,7 @@ static ssize_t ad1889_write(struct file *file, const char __user *buffer, size_t err2: remove_wait_queue(&state->dmabuf.wait, &wait); err1: - mutex_unlock(&state->mutex); + up(&state->sem); return ret; } diff --git a/trunk/sound/oss/ad1889.h b/trunk/sound/oss/ad1889.h index 861b3213f30b..e04affce1dd1 100644 --- a/trunk/sound/oss/ad1889.h +++ b/trunk/sound/oss/ad1889.h @@ -100,7 +100,7 @@ typedef struct ad1889_state { unsigned int subdivision; } dmabuf; - struct mutex mutex; + struct semaphore sem; } ad1889_state_t; typedef struct ad1889_dev { diff --git a/trunk/sound/oss/ali5455.c b/trunk/sound/oss/ali5455.c index 62bb936b1f3d..9c9e6c0410f2 100644 --- a/trunk/sound/oss/ali5455.c +++ b/trunk/sound/oss/ali5455.c @@ -64,8 +64,6 @@ #include #include #include -#include - #include #ifndef PCI_DEVICE_ID_ALI_5455 @@ -236,7 +234,7 @@ struct ali_state { struct ali_card *card; /* Card info */ /* single open lock mechanism, only used for recording */ - struct mutex open_mutex; + struct semaphore open_sem; wait_queue_head_t open_wait; /* file mode */ @@ -2809,7 +2807,7 @@ static int ali_open(struct inode *inode, struct file *file) state->card = card; state->magic = ALI5455_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - mutex_init(&state->open_mutex); + init_MUTEX(&state->open_sem); file->private_data = state; dmabuf->trigger = 0; /* allocate hardware channels */ @@ -3361,7 +3359,7 @@ static void __devinit ali_configure_clocking(void) state->card = card; state->magic = ALI5455_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - mutex_init(&state->open_mutex); + init_MUTEX(&state->open_sem); dmabuf->fmt = ALI5455_FMT_STEREO | ALI5455_FMT_16BIT; dmabuf->trigger = PCM_ENABLE_OUTPUT; ali_set_dac_rate(state, 48000); diff --git a/trunk/sound/oss/au1000.c b/trunk/sound/oss/au1000.c index fe54de25aafc..c407de86cbb6 100644 --- a/trunk/sound/oss/au1000.c +++ b/trunk/sound/oss/au1000.c @@ -68,8 +68,6 @@ #include #include #include -#include - #include #include #include @@ -122,8 +120,8 @@ struct au1000_state { int no_vra; // do not use VRA spinlock_t lock; - struct mutex open_mutex; - struct mutex sem; + struct semaphore open_sem; + struct semaphore sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1108,7 +1106,7 @@ static ssize_t au1000_read(struct file *file, char *buffer, count *= db->cnt_factor; - mutex_lock(&s->sem); + down(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1127,14 +1125,14 @@ static ssize_t au1000_read(struct file *file, char *buffer, ret = -EAGAIN; goto out; } - mutex_unlock(&s->sem); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - mutex_lock(&s->sem); + down(&s->sem); } } while (avail <= 0); @@ -1161,7 +1159,7 @@ static ssize_t au1000_read(struct file *file, 
char *buffer, } // while (count > 0) out: - mutex_unlock(&s->sem); + up(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1189,7 +1187,7 @@ static ssize_t au1000_write(struct file *file, const char *buffer, count *= db->cnt_factor; - mutex_lock(&s->sem); + down(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1206,14 +1204,14 @@ static ssize_t au1000_write(struct file *file, const char *buffer, ret = -EAGAIN; goto out; } - mutex_unlock(&s->sem); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - mutex_lock(&s->sem); + down(&s->sem); } } while (avail <= 0); @@ -1242,7 +1240,7 @@ static ssize_t au1000_write(struct file *file, const char *buffer, } // while (count > 0) out: - mutex_unlock(&s->sem); + up(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1300,7 +1298,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma) dbg("%s", __FUNCTION__); lock_kernel(); - mutex_lock(&s->sem); + down(&s->sem); if (vma->vm_flags & VM_WRITE) db = &s->dma_dac; else if (vma->vm_flags & VM_READ) @@ -1326,7 +1324,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags &= ~VM_IO; db->mapped = 1; out: - mutex_unlock(&s->sem); + up(&s->sem); unlock_kernel(); return ret; } @@ -1831,21 +1829,21 @@ static int au1000_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } stop_dac(s); @@ -1881,8 +1879,8 @@ static int au1000_open(struct inode *inode, struct file *file) } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); - mutex_init(&s->sem); + up(&s->open_sem); + init_MUTEX(&s->sem); return nonseekable_open(inode, file); } @@ -1898,7 +1896,7 @@ static int au1000_release(struct inode *inode, struct file *file) lock_kernel(); } - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1908,7 +1906,7 @@ static int au1000_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1998,7 +1996,7 @@ static int __devinit au1000_probe(void) init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->codec.private_data = s; s->codec.id = 0; diff --git a/trunk/sound/oss/au1550_ac97.c b/trunk/sound/oss/au1550_ac97.c index 6a4956b8025d..bdee0502f3e2 100644 --- a/trunk/sound/oss/au1550_ac97.c +++ b/trunk/sound/oss/au1550_ac97.c @@ -52,8 +52,6 @@ #include #include #include -#include - #include #include #include @@ -92,8 +90,8 @@ static struct au1550_state { int no_vra; /* do not use VRA */ spinlock_t lock; - struct mutex open_mutex; - 
struct mutex sem; + struct semaphore open_sem; + struct semaphore sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1046,7 +1044,7 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos) count *= db->cnt_factor; - mutex_lock(&s->sem); + down(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1066,14 +1064,14 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos) ret = -EAGAIN; goto out; } - mutex_unlock(&s->sem); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - mutex_lock(&s->sem); + down(&s->sem); } } while (avail <= 0); @@ -1101,7 +1099,7 @@ au1550_read(struct file *file, char *buffer, size_t count, loff_t *ppos) } /* while (count > 0) */ out: - mutex_unlock(&s->sem); + up(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1127,7 +1125,7 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) count *= db->cnt_factor; - mutex_lock(&s->sem); + down(&s->sem); add_wait_queue(&db->wait, &wait); while (count > 0) { @@ -1145,14 +1143,14 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) ret = -EAGAIN; goto out; } - mutex_unlock(&s->sem); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - mutex_lock(&s->sem); + down(&s->sem); } } while (avail <= 0); @@ -1198,7 +1196,7 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos) } /* while (count > 0) */ out: - mutex_unlock(&s->sem); + up(&s->sem); out2: remove_wait_queue(&db->wait, &wait); set_current_state(TASK_RUNNING); @@ -1255,7 +1253,7 @@ au1550_mmap(struct file *file, struct vm_area_struct *vma) int ret = 0; lock_kernel(); - mutex_lock(&s->sem); + down(&s->sem); if (vma->vm_flags & VM_WRITE) db = &s->dma_dac; else if (vma->vm_flags & VM_READ) @@ -1281,7 +1279,7 @@ au1550_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags &= ~VM_IO; db->mapped = 1; out: - mutex_unlock(&s->sem); + up(&s->sem); unlock_kernel(); return ret; } @@ -1792,21 +1790,21 @@ au1550_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } stop_dac(s); @@ -1842,8 +1840,8 @@ au1550_open(struct inode *inode, struct file *file) } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); - mutex_init(&s->sem); + up(&s->open_sem); + init_MUTEX(&s->sem); return 0; } @@ -1860,7 +1858,7 @@ au1550_release(struct inode *inode, struct file *file) lock_kernel(); } - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); kfree(s->dma_dac.rawbuf); @@ -1872,7 +1870,7 @@ au1550_release(struct inode *inode, struct file *file) s->dma_adc.rawbuf = NULL; } s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1904,7 +1902,7 @@ au1550_probe(void) 
init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->codec = ac97_alloc_codec(); diff --git a/trunk/sound/oss/btaudio.c b/trunk/sound/oss/btaudio.c index bfe3b534ef30..4007a5680acb 100644 --- a/trunk/sound/oss/btaudio.c +++ b/trunk/sound/oss/btaudio.c @@ -32,8 +32,6 @@ #include #include #include -#include - #include #include @@ -110,7 +108,7 @@ struct btaudio { /* locking */ int users; - struct mutex lock; + struct semaphore lock; /* risc instructions */ unsigned int risc_size; @@ -442,7 +440,7 @@ static struct file_operations btaudio_mixer_fops = { static int btaudio_dsp_open(struct inode *inode, struct file *file, struct btaudio *bta, int analog) { - mutex_lock(&bta->lock); + down(&bta->lock); if (bta->users) goto busy; bta->users++; @@ -454,11 +452,11 @@ static int btaudio_dsp_open(struct inode *inode, struct file *file, bta->read_count = 0; bta->sampleshift = 0; - mutex_unlock(&bta->lock); + up(&bta->lock); return 0; busy: - mutex_unlock(&bta->lock); + up(&bta->lock); return -EBUSY; } @@ -498,11 +496,11 @@ static int btaudio_dsp_release(struct inode *inode, struct file *file) { struct btaudio *bta = file->private_data; - mutex_lock(&bta->lock); + down(&bta->lock); if (bta->recording) stop_recording(bta); bta->users--; - mutex_unlock(&bta->lock); + up(&bta->lock); return 0; } @@ -515,7 +513,7 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer, DECLARE_WAITQUEUE(wait, current); add_wait_queue(&bta->readq, &wait); - mutex_lock(&bta->lock); + down(&bta->lock); while (swcount > 0) { if (0 == bta->read_count) { if (!bta->recording) { @@ -530,10 +528,10 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer, ret = -EAGAIN; break; } - mutex_unlock(&bta->lock); + up(&bta->lock); current->state = TASK_INTERRUPTIBLE; schedule(); - mutex_lock(&bta->lock); + down(&bta->lock); if(signal_pending(current)) { if (0 == ret) ret = -EINTR; @@ -606,7 +604,7 @@ static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer, if (bta->read_offset == bta->buf_size) bta->read_offset = 0; } - mutex_unlock(&bta->lock); + up(&bta->lock); remove_wait_queue(&bta->readq, &wait); current->state = TASK_RUNNING; return ret; @@ -653,10 +651,10 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file, bta->decimation = 0; } if (bta->recording) { - mutex_lock(&bta->lock); + down(&bta->lock); stop_recording(bta); start_recording(bta); - mutex_unlock(&bta->lock); + up(&bta->lock); } /* fall through */ case SOUND_PCM_READ_RATE: @@ -718,10 +716,10 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file, else bta->bits = 16; if (bta->recording) { - mutex_lock(&bta->lock); + down(&bta->lock); stop_recording(bta); start_recording(bta); - mutex_unlock(&bta->lock); + up(&bta->lock); } } if (debug) @@ -738,9 +736,9 @@ static int btaudio_dsp_ioctl(struct inode *inode, struct file *file, case SNDCTL_DSP_RESET: if (bta->recording) { - mutex_lock(&bta->lock); + down(&bta->lock); stop_recording(bta); - mutex_unlock(&bta->lock); + up(&bta->lock); } return 0; case SNDCTL_DSP_GETBLKSIZE: @@ -943,7 +941,7 @@ static int __devinit btaudio_probe(struct pci_dev *pci_dev, if (rate) bta->rate = rate; - mutex_init(&bta->lock); + init_MUTEX(&bta->lock); init_waitqueue_head(&bta->readq); if (-1 != latency) { diff --git a/trunk/sound/oss/cmpci.c b/trunk/sound/oss/cmpci.c index 1fbd5137f6d7..7cfbb08db537 100644 --- 
a/trunk/sound/oss/cmpci.c +++ b/trunk/sound/oss/cmpci.c @@ -138,8 +138,6 @@ #endif #ifdef CONFIG_SOUND_CMPCI_JOYSTICK #include -#include - #endif /* --------------------------------------------------------------------- */ @@ -394,7 +392,7 @@ struct cm_state { unsigned char fmt, enable; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -2827,21 +2825,21 @@ static int cm_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } if (file->f_mode & FMODE_READ) { s->status &= ~DO_BIGENDIAN_R; @@ -2869,7 +2867,7 @@ static int cm_open(struct inode *inode, struct file *file) } set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2881,7 +2879,7 @@ static int cm_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); @@ -2905,7 +2903,7 @@ static int cm_release(struct inode *inode, struct file *file) s->status &= ~DO_BIGENDIAN_R; } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -3082,7 +3080,7 @@ static int __devinit cm_probe(struct pci_dev *pcidev, const struct pci_device_id init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->magic = CM_MAGIC; s->dev = pcidev; diff --git a/trunk/sound/oss/cs4281/cs4281m.c b/trunk/sound/oss/cs4281/cs4281m.c index 0004442f9b7e..0720365f6438 100644 --- a/trunk/sound/oss/cs4281/cs4281m.c +++ b/trunk/sound/oss/cs4281/cs4281m.c @@ -245,9 +245,9 @@ struct cs4281_state { void *tmpbuff; // tmp buffer for sample conversions unsigned ena; spinlock_t lock; - struct mutex open_sem; - struct mutex open_sem_adc; - struct mutex open_sem_dac; + struct semaphore open_sem; + struct semaphore open_sem_adc; + struct semaphore open_sem_dac; mode_t open_mode; wait_queue_head_t open_wait; wait_queue_head_t open_wait_adc; @@ -3598,20 +3598,20 @@ static int cs4281_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) { drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_sem_dac); + down(&s->open_sem_dac); stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); s->open_mode &= ~FMODE_WRITE; - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); wake_up(&s->open_wait_dac); } if (file->f_mode & FMODE_READ) { drain_adc(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_sem_adc); + down(&s->open_sem_adc); stop_adc(s); dealloc_dmabuf(s, &s->dma_adc); s->open_mode &= ~FMODE_READ; - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); 
wake_up(&s->open_wait_adc); } return 0; @@ -3651,33 +3651,33 @@ static int cs4281_open(struct inode *inode, struct file *file) return -ENODEV; } if (file->f_mode & FMODE_WRITE) { - mutex_lock(&s->open_sem_dac); + down(&s->open_sem_dac); while (s->open_mode & FMODE_WRITE) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); return -EBUSY; } - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); interruptible_sleep_on(&s->open_wait_dac); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_sem_dac); + down(&s->open_sem_dac); } } if (file->f_mode & FMODE_READ) { - mutex_lock(&s->open_sem_adc); + down(&s->open_sem_adc); while (s->open_mode & FMODE_READ) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); return -EBUSY; } - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); interruptible_sleep_on(&s->open_wait_adc); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_sem_adc); + down(&s->open_sem_adc); } } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); @@ -3691,7 +3691,7 @@ static int cs4281_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_READ; s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); if (prog_dmabuf_adc(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -3711,7 +3711,7 @@ static int cs4281_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_WRITE; s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); if (prog_dmabuf_dac(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -3978,17 +3978,17 @@ static int cs4281_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; // wait for device to become free - mutex_lock(&s->open_sem); + down(&s->open_sem); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_sem); + up(&s->open_sem); return -EBUSY; } - mutex_unlock(&s->open_sem); + up(&s->open_sem); interruptible_sleep_on(&s->open_wait); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_sem); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -4018,7 +4018,7 @@ static int cs4281_midi_open(struct inode *inode, struct file *file) (file-> f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - mutex_unlock(&s->open_sem); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -4057,7 +4057,7 @@ static int cs4281_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); current->state = TASK_RUNNING; } - mutex_lock(&s->open_sem); + down(&s->open_sem); s->open_mode &= (~(file->f_mode << FMODE_MIDI_SHIFT)) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); @@ -4067,7 +4067,7 @@ static int cs4281_midi_release(struct inode *inode, struct file *file) del_timer(&s->midi.timer); } spin_unlock_irqrestore(&s->lock, flags); - mutex_unlock(&s->open_sem); + up(&s->open_sem); wake_up(&s->open_wait); return 0; } @@ -4300,9 +4300,9 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev, init_waitqueue_head(&s->open_wait_dac); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - mutex_init(&s->open_sem); - mutex_init(&s->open_sem_adc); - mutex_init(&s->open_sem_dac); + init_MUTEX(&s->open_sem); + 
init_MUTEX(&s->open_sem_adc); + init_MUTEX(&s->open_sem_dac); spin_lock_init(&s->lock); s->pBA0phys = pci_resource_start(pcidev, 0); s->pBA1phys = pci_resource_start(pcidev, 1); diff --git a/trunk/sound/oss/cs46xx.c b/trunk/sound/oss/cs46xx.c index 53881bc91bba..58e25c82eaf2 100644 --- a/trunk/sound/oss/cs46xx.c +++ b/trunk/sound/oss/cs46xx.c @@ -90,7 +90,6 @@ #include #include #include -#include #include #include @@ -239,7 +238,7 @@ struct cs_state { struct cs_card *card; /* Card info */ /* single open lock mechanism, only used for recording */ - struct mutex open_mutex; + struct semaphore open_sem; wait_queue_head_t open_wait; /* file mode */ @@ -298,7 +297,7 @@ struct cs_state { unsigned subdivision; } dmabuf; /* Guard against mmap/write/read races */ - struct mutex sem; + struct semaphore sem; }; struct cs_card { @@ -376,7 +375,7 @@ struct cs_card { unsigned char ibuf[CS_MIDIINBUF]; unsigned char obuf[CS_MIDIOUTBUF]; mode_t open_mode; - struct mutex open_mutex; + struct semaphore open_sem; } midi; struct cs46xx_pm pm; }; @@ -1429,9 +1428,9 @@ static int prog_dmabuf(struct cs_state *state) { int ret; - mutex_lock(&state->sem); + down(&state->sem); ret = __prog_dmabuf(state); - mutex_unlock(&state->sem); + up(&state->sem); return ret; } @@ -1832,17 +1831,17 @@ static int cs_midi_open(struct inode *inode, struct file *file) file->private_data = card; /* wait for device to become free */ - mutex_lock(&card->midi.open_mutex); + down(&card->midi.open_sem); while (card->midi.open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&card->midi.open_mutex); + up(&card->midi.open_sem); return -EBUSY; } - mutex_unlock(&card->midi.open_mutex); + up(&card->midi.open_sem); interruptible_sleep_on(&card->midi.open_wait); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&card->midi.open_mutex); + down(&card->midi.open_sem); } spin_lock_irqsave(&card->midi.lock, flags); if (!(card->midi.open_mode & (FMODE_READ | FMODE_WRITE))) { @@ -1860,7 +1859,7 @@ static int cs_midi_open(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&card->midi.lock, flags); card->midi.open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE)); - mutex_unlock(&card->midi.open_mutex); + up(&card->midi.open_sem); return 0; } @@ -1892,9 +1891,9 @@ static int cs_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&card->midi.owait, &wait); current->state = TASK_RUNNING; } - mutex_lock(&card->midi.open_mutex); + down(&card->midi.open_sem); card->midi.open_mode &= (~(file->f_mode & (FMODE_READ | FMODE_WRITE))); - mutex_unlock(&card->midi.open_mutex); + up(&card->midi.open_sem); wake_up(&card->midi.open_wait); return 0; } @@ -2082,7 +2081,7 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - mutex_lock(&state->sem); + down(&state->sem); if (!dmabuf->ready && (ret = __prog_dmabuf(state))) goto out2; @@ -2115,13 +2114,13 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof if (!ret) ret = -EAGAIN; goto out; } - mutex_unlock(&state->sem); + up(&state->sem); schedule(); if (signal_pending(current)) { if(!ret) ret = -ERESTARTSYS; goto out; } - mutex_lock(&state->sem); + down(&state->sem); if (dmabuf->mapped) { if(!ret) @@ -2156,7 +2155,7 @@ static ssize_t cs_read(struct file *file, char __user *buffer, size_t count, lof out: remove_wait_queue(&state->dmabuf.wait, &wait); out2: - mutex_unlock(&state->sem); + up(&state->sem); 
set_current_state(TASK_RUNNING); CS_DBGOUT(CS_WAVE_READ | CS_FUNCTION, 4, printk("cs46xx: cs_read()- %zd\n",ret) ); @@ -2185,7 +2184,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou return -EFAULT; dmabuf = &state->dmabuf; - mutex_lock(&state->sem); + down(&state->sem); if (dmabuf->mapped) { ret = -ENXIO; @@ -2241,13 +2240,13 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou if (!ret) ret = -EAGAIN; goto out; } - mutex_unlock(&state->sem); + up(&state->sem); schedule(); if (signal_pending(current)) { if(!ret) ret = -ERESTARTSYS; goto out; } - mutex_lock(&state->sem); + down(&state->sem); if (dmabuf->mapped) { if(!ret) @@ -2279,7 +2278,7 @@ static ssize_t cs_write(struct file *file, const char __user *buffer, size_t cou start_dac(state); } out: - mutex_unlock(&state->sem); + up(&state->sem); remove_wait_queue(&state->dmabuf.wait, &wait); set_current_state(TASK_RUNNING); @@ -2412,7 +2411,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma) goto out; } - mutex_lock(&state->sem); + down(&state->sem); dmabuf = &state->dmabuf; if (cs4x_pgoff(vma) != 0) { @@ -2439,7 +2438,7 @@ static int cs_mmap(struct file *file, struct vm_area_struct *vma) CS_DBGOUT(CS_FUNCTION, 2, printk("cs46xx: cs_mmap()-\n") ); out: - mutex_unlock(&state->sem); + up(&state->sem); return ret; } @@ -3201,7 +3200,7 @@ static int cs_open(struct inode *inode, struct file *file) if (state == NULL) return -ENOMEM; memset(state, 0, sizeof(struct cs_state)); - mutex_init(&state->sem); + init_MUTEX(&state->sem); dmabuf = &state->dmabuf; dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if(dmabuf->pbuf==NULL) @@ -3242,10 +3241,10 @@ static int cs_open(struct inode *inode, struct file *file) state->virt = 0; state->magic = CS_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - mutex_init(&state->open_mutex); + init_MUTEX(&state->open_sem); file->private_data = card; - mutex_lock(&state->open_mutex); + down(&state->open_sem); /* set default sample format. According to OSS Programmer's Guide /dev/dsp should be default to unsigned 8-bits, mono, with sample rate 8kHz and @@ -3261,7 +3260,7 @@ static int cs_open(struct inode *inode, struct file *file) cs_set_divisor(dmabuf); state->open_mode |= FMODE_READ; - mutex_unlock(&state->open_mutex); + up(&state->open_sem); } if(file->f_mode & FMODE_WRITE) { @@ -3272,7 +3271,7 @@ static int cs_open(struct inode *inode, struct file *file) if (state == NULL) return -ENOMEM; memset(state, 0, sizeof(struct cs_state)); - mutex_init(&state->sem); + init_MUTEX(&state->sem); dmabuf = &state->dmabuf; dmabuf->pbuf = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if(dmabuf->pbuf==NULL) @@ -3313,10 +3312,10 @@ static int cs_open(struct inode *inode, struct file *file) state->virt = 1; state->magic = CS_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - mutex_init(&state->open_mutex); + init_MUTEX(&state->open_sem); file->private_data = card; - mutex_lock(&state->open_mutex); + down(&state->open_sem); /* set default sample format. 
According to OSS Programmer's Guide /dev/dsp should be default to unsigned 8-bits, mono, with sample rate 8kHz and @@ -3332,7 +3331,7 @@ static int cs_open(struct inode *inode, struct file *file) cs_set_divisor(dmabuf); state->open_mode |= FMODE_WRITE; - mutex_unlock(&state->open_mutex); + up(&state->open_sem); if((ret = prog_dmabuf(state))) return ret; } @@ -3364,14 +3363,14 @@ static int cs_release(struct inode *inode, struct file *file) cs_clear_tail(state); drain_dac(state, file->f_flags & O_NONBLOCK); /* stop DMA state machine and free DMA buffers/channels */ - mutex_lock(&state->open_mutex); + down(&state->open_sem); stop_dac(state); dealloc_dmabuf(state); state->card->free_pcm_channel(state->card, dmabuf->channel->num); free_page((unsigned long)state->dmabuf.pbuf); - /* we're covered by the open_mutex */ - mutex_unlock(&state->open_mutex); + /* we're covered by the open_sem */ + up(&state->open_sem); state->card->states[state->virt] = NULL; state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); @@ -3396,14 +3395,14 @@ static int cs_release(struct inode *inode, struct file *file) { CS_DBGOUT(CS_RELEASE, 2, printk("cs46xx: cs_release() FMODE_READ\n") ); dmabuf = &state->dmabuf; - mutex_lock(&state->open_mutex); + down(&state->open_sem); stop_adc(state); dealloc_dmabuf(state); state->card->free_pcm_channel(state->card, dmabuf->channel->num); free_page((unsigned long)state->dmabuf.pbuf); - /* we're covered by the open_mutex */ - mutex_unlock(&state->open_mutex); + /* we're covered by the open_sem */ + up(&state->open_sem); state->card->states[state->virt] = NULL; state->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); @@ -5508,7 +5507,7 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev, } init_waitqueue_head(&card->midi.open_wait); - mutex_init(&card->midi.open_mutex); + init_MUTEX(&card->midi.open_sem); init_waitqueue_head(&card->midi.iwait); init_waitqueue_head(&card->midi.owait); cs461x_pokeBA0(card, BA0_MIDCR, MIDCR_MRST); diff --git a/trunk/sound/oss/dmasound/dmasound_awacs.c b/trunk/sound/oss/dmasound/dmasound_awacs.c index a17375141c3a..74f975676ccb 100644 --- a/trunk/sound/oss/dmasound/dmasound_awacs.c +++ b/trunk/sound/oss/dmasound/dmasound_awacs.c @@ -80,7 +80,7 @@ #include #include #include -#include +#include #ifdef CONFIG_ADB_CUDA #include #endif @@ -130,7 +130,7 @@ static struct resource awacs_rsrc[3]; static char awacs_name[64]; static int awacs_revision; static int awacs_sleeping; -static DEFINE_MUTEX(dmasound_mutex); +static DECLARE_MUTEX(dmasound_sem); static int sound_device_id; /* exists after iMac revA */ static int hw_can_byteswap = 1 ; /* most pmac sound h/w can */ @@ -312,11 +312,11 @@ extern int daca_enter_sleep(void); extern int daca_leave_sleep(void); #define TRY_LOCK() \ - if ((rc = mutex_lock_interruptible(&dmasound_mutex)) != 0) \ + if ((rc = down_interruptible(&dmasound_sem)) != 0) \ return rc; -#define LOCK() mutex_lock(&dmasound_mutex); +#define LOCK() down(&dmasound_sem); -#define UNLOCK() mutex_unlock(&dmasound_mutex); +#define UNLOCK() up(&dmasound_sem); /* We use different versions that the ones provided in dmasound.h * diff --git a/trunk/sound/oss/emu10k1/hwaccess.h b/trunk/sound/oss/emu10k1/hwaccess.h index 85e27bda694b..104223a192aa 100644 --- a/trunk/sound/oss/emu10k1/hwaccess.h +++ b/trunk/sound/oss/emu10k1/hwaccess.h @@ -181,7 +181,7 @@ struct emu10k1_card struct emu10k1_mpuout *mpuout; struct emu10k1_mpuin *mpuin; - struct mutex open_sem; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; 
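The dmasound hunk above swaps DEFINE_MUTEX for DECLARE_MUTEX, i.e. a counting semaphore initialized to one, and its TRY_LOCK wrapper relies on down_interruptible() returning nonzero when the sleep is broken by a signal. A minimal sketch of that wrapper pattern, assuming the 2.6-era semaphore API; my_sem and my_op() are illustrative names, not taken from the patch:

#include <asm/semaphore.h>

static DECLARE_MUTEX(my_sem);	/* semaphore with an initial count of 1 */

static int my_op(void)
{
	int rc;

	/* returns -EINTR if a signal arrives while sleeping on the semaphore */
	if ((rc = down_interruptible(&my_sem)) != 0)
		return rc;
	/* ... driver state serialized by my_sem ... */
	up(&my_sem);
	return 0;
}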
diff --git a/trunk/sound/oss/emu10k1/main.c b/trunk/sound/oss/emu10k1/main.c index 0cd44a6f7ac0..23241cbdd90f 100644 --- a/trunk/sound/oss/emu10k1/main.c +++ b/trunk/sound/oss/emu10k1/main.c @@ -1320,7 +1320,7 @@ static int __devinit emu10k1_probe(struct pci_dev *pci_dev, const struct pci_dev card->is_aps = (subsysvid == EMU_APS_SUBID); spin_lock_init(&card->lock); - mutex_init(&card->open_sem); + init_MUTEX(&card->open_sem); card->open_mode = 0; init_waitqueue_head(&card->open_wait); diff --git a/trunk/sound/oss/emu10k1/midi.c b/trunk/sound/oss/emu10k1/midi.c index 959a96794dba..b40b5f97aace 100644 --- a/trunk/sound/oss/emu10k1/midi.c +++ b/trunk/sound/oss/emu10k1/midi.c @@ -110,21 +110,21 @@ static int emu10k1_midi_open(struct inode *inode, struct file *file) #endif /* Wait for device to become free */ - mutex_lock(&card->open_sem); + down(&card->open_sem); while (card->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&card->open_sem); + up(&card->open_sem); return -EBUSY; } - mutex_unlock(&card->open_sem); + up(&card->open_sem); interruptible_sleep_on(&card->open_wait); if (signal_pending(current)) { return -ERESTARTSYS; } - mutex_lock(&card->open_sem); + down(&card->open_sem); } if ((midi_dev = (struct emu10k1_mididevice *) kmalloc(sizeof(*midi_dev), GFP_KERNEL)) == NULL) @@ -183,7 +183,7 @@ static int emu10k1_midi_open(struct inode *inode, struct file *file) card->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - mutex_unlock(&card->open_sem); + up(&card->open_sem); return nonseekable_open(inode, file); } @@ -234,9 +234,9 @@ static int emu10k1_midi_release(struct inode *inode, struct file *file) kfree(midi_dev); - mutex_lock(&card->open_sem); + down(&card->open_sem); card->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE)); - mutex_unlock(&card->open_sem); + up(&card->open_sem); wake_up_interruptible(&card->open_wait); unlock_kernel(); diff --git a/trunk/sound/oss/es1370.c b/trunk/sound/oss/es1370.c index 094f569cc6e0..ae55c536613a 100644 --- a/trunk/sound/oss/es1370.c +++ b/trunk/sound/oss/es1370.c @@ -157,7 +157,6 @@ #include #include #include -#include #include #include @@ -347,7 +346,7 @@ struct es1370_state { unsigned sctrl; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -394,7 +393,7 @@ struct es1370_state { struct gameport *gameport; #endif - struct mutex mutex; + struct semaphore sem; }; /* --------------------------------------------------------------------- */ @@ -1160,7 +1159,7 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, return -ENXIO; if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - mutex_lock(&s->mutex); + down(&s->sem); if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s))) goto out; @@ -1184,14 +1183,14 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, ret = -EAGAIN; goto out; } - mutex_unlock(&s->mutex); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out; } - mutex_lock(&s->mutex); + down(&s->sem); if (s->dma_adc.mapped) { ret = -ENXIO; @@ -1216,7 +1215,7 @@ static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, start_adc(s); } out: - mutex_unlock(&s->mutex); + up(&s->sem); remove_wait_queue(&s->dma_adc.wait, &wait); set_current_state(TASK_RUNNING); return ret; @@ -1236,7 +1235,7 @@ static ssize_t 
es1370_write(struct file *file, const char __user *buffer, size_t return -ENXIO; if (!access_ok(VERIFY_READ, buffer, count)) return -EFAULT; - mutex_lock(&s->mutex); + down(&s->sem); if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s))) goto out; ret = 0; @@ -1264,14 +1263,14 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t ret = -EAGAIN; goto out; } - mutex_unlock(&s->mutex); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out; } - mutex_lock(&s->mutex); + down(&s->sem); if (s->dma_dac2.mapped) { ret = -ENXIO; @@ -1297,7 +1296,7 @@ static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t start_dac2(s); } out: - mutex_unlock(&s->mutex); + up(&s->sem); remove_wait_queue(&s->dma_dac2.wait, &wait); set_current_state(TASK_RUNNING); return ret; @@ -1349,7 +1348,7 @@ static int es1370_mmap(struct file *file, struct vm_area_struct *vma) VALIDATE_STATE(s); lock_kernel(); - mutex_lock(&s->mutex); + down(&s->sem); if (vma->vm_flags & VM_WRITE) { if ((ret = prog_dmabuf_dac2(s)) != 0) { goto out; @@ -1381,7 +1380,7 @@ static int es1370_mmap(struct file *file, struct vm_area_struct *vma) } db->mapped = 1; out: - mutex_unlock(&s->mutex); + up(&s->sem); unlock_kernel(); return ret; } @@ -1753,21 +1752,21 @@ static int es1370_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_READ|FMODE_WRITE))) @@ -1794,8 +1793,8 @@ static int es1370_open(struct inode *inode, struct file *file) outl(s->ctrl, s->io+ES1370_REG_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); - mutex_init(&s->mutex); + up(&s->open_sem); + init_MUTEX(&s->sem); return nonseekable_open(inode, file); } @@ -1807,7 +1806,7 @@ static int es1370_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac2(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac2(s); synchronize_irq(s->irq); @@ -1819,7 +1818,7 @@ static int es1370_release(struct inode *inode, struct file *file) } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2199,21 +2198,21 @@ static int es1370_open_dac(struct inode *inode, struct file *file) return -EINVAL; file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & FMODE_DAC) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); 
set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0; s->dma_dac1.enabled = 1; @@ -2228,7 +2227,7 @@ static int es1370_open_dac(struct inode *inode, struct file *file) outl(s->ctrl, s->io+ES1370_REG_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= FMODE_DAC; - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2239,12 +2238,12 @@ static int es1370_release_dac(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); drain_dac1(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); stop_dac1(s); dealloc_dmabuf(s, &s->dma_dac1); s->open_mode &= ~FMODE_DAC; wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2431,21 +2430,21 @@ static int es1370_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2466,7 +2465,7 @@ static int es1370_midi_open(struct inode *inode, struct file *file) es1370_handle_midi(s); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2500,7 +2499,7 @@ static int es1370_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - mutex_lock(&s->open_mutex); + down(&s->open_sem); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2509,7 +2508,7 @@ static int es1370_midi_release(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2639,7 +2638,7 @@ static int __devinit es1370_probe(struct pci_dev *pcidev, const struct pci_devic init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->magic = ES1370_MAGIC; s->dev = pcidev; diff --git a/trunk/sound/oss/es1371.c b/trunk/sound/oss/es1371.c index 4400c8538686..5c697f162579 100644 --- a/trunk/sound/oss/es1371.c +++ b/trunk/sound/oss/es1371.c @@ -129,7 +129,6 @@ #include #include #include -#include #include #include @@ -420,7 +419,7 @@ struct es1371_state { unsigned dac1rate, dac2rate, adcrate; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -463,7 +462,7 @@ struct es1371_state 
{ struct gameport *gameport; #endif - struct mutex sem; + struct semaphore sem; }; /* --------------------------------------------------------------------- */ @@ -1347,7 +1346,7 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count, return -ENXIO; if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - mutex_lock(&s->sem); + down(&s->sem); if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s))) goto out2; @@ -1371,14 +1370,14 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count, ret = -EAGAIN; goto out; } - mutex_unlock(&s->sem); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - mutex_lock(&s->sem); + down(&s->sem); if (s->dma_adc.mapped) { ret = -ENXIO; @@ -1403,7 +1402,7 @@ static ssize_t es1371_read(struct file *file, char __user *buffer, size_t count, start_adc(s); } out: - mutex_unlock(&s->sem); + up(&s->sem); out2: remove_wait_queue(&s->dma_adc.wait, &wait); set_current_state(TASK_RUNNING); @@ -1424,7 +1423,7 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t return -ENXIO; if (!access_ok(VERIFY_READ, buffer, count)) return -EFAULT; - mutex_lock(&s->sem); + down(&s->sem); if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s))) goto out3; ret = 0; @@ -1452,14 +1451,14 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t ret = -EAGAIN; goto out; } - mutex_unlock(&s->sem); + up(&s->sem); schedule(); if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; goto out2; } - mutex_lock(&s->sem); + down(&s->sem); if (s->dma_dac2.mapped) { ret = -ENXIO; @@ -1485,7 +1484,7 @@ static ssize_t es1371_write(struct file *file, const char __user *buffer, size_t start_dac2(s); } out: - mutex_unlock(&s->sem); + up(&s->sem); out2: remove_wait_queue(&s->dma_dac2.wait, &wait); out3: @@ -1539,7 +1538,7 @@ static int es1371_mmap(struct file *file, struct vm_area_struct *vma) VALIDATE_STATE(s); lock_kernel(); - mutex_lock(&s->sem); + down(&s->sem); if (vma->vm_flags & VM_WRITE) { if ((ret = prog_dmabuf_dac2(s)) != 0) { @@ -1572,7 +1571,7 @@ static int es1371_mmap(struct file *file, struct vm_area_struct *vma) } db->mapped = 1; out: - mutex_unlock(&s->sem); + up(&s->sem); unlock_kernel(); return ret; } @@ -1939,21 +1938,21 @@ static int es1371_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } if (file->f_mode & FMODE_READ) { s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; @@ -1983,8 +1982,8 @@ static int es1371_open(struct inode *inode, struct file *file) outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); - mutex_init(&s->sem); + up(&s->open_sem); + init_MUTEX(&s->sem); return nonseekable_open(inode, file); } @@ -1996,7 +1995,7 @@ static int es1371_release(struct inode *inode, struct file 
*file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac2(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac2(s); dealloc_dmabuf(s, &s->dma_dac2); @@ -2006,7 +2005,7 @@ static int es1371_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -2378,21 +2377,21 @@ static int es1371_open_dac(struct inode *inode, struct file *file) return -EINVAL; file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & FMODE_DAC) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0; s->dma_dac1.enabled = 1; @@ -2406,7 +2405,7 @@ static int es1371_open_dac(struct inode *inode, struct file *file) outl(s->sctrl, s->io+ES1371_REG_SERIAL_CONTROL); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= FMODE_DAC; - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2417,11 +2416,11 @@ static int es1371_release_dac(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); drain_dac1(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); stop_dac1(s); dealloc_dmabuf(s, &s->dma_dac1); s->open_mode &= ~FMODE_DAC; - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -2609,21 +2608,21 @@ static int es1371_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2644,7 +2643,7 @@ static int es1371_midi_open(struct inode *inode, struct file *file) es1371_handle_midi(s); spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2677,7 +2676,7 @@ static int es1371_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - mutex_lock(&s->open_mutex); + down(&s->open_sem); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { 
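Every open path converted above repeats the same handshake: take open_sem, test the open_mode bits for the requested access mode, and if the mode is already claimed either fail with -EBUSY under O_NONBLOCK or park on open_wait with the semaphore dropped. A self-contained sketch of that loop under the same 2.6-era API; struct my_state and my_open() are hypothetical, not part of the patch:

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/semaphore.h>

struct my_state {
	struct semaphore open_sem;	/* guards open_mode */
	mode_t open_mode;		/* FMODE_* bits currently claimed */
	wait_queue_head_t open_wait;	/* openers waiting for a free mode */
};

static int my_open(struct my_state *s, struct file *file)
{
	DECLARE_WAITQUEUE(wait, current);

	down(&s->open_sem);
	while (s->open_mode & file->f_mode) {
		if (file->f_flags & O_NONBLOCK) {
			up(&s->open_sem);
			return -EBUSY;
		}
		/* queue first, then drop the lock: a release that runs
		 * between up() and schedule() cannot lose our wakeup */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		up(&s->open_sem);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		down(&s->open_sem);
	}
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	up(&s->open_sem);
	return 0;
}

The explicit wait-queue dance is the careful variant; the older drivers in this patch (cs46xx, emu10k1, maestro) do the same thing with interruptible_sleep_on(), which can miss a wakeup that fires between the semaphore release and the sleep.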
@@ -2685,7 +2684,7 @@ static int es1371_midi_release(struct inode *inode, struct file *file) outl(s->ctrl, s->io+ES1371_REG_CONTROL); } spin_unlock_irqrestore(&s->lock, flags); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -2885,7 +2884,7 @@ static int __devinit es1371_probe(struct pci_dev *pcidev, const struct pci_devic init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->magic = ES1371_MAGIC; s->dev = pcidev; diff --git a/trunk/sound/oss/esssolo1.c b/trunk/sound/oss/esssolo1.c index 78d3e29ce968..849b59f67ef5 100644 --- a/trunk/sound/oss/esssolo1.c +++ b/trunk/sound/oss/esssolo1.c @@ -105,8 +105,6 @@ #include #include #include -#include - #include #include @@ -193,7 +191,7 @@ struct solo1_state { unsigned ena; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1583,7 +1581,7 @@ static int solo1_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); outb(0, s->iobase+6); /* disable DMA */ @@ -1597,7 +1595,7 @@ static int solo1_release(struct inode *inode, struct file *file) } s->open_mode &= ~(FMODE_READ | FMODE_WRITE); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -1626,21 +1624,21 @@ static int solo1_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & (FMODE_READ | FMODE_WRITE)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } s->fmt = AFMT_U8; s->channels = 1; @@ -1652,7 +1650,7 @@ static int solo1_open(struct inode *inode, struct file *file) s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; s->dma_dac.enabled = 1; s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); prog_codec(s); return nonseekable_open(inode, file); } @@ -1913,21 +1911,21 @@ static int solo1_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -1953,7 +1951,7 @@ static int solo1_midi_open(struct inode 
*inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -1987,7 +1985,7 @@ static int solo1_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - mutex_lock(&s->open_mutex); + down(&s->open_sem); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -1996,7 +1994,7 @@ static int solo1_midi_release(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2134,24 +2132,24 @@ static int solo1_dmfm_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & FMODE_DMFM) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } if (!request_region(s->sbbase, FMSYNTH_EXTENT, "ESS Solo1")) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); printk(KERN_ERR "solo1: FM synth io ports in use, opl3 loaded?\n"); return -EBUSY; } @@ -2163,7 +2161,7 @@ static int solo1_dmfm_open(struct inode *inode, struct file *file) outb(5, s->sbbase+2); outb(1, s->sbbase+3); /* enable OPL3 */ s->open_mode |= FMODE_DMFM; - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2174,7 +2172,7 @@ static int solo1_dmfm_release(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); - mutex_lock(&s->open_mutex); + down(&s->open_sem); s->open_mode &= ~FMODE_DMFM; for (regb = 0xb0; regb < 0xb9; regb++) { outb(regb, s->sbbase); @@ -2184,7 +2182,7 @@ static int solo1_dmfm_release(struct inode *inode, struct file *file) } release_region(s->sbbase, FMSYNTH_EXTENT); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2364,7 +2362,7 @@ static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->magic = SOLO1_MAGIC; s->dev = pcidev; diff --git a/trunk/sound/oss/forte.c b/trunk/sound/oss/forte.c index 0294eec8ad90..8406bc90c4ff 100644 --- a/trunk/sound/oss/forte.c +++ b/trunk/sound/oss/forte.c @@ -43,7 +43,6 @@ #include #include -#include #include #include @@ -186,7 +185,7 @@ struct forte_chip { unsigned long iobase; int irq; - struct mutex open_mutex; /* Device access */ + struct semaphore open_sem; /* Device access */ spinlock_t lock; /* State */ spinlock_t ac97_lock; @@ -1243,13 +1242,13 @@ forte_dsp_open (struct inode *inode, struct file *file) struct forte_chip *chip = forte; /* FIXME: HACK FROM HELL! 
*/ if (file->f_flags & O_NONBLOCK) { - if (!mutex_trylock(&chip->open_mutex)) { + if (down_trylock (&chip->open_sem)) { DPRINTK ("%s: returning -EAGAIN\n", __FUNCTION__); return -EAGAIN; } } else { - if (mutex_lock_interruptible(&chip->open_mutex)) { + if (down_interruptible (&chip->open_sem)) { DPRINTK ("%s: returning -ERESTARTSYS\n", __FUNCTION__); return -ERESTARTSYS; } @@ -1303,7 +1302,7 @@ forte_dsp_release (struct inode *inode, struct file *file) spin_unlock_irq (&chip->lock); } - mutex_unlock(&chip->open_mutex); + up (&chip->open_sem); return ret; } @@ -2012,7 +2011,7 @@ forte_probe (struct pci_dev *pci_dev, const struct pci_device_id *pci_id) memset (chip, 0, sizeof (struct forte_chip)); chip->pci_dev = pci_dev; - mutex_init(&chip->open_mutex); + init_MUTEX(&chip->open_sem); spin_lock_init (&chip->lock); spin_lock_init (&chip->ac97_lock); diff --git a/trunk/sound/oss/hal2.c b/trunk/sound/oss/hal2.c index dd4f59d30a3a..afe97c4ce069 100644 --- a/trunk/sound/oss/hal2.c +++ b/trunk/sound/oss/hal2.c @@ -32,8 +32,6 @@ #include #include #include -#include - #include #include @@ -94,7 +92,7 @@ struct hal2_codec { wait_queue_head_t dma_wait; spinlock_t lock; - struct mutex sem; + struct semaphore sem; int usecount; /* recording and playback are * independent */ @@ -1180,7 +1178,7 @@ static ssize_t hal2_read(struct file *file, char *buffer, if (!count) return 0; - if (mutex_lock_interruptible(&adc->sem)) + if (down_interruptible(&adc->sem)) return -EINTR; if (file->f_flags & O_NONBLOCK) { err = hal2_get_buffer(hal2, buffer, count); @@ -1219,7 +1217,7 @@ static ssize_t hal2_read(struct file *file, char *buffer, } } while (count > 0 && err >= 0); } - mutex_unlock(&adc->sem); + up(&adc->sem); return err; } @@ -1234,7 +1232,7 @@ static ssize_t hal2_write(struct file *file, const char *buffer, if (!count) return 0; - if (mutex_lock_interruptible(&dac->sem)) + if (down_interruptible(&dac->sem)) return -EINTR; if (file->f_flags & O_NONBLOCK) { err = hal2_add_buffer(hal2, buf, count); @@ -1273,7 +1271,7 @@ static ssize_t hal2_write(struct file *file, const char *buffer, } } while (count > 0 && err >= 0); } - mutex_unlock(&dac->sem); + up(&dac->sem); return err; } @@ -1358,20 +1356,20 @@ static int hal2_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_READ) { struct hal2_codec *adc = &hal2->adc; - mutex_lock(&adc->sem); + down(&adc->sem); hal2_stop_adc(hal2); hal2_free_adc_dmabuf(adc); adc->usecount--; - mutex_unlock(&adc->sem); + up(&adc->sem); } if (file->f_mode & FMODE_WRITE) { struct hal2_codec *dac = &hal2->dac; - mutex_lock(&dac->sem); + down(&dac->sem); hal2_sync_dac(hal2); hal2_free_dac_dmabuf(dac); dac->usecount--; - mutex_unlock(&dac->sem); + up(&dac->sem); } return 0; @@ -1402,7 +1400,7 @@ static void hal2_init_codec(struct hal2_codec *codec, struct hpc3_regs *hpc3, codec->pbus.pbusnr = index; codec->pbus.pbus = &hpc3->pbdma[index]; init_waitqueue_head(&codec->dma_wait); - mutex_init(&codec->sem); + init_MUTEX(&codec->sem); spin_lock_init(&codec->lock); } diff --git a/trunk/sound/oss/i810_audio.c b/trunk/sound/oss/i810_audio.c index dd2b871cdac5..abc242abd5b1 100644 --- a/trunk/sound/oss/i810_audio.c +++ b/trunk/sound/oss/i810_audio.c @@ -100,8 +100,6 @@ #include #include #include -#include - #include #define DRIVER_VERSION "1.01" @@ -333,7 +331,7 @@ struct i810_state { struct i810_card *card; /* Card info */ /* single open lock mechanism, only used for recording */ - struct mutex open_mutex; + struct semaphore open_sem; wait_queue_head_t open_wait; /* file mode */ 
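forte_dsp_open() above shows the other idiom in this patch: when one semaphore guards the whole device, O_NONBLOCK maps to down_trylock() and the blocking case to down_interruptible(). Note the polarity flip the conversion performs: mutex_trylock() returns nonzero on success, while down_trylock() returns nonzero on failure, so a test of !mutex_trylock() becomes a test of down_trylock(). Sketched standalone with hypothetical names (my_chip, my_dsp_open):

#include <linux/fs.h>
#include <asm/semaphore.h>

struct my_chip {
	struct semaphore open_sem;	/* whole-device open lock */
};

static int my_dsp_open(struct my_chip *chip, struct file *file)
{
	if (file->f_flags & O_NONBLOCK) {
		if (down_trylock(&chip->open_sem))
			return -EAGAIN;		/* contended; caller said not to wait */
	} else {
		if (down_interruptible(&chip->open_sem))
			return -ERESTARTSYS;	/* interrupted by a signal */
	}
	/* ... the device is ours until the matching up() in release ... */
	return 0;
}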
@@ -2599,7 +2597,7 @@ static int i810_open(struct inode *inode, struct file *file) state->card = card; state->magic = I810_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - mutex_init(&state->open_mutex); + init_MUTEX(&state->open_sem); file->private_data = state; dmabuf->trigger = 0; @@ -3215,7 +3213,7 @@ static void __devinit i810_configure_clocking (void) state->card = card; state->magic = I810_STATE_MAGIC; init_waitqueue_head(&dmabuf->wait); - mutex_init(&state->open_mutex); + init_MUTEX(&state->open_sem); dmabuf->fmt = I810_FMT_STEREO | I810_FMT_16BIT; dmabuf->trigger = PCM_ENABLE_OUTPUT; i810_set_spdif_output(state, -1, 0); diff --git a/trunk/sound/oss/ite8172.c b/trunk/sound/oss/ite8172.c index ffcb910f5c3e..8fd2f9a9e668 100644 --- a/trunk/sound/oss/ite8172.c +++ b/trunk/sound/oss/ite8172.c @@ -71,8 +71,6 @@ #include #include #include -#include - #include #include #include @@ -306,7 +304,7 @@ struct it8172_state { unsigned dacrate, adcrate; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1803,21 +1801,21 @@ static int it8172_open(struct inode *inode, struct file *file) } file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); @@ -1852,7 +1850,7 @@ static int it8172_open(struct inode *inode, struct file *file) spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode & (FMODE_READ | FMODE_WRITE)); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -1866,7 +1864,7 @@ static int it8172_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1876,7 +1874,7 @@ static int it8172_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE)); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1999,7 +1997,7 @@ static int __devinit it8172_probe(struct pci_dev *pcidev, init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->dev = pcidev; s->io = pci_resource_start(pcidev, 0); diff --git a/trunk/sound/oss/maestro.c b/trunk/sound/oss/maestro.c index e647f2f86279..d4b569acf764 100644 --- a/trunk/sound/oss/maestro.c +++ b/trunk/sound/oss/maestro.c @@ -223,8 +223,6 @@ #include #include #include -#include - #include #include @@ -399,7 +397,7 @@ struct ess_state { /* this locks around the oss state in the driver */ spinlock_t lock; /* only let 1 be opening at a time */ - struct mutex open_mutex; + struct semaphore open_sem; wait_queue_head_t open_wait; mode_t open_mode; @@ -3022,26 +3020,26 @@ ess_open(struct inode *inode, struct file 
*file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EWOULDBLOCK; } - mutex_unlock(&s->open_mutex); + up(&s->open_sem); interruptible_sleep_on(&s->open_wait); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } /* under semaphore.. */ if ((s->card->dmapages==NULL) && allocate_buffers(s)) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -ENOMEM; } - /* we're covered by the open_mutex */ + /* we're covered by the open_sem */ if( ! s->card->dsps_open ) { maestro_power(s->card,ACPI_D0); start_bob(s); @@ -3078,7 +3076,7 @@ ess_open(struct inode *inode, struct file *file) set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -3091,7 +3089,7 @@ ess_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); } @@ -3100,7 +3098,7 @@ ess_release(struct inode *inode, struct file *file) } s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); - /* we're covered by the open_mutex */ + /* we're covered by the open_sem */ M_printk("maestro: %d dsps now alive\n",s->card->dsps_open-1); if( --s->card->dsps_open <= 0) { s->card->dsps_open = 0; @@ -3108,7 +3106,7 @@ ess_release(struct inode *inode, struct file *file) free_buffers(s); maestro_power(s->card,ACPI_D2); } - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -3468,7 +3466,7 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid) init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); spin_lock_init(&s->lock); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); s->magic = ESS_STATE_MAGIC; s->apu[0] = 6*i; diff --git a/trunk/sound/oss/maestro3.c b/trunk/sound/oss/maestro3.c index 66044aff2586..f3dec70fcb9b 100644 --- a/trunk/sound/oss/maestro3.c +++ b/trunk/sound/oss/maestro3.c @@ -144,8 +144,6 @@ #include #include #include -#include - #include #include @@ -207,7 +205,7 @@ struct m3_state { when irqhandler uses s->lock and m3_assp_read uses card->lock ? 
*/ - struct mutex open_mutex; + struct semaphore open_sem; wait_queue_head_t open_wait; mode_t open_mode; @@ -2015,17 +2013,17 @@ static int m3_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EWOULDBLOCK; } - mutex_unlock(&s->open_mutex); + up(&s->open_sem); interruptible_sleep_on(&s->open_wait); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&c->lock, flags); @@ -2049,7 +2047,7 @@ static int m3_open(struct inode *inode, struct file *file) set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); spin_unlock_irqrestore(&c->lock, flags); return nonseekable_open(inode, file); } @@ -2064,7 +2062,7 @@ static int m3_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); spin_lock_irqsave(&card->lock, flags); if (file->f_mode & FMODE_WRITE) { @@ -2085,7 +2083,7 @@ static int m3_release(struct inode *inode, struct file *file) s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); spin_unlock_irqrestore(&card->lock, flags); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); return 0; @@ -2681,7 +2679,7 @@ static int __devinit m3_probe(struct pci_dev *pci_dev, const struct pci_device_i init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - mutex_init(&(s->open_mutex)); + init_MUTEX(&(s->open_sem)); s->magic = M3_STATE_MAGIC; m3_assp_client_init(s); diff --git a/trunk/sound/oss/nec_vrc5477.c b/trunk/sound/oss/nec_vrc5477.c index 21c1954d9108..fbb9170e8e0a 100644 --- a/trunk/sound/oss/nec_vrc5477.c +++ b/trunk/sound/oss/nec_vrc5477.c @@ -78,8 +78,6 @@ #include #include #include -#include - #include #include #include @@ -200,7 +198,7 @@ struct vrc5477_ac97_state { unsigned short extended_status; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1619,22 +1617,22 @@ static int vrc5477_ac97_open(struct inode *inode, struct file *file) file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); @@ -1661,7 +1659,7 @@ static int vrc5477_ac97_open(struct inode *inode, struct file *file) bailout: spin_unlock_irqrestore(&s->lock, flags); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return ret; } @@ -1673,7 +1671,7 @@ static int vrc5477_ac97_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, 
&s->dma_dac); @@ -1683,7 +1681,7 @@ static int vrc5477_ac97_release(struct inode *inode, struct file *file) dealloc_dmabuf(s, &s->dma_adc); } s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); wake_up(&s->open_wait); unlock_kernel(); return 0; @@ -1869,7 +1867,7 @@ static int __devinit vrc5477_ac97_probe(struct pci_dev *pcidev, init_waitqueue_head(&s->dma_adc.wait); init_waitqueue_head(&s->dma_dac.wait); init_waitqueue_head(&s->open_wait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->dev = pcidev; diff --git a/trunk/sound/oss/rme96xx.c b/trunk/sound/oss/rme96xx.c index a1ec9d131ab3..faa0b7919b65 100644 --- a/trunk/sound/oss/rme96xx.c +++ b/trunk/sound/oss/rme96xx.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include @@ -327,7 +326,7 @@ typedef struct _rme96xx_info { /* waiting and locking */ wait_queue_head_t wait; - struct mutex open_mutex; + struct semaphore open_sem; wait_queue_head_t open_wait; } dma[RME96xx_MAX_DEVS]; @@ -843,7 +842,7 @@ static void busmaster_free(void* ptr,int size) { static int rme96xx_dmabuf_init(rme96xx_info * s,struct dmabuf* dma,int ioffset,int ooffset) { - mutex_init(&dma->open_mutex); + init_MUTEX(&dma->open_sem); init_waitqueue_head(&dma->open_wait); init_waitqueue_head(&dma->wait); dma->s = s; @@ -1470,21 +1469,21 @@ static int rme96xx_open(struct inode *in, struct file *f) dma = &s->dma[devnum]; f->private_data = dma; /* wait for device to become free */ - mutex_lock(&dma->open_mutex); + down(&dma->open_sem); while (dma->open_mode & f->f_mode) { if (f->f_flags & O_NONBLOCK) { - mutex_unlock(&dma->open_mutex); + up(&dma->open_sem); return -EBUSY; } add_wait_queue(&dma->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&dma->open_mutex); + up(&dma->open_sem); schedule(); remove_wait_queue(&dma->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&dma->open_mutex); + down(&dma->open_sem); } COMM ("hardware open") @@ -1493,7 +1492,7 @@ static int rme96xx_open(struct inode *in, struct file *f) dma->open_mode |= (f->f_mode & (FMODE_READ | FMODE_WRITE)); dma->opened = 1; - mutex_unlock(&dma->open_mutex); + up(&dma->open_sem); DBG(printk("device num %d open finished\n",devnum)); return 0; @@ -1525,7 +1524,7 @@ static int rme96xx_release(struct inode *in, struct file *file) } wake_up(&dma->open_wait); - mutex_unlock(&dma->open_mutex); + up(&dma->open_sem); return 0; } diff --git a/trunk/sound/oss/sonicvibes.c b/trunk/sound/oss/sonicvibes.c index 69a4b8778b51..71b05e2f6977 100644 --- a/trunk/sound/oss/sonicvibes.c +++ b/trunk/sound/oss/sonicvibes.c @@ -116,8 +116,6 @@ #include #include #include -#include - #include #include @@ -330,7 +328,7 @@ struct sv_state { unsigned char fmt, enable; spinlock_t lock; - struct mutex open_mutex; + struct semaphore open_sem; mode_t open_mode; wait_queue_head_t open_wait; @@ -1924,21 +1922,21 @@ static int sv_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & file->f_mode) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); 
set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } if (file->f_mode & FMODE_READ) { fmtm &= ~((SV_CFMT_STEREO | SV_CFMT_16BIT) << SV_CFMT_CSHIFT); @@ -1958,7 +1956,7 @@ static int sv_open(struct inode *inode, struct file *file) } set_fmt(s, fmtm, fmts); s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -1970,7 +1968,7 @@ static int sv_release(struct inode *inode, struct file *file) lock_kernel(); if (file->f_mode & FMODE_WRITE) drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_mutex); + down(&s->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); @@ -1981,7 +1979,7 @@ static int sv_release(struct inode *inode, struct file *file) } s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE)); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2169,21 +2167,21 @@ static int sv_midi_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2212,7 +2210,7 @@ static int sv_midi_open(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2250,7 +2248,7 @@ static int sv_midi_release(struct inode *inode, struct file *file) remove_wait_queue(&s->midi.owait, &wait); set_current_state(TASK_RUNNING); } - mutex_lock(&s->open_mutex); + down(&s->open_sem); s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE)); spin_lock_irqsave(&s->lock, flags); if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) { @@ -2259,7 +2257,7 @@ static int sv_midi_release(struct inode *inode, struct file *file) } spin_unlock_irqrestore(&s->lock, flags); wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2390,21 +2388,21 @@ static int sv_dmfm_open(struct inode *inode, struct file *file) VALIDATE_STATE(s); file->private_data = s; /* wait for device to become free */ - mutex_lock(&s->open_mutex); + down(&s->open_sem); while (s->open_mode & FMODE_DMFM) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return -EBUSY; } add_wait_queue(&s->open_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); schedule(); remove_wait_queue(&s->open_wait, &wait); set_current_state(TASK_RUNNING); if (signal_pending(current)) return -ERESTARTSYS; - mutex_lock(&s->open_mutex); + down(&s->open_sem); } /* init the stuff */ outb(1, s->iosynth); @@ -2414,7 +2412,7 @@ 
static int sv_dmfm_open(struct inode *inode, struct file *file) outb(5, s->iosynth+2); outb(1, s->iosynth+3); /* enable OPL3 */ s->open_mode |= FMODE_DMFM; - mutex_unlock(&s->open_mutex); + up(&s->open_sem); return nonseekable_open(inode, file); } @@ -2425,7 +2423,7 @@ static int sv_dmfm_release(struct inode *inode, struct file *file) VALIDATE_STATE(s); lock_kernel(); - mutex_lock(&s->open_mutex); + down(&s->open_sem); s->open_mode &= ~FMODE_DMFM; for (regb = 0xb0; regb < 0xb9; regb++) { outb(regb, s->iosynth); @@ -2434,7 +2432,7 @@ static int sv_dmfm_release(struct inode *inode, struct file *file) outb(0, s->iosynth+3); } wake_up(&s->open_wait); - mutex_unlock(&s->open_mutex); + up(&s->open_sem); unlock_kernel(); return 0; } @@ -2584,7 +2582,7 @@ static int __devinit sv_probe(struct pci_dev *pcidev, const struct pci_device_id init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->midi.iwait); init_waitqueue_head(&s->midi.owait); - mutex_init(&s->open_mutex); + init_MUTEX(&s->open_sem); spin_lock_init(&s->lock); s->magic = SV_MAGIC; s->dev = pcidev; diff --git a/trunk/sound/oss/swarm_cs4297a.c b/trunk/sound/oss/swarm_cs4297a.c index dce9016cbcfd..df4d3771fa84 100644 --- a/trunk/sound/oss/swarm_cs4297a.c +++ b/trunk/sound/oss/swarm_cs4297a.c @@ -76,7 +76,6 @@ #include #include #include -#include #include #include @@ -292,9 +291,9 @@ struct cs4297a_state { unsigned conversion:1; // conversion from 16 to 8 bit in progress unsigned ena; spinlock_t lock; - struct mutex open_mutex; - struct mutex open_sem_adc; - struct mutex open_sem_dac; + struct semaphore open_sem; + struct semaphore open_sem_adc; + struct semaphore open_sem_dac; mode_t open_mode; wait_queue_head_t open_wait; wait_queue_head_t open_wait_adc; @@ -2353,20 +2352,20 @@ static int cs4297a_release(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) { drain_dac(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_sem_dac); + down(&s->open_sem_dac); stop_dac(s); dealloc_dmabuf(s, &s->dma_dac); s->open_mode &= ~FMODE_WRITE; - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); wake_up(&s->open_wait_dac); } if (file->f_mode & FMODE_READ) { drain_adc(s, file->f_flags & O_NONBLOCK); - mutex_lock(&s->open_sem_adc); + down(&s->open_sem_adc); stop_adc(s); dealloc_dmabuf(s, &s->dma_adc); s->open_mode &= ~FMODE_READ; - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); wake_up(&s->open_wait_adc); } return 0; @@ -2414,37 +2413,37 @@ static int cs4297a_open(struct inode *inode, struct file *file) ; } - mutex_lock(&s->open_sem_dac); + down(&s->open_sem_dac); while (s->open_mode & FMODE_WRITE) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); return -EBUSY; } - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); interruptible_sleep_on(&s->open_wait_dac); if (signal_pending(current)) { printk("open - sig pending\n"); return -ERESTARTSYS; } - mutex_lock(&s->open_sem_dac); + down(&s->open_sem_dac); } } if (file->f_mode & FMODE_READ) { - mutex_lock(&s->open_sem_adc); + down(&s->open_sem_adc); while (s->open_mode & FMODE_READ) { if (file->f_flags & O_NONBLOCK) { - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); return -EBUSY; } - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); interruptible_sleep_on(&s->open_wait_adc); if (signal_pending(current)) { printk("open - sig pending\n"); return -ERESTARTSYS; } - mutex_lock(&s->open_sem_adc); + down(&s->open_sem_adc); } } s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); @@ -2457,7 +2456,7 @@ static int 
cs4297a_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_READ; s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0; - mutex_unlock(&s->open_sem_adc); + up(&s->open_sem_adc); if (prog_dmabuf_adc(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -2475,7 +2474,7 @@ static int cs4297a_open(struct inode *inode, struct file *file) s->ena &= ~FMODE_WRITE; s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0; - mutex_unlock(&s->open_sem_dac); + up(&s->open_sem_dac); if (prog_dmabuf_dac(s)) { CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR @@ -2632,8 +2631,8 @@ static int __init cs4297a_init(void) init_waitqueue_head(&s->open_wait); init_waitqueue_head(&s->open_wait_adc); init_waitqueue_head(&s->open_wait_dac); - mutex_init(&s->open_sem_adc); - mutex_init(&s->open_sem_dac); + init_MUTEX(&s->open_sem_adc); + init_MUTEX(&s->open_sem_dac); spin_lock_init(&s->lock); s->irq = K_INT_SER_1; diff --git a/trunk/sound/oss/trident.c b/trunk/sound/oss/trident.c index e61a454a8150..a21c663e7e12 100644 --- a/trunk/sound/oss/trident.c +++ b/trunk/sound/oss/trident.c @@ -190,7 +190,7 @@ * * Lock order (high->low) * lock - hardware lock - * open_mutex - guard opens + * open_sem - guard opens * sem - guard dmabuf, write re-entry etc */ @@ -216,8 +216,6 @@ #include #include #include -#include - #include #include #include @@ -351,7 +349,7 @@ struct trident_state { unsigned chans_num; unsigned long fmt_flag; /* Guard against mmap/write/read races */ - struct mutex sem; + struct semaphore sem; }; @@ -404,7 +402,7 @@ struct trident_card { struct trident_card *next; /* single open lock mechanism, only used for recording */ - struct mutex open_mutex; + struct semaphore open_sem; /* The trident has a certain amount of cross channel interaction so we use a single per card lock */ @@ -1883,7 +1881,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT; - mutex_lock(&state->sem); + down(&state->sem); if (!dmabuf->ready && (ret = prog_dmabuf_record(state))) goto out; @@ -1915,7 +1913,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos goto out; } - mutex_unlock(&state->sem); + up(&state->sem); /* No matter how much space left in the buffer, */ /* we have to wait until CSO == ESO/2 or CSO == ESO */ /* when address engine interrupts */ @@ -1942,7 +1940,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos ret = -ERESTARTSYS; goto out; } - mutex_lock(&state->sem); + down(&state->sem); if (dmabuf->mapped) { if (!ret) ret = -ENXIO; @@ -1970,7 +1968,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos start_adc(state); } out: - mutex_unlock(&state->sem); + up(&state->sem); return ret; } @@ -1998,7 +1996,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t * Guard against an mmap or ioctl while writing */ - mutex_lock(&state->sem); + down(&state->sem); if (dmabuf->mapped) { ret = -ENXIO; @@ -2047,7 +2045,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2); tmo >>= sample_shift[dmabuf->fmt]; unlock_set_fmt(state); - mutex_unlock(&state->sem); + up(&state->sem); /* There are two situations when sleep_on_timeout */ /* returns, one is when the interrupt is serviced */ @@ -2075,7 +2073,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, 
loff_t ret = -ERESTARTSYS; goto out_nolock; } - mutex_lock(&state->sem); + down(&state->sem); if (dmabuf->mapped) { if (!ret) ret = -ENXIO; @@ -2133,7 +2131,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t start_dac(state); } out: - mutex_unlock(&state->sem); + up(&state->sem); out_nolock: return ret; } @@ -2154,24 +2152,24 @@ trident_poll(struct file *file, struct poll_table_struct *wait) * prog_dmabuf events */ - mutex_lock(&state->sem); + down(&state->sem); if (file->f_mode & FMODE_WRITE) { if (!dmabuf->ready && prog_dmabuf_playback(state)) { - mutex_unlock(&state->sem); + up(&state->sem); return 0; } poll_wait(file, &dmabuf->wait, wait); } if (file->f_mode & FMODE_READ) { if (!dmabuf->ready && prog_dmabuf_record(state)) { - mutex_unlock(&state->sem); + up(&state->sem); return 0; } poll_wait(file, &dmabuf->wait, wait); } - mutex_unlock(&state->sem); + up(&state->sem); spin_lock_irqsave(&state->card->lock, flags); trident_update_ptr(state); @@ -2209,7 +2207,7 @@ trident_mmap(struct file *file, struct vm_area_struct *vma) * a read or write against an mmap. */ - mutex_lock(&state->sem); + down(&state->sem); if (vma->vm_flags & VM_WRITE) { if ((ret = prog_dmabuf_playback(state)) != 0) @@ -2234,7 +2232,7 @@ trident_mmap(struct file *file, struct vm_area_struct *vma) dmabuf->mapped = 1; ret = 0; out: - mutex_unlock(&state->sem); + up(&state->sem); return ret; } @@ -2431,15 +2429,15 @@ trident_ioctl(struct inode *inode, struct file *file, unlock_set_fmt(state); break; } - mutex_lock(&state->card->open_mutex); + down(&state->card->open_sem); ret = ali_allocate_other_states_resources(state, 6); if (ret < 0) { - mutex_unlock(&state->card->open_mutex); + up(&state->card->open_sem); unlock_set_fmt(state); break; } state->card->multi_channel_use_count++; - mutex_unlock(&state->card->open_mutex); + up(&state->card->open_sem); } else val = 2; /*yield to 2-channels */ } else @@ -2729,11 +2727,11 @@ trident_open(struct inode *inode, struct file *file) /* find an available virtual channel (instance of /dev/dsp) */ while (card != NULL) { - mutex_lock(&card->open_mutex); + down(&card->open_sem); if (file->f_mode & FMODE_READ) { /* Skip opens on cards that are in 6 channel mode */ if (card->multi_channel_use_count > 0) { - mutex_unlock(&card->open_mutex); + up(&card->open_sem); card = card->next; continue; } @@ -2742,16 +2740,16 @@ trident_open(struct inode *inode, struct file *file) if (card->states[i] == NULL) { state = card->states[i] = kmalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) { - mutex_unlock(&card->open_mutex); + up(&card->open_sem); return -ENOMEM; } memset(state, 0, sizeof(*state)); - mutex_init(&state->sem); + init_MUTEX(&state->sem); dmabuf = &state->dmabuf; goto found_virt; } } - mutex_unlock(&card->open_mutex); + up(&card->open_sem); card = card->next; } /* no more virtual channel avaiable */ @@ -2818,7 +2816,7 @@ trident_open(struct inode *inode, struct file *file) } state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE); - mutex_unlock(&card->open_mutex); + up(&card->open_sem); pr_debug("trident: open virtual channel %d, hard channel %d\n", state->virt, dmabuf->channel->num); @@ -2847,7 +2845,7 @@ trident_release(struct inode *inode, struct file *file) state->virt, dmabuf->channel->num); /* stop DMA state machine and free DMA buffers/channels */ - mutex_lock(&card->open_mutex); + down(&card->open_sem); if (file->f_mode & FMODE_WRITE) { stop_dac(state); @@ -2880,8 +2878,8 @@ trident_release(struct inode *inode, struct file *file) 
@@ -2880,8 +2878,8 @@ trident_release(struct inode *inode, struct file *file)
 	card->states[state->virt] = NULL;
 	kfree(state);
 
-	/* we're covered by the open_mutex */
-	mutex_unlock(&card->open_mutex);
+	/* we're covered by the open_sem */
+	up(&card->open_sem);
 	return 0;
 }
@@ -4407,7 +4405,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
 	card->banks[BANK_B].addresses = &bank_b_addrs;
 	card->banks[BANK_B].bitmap = 0UL;
 
-	mutex_init(&card->open_mutex);
+	init_MUTEX(&card->open_sem);
 	spin_lock_init(&card->lock);
 	init_timer(&card->timer);
diff --git a/trunk/sound/oss/via82cxxx_audio.c b/trunk/sound/oss/via82cxxx_audio.c
index 1a921ee71aba..83edda93f0b4 100644
--- a/trunk/sound/oss/via82cxxx_audio.c
+++ b/trunk/sound/oss/via82cxxx_audio.c
@@ -38,8 +38,7 @@
 #include
 #include
 #include
-#include <linux/mutex.h>
-
+#include <asm/semaphore.h>
 #include "sound_config.h"
 #include "dev_table.h"
 #include "mpu401.h"
@@ -312,8 +311,8 @@ struct via_info {
 	int mixer_vol;		/* 8233/35 volume - not yet implemented */
 
-	struct mutex syscall_mutex;
-	struct mutex open_mutex;
+	struct semaphore syscall_sem;
+	struct semaphore open_sem;
 
 	/* The 8233/8235 have 4 DX audio channels, two record and
 	   one six channel out. We bind ch_in to DX 1, ch_out to multichannel
@@ -506,10 +505,10 @@ static inline int via_syscall_down (struct via_info *card, int nonblock)
 		nonblock = 0;
 
 	if (nonblock) {
-		if (!mutex_trylock(&card->syscall_mutex))
+		if (down_trylock (&card->syscall_sem))
 			return -EAGAIN;
 	} else {
-		if (mutex_lock_interruptible(&card->syscall_mutex))
+		if (down_interruptible (&card->syscall_sem))
 			return -ERESTARTSYS;
 	}
@@ -1610,7 +1609,7 @@ static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int
 #endif
 	rc = codec->mixer_ioctl(codec, cmd, arg);
 
-	mutex_unlock(&card->syscall_mutex);
+	up (&card->syscall_sem);
 
 out:
 	DPRINTK ("EXIT, returning %d\n", rc);
@@ -2229,7 +2228,7 @@ static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma)
 	if (wr)
 		card->ch_out.is_mapped = 1;
 
-	mutex_unlock(&card->syscall_mutex);
+	up (&card->syscall_sem);
 	rc = 0;
 
 out:
@@ -2257,7 +2256,7 @@ static ssize_t via_dsp_do_read (struct via_info *card,
 		/* Thomas Sailer:
 		 * But also to ourselves, release semaphore if we do so */
 		if (need_resched()) {
-			mutex_unlock(&card->syscall_mutex);
+			up(&card->syscall_sem);
 			schedule ();
 			ret = via_syscall_down (card, nonblock);
 			if (ret)
@@ -2287,7 +2286,7 @@ static ssize_t via_dsp_do_read (struct via_info *card,
 			break;
 		}
 
-		mutex_unlock(&card->syscall_mutex);
+		up(&card->syscall_sem);
 
 		DPRINTK ("Sleeping on block %d\n", n);
 		schedule();
@@ -2403,7 +2402,7 @@ static ssize_t via_dsp_read(struct file *file, char __user *buffer, size_t count
 	rc = via_dsp_do_read (card, buffer, count, nonblock);
 
 out_up:
-	mutex_unlock(&card->syscall_mutex);
+	up (&card->syscall_sem);
 out:
 	DPRINTK ("EXIT, returning %ld\n",(long) rc);
 	return rc;
@@ -2427,7 +2426,7 @@ static ssize_t via_dsp_do_write (struct via_info *card,
 		/* Thomas Sailer:
 		 * But also to ourselves, release semaphore if we do so */
 		if (need_resched()) {
-			mutex_unlock(&card->syscall_mutex);
+			up(&card->syscall_sem);
 			schedule ();
 			ret = via_syscall_down (card, nonblock);
 			if (ret)
@@ -2457,7 +2456,7 @@ static ssize_t via_dsp_do_write (struct via_info *card,
 			break;
 		}
 
-		mutex_unlock(&card->syscall_mutex);
+		up(&card->syscall_sem);
 
 		DPRINTK ("Sleeping on page %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record);
 		schedule();
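
via_syscall_down() above picks between a nonblocking and an interruptible acquire based on O_NONBLOCK. The same helper in isolation, as a sketch; note that both down_trylock() and down_interruptible() return nonzero on failure, which is why the bare "if" tests work:

    static int my_syscall_down(struct my_card *card, int nonblock)
    {
    	if (nonblock) {
    		/* nonzero return: the semaphore was already held */
    		if (down_trylock(&card->syscall_sem))
    			return -EAGAIN;
    	} else {
    		/* nonzero return: a signal interrupted the wait */
    		if (down_interruptible(&card->syscall_sem))
    			return -ERESTARTSYS;
    	}
    	return 0;
    }
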
@@ -2586,7 +2585,7 @@ static ssize_t via_dsp_write(struct file *file, const char __user *buffer, size_
 	rc = via_dsp_do_write (card, buffer, count, nonblock);
 
 out_up:
-	mutex_unlock(&card->syscall_mutex);
+	up (&card->syscall_sem);
 out:
 	DPRINTK ("EXIT, returning %ld\n",(long) rc);
 	return rc;
@@ -2635,7 +2634,7 @@ static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wa
  * Sleeps until all playback has been flushed to the audio
  * hardware.
  *
- * Locking: inside card->syscall_mutex
+ * Locking: inside card->syscall_sem
  */
 
 static int via_dsp_drain_playback (struct via_info *card,
@@ -2693,7 +2692,7 @@ static int via_dsp_drain_playback (struct via_info *card,
 			printk (KERN_ERR "sleeping but not active\n");
 #endif
 
-		mutex_unlock(&card->syscall_mutex);
+		up(&card->syscall_sem);
 
 		DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_frags));
 		schedule();
@@ -2749,7 +2748,7 @@ static int via_dsp_drain_playback (struct via_info *card,
  *
  * Handles SNDCTL_DSP_GETISPACE and SNDCTL_DSP_GETOSPACE.
  *
- * Locking: inside card->syscall_mutex
+ * Locking: inside card->syscall_sem
  */
 
 static int via_dsp_ioctl_space (struct via_info *card,
@@ -2794,7 +2793,7 @@ static int via_dsp_ioctl_space (struct via_info *card,
 *
 * Handles SNDCTL_DSP_GETIPTR and SNDCTL_DSP_GETOPTR.
 *
- * Locking: inside card->syscall_mutex
+ * Locking: inside card->syscall_sem
 */
 
static int via_dsp_ioctl_ptr (struct via_info *card,
@@ -3222,7 +3221,7 @@ static int via_dsp_ioctl (struct inode *inode, struct file *file,
 		break;
 	}
 
-	mutex_unlock(&card->syscall_mutex);
+	up (&card->syscall_sem);
 	DPRINTK ("EXIT, returning %d\n", rc);
 	return rc;
 }
@@ -3265,12 +3264,12 @@ static int via_dsp_open (struct inode *inode, struct file *file)
 match:
 	if (nonblock) {
-		if (!mutex_trylock(&card->open_mutex)) {
+		if (down_trylock (&card->open_sem)) {
 			DPRINTK ("EXIT, returning -EAGAIN\n");
 			return -EAGAIN;
 		}
 	} else {
-		if (mutex_lock_interruptible(&card->open_mutex)) {
+		if (down_interruptible (&card->open_sem)) {
 			DPRINTK ("EXIT, returning -ERESTARTSYS\n");
 			return -ERESTARTSYS;
 		}
@@ -3356,8 +3355,8 @@ static int via_dsp_release(struct inode *inode, struct file *file)
 		via_chan_buffer_free (card, &card->ch_in);
 	}
 
-	mutex_unlock(&card->syscall_mutex);
-	mutex_unlock(&card->open_mutex);
+	up (&card->syscall_sem);
+	up (&card->open_sem);
 
 	DPRINTK ("EXIT, returning 0\n");
 	return 0;
@@ -3415,8 +3414,8 @@ static int __devinit via_init_one (struct pci_dev *pdev, const struct pci_device
 	card->card_num = via_num_cards++;
 	spin_lock_init (&card->lock);
 	spin_lock_init (&card->ac97_lock);
-	mutex_init(&card->syscall_mutex);
-	mutex_init(&card->open_mutex);
+	init_MUTEX (&card->syscall_sem);
+	init_MUTEX (&card->open_sem);
 
 	/* we must init these now, in case the intr handler needs them */
 	via_chan_init_defaults (card, &card->ch_out);
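
via_dsp_do_read(), via_dsp_do_write() and via_dsp_drain_playback() all drop syscall_sem before calling schedule() and reacquire it afterwards, so no other syscall on the card is stalled while this one sleeps. The skeleton of that step, as a sketch; wait-queue setup is elided, and my_syscall_down() is the helper sketched above:

    static int my_wait_step(struct my_card *card, int nonblock)
    {
    	/* sleeping with syscall_sem held would block every other syscall */
    	up(&card->syscall_sem);
    	schedule();		/* caller has already queued itself on a waitqueue */
    	/* reacquire before touching card state again; device state must
    	 * be rechecked by the caller, since it may have changed meanwhile */
    	return my_syscall_down(card, nonblock);
    }
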
diff --git a/trunk/sound/oss/vwsnd.c b/trunk/sound/oss/vwsnd.c
index b372e88e857f..265423054caf 100644
--- a/trunk/sound/oss/vwsnd.c
+++ b/trunk/sound/oss/vwsnd.c
@@ -94,7 +94,7 @@
  *	Open will block until the previous client has closed the
  *	device, unless O_NONBLOCK is specified.
  *
- *	The semaphore devc->io_mutex serializes PCM I/O syscalls.  This
+ *	The semaphore devc->io_sema serializes PCM I/O syscalls.  This
  *	is unnecessary in Linux 2.2, because the kernel lock
  *	serializes read, write, and ioctl globally, but it's there,
  *	ready for the brave, new post-kernel-lock world.
@@ -105,7 +105,7 @@
  *	area it owns and update its pointers.  See pcm_output() and
  *	pcm_input() for most of the gory stuff.
  *
- *	devc->mix_mutex serializes all mixer ioctls.  This is also
+ *	devc->mix_sema serializes all mixer ioctls.  This is also
  *	redundant because of the kernel lock.
  *
  *	The lowest level lock is lith->lithium_lock.  It is a
@@ -148,8 +147,7 @@
 #include
 #include
 #include
-#include <linux/mutex.h>
-
+#include <asm/semaphore.h>
 #include
 #include
 
 #include "sound_config.h"
@@ -1448,11 +1447,11 @@ typedef enum vwsnd_port_flags {
 *
 *	port->lock protects: hwstate, flags, swb_[iu]_avail.
 *
- *	devc->io_mutex protects: swstate, sw_*, swb_[iu]_idx.
+ *	devc->io_sema protects: swstate, sw_*, swb_[iu]_idx.
 *
 *	everything else is only written by open/release or
 *	pcm_{setup,shutdown}(), which are serialized by a
- *	combination of devc->open_mutex and devc->io_mutex.
+ *	combination of devc->open_sema and devc->io_sema.
 */
 
 typedef struct vwsnd_port {
@@ -1508,9 +1507,9 @@ typedef struct vwsnd_dev {
 	int audio_minor;	/* minor number of audio device */
 	int mixer_minor;	/* minor number of mixer device */
 
-	struct mutex open_mutex;
-	struct mutex io_mutex;
-	struct mutex mix_mutex;
+	struct semaphore open_sema;
+	struct semaphore io_sema;
+	struct semaphore mix_sema;
 	mode_t open_mode;
 	wait_queue_head_t open_wait;
@@ -1634,7 +1633,7 @@ static __inline__ unsigned int swb_inc_i(vwsnd_port_t *port, int inc)
 * mode-setting ioctls have been done, but before the first I/O is
 * done.
 *
- * Locking: called with devc->io_mutex held.
+ * Locking: called with devc->io_sema held.
 *
 * Returns 0 on success, -errno on failure.
 */
@@ -2320,9 +2319,9 @@ static ssize_t vwsnd_audio_read(struct file *file,
 	vwsnd_dev_t *devc = file->private_data;
 	ssize_t ret;
 
-	mutex_lock(&devc->io_mutex);
+	down(&devc->io_sema);
 	ret = vwsnd_audio_do_read(file, buffer, count, ppos);
-	mutex_unlock(&devc->io_mutex);
+	up(&devc->io_sema);
 	return ret;
 }
@@ -2395,9 +2394,9 @@ static ssize_t vwsnd_audio_write(struct file *file,
 	vwsnd_dev_t *devc = file->private_data;
 	ssize_t ret;
 
-	mutex_lock(&devc->io_mutex);
+	down(&devc->io_sema);
 	ret = vwsnd_audio_do_write(file, buffer, count, ppos);
-	mutex_unlock(&devc->io_mutex);
+	up(&devc->io_sema);
 	return ret;
 }
@@ -2892,9 +2891,9 @@ static int vwsnd_audio_ioctl(struct inode *inode,
 	vwsnd_dev_t *devc = (vwsnd_dev_t *) file->private_data;
 	int ret;
 
-	mutex_lock(&devc->io_mutex);
+	down(&devc->io_sema);
 	ret = vwsnd_audio_do_ioctl(inode, file, cmd, arg);
-	mutex_unlock(&devc->io_mutex);
+	up(&devc->io_sema);
 	return ret;
 }
@@ -2930,9 +2929,9 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
 		return -ENODEV;
 	}
 
-	mutex_lock(&devc->open_mutex);
+	down(&devc->open_sema);
 	while (devc->open_mode & file->f_mode) {
-		mutex_unlock(&devc->open_mutex);
+		up(&devc->open_sema);
 		if (file->f_flags & O_NONBLOCK) {
 			DEC_USE_COUNT;
 			return -EBUSY;
@@ -2942,10 +2941,10 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
 			DEC_USE_COUNT;
 			return -ERESTARTSYS;
 		}
-		mutex_lock(&devc->open_mutex);
+		down(&devc->open_sema);
 	}
 	devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
-	mutex_unlock(&devc->open_mutex);
+	up(&devc->open_sema);
 
 	/* get default sample format from minor number. */
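
vwsnd_audio_open() above is the classic exclusive-open loop: take open_sema, and while the requested mode is busy, drop it, sleep on open_wait, and retake it before rechecking. Roughly, as a sketch; the sleep primitive between the -EBUSY and -ERESTARTSYS paths is elided in the hunk above, so interruptible_sleep_on() here is my assumption, and the release side pairs with it via wake_up(&devc->open_wait):

    	down(&devc->open_sema);
    	while (devc->open_mode & file->f_mode) {
    		up(&devc->open_sema);		/* never sleep holding it */
    		if (file->f_flags & O_NONBLOCK)
    			return -EBUSY;
    		interruptible_sleep_on(&devc->open_wait);	/* assumed primitive */
    		if (signal_pending(current))
    			return -ERESTARTSYS;
    		down(&devc->open_sema);		/* retake, then recheck the mode */
    	}
    	devc->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
    	up(&devc->open_sema);
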
@@ -2961,7 +2960,7 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
 
 	/* Initialize vwsnd_ports. */
 
-	mutex_lock(&devc->io_mutex);
+	down(&devc->io_sema);
 	{
 		if (file->f_mode & FMODE_READ) {
 			devc->rport.swstate = SW_INITIAL;
@@ -2988,7 +2987,7 @@ static int vwsnd_audio_open(struct inode *inode, struct file *file)
 			devc->wport.frag_count = 0;
 		}
 	}
-	mutex_unlock(&devc->io_mutex);
+	up(&devc->io_sema);
 
 	file->private_data = devc;
 	DBGRV();
@@ -3006,7 +3005,7 @@ static int vwsnd_audio_release(struct inode *inode, struct file *file)
 	int err = 0;
 
 	lock_kernel();
-	mutex_lock(&devc->io_mutex);
+	down(&devc->io_sema);
 	{
 		DBGEV("(inode=0x%p, file=0x%p)\n", inode, file);
 
@@ -3023,13 +3022,13 @@ static int vwsnd_audio_release(struct inode *inode, struct file *file)
 		if (wport)
 			wport->swstate = SW_OFF;
 	}
-	mutex_unlock(&devc->io_mutex);
+	up(&devc->io_sema);
 
-	mutex_lock(&devc->open_mutex);
+	down(&devc->open_sema);
 	{
 		devc->open_mode &= ~file->f_mode;
 	}
-	mutex_unlock(&devc->open_mutex);
+	up(&devc->open_sema);
 	wake_up(&devc->open_wait);
 	DEC_USE_COUNT;
 	DBGR();
@@ -3214,7 +3213,7 @@ static int vwsnd_mixer_ioctl(struct inode *ioctl,
 
 	DBGEV("(devc=0x%p, cmd=0x%x, arg=0x%lx)\n", devc, cmd, arg);
 
-	mutex_lock(&devc->mix_mutex);
+	down(&devc->mix_sema);
 	{
 		if ((cmd & ~nrmask) == MIXER_READ(0))
 			retval = mixer_read_ioctl(devc, nr, (void __user *) arg);
@@ -3223,7 +3222,7 @@ static int vwsnd_mixer_ioctl(struct inode *ioctl,
 		else
 			retval = -EINVAL;
 	}
-	mutex_unlock(&devc->mix_mutex);
+	up(&devc->mix_sema);
 	return retval;
 }
@@ -3377,9 +3376,9 @@ static int __init attach_vwsnd(struct address_info *hw_config)
 
 	/* Initialize as much of *devc as possible */
 
-	mutex_init(&devc->open_mutex);
-	mutex_init(&devc->io_mutex);
-	mutex_init(&devc->mix_mutex);
+	init_MUTEX(&devc->open_sema);
+	init_MUTEX(&devc->io_sema);
+	init_MUTEX(&devc->mix_sema);
 	devc->open_mode = 0;
 	spin_lock_init(&devc->rport.lock);
 	init_waitqueue_head(&devc->rport.queue);
diff --git a/trunk/sound/oss/ymfpci.c b/trunk/sound/oss/ymfpci.c
index bf90c124a7e6..f8bd72e46f57 100644
--- a/trunk/sound/oss/ymfpci.c
+++ b/trunk/sound/oss/ymfpci.c
@@ -1918,10 +1918,10 @@ static int ymf_open(struct inode *inode, struct file *file)
 	if (unit == NULL)
 		return -ENODEV;
 
-	mutex_lock(&unit->open_mutex);
+	down(&unit->open_sem);
 
 	if ((state = ymf_state_alloc(unit)) == NULL) {
-		mutex_unlock(&unit->open_mutex);
+		up(&unit->open_sem);
 		return -ENOMEM;
 	}
 	list_add_tail(&state->chain, &unit->states);
@@ -1956,7 +1956,7 @@ static int ymf_open(struct inode *inode, struct file *file)
 	ymfpci_writeb(unit, YDSXGR_TIMERCTRL,
 		(YDSXGR_TIMERCTRL_TEN|YDSXGR_TIMERCTRL_TIEN));
 #endif
-	mutex_unlock(&unit->open_mutex);
+	up(&unit->open_sem);
 
 	return nonseekable_open(inode, file);
 
@@ -1974,7 +1974,7 @@ static int ymf_open(struct inode *inode, struct file *file)
 	list_del(&state->chain);
 	kfree(state);
 
-	mutex_unlock(&unit->open_mutex);
+	up(&unit->open_sem);
 	return err;
 }
@@ -1987,7 +1987,7 @@ static int ymf_release(struct inode *inode, struct file *file)
 	ymfpci_writeb(unit, YDSXGR_TIMERCTRL, 0);
 #endif
 
-	mutex_lock(&unit->open_mutex);
+	down(&unit->open_sem);
 
 	/*
 	 * XXX Solve the case of O_NONBLOCK close - don't deallocate here.
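
attach_vwsnd() and ymf_probe_one() both initialize their locks before the device becomes reachable, and the lock-order comments in these drivers describe the same layering: sleeping semaphores at the top, an IRQ-safe spinlock at the bottom, never held across down()/up(). That hierarchy in miniature, with an illustrative struct under the same API assumptions:

    struct my_dev {
    	struct semaphore open_sema;	/* outermost: serializes open/release */
    	struct semaphore io_sema;	/* middle: serializes read/write/ioctl */
    	spinlock_t lock;		/* innermost: hardware state, taken in IRQ context */
    };

    static void my_dev_init(struct my_dev *dev)
    {
    	init_MUTEX(&dev->open_sema);
    	init_MUTEX(&dev->io_sema);
    	spin_lock_init(&dev->lock);	/* a spinlock is never held while sleeping on a semaphore */
    }
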
@@ -2004,7 +2004,7 @@ static int ymf_release(struct inode *inode, struct file *file)
 	file->private_data = NULL;	/* Can you tell I programmed Solaris */
 	kfree(state);
 
-	mutex_unlock(&unit->open_mutex);
+	up(&unit->open_sem);
 
 	return 0;
 }
@@ -2532,7 +2532,7 @@ static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_devi
 	spin_lock_init(&codec->reg_lock);
 	spin_lock_init(&codec->voice_lock);
 	spin_lock_init(&codec->ac97_lock);
-	mutex_init(&codec->open_mutex);
+	init_MUTEX(&codec->open_sem);
 	INIT_LIST_HEAD(&codec->states);
 	codec->pci = pcidev;
diff --git a/trunk/sound/oss/ymfpci.h b/trunk/sound/oss/ymfpci.h
index ac1785f2b7e7..f810a100c641 100644
--- a/trunk/sound/oss/ymfpci.h
+++ b/trunk/sound/oss/ymfpci.h
@@ -22,7 +22,6 @@
  *
  */
 #include
-#include <linux/mutex.h>
 
 /*
  * Direct registers
  */
@@ -280,7 +279,7 @@ struct ymf_unit {
 	/* soundcore stuff */
 	int dev_audio;
 
-	struct mutex open_mutex;
+	struct semaphore open_sem;
 
 	struct list_head ymf_devs;
 	struct list_head states;	/* List of states for this unit */
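
ymf_open() above keeps open_sem held across its whole setup, so the error path can unwind the state list and still release the semaphore exactly once on every exit. The shape of that, sketched with illustrative names (my_state_alloc and my_prog_dmabuf stand in for the driver's allocator and buffer setup):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <asm/semaphore.h>

    static int my_open(struct my_unit *unit)
    {
    	struct my_state *state;
    	int err;

    	down(&unit->open_sem);
    	state = my_state_alloc(unit);		/* illustrative allocator */
    	if (state == NULL) {
    		up(&unit->open_sem);		/* release on every return path */
    		return -ENOMEM;
    	}
    	list_add_tail(&state->chain, &unit->states);
    	err = my_prog_dmabuf(unit, state);	/* illustrative setup step */
    	if (err) {
    		list_del(&state->chain);	/* undo the list_add_tail() */
    		kfree(state);
    		up(&unit->open_sem);
    		return err;
    	}
    	up(&unit->open_sem);
    	return 0;
    }
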