From 063f2a0f64955a988cb0534bd774b5a9694ac19b Mon Sep 17 00:00:00 2001 From: Ray Molenkamp Date: Wed, 21 May 2008 17:06:26 -0600 Subject: [PATCH] --- yaml --- r: 97436 b: refs/heads/master c: ebb3770c01a8afd049e3e91b0a026dcdfcb2da9f h: refs/heads/master v: v3 --- [refs] | 2 +- trunk/arch/ia64/kernel/ivt.S | 84 ++--- trunk/arch/ia64/kernel/minstate.h | 46 +-- trunk/arch/ia64/kernel/patch.c | 23 -- trunk/arch/ia64/kernel/setup.c | 11 - trunk/arch/ia64/kernel/vmlinux.lds.S | 7 - trunk/drivers/cpufreq/cpufreq.c | 4 +- trunk/drivers/usb/serial/ftdi_sio.c | 264 +++++++++++++++- trunk/drivers/usb/serial/ftdi_sio.h | 267 +++++++++++++++- trunk/include/asm-ia64/patch.h | 1 - trunk/include/asm-ia64/ptrace.h | 2 +- trunk/include/asm-ia64/sections.h | 1 - trunk/include/linux/sched.h | 1 + trunk/include/linux/topology.h | 4 +- trunk/kernel/sched.c | 447 ++++++++++++++++++++++++--- trunk/kernel/sched_clock.c | 18 +- trunk/kernel/sched_debug.c | 5 + trunk/kernel/sched_fair.c | 254 +++++++++------ trunk/kernel/sched_rt.c | 4 + trunk/kernel/sched_stats.h | 1 - 20 files changed, 1159 insertions(+), 287 deletions(-) diff --git a/[refs] b/[refs] index 609ee2cc10c2..2ad0599ba155 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 4bd27972e2c35b1e9e672ff05e0a781644f9c905 +refs/heads/master: ebb3770c01a8afd049e3e91b0a026dcdfcb2da9f diff --git a/trunk/arch/ia64/kernel/ivt.S b/trunk/arch/ia64/kernel/ivt.S index 80b44ea052d7..6678c49daba3 100644 --- a/trunk/arch/ia64/kernel/ivt.S +++ b/trunk/arch/ia64/kernel/ivt.S @@ -1076,6 +1076,48 @@ END(ia64_syscall_setup) DBG_FAULT(15) FAULT(15) + /* + * Squatting in this space ... + * + * This special case dispatcher for illegal operation faults allows preserved + * registers to be modified through a callback function (asm only) that is handed + * back from the fault handler in r8. Up to three arguments can be passed to the + * callback function by returning an aggregate with the callback as its first + * element, followed by the arguments. + */ +ENTRY(dispatch_illegal_op_fault) + .prologue + .body + SAVE_MIN_WITH_COVER + ssm psr.ic | PSR_DEFAULT_BITS + ;; + srlz.i // guarantee that interruption collection is on + ;; +(p15) ssm psr.i // restore psr.i + adds r3=8,r2 // set up second base pointer for SAVE_REST + ;; + alloc r14=ar.pfs,0,0,1,0 // must be first in insn group + mov out0=ar.ec + ;; + SAVE_REST + PT_REGS_UNWIND_INFO(0) + ;; + br.call.sptk.many rp=ia64_illegal_op_fault +.ret0: ;; + alloc r14=ar.pfs,0,0,3,0 // must be first in insn group + mov out0=r9 + mov out1=r10 + mov out2=r11 + movl r15=ia64_leave_kernel + ;; + mov rp=r15 + mov b6=r8 + ;; + cmp.ne p6,p0=0,r8 +(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel + br.sptk.many ia64_leave_kernel +END(dispatch_illegal_op_fault) + .org ia64_ivt+0x4000 ///////////////////////////////////////////////////////////////////////////////////////// // 0x4000 Entry 16 (size 64 bundles) Reserved @@ -1673,48 +1715,6 @@ END(ia32_interrupt) DBG_FAULT(67) FAULT(67) - /* - * Squatting in this space ... - * - * This special case dispatcher for illegal operation faults allows preserved - * registers to be modified through a callback function (asm only) that is handed - * back from the fault handler in r8. Up to three arguments can be passed to the - * callback function by returning an aggregate with the callback as its first - * element, followed by the arguments. 
- */ -ENTRY(dispatch_illegal_op_fault) - .prologue - .body - SAVE_MIN_WITH_COVER - ssm psr.ic | PSR_DEFAULT_BITS - ;; - srlz.i // guarantee that interruption collection is on - ;; -(p15) ssm psr.i // restore psr.i - adds r3=8,r2 // set up second base pointer for SAVE_REST - ;; - alloc r14=ar.pfs,0,0,1,0 // must be first in insn group - mov out0=ar.ec - ;; - SAVE_REST - PT_REGS_UNWIND_INFO(0) - ;; - br.call.sptk.many rp=ia64_illegal_op_fault -.ret0: ;; - alloc r14=ar.pfs,0,0,3,0 // must be first in insn group - mov out0=r9 - mov out1=r10 - mov out2=r11 - movl r15=ia64_leave_kernel - ;; - mov rp=r15 - mov b6=r8 - ;; - cmp.ne p6,p0=0,r8 -(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel - br.sptk.many ia64_leave_kernel -END(dispatch_illegal_op_fault) - #ifdef CONFIG_IA32_SUPPORT /* diff --git a/trunk/arch/ia64/kernel/minstate.h b/trunk/arch/ia64/kernel/minstate.h index 74b6d670aaef..7c548ac52bbc 100644 --- a/trunk/arch/ia64/kernel/minstate.h +++ b/trunk/arch/ia64/kernel/minstate.h @@ -15,9 +15,6 @@ #define ACCOUNT_SYS_ENTER #endif -.section ".data.patch.rse", "a" -.previous - /* * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves * the minimum state necessary that allows us to turn psr.ic back @@ -43,7 +40,7 @@ * Note that psr.ic is NOT turned on by this macro. This is so that * we can pass interruption state as arguments to a handler. */ -#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \ +#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ mov r16=IA64_KR(CURRENT); /* M */ \ mov r27=ar.rsc; /* M */ \ mov r20=r1; /* A */ \ @@ -90,7 +87,6 @@ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \ mov r29=b0 \ ;; \ - WORKAROUND; \ adds r16=PT(R8),r1; /* initialize first base pointer */ \ adds r17=PT(R9),r1; /* initialize second base pointer */ \ (pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \ @@ -210,40 +206,6 @@ st8 [r25]=r10; /* ar.ssd */ \ ;; -#define RSE_WORKAROUND \ -(pUStk) extr.u r17=r18,3,6; \ -(pUStk) sub r16=r18,r22; \ -[1:](pKStk) br.cond.sptk.many 1f; \ - .xdata4 ".data.patch.rse",1b-. \ - ;; \ - cmp.ge p6,p7 = 33,r17; \ - ;; \ -(p6) mov r17=0x310; \ -(p7) mov r17=0x308; \ - ;; \ - cmp.leu p1,p0=r16,r17; \ -(p1) br.cond.sptk.many 1f; \ - dep.z r17=r26,0,62; \ - movl r16=2f; \ - ;; \ - mov ar.pfs=r17; \ - dep r27=r0,r27,16,14; \ - mov b0=r16; \ - ;; \ - br.ret.sptk b0; \ - ;; \ -2: \ - mov ar.rsc=r0 \ - ;; \ - flushrs; \ - ;; \ - mov ar.bspstore=r22 \ - ;; \ - mov r18=ar.bsp; \ - ;; \ -1: \ - .pred.rel "mutex", pKStk, pUStk - -#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND) -#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND) -#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , ) +#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,) +#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19) +#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, ) diff --git a/trunk/arch/ia64/kernel/patch.c b/trunk/arch/ia64/kernel/patch.c index b83b2c516008..e0dca8743dbb 100644 --- a/trunk/arch/ia64/kernel/patch.c +++ b/trunk/arch/ia64/kernel/patch.c @@ -115,29 +115,6 @@ ia64_patch_vtop (unsigned long start, unsigned long end) ia64_srlz_i(); } -/* - * Disable the RSE workaround by turning the conditional branch - * that we tagged in each place the workaround was used into an - * unconditional branch. 
- */ -void __init -ia64_patch_rse (unsigned long start, unsigned long end) -{ - s32 *offp = (s32 *) start; - u64 ip, *b; - - while (offp < (s32 *) end) { - ip = (u64) offp + *offp; - - b = (u64 *)(ip & -16); - b[1] &= ~0xf800000L; - ia64_fc((void *) ip); - ++offp; - } - ia64_sync_i(); - ia64_srlz_i(); -} - void __init ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) { diff --git a/trunk/arch/ia64/kernel/setup.c b/trunk/arch/ia64/kernel/setup.c index f48a809c686d..e9596cd0cdab 100644 --- a/trunk/arch/ia64/kernel/setup.c +++ b/trunk/arch/ia64/kernel/setup.c @@ -560,17 +560,6 @@ setup_arch (char **cmdline_p) /* process SAL system table: */ ia64_sal_init(__va(efi.sal_systab)); -#ifdef CONFIG_ITANIUM - ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); -#else - { - u64 num_phys_stacked; - - if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96) - ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); - } -#endif - #ifdef CONFIG_SMP cpu_physical_id(0) = hard_smp_processor_id(); #endif diff --git a/trunk/arch/ia64/kernel/vmlinux.lds.S b/trunk/arch/ia64/kernel/vmlinux.lds.S index 5929ab10a289..80622acc95de 100644 --- a/trunk/arch/ia64/kernel/vmlinux.lds.S +++ b/trunk/arch/ia64/kernel/vmlinux.lds.S @@ -156,13 +156,6 @@ SECTIONS __end___vtop_patchlist = .; } - .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET) - { - __start___rse_patchlist = .; - *(.data.patch.rse) - __end___rse_patchlist = .; - } - .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET) { __start___mckinley_e9_bundles = .; diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 86f0a2430624..7fce038fa57e 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -928,13 +928,13 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; + unlock_policy_rwsem_write(cpu); + if (ret) { dprintk("setting policy failed\n"); goto err_out_unregister; } - unlock_policy_rwsem_write(cpu); - kobject_uevent(&policy->kobj, KOBJ_ADD); module_put(cpufreq_driver->owner); dprintk("initialization complete\n"); diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index 3cee6feac174..5234e7a3bd2c 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -174,8 +174,270 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, - { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID), + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0100_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0101_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0102_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0103_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0104_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0105_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0106_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0107_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0108_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0109_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010D_PID) }, + { 
USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_010F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0110_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0111_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0112_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0113_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0114_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0115_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0116_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0117_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0118_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0119_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_011F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0120_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0121_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0122_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0123_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0124_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0125_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0126_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0127_PID), .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0128_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0129_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012C_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_012F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0130_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0131_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0132_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0133_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0134_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0135_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0136_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0137_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0138_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0139_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_013F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0140_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0141_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0142_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0143_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0144_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0145_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0146_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0147_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0148_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0149_PID) }, + { 
USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_014F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0150_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0151_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0152_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0153_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0154_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0155_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0156_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0157_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0158_PID), + .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0159_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_015F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0160_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0161_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0162_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0163_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0164_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0165_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0166_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0167_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0168_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0169_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_016F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0170_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0171_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0172_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0173_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0174_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0175_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0176_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0177_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0178_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0179_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_017F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0180_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0181_PID) }, + { 
USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0182_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0183_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0184_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0185_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0186_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0187_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0188_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0189_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_018F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0190_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0191_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0192_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0193_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0194_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0195_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0196_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0197_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0198_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_0199_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019A_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019B_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019C_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019D_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019E_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_019F_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A0_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A1_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A2_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A3_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A4_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A5_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A6_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A7_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A8_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01A9_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AA_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AB_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AC_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AD_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AE_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01AF_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B0_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B1_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B2_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B3_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B4_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B5_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B6_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B7_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B8_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01B9_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BA_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BB_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BC_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BD_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BE_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01BF_PID) }, + { 
USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C0_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C1_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C2_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C3_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C4_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C5_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C6_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C7_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C8_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01C9_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CA_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CB_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CC_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CD_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CE_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01CF_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D0_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D1_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D2_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D3_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D4_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D5_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D6_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D7_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D8_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01D9_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DA_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DB_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DC_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DD_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DE_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01DF_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E0_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E1_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E2_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E3_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E4_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E5_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E6_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E7_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E8_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01E9_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EA_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EB_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EC_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01ED_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EE_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01EF_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F0_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F1_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F2_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F3_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F4_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F5_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F6_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F7_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F8_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01F9_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FA_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FB_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FC_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FD_PID) }, + { 
USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FE_PID) }, + { USB_DEVICE(MTXORB_VID,MTXORB_FTDI_RANGE_01FF_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, diff --git a/trunk/drivers/usb/serial/ftdi_sio.h b/trunk/drivers/usb/serial/ftdi_sio.h index a72f2c81d664..06e0ecabb3eb 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.h +++ b/trunk/drivers/usb/serial/ftdi_sio.h @@ -114,11 +114,268 @@ #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ /* - * The following are the values for the Matrix Orbital VK204-25-USB - * display, which use the FT232RL. - */ -#define MTXORB_VK_VID 0x1b3d -#define MTXORB_VK_PID 0x0158 + * The following are the values for the Matrix Orbital FTDI Range + * Anything in this range will use an FT232RL. + */ +#define MTXORB_VID 0x1B3D +#define MTXORB_FTDI_RANGE_0100_PID 0x0100 +#define MTXORB_FTDI_RANGE_0101_PID 0x0101 +#define MTXORB_FTDI_RANGE_0102_PID 0x0102 +#define MTXORB_FTDI_RANGE_0103_PID 0x0103 +#define MTXORB_FTDI_RANGE_0104_PID 0x0104 +#define MTXORB_FTDI_RANGE_0105_PID 0x0105 +#define MTXORB_FTDI_RANGE_0106_PID 0x0106 +#define MTXORB_FTDI_RANGE_0107_PID 0x0107 +#define MTXORB_FTDI_RANGE_0108_PID 0x0108 +#define MTXORB_FTDI_RANGE_0109_PID 0x0109 +#define MTXORB_FTDI_RANGE_010A_PID 0x010A +#define MTXORB_FTDI_RANGE_010B_PID 0x010B +#define MTXORB_FTDI_RANGE_010C_PID 0x010C +#define MTXORB_FTDI_RANGE_010D_PID 0x010D +#define MTXORB_FTDI_RANGE_010E_PID 0x010E +#define MTXORB_FTDI_RANGE_010F_PID 0x010F +#define MTXORB_FTDI_RANGE_0110_PID 0x0110 +#define MTXORB_FTDI_RANGE_0111_PID 0x0111 +#define MTXORB_FTDI_RANGE_0112_PID 0x0112 +#define MTXORB_FTDI_RANGE_0113_PID 0x0113 +#define MTXORB_FTDI_RANGE_0114_PID 0x0114 +#define MTXORB_FTDI_RANGE_0115_PID 0x0115 +#define MTXORB_FTDI_RANGE_0116_PID 0x0116 +#define MTXORB_FTDI_RANGE_0117_PID 0x0117 +#define MTXORB_FTDI_RANGE_0118_PID 0x0118 +#define MTXORB_FTDI_RANGE_0119_PID 0x0119 +#define MTXORB_FTDI_RANGE_011A_PID 0x011A +#define MTXORB_FTDI_RANGE_011B_PID 0x011B +#define MTXORB_FTDI_RANGE_011C_PID 0x011C +#define MTXORB_FTDI_RANGE_011D_PID 0x011D +#define MTXORB_FTDI_RANGE_011E_PID 0x011E +#define MTXORB_FTDI_RANGE_011F_PID 0x011F +#define MTXORB_FTDI_RANGE_0120_PID 0x0120 +#define MTXORB_FTDI_RANGE_0121_PID 0x0121 +#define MTXORB_FTDI_RANGE_0122_PID 0x0122 +#define MTXORB_FTDI_RANGE_0123_PID 0x0123 +#define MTXORB_FTDI_RANGE_0124_PID 0x0124 +#define MTXORB_FTDI_RANGE_0125_PID 0x0125 +#define MTXORB_FTDI_RANGE_0126_PID 0x0126 +#define MTXORB_FTDI_RANGE_0127_PID 0x0127 +#define MTXORB_FTDI_RANGE_0128_PID 0x0128 +#define MTXORB_FTDI_RANGE_0129_PID 0x0129 +#define MTXORB_FTDI_RANGE_012A_PID 0x012A +#define MTXORB_FTDI_RANGE_012B_PID 0x012B +#define MTXORB_FTDI_RANGE_012C_PID 0x012C +#define MTXORB_FTDI_RANGE_012D_PID 0x012D +#define MTXORB_FTDI_RANGE_012E_PID 0x012E +#define MTXORB_FTDI_RANGE_012F_PID 0x012F +#define MTXORB_FTDI_RANGE_0130_PID 0x0130 +#define MTXORB_FTDI_RANGE_0131_PID 0x0131 +#define MTXORB_FTDI_RANGE_0132_PID 0x0132 +#define MTXORB_FTDI_RANGE_0133_PID 0x0133 +#define MTXORB_FTDI_RANGE_0134_PID 0x0134 +#define MTXORB_FTDI_RANGE_0135_PID 0x0135 +#define MTXORB_FTDI_RANGE_0136_PID 0x0136 +#define MTXORB_FTDI_RANGE_0137_PID 0x0137 +#define MTXORB_FTDI_RANGE_0138_PID 0x0138 +#define MTXORB_FTDI_RANGE_0139_PID 0x0139 +#define MTXORB_FTDI_RANGE_013A_PID 0x013A +#define MTXORB_FTDI_RANGE_013B_PID 0x013B +#define MTXORB_FTDI_RANGE_013C_PID 0x013C +#define MTXORB_FTDI_RANGE_013D_PID 0x013D +#define 
MTXORB_FTDI_RANGE_013E_PID 0x013E +#define MTXORB_FTDI_RANGE_013F_PID 0x013F +#define MTXORB_FTDI_RANGE_0140_PID 0x0140 +#define MTXORB_FTDI_RANGE_0141_PID 0x0141 +#define MTXORB_FTDI_RANGE_0142_PID 0x0142 +#define MTXORB_FTDI_RANGE_0143_PID 0x0143 +#define MTXORB_FTDI_RANGE_0144_PID 0x0144 +#define MTXORB_FTDI_RANGE_0145_PID 0x0145 +#define MTXORB_FTDI_RANGE_0146_PID 0x0146 +#define MTXORB_FTDI_RANGE_0147_PID 0x0147 +#define MTXORB_FTDI_RANGE_0148_PID 0x0148 +#define MTXORB_FTDI_RANGE_0149_PID 0x0149 +#define MTXORB_FTDI_RANGE_014A_PID 0x014A +#define MTXORB_FTDI_RANGE_014B_PID 0x014B +#define MTXORB_FTDI_RANGE_014C_PID 0x014C +#define MTXORB_FTDI_RANGE_014D_PID 0x014D +#define MTXORB_FTDI_RANGE_014E_PID 0x014E +#define MTXORB_FTDI_RANGE_014F_PID 0x014F +#define MTXORB_FTDI_RANGE_0150_PID 0x0150 +#define MTXORB_FTDI_RANGE_0151_PID 0x0151 +#define MTXORB_FTDI_RANGE_0152_PID 0x0152 +#define MTXORB_FTDI_RANGE_0153_PID 0x0153 +#define MTXORB_FTDI_RANGE_0154_PID 0x0154 +#define MTXORB_FTDI_RANGE_0155_PID 0x0155 +#define MTXORB_FTDI_RANGE_0156_PID 0x0156 +#define MTXORB_FTDI_RANGE_0157_PID 0x0157 +#define MTXORB_FTDI_RANGE_0158_PID 0x0158 +#define MTXORB_FTDI_RANGE_0159_PID 0x0159 +#define MTXORB_FTDI_RANGE_015A_PID 0x015A +#define MTXORB_FTDI_RANGE_015B_PID 0x015B +#define MTXORB_FTDI_RANGE_015C_PID 0x015C +#define MTXORB_FTDI_RANGE_015D_PID 0x015D +#define MTXORB_FTDI_RANGE_015E_PID 0x015E +#define MTXORB_FTDI_RANGE_015F_PID 0x015F +#define MTXORB_FTDI_RANGE_0160_PID 0x0160 +#define MTXORB_FTDI_RANGE_0161_PID 0x0161 +#define MTXORB_FTDI_RANGE_0162_PID 0x0162 +#define MTXORB_FTDI_RANGE_0163_PID 0x0163 +#define MTXORB_FTDI_RANGE_0164_PID 0x0164 +#define MTXORB_FTDI_RANGE_0165_PID 0x0165 +#define MTXORB_FTDI_RANGE_0166_PID 0x0166 +#define MTXORB_FTDI_RANGE_0167_PID 0x0167 +#define MTXORB_FTDI_RANGE_0168_PID 0x0168 +#define MTXORB_FTDI_RANGE_0169_PID 0x0169 +#define MTXORB_FTDI_RANGE_016A_PID 0x016A +#define MTXORB_FTDI_RANGE_016B_PID 0x016B +#define MTXORB_FTDI_RANGE_016C_PID 0x016C +#define MTXORB_FTDI_RANGE_016D_PID 0x016D +#define MTXORB_FTDI_RANGE_016E_PID 0x016E +#define MTXORB_FTDI_RANGE_016F_PID 0x016F +#define MTXORB_FTDI_RANGE_0170_PID 0x0170 +#define MTXORB_FTDI_RANGE_0171_PID 0x0171 +#define MTXORB_FTDI_RANGE_0172_PID 0x0172 +#define MTXORB_FTDI_RANGE_0173_PID 0x0173 +#define MTXORB_FTDI_RANGE_0174_PID 0x0174 +#define MTXORB_FTDI_RANGE_0175_PID 0x0175 +#define MTXORB_FTDI_RANGE_0176_PID 0x0176 +#define MTXORB_FTDI_RANGE_0177_PID 0x0177 +#define MTXORB_FTDI_RANGE_0178_PID 0x0178 +#define MTXORB_FTDI_RANGE_0179_PID 0x0179 +#define MTXORB_FTDI_RANGE_017A_PID 0x017A +#define MTXORB_FTDI_RANGE_017B_PID 0x017B +#define MTXORB_FTDI_RANGE_017C_PID 0x017C +#define MTXORB_FTDI_RANGE_017D_PID 0x017D +#define MTXORB_FTDI_RANGE_017E_PID 0x017E +#define MTXORB_FTDI_RANGE_017F_PID 0x017F +#define MTXORB_FTDI_RANGE_0180_PID 0x0180 +#define MTXORB_FTDI_RANGE_0181_PID 0x0181 +#define MTXORB_FTDI_RANGE_0182_PID 0x0182 +#define MTXORB_FTDI_RANGE_0183_PID 0x0183 +#define MTXORB_FTDI_RANGE_0184_PID 0x0184 +#define MTXORB_FTDI_RANGE_0185_PID 0x0185 +#define MTXORB_FTDI_RANGE_0186_PID 0x0186 +#define MTXORB_FTDI_RANGE_0187_PID 0x0187 +#define MTXORB_FTDI_RANGE_0188_PID 0x0188 +#define MTXORB_FTDI_RANGE_0189_PID 0x0189 +#define MTXORB_FTDI_RANGE_018A_PID 0x018A +#define MTXORB_FTDI_RANGE_018B_PID 0x018B +#define MTXORB_FTDI_RANGE_018C_PID 0x018C +#define MTXORB_FTDI_RANGE_018D_PID 0x018D +#define MTXORB_FTDI_RANGE_018E_PID 0x018E +#define MTXORB_FTDI_RANGE_018F_PID 0x018F +#define MTXORB_FTDI_RANGE_0190_PID 
0x0190 +#define MTXORB_FTDI_RANGE_0191_PID 0x0191 +#define MTXORB_FTDI_RANGE_0192_PID 0x0192 +#define MTXORB_FTDI_RANGE_0193_PID 0x0193 +#define MTXORB_FTDI_RANGE_0194_PID 0x0194 +#define MTXORB_FTDI_RANGE_0195_PID 0x0195 +#define MTXORB_FTDI_RANGE_0196_PID 0x0196 +#define MTXORB_FTDI_RANGE_0197_PID 0x0197 +#define MTXORB_FTDI_RANGE_0198_PID 0x0198 +#define MTXORB_FTDI_RANGE_0199_PID 0x0199 +#define MTXORB_FTDI_RANGE_019A_PID 0x019A +#define MTXORB_FTDI_RANGE_019B_PID 0x019B +#define MTXORB_FTDI_RANGE_019C_PID 0x019C +#define MTXORB_FTDI_RANGE_019D_PID 0x019D +#define MTXORB_FTDI_RANGE_019E_PID 0x019E +#define MTXORB_FTDI_RANGE_019F_PID 0x019F +#define MTXORB_FTDI_RANGE_01A0_PID 0x01A0 +#define MTXORB_FTDI_RANGE_01A1_PID 0x01A1 +#define MTXORB_FTDI_RANGE_01A2_PID 0x01A2 +#define MTXORB_FTDI_RANGE_01A3_PID 0x01A3 +#define MTXORB_FTDI_RANGE_01A4_PID 0x01A4 +#define MTXORB_FTDI_RANGE_01A5_PID 0x01A5 +#define MTXORB_FTDI_RANGE_01A6_PID 0x01A6 +#define MTXORB_FTDI_RANGE_01A7_PID 0x01A7 +#define MTXORB_FTDI_RANGE_01A8_PID 0x01A8 +#define MTXORB_FTDI_RANGE_01A9_PID 0x01A9 +#define MTXORB_FTDI_RANGE_01AA_PID 0x01AA +#define MTXORB_FTDI_RANGE_01AB_PID 0x01AB +#define MTXORB_FTDI_RANGE_01AC_PID 0x01AC +#define MTXORB_FTDI_RANGE_01AD_PID 0x01AD +#define MTXORB_FTDI_RANGE_01AE_PID 0x01AE +#define MTXORB_FTDI_RANGE_01AF_PID 0x01AF +#define MTXORB_FTDI_RANGE_01B0_PID 0x01B0 +#define MTXORB_FTDI_RANGE_01B1_PID 0x01B1 +#define MTXORB_FTDI_RANGE_01B2_PID 0x01B2 +#define MTXORB_FTDI_RANGE_01B3_PID 0x01B3 +#define MTXORB_FTDI_RANGE_01B4_PID 0x01B4 +#define MTXORB_FTDI_RANGE_01B5_PID 0x01B5 +#define MTXORB_FTDI_RANGE_01B6_PID 0x01B6 +#define MTXORB_FTDI_RANGE_01B7_PID 0x01B7 +#define MTXORB_FTDI_RANGE_01B8_PID 0x01B8 +#define MTXORB_FTDI_RANGE_01B9_PID 0x01B9 +#define MTXORB_FTDI_RANGE_01BA_PID 0x01BA +#define MTXORB_FTDI_RANGE_01BB_PID 0x01BB +#define MTXORB_FTDI_RANGE_01BC_PID 0x01BC +#define MTXORB_FTDI_RANGE_01BD_PID 0x01BD +#define MTXORB_FTDI_RANGE_01BE_PID 0x01BE +#define MTXORB_FTDI_RANGE_01BF_PID 0x01BF +#define MTXORB_FTDI_RANGE_01C0_PID 0x01C0 +#define MTXORB_FTDI_RANGE_01C1_PID 0x01C1 +#define MTXORB_FTDI_RANGE_01C2_PID 0x01C2 +#define MTXORB_FTDI_RANGE_01C3_PID 0x01C3 +#define MTXORB_FTDI_RANGE_01C4_PID 0x01C4 +#define MTXORB_FTDI_RANGE_01C5_PID 0x01C5 +#define MTXORB_FTDI_RANGE_01C6_PID 0x01C6 +#define MTXORB_FTDI_RANGE_01C7_PID 0x01C7 +#define MTXORB_FTDI_RANGE_01C8_PID 0x01C8 +#define MTXORB_FTDI_RANGE_01C9_PID 0x01C9 +#define MTXORB_FTDI_RANGE_01CA_PID 0x01CA +#define MTXORB_FTDI_RANGE_01CB_PID 0x01CB +#define MTXORB_FTDI_RANGE_01CC_PID 0x01CC +#define MTXORB_FTDI_RANGE_01CD_PID 0x01CD +#define MTXORB_FTDI_RANGE_01CE_PID 0x01CE +#define MTXORB_FTDI_RANGE_01CF_PID 0x01CF +#define MTXORB_FTDI_RANGE_01D0_PID 0x01D0 +#define MTXORB_FTDI_RANGE_01D1_PID 0x01D1 +#define MTXORB_FTDI_RANGE_01D2_PID 0x01D2 +#define MTXORB_FTDI_RANGE_01D3_PID 0x01D3 +#define MTXORB_FTDI_RANGE_01D4_PID 0x01D4 +#define MTXORB_FTDI_RANGE_01D5_PID 0x01D5 +#define MTXORB_FTDI_RANGE_01D6_PID 0x01D6 +#define MTXORB_FTDI_RANGE_01D7_PID 0x01D7 +#define MTXORB_FTDI_RANGE_01D8_PID 0x01D8 +#define MTXORB_FTDI_RANGE_01D9_PID 0x01D9 +#define MTXORB_FTDI_RANGE_01DA_PID 0x01DA +#define MTXORB_FTDI_RANGE_01DB_PID 0x01DB +#define MTXORB_FTDI_RANGE_01DC_PID 0x01DC +#define MTXORB_FTDI_RANGE_01DD_PID 0x01DD +#define MTXORB_FTDI_RANGE_01DE_PID 0x01DE +#define MTXORB_FTDI_RANGE_01DF_PID 0x01DF +#define MTXORB_FTDI_RANGE_01E0_PID 0x01E0 +#define MTXORB_FTDI_RANGE_01E1_PID 0x01E1 +#define MTXORB_FTDI_RANGE_01E2_PID 0x01E2 +#define 
MTXORB_FTDI_RANGE_01E3_PID 0x01E3 +#define MTXORB_FTDI_RANGE_01E4_PID 0x01E4 +#define MTXORB_FTDI_RANGE_01E5_PID 0x01E5 +#define MTXORB_FTDI_RANGE_01E6_PID 0x01E6 +#define MTXORB_FTDI_RANGE_01E7_PID 0x01E7 +#define MTXORB_FTDI_RANGE_01E8_PID 0x01E8 +#define MTXORB_FTDI_RANGE_01E9_PID 0x01E9 +#define MTXORB_FTDI_RANGE_01EA_PID 0x01EA +#define MTXORB_FTDI_RANGE_01EB_PID 0x01EB +#define MTXORB_FTDI_RANGE_01EC_PID 0x01EC +#define MTXORB_FTDI_RANGE_01ED_PID 0x01ED +#define MTXORB_FTDI_RANGE_01EE_PID 0x01EE +#define MTXORB_FTDI_RANGE_01EF_PID 0x01EF +#define MTXORB_FTDI_RANGE_01F0_PID 0x01F0 +#define MTXORB_FTDI_RANGE_01F1_PID 0x01F1 +#define MTXORB_FTDI_RANGE_01F2_PID 0x01F2 +#define MTXORB_FTDI_RANGE_01F3_PID 0x01F3 +#define MTXORB_FTDI_RANGE_01F4_PID 0x01F4 +#define MTXORB_FTDI_RANGE_01F5_PID 0x01F5 +#define MTXORB_FTDI_RANGE_01F6_PID 0x01F6 +#define MTXORB_FTDI_RANGE_01F7_PID 0x01F7 +#define MTXORB_FTDI_RANGE_01F8_PID 0x01F8 +#define MTXORB_FTDI_RANGE_01F9_PID 0x01F9 +#define MTXORB_FTDI_RANGE_01FA_PID 0x01FA +#define MTXORB_FTDI_RANGE_01FB_PID 0x01FB +#define MTXORB_FTDI_RANGE_01FC_PID 0x01FC +#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD +#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE +#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF + + /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ diff --git a/trunk/include/asm-ia64/patch.h b/trunk/include/asm-ia64/patch.h index 295fe6ab4584..a71543084fb4 100644 --- a/trunk/include/asm-ia64/patch.h +++ b/trunk/include/asm-ia64/patch.h @@ -21,7 +21,6 @@ extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end); extern void ia64_patch_vtop (unsigned long start, unsigned long end); extern void ia64_patch_phys_stack_reg(unsigned long val); -extern void ia64_patch_rse (unsigned long start, unsigned long end); extern void ia64_patch_gate (void); #endif /* _ASM_IA64_PATCH_H */ diff --git a/trunk/include/asm-ia64/ptrace.h b/trunk/include/asm-ia64/ptrace.h index 15f8dcfe6eee..4b2a8d40ebc5 100644 --- a/trunk/include/asm-ia64/ptrace.h +++ b/trunk/include/asm-ia64/ptrace.h @@ -76,7 +76,7 @@ # define KERNEL_STACK_SIZE_ORDER 0 #endif -#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31) +#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15) #define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE) #define KERNEL_STACK_SIZE IA64_STK_OFFSET diff --git a/trunk/include/asm-ia64/sections.h b/trunk/include/asm-ia64/sections.h index 7286e4a9fe84..dc42a359894f 100644 --- a/trunk/include/asm-ia64/sections.h +++ b/trunk/include/asm-ia64/sections.h @@ -10,7 +10,6 @@ extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; -extern char __start___rse_patchlist[], __end___rse_patchlist[]; extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[]; extern char __start_gate_section[]; diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index ae0be3c62375..3e05e5474749 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -766,6 +766,7 @@ struct sched_domain { struct sched_domain *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ cpumask_t span; /* span of all CPUs in this domain */ + int first_cpu; /* cache of the 
first cpu in this domain */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ unsigned int busy_factor; /* less balancing by factor if busy */ diff --git a/trunk/include/linux/topology.h b/trunk/include/linux/topology.h index 24f3d2282e11..4bb7074a2c3a 100644 --- a/trunk/include/linux/topology.h +++ b/trunk/include/linux/topology.h @@ -166,9 +166,7 @@ void arch_update_cpu_topology(void); .busy_idx = 3, \ .idle_idx = 3, \ .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_NEWIDLE \ - | SD_WAKE_AFFINE \ - | SD_SERIALIZE, \ + | SD_SERIALIZE, \ .last_balance = jiffies, \ .balance_interval = 64, \ } diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index bfb8ad8ed171..cfa222a91539 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -136,7 +136,7 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val) static inline int rt_policy(int policy) { - if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR)) + if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR)) return 1; return 0; } @@ -398,6 +398,43 @@ struct cfs_rq { */ struct list_head leaf_cfs_rq_list; struct task_group *tg; /* group that "owns" this runqueue */ + +#ifdef CONFIG_SMP + unsigned long task_weight; + unsigned long shares; + /* + * We need space to build a sched_domain wide view of the full task + * group tree, in order to avoid depending on dynamic memory allocation + * during the load balancing we place this in the per cpu task group + * hierarchy. This limits the load balancing to one instance per cpu, + * but more should not be needed anyway. + */ + struct aggregate_struct { + /* + * load = weight(cpus) * f(tg) + * + * Where f(tg) is the recursive weight fraction assigned to + * this group. + */ + unsigned long load; + + /* + * part of the group weight distributed to this span. + */ + unsigned long shares; + + /* + * The sum of all runqueue weights within this span. + */ + unsigned long rq_weight; + + /* + * Weight contributed by tasks; this is the part we can + * influence by moving tasks around. + */ + unsigned long task_weight; + } aggregate; +#endif #endif }; @@ -1331,6 +1368,9 @@ static void __resched_task(struct task_struct *p, int tif_bit) */ #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y)) +/* + * delta *= weight / lw + */ static unsigned long calc_delta_mine(unsigned long delta_exec, unsigned long weight, struct load_weight *lw) @@ -1353,12 +1393,6 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX); } -static inline unsigned long -calc_delta_fair(unsigned long delta_exec, struct load_weight *lw) -{ - return calc_delta_mine(delta_exec, NICE_0_LOAD, lw); -} - static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; @@ -1471,6 +1505,326 @@ static unsigned long source_load(int cpu, int type); static unsigned long target_load(int cpu, int type); static unsigned long cpu_avg_load_per_task(int cpu); static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); + +#ifdef CONFIG_FAIR_GROUP_SCHED + +/* + * Group load balancing. + * + * We calculate a few balance domain wide aggregate numbers; load and weight. + * Given the pictures below, and assuming each item has equal weight: + * + * root 1 - thread + * / | \ A - group + * A 1 B + * /|\ / \ + * C 2 D 3 4 + * | | + * 5 6 + * + * load: + * A and B get 1/3-rd of the total load. 
C and D get 1/3-rd of A's 1/3-rd, + * which equals 1/9-th of the total load. + * + * shares: + * The weight of this group on the selected cpus. + * + * rq_weight: + * Direct sum of all the cpu's their rq weight, e.g. A would get 3 while + * B would get 2. + * + * task_weight: + * Part of the rq_weight contributed by tasks; all groups except B would + * get 1, B gets 2. + */ + +static inline struct aggregate_struct * +aggregate(struct task_group *tg, struct sched_domain *sd) +{ + return &tg->cfs_rq[sd->first_cpu]->aggregate; +} + +typedef void (*aggregate_func)(struct task_group *, struct sched_domain *); + +/* + * Iterate the full tree, calling @down when first entering a node and @up when + * leaving it for the final time. + */ +static +void aggregate_walk_tree(aggregate_func down, aggregate_func up, + struct sched_domain *sd) +{ + struct task_group *parent, *child; + + rcu_read_lock(); + parent = &root_task_group; +down: + (*down)(parent, sd); + list_for_each_entry_rcu(child, &parent->children, siblings) { + parent = child; + goto down; + +up: + continue; + } + (*up)(parent, sd); + + child = parent; + parent = parent->parent; + if (parent) + goto up; + rcu_read_unlock(); +} + +/* + * Calculate the aggregate runqueue weight. + */ +static +void aggregate_group_weight(struct task_group *tg, struct sched_domain *sd) +{ + unsigned long rq_weight = 0; + unsigned long task_weight = 0; + int i; + + for_each_cpu_mask(i, sd->span) { + rq_weight += tg->cfs_rq[i]->load.weight; + task_weight += tg->cfs_rq[i]->task_weight; + } + + aggregate(tg, sd)->rq_weight = rq_weight; + aggregate(tg, sd)->task_weight = task_weight; +} + +/* + * Compute the weight of this group on the given cpus. + */ +static +void aggregate_group_shares(struct task_group *tg, struct sched_domain *sd) +{ + unsigned long shares = 0; + int i; + + for_each_cpu_mask(i, sd->span) + shares += tg->cfs_rq[i]->shares; + + if ((!shares && aggregate(tg, sd)->rq_weight) || shares > tg->shares) + shares = tg->shares; + + aggregate(tg, sd)->shares = shares; +} + +/* + * Compute the load fraction assigned to this group, relies on the aggregate + * weight and this group's parent's load, i.e. top-down. + */ +static +void aggregate_group_load(struct task_group *tg, struct sched_domain *sd) +{ + unsigned long load; + + if (!tg->parent) { + int i; + + load = 0; + for_each_cpu_mask(i, sd->span) + load += cpu_rq(i)->load.weight; + + } else { + load = aggregate(tg->parent, sd)->load; + + /* + * shares is our weight in the parent's rq so + * shares/parent->rq_weight gives our fraction of the load + */ + load *= aggregate(tg, sd)->shares; + load /= aggregate(tg->parent, sd)->rq_weight + 1; + } + + aggregate(tg, sd)->load = load; +} + +static void __set_se_shares(struct sched_entity *se, unsigned long shares); + +/* + * Calculate and set the cpu's group shares. + */ +static void +__update_group_shares_cpu(struct task_group *tg, struct sched_domain *sd, + int tcpu) +{ + int boost = 0; + unsigned long shares; + unsigned long rq_weight; + + if (!tg->se[tcpu]) + return; + + rq_weight = tg->cfs_rq[tcpu]->load.weight; + + /* + * If there are currently no tasks on the cpu pretend there is one of + * average load so that when a new task gets to run here it will not + * get delayed by group starvation. 
+ */ + if (!rq_weight) { + boost = 1; + rq_weight = NICE_0_LOAD; + } + + /* + * \Sum shares * rq_weight + * shares = ----------------------- + * \Sum rq_weight + * + */ + shares = aggregate(tg, sd)->shares * rq_weight; + shares /= aggregate(tg, sd)->rq_weight + 1; + + /* + * record the actual number of shares, not the boosted amount. + */ + tg->cfs_rq[tcpu]->shares = boost ? 0 : shares; + + if (shares < MIN_SHARES) + shares = MIN_SHARES; + else if (shares > MAX_SHARES) + shares = MAX_SHARES; + + __set_se_shares(tg->se[tcpu], shares); +} + +/* + * Re-adjust the weights on the cpu the task came from and on the cpu the + * task went to. + */ +static void +__move_group_shares(struct task_group *tg, struct sched_domain *sd, + int scpu, int dcpu) +{ + unsigned long shares; + + shares = tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares; + + __update_group_shares_cpu(tg, sd, scpu); + __update_group_shares_cpu(tg, sd, dcpu); + + /* + * ensure we never loose shares due to rounding errors in the + * above redistribution. + */ + shares -= tg->cfs_rq[scpu]->shares + tg->cfs_rq[dcpu]->shares; + if (shares) + tg->cfs_rq[dcpu]->shares += shares; +} + +/* + * Because changing a group's shares changes the weight of the super-group + * we need to walk up the tree and change all shares until we hit the root. + */ +static void +move_group_shares(struct task_group *tg, struct sched_domain *sd, + int scpu, int dcpu) +{ + while (tg) { + __move_group_shares(tg, sd, scpu, dcpu); + tg = tg->parent; + } +} + +static +void aggregate_group_set_shares(struct task_group *tg, struct sched_domain *sd) +{ + unsigned long shares = aggregate(tg, sd)->shares; + int i; + + for_each_cpu_mask(i, sd->span) { + struct rq *rq = cpu_rq(i); + unsigned long flags; + + spin_lock_irqsave(&rq->lock, flags); + __update_group_shares_cpu(tg, sd, i); + spin_unlock_irqrestore(&rq->lock, flags); + } + + aggregate_group_shares(tg, sd); + + /* + * ensure we never loose shares due to rounding errors in the + * above redistribution. + */ + shares -= aggregate(tg, sd)->shares; + if (shares) { + tg->cfs_rq[sd->first_cpu]->shares += shares; + aggregate(tg, sd)->shares += shares; + } +} + +/* + * Calculate the accumulative weight and recursive load of each task group + * while walking down the tree. + */ +static +void aggregate_get_down(struct task_group *tg, struct sched_domain *sd) +{ + aggregate_group_weight(tg, sd); + aggregate_group_shares(tg, sd); + aggregate_group_load(tg, sd); +} + +/* + * Rebalance the cpu shares while walking back up the tree. 
+ */ +static +void aggregate_get_up(struct task_group *tg, struct sched_domain *sd) +{ + aggregate_group_set_shares(tg, sd); +} + +static DEFINE_PER_CPU(spinlock_t, aggregate_lock); + +static void __init init_aggregate(void) +{ + int i; + + for_each_possible_cpu(i) + spin_lock_init(&per_cpu(aggregate_lock, i)); +} + +static int get_aggregate(struct sched_domain *sd) +{ + if (!spin_trylock(&per_cpu(aggregate_lock, sd->first_cpu))) + return 0; + + aggregate_walk_tree(aggregate_get_down, aggregate_get_up, sd); + return 1; +} + +static void put_aggregate(struct sched_domain *sd) +{ + spin_unlock(&per_cpu(aggregate_lock, sd->first_cpu)); +} + +static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) +{ + cfs_rq->shares = shares; +} + +#else + +static inline void init_aggregate(void) +{ +} + +static inline int get_aggregate(struct sched_domain *sd) +{ + return 0; +} + +static inline void put_aggregate(struct sched_domain *sd) +{ +} +#endif + #else /* CONFIG_SMP */ #ifdef CONFIG_FAIR_GROUP_SCHED @@ -1491,26 +1845,14 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) #define sched_class_highest (&rt_sched_class) -static inline void inc_load(struct rq *rq, const struct task_struct *p) -{ - update_load_add(&rq->load, p->se.load.weight); -} - -static inline void dec_load(struct rq *rq, const struct task_struct *p) -{ - update_load_sub(&rq->load, p->se.load.weight); -} - -static void inc_nr_running(struct task_struct *p, struct rq *rq) +static void inc_nr_running(struct rq *rq) { rq->nr_running++; - inc_load(rq, p); } -static void dec_nr_running(struct task_struct *p, struct rq *rq) +static void dec_nr_running(struct rq *rq) { rq->nr_running--; - dec_load(rq, p); } static void set_load_weight(struct task_struct *p) @@ -1602,7 +1944,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) rq->nr_uninterruptible--; enqueue_task(rq, p, wakeup); - inc_nr_running(p, rq); + inc_nr_running(rq); } /* @@ -1614,7 +1956,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) rq->nr_uninterruptible++; dequeue_task(rq, p, sleep); - dec_nr_running(p, rq); + dec_nr_running(rq); } /** @@ -2267,7 +2609,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) * management (if any): */ p->sched_class->task_new(rq, p); - inc_nr_running(p, rq); + inc_nr_running(rq); } check_preempt_curr(rq, p); #ifdef CONFIG_SMP @@ -3258,9 +3600,12 @@ static int load_balance(int this_cpu, struct rq *this_rq, unsigned long imbalance; struct rq *busiest; unsigned long flags; + int unlock_aggregate; cpus_setall(*cpus); + unlock_aggregate = get_aggregate(sd); + /* * When power savings policy is enabled for the parent domain, idle * sibling can pick up load irrespective of busy siblings. 
In this case, @@ -3376,8 +3721,9 @@ static int load_balance(int this_cpu, struct rq *this_rq, if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) - return -1; - return ld_moved; + ld_moved = -1; + + goto out; out_balanced: schedstat_inc(sd, lb_balanced[idle]); @@ -3392,8 +3738,13 @@ static int load_balance(int this_cpu, struct rq *this_rq, if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) - return -1; - return 0; + ld_moved = -1; + else + ld_moved = 0; +out: + if (unlock_aggregate) + put_aggregate(sd); + return ld_moved; } /* @@ -4079,7 +4430,7 @@ static inline void schedule_debug(struct task_struct *prev) * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ - if (unlikely(in_atomic_preempt_off() && !prev->exit_state)) + if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state)) __schedule_bug(prev); profile_hit(SCHED_PROFILING, __builtin_return_address(0)); @@ -4580,10 +4931,8 @@ void set_user_nice(struct task_struct *p, long nice) goto out_unlock; } on_rq = p->se.on_rq; - if (on_rq) { + if (on_rq) dequeue_task(rq, p, 0); - dec_load(rq, p); - } p->static_prio = NICE_TO_PRIO(nice); set_load_weight(p); @@ -4593,7 +4942,6 @@ void set_user_nice(struct task_struct *p, long nice) if (on_rq) { enqueue_task(rq, p, 0); - inc_load(rq, p); /* * If the task increased its priority or is running and * lowered its priority, then reschedule its CPU: @@ -6968,6 +7316,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, SD_INIT(sd, ALLNODES); set_domain_attribute(sd, attr); sd->span = *cpu_map; + sd->first_cpu = first_cpu(sd->span); cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); p = sd; sd_allnodes = 1; @@ -6978,6 +7327,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, SD_INIT(sd, NODE); set_domain_attribute(sd, attr); sched_domain_node_span(cpu_to_node(i), &sd->span); + sd->first_cpu = first_cpu(sd->span); sd->parent = p; if (p) p->child = sd; @@ -6989,6 +7339,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, SD_INIT(sd, CPU); set_domain_attribute(sd, attr); sd->span = *nodemask; + sd->first_cpu = first_cpu(sd->span); sd->parent = p; if (p) p->child = sd; @@ -7000,6 +7351,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, SD_INIT(sd, MC); set_domain_attribute(sd, attr); sd->span = cpu_coregroup_map(i); + sd->first_cpu = first_cpu(sd->span); cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; p->child = sd; @@ -7012,6 +7364,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, SD_INIT(sd, SIBLING); set_domain_attribute(sd, attr); sd->span = per_cpu(cpu_sibling_map, i); + sd->first_cpu = first_cpu(sd->span); cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; p->child = sd; @@ -7215,8 +7568,8 @@ static int build_sched_domains(const cpumask_t *cpu_map) static cpumask_t *doms_cur; /* current sched domains */ static int ndoms_cur; /* number of sched domains in 'doms_cur' */ -static struct sched_domain_attr *dattr_cur; - /* attribues of custom domains in 'doms_cur' */ +static struct sched_domain_attr *dattr_cur; /* attribues of custom domains + in 'doms_cur' */ /* * Special case: If a kmalloc of a doms_cur partition (array of @@ -7681,6 +8034,7 @@ void __init sched_init(void) } #ifdef CONFIG_SMP + init_aggregate(); init_defrootdomain(); #endif @@ -8245,14 +8599,11 @@ void sched_move_task(struct task_struct *tsk) #endif #ifdef CONFIG_FAIR_GROUP_SCHED -static 
void set_se_shares(struct sched_entity *se, unsigned long shares) +static void __set_se_shares(struct sched_entity *se, unsigned long shares) { struct cfs_rq *cfs_rq = se->cfs_rq; - struct rq *rq = cfs_rq->rq; int on_rq; - spin_lock_irq(&rq->lock); - on_rq = se->on_rq; if (on_rq) dequeue_entity(cfs_rq, se, 0); @@ -8262,8 +8613,17 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) if (on_rq) enqueue_entity(cfs_rq, se, 0); +} - spin_unlock_irq(&rq->lock); +static void set_se_shares(struct sched_entity *se, unsigned long shares) +{ + struct cfs_rq *cfs_rq = se->cfs_rq; + struct rq *rq = cfs_rq->rq; + unsigned long flags; + + spin_lock_irqsave(&rq->lock, flags); + __set_se_shares(se, shares); + spin_unlock_irqrestore(&rq->lock, flags); } static DEFINE_MUTEX(shares_mutex); @@ -8302,8 +8662,13 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) * w/o tripping rebalance_share or load_balance_fair. */ tg->shares = shares; - for_each_possible_cpu(i) + for_each_possible_cpu(i) { + /* + * force a rebalance + */ + cfs_rq_set_shares(tg->cfs_rq[i], 0); set_se_shares(tg->se[i], shares); + } /* * Enable load balance activity on this group, by inserting it back on diff --git a/trunk/kernel/sched_clock.c b/trunk/kernel/sched_clock.c index ce05271219ab..9c597e37f7de 100644 --- a/trunk/kernel/sched_clock.c +++ b/trunk/kernel/sched_clock.c @@ -59,26 +59,22 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) return &per_cpu(sched_clock_data, cpu); } -static __read_mostly int sched_clock_running; - void sched_clock_init(void) { u64 ktime_now = ktime_to_ns(ktime_get()); - unsigned long now_jiffies = jiffies; + u64 now = 0; int cpu; for_each_possible_cpu(cpu) { struct sched_clock_data *scd = cpu_sdc(cpu); scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; - scd->prev_jiffies = now_jiffies; - scd->prev_raw = 0; - scd->tick_raw = 0; + scd->prev_jiffies = jiffies; + scd->prev_raw = now; + scd->tick_raw = now; scd->tick_gtod = ktime_now; scd->clock = ktime_now; } - - sched_clock_running = 1; } /* @@ -140,9 +136,6 @@ u64 sched_clock_cpu(int cpu) struct sched_clock_data *scd = cpu_sdc(cpu); u64 now, clock; - if (unlikely(!sched_clock_running)) - return 0ull; - WARN_ON_ONCE(!irqs_disabled()); now = sched_clock(); @@ -181,9 +174,6 @@ void sched_clock_tick(void) struct sched_clock_data *scd = this_scd(); u64 now, now_gtod; - if (unlikely(!sched_clock_running)) - return; - WARN_ON_ONCE(!irqs_disabled()); now = sched_clock(); diff --git a/trunk/kernel/sched_debug.c b/trunk/kernel/sched_debug.c index 8bb713040ac9..5f06118fbc31 100644 --- a/trunk/kernel/sched_debug.c +++ b/trunk/kernel/sched_debug.c @@ -167,6 +167,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) #endif SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over", cfs_rq->nr_spread_over); +#ifdef CONFIG_FAIR_GROUP_SCHED +#ifdef CONFIG_SMP + SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares); +#endif +#endif } static void print_cpu(struct seq_file *m, int cpu) diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index 08ae848b71d4..e24ecd39c4b8 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -333,6 +333,34 @@ int sched_nr_latency_handler(struct ctl_table *table, int write, } #endif +/* + * delta *= w / rw + */ +static inline unsigned long +calc_delta_weight(unsigned long delta, struct sched_entity *se) +{ + for_each_sched_entity(se) { + delta = calc_delta_mine(delta, + se->load.weight, &cfs_rq_of(se)->load); + } + + return delta; +} + +/* + * 
delta *= rw / w + */ +static inline unsigned long +calc_delta_fair(unsigned long delta, struct sched_entity *se) +{ + for_each_sched_entity(se) { + delta = calc_delta_mine(delta, + cfs_rq_of(se)->load.weight, &se->load); + } + + return delta; +} + /* * The idea is to set a period in which each task runs once. * @@ -362,47 +390,54 @@ static u64 __sched_period(unsigned long nr_running) */ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { - u64 slice = __sched_period(cfs_rq->nr_running); - - for_each_sched_entity(se) { - cfs_rq = cfs_rq_of(se); - - slice *= se->load.weight; - do_div(slice, cfs_rq->load.weight); - } - - - return slice; + return calc_delta_weight(__sched_period(cfs_rq->nr_running), se); } /* * We calculate the vruntime slice of a to be inserted task * - * vs = s/w = p/rw + * vs = s*rw/w = p */ static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) { unsigned long nr_running = cfs_rq->nr_running; - unsigned long weight; - u64 vslice; if (!se->on_rq) nr_running++; - vslice = __sched_period(nr_running); + return __sched_period(nr_running); +} + +/* + * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in + * that it favours >=0 over <0. + * + * -20 | + * | + * 0 --------+------- + * .' + * 19 .' + * + */ +static unsigned long +calc_delta_asym(unsigned long delta, struct sched_entity *se) +{ + struct load_weight lw = { + .weight = NICE_0_LOAD, + .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT) + }; for_each_sched_entity(se) { - cfs_rq = cfs_rq_of(se); + struct load_weight *se_lw = &se->load; - weight = cfs_rq->load.weight; - if (!se->on_rq) - weight += se->load.weight; + if (se->load.weight < NICE_0_LOAD) + se_lw = &lw; - vslice *= NICE_0_LOAD; - do_div(vslice, weight); + delta = calc_delta_mine(delta, + cfs_rq_of(se)->load.weight, se_lw); } - return vslice; + return delta; } /* @@ -419,11 +454,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, curr->sum_exec_runtime += delta_exec; schedstat_add(cfs_rq, exec_clock, delta_exec); - delta_exec_weighted = delta_exec; - if (unlikely(curr->load.weight != NICE_0_LOAD)) { - delta_exec_weighted = calc_delta_fair(delta_exec_weighted, - &curr->load); - } + delta_exec_weighted = calc_delta_fair(delta_exec, curr); curr->vruntime += delta_exec_weighted; } @@ -510,10 +541,27 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) * Scheduling class queueing methods: */ +#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED +static void +add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight) +{ + cfs_rq->task_weight += weight; +} +#else +static inline void +add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight) +{ +} +#endif + static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_add(&cfs_rq->load, se->load.weight); + if (!parent_entity(se)) + inc_cpu_load(rq_of(cfs_rq), se->load.weight); + if (entity_is_task(se)) + add_cfs_task_weight(cfs_rq, se->load.weight); cfs_rq->nr_running++; se->on_rq = 1; list_add(&se->group_node, &cfs_rq->tasks); @@ -523,6 +571,10 @@ static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_sub(&cfs_rq->load, se->load.weight); + if (!parent_entity(se)) + dec_cpu_load(rq_of(cfs_rq), se->load.weight); + if (entity_is_task(se)) + add_cfs_task_weight(cfs_rq, -se->load.weight); cfs_rq->nr_running--; se->on_rq = 0; list_del_init(&se->group_node); @@ -609,8 +661,17 @@ place_entity(struct cfs_rq *cfs_rq, struct 
sched_entity *se, int initial) if (!initial) { /* sleeps upto a single latency don't count. */ - if (sched_feat(NEW_FAIR_SLEEPERS)) - vruntime -= sysctl_sched_latency; + if (sched_feat(NEW_FAIR_SLEEPERS)) { + unsigned long thresh = sysctl_sched_latency; + + /* + * convert the sleeper threshold into virtual time + */ + if (sched_feat(NORMALIZED_SLEEPER)) + thresh = calc_delta_fair(thresh, se); + + vruntime -= thresh; + } /* ensure we never gain time by being placed backwards. */ vruntime = max_vruntime(se->vruntime, vruntime); @@ -996,27 +1057,16 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, struct task_struct *curr = this_rq->curr; unsigned long tl = this_load; unsigned long tl_per_task; - int balanced; - if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) + if (!(this_sd->flags & SD_WAKE_AFFINE)) return 0; - /* - * If sync wakeup then subtract the (maximum possible) - * effect of the currently running task from the load - * of the current CPU: - */ - if (sync) - tl -= current->se.load.weight; - - balanced = 100*(tl + p->se.load.weight) <= imbalance*load; - /* * If the currently running task will sleep within * a reasonable amount of time then attract this newly * woken task: */ - if (sync && balanced && curr->sched_class == &fair_sched_class) { + if (sync && curr->sched_class == &fair_sched_class) { if (curr->se.avg_overlap < sysctl_sched_migration_cost && p->se.avg_overlap < sysctl_sched_migration_cost) return 1; @@ -1025,8 +1075,16 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, schedstat_inc(p, se.nr_wakeups_affine_attempts); tl_per_task = cpu_avg_load_per_task(this_cpu); + /* + * If sync wakeup then subtract the (maximum possible) + * effect of the currently running task from the load + * of the current CPU: + */ + if (sync) + tl -= current->se.load.weight; + if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) || - balanced) { + 100*(tl + p->se.load.weight) <= imbalance*load) { /* * This domain has SD_WAKE_AFFINE and * p is cache cold in this domain, and @@ -1111,11 +1169,10 @@ static unsigned long wakeup_gran(struct sched_entity *se) unsigned long gran = sysctl_sched_wakeup_granularity; /* - * More easily preempt - nice tasks, while not making - * it harder for + nice tasks. + * More easily preempt - nice tasks, while not making it harder for + * + nice tasks. 
*/ - if (unlikely(se->load.weight > NICE_0_LOAD)) - gran = calc_delta_fair(gran, &se->load); + gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se); return gran; } @@ -1309,75 +1366,90 @@ static struct task_struct *load_balance_next_fair(void *arg) return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator); } -#ifdef CONFIG_FAIR_GROUP_SCHED -static int cfs_rq_best_prio(struct cfs_rq *cfs_rq) +static unsigned long +__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, struct sched_domain *sd, + enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, + struct cfs_rq *cfs_rq) { - struct sched_entity *curr; - struct task_struct *p; - - if (!cfs_rq->nr_running || !first_fair(cfs_rq)) - return MAX_PRIO; - - curr = cfs_rq->curr; - if (!curr) - curr = __pick_next_entity(cfs_rq); + struct rq_iterator cfs_rq_iterator; - p = task_of(curr); + cfs_rq_iterator.start = load_balance_start_fair; + cfs_rq_iterator.next = load_balance_next_fair; + cfs_rq_iterator.arg = cfs_rq; - return p->prio; + return balance_tasks(this_rq, this_cpu, busiest, + max_load_move, sd, idle, all_pinned, + this_best_prio, &cfs_rq_iterator); } -#endif +#ifdef CONFIG_FAIR_GROUP_SCHED static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio) { - struct cfs_rq *busy_cfs_rq; long rem_load_move = max_load_move; - struct rq_iterator cfs_rq_iterator; - - cfs_rq_iterator.start = load_balance_start_fair; - cfs_rq_iterator.next = load_balance_next_fair; + int busiest_cpu = cpu_of(busiest); + struct task_group *tg; - for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { -#ifdef CONFIG_FAIR_GROUP_SCHED - struct cfs_rq *this_cfs_rq; + rcu_read_lock(); + list_for_each_entry(tg, &task_groups, list) { long imbalance; - unsigned long maxload; + unsigned long this_weight, busiest_weight; + long rem_load, max_load, moved_load; + + /* + * empty group + */ + if (!aggregate(tg, sd)->task_weight) + continue; + + rem_load = rem_load_move * aggregate(tg, sd)->rq_weight; + rem_load /= aggregate(tg, sd)->load + 1; + + this_weight = tg->cfs_rq[this_cpu]->task_weight; + busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight; - this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu); + imbalance = (busiest_weight - this_weight) / 2; - imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight; - /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */ - if (imbalance <= 0) + if (imbalance < 0) + imbalance = busiest_weight; + + max_load = max(rem_load, imbalance); + moved_load = __load_balance_fair(this_rq, this_cpu, busiest, + max_load, sd, idle, all_pinned, this_best_prio, + tg->cfs_rq[busiest_cpu]); + + if (!moved_load) continue; - /* Don't pull more than imbalance/2 */ - imbalance /= 2; - maxload = min(rem_load_move, imbalance); + move_group_shares(tg, sd, busiest_cpu, this_cpu); - *this_best_prio = cfs_rq_best_prio(this_cfs_rq); -#else -# define maxload rem_load_move -#endif - /* - * pass busy_cfs_rq argument into - * load_balance_[start|next]_fair iterators - */ - cfs_rq_iterator.arg = busy_cfs_rq; - rem_load_move -= balance_tasks(this_rq, this_cpu, busiest, - maxload, sd, idle, all_pinned, - this_best_prio, - &cfs_rq_iterator); + moved_load *= aggregate(tg, sd)->load; + moved_load /= aggregate(tg, sd)->rq_weight + 1; - if (rem_load_move <= 0) + rem_load_move -= moved_load; + if (rem_load_move < 0) break; } + rcu_read_unlock(); return 
max_load_move - rem_load_move; } +#else +static unsigned long +load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, + unsigned long max_load_move, + struct sched_domain *sd, enum cpu_idle_type idle, + int *all_pinned, int *this_best_prio) +{ + return __load_balance_fair(this_rq, this_cpu, busiest, + max_load_move, sd, idle, all_pinned, + this_best_prio, &busiest->cfs); +} +#endif static int move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c index 3432d573205d..060e87b0cb1c 100644 --- a/trunk/kernel/sched_rt.c +++ b/trunk/kernel/sched_rt.c @@ -513,6 +513,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) */ for_each_sched_rt_entity(rt_se) enqueue_rt_entity(rt_se); + + inc_cpu_load(rq, p->se.load.weight); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) @@ -532,6 +534,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) if (rt_rq && rt_rq->rt_nr_running) enqueue_rt_entity(rt_se); } + + dec_cpu_load(rq, p->se.load.weight); } /* diff --git a/trunk/kernel/sched_stats.h b/trunk/kernel/sched_stats.h index a38878e0e49d..5bae2e0c3ff2 100644 --- a/trunk/kernel/sched_stats.h +++ b/trunk/kernel/sched_stats.h @@ -67,7 +67,6 @@ static int show_schedstat(struct seq_file *seq, void *v) preempt_enable(); #endif } - kfree(mask_str); return 0; }
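
Note on the load_balance() hunks above: the two early exits (return -1 / return ld_moved) become assignments to ld_moved plus a jump to a shared out: label, so the conditional put_aggregate(sd) runs on every path, presumably pairing with a get_aggregate() taken earlier in the function (not shown in this excerpt). A minimal userspace sketch of that single-exit shape, with hypothetical acquire/release helpers standing in for the aggregate calls:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for get_aggregate()/put_aggregate(). */
static bool acquire_aggregate(void) { puts("aggregate taken"); return true; }
static void release_aggregate(void) { puts("aggregate released"); }

/* Every exit funnels through "out", so the release is never skipped,
 * mirroring the reshaped control flow in load_balance(). */
static int balance(int pulled, bool share_cpupower)
{
	bool unlock = acquire_aggregate();
	int ret;

	if (pulled == 0 && share_cpupower) {
		ret = -1;		/* was "return -1" before the patch */
		goto out;
	}
	ret = pulled;			/* was "return ld_moved" */
out:
	if (unlock)
		release_aggregate();
	return ret;
}

int main(void)
{
	printf("%d\n", balance(0, true));
	printf("%d\n", balance(3, false));
	return 0;
}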
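
Note on the set_se_shares() hunks: the function is split into a lock-free __set_se_shares() worker plus a wrapper that takes rq->lock with spin_lock_irqsave() rather than spin_lock_irq(), so the update can also be issued from contexts where interrupts may already be off. A rough userspace model of that locked-wrapper idiom, using a pthread mutex purely as a stand-in for the runqueue lock:

#include <pthread.h>
#include <stdio.h>

struct entity {
	pthread_mutex_t lock;		/* stands in for rq->lock */
	unsigned long shares;
	int queued;
};

/* Models __set_se_shares(): caller must already hold the lock. */
static void __set_shares(struct entity *e, unsigned long shares)
{
	if (e->queued) { /* dequeue_entity() would run here */ }
	e->shares = shares;
	if (e->queued) { /* enqueue_entity() would run here */ }
}

/* Models set_se_shares(): take the lock, call the worker, drop the lock,
 * the way the patch wraps __set_se_shares() in spin_lock_irqsave(). */
static void set_shares(struct entity *e, unsigned long shares)
{
	pthread_mutex_lock(&e->lock);
	__set_shares(e, shares);
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	struct entity e = { PTHREAD_MUTEX_INITIALIZER, 1024, 1 };

	set_shares(&e, 2048);
	printf("shares=%lu\n", e.shares);
	return 0;
}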
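
Note on calc_delta_weight()/calc_delta_fair() in sched_fair.c: both walk the group hierarchy and scale a delta by w/rw (entity weight over runqueue weight) respectively rw/w at each level, which is also how sched_slice() now expresses its former explicit loop. A simplified fixed-point model of that scaling; calc_delta_mine()'s inverse-weight trick is replaced by plain 64-bit arithmetic, and the hierarchy is just an array of per-level weights:

#include <stdio.h>
#include <stdint.h>

struct level { uint64_t se_weight, rq_weight; };

/* delta *= w / rw, applied per hierarchy level (calc_delta_weight()). */
static uint64_t delta_weight(uint64_t delta, const struct level *l, int n)
{
	for (int i = 0; i < n; i++)
		delta = delta * l[i].se_weight / l[i].rq_weight;
	return delta;
}

/* delta *= rw / w, applied per hierarchy level (calc_delta_fair()). */
static uint64_t delta_fair(uint64_t delta, const struct level *l, int n)
{
	for (int i = 0; i < n; i++)
		delta = delta * l[i].rq_weight / l[i].se_weight;
	return delta;
}

int main(void)
{
	/* a weight-1024 task on a weight-3072 runqueue, inside a group
	 * that owns half of its parent runqueue's weight */
	struct level h[] = { { 1024, 3072 }, { 2048, 4096 } };
	uint64_t period = 20000000;	/* 20 ms in ns */

	printf("wall-clock slice     : %llu ns\n",
	       (unsigned long long)delta_weight(period, h, 2));
	printf("virtual time of 1 ms : %llu ns\n",
	       (unsigned long long)delta_fair(1000000, h, 2));
	return 0;
}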
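
Note on the place_entity() hunk: the sleeper credit is now a threshold that starts at sysctl_sched_latency, is optionally rescaled into the entity's virtual time with calc_delta_fair() when NORMALIZED_SLEEPER is set, is subtracted from vruntime, and is then clamped with max_vruntime() so a sleeper never gains time. A single-level model of that placement (names and sample numbers are illustrative, and the whole hierarchy is collapsed into one weight ratio):

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024ULL

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* Subtract the (possibly normalized) latency threshold from the
 * runqueue's min_vruntime, then clamp against the old vruntime. */
static uint64_t place(uint64_t min_vruntime, uint64_t old_vruntime,
		      uint64_t se_weight, uint64_t rq_weight,
		      uint64_t latency, int normalized)
{
	uint64_t thresh = latency;
	uint64_t vruntime = min_vruntime;

	if (normalized)				/* NORMALIZED_SLEEPER */
		thresh = thresh * rq_weight / se_weight;

	vruntime -= thresh;
	return max_u64(old_vruntime, vruntime);
}

int main(void)
{
	printf("plain sleeper credit      : %llu\n",
	       (unsigned long long)place(100000000, 1000, NICE_0_LOAD,
					 3 * NICE_0_LOAD, 20000000, 0));
	printf("normalized sleeper credit : %llu\n",
	       (unsigned long long)place(100000000, 1000, NICE_0_LOAD,
					 3 * NICE_0_LOAD, 20000000, 1));
	return 0;
}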
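
Note on the group-aware load_balance_fair(): each task group gets a slice of max_load_move proportional to its share of the aggregated load, at most max(rem_load, imbalance) weight is pulled from that group's cfs_rq on the busiest CPU, and whatever was moved is scaled back into global load units before being charged against the remaining budget. A standalone arithmetic sketch of that proportioning; the field names are simplified stand-ins for aggregate(tg, sd)->load / rq_weight and the per-cfs_rq task_weight values:

#include <stdio.h>

struct group {
	long agg_load;		/* aggregate(tg, sd)->load              */
	long agg_rq_weight;	/* aggregate(tg, sd)->rq_weight         */
	long this_weight;	/* tg->cfs_rq[this_cpu]->task_weight    */
	long busiest_weight;	/* tg->cfs_rq[busiest_cpu]->task_weight */
};

static long max_l(long a, long b) { return a > b ? a : b; }

/* How much weight may be pulled from this group in one pass. */
static long group_budget(long rem_load_move, const struct group *g)
{
	long rem_load = rem_load_move * g->agg_rq_weight / (g->agg_load + 1);
	long imbalance = (g->busiest_weight - g->this_weight) / 2;

	if (imbalance < 0)
		imbalance = g->busiest_weight;

	return max_l(rem_load, imbalance);
}

/* Convert weight actually moved inside the group back to global units. */
static long charge_moved(long moved, const struct group *g)
{
	return moved * g->agg_load / (g->agg_rq_weight + 1);
}

int main(void)
{
	struct group g = { 4096, 2048, 512, 1536 };
	long rem = 3000;

	printf("may pull up to %ld weight from this group\n",
	       group_budget(rem, &g));
	printf("moving 512 weight costs %ld of the global budget\n",
	       charge_moved(512, &g));
	return 0;
}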
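
Note on the sched_rt.c hunks: enqueue_task_rt() and dequeue_task_rt() now add and remove p->se.load.weight via inc_cpu_load()/dec_cpu_load(), matching what account_entity_enqueue()/account_entity_dequeue() do for top-level CFS entities, so the per-CPU load figure also reflects RT tasks. A small model of that symmetric bookkeeping:

#include <assert.h>
#include <stdio.h>

struct rq { unsigned long load; unsigned long nr_running; };

/* inc_cpu_load() equivalent on the model runqueue. */
static void enqueue(struct rq *rq, unsigned long weight)
{
	rq->load += weight;
	rq->nr_running++;
}

/* dec_cpu_load() equivalent; every enqueue must be paired. */
static void dequeue(struct rq *rq, unsigned long weight)
{
	assert(rq->load >= weight);
	rq->load -= weight;
	rq->nr_running--;
}

int main(void)
{
	struct rq rq = { 0, 0 };

	enqueue(&rq, 1024);	/* CFS task */
	enqueue(&rq, 1024);	/* RT task is now counted too */
	dequeue(&rq, 1024);
	printf("load=%lu nr_running=%lu\n", rq.load, rq.nr_running);
	return 0;
}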