Commit

---
r: 40632
b: refs/heads/master
c: 40eb006
h: refs/heads/master
v: v3
Linus Torvalds committed Nov 2, 2006
1 parent 7f5595c commit e02a6e0
Showing 173 changed files with 2,373 additions and 2,187 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 7a118df3ea23820b9922a1b51cd2f24e464f4c17
refs/heads/master: 40eb006685387b2861bd7196be0ab7144c5d5b71
39 changes: 8 additions & 31 deletions trunk/Documentation/mips/time.README
@@ -38,19 +38,14 @@ The new time code provides the following services:

a) Implements functions required by Linux common code:
time_init
do_gettimeofday
do_settimeofday

b) provides an abstraction of RTC and null RTC implementation as default.
extern unsigned long (*rtc_get_time)(void);
extern int (*rtc_set_time)(unsigned long);

c) a set of gettimeoffset functions for different CPUs and different
needs.

d) high-level and low-level timer interrupt routines where the timer
interrupt source may or may not be the CPU timer. The high-level
routine is dispatched through do_IRQ() while the low-level is
c) high-level and low-level timer interrupt routines where the timer
interrupt source may or may not be the CPU timer. The high-level
routine is dispatched through do_IRQ() while the low-level is
dispatched in assembly code (usually int-handler.S)


@@ -73,8 +68,7 @@ the following functions or values:
c) (optional) board-specific RTC routines.

d) (optional) mips_hpt_frequency - It must be defined if the board
is using CPU counter for timer interrupt or it is using fixed rate
gettimeoffset().
is using CPU counter for timer interrupt.


PORTING GUIDE
@@ -89,16 +83,6 @@ Step 1: decide how you like to implement the time services.
If the answer is no, you need a timer to provide the timer interrupt
at 100 HZ speed.

You cannot use the fast gettimeoffset functions, i.e.,

unsigned long fixed_rate_gettimeoffset(void);
unsigned long calibrate_div32_gettimeoffset(void);
unsigned long calibrate_div64_gettimeoffset(void);

You can use null_gettimeoffset(), which gives the same time resolution as
a jiffy. Or you can implement your own gettimeoffset (probably based on
some ad hoc hardware on your machine.)

c) The following sub steps assume your CPU has counter register.
Do you plan to use the CPU counter register as the timer interrupt
or use an external timer?
@@ -123,8 +107,8 @@ Step 3: implement rtc routines, board_time_init() and plat_timer_setup()
board_time_init() -
a) (optional) set up RTC routines,
b) (optional) calibrate and set the mips_hpt_frequency
(only needed if you intended to use fixed_rate_gettimeoffset
or use cpu counter as timer interrupt source)
(only needed if you intended to use cpu counter as timer interrupt
source)

plat_timer_setup() -
a) (optional) over-write any choices made above by time_init().
@@ -154,8 +138,8 @@ for some of the functions in time.c.
For example, you may define your own timer interrupt routine, which does
some of its own processing and then calls timer_interrupt().

You can also over-ride any of the built-in functions (gettimeoffset,
RTC routines and/or timer interrupt routine).
You can also over-ride any of the built-in functions (RTC routines
and/or timer interrupt routine).


PORTING NOTES FOR SMP
@@ -187,10 +171,3 @@ You need to decide on your timer interrupt sources.

You can also do the low-level version of those interrupt routines,
following similar dispatching routes described above.

Note about do_gettimeoffset():

It is very likely the CPU counter registers are not sync'ed up in an SMP box.
Therefore you cannot really use many of the existing routines that
are based on the CPU counter. You should write your own gettimeoffset routine
if you want intra-jiffy resolution.
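
To make the porting guide above concrete, here is a minimal sketch of the board hooks it describes, for an imaginary "myboard" platform. Everything board-specific below is an assumption rather than code from this commit: the RTC access is faked with a fixed date, the 100 MHz counter frequency is a placeholder, and the <asm/time.h> include is only where these hooks are assumed to be declared. Only the hook names (rtc_get_time, rtc_set_time, mips_hpt_frequency, board_time_init) come from the README itself.

#include <linux/init.h>
#include <linux/time.h>
#include <asm/time.h>

/* Hypothetical board support; a sketch only, not code from this commit. */

static unsigned long myboard_rtc_get_time(void)
{
	/* Read the board's RTC chip here; a fixed date stands in for it. */
	return mktime(2006, 11, 2, 0, 0, 0);
}

static int myboard_rtc_set_time(unsigned long t)
{
	/* Write t (seconds since the epoch) back to the RTC chip. */
	return 0;
}

void __init myboard_time_init(void)
{
	/* b) hook up the optional RTC routines declared by this README */
	rtc_get_time = myboard_rtc_get_time;
	rtc_set_time = myboard_rtc_set_time;

	/*
	 * d) only needed when the CPU counter is the timer interrupt
	 * source: tell the core how fast the counter ticks.
	 * 100 MHz is a placeholder; calibrate it on the real board.
	 */
	mips_hpt_frequency = 100000000;
}

Registering myboard_time_init() as the board_time_init hook happens in the board's setup code; exactly where is platform specific, so it is left out of this sketch.
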
66 changes: 63 additions & 3 deletions trunk/arch/i386/kernel/io_apic.c
@@ -91,6 +91,46 @@ static struct irq_pin_list {
int apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];

struct io_apic {
unsigned int index;
unsigned int unused[3];
unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+ (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
writel(reg, &io_apic->index);
return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
struct io_apic __iomem *io_apic = io_apic_base(apic);
writel(reg, &io_apic->index);
writel(value, &io_apic->data);
}

/*
* Re-write a value: to be used for read-modify-write
* cycles where the read already set up the index register.
*
* Older SiS APIC requires we rewrite the index register
*/
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
volatile struct io_apic *io_apic = io_apic_base(apic);
if (sis_apic_bug)
writel(reg, &io_apic->index);
writel(value, &io_apic->data);
}

union entry_union {
struct { u32 w1, w2; };
struct IO_APIC_route_entry entry;
@@ -107,11 +147,33 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
return eu.entry;
}

/*
* When we write a new IO APIC routing entry, we need to write the high
* word first! If the mask bit in the low word is clear, we will enable
* the interrupt, and we need to make sure the entry is fully populated
* before that happens.
*/
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
unsigned long flags;
union entry_union eu;
eu.entry = e;
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
* When we mask an IO APIC routing entry, we need to write the low
* word first, in order to set the mask bit before we change the
* high bits!
*/
static void ioapic_mask_entry(int apic, int pin)
{
unsigned long flags;
union entry_union eu = { .entry.mask = 1 };

spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x10 + 2*pin, eu.w1);
io_apic_write(apic, 0x11 + 2*pin, eu.w2);
@@ -234,9 +296,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
/*
* Disable it in the IO-APIC irq-routing table:
*/
memset(&entry, 0, sizeof(entry));
entry.mask = 1;
ioapic_write_entry(apic, pin, entry);
ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC (void)
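
The io_apic_read()/io_apic_modify() pair added above is meant for read-modify-write cycles: the read programs the IO-APIC index register, so the follow-up write can skip it, except on SiS chipsets, which io_apic_modify() handles through sis_apic_bug. As an illustration of how a caller might mask a single pin with these accessors, here is a short sketch; the helper name is hypothetical, and treating bit 16 of the entry's low word as the mask bit is an assumption about the standard redirection-entry layout, not something stated in this commit.

/* Hypothetical illustration; not part of this commit. */
static void mask_one_ioapic_pin(unsigned int apic, unsigned int pin)
{
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&ioapic_lock, flags);
	/* low word of the redirection entry, same 0x10 + 2*pin layout as above */
	reg = io_apic_read(apic, 0x10 + 2 * pin);
	reg |= 1 << 16;		/* set the mask bit (assumed bit position) */
	/* io_apic_read() already programmed the index register */
	io_apic_modify(apic, 0x10 + 2 * pin, reg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

The ordering comments above matter when both words of an entry change; here only the low word is touched, so a single write under ioapic_lock is enough.
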
95 changes: 68 additions & 27 deletions trunk/arch/ia64/kernel/mca_drv.c
@@ -434,6 +450,50 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
return MCA_IS_GLOBAL;
}

/**
* get_target_identifier - Get the valid Cache or Bus check target identifier.
* @peidx: pointer of index of processor error section
*
* Return value:
target address on Success / 0 on Failure
*/
static u64
get_target_identifier(peidx_table_t *peidx)
{
u64 target_address = 0;
sal_log_mod_error_info_t *smei;
pal_cache_check_info_t *pcci;
int i, level = 9;

/*
* Look through the cache checks for a valid target identifier
* If more than one valid target identifier, return the one
* with the lowest cache level.
*/
for (i = 0; i < peidx_cache_check_num(peidx); i++) {
smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
if (smei->valid.target_identifier && smei->target_identifier) {
pcci = (pal_cache_check_info_t *)&(smei->check_info);
if (!target_address || (pcci->level < level)) {
target_address = smei->target_identifier;
level = pcci->level;
continue;
}
}
}
if (target_address)
return target_address;

/*
* Look at the bus check for a valid target identifier
*/
smei = peidx_bus_check(peidx, 0);
if (smei && smei->valid.target_identifier)
return smei->target_identifier;

return 0;
}

/**
* recover_from_read_error - Try to recover the errors which type are "read"s.
* @slidx: pointer of index of SAL error record
@@ -450,13 +494,14 @@ recover_from_read_error(slidx_table_t *slidx,
peidx_table_t *peidx, pal_bus_check_info_t *pbci,
struct ia64_sal_os_state *sos)
{
sal_log_mod_error_info_t *smei;
u64 target_identifier;
pal_min_state_area_t *pmsa;
struct ia64_psr *psr1, *psr2;
ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;

/* Is target address valid? */
if (!pbci->tv)
target_identifier = get_target_identifier(peidx);
if (!target_identifier)
return fatal_mca("target address not valid");

/*
@@ -487,32 +532,28 @@ recover_from_read_error(slidx_table_t *slidx,
pmsa = sos->pal_min_state;
if (psr1->cpl != 0 ||
((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
smei = peidx_bus_check(peidx, 0);
if (smei->valid.target_identifier) {
/*
* setup for resume to bottom half of MCA,
* "mca_handler_bhhook"
*/
/* pass to bhhook as argument (gr8, ...) */
pmsa->pmsa_gr[8-1] = smei->target_identifier;
pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
/* set interrupted return address (but no use) */
pmsa->pmsa_br0 = pmsa->pmsa_iip;
/* change resume address to bottom half */
pmsa->pmsa_iip = mca_hdlr_bh->fp;
pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
/* set cpl with kernel mode */
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0;
psr2->ri = 0;
psr2->bn = 1;
psr2->i = 0;

return mca_recovered("user memory corruption. "
/*
* setup for resume to bottom half of MCA,
* "mca_handler_bhhook"
*/
/* pass to bhhook as argument (gr8, ...) */
pmsa->pmsa_gr[8-1] = target_identifier;
pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
/* set interrupted return address (but no use) */
pmsa->pmsa_br0 = pmsa->pmsa_iip;
/* change resume address to bottom half */
pmsa->pmsa_iip = mca_hdlr_bh->fp;
pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
/* set cpl with kernel mode */
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0;
psr2->ri = 0;
psr2->bn = 1;
psr2->i = 0;

return mca_recovered("user memory corruption. "
"kill affected process - recovered.");
}

}

return fatal_mca("kernel context not recovered, iip 0x%lx\n",
11 changes: 7 additions & 4 deletions trunk/arch/ia64/kernel/sal.c
@@ -223,12 +223,13 @@ static void __init sal_desc_ap_wakeup(void *p) { }
*/
static int sal_cache_flush_drops_interrupts;

static void __init
void __init
check_sal_cache_flush (void)
{
unsigned long flags;
int cpu;
u64 vector;
u64 vector, cache_type = 3;
struct ia64_sal_retval isrv;

cpu = get_cpu();
local_irq_save(flags);
@@ -243,7 +244,10 @@ check_sal_cache_flush (void)
while (!ia64_get_irr(IA64_TIMER_VECTOR))
cpu_relax();

ia64_sal_cache_flush(3);
SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);

if (isrv.status)
printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n", isrv.status);

if (ia64_get_irr(IA64_TIMER_VECTOR)) {
vector = ia64_get_ivr();
@@ -331,7 +335,6 @@ ia64_sal_init (struct ia64_sal_systab *systab)
p += SAL_DESC_SIZE(*p);
}

check_sal_cache_flush();
}

int
2 changes: 2 additions & 0 deletions trunk/arch/ia64/kernel/setup.c
@@ -457,6 +457,8 @@ setup_arch (char **cmdline_p)
cpu_init(); /* initialize the bootstrap CPU */
mmu_context_init(); /* initialize context_id bitmap */

check_sal_cache_flush();

#ifdef CONFIG_ACPI
acpi_boot_init();
#endif
12 changes: 7 additions & 5 deletions trunk/arch/ia64/kernel/smp.c
@@ -108,7 +108,7 @@ cpu_die(void)
}

irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
handle_IPI (int irq, void *dev_id)
{
int this_cpu = get_cpu();
unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -328,10 +328,14 @@ int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
struct call_data_struct data;
int cpus = num_online_cpus()-1;
int cpus;

if (!cpus)
spin_lock(&call_lock);
cpus = num_online_cpus() - 1;
if (!cpus) {
spin_unlock(&call_lock);
return 0;
}

/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -343,8 +347,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
if (wait)
atomic_set(&data.finished, 0);

spin_lock(&call_lock);

call_data = &data;
mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
send_IPI_allbutself(IPI_CALL_FUNC);