From 68ba951283721104da65991d5e807abeaa66e900 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 27 Jun 2005 10:55:12 +0200 Subject: [PATCH] --- yaml --- r: 3499 b: refs/heads/master c: 22e2c507c301c3dbbcf91b4948b88f78842ee6c9 h: refs/heads/master i: 3497: 1c7d2ed86f9ca8fb5041f963ddad029dce633be0 3495: eb3e9e82aa4dd68cc2237f553069307cb5c3957f v: v3 --- [refs] | 2 +- trunk/arch/i386/kernel/syscall_table.S | 2 + trunk/arch/ia64/kernel/entry.S | 4 +- trunk/arch/ppc/kernel/misc.S | 2 + trunk/drivers/block/as-iosched.c | 5 +- trunk/drivers/block/cfq-iosched.c | 1910 ++++++++++++++++-------- trunk/drivers/block/deadline-iosched.c | 3 +- trunk/drivers/block/elevator.c | 9 +- trunk/drivers/block/ll_rw_blk.c | 59 +- trunk/drivers/net/skge.c | 1710 +++++++++++---------- trunk/drivers/net/skge.h | 586 ++++++-- trunk/fs/Makefile | 1 + trunk/fs/ioprio.c | 172 +++ trunk/fs/reiserfs/journal.c | 12 + trunk/include/asm-i386/unistd.h | 4 +- trunk/include/asm-ia64/unistd.h | 2 + trunk/include/asm-ppc/unistd.h | 4 +- trunk/include/asm-x86_64/unistd.h | 6 +- trunk/include/linux/bio.h | 14 + trunk/include/linux/blkdev.h | 25 +- trunk/include/linux/elevator.h | 8 +- trunk/include/linux/fs.h | 19 + trunk/include/linux/init_task.h | 2 + trunk/include/linux/ioprio.h | 87 ++ trunk/include/linux/sched.h | 6 +- trunk/include/linux/writeback.h | 6 +- trunk/kernel/exit.c | 2 + trunk/kernel/fork.c | 5 + trunk/kernel/sched.c | 8 - 29 files changed, 3049 insertions(+), 1626 deletions(-) create mode 100644 trunk/fs/ioprio.c create mode 100644 trunk/include/linux/ioprio.h diff --git a/[refs] b/[refs] index f0b40b65874e..7a74df4f93fb 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 747802ab478399f13ff57751c2ebd22577be4eeb +refs/heads/master: 22e2c507c301c3dbbcf91b4948b88f78842ee6c9 diff --git a/trunk/arch/i386/kernel/syscall_table.S b/trunk/arch/i386/kernel/syscall_table.S index 442a6e937b19..3db9a04aec6e 100644 --- a/trunk/arch/i386/kernel/syscall_table.S +++ b/trunk/arch/i386/kernel/syscall_table.S @@ -289,3 +289,5 @@ ENTRY(sys_call_table) .long sys_add_key .long sys_request_key .long sys_keyctl + .long sys_ioprio_set + .long sys_ioprio_get /* 290 */ diff --git a/trunk/arch/ia64/kernel/entry.S b/trunk/arch/ia64/kernel/entry.S index b1d5d3d5276c..785a51b0ad8e 100644 --- a/trunk/arch/ia64/kernel/entry.S +++ b/trunk/arch/ia64/kernel/entry.S @@ -1577,8 +1577,8 @@ sys_call_table: data8 sys_add_key data8 sys_request_key data8 sys_keyctl - data8 sys_ni_syscall - data8 sys_ni_syscall // 1275 + data8 sys_ioprio_set + data8 sys_ioprio_get // 1275 data8 sys_set_zone_reclaim data8 sys_ni_syscall data8 sys_ni_syscall diff --git a/trunk/arch/ppc/kernel/misc.S b/trunk/arch/ppc/kernel/misc.S index b6a63a49a232..191a8def3bdb 100644 --- a/trunk/arch/ppc/kernel/misc.S +++ b/trunk/arch/ppc/kernel/misc.S @@ -1449,3 +1449,5 @@ _GLOBAL(sys_call_table) .long sys_request_key /* 270 */ .long sys_keyctl .long sys_waitid + .long sys_ioprio_set + .long sys_ioprio_get diff --git a/trunk/drivers/block/as-iosched.c b/trunk/drivers/block/as-iosched.c index 3410b4d294b9..91aeb678135d 100644 --- a/trunk/drivers/block/as-iosched.c +++ b/trunk/drivers/block/as-iosched.c @@ -1806,7 +1806,8 @@ static void as_put_request(request_queue_t *q, struct request *rq) rq->elevator_private = NULL; } -static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +static int as_set_request(request_queue_t *q, struct request *rq, + struct bio *bio, int gfp_mask) { struct as_data *ad = q->elevator->elevator_data; struct as_rq *arq = 
mempool_alloc(ad->arq_pool, gfp_mask); @@ -1827,7 +1828,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) return 1; } -static int as_may_queue(request_queue_t *q, int rw) +static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) { int ret = ELV_MQUEUE_MAY; struct as_data *ad = q->elevator->elevator_data; diff --git a/trunk/drivers/block/cfq-iosched.c b/trunk/drivers/block/cfq-iosched.c index 3ac47dde64da..35f6e569d5e5 100644 --- a/trunk/drivers/block/cfq-iosched.c +++ b/trunk/drivers/block/cfq-iosched.c @@ -21,22 +21,33 @@ #include #include #include - -static unsigned long max_elapsed_crq; -static unsigned long max_elapsed_dispatch; +#include +#include /* * tunables */ static int cfq_quantum = 4; /* max queue in one round of service */ static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/ -static int cfq_service = HZ; /* period over which service is avg */ -static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */ -static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */ -static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */ +static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ static int cfq_back_penalty = 2; /* penalty of a backwards seek */ +static int cfq_slice_sync = HZ / 10; +static int cfq_slice_async = HZ / 50; +static int cfq_slice_async_rq = 2; +static int cfq_slice_idle = HZ / 50; + +#define CFQ_IDLE_GRACE (HZ / 10) +#define CFQ_SLICE_SCALE (5) + +#define CFQ_KEY_ASYNC (0) + +/* + * disable queueing at the driver/hardware level + */ +static int cfq_max_depth = 1; + /* * for the hash of cfqq inside the cfqd */ @@ -55,6 +66,7 @@ static int cfq_back_penalty = 2; /* penalty of a backwards seek */ #define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) +#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define RQ_DATA(rq) (rq)->elevator_private @@ -75,78 +87,101 @@ static int cfq_back_penalty = 2; /* penalty of a backwards seek */ #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) #define rq_rb_key(rq) (rq)->sector -/* - * threshold for switching off non-tag accounting - */ -#define CFQ_MAX_TAG (4) - -/* - * sort key types and names - */ -enum { - CFQ_KEY_PGID, - CFQ_KEY_TGID, - CFQ_KEY_UID, - CFQ_KEY_GID, - CFQ_KEY_LAST, -}; - -static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL }; - static kmem_cache_t *crq_pool; static kmem_cache_t *cfq_pool; static kmem_cache_t *cfq_ioc_pool; +#define CFQ_PRIO_LISTS IOPRIO_BE_NR +#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) +#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) +#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) + +#define cfq_cfqq_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC) + +/* + * Per block device queue structure + */ struct cfq_data { - struct list_head rr_list; + atomic_t ref; + request_queue_t *queue; + + /* + * rr list of queues with requests and the count of them + */ + struct list_head rr_list[CFQ_PRIO_LISTS]; + struct list_head busy_rr; + struct list_head cur_rr; + struct list_head idle_rr; + unsigned int busy_queues; + + /* + * non-ordered list of empty cfqq's + */ struct list_head empty_list; + /* + * cfqq lookup hash + */ struct hlist_head *cfq_hash; - struct hlist_head *crq_hash; - /* queues on rr_list (ie they have pending requests */ - 
unsigned int busy_queues; + /* + * global crq hash for all queues + */ + struct hlist_head *crq_hash; unsigned int max_queued; - atomic_t ref; + mempool_t *crq_pool; - int key_type; + int rq_in_driver; - mempool_t *crq_pool; + /* + * schedule slice state info + */ + /* + * idle window management + */ + struct timer_list idle_slice_timer; + struct work_struct unplug_work; - request_queue_t *queue; + struct cfq_queue *active_queue; + struct cfq_io_context *active_cic; + int cur_prio, cur_end_prio; + unsigned int dispatch_slice; + + struct timer_list idle_class_timer; sector_t last_sector; + unsigned long last_end_request; - int rq_in_driver; + unsigned int rq_starved; /* * tunables, see top of file */ unsigned int cfq_quantum; unsigned int cfq_queued; - unsigned int cfq_fifo_expire_r; - unsigned int cfq_fifo_expire_w; - unsigned int cfq_fifo_batch_expire; + unsigned int cfq_fifo_expire[2]; unsigned int cfq_back_penalty; unsigned int cfq_back_max; - unsigned int find_best_crq; - - unsigned int cfq_tagged; + unsigned int cfq_slice[2]; + unsigned int cfq_slice_async_rq; + unsigned int cfq_slice_idle; + unsigned int cfq_max_depth; }; +/* + * Per process-grouping structure + */ struct cfq_queue { /* reference count */ atomic_t ref; /* parent cfq_data */ struct cfq_data *cfqd; - /* hash of mergeable requests */ + /* cfqq lookup hash */ struct hlist_node cfq_hash; /* hash key */ - unsigned long key; - /* whether queue is on rr (or empty) list */ - int on_rr; + unsigned int key; /* on either rr or empty list of cfqd */ struct list_head cfq_list; /* sorted list of pending requests */ @@ -158,21 +193,35 @@ struct cfq_queue { /* currently allocated requests */ int allocated[2]; /* fifo list of requests in sort_list */ - struct list_head fifo[2]; - /* last time fifo expired */ - unsigned long last_fifo_expire; - - int key_type; - - unsigned long service_start; - unsigned long service_used; + struct list_head fifo; - unsigned int max_rate; + unsigned long slice_start; + unsigned long slice_end; + unsigned long slice_left; + unsigned long service_last; /* number of requests that have been handed to the driver */ int in_flight; - /* number of currently allocated requests */ - int alloc_limit[2]; + + /* io prio of this group */ + unsigned short ioprio, org_ioprio; + unsigned short ioprio_class, org_ioprio_class; + + /* whether queue is on rr (or empty) list */ + unsigned on_rr : 1; + /* idle slice, waiting for new request submission */ + unsigned wait_request : 1; + /* set when wait_request gets set, reset on first rq alloc */ + unsigned must_alloc : 1; + /* only gets one must_alloc per slice */ + unsigned must_alloc_slice : 1; + /* idle slice, request added, now waiting to dispatch it */ + unsigned must_dispatch : 1; + /* fifo expire per-slice */ + unsigned fifo_expire : 1; + + unsigned idle_window : 1; + unsigned prio_changed : 1; }; struct cfq_rq { @@ -184,42 +233,17 @@ struct cfq_rq { struct cfq_queue *cfq_queue; struct cfq_io_context *io_context; - unsigned long service_start; - unsigned long queue_start; - - unsigned int in_flight : 1; - unsigned int accounted : 1; - unsigned int is_sync : 1; - unsigned int is_write : 1; + unsigned in_flight : 1; + unsigned accounted : 1; + unsigned is_sync : 1; + unsigned requeued : 1; }; -static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long); +static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int); static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); -static void cfq_update_next_crq(struct cfq_rq *); static 
void cfq_put_cfqd(struct cfq_data *cfqd); -/* - * what the fairness is based on (ie how processes are grouped and - * differentiated) - */ -static inline unsigned long -cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk) -{ - /* - * optimize this so that ->key_type is the offset into the struct - */ - switch (cfqd->key_type) { - case CFQ_KEY_PGID: - return process_group(tsk); - default: - case CFQ_KEY_TGID: - return tsk->tgid; - case CFQ_KEY_UID: - return tsk->uid; - case CFQ_KEY_GID: - return tsk->gid; - } -} +#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) /* * lots of deadline iosched dupes, can be abstracted later... @@ -235,16 +259,12 @@ static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq) if (q->last_merge == crq->request) q->last_merge = NULL; - - cfq_update_next_crq(crq); } static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) { const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); - BUG_ON(!hlist_unhashed(&crq->hash)); - hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); } @@ -257,8 +277,6 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset) struct cfq_rq *crq = list_entry_hash(entry); struct request *__rq = crq->request; - BUG_ON(hlist_unhashed(&crq->hash)); - if (!rq_mergeable(__rq)) { cfq_del_crq_hash(crq); continue; @@ -287,36 +305,16 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) return crq2; if (crq2 == NULL) return crq1; + if (crq1->requeued) + return crq1; + if (crq2->requeued) + return crq2; s1 = crq1->request->sector; s2 = crq2->request->sector; last = cfqd->last_sector; -#if 0 - if (!list_empty(&cfqd->queue->queue_head)) { - struct list_head *entry = &cfqd->queue->queue_head; - unsigned long distance = ~0UL; - struct request *rq; - - while ((entry = entry->prev) != &cfqd->queue->queue_head) { - rq = list_entry_rq(entry); - - if (blk_barrier_rq(rq)) - break; - - if (distance < abs(s1 - rq->sector + rq->nr_sectors)) { - distance = abs(s1 - rq->sector +rq->nr_sectors); - last = rq->sector + rq->nr_sectors; - } - if (distance < abs(s2 - rq->sector + rq->nr_sectors)) { - distance = abs(s2 - rq->sector +rq->nr_sectors); - last = rq->sector + rq->nr_sectors; - } - } - } -#endif - /* * by definition, 1KiB is 2 sectors */ @@ -377,11 +375,13 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_rq *crq_next = NULL, *crq_prev = NULL; struct rb_node *rbnext, *rbprev; - if (!ON_RB(&last->rb_node)) - return NULL; - - if ((rbnext = rb_next(&last->rb_node)) == NULL) + if (ON_RB(&last->rb_node)) + rbnext = rb_next(&last->rb_node); + else { rbnext = rb_first(&cfqq->sort_list); + if (rbnext == &last->rb_node) + rbnext = NULL; + } rbprev = rb_prev(&last->rb_node); @@ -401,67 +401,53 @@ static void cfq_update_next_crq(struct cfq_rq *crq) cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); } -static int cfq_check_sort_rr_list(struct cfq_queue *cfqq) +static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) { - struct list_head *head = &cfqq->cfqd->rr_list; - struct list_head *next, *prev; - - /* - * list might still be ordered - */ - next = cfqq->cfq_list.next; - if (next != head) { - struct cfq_queue *cnext = list_entry_cfqq(next); + struct cfq_data *cfqd = cfqq->cfqd; + struct list_head *list, *entry; - if (cfqq->service_used > cnext->service_used) - return 1; - } + BUG_ON(!cfqq->on_rr); - prev = cfqq->cfq_list.prev; - if (prev != head) { - struct cfq_queue *cprev = list_entry_cfqq(prev); + 
list_del(&cfqq->cfq_list); - if (cfqq->service_used < cprev->service_used) - return 1; + if (cfq_class_rt(cfqq)) + list = &cfqd->cur_rr; + else if (cfq_class_idle(cfqq)) + list = &cfqd->idle_rr; + else { + /* + * if cfqq has requests in flight, don't allow it to be + * found in cfq_set_active_queue before it has finished them. + * this is done to increase fairness between a process that + * has lots of io pending vs one that only generates one + * sporadically or synchronously + */ + if (cfqq->in_flight) + list = &cfqd->busy_rr; + else + list = &cfqd->rr_list[cfqq->ioprio]; } - return 0; -} - -static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue) -{ - struct list_head *entry = &cfqq->cfqd->rr_list; - - if (!cfqq->on_rr) - return; - if (!new_queue && !cfq_check_sort_rr_list(cfqq)) + /* + * if queue was preempted, just add to front to be fair. busy_rr + * isn't sorted. + */ + if (preempted || list == &cfqd->busy_rr) { + list_add(&cfqq->cfq_list, list); return; - - list_del(&cfqq->cfq_list); + } /* - * sort by our mean service_used, sub-sort by in-flight requests + * sort by when queue was last serviced */ - while ((entry = entry->prev) != &cfqq->cfqd->rr_list) { + entry = list; + while ((entry = entry->prev) != list) { struct cfq_queue *__cfqq = list_entry_cfqq(entry); - if (cfqq->service_used > __cfqq->service_used) + if (!__cfqq->service_last) + break; + if (time_before(__cfqq->service_last, cfqq->service_last)) break; - else if (cfqq->service_used == __cfqq->service_used) { - struct list_head *prv; - - while ((prv = entry->prev) != &cfqq->cfqd->rr_list) { - __cfqq = list_entry_cfqq(prv); - - WARN_ON(__cfqq->service_used > cfqq->service_used); - if (cfqq->service_used != __cfqq->service_used) - break; - if (cfqq->in_flight > __cfqq->in_flight) - break; - - entry = prv; - } - } } list_add(&cfqq->cfq_list, entry); @@ -469,28 +455,24 @@ static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue) /* * add to busy list of queues for service, trying to be fair in ordering - * the pending list according to requests serviced + * the pending list according to last request service */ static inline void -cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) +cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue) { - /* - * it's currently on the empty list - */ + BUG_ON(cfqq->on_rr); cfqq->on_rr = 1; cfqd->busy_queues++; - if (time_after(jiffies, cfqq->service_start + cfq_service)) - cfqq->service_used >>= 3; - - cfq_sort_rr_list(cfqq, 1); + cfq_resort_rr_list(cfqq, requeue); } static inline void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - list_move(&cfqq->cfq_list, &cfqd->empty_list); + BUG_ON(!cfqq->on_rr); cfqq->on_rr = 0; + list_move(&cfqq->cfq_list, &cfqd->empty_list); BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; @@ -505,16 +487,17 @@ static inline void cfq_del_crq_rb(struct cfq_rq *crq) if (ON_RB(&crq->rb_node)) { struct cfq_data *cfqd = cfqq->cfqd; + const int sync = crq->is_sync; - BUG_ON(!cfqq->queued[crq->is_sync]); + BUG_ON(!cfqq->queued[sync]); + cfqq->queued[sync]--; cfq_update_next_crq(crq); - cfqq->queued[crq->is_sync]--; rb_erase(&crq->rb_node, &cfqq->sort_list); RB_CLEAR_COLOR(&crq->rb_node); - if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr) + if (cfqq->on_rr && RB_EMPTY(&cfqq->sort_list)) cfq_del_cfqq_rr(cfqd, cfqq); } } @@ -562,7 +545,7 @@ static void cfq_add_crq_rb(struct cfq_rq *crq) rb_insert_color(&crq->rb_node, &cfqq->sort_list); if (!cfqq->on_rr) - cfq_add_cfqq_rr(cfqd, cfqq); + cfq_add_cfqq_rr(cfqd, 
cfqq, crq->requeued); /* * check if this request is a better next-serve candidate @@ -581,11 +564,10 @@ cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) cfq_add_crq_rb(crq); } -static struct request * -cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) +static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) + { - const unsigned long key = cfq_hash_key(cfqd, current); - struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key); + struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid); struct rb_node *n; if (!cfqq) @@ -609,20 +591,23 @@ cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) static void cfq_deactivate_request(request_queue_t *q, struct request *rq) { + struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_rq *crq = RQ_DATA(rq); if (crq) { struct cfq_queue *cfqq = crq->cfq_queue; - if (cfqq->cfqd->cfq_tagged) { - cfqq->service_used--; - cfq_sort_rr_list(cfqq, 0); - } - if (crq->accounted) { crq->accounted = 0; - cfqq->cfqd->rq_in_driver--; + WARN_ON(!cfqd->rq_in_driver); + cfqd->rq_in_driver--; } + if (crq->in_flight) { + crq->in_flight = 0; + WARN_ON(!cfqq->in_flight); + cfqq->in_flight--; + } + crq->requeued = 1; } } @@ -640,11 +625,10 @@ static void cfq_remove_request(request_queue_t *q, struct request *rq) struct cfq_rq *crq = RQ_DATA(rq); if (crq) { - cfq_remove_merge_hints(q, crq); list_del_init(&rq->queuelist); + cfq_del_crq_rb(crq); + cfq_remove_merge_hints(q, crq); - if (crq->cfq_queue) - cfq_del_crq_rb(crq); } } @@ -662,21 +646,15 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) } __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); - if (__rq) { - BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector); - - if (elv_rq_merge_ok(__rq, bio)) { - ret = ELEVATOR_BACK_MERGE; - goto out; - } + if (__rq && elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_BACK_MERGE; + goto out; } __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); - if (__rq) { - if (elv_rq_merge_ok(__rq, bio)) { - ret = ELEVATOR_FRONT_MERGE; - goto out; - } + if (__rq && elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_FRONT_MERGE; + goto out; } return ELEVATOR_NO_MERGE; @@ -709,20 +687,194 @@ static void cfq_merged_requests(request_queue_t *q, struct request *rq, struct request *next) { - struct cfq_rq *crq = RQ_DATA(rq); - struct cfq_rq *cnext = RQ_DATA(next); - cfq_merged_request(q, rq); - if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { - if (time_before(cnext->queue_start, crq->queue_start)) { - list_move(&rq->queuelist, &next->queuelist); - crq->queue_start = cnext->queue_start; + /* + * reposition in fifo if next is older than rq + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(next->start_time, rq->start_time)) + list_move(&rq->queuelist, &next->queuelist); + + cfq_remove_request(q, next); +} + +static inline void +__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + if (cfqq) { + /* + * stop potential idle class queues waiting service + */ + del_timer(&cfqd->idle_class_timer); + + cfqq->slice_start = jiffies; + cfqq->slice_end = 0; + cfqq->slice_left = 0; + cfqq->must_alloc_slice = 0; + cfqq->fifo_expire = 0; + } + + cfqd->active_queue = cfqq; +} + +/* + * 0 + * 0,1 + * 0,1,2 + * 0,1,2,3 + * 0,1,2,3,4 + * 0,1,2,3,4,5 + * 0,1,2,3,4,5,6 + * 0,1,2,3,4,5,6,7 + */ +static int cfq_get_next_prio_level(struct cfq_data *cfqd) +{ + int prio, wrap; + + prio = -1; + wrap = 0; + do { + int p; + + for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; 
p++) { + if (!list_empty(&cfqd->rr_list[p])) { + prio = p; + break; + } + } + + if (prio != -1) + break; + cfqd->cur_prio = 0; + if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { + cfqd->cur_end_prio = 0; + if (wrap) + break; + wrap = 1; } + } while (1); + + if (unlikely(prio == -1)) + return -1; + + BUG_ON(prio >= CFQ_PRIO_LISTS); + + list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); + + cfqd->cur_prio = prio + 1; + if (cfqd->cur_prio > cfqd->cur_end_prio) { + cfqd->cur_end_prio = cfqd->cur_prio; + cfqd->cur_prio = 0; + } + if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) { + cfqd->cur_prio = 0; + cfqd->cur_end_prio = 0; } - cfq_update_next_crq(cnext); - cfq_remove_request(q, next); + return prio; +} + +static void cfq_set_active_queue(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq = NULL; + + /* + * if current list is non-empty, grab first entry. if it is empty, + * get next prio level and grab first entry then if any are spliced + */ + if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) + cfqq = list_entry_cfqq(cfqd->cur_rr.next); + + /* + * if we have idle queues and no rt or be queues had pending + * requests, either allow immediate service if the grace period + * has passed or arm the idle grace timer + */ + if (!cfqq && !list_empty(&cfqd->idle_rr)) { + unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; + + if (time_after_eq(jiffies, end)) + cfqq = list_entry_cfqq(cfqd->idle_rr.next); + else + mod_timer(&cfqd->idle_class_timer, end); + } + + __cfq_set_active_queue(cfqd, cfqq); +} + +/* + * current cfqq expired its slice (or was too idle), select new one + */ +static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfqq) { + unsigned long now = jiffies; + + if (cfqq->wait_request) + del_timer(&cfqd->idle_slice_timer); + + if (!preempted && !cfqq->in_flight) + cfqq->service_last = now; + + cfqq->must_dispatch = 0; + cfqq->wait_request = 0; + + /* + * store what was left of this slice, if the queue idled out + * or was preempted + */ + if (time_after(now, cfqq->slice_end)) + cfqq->slice_left = now - cfqq->slice_end; + else + cfqq->slice_left = 0; + + if (cfqq->on_rr) + cfq_resort_rr_list(cfqq, preempted); + + cfqd->active_queue = NULL; + + if (cfqd->active_cic) { + put_io_context(cfqd->active_cic->ioc); + cfqd->active_cic = NULL; + } + } + + cfqd->dispatch_slice = 0; +} + +static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) + +{ + WARN_ON(!RB_EMPTY(&cfqq->sort_list)); + WARN_ON(cfqq != cfqd->active_queue); + + /* + * idle is disabled, either manually or by past process history + */ + if (!cfqd->cfq_slice_idle) + return 0; + if (!cfqq->idle_window) + return 0; + /* + * task has exited, don't wait + */ + if (cfqd->active_cic && !cfqd->active_cic->ioc->task) + return 0; + + cfqq->wait_request = 1; + cfqq->must_alloc = 1; + + if (!timer_pending(&cfqd->idle_slice_timer)) { + unsigned long slice_left = cfqq->slice_end - 1; + + cfqd->idle_slice_timer.expires = min(jiffies + cfqd->cfq_slice_idle, slice_left); + add_timer(&cfqd->idle_slice_timer); + } + + return 1; } /* @@ -738,31 +890,39 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) struct request *__rq; sector_t last; - cfq_del_crq_rb(crq); - cfq_remove_merge_hints(q, crq); list_del(&crq->request->queuelist); last = cfqd->last_sector; - while ((entry = entry->prev) != head) { - __rq = list_entry_rq(entry); + list_for_each_entry_reverse(__rq, head, queuelist) { + struct cfq_rq *__crq = 
RQ_DATA(__rq); - if (blk_barrier_rq(crq->request)) + if (blk_barrier_rq(__rq)) + break; + if (!blk_fs_request(__rq)) break; - if (!blk_fs_request(crq->request)) + if (__crq->requeued) break; - if (crq->request->sector > __rq->sector) + if (__rq->sector <= crq->request->sector) break; if (__rq->sector > last && crq->request->sector < last) { - last = crq->request->sector; + last = crq->request->sector + crq->request->nr_sectors; break; } + entry = &__rq->queuelist; } cfqd->last_sector = last; + + cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); + + cfq_del_crq_rb(crq); + cfq_remove_merge_hints(q, crq); + crq->in_flight = 1; + crq->requeued = 0; cfqq->in_flight++; - list_add(&crq->request->queuelist, entry); + list_add_tail(&crq->request->queuelist, entry); } /* @@ -771,105 +931,176 @@ static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) { struct cfq_data *cfqd = cfqq->cfqd; - const int reads = !list_empty(&cfqq->fifo[0]); - const int writes = !list_empty(&cfqq->fifo[1]); - unsigned long now = jiffies; + struct request *rq; struct cfq_rq *crq; - if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire)) + if (cfqq->fifo_expire) return NULL; - crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist)); - if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) { - cfqq->last_fifo_expire = now; - return crq; - } + if (!list_empty(&cfqq->fifo)) { + int fifo = cfq_cfqq_sync(cfqq); - crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist)); - if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) { - cfqq->last_fifo_expire = now; - return crq; + crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next)); + rq = crq->request; + if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) { + cfqq->fifo_expire = 1; + return crq; + } } return NULL; } /* - * dispatch a single request from given queue + * Scale schedule slice based on io priority */ +static inline int +cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; + + WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); + + return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio)); +} + static inline void -cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd, - struct cfq_queue *cfqq) +cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - struct cfq_rq *crq; + cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; +} - /* - * follow expired path, else get first next available - */ - if ((crq = cfq_check_fifo(cfqq)) == NULL) { - if (cfqd->find_best_crq) - crq = cfqq->next_crq; - else - crq = rb_entry_crq(rb_first(&cfqq->sort_list)); - } +static inline int +cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + const int base_rq = cfqd->cfq_slice_async_rq; - cfqd->last_sector = crq->request->sector + crq->request->nr_sectors; + WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); - /* - * finally, insert request into driver list - */ - cfq_dispatch_sort(q, crq); + return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); } -static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch) +/* + * get next queue for service + */ +static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force) { - struct cfq_data *cfqd = q->elevator->elevator_data; + unsigned long now = jiffies; struct cfq_queue *cfqq; - struct list_head *entry, *tmp; - int 
queued, busy_queues, first_round; - if (list_empty(&cfqd->rr_list)) - return 0; + cfqq = cfqd->active_queue; + if (!cfqq) + goto new_queue; - queued = 0; - first_round = 1; -restart: - busy_queues = 0; - list_for_each_safe(entry, tmp, &cfqd->rr_list) { - cfqq = list_entry_cfqq(entry); + /* + * slice has expired + */ + if (!cfqq->must_dispatch && time_after(jiffies, cfqq->slice_end)) + goto new_queue; - BUG_ON(RB_EMPTY(&cfqq->sort_list)); + /* + * if queue has requests, dispatch one. if not, check if + * enough slice is left to wait for one + */ + if (!RB_EMPTY(&cfqq->sort_list)) + goto keep_queue; + else if (!force && cfq_cfqq_sync(cfqq) && + time_before(now, cfqq->slice_end)) { + if (cfq_arm_slice_timer(cfqd, cfqq)) + return NULL; + } + +new_queue: + cfq_slice_expired(cfqd, 0); + cfq_set_active_queue(cfqd); +keep_queue: + return cfqd->active_queue; +} + +static int +__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, + int max_dispatch) +{ + int dispatched = 0; + + BUG_ON(RB_EMPTY(&cfqq->sort_list)); + + do { + struct cfq_rq *crq; /* - * first round of queueing, only select from queues that - * don't already have io in-flight + * follow expired path, else get first next available */ - if (first_round && cfqq->in_flight) - continue; + if ((crq = cfq_check_fifo(cfqq)) == NULL) + crq = cfqq->next_crq; + + /* + * finally, insert request into driver dispatch list + */ + cfq_dispatch_sort(cfqd->queue, crq); - cfq_dispatch_request(q, cfqd, cfqq); + cfqd->dispatch_slice++; + dispatched++; - if (!RB_EMPTY(&cfqq->sort_list)) - busy_queues++; + if (!cfqd->active_cic) { + atomic_inc(&crq->io_context->ioc->refcount); + cfqd->active_cic = crq->io_context; + } - queued++; - } + if (RB_EMPTY(&cfqq->sort_list)) + break; + + } while (dispatched < max_dispatch); + + /* + * if slice end isn't set yet, set it. if at least one request was + * sync, use the sync time slice value + */ + if (!cfqq->slice_end) + cfq_set_prio_slice(cfqd, cfqq); + + /* + * expire an async queue immediately if it has used up its slice. idle + * queue always expire after 1 dispatch round. + */ + if ((!cfq_cfqq_sync(cfqq) && + cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || + cfq_class_idle(cfqq)) + cfq_slice_expired(cfqd, 0); + + return dispatched; +} + +static int +cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq; + + if (!cfqd->busy_queues) + return 0; + + cfqq = cfq_select_queue(cfqd, force); + if (cfqq) { + cfqq->wait_request = 0; + cfqq->must_dispatch = 0; + del_timer(&cfqd->idle_slice_timer); + + if (cfq_class_idle(cfqq)) + max_dispatch = 1; - if ((queued < max_dispatch) && (busy_queues || first_round)) { - first_round = 0; - goto restart; + return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); } - return queued; + return 0; } static inline void cfq_account_dispatch(struct cfq_rq *crq) { struct cfq_queue *cfqq = crq->cfq_queue; struct cfq_data *cfqd = cfqq->cfqd; - unsigned long now, elapsed; - if (!blk_fs_request(crq->request)) + if (unlikely(!blk_fs_request(crq->request))) return; /* @@ -879,65 +1110,34 @@ static inline void cfq_account_dispatch(struct cfq_rq *crq) if (crq->accounted) return; - now = jiffies; - if (cfqq->service_start == ~0UL) - cfqq->service_start = now; - - /* - * on drives with tagged command queueing, command turn-around time - * doesn't necessarily reflect the time spent processing this very - * command inside the drive. 
so do the accounting differently there, - * by just sorting on the number of requests - */ - if (cfqd->cfq_tagged) { - if (time_after(now, cfqq->service_start + cfq_service)) { - cfqq->service_start = now; - cfqq->service_used /= 10; - } - - cfqq->service_used++; - cfq_sort_rr_list(cfqq, 0); - } - - elapsed = now - crq->queue_start; - if (elapsed > max_elapsed_dispatch) - max_elapsed_dispatch = elapsed; - crq->accounted = 1; - crq->service_start = now; - - if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) { - cfqq->cfqd->cfq_tagged = 1; - printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG); - } + cfqd->rq_in_driver++; } static inline void cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) { struct cfq_data *cfqd = cfqq->cfqd; + unsigned long now; if (!crq->accounted) return; + now = jiffies; + WARN_ON(!cfqd->rq_in_driver); cfqd->rq_in_driver--; - if (!cfqd->cfq_tagged) { - unsigned long now = jiffies; - unsigned long duration = now - crq->service_start; + if (!cfq_class_idle(cfqq)) + cfqd->last_end_request = now; - if (time_after(now, cfqq->service_start + cfq_service)) { - cfqq->service_start = now; - cfqq->service_used >>= 3; - } - - cfqq->service_used += duration; - cfq_sort_rr_list(cfqq, 0); - - if (duration > max_elapsed_crq) - max_elapsed_crq = duration; + if (!cfqq->in_flight && cfqq->on_rr) { + cfqq->service_last = now; + cfq_resort_rr_list(cfqq, 0); } + + if (crq->is_sync) + crq->io_context->last_end_request = now; } static struct request *cfq_next_request(request_queue_t *q) @@ -950,7 +1150,15 @@ static struct request *cfq_next_request(request_queue_t *q) dispatch: rq = list_entry_rq(q->queue_head.next); - if ((crq = RQ_DATA(rq)) != NULL) { + crq = RQ_DATA(rq); + if (crq) { + /* + * if idle window is disabled, allow queue buildup + */ + if (!crq->in_flight && !crq->cfq_queue->idle_window && + cfqd->rq_in_driver >= cfqd->cfq_max_depth) + return NULL; + cfq_remove_merge_hints(q, crq); cfq_account_dispatch(crq); } @@ -958,7 +1166,7 @@ static struct request *cfq_next_request(request_queue_t *q) return rq; } - if (cfq_dispatch_requests(q, cfqd->cfq_quantum)) + if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0)) goto dispatch; return NULL; @@ -972,14 +1180,22 @@ static struct request *cfq_next_request(request_queue_t *q) */ static void cfq_put_queue(struct cfq_queue *cfqq) { - BUG_ON(!atomic_read(&cfqq->ref)); + struct cfq_data *cfqd = cfqq->cfqd; + + BUG_ON(atomic_read(&cfqq->ref) <= 0); if (!atomic_dec_and_test(&cfqq->ref)) return; BUG_ON(rb_first(&cfqq->sort_list)); + BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); BUG_ON(cfqq->on_rr); + if (unlikely(cfqd->active_queue == cfqq)) { + cfq_slice_expired(cfqd, 0); + kblockd_schedule_work(&cfqd->unplug_work); + } + cfq_put_cfqd(cfqq->cfqd); /* @@ -991,7 +1207,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq) } static inline struct cfq_queue * -__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) +__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, const int hashval) { struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; struct hlist_node *entry, *next; @@ -1007,94 +1223,220 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) } static struct cfq_queue * -cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key) +cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key) { return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT)); } -static inline void -cfq_rehash_cfqq(struct cfq_data *cfqd, struct 
cfq_queue **cfqq, - struct cfq_io_context *cic) +static void cfq_free_io_context(struct cfq_io_context *cic) { - unsigned long hashkey = cfq_hash_key(cfqd, current); - unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT); - struct cfq_queue *__cfqq; - unsigned long flags; + struct cfq_io_context *__cic; + struct list_head *entry, *next; - spin_lock_irqsave(cfqd->queue->queue_lock, flags); - - hlist_del(&(*cfqq)->cfq_hash); - - __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval); - if (!__cfqq || __cfqq == *cfqq) { - __cfqq = *cfqq; - hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); - __cfqq->key_type = cfqd->key_type; - } else { - atomic_inc(&__cfqq->ref); - cic->cfqq = __cfqq; - cfq_put_queue(*cfqq); - *cfqq = __cfqq; + list_for_each_safe(entry, next, &cic->list) { + __cic = list_entry(entry, struct cfq_io_context, list); + kmem_cache_free(cfq_ioc_pool, __cic); } - cic->cfqq = __cfqq; - spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); + kmem_cache_free(cfq_ioc_pool, cic); } -static void cfq_free_io_context(struct cfq_io_context *cic) +/* + * Called with interrupts disabled + */ +static void cfq_exit_single_io_context(struct cfq_io_context *cic) { - kmem_cache_free(cfq_ioc_pool, cic); + struct cfq_data *cfqd = cic->cfqq->cfqd; + request_queue_t *q = cfqd->queue; + + WARN_ON(!irqs_disabled()); + + spin_lock(q->queue_lock); + + if (unlikely(cic->cfqq == cfqd->active_queue)) { + cfq_slice_expired(cfqd, 0); + kblockd_schedule_work(&cfqd->unplug_work); + } + + cfq_put_queue(cic->cfqq); + cic->cfqq = NULL; + spin_unlock(q->queue_lock); } /* - * locking hierarchy is: io_context lock -> queue locks + * Another task may update the task cic list, if it is doing a queue lookup + * on its behalf. cfq_cic_lock excludes such concurrent updates */ static void cfq_exit_io_context(struct cfq_io_context *cic) { - struct cfq_queue *cfqq = cic->cfqq; - struct list_head *entry = &cic->list; - request_queue_t *q; + struct cfq_io_context *__cic; + struct list_head *entry; unsigned long flags; + local_irq_save(flags); + /* * put the reference this task is holding to the various queues */ - spin_lock_irqsave(&cic->ioc->lock, flags); - while ((entry = cic->list.next) != &cic->list) { - struct cfq_io_context *__cic; - + list_for_each(entry, &cic->list) { __cic = list_entry(entry, struct cfq_io_context, list); - list_del(entry); - - q = __cic->cfqq->cfqd->queue; - spin_lock(q->queue_lock); - cfq_put_queue(__cic->cfqq); - spin_unlock(q->queue_lock); + cfq_exit_single_io_context(__cic); } - q = cfqq->cfqd->queue; - spin_lock(q->queue_lock); - cfq_put_queue(cfqq); - spin_unlock(q->queue_lock); - - cic->cfqq = NULL; - spin_unlock_irqrestore(&cic->ioc->lock, flags); + cfq_exit_single_io_context(cic); + local_irq_restore(flags); } -static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) +static struct cfq_io_context * +cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask) { - struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags); + struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); if (cic) { - cic->dtor = cfq_free_io_context; - cic->exit = cfq_exit_io_context; INIT_LIST_HEAD(&cic->list); cic->cfqq = NULL; + cic->key = NULL; + cic->last_end_request = jiffies; + cic->ttime_total = 0; + cic->ttime_samples = 0; + cic->ttime_mean = 0; + cic->dtor = cfq_free_io_context; + cic->exit = cfq_exit_io_context; } return cic; } +static void cfq_init_prio_data(struct cfq_queue *cfqq) +{ + struct task_struct *tsk = current; + int ioprio_class; + + if 
(!cfqq->prio_changed) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, place us in the middle of the BE classes + */ + cfqq->ioprio = task_nice_ioprio(tsk); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_RT: + cfqq->ioprio = task_ioprio(tsk); + cfqq->ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + cfqq->ioprio = task_ioprio(tsk); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; + cfqq->ioprio = 7; + cfqq->idle_window = 0; + break; + } + + /* + * keep track of original prio settings in case we have to temporarily + * elevate the priority of this queue + */ + cfqq->org_ioprio = cfqq->ioprio; + cfqq->org_ioprio_class = cfqq->ioprio_class; + + if (cfqq->on_rr) + cfq_resort_rr_list(cfqq, 0); + + cfqq->prio_changed = 0; +} + +static inline void changed_ioprio(struct cfq_queue *cfqq) +{ + if (cfqq) { + struct cfq_data *cfqd = cfqq->cfqd; + + spin_lock(cfqd->queue->queue_lock); + cfqq->prio_changed = 1; + cfq_init_prio_data(cfqq); + spin_unlock(cfqd->queue->queue_lock); + } +} + +/* + * callback from sys_ioprio_set, irqs are disabled + */ +static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) +{ + struct cfq_io_context *cic = ioc->cic; + + changed_ioprio(cic->cfqq); + + list_for_each_entry(cic, &cic->list, list) + changed_ioprio(cic->cfqq); + + return 0; +} + +static struct cfq_queue * +cfq_get_queue(struct cfq_data *cfqd, unsigned int key, int gfp_mask) +{ + const int hashval = hash_long(key, CFQ_QHASH_SHIFT); + struct cfq_queue *cfqq, *new_cfqq = NULL; + +retry: + cfqq = __cfq_find_cfq_hash(cfqd, key, hashval); + + if (!cfqq) { + if (new_cfqq) { + cfqq = new_cfqq; + new_cfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + spin_unlock_irq(cfqd->queue->queue_lock); + new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); + spin_lock_irq(cfqd->queue->queue_lock); + goto retry; + } else { + cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); + if (!cfqq) + goto out; + } + + memset(cfqq, 0, sizeof(*cfqq)); + + INIT_HLIST_NODE(&cfqq->cfq_hash); + INIT_LIST_HEAD(&cfqq->cfq_list); + RB_CLEAR_ROOT(&cfqq->sort_list); + INIT_LIST_HEAD(&cfqq->fifo); + + cfqq->key = key; + hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); + atomic_set(&cfqq->ref, 0); + cfqq->cfqd = cfqd; + atomic_inc(&cfqd->ref); + cfqq->service_last = 0; + /* + * set ->slice_left to allow preemption for a new process + */ + cfqq->slice_left = 2 * cfqd->cfq_slice_idle; + cfqq->idle_window = 1; + cfqq->ioprio = -1; + cfqq->ioprio_class = -1; + cfqq->prio_changed = 1; + } + + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + + atomic_inc(&cfqq->ref); +out: + WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); + return cfqq; +} + /* * Setup general io context and cfq io context. 
There can be several cfq * io contexts per general io context, if this process is doing io to more @@ -1102,39 +1444,39 @@ static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) * cfqq, so we don't need to worry about it disappearing */ static struct cfq_io_context * -cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) +cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask) { - struct cfq_data *cfqd = (*cfqq)->cfqd; - struct cfq_queue *__cfqq = *cfqq; + struct io_context *ioc = NULL; struct cfq_io_context *cic; - struct io_context *ioc; - might_sleep_if(gfp_flags & __GFP_WAIT); + might_sleep_if(gfp_mask & __GFP_WAIT); - ioc = get_io_context(gfp_flags); + ioc = get_io_context(gfp_mask); if (!ioc) return NULL; if ((cic = ioc->cic) == NULL) { - cic = cfq_alloc_io_context(gfp_flags); + cic = cfq_alloc_io_context(cfqd, gfp_mask); if (cic == NULL) goto err; + /* + * manually increment generic io_context usage count, it + * cannot go away since we are already holding one ref to it + */ ioc->cic = cic; + ioc->set_ioprio = cfq_ioc_set_ioprio; cic->ioc = ioc; - cic->cfqq = __cfqq; - atomic_inc(&__cfqq->ref); + cic->key = cfqd; + atomic_inc(&cfqd->ref); } else { struct cfq_io_context *__cic; - unsigned long flags; /* - * since the first cic on the list is actually the head - * itself, need to check this here or we'll duplicate an - * cic per ioc for no reason + * the first cic on the list is actually the head itself */ - if (cic->cfqq == __cfqq) + if (cic->key == cfqd) goto out; /* @@ -1142,152 +1484,259 @@ cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) * should be ok here, the list will usually not be more than * 1 or a few entries long */ - spin_lock_irqsave(&ioc->lock, flags); list_for_each_entry(__cic, &cic->list, list) { /* * this process is already holding a reference to * this queue, so no need to get one more */ - if (__cic->cfqq == __cfqq) { + if (__cic->key == cfqd) { cic = __cic; - spin_unlock_irqrestore(&ioc->lock, flags); goto out; } } - spin_unlock_irqrestore(&ioc->lock, flags); /* * nope, process doesn't have a cic assoicated with this * cfqq yet. 
get a new one and add to list */ - __cic = cfq_alloc_io_context(gfp_flags); + __cic = cfq_alloc_io_context(cfqd, gfp_mask); if (__cic == NULL) goto err; __cic->ioc = ioc; - __cic->cfqq = __cfqq; - atomic_inc(&__cfqq->ref); - spin_lock_irqsave(&ioc->lock, flags); + __cic->key = cfqd; + atomic_inc(&cfqd->ref); list_add(&__cic->list, &cic->list); - spin_unlock_irqrestore(&ioc->lock, flags); - cic = __cic; - *cfqq = __cfqq; } out: - /* - * if key_type has been changed on the fly, we lazily rehash - * each queue at lookup time - */ - if ((*cfqq)->key_type != cfqd->key_type) - cfq_rehash_cfqq(cfqd, cfqq, cic); - return cic; err: put_io_context(ioc); return NULL; } -static struct cfq_queue * -__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask) +static void +cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) { - const int hashval = hash_long(key, CFQ_QHASH_SHIFT); - struct cfq_queue *cfqq, *new_cfqq = NULL; + unsigned long elapsed, ttime; -retry: - cfqq = __cfq_find_cfq_hash(cfqd, key, hashval); + /* + * if this context already has stuff queued, thinktime is from + * last queue not last end + */ +#if 0 + if (time_after(cic->last_end_request, cic->last_queue)) + elapsed = jiffies - cic->last_end_request; + else + elapsed = jiffies - cic->last_queue; +#else + elapsed = jiffies - cic->last_end_request; +#endif - if (!cfqq) { - if (new_cfqq) { - cfqq = new_cfqq; - new_cfqq = NULL; - } else { - spin_unlock_irq(cfqd->queue->queue_lock); - new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); - spin_lock_irq(cfqd->queue->queue_lock); + ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); - if (!new_cfqq && !(gfp_mask & __GFP_WAIT)) - goto out; - - goto retry; - } + cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; + cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; + cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; +} - memset(cfqq, 0, sizeof(*cfqq)); +#define sample_valid(samples) ((samples) > 80) - INIT_HLIST_NODE(&cfqq->cfq_hash); - INIT_LIST_HEAD(&cfqq->cfq_list); - RB_CLEAR_ROOT(&cfqq->sort_list); - INIT_LIST_HEAD(&cfqq->fifo[0]); - INIT_LIST_HEAD(&cfqq->fifo[1]); +/* + * Disable idle window if the process thinks too long or seeks so much that + * it doesn't matter + */ +static void +cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_io_context *cic) +{ + int enable_idle = cfqq->idle_window; - cfqq->key = key; - hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); - atomic_set(&cfqq->ref, 0); - cfqq->cfqd = cfqd; - atomic_inc(&cfqd->ref); - cfqq->key_type = cfqd->key_type; - cfqq->service_start = ~0UL; + if (!cic->ioc->task || !cfqd->cfq_slice_idle) + enable_idle = 0; + else if (sample_valid(cic->ttime_samples)) { + if (cic->ttime_mean > cfqd->cfq_slice_idle) + enable_idle = 0; + else + enable_idle = 1; } - if (new_cfqq) - kmem_cache_free(cfq_pool, new_cfqq); + cfqq->idle_window = enable_idle; +} - atomic_inc(&cfqq->ref); -out: - WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); - return cfqq; + +/* + * Check if new_cfqq should preempt the currently active queue. Return 0 for + * no or if we aren't sure, a 1 will cause a preempt. 
+ */ +static int +cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + struct cfq_rq *crq) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfq_class_idle(new_cfqq)) + return 0; + + if (!cfqq) + return 1; + + if (cfq_class_idle(cfqq)) + return 1; + if (!new_cfqq->wait_request) + return 0; + /* + * if it doesn't have slice left, forget it + */ + if (new_cfqq->slice_left < cfqd->cfq_slice_idle) + return 0; + if (crq->is_sync && !cfq_cfqq_sync(cfqq)) + return 1; + + return 0; +} + +/* + * cfqq preempts the active queue. if we allowed preempt with no slice left, + * let it have half of its nominal slice. + */ +static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct cfq_queue *__cfqq, *next; + + list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list) + cfq_resort_rr_list(__cfqq, 1); + + if (!cfqq->slice_left) + cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2; + + cfqq->slice_end = cfqq->slice_left + jiffies; + cfq_slice_expired(cfqd, 1); + __cfq_set_active_queue(cfqd, cfqq); +} + +/* + * should really be a ll_rw_blk.c helper + */ +static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + request_queue_t *q = cfqd->queue; + + if (!blk_queue_plugged(q)) + q->request_fn(q); + else + __generic_unplug_device(q); +} + +/* + * Called when a new fs request (crq) is added (to cfqq). Check if there's + * something we should do about it + */ +static void +cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_rq *crq) +{ + const int sync = crq->is_sync; + + cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); + + if (sync) { + struct cfq_io_context *cic = crq->io_context; + + cfq_update_io_thinktime(cfqd, cic); + cfq_update_idle_window(cfqd, cfqq, cic); + + cic->last_queue = jiffies; + } + + if (cfqq == cfqd->active_queue) { + /* + * if we are waiting for a request for this queue, let it rip + * immediately and flag that we must not expire this queue + * just now + */ + if (cfqq->wait_request) { + cfqq->must_dispatch = 1; + del_timer(&cfqd->idle_slice_timer); + cfq_start_queueing(cfqd, cfqq); + } + } else if (cfq_should_preempt(cfqd, cfqq, crq)) { + /* + * not the active queue - expire current slice if it is + * idle and has expired it's mean thinktime or this new queue + * has some old slice time left and is of higher priority + */ + cfq_preempt_queue(cfqd, cfqq); + cfqq->must_dispatch = 1; + cfq_start_queueing(cfqd, cfqq); + } } -static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq) +static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) { - crq->is_sync = 0; - if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE) - crq->is_sync = 1; + struct cfq_rq *crq = RQ_DATA(rq); + struct cfq_queue *cfqq = crq->cfq_queue; + + cfq_init_prio_data(cfqq); cfq_add_crq_rb(crq); - crq->queue_start = jiffies; - list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]); + list_add_tail(&rq->queuelist, &cfqq->fifo); + + if (rq_mergeable(rq)) { + cfq_add_crq_hash(cfqd, crq); + + if (!cfqd->queue->last_merge) + cfqd->queue->last_merge = rq; + } + + cfq_crq_enqueued(cfqd, cfqq, crq); } static void cfq_insert_request(request_queue_t *q, struct request *rq, int where) { struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_rq *crq = RQ_DATA(rq); switch (where) { case ELEVATOR_INSERT_BACK: - while (cfq_dispatch_requests(q, cfqd->cfq_quantum)) + while (cfq_dispatch_requests(q, INT_MAX, 1)) ; list_add_tail(&rq->queuelist, 
&q->queue_head); + /* + * If we were idling with pending requests on + * inactive cfqqs, force dispatching will + * remove the idle timer and the queue won't + * be kicked by __make_request() afterward. + * Kick it here. + */ + kblockd_schedule_work(&cfqd->unplug_work); break; case ELEVATOR_INSERT_FRONT: list_add(&rq->queuelist, &q->queue_head); break; case ELEVATOR_INSERT_SORT: BUG_ON(!blk_fs_request(rq)); - cfq_enqueue(cfqd, crq); + cfq_enqueue(cfqd, rq); break; default: printk("%s: bad insert point %d\n", __FUNCTION__,where); return; } +} - if (rq_mergeable(rq)) { - cfq_add_crq_hash(cfqd, crq); - - if (!q->last_merge) - q->last_merge = rq; - } +static inline int cfq_pending_requests(struct cfq_data *cfqd) +{ + return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues; } static int cfq_queue_empty(request_queue_t *q) { struct cfq_data *cfqd = q->elevator->elevator_data; - return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list); + return !cfq_pending_requests(cfqd); } static void cfq_completed_request(request_queue_t *q, struct request *rq) @@ -1332,51 +1781,132 @@ cfq_latter_request(request_queue_t *q, struct request *rq) return NULL; } -static int cfq_may_queue(request_queue_t *q, int rw) +/* + * we temporarily boost lower priority queues if they are holding fs exclusive + * resources. they are boosted to normal prio (CLASS_BE/4) + */ +static void cfq_prio_boost(struct cfq_queue *cfqq) { - struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_queue *cfqq; - int ret = ELV_MQUEUE_MAY; + const int ioprio_class = cfqq->ioprio_class; + const int ioprio = cfqq->ioprio; - if (current->flags & PF_MEMALLOC) - return ELV_MQUEUE_MAY; + if (has_fs_excl()) { + /* + * boost idle prio on transactions that would lock out other + * users of the filesystem + */ + if (cfq_class_idle(cfqq)) + cfqq->ioprio_class = IOPRIO_CLASS_BE; + if (cfqq->ioprio > IOPRIO_NORM) + cfqq->ioprio = IOPRIO_NORM; + } else { + /* + * check if we need to unboost the queue + */ + if (cfqq->ioprio_class != cfqq->org_ioprio_class) + cfqq->ioprio_class = cfqq->org_ioprio_class; + if (cfqq->ioprio != cfqq->org_ioprio) + cfqq->ioprio = cfqq->org_ioprio; + } - cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current)); - if (cfqq) { - int limit = cfqd->max_queued; + /* + * refile between round-robin lists if we moved the priority class + */ + if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) && + cfqq->on_rr) + cfq_resort_rr_list(cfqq, 0); +} - if (cfqq->allocated[rw] < cfqd->cfq_queued) - return ELV_MQUEUE_MUST; +static inline pid_t cfq_queue_pid(struct task_struct *task, int rw) +{ + if (rw == READ || process_sync(task)) + return task->pid; - if (cfqd->busy_queues) - limit = q->nr_requests / cfqd->busy_queues; + return CFQ_KEY_ASYNC; +} - if (limit < cfqd->cfq_queued) - limit = cfqd->cfq_queued; - else if (limit > cfqd->max_queued) - limit = cfqd->max_queued; +static inline int +__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct task_struct *task, int rw) +{ + if (cfqq->wait_request && cfqq->must_alloc) + return ELV_MQUEUE_MUST; - if (cfqq->allocated[rw] >= limit) { - if (limit > cfqq->alloc_limit[rw]) - cfqq->alloc_limit[rw] = limit; + return ELV_MQUEUE_MAY; +#if 0 + if (!cfqq || task->flags & PF_MEMALLOC) + return ELV_MQUEUE_MAY; + if (!cfqq->allocated[rw] || cfqq->must_alloc) { + if (cfqq->wait_request) + return ELV_MQUEUE_MUST; - ret = ELV_MQUEUE_NO; + /* + * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we + * can quickly flood the queue with writes from a 
single task + */ + if (rw == READ || !cfqq->must_alloc_slice) { + cfqq->must_alloc_slice = 1; + return ELV_MQUEUE_MUST; } + + return ELV_MQUEUE_MAY; } + if (cfq_class_idle(cfqq)) + return ELV_MQUEUE_NO; + if (cfqq->allocated[rw] >= cfqd->max_queued) { + struct io_context *ioc = get_io_context(GFP_ATOMIC); + int ret = ELV_MQUEUE_NO; - return ret; + if (ioc && ioc->nr_batch_requests) + ret = ELV_MQUEUE_MAY; + + put_io_context(ioc); + return ret; + } + + return ELV_MQUEUE_MAY; +#endif +} + +static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; + struct cfq_queue *cfqq; + + /* + * don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be queued. + * so just lookup a possibly existing queue, or return 'may queue' + * if that fails + */ + cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw)); + if (cfqq) { + cfq_init_prio_data(cfqq); + cfq_prio_boost(cfqq); + + return __cfq_may_queue(cfqd, cfqq, tsk, rw); + } + + return ELV_MQUEUE_MAY; } static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) { + struct cfq_data *cfqd = q->elevator->elevator_data; struct request_list *rl = &q->rq; - const int write = waitqueue_active(&rl->wait[WRITE]); - const int read = waitqueue_active(&rl->wait[READ]); - if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ]) - wake_up(&rl->wait[READ]); - if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE]) - wake_up(&rl->wait[WRITE]); + if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { + smp_mb(); + if (waitqueue_active(&rl->wait[READ])) + wake_up(&rl->wait[READ]); + } + + if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { + smp_mb(); + if (waitqueue_active(&rl->wait[WRITE])) + wake_up(&rl->wait[WRITE]); + } } /* @@ -1389,69 +1919,59 @@ static void cfq_put_request(request_queue_t *q, struct request *rq) if (crq) { struct cfq_queue *cfqq = crq->cfq_queue; + const int rw = rq_data_dir(rq); - BUG_ON(q->last_merge == rq); - BUG_ON(!hlist_unhashed(&crq->hash)); - - if (crq->io_context) - put_io_context(crq->io_context->ioc); + BUG_ON(!cfqq->allocated[rw]); + cfqq->allocated[rw]--; - BUG_ON(!cfqq->allocated[crq->is_write]); - cfqq->allocated[crq->is_write]--; + put_io_context(crq->io_context->ioc); mempool_free(crq, cfqd->crq_pool); rq->elevator_private = NULL; - smp_mb(); cfq_check_waiters(q, cfqq); cfq_put_queue(cfqq); } } /* - * Allocate cfq data structures associated with this request. A queue and + * Allocate cfq data structures associated with this request. 
*/ -static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +static int +cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, + int gfp_mask) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_io_context *cic; const int rw = rq_data_dir(rq); - struct cfq_queue *cfqq, *saved_cfqq; + struct cfq_queue *cfqq; struct cfq_rq *crq; unsigned long flags; might_sleep_if(gfp_mask & __GFP_WAIT); + cic = cfq_get_io_context(cfqd, cfq_queue_pid(current, rw), gfp_mask); + spin_lock_irqsave(q->queue_lock, flags); - cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask); - if (!cfqq) - goto out_lock; + if (!cic) + goto queue_fail; + + if (!cic->cfqq) { + cfqq = cfq_get_queue(cfqd, current->pid, gfp_mask); + if (!cfqq) + goto queue_fail; -repeat: - if (cfqq->allocated[rw] >= cfqd->max_queued) - goto out_lock; + cic->cfqq = cfqq; + } else + cfqq = cic->cfqq; cfqq->allocated[rw]++; + cfqq->must_alloc = 0; + cfqd->rq_starved = 0; + atomic_inc(&cfqq->ref); spin_unlock_irqrestore(q->queue_lock, flags); - /* - * if hashing type has changed, the cfq_queue might change here. - */ - saved_cfqq = cfqq; - cic = cfq_get_io_context(&cfqq, gfp_mask); - if (!cic) - goto err; - - /* - * repeat allocation checks on queue change - */ - if (unlikely(saved_cfqq != cfqq)) { - spin_lock_irqsave(q->queue_lock, flags); - saved_cfqq->allocated[rw]--; - goto repeat; - } - crq = mempool_alloc(cfqd->crq_pool, gfp_mask); if (crq) { RB_CLEAR(&crq->rb_node); @@ -1460,24 +1980,130 @@ static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask) INIT_HLIST_NODE(&crq->hash); crq->cfq_queue = cfqq; crq->io_context = cic; - crq->service_start = crq->queue_start = 0; - crq->in_flight = crq->accounted = crq->is_sync = 0; - crq->is_write = rw; + crq->in_flight = crq->accounted = 0; + crq->is_sync = (rw == READ || process_sync(current)); + crq->requeued = 0; rq->elevator_private = crq; - cfqq->alloc_limit[rw] = 0; return 0; } - put_io_context(cic->ioc); -err: spin_lock_irqsave(q->queue_lock, flags); cfqq->allocated[rw]--; + if (!(cfqq->allocated[0] + cfqq->allocated[1])) + cfqq->must_alloc = 1; cfq_put_queue(cfqq); -out_lock: +queue_fail: + if (cic) + put_io_context(cic->ioc); + /* + * mark us rq allocation starved. we need to kickstart the process + * ourselves if there are no pending requests that can do it for us. 
+ * that would be an extremely rare OOM situation + */ + cfqd->rq_starved = 1; + kblockd_schedule_work(&cfqd->unplug_work); spin_unlock_irqrestore(q->queue_lock, flags); return 1; } +static void cfq_kick_queue(void *data) +{ + request_queue_t *q = data; + struct cfq_data *cfqd = q->elevator->elevator_data; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + + if (cfqd->rq_starved) { + struct request_list *rl = &q->rq; + + /* + * we aren't guaranteed to get a request after this, but we + * have to be opportunistic + */ + smp_mb(); + if (waitqueue_active(&rl->wait[READ])) + wake_up(&rl->wait[READ]); + if (waitqueue_active(&rl->wait[WRITE])) + wake_up(&rl->wait[WRITE]); + } + + blk_remove_plug(q); + q->request_fn(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +/* + * Timer running if the active_queue is currently idling inside its time slice + */ +static void cfq_idle_slice_timer(unsigned long data) +{ + struct cfq_data *cfqd = (struct cfq_data *) data; + struct cfq_queue *cfqq; + unsigned long flags; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + if ((cfqq = cfqd->active_queue) != NULL) { + unsigned long now = jiffies; + + /* + * expired + */ + if (time_after(now, cfqq->slice_end)) + goto expire; + + /* + * only expire and reinvoke request handler, if there are + * other queues with pending requests + */ + if (!cfq_pending_requests(cfqd)) { + cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); + add_timer(&cfqd->idle_slice_timer); + goto out_cont; + } + + /* + * not expired and it has a request pending, let it dispatch + */ + if (!RB_EMPTY(&cfqq->sort_list)) { + cfqq->must_dispatch = 1; + goto out_kick; + } + } +expire: + cfq_slice_expired(cfqd, 0); +out_kick: + if (cfq_pending_requests(cfqd)) + kblockd_schedule_work(&cfqd->unplug_work); +out_cont: + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +/* + * Timer running if an idle class queue is waiting for service + */ +static void cfq_idle_class_timer(unsigned long data) +{ + struct cfq_data *cfqd = (struct cfq_data *) data; + unsigned long flags, end; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + /* + * race with a non-idle queue, reset timer + */ + end = cfqd->last_end_request + CFQ_IDLE_GRACE; + if (!time_after_eq(jiffies, end)) { + cfqd->idle_class_timer.expires = end; + add_timer(&cfqd->idle_class_timer); + } else + kblockd_schedule_work(&cfqd->unplug_work); + + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + + static void cfq_put_cfqd(struct cfq_data *cfqd) { request_queue_t *q = cfqd->queue; @@ -1485,6 +2111,8 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) if (!atomic_dec_and_test(&cfqd->ref)) return; + blk_sync_queue(q); + blk_put_queue(q); mempool_destroy(cfqd->crq_pool); @@ -1495,7 +2123,11 @@ static void cfq_put_cfqd(struct cfq_data *cfqd) static void cfq_exit_queue(elevator_t *e) { - cfq_put_cfqd(e->elevator_data); + struct cfq_data *cfqd = e->elevator_data; + + del_timer_sync(&cfqd->idle_slice_timer); + del_timer_sync(&cfqd->idle_class_timer); + cfq_put_cfqd(cfqd); } static int cfq_init_queue(request_queue_t *q, elevator_t *e) @@ -1508,7 +2140,13 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) return -ENOMEM; memset(cfqd, 0, sizeof(*cfqd)); - INIT_LIST_HEAD(&cfqd->rr_list); + + for (i = 0; i < CFQ_PRIO_LISTS; i++) + INIT_LIST_HEAD(&cfqd->rr_list[i]); + + INIT_LIST_HEAD(&cfqd->busy_rr); + INIT_LIST_HEAD(&cfqd->cur_rr); + INIT_LIST_HEAD(&cfqd->idle_rr); INIT_LIST_HEAD(&cfqd->empty_list); cfqd->crq_hash 
= kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); @@ -1533,25 +2171,32 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) cfqd->queue = q; atomic_inc(&q->refcnt); - /* - * just set it to some high value, we want anyone to be able to queue - * some requests. fairness is handled differently - */ - q->nr_requests = 1024; - cfqd->max_queued = q->nr_requests / 16; + cfqd->max_queued = q->nr_requests / 4; q->nr_batching = cfq_queued; - cfqd->key_type = CFQ_KEY_TGID; - cfqd->find_best_crq = 1; + + init_timer(&cfqd->idle_slice_timer); + cfqd->idle_slice_timer.function = cfq_idle_slice_timer; + cfqd->idle_slice_timer.data = (unsigned long) cfqd; + + init_timer(&cfqd->idle_class_timer); + cfqd->idle_class_timer.function = cfq_idle_class_timer; + cfqd->idle_class_timer.data = (unsigned long) cfqd; + + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); + atomic_set(&cfqd->ref, 1); cfqd->cfq_queued = cfq_queued; cfqd->cfq_quantum = cfq_quantum; - cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r; - cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w; - cfqd->cfq_fifo_batch_expire = cfq_fifo_rate; + cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; + cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; cfqd->cfq_back_max = cfq_back_max; cfqd->cfq_back_penalty = cfq_back_penalty; - + cfqd->cfq_slice[0] = cfq_slice_async; + cfqd->cfq_slice[1] = cfq_slice_sync; + cfqd->cfq_slice_async_rq = cfq_slice_async_rq; + cfqd->cfq_slice_idle = cfq_slice_idle; + cfqd->cfq_max_depth = cfq_max_depth; return 0; out_crqpool: kfree(cfqd->cfq_hash); @@ -1595,7 +2240,6 @@ static int __init cfq_slab_setup(void) return -ENOMEM; } - /* * sysfs parts below --> */ @@ -1620,45 +2264,6 @@ cfq_var_store(unsigned int *var, const char *page, size_t count) return count; } -static ssize_t -cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count) -{ - max_elapsed_dispatch = max_elapsed_crq = 0; - return count; -} - -static ssize_t -cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count) -{ - spin_lock_irq(cfqd->queue->queue_lock); - if (!strncmp(page, "pgid", 4)) - cfqd->key_type = CFQ_KEY_PGID; - else if (!strncmp(page, "tgid", 4)) - cfqd->key_type = CFQ_KEY_TGID; - else if (!strncmp(page, "uid", 3)) - cfqd->key_type = CFQ_KEY_UID; - else if (!strncmp(page, "gid", 3)) - cfqd->key_type = CFQ_KEY_GID; - spin_unlock_irq(cfqd->queue->queue_lock); - return count; -} - -static ssize_t -cfq_read_key_type(struct cfq_data *cfqd, char *page) -{ - ssize_t len = 0; - int i; - - for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) { - if (cfqd->key_type == i) - len += sprintf(page+len, "[%s] ", cfq_key_types[i]); - else - len += sprintf(page+len, "%s ", cfq_key_types[i]); - } - len += sprintf(page+len, "\n"); - return len; -} - #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ { \ @@ -1669,12 +2274,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ } SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0); -SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1); -SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1); -SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1); -SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0); +SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); +SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); 
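/*
 * Illustrative sketch, not taken from this patch: the SHOW_FUNCTION and
 * STORE_FUNCTION invocations above and below expand to small sysfs
 * accessors for each cfq tunable.  The macro bodies are elided by the
 * hunk context here, so this assumes they mirror the visible
 * cfq_var_store() helper, with the trailing __CONV argument selecting a
 * jiffies<->milliseconds conversion.  Roughly, the pair generated for
 * the new slice_idle tunable would look like:
 */
static ssize_t cfq_slice_idle_show(struct cfq_data *cfqd, char *page)
{
	/* __CONV == 1: report the internal jiffies value in milliseconds */
	return sprintf(page, "%u\n", jiffies_to_msecs(cfqd->cfq_slice_idle));
}

static ssize_t cfq_slice_idle_store(struct cfq_data *cfqd,
				    const char *page, size_t count)
{
	unsigned int data;
	int ret = cfq_var_store(&data, page, count);

	/*
	 * the macro invocation also supplies [MIN, MAX] bounds to clamp
	 * the parsed value; here it converts milliseconds back to
	 * jiffies before storing, since __CONV is 1 for slice_idle
	 */
	cfqd->cfq_slice_idle = msecs_to_jiffies(data);
	return ret;
}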
SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); +SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); +SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); +SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); +SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); +SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -1694,12 +2302,15 @@ static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \ } STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0); -STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1); -STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1); -STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1); -STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0); +STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); #undef STORE_FUNCTION static struct cfq_fs_entry cfq_quantum_entry = { @@ -1712,25 +2323,15 @@ static struct cfq_fs_entry cfq_queued_entry = { .show = cfq_queued_show, .store = cfq_queued_store, }; -static struct cfq_fs_entry cfq_fifo_expire_r_entry = { +static struct cfq_fs_entry cfq_fifo_expire_sync_entry = { .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_fifo_expire_r_show, - .store = cfq_fifo_expire_r_store, + .show = cfq_fifo_expire_sync_show, + .store = cfq_fifo_expire_sync_store, }; -static struct cfq_fs_entry cfq_fifo_expire_w_entry = { +static struct cfq_fs_entry cfq_fifo_expire_async_entry = { .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_fifo_expire_w_show, - .store = cfq_fifo_expire_w_store, -}; -static struct cfq_fs_entry cfq_fifo_batch_expire_entry = { - .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_fifo_batch_expire_show, - .store = cfq_fifo_batch_expire_store, -}; -static struct cfq_fs_entry cfq_find_best_entry = { - .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_find_best_show, - .store = cfq_find_best_store, + .show = cfq_fifo_expire_async_show, + .store = cfq_fifo_expire_async_store, }; static struct cfq_fs_entry cfq_back_max_entry = { .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, @@ -1742,27 +2343,43 @@ static struct cfq_fs_entry cfq_back_penalty_entry = { .show = cfq_back_penalty_show, .store = cfq_back_penalty_store, }; -static struct cfq_fs_entry cfq_clear_elapsed_entry = { - .attr = {.name = "clear_elapsed", .mode = S_IWUSR }, - .store = cfq_clear_elapsed, +static struct cfq_fs_entry cfq_slice_sync_entry = { + .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, + .show = 
cfq_slice_sync_show, + .store = cfq_slice_sync_store, }; -static struct cfq_fs_entry cfq_key_type_entry = { - .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_read_key_type, - .store = cfq_set_key_type, +static struct cfq_fs_entry cfq_slice_async_entry = { + .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_async_show, + .store = cfq_slice_async_store, +}; +static struct cfq_fs_entry cfq_slice_async_rq_entry = { + .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_async_rq_show, + .store = cfq_slice_async_rq_store, +}; +static struct cfq_fs_entry cfq_slice_idle_entry = { + .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_idle_show, + .store = cfq_slice_idle_store, +}; +static struct cfq_fs_entry cfq_max_depth_entry = { + .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_max_depth_show, + .store = cfq_max_depth_store, }; - static struct attribute *default_attrs[] = { &cfq_quantum_entry.attr, &cfq_queued_entry.attr, - &cfq_fifo_expire_r_entry.attr, - &cfq_fifo_expire_w_entry.attr, - &cfq_fifo_batch_expire_entry.attr, - &cfq_key_type_entry.attr, - &cfq_find_best_entry.attr, + &cfq_fifo_expire_sync_entry.attr, + &cfq_fifo_expire_async_entry.attr, &cfq_back_max_entry.attr, &cfq_back_penalty_entry.attr, - &cfq_clear_elapsed_entry.attr, + &cfq_slice_sync_entry.attr, + &cfq_slice_async_entry.attr, + &cfq_slice_async_rq_entry.attr, + &cfq_slice_idle_entry.attr, + &cfq_max_depth_entry.attr, NULL, }; @@ -1832,21 +2449,46 @@ static int __init cfq_init(void) { int ret; + /* + * could be 0 on HZ < 1000 setups + */ + if (!cfq_slice_async) + cfq_slice_async = 1; + if (!cfq_slice_idle) + cfq_slice_idle = 1; + if (cfq_slab_setup()) return -ENOMEM; ret = elv_register(&iosched_cfq); - if (!ret) { - __module_get(THIS_MODULE); - return 0; - } + if (ret) + cfq_slab_kill(); - cfq_slab_kill(); return ret; } static void __exit cfq_exit(void) { + struct task_struct *g, *p; + unsigned long flags; + + read_lock_irqsave(&tasklist_lock, flags); + + /* + * iterate each process in the system, removing our io_context + */ + do_each_thread(g, p) { + struct io_context *ioc = p->io_context; + + if (ioc && ioc->cic) { + ioc->cic->exit(ioc->cic); + cfq_free_io_context(ioc->cic); + ioc->cic = NULL; + } + } while_each_thread(g, p); + + read_unlock_irqrestore(&tasklist_lock, flags); + cfq_slab_kill(); elv_unregister(&iosched_cfq); } diff --git a/trunk/drivers/block/deadline-iosched.c b/trunk/drivers/block/deadline-iosched.c index 4bc2fea73273..ff5201e02153 100644 --- a/trunk/drivers/block/deadline-iosched.c +++ b/trunk/drivers/block/deadline-iosched.c @@ -760,7 +760,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq) } static int -deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, + int gfp_mask) { struct deadline_data *dd = q->elevator->elevator_data; struct deadline_rq *drq; diff --git a/trunk/drivers/block/elevator.c b/trunk/drivers/block/elevator.c index f831f08f839c..98f0126a2deb 100644 --- a/trunk/drivers/block/elevator.c +++ b/trunk/drivers/block/elevator.c @@ -486,12 +486,13 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq) return NULL; } -int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, + int gfp_mask) { elevator_t *e = 
q->elevator; if (e->ops->elevator_set_req_fn) - return e->ops->elevator_set_req_fn(q, rq, gfp_mask); + return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask); rq->elevator_private = NULL; return 0; @@ -505,12 +506,12 @@ void elv_put_request(request_queue_t *q, struct request *rq) e->ops->elevator_put_req_fn(q, rq); } -int elv_may_queue(request_queue_t *q, int rw) +int elv_may_queue(request_queue_t *q, int rw, struct bio *bio) { elevator_t *e = q->elevator; if (e->ops->elevator_may_queue_fn) - return e->ops->elevator_may_queue_fn(q, rw); + return e->ops->elevator_may_queue_fn(q, rw, bio); return ELV_MQUEUE_MAY; } diff --git a/trunk/drivers/block/ll_rw_blk.c b/trunk/drivers/block/ll_rw_blk.c index 60e64091de1b..234fdcfbdf01 100644 --- a/trunk/drivers/block/ll_rw_blk.c +++ b/trunk/drivers/block/ll_rw_blk.c @@ -276,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq) rq->errors = 0; rq->rq_status = RQ_ACTIVE; rq->bio = rq->biotail = NULL; + rq->ioprio = 0; rq->buffer = NULL; rq->ref_count = 1; rq->q = q; @@ -1442,11 +1443,7 @@ void __generic_unplug_device(request_queue_t *q) if (!blk_remove_plug(q)) return; - /* - * was plugged, fire request_fn if queue has stuff to do - */ - if (elv_next_request(q)) - q->request_fn(q); + q->request_fn(q); } EXPORT_SYMBOL(__generic_unplug_device); @@ -1776,8 +1773,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) mempool_free(rq, q->rq.rq_pool); } -static inline struct request *blk_alloc_request(request_queue_t *q, int rw, - int gfp_mask) +static inline struct request * +blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -1790,7 +1787,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw, */ rq->flags = rw; - if (!elv_set_request(q, rq, gfp_mask)) + if (!elv_set_request(q, rq, bio, gfp_mask)) return rq; mempool_free(rq, q->rq.rq_pool); @@ -1872,7 +1869,8 @@ static void freed_request(request_queue_t *q, int rw) /* * Get a free request, queue_lock must not be held */ -static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) +static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, + int gfp_mask) { struct request *rq = NULL; struct request_list *rl = &q->rq; @@ -1895,7 +1893,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) } } - switch (elv_may_queue(q, rw)) { + switch (elv_may_queue(q, rw, bio)) { case ELV_MQUEUE_NO: goto rq_starved; case ELV_MQUEUE_MAY: @@ -1920,7 +1918,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) set_queue_congested(q, rw); spin_unlock_irq(q->queue_lock); - rq = blk_alloc_request(q, rw, gfp_mask); + rq = blk_alloc_request(q, rw, bio, gfp_mask); if (!rq) { /* * Allocation failed presumably due to memory. Undo anything @@ -1961,7 +1959,8 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) * No available requests for this queue, unplug the device and wait for some * requests to become available. 
*/ -static struct request *get_request_wait(request_queue_t *q, int rw) +static struct request *get_request_wait(request_queue_t *q, int rw, + struct bio *bio) { DEFINE_WAIT(wait); struct request *rq; @@ -1972,7 +1971,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw) prepare_to_wait_exclusive(&rl->wait[rw], &wait, TASK_UNINTERRUPTIBLE); - rq = get_request(q, rw, GFP_NOIO); + rq = get_request(q, rw, bio, GFP_NOIO); if (!rq) { struct io_context *ioc; @@ -2003,9 +2002,9 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) BUG_ON(rw != READ && rw != WRITE); if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw); + rq = get_request_wait(q, rw, NULL); else - rq = get_request(q, rw, gfp_mask); + rq = get_request(q, rw, NULL, gfp_mask); return rq; } @@ -2333,7 +2332,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req) return; req->rq_status = RQ_INACTIVE; - req->q = NULL; req->rl = NULL; /* @@ -2462,6 +2460,8 @@ static int attempt_merge(request_queue_t *q, struct request *req, req->rq_disk->in_flight--; } + req->ioprio = ioprio_best(req->ioprio, next->ioprio); + __blk_put_request(q, next); return 1; } @@ -2514,11 +2514,13 @@ static int __make_request(request_queue_t *q, struct bio *bio) { struct request *req, *freereq = NULL; int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; + unsigned short prio; sector_t sector; sector = bio->bi_sector; nr_sectors = bio_sectors(bio); cur_nr_sectors = bio_cur_sectors(bio); + prio = bio_prio(bio); rw = bio_data_dir(bio); sync = bio_sync(bio); @@ -2559,6 +2561,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) req->biotail->bi_next = bio; req->biotail = bio; req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_back_merge(q, req)) elv_merged_request(q, req); @@ -2583,6 +2586,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) req->hard_cur_sectors = cur_nr_sectors; req->sector = req->hard_sector = sector; req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_front_merge(q, req)) elv_merged_request(q, req); @@ -2610,7 +2614,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) freereq = NULL; } else { spin_unlock_irq(q->queue_lock); - if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) { + if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) { /* * READA bit set */ @@ -2618,7 +2622,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) if (bio_rw_ahead(bio)) goto end_io; - freereq = get_request_wait(q, rw); + freereq = get_request_wait(q, rw, bio); } goto again; } @@ -2646,6 +2650,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) req->buffer = bio_data(bio); /* see ->buffer comment above */ req->waiting = NULL; req->bio = req->biotail = bio; + req->ioprio = prio; req->rq_disk = bio->bi_bdev->bd_disk; req->start_time = jiffies; @@ -2674,7 +2679,7 @@ static inline void blk_partition_remap(struct bio *bio) if (bdev != bdev->bd_contains) { struct hd_struct *p = bdev->bd_part; - switch (bio->bi_rw) { + switch (bio_data_dir(bio)) { case READ: p->read_sectors += bio_sectors(bio); p->reads++; @@ -2693,6 +2698,7 @@ void blk_finish_queue_drain(request_queue_t *q) { struct request_list *rl = &q->rq; struct request *rq; + int requeued = 0; spin_lock_irq(q->queue_lock); clear_bit(QUEUE_FLAG_DRAIN, 
&q->queue_flags); @@ -2701,9 +2707,13 @@ void blk_finish_queue_drain(request_queue_t *q) rq = list_entry_rq(q->drain_list.next); list_del_init(&rq->queuelist); - __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); + elv_requeue_request(q, rq); + requeued++; } + if (requeued) + q->request_fn(q); + spin_unlock_irq(q->queue_lock); wake_up(&rl->wait[0]); @@ -2900,7 +2910,7 @@ void submit_bio(int rw, struct bio *bio) BIO_BUG_ON(!bio->bi_size); BIO_BUG_ON(!bio->bi_io_vec); - bio->bi_rw = rw; + bio->bi_rw |= rw; if (rw & WRITE) mod_page_state(pgpgout, count); else @@ -3257,8 +3267,11 @@ void exit_io_context(void) struct io_context *ioc; local_irq_save(flags); + task_lock(current); ioc = current->io_context; current->io_context = NULL; + ioc->task = NULL; + task_unlock(current); local_irq_restore(flags); if (ioc->aic && ioc->aic->exit) @@ -3293,12 +3306,12 @@ struct io_context *get_io_context(int gfp_flags) ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); if (ret) { atomic_set(&ret->refcount, 1); - ret->pid = tsk->pid; + ret->task = current; + ret->set_ioprio = NULL; ret->last_waited = jiffies; /* doesn't matter... */ ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; ret->cic = NULL; - spin_lock_init(&ret->lock); local_irq_save(flags); diff --git a/trunk/drivers/net/skge.c b/trunk/drivers/net/skge.c index 3dbb1cb09ed8..30e8d589d167 100644 --- a/trunk/drivers/net/skge.c +++ b/trunk/drivers/net/skge.c @@ -7,7 +7,7 @@ * of the original driver such as link fail-over and link management because * those should be done at higher levels. * - * Copyright (C) 2004, 2005 Stephen Hemminger + * Copyright (C) 2004, Stephen Hemminger * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -42,20 +42,19 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "0.7" +#define DRV_VERSION "0.6" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 #define DEFAULT_RX_RING_SIZE 512 #define MAX_TX_RING_SIZE 1024 #define MAX_RX_RING_SIZE 4096 -#define RX_COPY_THRESHOLD 128 -#define RX_BUF_SIZE 1536 #define PHY_RETRIES 1000 #define ETH_JUMBO_MTU 9000 #define TX_WATCHDOG (5 * HZ) #define NAPI_WEIGHT 64 #define BLINK_HZ (HZ/4) +#define LINK_POLL_HZ (HZ/10) MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); MODULE_AUTHOR("Stephen Hemminger "); @@ -71,17 +70,28 @@ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static const struct pci_device_id skge_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) }, - { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) }, - { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, - { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, - { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, - { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, - { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ - { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, - { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) }, - { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_SYSKONNECT, 
PCI_DEVICE_ID_SYSKONNECT_YU, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_SYSKONNECT, 0x9E00, /* SK-9Exx */ + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_MARVELL, 0x4320, /* Gigabit Ethernet Controller */ + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_MARVELL, 0x5005, /* Marvell (11ab), Belkin */ + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032, + PCI_ANY_ID, PCI_ANY_ID }, + { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064, + PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; MODULE_DEVICE_TABLE(pci, skge_id_table); @@ -89,22 +99,19 @@ MODULE_DEVICE_TABLE(pci, skge_id_table); static int skge_up(struct net_device *dev); static int skge_down(struct net_device *dev); static void skge_tx_clean(struct skge_port *skge); -static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); -static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); static void genesis_get_stats(struct skge_port *skge, u64 *data); static void yukon_get_stats(struct skge_port *skge, u64 *data); static void yukon_init(struct skge_hw *hw, int port); static void yukon_reset(struct skge_hw *hw, int port); static void genesis_mac_init(struct skge_hw *hw, int port); static void genesis_reset(struct skge_hw *hw, int port); -static void genesis_link_up(struct skge_port *skge); -/* Avoid conditionals by using array */ static const int txqaddr[] = { Q_XA1, Q_XA2 }; static const int rxqaddr[] = { Q_R1, Q_R2 }; static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; -static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; /* Don't need to look at whole 16K. * last interesting register is descriptor poll timer. @@ -147,7 +154,7 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, static int wol_supported(const struct skge_hw *hw) { return !((hw->chip_id == CHIP_ID_GENESIS || - (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0))); + (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0))); } static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -163,7 +170,7 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; - if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) + if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) return -EOPNOTSUPP; if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw)) @@ -183,36 +190,6 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return 0; } -/* Determine supported/adverised modes based on hardware. 
- * Note: ethtoool ADVERTISED_xxx == SUPPORTED_xxx - */ -static u32 skge_supported_modes(const struct skge_hw *hw) -{ - u32 supported; - - if (iscopper(hw)) { - supported = SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full - | SUPPORTED_Autoneg| SUPPORTED_TP; - - if (hw->chip_id == CHIP_ID_GENESIS) - supported &= ~(SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full); - - else if (hw->chip_id == CHIP_ID_YUKON) - supported &= ~SUPPORTED_1000baseT_Half; - } else - supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE - | SUPPORTED_Autoneg; - - return supported; -} static int skge_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) @@ -221,13 +198,38 @@ static int skge_get_settings(struct net_device *dev, struct skge_hw *hw = skge->hw; ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = skge_supported_modes(hw); if (iscopper(hw)) { + if (hw->chip_id == CHIP_ID_GENESIS) + ecmd->supported = SUPPORTED_1000baseT_Full + | SUPPORTED_1000baseT_Half + | SUPPORTED_Autoneg | SUPPORTED_TP; + else { + ecmd->supported = SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full + | SUPPORTED_Autoneg| SUPPORTED_TP; + + if (hw->chip_id == CHIP_ID_YUKON) + ecmd->supported &= ~SUPPORTED_1000baseT_Half; + + else if (hw->chip_id == CHIP_ID_YUKON_FE) + ecmd->supported &= ~(SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full); + } + ecmd->port = PORT_TP; ecmd->phy_address = hw->phy_addr; - } else + } else { + ecmd->supported = SUPPORTED_1000baseT_Full + | SUPPORTED_FIBRE + | SUPPORTED_Autoneg; + ecmd->port = PORT_FIBRE; + } ecmd->advertising = skge->advertising; ecmd->autoneg = skge->autoneg; @@ -236,57 +238,65 @@ static int skge_get_settings(struct net_device *dev, return 0; } +static u32 skge_modes(const struct skge_hw *hw) +{ + u32 modes = ADVERTISED_Autoneg + | ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half + | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half + | ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half; + + if (iscopper(hw)) { + modes |= ADVERTISED_TP; + switch(hw->chip_id) { + case CHIP_ID_GENESIS: + modes &= ~(ADVERTISED_100baseT_Full + | ADVERTISED_100baseT_Half + | ADVERTISED_10baseT_Full + | ADVERTISED_10baseT_Half); + break; + + case CHIP_ID_YUKON: + modes &= ~ADVERTISED_1000baseT_Half; + break; + + case CHIP_ID_YUKON_FE: + modes &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full); + break; + } + } else { + modes |= ADVERTISED_FIBRE; + modes &= ~ADVERTISED_1000baseT_Half; + } + return modes; +} + static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct skge_port *skge = netdev_priv(dev); const struct skge_hw *hw = skge->hw; - u32 supported = skge_supported_modes(hw); if (ecmd->autoneg == AUTONEG_ENABLE) { - ecmd->advertising = supported; - skge->duplex = -1; - skge->speed = -1; + if (ecmd->advertising & skge_modes(hw)) + return -EINVAL; } else { - u32 setting; - switch(ecmd->speed) { case SPEED_1000: - if (ecmd->duplex == DUPLEX_FULL) - setting = SUPPORTED_1000baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) - setting = SUPPORTED_1000baseT_Half; - else + if (hw->chip_id == CHIP_ID_YUKON_FE) return -EINVAL; break; case SPEED_100: - if (ecmd->duplex == DUPLEX_FULL) - setting = SUPPORTED_100baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) - setting = 
SUPPORTED_100baseT_Half; - else - return -EINVAL; - break; - case SPEED_10: - if (ecmd->duplex == DUPLEX_FULL) - setting = SUPPORTED_10baseT_Full; - else if (ecmd->duplex == DUPLEX_HALF) - setting = SUPPORTED_10baseT_Half; - else + if (iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS) return -EINVAL; break; default: return -EINVAL; } - - if ((setting & supported) == 0) - return -EINVAL; - - skge->speed = ecmd->speed; - skge->duplex = ecmd->duplex; } skge->autoneg = ecmd->autoneg; + skge->speed = ecmd->speed; + skge->duplex = ecmd->duplex; skge->advertising = ecmd->advertising; if (netif_running(dev)) { @@ -383,7 +393,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; - switch (stringset) { + switch(stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(skge_stats); i++) memcpy(data + i * ETH_GSTRING_LEN, @@ -501,6 +511,14 @@ static int skge_set_rx_csum(struct net_device *dev, u32 data) return 0; } +/* Only Yukon II supports TSO (not implemented yet) */ +static int skge_set_tso(struct net_device *dev, u32 data) +{ + if (data) + return -EOPNOTSUPP; + return 0; +} + static void skge_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *ecmd) { @@ -522,9 +540,9 @@ static int skge_set_pauseparam(struct net_device *dev, skge->autoneg = ecmd->autoneg; if (ecmd->rx_pause && ecmd->tx_pause) skge->flow_control = FLOW_MODE_SYMMETRIC; - else if (ecmd->rx_pause && !ecmd->tx_pause) + else if(ecmd->rx_pause && !ecmd->tx_pause) skge->flow_control = FLOW_MODE_REM_SEND; - else if (!ecmd->rx_pause && ecmd->tx_pause) + else if(!ecmd->rx_pause && ecmd->tx_pause) skge->flow_control = FLOW_MODE_LOC_SEND; else skge->flow_control = FLOW_MODE_NONE; @@ -541,6 +559,8 @@ static inline u32 hwkhz(const struct skge_hw *hw) { if (hw->chip_id == CHIP_ID_GENESIS) return 53215; /* or: 53.125 MHz */ + else if (hw->chip_id == CHIP_ID_YUKON_EC) + return 125000; /* or: 125.000 MHz */ else return 78215; /* or: 78.125 MHz */ } @@ -623,18 +643,30 @@ static int skge_set_coalesce(struct net_device *dev, static void skge_led_on(struct skge_hw *hw, int port) { if (hw->chip_id == CHIP_ID_GENESIS) { - skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); skge_write8(hw, B0_LED, LED_STAT_ON); - skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); - skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); - skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SKGEMAC_REG(port, RX_LED_TST), LED_T_ON); + skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 100); + skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); - /* For Broadcom Phy only */ - xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); + switch (hw->phy_type) { + case SK_PHY_BCOM: + skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, + PHY_B_PEC_LED_ON); + break; + case SK_PHY_LONE: + skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG, + 0x0800); + break; + default: + skge_write8(hw, SKGEMAC_REG(port, TX_LED_TST), LED_T_ON); + skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 100); + skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START); + } } else { - gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); - gm_phy_write(hw, port, PHY_MARV_LED_OVER, + skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_DUP(MO_LED_ON) | PHY_M_LED_MO_10(MO_LED_ON) | PHY_M_LED_MO_100(MO_LED_ON) | @@ -646,17 +678,28 @@ static void skge_led_on(struct skge_hw *hw, int port) static void skge_led_off(struct skge_hw 
*hw, int port) { if (hw->chip_id == CHIP_ID_GENESIS) { - skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_OFF); skge_write8(hw, B0_LED, LED_STAT_OFF); - skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); - skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); + skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 0); + skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_T_OFF); - /* Broadcom only */ - xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); + switch (hw->phy_type) { + case SK_PHY_BCOM: + skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, + PHY_B_PEC_LED_OFF); + break; + case SK_PHY_LONE: + skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG, + PHY_L_LC_LEDT); + break; + default: + skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 0); + skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_T_OFF); + } } else { - gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); - gm_phy_write(hw, port, PHY_MARV_LED_OVER, + skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, PHY_M_LED_MO_DUP(MO_LED_OFF) | PHY_M_LED_MO_10(MO_LED_OFF) | PHY_M_LED_MO_100(MO_LED_OFF) | @@ -687,7 +730,7 @@ static int skge_phys_id(struct net_device *dev, u32 data) { struct skge_port *skge = netdev_priv(dev); - if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) + if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); /* start blinking */ @@ -720,6 +763,8 @@ static struct ethtool_ops skge_ethtool_ops = { .set_pauseparam = skge_set_pauseparam, .get_coalesce = skge_get_coalesce, .set_coalesce = skge_set_coalesce, + .get_tso = ethtool_op_get_tso, + .set_tso = skge_set_tso, .get_sg = ethtool_op_get_sg, .set_sg = skge_set_sg, .get_tx_csum = ethtool_op_get_tx_csum, @@ -748,7 +793,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { e->desc = d; - e->skb = NULL; if (i == ring->count - 1) { e->next = ring->start; d->next_offset = base; @@ -762,23 +806,24 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) return 0; } -static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size) +/* Setup buffer for receiving */ +static inline int skge_rx_alloc(struct skge_port *skge, + struct skge_element *e) { - struct sk_buff *skb = dev_alloc_skb(size); + unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */ + struct skge_rx_desc *rd = e->desc; + struct sk_buff *skb; + u64 map; - if (likely(skb)) { - skb->dev = dev; - skb_reserve(skb, NET_IP_ALIGN); + skb = dev_alloc_skb(bufsize + NET_IP_ALIGN); + if (unlikely(!skb)) { + printk(KERN_DEBUG PFX "%s: out of memory for receive\n", + skge->netdev->name); + return -ENOMEM; } - return skb; -} -/* Allocate and setup a new buffer for receiving */ -static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, - struct sk_buff *skb, unsigned int bufsize) -{ - struct skge_rx_desc *rd = e->desc; - u64 map; + skb->dev = skge->netdev; + skb_reserve(skb, NET_IP_ALIGN); map = pci_map_single(skge->hw->pdev, skb->data, bufsize, PCI_DMA_FROMDEVICE); @@ -796,69 +841,55 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; pci_unmap_addr_set(e, mapaddr, map); pci_unmap_len_set(e, maplen, bufsize); + return 0; } -/* Resume receiving using existing skb, - * Note: DMA address is not changed by chip. 
- * MTU not changed while receiver active. - */ -static void skge_rx_reuse(struct skge_element *e, unsigned int size) -{ - struct skge_rx_desc *rd = e->desc; - - rd->csum2 = 0; - rd->csum2_start = ETH_HLEN; - - wmb(); - - rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; -} - - -/* Free all buffers in receive ring, assumes receiver stopped */ +/* Free all unused buffers in receive ring, assumes receiver stopped */ static void skge_rx_clean(struct skge_port *skge) { struct skge_hw *hw = skge->hw; struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; - e = ring->start; - do { + for (e = ring->to_clean; e != ring->to_use; e = e->next) { struct skge_rx_desc *rd = e->desc; rd->control = 0; - if (e->skb) { - pci_unmap_single(hw->pdev, - pci_unmap_addr(e, mapaddr), - pci_unmap_len(e, maplen), - PCI_DMA_FROMDEVICE); - dev_kfree_skb(e->skb); - e->skb = NULL; - } - } while ((e = e->next) != ring->start); -} + pci_unmap_single(hw->pdev, + pci_unmap_addr(e, mapaddr), + pci_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(e->skb); + e->skb = NULL; + } + ring->to_clean = e; +} /* Allocate buffers for receive ring - * For receive: to_clean is next received frame. + * For receive: to_use is refill location + * to_clean is next received frame. + * + * if (to_use == to_clean) + * then ring all frames in ring need buffers + * if (to_use->next == to_clean) + * then ring all frames in ring have buffers */ static int skge_rx_fill(struct skge_port *skge) { struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; - unsigned int bufsize = skge->rx_buf_size; - - e = ring->start; - do { - struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize); + int ret = 0; - if (!skb) - return -ENOMEM; + for (e = ring->to_use; e->next != ring->to_clean; e = e->next) { + if (skge_rx_alloc(skge, e)) { + ret = 1; + break; + } - skge_rx_setup(skge, e, skb, bufsize); - } while ( (e = e->next) != ring->start); + } + ring->to_use = e; - ring->to_clean = ring->start; - return 0; + return ret; } static void skge_link_up(struct skge_port *skge) @@ -888,50 +919,50 @@ static void skge_link_down(struct skge_port *skge) printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name); } -static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +static u16 skge_xm_phy_read(struct skge_hw *hw, int port, u16 reg) { int i; u16 v; - xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); - v = xm_read16(hw, port, XM_PHY_DATA); + skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + v = skge_xm_read16(hw, port, XM_PHY_DATA); + if (hw->phy_type != SK_PHY_XMAC) { + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (skge_xm_read16(hw, port, XM_MMU_CMD) + & XM_MMU_PHY_RDY) + goto ready; + } - /* Need to wait for external PHY */ - for (i = 0; i < PHY_RETRIES; i++) { - udelay(1); - if (xm_read16(hw, port, XM_MMU_CMD) - & XM_MMU_PHY_RDY) - goto ready; + printk(KERN_WARNING PFX "%s: phy read timed out\n", + hw->dev[port]->name); + return 0; + ready: + v = skge_xm_read16(hw, port, XM_PHY_DATA); } - printk(KERN_WARNING PFX "%s: phy read timed out\n", - hw->dev[port]->name); - return 0; - ready: - v = xm_read16(hw, port, XM_PHY_DATA); - return v; } -static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) { int i; - xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); for (i = 0; i < PHY_RETRIES; i++) { - if (!(xm_read16(hw, port, 
XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) goto ready; - udelay(1); + cpu_relax(); } printk(KERN_WARNING PFX "%s: phy write failed to come ready\n", hw->dev[port]->name); ready: - xm_write16(hw, port, XM_PHY_DATA, val); + skge_xm_write16(hw, port, XM_PHY_DATA, val); for (i = 0; i < PHY_RETRIES; i++) { udelay(1); - if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) return; } printk(KERN_WARNING PFX "%s: phy write timed out\n", @@ -968,112 +999,34 @@ static void genesis_init(struct skge_hw *hw) static void genesis_reset(struct skge_hw *hw, int port) { - const u8 zero[8] = { 0 }; + int i; + u64 zero = 0; /* reset the statistics module */ - xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); - xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ - xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ - xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ - xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + skge_xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); + skge_xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ + skge_xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ + skge_xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + skge_xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ - /* disable Broadcom PHY IRQ */ - xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); + /* disable all PHY IRQs */ + if (hw->phy_type == SK_PHY_BCOM) + skge_xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); - xm_outhash(hw, port, XM_HSM, zero); + skge_xm_outhash(hw, port, XM_HSM, (u8 *) &zero); + for (i = 0; i < 15; i++) + skge_xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero); + skge_xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero); } -/* Convert mode to MII values */ -static const u16 phy_pause_map[] = { - [FLOW_MODE_NONE] = 0, - [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, - [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, - [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, -}; - - -/* Check status of Broadcom phy link */ -static void bcom_check_link(struct skge_hw *hw, int port) -{ - struct net_device *dev = hw->dev[port]; - struct skge_port *skge = netdev_priv(dev); - u16 status; - - /* read twice because of latch */ - (void) xm_phy_read(hw, port, PHY_BCOM_STAT); - status = xm_phy_read(hw, port, PHY_BCOM_STAT); - - pr_debug("bcom_check_link status=0x%x\n", status); - - if ((status & PHY_ST_LSYNC) == 0) { - u16 cmd = xm_read16(hw, port, XM_MMU_CMD); - cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); - xm_write16(hw, port, XM_MMU_CMD, cmd); - /* dummy read to ensure writing */ - (void) xm_read16(hw, port, XM_MMU_CMD); - - if (netif_carrier_ok(dev)) - skge_link_down(skge); - } else { - if (skge->autoneg == AUTONEG_ENABLE && - (status & PHY_ST_AN_OVER)) { - u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP); - u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); - - if (lpa & PHY_B_AN_RF) { - printk(KERN_NOTICE PFX "%s: remote fault\n", - dev->name); - return; - } - - /* Check Duplex mismatch */ - switch(aux & PHY_B_AS_AN_RES_MSK) { - case PHY_B_RES_1000FD: - skge->duplex = DUPLEX_FULL; - break; - case PHY_B_RES_1000HD: - skge->duplex = DUPLEX_HALF; - break; - default: - printk(KERN_NOTICE PFX "%s: duplex mismatch\n", - dev->name); - return; - } - - - /* We are using IEEE 802.3z/D5.0 Table 37-4 */ - switch (aux & PHY_B_AS_PAUSE_MSK) { - case PHY_B_AS_PAUSE_MSK: - skge->flow_control = FLOW_MODE_SYMMETRIC; - break; - case PHY_B_AS_PRR: - 
skge->flow_control = FLOW_MODE_REM_SEND; - break; - case PHY_B_AS_PRT: - skge->flow_control = FLOW_MODE_LOC_SEND; - break; - default: - skge->flow_control = FLOW_MODE_NONE; - } - - skge->speed = SPEED_1000; - } - - if (!netif_carrier_ok(dev)) - genesis_link_up(skge); - } -} - -/* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional - * Phy on for 100 or 10Mbit operation - */ -static void bcom_phy_init(struct skge_port *skge, int jumbo) +static void genesis_mac_init(struct skge_hw *hw, int port) { - struct skge_hw *hw = skge->hw; - int port = skge->port; + struct skge_port *skge = netdev_priv(hw->dev[port]); int i; - u16 id1, r, ext, ctl; + u32 r; + u16 id1; + u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5; /* magic workaround patterns for Broadcom */ static const struct { @@ -1089,120 +1042,16 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo) { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, }; - pr_debug("bcom_phy_init\n"); - - /* read Id from external PHY (all have the same address) */ - id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); - - /* Optimize MDIO transfer by suppressing preamble. */ - r = xm_read16(hw, port, XM_MMU_CMD); - r |= XM_MMU_NO_PRE; - xm_write16(hw, port, XM_MMU_CMD,r); - - switch(id1) { - case PHY_BCOM_ID1_C0: - /* - * Workaround BCOM Errata for the C0 type. - * Write magic patterns to reserved registers. - */ - for (i = 0; i < ARRAY_SIZE(C0hack); i++) - xm_phy_write(hw, port, - C0hack[i].reg, C0hack[i].val); - - break; - case PHY_BCOM_ID1_A1: - /* - * Workaround BCOM Errata for the A1 type. - * Write magic patterns to reserved registers. - */ - for (i = 0; i < ARRAY_SIZE(A1hack); i++) - xm_phy_write(hw, port, - A1hack[i].reg, A1hack[i].val); - break; - } - - /* - * Workaround BCOM Errata (#10523) for all BCom PHYs. - * Disable Power Management after reset. - */ - r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); - r |= PHY_B_AC_DIS_PM; - xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); - - /* Dummy read */ - xm_read16(hw, port, XM_ISRC); - - ext = PHY_B_PEC_EN_LTR; /* enable tx led */ - ctl = PHY_CT_SP1000; /* always 1000mbit */ - - if (skge->autoneg == AUTONEG_ENABLE) { - /* - * Workaround BCOM Errata #1 for the C5 type. 
- * 1000Base-T Link Acquisition Failure in Slave Mode - * Set Repeater/DTE bit 10 of the 1000Base-T Control Register - */ - u16 adv = PHY_B_1000C_RD; - if (skge->advertising & ADVERTISED_1000baseT_Half) - adv |= PHY_B_1000C_AHD; - if (skge->advertising & ADVERTISED_1000baseT_Full) - adv |= PHY_B_1000C_AFD; - xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); - - ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; - } else { - if (skge->duplex == DUPLEX_FULL) - ctl |= PHY_CT_DUP_MD; - /* Force to slave */ - xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); - } - - /* Set autonegotiation pause parameters */ - xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, - phy_pause_map[skge->flow_control] | PHY_AN_CSMA); - - /* Handle Jumbo frames */ - if (jumbo) { - xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, - PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); - - ext |= PHY_B_PEC_HIGH_LA; - - } - - xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); - xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); - - /* Use link status change interrrupt */ - xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); - - bcom_check_link(hw, port); -} - -static void genesis_mac_init(struct skge_hw *hw, int port) -{ - struct net_device *dev = hw->dev[port]; - struct skge_port *skge = netdev_priv(dev); - int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; - int i; - u32 r; - const u8 zero[6] = { 0 }; - - /* Clear MIB counters */ - xm_write16(hw, port, XM_STAT_CMD, - XM_SC_CLR_RXC | XM_SC_CLR_TXC); - /* Clear two times according to Errata #3 */ - xm_write16(hw, port, XM_STAT_CMD, - XM_SC_CLR_RXC | XM_SC_CLR_TXC); /* initialize Rx, Tx and Link LED */ - skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); - skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); + skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); - skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); - skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START); /* Unreset the XMAC. */ - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); /* * Perform additional initialization for external PHYs, @@ -1210,56 +1059,67 @@ static void genesis_mac_init(struct skge_hw *hw, int port) * GMII mode. */ spin_lock_bh(&hw->phy_lock); - /* Take external Phy out of reset */ - r = skge_read32(hw, B2_GP_IO); - if (port == 0) - r |= GP_DIR_0|GP_IO_0; - else - r |= GP_DIR_2|GP_IO_2; - - skge_write32(hw, B2_GP_IO, r); - skge_read32(hw, B2_GP_IO); - spin_unlock_bh(&hw->phy_lock); - - /* Enable GMII interfac */ - xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); - - bcom_phy_init(skge, jumbo); - - /* Set Station Address */ - xm_outaddr(hw, port, XM_SA, dev->dev_addr); - - /* We don't use match addresses so clear */ - for (i = 1; i < 16; i++) - xm_outaddr(hw, port, XM_EXM(i), zero); - - /* configure Rx High Water Mark (XM_RX_HI_WM) */ - xm_write16(hw, port, XM_RX_HI_WM, 1450); - - /* We don't need the FCS appended to the packet. */ - r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; - if (jumbo) - r |= XM_RX_BIG_PK_OK; + if (hw->phy_type != SK_PHY_XMAC) { + /* Take PHY out of reset. */ + r = skge_read32(hw, B2_GP_IO); + if (port == 0) + r |= GP_DIR_0|GP_IO_0; + else + r |= GP_DIR_2|GP_IO_2; + + skge_write32(hw, B2_GP_IO, r); + skge_read32(hw, B2_GP_IO); + + /* Enable GMII mode on the XMAC. 
*/ + skge_xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); + + id1 = skge_xm_phy_read(hw, port, PHY_XMAC_ID1); + + /* Optimize MDIO transfer by suppressing preamble. */ + skge_xm_write16(hw, port, XM_MMU_CMD, + skge_xm_read16(hw, port, XM_MMU_CMD) + | XM_MMU_NO_PRE); + + if (id1 == PHY_BCOM_ID1_C0) { + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(C0hack); i++) + skge_xm_phy_write(hw, port, + C0hack[i].reg, C0hack[i].val); + + } else if (id1 == PHY_BCOM_ID1_A1) { + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(A1hack); i++) + skge_xm_phy_write(hw, port, + A1hack[i].reg, A1hack[i].val); + } - if (skge->duplex == DUPLEX_HALF) { /* - * If in manual half duplex mode the other side might be in - * full duplex mode, so ignore if a carrier extension is not seen - * on frames received + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. */ - r |= XM_RX_DIS_CEXT; + r = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); + skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM); } - xm_write16(hw, port, XM_RX_CMD, r); + /* Dummy read */ + skge_xm_read16(hw, port, XM_ISRC); - /* We want short frames padded to 60 bytes. */ - xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); + r = skge_xm_read32(hw, port, XM_MODE); + skge_xm_write32(hw, port, XM_MODE, r|XM_MD_CSA); - /* - * Bump up the transmit threshold. This helps hold off transmit - * underruns when we're blasting traffic from both ports at once. - */ - xm_write16(hw, port, XM_TX_THR, 512); + /* We don't need the FCS appended to the packet. */ + r = skge_xm_read16(hw, port, XM_RX_CMD); + skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS); + + /* We want short frames padded to 60 bytes. */ + r = skge_xm_read16(hw, port, XM_TX_CMD); + skge_xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD); /* * Enable the reception of all error frames. This is is @@ -1275,22 +1135,19 @@ static void genesis_mac_init(struct skge_hw *hw, int port) * case the XMAC will start transfering frames out of the * RX FIFO as soon as the FIFO threshold is reached. */ - xm_write32(hw, port, XM_MODE, XM_DEF_MODE); + r = skge_xm_read32(hw, port, XM_MODE); + skge_xm_write32(hw, port, XM_MODE, + XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT| + XM_MD_RX_ERR|XM_MD_RX_IRLE); + skge_xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr); + skge_xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr); /* - * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) - * - Enable all bits excepting 'Octets Rx OK Low CntOv' - * and 'Octets Rx OK Hi Cnt Ov'. - */ - xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); - - /* - * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) - * - Enable all bits excepting 'Octets Tx OK Low CntOv' - * and 'Octets Tx OK Hi Cnt Ov'. + * Bump up the transmit threshold. This helps hold off transmit + * underruns when we're blasting traffic from both ports at once. 
*/ - xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); + skge_xm_write16(hw, port, XM_TX_THR, 512); /* Configure MAC arbiter */ skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); @@ -1307,30 +1164,137 @@ static void genesis_mac_init(struct skge_hw *hw, int port) skge_write8(hw, B3_MA_RCINI_TX2, 0); /* Configure Rx MAC FIFO */ - skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); - skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); - skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); + skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); /* Configure Tx MAC FIFO */ - skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); - skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); - if (jumbo) { + if (hw->dev[port]->mtu > ETH_DATA_LEN) { /* Enable frame flushing if jumbo frames used */ - skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); + skge_write16(hw, SKGEMAC_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); } else { /* enable timeout timers if normal frames */ skge_write16(hw, B3_PA_CTRL, - (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); + port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); } + + + r = skge_xm_read16(hw, port, XM_RX_CMD); + if (hw->dev[port]->mtu > ETH_DATA_LEN) + skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK); + else + skge_xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK)); + + switch (hw->phy_type) { + case SK_PHY_XMAC: + if (skge->autoneg == AUTONEG_ENABLE) { + ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD; + + switch (skge->flow_control) { + case FLOW_MODE_NONE: + ctrl1 |= PHY_X_P_NO_PAUSE; + break; + case FLOW_MODE_LOC_SEND: + ctrl1 |= PHY_X_P_ASYM_MD; + break; + case FLOW_MODE_SYMMETRIC: + ctrl1 |= PHY_X_P_SYM_MD; + break; + case FLOW_MODE_REM_SEND: + ctrl1 |= PHY_X_P_BOTH_MD; + break; + } + + skge_xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1); + ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + ctrl2 = 0; + if (skge->duplex == DUPLEX_FULL) + ctrl2 |= PHY_CT_DUP_MD; + } + + skge_xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2); + break; + + case SK_PHY_BCOM: + ctrl1 = PHY_CT_SP1000; + ctrl2 = 0; + ctrl3 = PHY_SEL_TYPE; + ctrl4 = PHY_B_PEC_EN_LTR; + ctrl5 = PHY_B_AC_TX_TST; + + if (skge->autoneg == AUTONEG_ENABLE) { + /* + * Workaround BCOM Errata #1 for the C5 type. 
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + ctrl2 |= PHY_B_1000C_RD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + ctrl2 |= PHY_B_1000C_AHD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + ctrl2 |= PHY_B_1000C_AFD; + + /* Set Flow-control capabilities */ + switch (skge->flow_control) { + case FLOW_MODE_NONE: + ctrl3 |= PHY_B_P_NO_PAUSE; + break; + case FLOW_MODE_LOC_SEND: + ctrl3 |= PHY_B_P_ASYM_MD; + break; + case FLOW_MODE_SYMMETRIC: + ctrl3 |= PHY_B_P_SYM_MD; + break; + case FLOW_MODE_REM_SEND: + ctrl3 |= PHY_B_P_BOTH_MD; + break; + } + + /* Restart Auto-negotiation */ + ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + if (skge->duplex == DUPLEX_FULL) + ctrl1 |= PHY_CT_DUP_MD; + + ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */ + } + + skge_xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2); + skge_xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3); + + if (skge->netdev->mtu > ETH_DATA_LEN) { + ctrl4 |= PHY_B_PEC_HIGH_LA; + ctrl5 |= PHY_B_AC_LONG_PACK; + + skge_xm_phy_write(hw, port,PHY_BCOM_AUX_CTRL, ctrl5); + } + + skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4); + skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1); + break; + } + spin_unlock_bh(&hw->phy_lock); + + /* Clear MIB counters */ + skge_xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + skge_xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + /* Start polling for link status */ + mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ); } static void genesis_stop(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; - u32 reg; /* Clear Tx packet arbiter timeout IRQ */ skge_write16(hw, B3_PA_CTRL, @@ -1340,30 +1304,33 @@ static void genesis_stop(struct skge_port *skge) * If the transfer stucks at the MAC the STOP command will not * terminate if we don't flush the XMAC's transmit FIFO ! 
*/ - xm_write32(hw, port, XM_MODE, - xm_read32(hw, port, XM_MODE)|XM_MD_FTF); + skge_xm_write32(hw, port, XM_MODE, + skge_xm_read32(hw, port, XM_MODE)|XM_MD_FTF); /* Reset the MAC */ - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); /* For external PHYs there must be special handling */ - reg = skge_read32(hw, B2_GP_IO); - if (port == 0) { - reg |= GP_DIR_0; - reg &= ~GP_IO_0; - } else { - reg |= GP_DIR_2; - reg &= ~GP_IO_2; + if (hw->phy_type != SK_PHY_XMAC) { + u32 reg = skge_read32(hw, B2_GP_IO); + + if (port == 0) { + reg |= GP_DIR_0; + reg &= ~GP_IO_0; + } else { + reg |= GP_DIR_2; + reg &= ~GP_IO_2; + } + skge_write32(hw, B2_GP_IO, reg); + skge_read32(hw, B2_GP_IO); } - skge_write32(hw, B2_GP_IO, reg); - skge_read32(hw, B2_GP_IO); - xm_write16(hw, port, XM_MMU_CMD, - xm_read16(hw, port, XM_MMU_CMD) + skge_xm_write16(hw, port, XM_MMU_CMD, + skge_xm_read16(hw, port, XM_MMU_CMD) & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); - xm_read16(hw, port, XM_MMU_CMD); + skge_xm_read16(hw, port, XM_MMU_CMD); } @@ -1374,11 +1341,11 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data) int i; unsigned long timeout = jiffies + HZ; - xm_write16(hw, port, + skge_xm_write16(hw, port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); /* wait for update to complete */ - while (xm_read16(hw, port, XM_STAT_CMD) + while (skge_xm_read16(hw, port, XM_STAT_CMD) & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { if (time_after(jiffies, timeout)) break; @@ -1386,60 +1353,68 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data) } /* special case for 64 bit octet counter */ - data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 - | xm_read32(hw, port, XM_TXO_OK_LO); - data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 - | xm_read32(hw, port, XM_RXO_OK_LO); + data[0] = (u64) skge_xm_read32(hw, port, XM_TXO_OK_HI) << 32 + | skge_xm_read32(hw, port, XM_TXO_OK_LO); + data[1] = (u64) skge_xm_read32(hw, port, XM_RXO_OK_HI) << 32 + | skge_xm_read32(hw, port, XM_RXO_OK_LO); for (i = 2; i < ARRAY_SIZE(skge_stats); i++) - data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); + data[i] = skge_xm_read32(hw, port, skge_stats[i].xmac_offset); } static void genesis_mac_intr(struct skge_hw *hw, int port) { struct skge_port *skge = netdev_priv(hw->dev[port]); - u16 status = xm_read16(hw, port, XM_ISRC); - - if (netif_msg_intr(skge)) - printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", - skge->netdev->name, status); + u16 status = skge_xm_read16(hw, port, XM_ISRC); + + pr_debug("genesis_intr status %x\n", status); + if (hw->phy_type == SK_PHY_XMAC) { + /* Link down, start polling for state change */ + if (status & XM_IS_INP_ASS) { + skge_xm_write16(hw, port, XM_IMSK, + skge_xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS); + mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ); + } + else if (status & XM_IS_AND) + mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ); + } if (status & XM_IS_TXF_UR) { - xm_write32(hw, port, XM_MODE, XM_MD_FTF); + skge_xm_write32(hw, port, XM_MODE, XM_MD_FTF); ++skge->net_stats.tx_fifo_errors; } if (status & XM_IS_RXF_OV) { - xm_write32(hw, port, XM_MODE, XM_MD_FRF); + skge_xm_write32(hw, port, XM_MODE, XM_MD_FRF); ++skge->net_stats.rx_fifo_errors; } } -static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) { int i; - gma_write16(hw, port, GM_SMI_DATA, val); - gma_write16(hw, port, GM_SMI_CTRL, +
skge_gma_write16(hw, port, GM_SMI_DATA, val); + skge_gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); for (i = 0; i < PHY_RETRIES; i++) { udelay(1); - if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + if (!(skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) break; } } -static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg) { int i; - gma_write16(hw, port, GM_SMI_CTRL, + skge_gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); for (i = 0; i < PHY_RETRIES; i++) { udelay(1); - if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) goto ready; } @@ -1447,7 +1422,24 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) hw->dev[port]->name); return 0; ready: - return gma_read16(hw, port, GM_SMI_DATA); + return skge_gma_read16(hw, port, GM_SMI_DATA); +} + +static void genesis_link_down(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + pr_debug("genesis_link_down\n"); + + skge_xm_write16(hw, port, XM_MMU_CMD, + skge_xm_read16(hw, port, XM_MMU_CMD) + & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + /* dummy read to ensure writing */ + (void) skge_xm_read16(hw, port, XM_MMU_CMD); + + skge_link_down(skge); } static void genesis_link_up(struct skge_port *skge) @@ -1458,7 +1450,7 @@ static void genesis_link_up(struct skge_port *skge) u32 mode, msk; pr_debug("genesis_link_up\n"); - cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd = skge_xm_read16(hw, port, XM_MMU_CMD); /* * enabling pause frame reception is required for 1000BT @@ -1466,15 +1458,14 @@ static void genesis_link_up(struct skge_port *skge) */ if (skge->flow_control == FLOW_MODE_NONE || skge->flow_control == FLOW_MODE_LOC_SEND) - /* Disable Pause Frame Reception */ cmd |= XM_MMU_IGN_PF; else /* Enable Pause Frame Reception */ cmd &= ~XM_MMU_IGN_PF; - xm_write16(hw, port, XM_MMU_CMD, cmd); + skge_xm_write16(hw, port, XM_MMU_CMD, cmd); - mode = xm_read32(hw, port, XM_MODE); + mode = skge_xm_read32(hw, port, XM_MODE); if (skge->flow_control == FLOW_MODE_SYMMETRIC || skge->flow_control == FLOW_MODE_LOC_SEND) { /* @@ -1488,10 +1479,10 @@ static void genesis_link_up(struct skge_port *skge) /* XM_PAUSE_DA = '010000C28001' (default) */ /* XM_MAC_PTIME = 0xffff (maximum) */ /* remember this value is defined in big endian (!) */ - xm_write16(hw, port, XM_MAC_PTIME, 0xffff); + skge_xm_write16(hw, port, XM_MAC_PTIME, 0xffff); mode |= XM_PAUSE_MODE; - skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); } else { /* * disable pause frame generation is required for 1000BT @@ -1500,68 +1491,125 @@ static void genesis_link_up(struct skge_port *skge) /* Disable Pause Mode in Mode Register */ mode &= ~XM_PAUSE_MODE; - skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); } - xm_write32(hw, port, XM_MODE, mode); + skge_xm_write32(hw, port, XM_MODE, mode); msk = XM_DEF_MSK; - /* disable GP0 interrupt bit for external Phy */ - msk |= XM_IS_INP_ASS; + if (hw->phy_type != SK_PHY_XMAC) + msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ - xm_write16(hw, port, XM_IMSK, msk); - xm_read16(hw, port, XM_ISRC); + skge_xm_write16(hw, port, XM_IMSK, msk); + skge_xm_read16(hw, port, XM_ISRC); /* get MMU Command Reg. 
*/ - cmd = xm_read16(hw, port, XM_MMU_CMD); - if (skge->duplex == DUPLEX_FULL) + cmd = skge_xm_read16(hw, port, XM_MMU_CMD); + if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) cmd |= XM_MMU_GMII_FD; - /* - * Workaround BCOM Errata (#10523) for all BCom Phys - * Enable Power Management after link up - */ - xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, - xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) - & ~PHY_B_AC_DIS_PM); - xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + if (hw->phy_type == SK_PHY_BCOM) { + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) + & ~PHY_B_AC_DIS_PM); + skge_xm_phy_write(hw, port, PHY_BCOM_INT_MASK, + PHY_B_DEF_MSK); + } /* enable Rx/Tx */ - xm_write16(hw, port, XM_MMU_CMD, + skge_xm_write16(hw, port, XM_MMU_CMD, cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); skge_link_up(skge); } -static inline void bcom_phy_intr(struct skge_port *skge) +static void genesis_bcom_intr(struct skge_port *skge) { struct skge_hw *hw = skge->hw; int port = skge->port; - u16 isrc; - - isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); - if (netif_msg_intr(skge)) - printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n", - skge->netdev->name, isrc); + u16 stat = skge_xm_phy_read(hw, port, PHY_BCOM_INT_STAT); - if (isrc & PHY_B_IS_PSE) - printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n", - hw->dev[port]->name); + pr_debug("genesis_bcom intr stat=%x\n", stat); /* Workaround BCom Errata: * enable and disable loopback mode if "NO HCD" occurs. */ - if (isrc & PHY_B_IS_NO_HDCL) { - u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); - xm_phy_write(hw, port, PHY_BCOM_CTRL, + if (stat & PHY_B_IS_NO_HDCL) { + u16 ctrl = skge_xm_phy_read(hw, port, PHY_BCOM_CTRL); + skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl | PHY_CT_LOOP); - xm_phy_write(hw, port, PHY_BCOM_CTRL, + skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl & ~PHY_CT_LOOP); } - if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) - bcom_check_link(hw, port); + stat = skge_xm_phy_read(hw, port, PHY_BCOM_STAT); + if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) { + u16 aux = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); + if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev)) + genesis_link_down(skge); + + else if (stat & PHY_B_IS_LST_CHANGE) { + if (aux & PHY_B_AS_AN_C) { + switch (aux & PHY_B_AS_AN_RES_MSK) { + case PHY_B_RES_1000FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_B_RES_1000HD: + skge->duplex = DUPLEX_HALF; + break; + } + + switch (aux & PHY_B_AS_PAUSE_MSK) { + case PHY_B_AS_PAUSE_MSK: + skge->flow_control = FLOW_MODE_SYMMETRIC; + break; + case PHY_B_AS_PRR: + skge->flow_control = FLOW_MODE_REM_SEND; + break; + case PHY_B_AS_PRT: + skge->flow_control = FLOW_MODE_LOC_SEND; + break; + default: + skge->flow_control = FLOW_MODE_NONE; + } + skge->speed = SPEED_1000; + } + genesis_link_up(skge); + } + else + mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ); + } +} +/* Periodic poll of phy status to check for link transition */ +static void skge_link_timer(unsigned long __arg) +{ + struct skge_port *skge = (struct skge_port *) __arg; + struct skge_hw *hw = skge->hw; + int port = skge->port; + + if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev)) + return; + + spin_lock_bh(&hw->phy_lock); + if (hw->phy_type == SK_PHY_BCOM) + genesis_bcom_intr(skge); + else { + int i; + for (i = 0; i < 3; i++) + if (skge_xm_read16(hw, port, XM_ISRC) &
XM_IS_INP_ASS) + break; + + if (i == 3) + mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ); + else + genesis_link_up(skge); + } + spin_unlock_bh(&hw->phy_lock); } /* Marvell Phy Initailization */ @@ -1573,27 +1621,31 @@ static void yukon_init(struct skge_hw *hw, int port) pr_debug("yukon_init\n"); if (skge->autoneg == AUTONEG_ENABLE) { - u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + u16 ectrl = skge_gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | PHY_M_EC_MAC_S_MSK); ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); - ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + /* on PHY 88E1111 there is a change for downshift control */ + if (hw->chip_id == CHIP_ID_YUKON_EC) + ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA; + else + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); - gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); + skge_gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); } - ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl = skge_gm_phy_read(hw, port, PHY_MARV_CTRL); if (skge->autoneg == AUTONEG_DISABLE) ctrl &= ~PHY_CT_ANE; ctrl |= PHY_CT_RESET; - gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); ctrl = 0; ct1000 = 0; - adv = PHY_AN_CSMA; + adv = PHY_SEL_TYPE; if (skge->autoneg == AUTONEG_ENABLE) { if (iscopper(hw)) { @@ -1609,12 +1661,41 @@ static void yukon_init(struct skge_hw *hw, int port) adv |= PHY_M_AN_10_FD; if (skge->advertising & ADVERTISED_10baseT_Half) adv |= PHY_M_AN_10_HD; - } else /* special defines for FIBER (88E1011S only) */ - adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; - /* Set Flow-control capabilities */ - adv |= phy_pause_map[skge->flow_control]; + /* Set Flow-control capabilities */ + switch (skge->flow_control) { + case FLOW_MODE_NONE: + adv |= PHY_B_P_NO_PAUSE; + break; + case FLOW_MODE_LOC_SEND: + adv |= PHY_B_P_ASYM_MD; + break; + case FLOW_MODE_SYMMETRIC: + adv |= PHY_B_P_SYM_MD; + break; + case FLOW_MODE_REM_SEND: + adv |= PHY_B_P_BOTH_MD; + break; + } + } else { /* special defines for FIBER (88E1011S only) */ + adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; + /* Set Flow-control capabilities */ + switch (skge->flow_control) { + case FLOW_MODE_NONE: + adv |= PHY_M_P_NO_PAUSE_X; + break; + case FLOW_MODE_LOC_SEND: + adv |= PHY_M_P_ASYM_MD_X; + break; + case FLOW_MODE_SYMMETRIC: + adv |= PHY_M_P_SYM_MD_X; + break; + case FLOW_MODE_REM_SEND: + adv |= PHY_M_P_BOTH_MD_X; + break; + } + } /* Restart Auto-negotiation */ ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; } else { @@ -1636,23 +1717,36 @@ static void yukon_init(struct skge_hw *hw, int port) ctrl |= PHY_CT_RESET; } - gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + if (hw->chip_id != CHIP_ID_YUKON_FE) + skge_gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); - gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); - gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); /* Setup Phy LED's */ ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); ledover = 0; - ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; + if (hw->chip_id == CHIP_ID_YUKON_FE) { + /* on 88E3082 these bits are at 11..9 (shifted left) */ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1; + + skge_gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, + ((skge_gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR) + + & ~PHY_M_FELP_LED1_MSK) + | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL))); + } else { + /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity 
*/ + ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; - /* turn off the Rx LED (LED_RX) */ - ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); + /* turn off the Rx LED (LED_RX) */ + ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); + } /* disable blink mode (LED_DUPLEX) on collisions */ ctrl |= PHY_M_LEDC_DP_CTRL; - gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); + skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) { /* turn on 100 Mbps LED (LED_LINK100) */ @@ -1660,25 +1754,25 @@ static void yukon_init(struct skge_hw *hw, int port) } if (ledover) - gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); + skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); /* Enable phy interrupt on autonegotiation complete (or link up) */ if (skge->autoneg == AUTONEG_ENABLE) - gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); + skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); else - gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); } static void yukon_reset(struct skge_hw *hw, int port) { - gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ - gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ - gma_write16(hw, port, GM_MC_ADDR_H2, 0); - gma_write16(hw, port, GM_MC_ADDR_H3, 0); - gma_write16(hw, port, GM_MC_ADDR_H4, 0); + skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ + skge_gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + skge_gma_write16(hw, port, GM_MC_ADDR_H2, 0); + skge_gma_write16(hw, port, GM_MC_ADDR_H3, 0); + skge_gma_write16(hw, port, GM_MC_ADDR_H4, 0); - gma_write16(hw, port, GM_RX_CTRL, - gma_read16(hw, port, GM_RX_CTRL) + skge_gma_write16(hw, port, GM_RX_CTRL, + skge_gma_read16(hw, port, GM_RX_CTRL) | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); } @@ -1691,17 +1785,17 @@ static void yukon_mac_init(struct skge_hw *hw, int port) /* WA code for COMA mode -- set PHY reset */ if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev == CHIP_REV_YU_LITE_A3) + chip_rev(hw) == CHIP_REV_YU_LITE_A3) skge_write32(hw, B2_GP_IO, (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); /* hard reset */ - skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); - skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_RST_SET); /* WA code for COMA mode -- clear PHY reset */ if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev == CHIP_REV_YU_LITE_A3) + chip_rev(hw) == CHIP_REV_YU_LITE_A3) skge_write32(hw, B2_GP_IO, (skge_read32(hw, B2_GP_IO) | GP_DIR_9) & ~GP_IO_9); @@ -1712,13 +1806,13 @@ static void yukon_mac_init(struct skge_hw *hw, int port) reg |= iscopper(hw) ? 
GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; /* Clear GMC reset */ - skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); - skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); - skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_SET); + skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); + skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); if (skge->autoneg == AUTONEG_DISABLE) { reg = GM_GPCR_AU_ALL_DIS; - gma_write16(hw, port, GM_GP_CTRL, - gma_read16(hw, port, GM_GP_CTRL) | reg); + skge_gma_write16(hw, port, GM_GP_CTRL, + skge_gma_read16(hw, port, GM_GP_CTRL) | reg); switch (skge->speed) { case SPEED_1000: @@ -1734,7 +1828,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port) reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; switch (skge->flow_control) { case FLOW_MODE_NONE: - skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; break; case FLOW_MODE_LOC_SEND: @@ -1742,7 +1836,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port) reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; } - gma_write16(hw, port, GM_GP_CTRL, reg); + skge_gma_write16(hw, port, GM_GP_CTRL, reg); skge_read16(hw, GMAC_IRQ_SRC); spin_lock_bh(&hw->phy_lock); @@ -1750,25 +1844,25 @@ static void yukon_mac_init(struct skge_hw *hw, int port) spin_unlock_bh(&hw->phy_lock); /* MIB clear */ - reg = gma_read16(hw, port, GM_PHY_ADDR); - gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + reg = skge_gma_read16(hw, port, GM_PHY_ADDR); + skge_gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); for (i = 0; i < GM_MIB_CNT_SIZE; i++) - gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); - gma_write16(hw, port, GM_PHY_ADDR, reg); + skge_gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); + skge_gma_write16(hw, port, GM_PHY_ADDR, reg); /* transmit control */ - gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + skge_gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); /* receive control reg: unicast + multicast + no FCS */ - gma_write16(hw, port, GM_RX_CTRL, + skge_gma_write16(hw, port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); /* transmit flow control */ - gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + skge_gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); /* transmit parameter */ - gma_write16(hw, port, GM_TX_PARAM, + skge_gma_write16(hw, port, GM_TX_PARAM, TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); @@ -1778,33 +1872,33 @@ static void yukon_mac_init(struct skge_hw *hw, int port) if (hw->dev[port]->mtu > 1500) reg |= GM_SMOD_JUMBO_ENA; - gma_write16(hw, port, GM_SERIAL_MODE, reg); + skge_gma_write16(hw, port, GM_SERIAL_MODE, reg); /* physical address: used for pause frames */ - gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, addr); /* virtual address for data */ - gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, addr); /* enable interrupt mask for counter overflows */ - gma_write16(hw, port, GM_TX_IRQ_MSK, 0); - gma_write16(hw, port, GM_RX_IRQ_MSK, 0); - gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + skge_gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + skge_gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + skge_gma_write16(hw, port, GM_TR_IRQ_MSK, 0); /* Initialize Mac Fifo 
*/ /* Configure Rx MAC FIFO */ - skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); + skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); reg = GMF_OPER_ON | GMF_RX_F_FL_ON; if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev == CHIP_REV_YU_LITE_A3) + chip_rev(hw) == CHIP_REV_YU_LITE_A3) reg &= ~GMF_RX_F_FL_ON; - skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); - skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); - skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); + skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), reg); + skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); /* Configure Tx MAC FIFO */ - skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); - skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); + skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); } static void yukon_stop(struct skge_port *skge) @@ -1813,19 +1907,19 @@ static void yukon_stop(struct skge_port *skge) int port = skge->port; if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev == CHIP_REV_YU_LITE_A3) { + chip_rev(hw) == CHIP_REV_YU_LITE_A3) { skge_write32(hw, B2_GP_IO, skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9); } - gma_write16(hw, port, GM_GP_CTRL, - gma_read16(hw, port, GM_GP_CTRL) + skge_gma_write16(hw, port, GM_GP_CTRL, + skge_gma_read16(hw, port, GM_GP_CTRL) & ~(GM_GPCR_RX_ENA|GM_GPCR_RX_ENA)); - gma_read16(hw, port, GM_GP_CTRL); + skge_gma_read16(hw, port, GM_GP_CTRL); /* set GPHY Control reset */ - gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET); - gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET); + skge_gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET); + skge_gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET); } static void yukon_get_stats(struct skge_port *skge, u64 *data) @@ -1834,40 +1928,39 @@ static void yukon_get_stats(struct skge_port *skge, u64 *data) int port = skge->port; int i; - data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 - | gma_read32(hw, port, GM_TXO_OK_LO); - data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 - | gma_read32(hw, port, GM_RXO_OK_LO); + data[0] = (u64) skge_gma_read32(hw, port, GM_TXO_OK_HI) << 32 + | skge_gma_read32(hw, port, GM_TXO_OK_LO); + data[1] = (u64) skge_gma_read32(hw, port, GM_RXO_OK_HI) << 32 + | skge_gma_read32(hw, port, GM_RXO_OK_LO); for (i = 2; i < ARRAY_SIZE(skge_stats); i++) - data[i] = gma_read32(hw, port, + data[i] = skge_gma_read32(hw, port, skge_stats[i].gma_offset); } static void yukon_mac_intr(struct skge_hw *hw, int port) { - struct net_device *dev = hw->dev[port]; - struct skge_port *skge = netdev_priv(dev); - u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); - - if (netif_msg_intr(skge)) - printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", - dev->name, status); + struct skge_port *skge = netdev_priv(hw->dev[port]); + u8 status = skge_read8(hw, SKGEMAC_REG(port, GMAC_IRQ_SRC)); + pr_debug("yukon_intr status %x\n", status); if (status & GM_IS_RX_FF_OR) { ++skge->net_stats.rx_fifo_errors; - gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO); + skge_gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO); } if (status & GM_IS_TX_FF_UR) { ++skge->net_stats.tx_fifo_errors; - gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU); + skge_gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU); } } static u16 yukon_speed(const struct skge_hw *hw, u16 aux) { - switch (aux & PHY_M_PS_SPEED_MSK) { + if 
(hw->chip_id == CHIP_ID_YUKON_FE) + return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10; + + switch(aux & PHY_M_PS_SPEED_MSK) { case PHY_M_PS_SPEED_1000: return SPEED_1000; case PHY_M_PS_SPEED_100: @@ -1888,15 +1981,15 @@ static void yukon_link_up(struct skge_port *skge) /* Enable Transmit FIFO Underrun */ skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); - reg = gma_read16(hw, port, GM_GP_CTRL); + reg = skge_gma_read16(hw, port, GM_GP_CTRL); if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) reg |= GM_GPCR_DUP_FULL; /* enable Rx/Tx */ reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; - gma_write16(hw, port, GM_GP_CTRL, reg); + skge_gma_write16(hw, port, GM_GP_CTRL, reg); - gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); skge_link_up(skge); } @@ -1906,15 +1999,16 @@ static void yukon_link_down(struct skge_port *skge) int port = skge->port; pr_debug("yukon_link_down\n"); - gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); - gm_phy_write(hw, port, GM_GP_CTRL, - gm_phy_read(hw, port, GM_GP_CTRL) + skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); + skge_gm_phy_write(hw, port, GM_GP_CTRL, + skge_gm_phy_read(hw, port, GM_GP_CTRL) & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)); - if (skge->flow_control == FLOW_MODE_REM_SEND) { + if (hw->chip_id != CHIP_ID_YUKON_FE && + skge->flow_control == FLOW_MODE_REM_SEND) { /* restore Asymmetric Pause bit */ - gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, - gm_phy_read(hw, port, + skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, + skge_gm_phy_read(hw, port, PHY_MARV_AUNE_ADV) | PHY_M_AN_ASP); @@ -1933,21 +2027,20 @@ static void yukon_phy_intr(struct skge_port *skge) const char *reason = NULL; u16 istatus, phystat; - istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); - phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); - - if (netif_msg_intr(skge)) - printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n", - skge->netdev->name, istatus, phystat); + istatus = skge_gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = skge_gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat); if (istatus & PHY_M_IS_AN_COMPL) { - if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) + if (skge_gm_phy_read(hw, port, PHY_MARV_AUNE_LP) & PHY_M_AN_RF) { reason = "remote fault"; goto failed; } - if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { + if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC) + && (skge_gm_phy_read(hw, port, PHY_MARV_1000T_STAT) + & PHY_B_1000S_MSF)) { reason = "master/slave fault"; goto failed; } @@ -1961,6 +2054,10 @@ static void yukon_phy_intr(struct skge_port *skge) ? 
DUPLEX_FULL : DUPLEX_HALF; skge->speed = yukon_speed(hw, phystat); + /* Tx & Rx Pause Enabled bits are at 9..8 */ + if (hw->chip_id == CHIP_ID_YUKON_XL) + phystat >>= 6; + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ switch (phystat & PHY_M_PS_PAUSE_MSK) { case PHY_M_PS_PAUSE_MSK: @@ -1978,9 +2075,9 @@ static void yukon_phy_intr(struct skge_port *skge) if (skge->flow_control == FLOW_MODE_NONE || (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) - skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); else - skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON); yukon_link_up(skge); return; } @@ -2064,12 +2161,6 @@ static int skge_up(struct net_device *dev) if (netif_msg_ifup(skge)) printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); - if (dev->mtu > RX_BUF_SIZE) - skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN; - else - skge->rx_buf_size = RX_BUF_SIZE; - - rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); skge->mem_size = tx_size + rx_size; @@ -2082,8 +2173,7 @@ static int skge_up(struct net_device *dev) if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) goto free_pci_mem; - err = skge_rx_fill(skge); - if (err) + if (skge_rx_fill(skge)) goto free_rx_ring; if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, @@ -2092,10 +2182,6 @@ static int skge_up(struct net_device *dev) skge->tx_avail = skge->tx_ring.count - 1; - /* Enable IRQ from port */ - hw->intr_mask |= portirqmask[port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); - /* Initialze MAC */ if (hw->chip_id == CHIP_ID_GENESIS) genesis_mac_init(hw, port); @@ -2103,7 +2189,7 @@ static int skge_up(struct net_device *dev) yukon_mac_init(hw, port); /* Configure RAMbuffers */ - chunk = hw->ram_size / ((hw->ports + 1)*2); + chunk = hw->ram_size / (isdualport(hw) ? 
4 : 2); ram_addr = hw->ram_offset + 2 * chunk * port; skge_ramset(hw, rxqaddr[port], ram_addr, chunk); @@ -2141,6 +2227,7 @@ static int skge_down(struct net_device *dev) netif_stop_queue(dev); del_timer_sync(&skge->led_blink); + del_timer_sync(&skge->link_check); /* Stop transmitter */ skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); @@ -2153,12 +2240,12 @@ static int skge_down(struct net_device *dev) yukon_stop(skge); /* Disable Force Sync bit and Enable Alloc bit */ - skge_write8(hw, SK_REG(port, TXA_CTRL), + skge_write8(hw, SKGEMAC_REG(port, TXA_CTRL), TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); /* Stop Interval Timer and Limit Counter of Tx Arbiter */ - skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); - skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + skge_write32(hw, SKGEMAC_REG(port, TXA_ITI_INI), 0L); + skge_write32(hw, SKGEMAC_REG(port, TXA_LIM_INI), 0L); /* Reset PCI FIFO */ skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); @@ -2173,13 +2260,13 @@ static int skge_down(struct net_device *dev) skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); if (hw->chip_id == CHIP_ID_GENESIS) { - skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); - skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); - skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_STOP); - skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_STOP); + skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_STOP); + skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_STOP); } else { - skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); - skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); } /* turn off led's */ @@ -2212,10 +2299,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) local_irq_save(flags); if (!spin_trylock(&skge->tx_lock)) { - /* Collision - tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + /* Collision - tell upper layer to requeue */ + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { netif_stop_queue(dev); @@ -2246,7 +2333,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) * does. Looks like hardware is wrong? 
*/ if (ip->protocol == IPPROTO_UDP - && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) + && chip_rev(hw) == 0 && hw->chip_id == CHIP_ID_YUKON) control = BMU_TCP_CHECK; else control = BMU_UDP_CHECK; @@ -2307,7 +2394,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) { - /* This ring element can be skb or fragment */ if (e->skb) { pci_unmap_single(hw->pdev, pci_unmap_addr(e, mapaddr), @@ -2352,17 +2438,16 @@ static void skge_tx_timeout(struct net_device *dev) static int skge_change_mtu(struct net_device *dev, int new_mtu) { int err = 0; - int running = netif_running(dev); - if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + if(new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; + dev->mtu = new_mtu; - if (running) + if (netif_running(dev)) { skge_down(dev); - dev->mtu = new_mtu; - if (running) skge_up(dev); + } return err; } @@ -2377,9 +2462,7 @@ static void genesis_set_multicast(struct net_device *dev) u32 mode; u8 filter[8]; - pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count); - - mode = xm_read32(hw, port, XM_MODE); + mode = skge_xm_read32(hw, port, XM_MODE); mode |= XM_MD_ENA_HASH; if (dev->flags & IFF_PROMISC) mode |= XM_MD_ENA_PROM; @@ -2390,16 +2473,17 @@ static void genesis_set_multicast(struct net_device *dev) memset(filter, 0xff, sizeof(filter)); else { memset(filter, 0, sizeof(filter)); - for (i = 0; list && i < count; i++, list = list->next) { - u32 crc, bit; - crc = ether_crc_le(ETH_ALEN, list->dmi_addr); - bit = ~crc & 0x3f; + for(i = 0; list && i < count; i++, list = list->next) { + u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN); + u8 bit = 63 - (crc & 63); + filter[bit/8] |= 1 << (bit%8); } } - xm_write32(hw, port, XM_MODE, mode); - xm_outhash(hw, port, XM_HSM, filter); + skge_xm_outhash(hw, port, XM_HSM, filter); + + skge_xm_write32(hw, port, XM_MODE, mode); } static void yukon_set_multicast(struct net_device *dev) @@ -2413,7 +2497,7 @@ static void yukon_set_multicast(struct net_device *dev) memset(filter, 0, sizeof(filter)); - reg = gma_read16(hw, port, GM_RX_CTRL); + reg = skge_gma_read16(hw, port, GM_RX_CTRL); reg |= GM_RXCR_UCF_ENA; if (dev->flags & IFF_PROMISC) /* promiscious */ @@ -2426,23 +2510,23 @@ static void yukon_set_multicast(struct net_device *dev) int i; reg |= GM_RXCR_MCF_ENA; - for (i = 0; list && i < dev->mc_count; i++, list = list->next) { + for(i = 0; list && i < dev->mc_count; i++, list = list->next) { u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; filter[bit/8] |= 1 << (bit%8); } } - gma_write16(hw, port, GM_MC_ADDR_H1, + skge_gma_write16(hw, port, GM_MC_ADDR_H1, (u16)filter[0] | ((u16)filter[1] << 8)); - gma_write16(hw, port, GM_MC_ADDR_H2, + skge_gma_write16(hw, port, GM_MC_ADDR_H2, (u16)filter[2] | ((u16)filter[3] << 8)); - gma_write16(hw, port, GM_MC_ADDR_H3, + skge_gma_write16(hw, port, GM_MC_ADDR_H3, (u16)filter[4] | ((u16)filter[5] << 8)); - gma_write16(hw, port, GM_MC_ADDR_H4, + skge_gma_write16(hw, port, GM_MC_ADDR_H4, (u16)filter[6] | ((u16)filter[7] << 8)); - gma_write16(hw, port, GM_RX_CTRL, reg); + skge_gma_write16(hw, port, GM_RX_CTRL, reg); } static inline int bad_phy_status(const struct skge_hw *hw, u32 status) @@ -2461,76 +2545,28 @@ static void skge_rx_error(struct skge_port *skge, int slot, printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n", skge->netdev->name, slot, control, status); - if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + if ((control & 
(BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) + || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN) skge->net_stats.rx_length_errors++; - else if (skge->hw->chip_id == CHIP_ID_GENESIS) { - if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) - skge->net_stats.rx_length_errors++; - if (status & XMR_FS_FRA_ERR) - skge->net_stats.rx_frame_errors++; - if (status & XMR_FS_FCS_ERR) - skge->net_stats.rx_crc_errors++; - } else { - if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) - skge->net_stats.rx_length_errors++; - if (status & GMR_FS_FRAGMENT) - skge->net_stats.rx_frame_errors++; - if (status & GMR_FS_CRC_ERR) - skge->net_stats.rx_crc_errors++; - } -} - -/* Get receive buffer from descriptor. - * Handles copy of small buffers and reallocation failures - */ -static inline struct sk_buff *skge_rx_get(struct skge_port *skge, - struct skge_element *e, - unsigned int len) -{ - struct sk_buff *nskb, *skb; - - if (len < RX_COPY_THRESHOLD) { - nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN); - if (unlikely(!nskb)) - return NULL; - - pci_dma_sync_single_for_cpu(skge->hw->pdev, - pci_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); - memcpy(nskb->data, e->skb->data, len); - pci_dma_sync_single_for_device(skge->hw->pdev, - pci_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); - - if (skge->rx_csum) { - struct skge_rx_desc *rd = e->desc; - nskb->csum = le16_to_cpu(rd->csum2); - nskb->ip_summed = CHECKSUM_HW; - } - skge_rx_reuse(e, skge->rx_buf_size); - return nskb; - } else { - nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size); - if (unlikely(!nskb)) - return NULL; - - pci_unmap_single(skge->hw->pdev, - pci_unmap_addr(e, mapaddr), - pci_unmap_len(e, maplen), - PCI_DMA_FROMDEVICE); - skb = e->skb; - if (skge->rx_csum) { - struct skge_rx_desc *rd = e->desc; - skb->csum = le16_to_cpu(rd->csum2); - skb->ip_summed = CHECKSUM_HW; + else { + if (skge->hw->chip_id == CHIP_ID_GENESIS) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + skge->net_stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + skge->net_stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + skge->net_stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + skge->net_stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + skge->net_stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + skge->net_stats.rx_crc_errors++; } - - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); - return skb; } } - static int skge_poll(struct net_device *dev, int *budget) { struct skge_port *skge = netdev_priv(dev); @@ -2539,12 +2575,13 @@ static int skge_poll(struct net_device *dev, int *budget) struct skge_element *e; unsigned int to_do = min(dev->quota, *budget); unsigned int work_done = 0; + int done; + static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 }; - pr_debug("skge_poll\n"); - - for (e = ring->to_clean; work_done < to_do; e = e->next) { + for (e = ring->to_clean; e != ring->to_use && work_done < to_do; + e = e->next) { struct skge_rx_desc *rd = e->desc; - struct sk_buff *skb; + struct sk_buff *skb = e->skb; u32 control, len, status; rmb(); @@ -2553,12 +2590,19 @@ static int skge_poll(struct net_device *dev, int *budget) break; len = control & BMU_BBC; - status = rd->status; + e->skb = NULL; + + pci_unmap_single(hw->pdev, + pci_unmap_addr(e, mapaddr), + pci_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); - if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) - || bad_phy_status(hw, status))) { + status = rd->status; + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) + || len > dev->mtu + 
VLAN_ETH_HLEN + || bad_phy_status(hw, status)) { skge_rx_error(skge, e - ring->start, control, status); - skge_rx_reuse(e, skge->rx_buf_size); + dev_kfree_skb(skb); continue; } @@ -2566,37 +2610,43 @@ static int skge_poll(struct net_device *dev, int *budget) printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", dev->name, e - ring->start, rd->status, len); - skb = skge_rx_get(skge, e, len); - if (likely(skb)) { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, dev); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, dev); - dev->last_rx = jiffies; - netif_receive_skb(skb); + if (skge->rx_csum) { + skb->csum = le16_to_cpu(rd->csum2); + skb->ip_summed = CHECKSUM_HW; + } - ++work_done; - } else - skge_rx_reuse(e, skge->rx_buf_size); + dev->last_rx = jiffies; + netif_receive_skb(skb); + + ++work_done; } ring->to_clean = e; + *budget -= work_done; + dev->quota -= work_done; + done = work_done < to_do; + + if (skge_rx_fill(skge)) + done = 0; + /* restart receiver */ wmb(); skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START | CSR_IRQ_CL_F); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done >= to_do) - return 1; /* not done */ + if (done) { + local_irq_disable(); + hw->intr_mask |= irqmask[skge->port]; + /* Order is important since data can get interrupted */ + skge_write32(hw, B0_IMSK, hw->intr_mask); + __netif_rx_complete(dev); + local_irq_enable(); + } - local_irq_disable(); - __netif_rx_complete(dev); - hw->intr_mask |= portirqmask[skge->port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); - local_irq_enable(); - return 0; + return !done; } static inline void skge_tx_intr(struct net_device *dev) @@ -2607,7 +2657,7 @@ static inline void skge_tx_intr(struct net_device *dev) struct skge_element *e; spin_lock(&skge->tx_lock); - for (e = ring->to_clean; e != ring->to_use; e = e->next) { + for(e = ring->to_clean; e != ring->to_use; e = e->next) { struct skge_tx_desc *td = e->desc; u32 control; @@ -2640,12 +2690,12 @@ static void skge_mac_parity(struct skge_hw *hw, int port) : (port == 0 ? "(port A)": "(port B")); if (hw->chip_id == CHIP_ID_GENESIS) - skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_PERR); else /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ - skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), - (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), + (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0) ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); } @@ -2653,16 +2703,16 @@ static void skge_pci_clear(struct skge_hw *hw) { u16 status; - pci_read_config_word(hw->pdev, PCI_STATUS, &status); + status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS)); skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - pci_write_config_word(hw->pdev, PCI_STATUS, - status | PCI_STATUS_ERROR_BITS); + skge_write16(hw, SKGEPCI_REG(PCI_STATUS), + status | PCI_STATUS_ERROR_BITS); skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } static void skge_mac_intr(struct skge_hw *hw, int port) { - if (hw->chip_id == CHIP_ID_GENESIS) + if (hw->chip_id == CHIP_ID_GENESIS) genesis_mac_intr(hw, port); else yukon_mac_intr(hw, port); @@ -2676,9 +2726,9 @@ static void skge_error_irq(struct skge_hw *hw) if (hw->chip_id == CHIP_ID_GENESIS) { /* clear xmac errors */ if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) - skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); + skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) - skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); + skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); } else { /* Timestamp (unused) overflow */ if (hwstatus & IS_IRQ_TIST_OV) @@ -2753,8 +2803,8 @@ static void skge_extirq(unsigned long data) if (hw->chip_id != CHIP_ID_GENESIS) yukon_phy_intr(skge); - else - bcom_phy_intr(skge); + else if (hw->phy_type == SK_PHY_BCOM) + genesis_bcom_intr(skge); } } spin_unlock(&hw->phy_lock); @@ -2774,14 +2824,19 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) return IRQ_NONE; status &= hw->intr_mask; - if (status & IS_R1_F) { + + if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) { + status &= ~IS_R1_F; hw->intr_mask &= ~IS_R1_F; - netif_rx_schedule(hw->dev[0]); + skge_write32(hw, B0_IMSK, hw->intr_mask); + __netif_rx_schedule(hw->dev[0]); } - if (status & IS_R2_F) { + if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) { + status &= ~IS_R2_F; hw->intr_mask &= ~IS_R2_F; - netif_rx_schedule(hw->dev[1]); + skge_write32(hw, B0_IMSK, hw->intr_mask); + __netif_rx_schedule(hw->dev[1]); } if (status & IS_XA1_F) @@ -2790,27 +2845,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) if (status & IS_XA2_F) skge_tx_intr(hw->dev[1]); - if (status & IS_PA_TO_RX1) { - struct skge_port *skge = netdev_priv(hw->dev[0]); - ++skge->net_stats.rx_over_errors; - skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); - } - - if (status & IS_PA_TO_RX2) { - struct skge_port *skge = netdev_priv(hw->dev[1]); - ++skge->net_stats.rx_over_errors; - skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); - } - - if (status & IS_PA_TO_TX1) - skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); - - if (status & IS_PA_TO_TX2) - skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); - if (status & IS_MAC1) skge_mac_intr(hw, 0); - + if (status & IS_MAC2) skge_mac_intr(hw, 1); @@ -2822,7 +2859,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) tasklet_schedule(&hw->ext_tasklet); } - skge_write32(hw, B0_IMSK, hw->intr_mask); + if (status) + skge_write32(hw, B0_IMSK, hw->intr_mask); return IRQ_HANDLED; } @@ -2866,6 +2904,9 @@ static const struct { { CHIP_ID_YUKON, "Yukon" }, { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, { CHIP_ID_YUKON_LP, "Yukon-LP"}, + { CHIP_ID_YUKON_XL, "Yukon-2 XL"}, + { CHIP_ID_YUKON_EC, "Yukon-2 EC"}, + { CHIP_ID_YUKON_FE, "Yukon-2 FE"}, }; static const char *skge_board_name(const struct skge_hw
*hw) static int skge_reset(struct skge_hw *hw) { u16 ctst; - u8 t8, mac_cfg; - int i; + u8 t8; + int i, ports; ctst = skge_read16(hw, B0_CTST); @@ -2911,9 +2952,12 @@ static int skge_reset(struct skge_hw *hw) hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; hw->pmd_type = skge_read8(hw, B2_PMD_TYP); - switch (hw->chip_id) { + switch(hw->chip_id) { case CHIP_ID_GENESIS: switch (hw->phy_type) { + case SK_PHY_XMAC: + hw->phy_addr = PHY_ADDR_XMAC; + break; case SK_PHY_BCOM: hw->phy_addr = PHY_ADDR_BCOM; break; @@ -2942,9 +2986,8 @@ static int skge_reset(struct skge_hw *hw) return -EOPNOTSUPP; } - mac_cfg = skge_read8(hw, B2_MAC_CFG); - hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2; - hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; + hw->mac_cfg = skge_read8(hw, B2_MAC_CFG); + ports = isdualport(hw) ? 2 : 1; /* read the adapters RAM size */ t8 = skge_read8(hw, B2_E_0); @@ -2967,9 +3010,9 @@ static int skge_reset(struct skge_hw *hw) /* switch power to VCC (WA for VAUX problem) */ skge_write8(hw, B0_POWER_CTRL, PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); - for (i = 0; i < hw->ports; i++) { - skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); - skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + for (i = 0; i < ports; i++) { + skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); } } @@ -2979,8 +3022,8 @@ static int skge_reset(struct skge_hw *hw) skge_write8(hw, B0_LED, LED_STAT_ON); /* enable the Tx Arbiters */ - for (i = 0; i < hw->ports; i++) - skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + for (i = 0; i < ports; i++) + skge_write8(hw, SKGEMAC_REG(i, TXA_CTRL), TXA_ENA_ARB); /* Initialize ram interface */ skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); @@ -3007,14 +3050,16 @@ static int skge_reset(struct skge_hw *hw) skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); skge_write32(hw, B2_IRQM_CTRL, TIM_START); - hw->intr_mask = IS_HW_ERR | IS_EXT_REG; + hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; + if (isdualport(hw)) + hw->intr_mask |= IS_PORT_2; skge_write32(hw, B0_IMSK, hw->intr_mask); if (hw->chip_id != CHIP_ID_GENESIS) skge_write8(hw, GMAC_IRQ_MSK, 0); spin_lock_bh(&hw->phy_lock); - for (i = 0; i < hw->ports; i++) { + for (i = 0; i < ports; i++) { if (hw->chip_id == CHIP_ID_GENESIS) genesis_reset(hw, i); else @@ -3026,8 +3071,7 @@ static int skge_reset(struct skge_hw *hw) } /* Initialize network device */ -static struct net_device *skge_devinit(struct skge_hw *hw, int port, - int highmem) +static struct net_device *skge_devinit(struct skge_hw *hw, int port) { struct skge_port *skge; struct net_device *dev = alloc_etherdev(sizeof(*skge)); @@ -3060,8 +3104,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, #endif dev->irq = hw->pdev->irq; dev->features = NETIF_F_LLTX; - if (highmem) - dev->features |= NETIF_F_HIGHDMA; skge = netdev_priv(dev); skge->netdev = dev; @@ -3075,7 +3117,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, skge->flow_control = FLOW_MODE_SYMMETRIC; skge->duplex = -1; skge->speed = -1; - skge->advertising = skge_supported_modes(hw); + skge->advertising = skge_modes(hw); hw->dev[port] = dev; @@ -3083,6 +3125,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, spin_lock_init(&skge->tx_lock); + init_timer(&skge->link_check); + skge->link_check.function = skge_link_timer; + skge->link_check.data = (unsigned long) skge; + init_timer(&skge->led_blink); skge->led_blink.function = skge_blink_timer; 
skge->led_blink.data = (unsigned long) skge; @@ -3186,11 +3232,14 @@ static int __devinit skge_probe(struct pci_dev *pdev, printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n", pci_resource_start(pdev, 0), pdev->irq, - skge_board_name(hw), hw->chip_rev); + skge_board_name(hw), chip_rev(hw)); - if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) + if ((dev = skge_devinit(hw, 0)) == NULL) goto err_out_led_off; + if (using_dac) + dev->features |= NETIF_F_HIGHDMA; + if ((err = register_netdev(dev))) { printk(KERN_ERR PFX "%s: cannot register net device\n", pci_name(pdev)); @@ -3199,7 +3248,10 @@ static int __devinit skge_probe(struct pci_dev *pdev, skge_show_addr(dev); - if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { + if (isdualport(hw) && (dev1 = skge_devinit(hw, 1))) { + if (using_dac) + dev1->features |= NETIF_F_HIGHDMA; + if (register_netdev(dev1) == 0) skge_show_addr(dev1); else { @@ -3236,7 +3288,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) struct skge_hw *hw = pci_get_drvdata(pdev); struct net_device *dev0, *dev1; - if (!hw) + if(!hw) return; if ((dev1 = hw->dev[1])) @@ -3264,7 +3316,7 @@ static int skge_suspend(struct pci_dev *pdev, u32 state) struct skge_hw *hw = pci_get_drvdata(pdev); int i, wol = 0; - for (i = 0; i < 2; i++) { + for(i = 0; i < 2; i++) { struct net_device *dev = hw->dev[i]; if (dev) { @@ -3297,11 +3349,11 @@ static int skge_resume(struct pci_dev *pdev) skge_reset(hw); - for (i = 0; i < 2; i++) { + for(i = 0; i < 2; i++) { struct net_device *dev = hw->dev[i]; if (dev) { netif_device_attach(dev); - if (netif_running(dev)) + if(netif_running(dev)) skge_up(dev); } } diff --git a/trunk/drivers/net/skge.h b/trunk/drivers/net/skge.h index 14d0cc01fb9a..36c62b68fab4 100644 --- a/trunk/drivers/net/skge.h +++ b/trunk/drivers/net/skge.h @@ -7,6 +7,31 @@ /* PCI config registers */ #define PCI_DEV_REG1 0x40 #define PCI_DEV_REG2 0x44 +#ifndef PCI_VPD +#define PCI_VPD 0x50 +#endif + +/* PCI_OUR_REG_2 32 bit Our Register 2 */ +enum { + PCI_VPD_WR_THR = 0xff<<24, /* Bit 31..24: VPD Write Threshold */ + PCI_DEV_SEL = 0x7f<<17, /* Bit 23..17: EEPROM Device Select */ + PCI_VPD_ROM_SZ = 7 <<14, /* Bit 16..14: VPD ROM Size */ + /* Bit 13..12: reserved */ + PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */ + PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */ + PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ +}; + +/* PCI_VPD_ADR_REG 16 bit VPD Address Register */ +enum { + PCI_VPD_FLAG = 1<<15, /* starts VPD rd/wr cycle */ + PCI_VPD_ADR_MSK =0x7fffL, /* Bit 14.. 0: VPD Address Mask */ + VPD_RES_ID = 0x82, + VPD_RES_READ = 0x90, + VPD_RES_WRITE = 0x81, + VPD_RES_END = 0x78, +}; + #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ PCI_STATUS_SIG_SYSTEM_ERROR | \ @@ -14,6 +39,7 @@ PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_PARITY) + enum csr_regs { B0_RAP = 0x0000, B0_CTST = 0x0004, @@ -203,11 +229,8 @@ enum { IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ - IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1, - IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2, - - IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1, - IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2, + IS_PORT_1 = IS_XA1_F| IS_R1_F| IS_MAC1, + IS_PORT_2 = IS_XA2_F| IS_R2_F| IS_MAC2, }; @@ -265,6 +288,14 @@ enum { CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. 
for YUKON-Lite A3 */ }; +/* B2_LD_TEST 8 bit EPROM loader test register */ +enum { + LD_T_ON = 1<<3, /* Loader Test mode on */ + LD_T_OFF = 1<<2, /* Loader Test mode off */ + LD_T_STEP = 1<<1, /* Decrement FPROM addr. Counter */ + LD_START = 1<<0, /* Start loading FPROM */ +}; + /* B2_TI_CTRL 8 bit Timer control */ /* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ enum { @@ -282,6 +313,16 @@ enum { TIM_T_STEP = 1<<0, /* Test step */ }; +/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */ +/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */ +/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */ +enum { + DPT_MSK = 0x00ffffffL, /* Bit 23.. 0: Desc Poll Timer Bits */ + + DPT_START = 1<<1, /* Start Descriptor Poll Timer */ + DPT_STOP = 1<<0, /* Stop Descriptor Poll Timer */ +}; + /* B2_GP_IO 32 bit General Purpose I/O Register */ enum { GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ @@ -307,6 +348,30 @@ enum { GP_IO_0 = 1<<0, /* IO_0 pin */ }; +/* Rx/Tx Path related Arbiter Test Registers */ +/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */ +/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */ +/* B3_PA_TEST 16 bit Packet Arbiter Test Register */ +/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */ +enum { + TX2_T_EV = 1<<15,/* TX2 Timeout/Recv Event occurred */ + TX2_T_ON = 1<<14,/* TX2 Timeout/Recv Timer Test On */ + TX2_T_OFF = 1<<13,/* TX2 Timeout/Recv Timer Tst Off */ + TX2_T_STEP = 1<<12,/* TX2 Timeout/Recv Timer Step */ + TX1_T_EV = 1<<11,/* TX1 Timeout/Recv Event occurred */ + TX1_T_ON = 1<<10,/* TX1 Timeout/Recv Timer Test On */ + TX1_T_OFF = 1<<9, /* TX1 Timeout/Recv Timer Tst Off */ + TX1_T_STEP = 1<<8, /* TX1 Timeout/Recv Timer Step */ + RX2_T_EV = 1<<7, /* RX2 Timeout/Recv Event occurred */ + RX2_T_ON = 1<<6, /* RX2 Timeout/Recv Timer Test On */ + RX2_T_OFF = 1<<5, /* RX2 Timeout/Recv Timer Tst Off */ + RX2_T_STEP = 1<<4, /* RX2 Timeout/Recv Timer Step */ + RX1_T_EV = 1<<3, /* RX1 Timeout/Recv Event occurred */ + RX1_T_ON = 1<<2, /* RX1 Timeout/Recv Timer Test On */ + RX1_T_OFF = 1<<1, /* RX1 Timeout/Recv Timer Tst Off */ + RX1_T_STEP = 1<<0, /* RX1 Timeout/Recv Timer Step */ +}; + /* Descriptor Bit Definition */ /* TxCtrl Transmit Buffer Control Field */ /* RxCtrl Receive Buffer Control Field */ @@ -363,6 +428,14 @@ enum { RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ }; +/* B3_RI_TEST 8 bit RAM Iface Test Register */ +enum { + RI_T_EV = 1<<3, /* Timeout Event occurred */ + RI_T_ON = 1<<2, /* Timeout Timer Test On */ + RI_T_OFF = 1<<1, /* Timeout Timer Test Off */ + RI_T_STEP = 1<<0, /* Timeout Timer Step */ +}; + /* MAC Arbiter Registers */ /* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ enum { @@ -379,6 +452,19 @@ enum { #define SK_PKT_TO_MAX 0xffff /* Maximum value */ #define SK_RI_TO_53 36 /* RAM interface timeout */ + +/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */ +enum { + MA_ENA_REC_TX2 = 1<<7, /* Enable Recovery Timer TX2 */ + MA_DIS_REC_TX2 = 1<<6, /* Disable Recovery Timer TX2 */ + MA_ENA_REC_TX1 = 1<<5, /* Enable Recovery Timer TX1 */ + MA_DIS_REC_TX1 = 1<<4, /* Disable Recovery Timer TX1 */ + MA_ENA_REC_RX2 = 1<<3, /* Enable Recovery Timer RX2 */ + MA_DIS_REC_RX2 = 1<<2, /* Disable Recovery Timer RX2 */ + MA_ENA_REC_RX1 = 1<<1, /* Enable Recovery Timer RX1 */ + MA_DIS_REC_RX1 = 1<<0, /* Disable Recovery Timer RX1 */ +}; + /* Packet Arbiter Registers */ /* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ enum { @@ -402,7 +488,7 @@ enum { PA_ENA_TO_TX1 | PA_ENA_TO_TX2) -/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG()
to access */ +/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ /* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ /* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ /* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ @@ -425,7 +511,7 @@ enum { /* * Bank 4 - 5 */ -/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ enum { TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ @@ -451,7 +537,7 @@ enum { /* Queue Register Offsets, use Q_ADDR() to access */ enum { - B8_Q_REGS = 0x0400, /* base of Queue registers */ + B8_Q_REGS = 0x0400, /* base of Queue registers */ Q_D = 0x00, /* 8*32 bit Current Descriptor */ Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ @@ -532,7 +618,8 @@ enum { enum { PHY_ADDR_XMAC = 0<<8, PHY_ADDR_BCOM = 1<<8, - + PHY_ADDR_LONE = 3<<8, + PHY_ADDR_NAT = 0<<8, /* GPHY address (bits 15..11 of SMI control reg) */ PHY_ADDR_MARV = 0, }; @@ -899,7 +986,7 @@ enum { LINKLED_BLINK_OFF = 0x10, LINKLED_BLINK_ON = 0x20, }; - + /* GMAC and GPHY Control Registers (YUKON only) */ enum { GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ @@ -1064,6 +1151,54 @@ enum { PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ }; +/* Level One-PHY Registers, indirect addressed over XMAC */ +enum { + PHY_LONE_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_LONE_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_LONE_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_LONE_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_LONE_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_LONE_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_LONE_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_LONE_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_LONE_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Level One-specific registers */ + PHY_LONE_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_LONE_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_LONE_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_LONE_PORT_CFG = 0x10,/* 16 bit r/w Port Configuration Reg*/ + PHY_LONE_Q_STAT = 0x11,/* 16 bit r/o Quick Status Reg */ + PHY_LONE_INT_ENAB = 0x12,/* 16 bit r/w Interrupt Enable Reg */ + PHY_LONE_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_LONE_LED_CFG = 0x14,/* 16 bit r/w LED Configuration Reg */ + PHY_LONE_PORT_CTRL = 0x15,/* 16 bit r/w Port Control Reg */ + PHY_LONE_CIM = 0x16,/* 16 bit r/o CIM Reg */ +}; + +/* National-PHY Registers, indirect addressed over XMAC */ +enum { + PHY_NAT_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_NAT_STAT = 0x01,/* 16 bit r/w PHY Status Register */ + PHY_NAT_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_NAT_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_NAT_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_NAT_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */ + PHY_NAT_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. 
Expansion Reg */
+ PHY_NAT_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_NAT_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner Reg */
+ /* National-specific registers */
+ PHY_NAT_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_NAT_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_NAT_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Register */
+ PHY_NAT_EXT_CTRL1 = 0x10,/* 16 bit r/o Extended Control Reg1 */
+ PHY_NAT_Q_STAT1 = 0x11,/* 16 bit r/o Quick Status Reg1 */
+ PHY_NAT_10B_OP = 0x12,/* 16 bit r/o 10Base-T Operations Reg */
+ PHY_NAT_EXT_CTRL2 = 0x13,/* 16 bit r/o Extended Control Reg2 */
+ PHY_NAT_Q_STAT2 = 0x14,/* 16 bit r/o Quick Status Reg2 */
+
+ PHY_NAT_PHY_ADDR = 0x19,/* 16 bit r/o PHY Address Register */
+};
+
 enum {
 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
@@ -1118,29 +1253,8 @@ enum {
 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
 };

-/* Advertisement register bits */
 enum {
 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
- PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
- PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
-
- PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
- PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
- PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
- PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
- PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
- PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
- PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
- PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
- PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet */
- PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
- PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
- PHY_AN_100HALF | PHY_AN_100FULL,
-};
-
-/* Xmac Specific */
-enum {
- PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
@@ -1149,6 +1263,82 @@ enum {
 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
 };

+enum {
+ PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
+
+ PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
+ PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
+ PHY_B_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet */
+};
+
+enum {
+ PHY_L_AN_RF = 1<<13, /* Bit 13: Remote Fault */
+ /* Bit 12: reserved */
+ PHY_L_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
+ PHY_L_AN_PC = 1<<10, /* Bit 10: Pause Capable */
+
+ PHY_L_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet */
+};
+
+/* PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement */
+/* PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
+/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
+enum {
+ PHY_N_AN_RF = 1<<13, /* Bit 13: Remote Fault */
+
+ PHY_N_AN_100F = 1<<11, /* Bit 11: 100Base-T2 FD Support */
+ PHY_N_AN_100H = 1<<10, /* Bit 10: 100Base-T2 HD Support */
+
+ PHY_N_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet */
+};
+
+/* field type definition for PHY_x_AN_SEL */
+enum {
+ PHY_SEL_TYPE = 1, /* 00001 = Ethernet */
+};
+
+enum {
+ PHY_ANE_LP_NP = 1<<3, /* Bit 3: Link Partner can Next Page */
+ PHY_ANE_LOC_NP = 1<<2, /* Bit 2: Local PHY can Next Page */
+ PHY_ANE_RX_PG = 1<<1, /* Bit 1: Page Received */
+};
+
+enum {
+ PHY_ANE_PAR_DF = 1<<4, /* Bit 4: Parallel Detection Fault */
+
+ PHY_ANE_LP_CAP =
1<<0, /* Bit 0: Link Partner Auto-Neg. Cap. */ +}; + +enum { + PHY_NP_MORE = 1<<15, /* Bit 15: More, Next Pages to follow */ + PHY_NP_ACK1 = 1<<14, /* Bit 14: (ro) Ack1, for receiving a message */ + PHY_NP_MSG_VAL = 1<<13, /* Bit 13: Message Page valid */ + PHY_NP_ACK2 = 1<<12, /* Bit 12: Ack2, comply with msg content */ + PHY_NP_TOG = 1<<11, /* Bit 11: Toggle Bit, ensure sync */ + PHY_NP_MSG = 0x07ff, /* Bit 10..0: Message from/to Link Partner */ +}; + +enum { + PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */ + PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */ +}; + +enum { + PHY_X_RS_PAUSE = 3<<7,/* Bit 8..7: selected Pause Mode */ + PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */ + PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */ + PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */ + PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */ +}; + +/** Remote Fault Bits (PHY_X_AN_RFB) encoding */ +enum { + X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */ + X_RFB_LF = 1<<12, /* Bit 13..12 Link Failure */ + X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */ + X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */ +}; + /* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ enum { PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */ @@ -1228,16 +1418,6 @@ enum { PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ }; -/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ -/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ -enum { - PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ - - PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ - PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ -}; - - /***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ enum { PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ @@ -1298,9 +1478,7 @@ enum { PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ }; -#define PHY_B_DEF_MSK \ - (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \ - PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE)) +#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) /* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ enum { @@ -1317,6 +1495,166 @@ enum { PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. 
*/ }; +/* + * Level One-Specific + */ +/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_L_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_L_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */ + PHY_L_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */ + PHY_L_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */ + PHY_L_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */ + PHY_L_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */ +}; + +/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_L_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_L_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_L_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_L_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_L_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_L_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + + PHY_L_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ + +/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/ + PHY_L_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_L_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_L_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_L_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + +/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/ +enum { + PHY_L_PC_REP_MODE = 1<<15, /* Bit 15: Repeater Mode */ + + PHY_L_PC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */ + PHY_L_PC_BY_SCR = 1<<12, /* Bit 12: Bypass Scrambler */ + PHY_L_PC_BY_45 = 1<<11, /* Bit 11: Bypass 4B5B-Decoder */ + PHY_L_PC_JAB_DIS = 1<<10, /* Bit 10: Jabber Disabled */ + PHY_L_PC_SQE = 1<<9, /* Bit 9: Enable Heartbeat */ + PHY_L_PC_TP_LOOP = 1<<8, /* Bit 8: TP Loopback */ + PHY_L_PC_SSS = 1<<7, /* Bit 7: Smart Speed Selection */ + PHY_L_PC_FIFO_SIZE = 1<<6, /* Bit 6: FIFO Size */ + PHY_L_PC_PRE_EN = 1<<5, /* Bit 5: Preamble Enable */ + PHY_L_PC_CIM = 1<<4, /* Bit 4: Carrier Integrity Mon */ + PHY_L_PC_10_SER = 1<<3, /* Bit 3: Use Serial Output */ + PHY_L_PC_ANISOL = 1<<2, /* Bit 2: Unisolate Port */ + PHY_L_PC_TEN_BIT = 1<<1, /* Bit 1: 10bit iface mode on */ + PHY_L_PC_ALTCLOCK = 1<<0, /* Bit 0: (ro) ALTCLOCK Mode on */ +}; + +/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/ +enum { + PHY_L_QS_D_RATE = 3<<14,/* Bit 15..14: Data Rate */ + PHY_L_QS_TX_STAT = 1<<13, /* Bit 13: Transmitting */ + PHY_L_QS_RX_STAT = 1<<12, /* Bit 12: Receiving */ + PHY_L_QS_COL_STAT = 1<<11, /* Bit 11: Collision */ + PHY_L_QS_L_STAT = 1<<10, /* Bit 10: Link is up */ + PHY_L_QS_DUP_MOD = 1<<9, /* Bit 9: Full/Half Duplex */ + PHY_L_QS_AN = 1<<8, /* Bit 8: AutoNeg is On */ + PHY_L_QS_AN_C = 1<<7, /* Bit 7: AN is Complete */ + PHY_L_QS_LLE = 7<<4,/* Bit 6..4: Line Length Estim. */ + PHY_L_QS_PAUSE = 1<<3, /* Bit 3: LP advertised Pause */ + PHY_L_QS_AS_PAUSE = 1<<2, /* Bit 2: LP adv. asym. Pause */ + PHY_L_QS_ISOLATE = 1<<1, /* Bit 1: CIM Isolated */ + PHY_L_QS_EVENT = 1<<0, /* Bit 0: Event has occurred */ +}; + +/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/ +/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +enum { + PHY_L_IS_AN_F = 1<<13, /* Bit 13: Auto-Negotiation fault */ + PHY_L_IS_CROSS = 1<<11, /* Bit 11: Crossover used */ + PHY_L_IS_POL = 1<<10, /* Bit 10: Polarity correct. 
used */
+ PHY_L_IS_SS = 1<<9, /* Bit 9: Smart Speed Downgrade */
+ PHY_L_IS_CFULL = 1<<8, /* Bit 8: Counter Full */
+ PHY_L_IS_AN_C = 1<<7, /* Bit 7: AutoNeg Complete */
+ PHY_L_IS_SPEED = 1<<6, /* Bit 6: Speed Changed */
+ PHY_L_IS_DUP = 1<<5, /* Bit 5: Duplex Changed */
+ PHY_L_IS_LS = 1<<4, /* Bit 4: Link Status Changed */
+ PHY_L_IS_ISOL = 1<<3, /* Bit 3: Isolate Occurred */
+ PHY_L_IS_MDINT = 1<<2, /* Bit 2: (ro) STAT: MII Int Pending */
+ PHY_L_IS_INTEN = 1<<1, /* Bit 1: ENAB: Enable IRQs */
+ PHY_L_IS_FORCE = 1<<0, /* Bit 0: ENAB: Force Interrupt */
+};
+
+/* int. mask */
+#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
+
+/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
+enum {
+ PHY_L_LC_LEDC = 3<<14,/* Bit 15..14: Col/Blink/On/Off */
+ PHY_L_LC_LEDR = 3<<12,/* Bit 13..12: Rx/Blink/On/Off */
+ PHY_L_LC_LEDT = 3<<10,/* Bit 11..10: Tx/Blink/On/Off */
+ PHY_L_LC_LEDG = 3<<8,/* Bit 9..8: Giga/Blink/On/Off */
+ PHY_L_LC_LEDS = 3<<6,/* Bit 7..6: 10-100/Blink/On/Off */
+ PHY_L_LC_LEDL = 3<<4,/* Bit 5..4: Link/Blink/On/Off */
+ PHY_L_LC_LEDF = 3<<2,/* Bit 3..2: Duplex/Blink/On/Off */
+ PHY_L_LC_PSTRECH= 1<<1, /* Bit 1: Stretch LED Pulses */
+ PHY_L_LC_FREQ = 1<<0, /* Bit 0: 30/100 ms */
+};
+
+/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
+enum {
+ PHY_L_PC_TX_TCLK = 1<<15, /* Bit 15: Enable TX_TCLK */
+ PHY_L_PC_ALT_NP = 1<<13, /* Bit 13: Alternate Next Page */
+ PHY_L_PC_GMII_ALT= 1<<12, /* Bit 12: Alternate GMII driver */
+ PHY_L_PC_TEN_CRS = 1<<10, /* Bit 10: Extend CRS */
+};
+
+/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
+enum {
+ PHY_L_CIM_ISOL = 0xff<<8,/* Bit 15..8: Isolate Count */
+ PHY_L_CIM_FALSE_CAR = 0xff, /* Bit 7..0: False Carrier Count */
+};
+
+/*
+ * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
+ */
+enum {
+ PHY_L_P_NO_PAUSE= 0<<10,/* Bit 11..10: no Pause Mode */
+ PHY_L_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
+ PHY_L_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
+ PHY_L_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
+};
+
+/*
+ * National-Specific
+ */
+/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_N_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
+ PHY_N_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
+ PHY_N_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
+ PHY_N_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
+ PHY_N_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
+ PHY_N_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
+ PHY_N_1000C_APC = 1<<7, /* Bit 7: Asymmetric Pause Cap. */
+};
+
+/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
+enum {
+ PHY_N_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
+ PHY_N_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
+ PHY_N_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
+ PHY_N_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
+ PHY_N_1000S_LP_FD= 1<<11, /* Bit 11: Link Partner can FD */
+ PHY_N_1000S_LP_HD= 1<<10, /* Bit 10: Link Partner can HD */
+ PHY_N_1000C_LP_APC= 1<<9, /* Bit 9: LP Asym. Pause Cap.
*/ + PHY_N_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/ +enum { + PHY_N_ES_X_FD_CAP= 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_N_ES_X_HD_CAP= 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_N_ES_T_FD_CAP= 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_N_ES_T_HD_CAP= 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + /** Marvell-Specific */ enum { PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ @@ -1380,7 +1718,7 @@ enum { PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ }; -#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK) +#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK) enum { PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ @@ -1767,7 +2105,7 @@ enum { GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ }; - + /* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ enum { GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ @@ -1789,7 +2127,7 @@ enum { #define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) #define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) - + /* GM_TX_CTRL 16 bit r/w Transmit Control Register */ enum { GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ @@ -1800,7 +2138,7 @@ enum { #define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) #define TX_COL_DEF 0x04 - + /* GM_RX_CTRL 16 bit r/w Receive Control Register */ enum { GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ @@ -1808,7 +2146,7 @@ enum { GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ }; - + /* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ enum { GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ @@ -1833,7 +2171,7 @@ enum { GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ }; - + #define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) #define DATA_BLIND_DEF 0x04 @@ -1848,7 +2186,7 @@ enum { GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ }; - + #define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) #define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) @@ -1857,7 +2195,7 @@ enum { GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ }; - + /* Receive Frame Status Encoding */ enum { GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ @@ -1879,12 +2217,12 @@ enum { /* * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) */ - GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | - GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | + GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_JABBER, /* Rx GMAC FIFO Flush Mask (default) */ RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | - GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | + GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, }; @@ -2202,6 +2540,10 @@ enum { }; +/* XM_PHY_ADDR 16 bit r/w PHY Address Register */ +#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */ + + /* XM_GP_PORT 32 bit r/w General Purpose Port Register */ enum { XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. 
in progress */ @@ -2320,8 +2662,8 @@ enum { }; #define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) -#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ - XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) /* XM_STAT_CMD 16 bit r/w Statistics Command Register */ enum { @@ -2451,20 +2793,28 @@ struct skge_hw { u32 intr_mask; struct net_device *dev[2]; + u8 mac_cfg; u8 chip_id; - u8 chip_rev; u8 phy_type; u8 pmd_type; u16 phy_addr; - u8 ports; u32 ram_size; u32 ram_offset; - + struct tasklet_struct ext_tasklet; spinlock_t phy_lock; }; +static inline int isdualport(const struct skge_hw *hw) +{ + return !(hw->mac_cfg & CFG_SNG_MAC); +} + +static inline u8 chip_rev(const struct skge_hw *hw) +{ + return (hw->mac_cfg & CFG_CHIP_R_MSK) >> 4; +} static inline int iscopper(const struct skge_hw *hw) { @@ -2477,7 +2827,7 @@ enum { FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */ FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ }; - + struct skge_port { u32 msg_enable; struct skge_hw *hw; @@ -2503,8 +2853,8 @@ struct skge_port { void *mem; /* PCI memory for rings */ dma_addr_t dma; unsigned long mem_size; - unsigned int rx_buf_size; + struct timer_list link_check; struct timer_list led_blink; }; @@ -2513,6 +2863,7 @@ struct skge_port { static inline u32 skge_read32(const struct skge_hw *hw, int reg) { return readl(hw->regs + reg); + } static inline u16 skge_read16(const struct skge_hw *hw, int reg) @@ -2541,87 +2892,114 @@ static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val) } /* MAC Related Registers inside the device. */ -#define SK_REG(port,reg) (((port)<<7)+(reg)) -#define SK_XMAC_REG(port, reg) \ +#define SKGEMAC_REG(port,reg) (((port)<<7)+(reg)) + +/* PCI config space can be accessed via memory mapped space */ +#define SKGEPCI_REG(reg) ((reg)+ 0x380) + +#define SKGEXM_REG(port, reg) \ ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) -static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) +static inline u32 skge_xm_read32(const struct skge_hw *hw, int port, int reg) +{ + return skge_read32(hw, SKGEXM_REG(port,reg)); +} + +static inline u16 skge_xm_read16(const struct skge_hw *hw, int port, int reg) { - u32 v; - v = skge_read16(hw, SK_XMAC_REG(port, reg)); - v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; - return v; + return skge_read16(hw, SKGEXM_REG(port,reg)); } -static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg) +static inline u8 skge_xm_read8(const struct skge_hw *hw, int port, int reg) { - return skge_read16(hw, SK_XMAC_REG(port,reg)); + return skge_read8(hw, SKGEXM_REG(port,reg)); } -static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v) +static inline void skge_xm_write32(const struct skge_hw *hw, int port, int r, u32 v) { - skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff); - skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16); + skge_write32(hw, SKGEXM_REG(port,r), v); } -static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v) +static inline void skge_xm_write16(const struct skge_hw *hw, int port, int r, u16 v) { - skge_write16(hw, SK_XMAC_REG(port,r), v); + skge_write16(hw, SKGEXM_REG(port,r), v); } -static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, +static inline void skge_xm_write8(const struct skge_hw *hw, int port, int r, u8 v) +{ + skge_write8(hw, 
SKGEXM_REG(port,r), v);
+}
+
+static inline void skge_xm_outhash(const struct skge_hw *hw, int port, int reg,
 const u8 *hash)
 {
- xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
- xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
- xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
- xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
+ skge_xm_write16(hw, port, reg,
+ (u16)hash[0] | ((u16)hash[1] << 8));
+ skge_xm_write16(hw, port, reg+2,
+ (u16)hash[2] | ((u16)hash[3] << 8));
+ skge_xm_write16(hw, port, reg+4,
+ (u16)hash[4] | ((u16)hash[5] << 8));
+ skge_xm_write16(hw, port, reg+6,
+ (u16)hash[6] | ((u16)hash[7] << 8));
 }

-static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
+static inline void skge_xm_outaddr(const struct skge_hw *hw, int port, int reg,
 const u8 *addr)
 {
- xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
- xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
- xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
+ skge_xm_write16(hw, port, reg,
+ (u16)addr[0] | ((u16)addr[1] << 8));
+ skge_xm_write16(hw, port, reg+2,
+ (u16)addr[2] | ((u16)addr[3] << 8));
+ skge_xm_write16(hw, port, reg+4,
+ (u16)addr[4] | ((u16)addr[5] << 8));
 }

-#define SK_GMAC_REG(port,reg) \
- (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
-static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
+#define SKGEGMA_REG(port,reg) \
+ ((reg) + BASE_GMAC_1 + \
+ (port) * (BASE_GMAC_2-BASE_GMAC_1))
+
+static inline u16 skge_gma_read16(const struct skge_hw *hw, int port, int reg)
 {
- return skge_read16(hw, SK_GMAC_REG(port,reg));
+ return skge_read16(hw, SKGEGMA_REG(port,reg));
 }

-static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
+static inline u32 skge_gma_read32(const struct skge_hw *hw, int port, int reg)
 {
- return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
- | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
+ return (u32) skge_read16(hw, SKGEGMA_REG(port,reg))
+ | ((u32)skge_read16(hw, SKGEGMA_REG(port,reg+4)) << 16);
 }

-static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
+static inline u8 skge_gma_read8(const struct skge_hw *hw, int port, int reg)
 {
- skge_write16(hw, SK_GMAC_REG(port,r), v);
+ return skge_read8(hw, SKGEGMA_REG(port,reg));
 }

-static inline void gma_write32(const struct skge_hw *hw, int port, int r, u32 v)
+static inline void skge_gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
 {
- skge_write16(hw, SK_GMAC_REG(port, r), (u16) v);
- skge_write32(hw, SK_GMAC_REG(port, r+4), (u16)(v >> 16));
+ skge_write16(hw, SKGEGMA_REG(port,r), v);
 }

-static inline void gma_write8(const struct skge_hw *hw, int port, int r, u8 v)
+static inline void skge_gma_write32(const struct skge_hw *hw, int port, int r, u32 v)
 {
- skge_write8(hw, SK_GMAC_REG(port,r), v);
+ skge_write16(hw, SKGEGMA_REG(port, r), (u16) v);
+ skge_write32(hw, SKGEGMA_REG(port, r+4), (u16)(v >> 16));
 }

-static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
- const u8 *addr)
+static inline void skge_gma_write8(const struct skge_hw *hw, int port, int r, u8 v)
 {
- gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
- gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
- gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+ skge_write8(hw, SKGEGMA_REG(port,r), v);
 }

+static inline void skge_gm_set_addr(struct skge_hw *hw, int port, int reg,
+ const u8 *addr)
+{
+
skge_gma_write16(hw, port, reg,
+ (u16) addr[0] | ((u16) addr[1] << 8));
+ skge_gma_write16(hw, port, reg+4,
+ (u16) addr[2] | ((u16) addr[3] << 8));
+ skge_gma_write16(hw, port, reg+8,
+ (u16) addr[4] | ((u16) addr[5] << 8));
+}
+
 #endif
diff --git a/trunk/fs/Makefile b/trunk/fs/Makefile
index fc92e59e9faf..20edcf28bfd2 100644
--- a/trunk/fs/Makefile
+++ b/trunk/fs/Makefile
@@ -10,6 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
 ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
 attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
 seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
+ ioprio.o

 obj-$(CONFIG_EPOLL) += eventpoll.o
 obj-$(CONFIG_COMPAT) += compat.o
diff --git a/trunk/fs/ioprio.c b/trunk/fs/ioprio.c
new file mode 100644
index 000000000000..663e420636d6
--- /dev/null
+++ b/trunk/fs/ioprio.c
@@ -0,0 +1,172 @@
+/*
+ * fs/ioprio.c
+ *
+ * Copyright (C) 2004 Jens Axboe
+ *
+ * Helper functions for setting/querying io priorities of processes. The
+ * system calls closely mimic getpriority/setpriority; see the man page for
+ * those. The prio argument is a composite of prio class and prio data, where
+ * the data argument has meaning within that class. The standard scheduling
+ * classes have 8 distinct prio levels, with 0 being the highest prio and 7
+ * being the lowest.
+ *
+ * IOW, setting the BE scheduling class with prio 2 is done as follows:
+ *
+ * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
+ *
+ * ioprio_set(PRIO_PROCESS, pid, prio);
+ *
+ * See also Documentation/block/ioprio.txt
+ *
+ */
+#include
+#include
+#include
+
+static int set_task_ioprio(struct task_struct *task, int ioprio)
+{
+ struct io_context *ioc;
+
+ if (task->uid != current->euid &&
+ task->uid != current->uid && !capable(CAP_SYS_NICE))
+ return -EPERM;
+
+ task_lock(task);
+
+ task->ioprio = ioprio;
+
+ ioc = task->io_context;
+ if (ioc && ioc->set_ioprio)
+ ioc->set_ioprio(ioc, ioprio);
+
+ task_unlock(task);
+ return 0;
+}
+
+asmlinkage int sys_ioprio_set(int which, int who, int ioprio)
+{
+ int class = IOPRIO_PRIO_CLASS(ioprio);
+ int data = IOPRIO_PRIO_DATA(ioprio);
+ struct task_struct *p, *g;
+ struct user_struct *user;
+ int ret;
+
+ switch (class) {
+ case IOPRIO_CLASS_RT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* fall through, rt has prio field too */
+ case IOPRIO_CLASS_BE:
+ if (data >= IOPRIO_BE_NR || data < 0)
+ return -EINVAL;
+
+ break;
+ case IOPRIO_CLASS_IDLE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = -ESRCH;
+ read_lock_irq(&tasklist_lock);
+ switch (which) {
+ case IOPRIO_WHO_PROCESS:
+ if (!who)
+ p = current;
+ else
+ p = find_task_by_pid(who);
+ if (p)
+ ret = set_task_ioprio(p, ioprio);
+ break;
+ case IOPRIO_WHO_PGRP:
+ if (!who)
+ who = process_group(current);
+ do_each_task_pid(who, PIDTYPE_PGID, p) {
+ ret = set_task_ioprio(p, ioprio);
+ if (ret)
+ break;
+ } while_each_task_pid(who, PIDTYPE_PGID, p);
+ break;
+ case IOPRIO_WHO_USER:
+ if (!who)
+ user = current->user;
+ else
+ user = find_user(who);
+
+ if (!user)
+ break;
+
+ do_each_thread(g, p) {
+ if (p->uid != user->uid)
+ continue;
+ ret = set_task_ioprio(p, ioprio);
+ if (ret)
+ break;
+ } while_each_thread(g, p);
+
+ if (who)
+ free_uid(user);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ read_unlock_irq(&tasklist_lock);
+ return ret;
+}
+
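For a sense of how the new calls are driven, a minimal userspace sketch (not part of the patch; the syscall numbers are the i386 ones added further down in this patch, and error handling is pared to the bone):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_CLASS_BE		2
	#define IOPRIO_WHO_PROCESS	1
	#define __NR_ioprio_set		289	/* i386, per asm-i386/unistd.h below */
	#define __NR_ioprio_get		290

	int main(void)
	{
		/* best-effort class, level 2, for the calling process (who == 0) */
		int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;

		if (syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
			perror("ioprio_set");

		printf("ioprio: %#lx\n",
		       syscall(__NR_ioprio_get, IOPRIO_WHO_PROCESS, 0));
		return 0;
	}
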
+asmlinkage int sys_ioprio_get(int which, int who)
+{
+ struct task_struct *g, *p;
+ struct user_struct *user;
+ int ret = -ESRCH;
+
+ read_lock_irq(&tasklist_lock);
+ switch (which) {
+ case IOPRIO_WHO_PROCESS:
+ if (!who)
+ p = current;
+ else
+ p = find_task_by_pid(who);
+ if (p)
+ ret = p->ioprio;
+ break;
+ case IOPRIO_WHO_PGRP:
+ if (!who)
+ who = process_group(current);
+ do_each_task_pid(who, PIDTYPE_PGID, p) {
+ if (ret == -ESRCH)
+ ret = p->ioprio;
+ else
+ ret = ioprio_best(ret, p->ioprio);
+ } while_each_task_pid(who, PIDTYPE_PGID, p);
+ break;
+ case IOPRIO_WHO_USER:
+ if (!who)
+ user = current->user;
+ else
+ user = find_user(who);
+
+ if (!user)
+ break;
+
+ do_each_thread(g, p) {
+ if (p->uid != user->uid)
+ continue;
+ if (ret == -ESRCH)
+ ret = p->ioprio;
+ else
+ ret = ioprio_best(ret, p->ioprio);
+ } while_each_thread(g, p);
+
+ if (who)
+ free_uid(user);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ read_unlock_irq(&tasklist_lock);
+ return ret;
+}
+
diff --git a/trunk/fs/reiserfs/journal.c b/trunk/fs/reiserfs/journal.c
index 7b87707acc36..d1bcf0da6728 100644
--- a/trunk/fs/reiserfs/journal.c
+++ b/trunk/fs/reiserfs/journal.c
@@ -645,18 +645,22 @@ struct buffer_chunk {
 static void write_chunk(struct buffer_chunk *chunk) {
 int i;
+ get_fs_excl();
 for (i = 0; i < chunk->nr ; i++) {
 submit_logged_buffer(chunk->bh[i]) ;
 }
 chunk->nr = 0;
+ put_fs_excl();
 }

 static void write_ordered_chunk(struct buffer_chunk *chunk) {
 int i;
+ get_fs_excl();
 for (i = 0; i < chunk->nr ; i++) {
 submit_ordered_buffer(chunk->bh[i]) ;
 }
 chunk->nr = 0;
+ put_fs_excl();
 }

 static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
@@ -918,6 +922,8 @@ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list
 return 0 ;
 }

+ get_fs_excl();
+
 /* before we can put our commit blocks on disk, we have to make sure everyone older than
 ** us is on disk too
 */
@@ -1055,6 +1061,7 @@ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list
 if (retval)
 reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
+ put_fs_excl();
 return retval;
 }

@@ -1251,6 +1258,8 @@ static int flush_journal_list(struct super_block *s,
 return 0 ;
 }

+ get_fs_excl();
+
 /* if all the work is already done, get out of here */
 if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
 atomic_read(&(jl->j_commit_left)) <= 0) {
@@ -1450,6 +1459,7 @@ static int flush_journal_list(struct super_block *s,
 put_journal_list(s, jl);
 if (flushall)
 up(&journal->j_flush_sem);
+ put_fs_excl();
 return err ;
 }

@@ -2719,6 +2729,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct sup
 th->t_trans_id = journal->j_trans_id ;
 unlock_journal(p_s_sb) ;
 INIT_LIST_HEAD (&th->t_list);
+ get_fs_excl();
 return 0 ;

 out_fail:
@@ -3526,6 +3537,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
 BUG_ON (th->t_refcount > 1);
 BUG_ON (!th->t_trans_id);

+ put_fs_excl();
 current->journal_info = th->t_handle_save;
 reiserfs_check_lock_depth(p_s_sb, "journal end");
 if (journal->j_len == 0) {
diff --git a/trunk/include/asm-i386/unistd.h b/trunk/include/asm-i386/unistd.h
index 176413fb9ae3..e25e4c71a879 100644
--- a/trunk/include/asm-i386/unistd.h
+++ b/trunk/include/asm-i386/unistd.h
@@ -294,8 +294,10 @@
 #define __NR_add_key 286
 #define __NR_request_key 287
 #define __NR_keyctl 288
+#define __NR_ioprio_set 289
+#define __NR_ioprio_get 290

-#define NR_syscalls 289
+#define NR_syscalls 291

 /*
 * user-visible error numbers are in the range -1 - -128: see
diff --git a/trunk/include/asm-ia64/unistd.h b/trunk/include/asm-ia64/unistd.h
index f7f43ec2483a..517f1649ee64 100644
--- a/trunk/include/asm-ia64/unistd.h
+++ b/trunk/include/asm-ia64/unistd.h
@@ -263,6 +263,8 @@ #define __NR_add_key 1271 #define __NR_request_key 1272 #define __NR_keyctl 1273 +#define __NR_ioprio_set 1274 +#define __NR_ioprio_get 1275 #define __NR_set_zone_reclaim 1276 #ifdef __KERNEL__ diff --git a/trunk/include/asm-ppc/unistd.h b/trunk/include/asm-ppc/unistd.h index cc51e5c9acc2..e8b79220b29c 100644 --- a/trunk/include/asm-ppc/unistd.h +++ b/trunk/include/asm-ppc/unistd.h @@ -277,8 +277,10 @@ #define __NR_request_key 270 #define __NR_keyctl 271 #define __NR_waitid 272 +#define __NR_ioprio_set 273 +#define __NR_ioprio_get 274 -#define __NR_syscalls 273 +#define __NR_syscalls 275 #define __NR(n) #n diff --git a/trunk/include/asm-x86_64/unistd.h b/trunk/include/asm-x86_64/unistd.h index d767adcbf0ff..6560439a83e4 100644 --- a/trunk/include/asm-x86_64/unistd.h +++ b/trunk/include/asm-x86_64/unistd.h @@ -561,8 +561,12 @@ __SYSCALL(__NR_add_key, sys_add_key) __SYSCALL(__NR_request_key, sys_request_key) #define __NR_keyctl 250 __SYSCALL(__NR_keyctl, sys_keyctl) +#define __NR_ioprio_set 251 +__SYSCALL(__NR_ioprio_set, sys_ioprio_set) +#define __NR_ioprio_get 252 +__SYSCALL(__NR_ioprio_get, sys_ioprio_get) -#define __NR_syscall_max __NR_keyctl +#define __NR_syscall_max __NR_ioprio_get #ifndef __NO_STUBS /* user-visible error numbers are in the range -1 - -4095 */ diff --git a/trunk/include/linux/bio.h b/trunk/include/linux/bio.h index 038022763f09..36ef29fa0d8b 100644 --- a/trunk/include/linux/bio.h +++ b/trunk/include/linux/bio.h @@ -22,6 +22,7 @@ #include #include +#include /* Platforms may set this to teach the BIO layer about IOMMU hardware. */ #include @@ -149,6 +150,19 @@ struct bio { #define BIO_RW_FAILFAST 3 #define BIO_RW_SYNC 4 +/* + * upper 16 bits of bi_rw define the io priority of this bio + */ +#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) +#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) +#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) + +#define bio_set_prio(bio, prio) do { \ + WARN_ON(prio >= (1 << IOPRIO_BITS)); \ + (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ + (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ +} while (0) + /* * various member access, note that bio_data should of course not be used * on highmem page vectors diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h index b54a0348a890..21a8674cd149 100644 --- a/trunk/include/linux/blkdev.h +++ b/trunk/include/linux/blkdev.h @@ -54,16 +54,23 @@ struct as_io_context { struct cfq_queue; struct cfq_io_context { - void (*dtor)(struct cfq_io_context *); - void (*exit)(struct cfq_io_context *); - - struct io_context *ioc; - /* * circular list of cfq_io_contexts belonging to a process io context */ struct list_head list; struct cfq_queue *cfqq; + void *key; + + struct io_context *ioc; + + unsigned long last_end_request; + unsigned long last_queue; + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; + + void (*dtor)(struct cfq_io_context *); + void (*exit)(struct cfq_io_context *); }; /* @@ -73,7 +80,9 @@ struct cfq_io_context { */ struct io_context { atomic_t refcount; - pid_t pid; + struct task_struct *task; + + int (*set_ioprio)(struct io_context *, unsigned int); /* * For request batching @@ -81,8 +90,6 @@ struct io_context { unsigned long last_waited; /* Time last woken after wait for request */ int nr_batch_requests; /* Number of requests left in the batch */ - spinlock_t lock; - struct as_io_context *aic; struct cfq_io_context *cic; }; @@ -134,6 +141,8 @@ struct request { void 
*elevator_private;

+ unsigned short ioprio;
+
 int rq_status; /* should split this into a few status bits */
 struct gendisk *rq_disk;
 int errors;
diff --git a/trunk/include/linux/elevator.h b/trunk/include/linux/elevator.h
index ee54f81faad5..ea6bbc2d7407 100644
--- a/trunk/include/linux/elevator.h
+++ b/trunk/include/linux/elevator.h
@@ -16,9 +16,9 @@ typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);

-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int);
+typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
 typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);

@@ -96,9 +96,9 @@ extern struct request *elv_former_request(request_queue_t *, struct request *);
 extern struct request *elv_latter_request(request_queue_t *, struct request *);
 extern int elv_register_queue(request_queue_t *q);
 extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
+extern int elv_may_queue(request_queue_t *, int, struct bio *);
 extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, int);
+extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int);
 extern void elv_put_request(request_queue_t *, struct request *);

 /*
diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h
index 3ae8e37bdfc8..047bde30836a 100644
--- a/trunk/include/linux/fs.h
+++ b/trunk/include/linux/fs.h
@@ -213,6 +213,7 @@ extern int dir_notify_enable;
 #include
 #include
 #include
+#include
 #include
 #include

@@ -822,16 +823,34 @@ enum {
 #define vfs_check_frozen(sb, level) \
 wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))

+static inline void get_fs_excl(void)
+{
+ atomic_inc(&current->fs_excl);
+}
+
+static inline void put_fs_excl(void)
+{
+ atomic_dec(&current->fs_excl);
+}
+
+static inline int has_fs_excl(void)
+{
+ return atomic_read(&current->fs_excl);
+}
+
+
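In rough terms: a filesystem raises fs_excl while it holds resources that other tasks may be blocked on, so the io scheduler can give the holder preferential service. A sketch of the intended use (the filesystem and function names are hypothetical; the reiserfs journal hunks earlier in this patch are the real first user):

	#include <linux/fs.h>

	/* bracket a section other tasks may be waiting on, in the style of
	 * the reiserfs journal changes in this patch */
	static int examplefs_commit(struct super_block *sb)
	{
		get_fs_excl();	/* atomic_inc(&current->fs_excl) */

		/* ... write out commit blocks that other tasks are waiting on;
		 * while fs_excl is raised, the io scheduler may boost us ... */

		put_fs_excl();	/* atomic_dec(&current->fs_excl) */
		return 0;
	}
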
/*
 * Superblock locking.
 */
 static inline void lock_super(struct super_block * sb)
 {
+ get_fs_excl();
 down(&sb->s_lock);
 }

 static inline void unlock_super(struct super_block * sb)
 {
+ put_fs_excl();
 up(&sb->s_lock);
 }
diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h
index 03206a425d7a..c727c195a91a 100644
--- a/trunk/include/linux/init_task.h
+++ b/trunk/include/linux/init_task.h
@@ -81,6 +81,7 @@ extern struct group_info init_groups;
 .mm = NULL, \
 .active_mm = &init_mm, \
 .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .ioprio = 0, \
 .time_slice = HZ, \
 .tasks = LIST_HEAD_INIT(tsk.tasks), \
 .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \
@@ -110,6 +111,7 @@ extern struct group_info init_groups;
 .proc_lock = SPIN_LOCK_UNLOCKED, \
 .journal_info = NULL, \
 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .fs_excl = ATOMIC_INIT(0), \
 }

diff --git a/trunk/include/linux/ioprio.h b/trunk/include/linux/ioprio.h
new file mode 100644
index 000000000000..7811300d88ee
--- /dev/null
+++ b/trunk/include/linux/ioprio.h
@@ -0,0 +1,87 @@
+#ifndef IOPRIO_H
+#define IOPRIO_H
+
+#include
+
+/*
+ * Gives us 8 prio classes with 13 bits of data for each class
+ */
+#define IOPRIO_BITS (16)
+#define IOPRIO_CLASS_SHIFT (13)
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
+
+#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+
+/*
+ * These are the io priority groups as implemented by CFQ. RT is the realtime
+ * class, it always gets premium service. BE is the best-effort scheduling
+ * class, the default for any process. IDLE is the idle scheduling class, it
+ * is only served when no one else is using the disk.
+ */
+enum {
+ IOPRIO_CLASS_NONE,
+ IOPRIO_CLASS_RT,
+ IOPRIO_CLASS_BE,
+ IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * 8 best effort priority levels are supported
+ */
+#define IOPRIO_BE_NR (8)
+
+asmlinkage int sys_ioprio_set(int, int, int);
+asmlinkage int sys_ioprio_get(int, int);
+
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP,
+ IOPRIO_WHO_USER,
+};
+
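A worked example of the composite encoding (illustrative only; the macros are restated from this header so the fragment compiles on its own):

	#include <assert.h>

	/* restated from include/linux/ioprio.h above */
	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)
	#define IOPRIO_PRIO_CLASS(m)	((m) >> IOPRIO_CLASS_SHIFT)
	#define IOPRIO_PRIO_DATA(m)	((m) & IOPRIO_PRIO_MASK)

	enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };

	int main(void)
	{
		/* realtime class, level 1: (1 << 13) | 1 */
		unsigned int rt1 = (IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT) | 1;

		assert(rt1 == 0x2001);
		assert(IOPRIO_PRIO_CLASS(rt1) == IOPRIO_CLASS_RT);
		assert(IOPRIO_PRIO_DATA(rt1) == 1);

		/* ioprio_best() below prefers rt1 over any best-effort value,
		 * since a numerically smaller class is more important */
		return 0;
	}
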
+/*
+ * If the process has set an io priority explicitly, use that; if not,
+ * convert the cpu scheduler nice value to an io priority
+ */
+#define IOPRIO_NORM (4)
+static inline int task_ioprio(struct task_struct *task)
+{
+ WARN_ON(!ioprio_valid(task->ioprio));
+ return IOPRIO_PRIO_DATA(task->ioprio);
+}
+
+static inline int task_nice_ioprio(struct task_struct *task)
+{
+ return (task_nice(task) + 20) / 5;
+}
+
+/*
+ * For inheritance, return the higher of the two given priorities
+ */
+static inline int ioprio_best(unsigned short aprio, unsigned short bprio)
+{
+ unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
+ unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+
+ if (!ioprio_valid(aprio))
+ return bprio;
+ if (!ioprio_valid(bprio))
+ return aprio;
+
+ if (aclass == IOPRIO_CLASS_NONE)
+ aclass = IOPRIO_CLASS_BE;
+ if (bclass == IOPRIO_CLASS_NONE)
+ bclass = IOPRIO_CLASS_BE;
+
+ if (aclass == bclass)
+ return min(aprio, bprio);
+ if (aclass > bclass)
+ return bprio;
+ else
+ return aprio;
+}
+
+#endif
diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h
index 9530b1903160..ff48815bd3a2 100644
--- a/trunk/include/linux/sched.h
+++ b/trunk/include/linux/sched.h
@@ -608,6 +608,8 @@ struct task_struct {
 struct list_head run_list;
 prio_array_t *array;

+ unsigned short ioprio;
+
 unsigned long sleep_avg;
 unsigned long long timestamp, last_ran;
 unsigned long long sched_time; /* sched_clock time spent running */
@@ -763,6 +765,7 @@ struct task_struct {
 nodemask_t mems_allowed;
 int cpuset_mems_generation;
 #endif
+ atomic_t fs_excl; /* holding fs exclusive resources */
 };

 static inline pid_t process_group(struct task_struct *tsk)
@@ -1112,7 +1115,8 @@ extern void unhash_process(struct task_struct *p);

 /*
 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
- * subscriptions and synchronises with wait4(). Also used in procfs.
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/trunk/include/linux/writeback.h b/trunk/include/linux/writeback.h
index 1262cb43c3ab..d5c3fe1bf33d 100644
--- a/trunk/include/linux/writeback.h
+++ b/trunk/include/linux/writeback.h
@@ -14,11 +14,13 @@ extern struct list_head inode_unused;
 * Yes, writeback.h requires sched.h
 * No, sched.h is not included from here.
*/ -static inline int current_is_pdflush(void) +static inline int task_is_pdflush(struct task_struct *task) { - return current->flags & PF_FLUSHER; + return task->flags & PF_FLUSHER; } +#define current_is_pdflush() task_is_pdflush(current) + /* * fs/fs-writeback.c */ diff --git a/trunk/kernel/exit.c b/trunk/kernel/exit.c index 3ebcd60a19c6..9d1b10ed0135 100644 --- a/trunk/kernel/exit.c +++ b/trunk/kernel/exit.c @@ -784,6 +784,8 @@ fastcall NORET_TYPE void do_exit(long code) profile_task_exit(tsk); + WARN_ON(atomic_read(&tsk->fs_excl)); + if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 2c7806873bfd..cdef6cea8900 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -1090,6 +1090,11 @@ static task_t *copy_process(unsigned long clone_flags, spin_unlock(¤t->sighand->siglock); } + /* + * inherit ioprio + */ + p->ioprio = current->ioprio; + SET_LINKS(p); if (unlikely(p->ptrace & PT_PTRACED)) __ptrace_link(p, current->parent); diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index a07cff90d849..e2b0d3e4dd06 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -3448,15 +3448,7 @@ int task_nice(const task_t *p) { return TASK_NICE(p); } - -/* - * The only users of task_nice are binfmt_elf and binfmt_elf32. - * binfmt_elf is no longer modular, but binfmt_elf32 still is. - * Therefore, task_nice is needed if there is a compat_mode. - */ -#ifdef CONFIG_COMPAT EXPORT_SYMBOL_GPL(task_nice); -#endif /** * idle_cpu - is a given cpu idle currently?