Commit 694dd65
---
r: 54317
b: refs/heads/master
c: a0044bd
h: refs/heads/master
i:
  54315: 6a8e3b3
v: v3
Jeff Dike authored and Linus Torvalds committed May 7, 2007
1 parent 28b3a17 commit 694dd65
Showing 2 changed files with 100 additions and 78 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: a61f334fd2864b9b040f7e882726426ed7e8a317
refs/heads/master: a0044bdf60c212366a314da09ca624cb315906e2
176 changes: 99 additions & 77 deletions trunk/arch/um/drivers/ubd_kern.c
@@ -149,7 +149,10 @@ struct cow {
int data_offset;
};

#define MAX_SG 64

struct ubd {
struct list_head restart;
/* name (and fd, below) of the file opened for writing, either the
* backing or the cow file. */
char *file;
@@ -164,7 +167,9 @@ struct ubd {
struct platform_device pdev;
struct request_queue *queue;
spinlock_t lock;
int active;
struct scatterlist sg[MAX_SG];
struct request *request;
int start_sg, end_sg;
};

#define DEFAULT_COW { \
@@ -186,7 +191,9 @@ struct ubd {
.shared = 0, \
.cow = DEFAULT_COW, \
.lock = SPIN_LOCK_UNLOCKED, \
.active = 0, \
.request = NULL, \
.start_sg = 0, \
.end_sg = 0, \
}

/* Protected by ubd_lock */
@@ -466,60 +473,67 @@ static void do_ubd_request(request_queue_t * q);
/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;

/* call ubd_finish if you need to serialize */
static void __ubd_finish(struct request *req, int error)
static void ubd_end_request(struct request *req, int bytes, int uptodate)
{
int nsect;

if(error){
end_request(req, 0);
return;
if (!end_that_request_first(req, uptodate, bytes >> 9)) {
struct ubd *dev = req->rq_disk->private_data;
unsigned long flags;

add_disk_randomness(req->rq_disk);
spin_lock_irqsave(&dev->lock, flags);
end_that_request_last(req, uptodate);
spin_unlock_irqrestore(&dev->lock, flags);
}
nsect = req->current_nr_sectors;
req->sector += nsect;
req->buffer += nsect << 9;
req->errors = 0;
req->nr_sectors -= nsect;
req->current_nr_sectors = 0;
end_request(req, 1);
}

/* Callable only from interrupt context - otherwise you need to do
* spin_lock_irq()/spin_lock_irqsave() */
static inline void ubd_finish(struct request *req, int error)
static inline void ubd_finish(struct request *req, int bytes)
{
struct ubd *dev = req->rq_disk->private_data;

spin_lock(&dev->lock);
__ubd_finish(req, error);
spin_unlock(&dev->lock);
if(bytes < 0){
ubd_end_request(req, 0, 0);
return;
}
ubd_end_request(req, bytes, 1);
}

static LIST_HEAD(restart);

/* XXX - move this inside ubd_intr. */
/* Called without dev->lock held, and only in interrupt context. */
static void ubd_handler(void)
{
struct io_thread_req req;
struct request *rq;
struct ubd *dev;
struct ubd *ubd;
struct list_head *list, *next_ele;
unsigned long flags;
int n;

n = os_read_file_k(thread_fd, &req, sizeof(req));
if(n != sizeof(req)){
printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, "
"err = %d\n", os_getpid(), -n);
return;
}

rq = req.req;
dev = rq->rq_disk->private_data;
dev->active = 0;
while(1){
n = os_read_file_k(thread_fd, &req, sizeof(req));
if(n != sizeof(req)){
if(n == -EAGAIN)
break;
printk(KERN_ERR "spurious interrupt in ubd_handler, "
"err = %d\n", -n);
return;
}

ubd_finish(rq, req.error);
rq = req.req;
rq->nr_sectors -= req.length >> 9;
if(rq->nr_sectors == 0)
ubd_finish(rq, rq->hard_nr_sectors << 9);
}
reactivate_fd(thread_fd, UBD_IRQ);
spin_lock(&dev->lock);
do_ubd_request(dev->queue);
spin_unlock(&dev->lock);

list_for_each_safe(list, next_ele, &restart){
ubd = container_of(list, struct ubd, restart);
list_del_init(&ubd->restart);
spin_lock_irqsave(&ubd->lock, flags);
do_ubd_request(ubd->queue);
spin_unlock_irqrestore(&ubd->lock, flags);
}
}

static irqreturn_t ubd_intr(int irq, void *dev)
@@ -684,6 +698,8 @@ static int ubd_add(int n, char **error_out)

ubd_dev->size = ROUND_BLOCK(ubd_dev->size);

INIT_LIST_HEAD(&ubd_dev->restart);

err = -ENOMEM;
ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
if (ubd_dev->queue == NULL) {
@@ -692,6 +708,7 @@ static int ubd_add(int n, char **error_out)
}
ubd_dev->queue->queuedata = ubd_dev;

blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]);
if(err){
*error_out = "Failed to register device";
@@ -1029,26 +1046,16 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
}

/* Called with dev->lock held */
static int prepare_request(struct request *req, struct io_thread_req *io_req)
static void prepare_request(struct request *req, struct io_thread_req *io_req,
unsigned long long offset, int page_offset,
int len, struct page *page)
{
struct gendisk *disk = req->rq_disk;
struct ubd *ubd_dev = disk->private_data;
__u64 offset;
int len;

/* This should be impossible now */
if((rq_data_dir(req) == WRITE) && !ubd_dev->openflags.w){
printk("Write attempted on readonly ubd device %s\n",
disk->disk_name);
end_request(req, 0);
return(1);
}

offset = ((__u64) req->sector) << 9;
len = req->current_nr_sectors << 9;

io_req->req = req;
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd : ubd_dev->fd;
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
ubd_dev->fd;
io_req->fds[1] = ubd_dev->fd;
io_req->cow_offset = -1;
io_req->offset = offset;
@@ -1059,44 +1066,59 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
io_req->offsets[0] = 0;
io_req->offsets[1] = ubd_dev->cow.data_offset;
io_req->buffer = req->buffer;
io_req->buffer = page_address(page) + page_offset;
io_req->sectorsize = 1 << 9;

if(ubd_dev->cow.file != NULL)
cowify_req(io_req, ubd_dev->cow.bitmap, ubd_dev->cow.bitmap_offset,
ubd_dev->cow.bitmap_len);
cowify_req(io_req, ubd_dev->cow.bitmap,
ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);

return(0);
}

/* Called with dev->lock held */
static void do_ubd_request(request_queue_t *q)
{
struct io_thread_req io_req;
struct request *req;
int err, n;

if(thread_fd == -1){
while((req = elv_next_request(q)) != NULL){
err = prepare_request(req, &io_req);
if(!err){
do_io(&io_req);
__ubd_finish(req, io_req.error);
}
}
}
else {
int n;

while(1){
struct ubd *dev = q->queuedata;
if(dev->active || (req = elv_next_request(q)) == NULL)
return;
err = prepare_request(req, &io_req);
if(!err){
dev->active = 1;
n = os_write_file_k(thread_fd, &io_req, sizeof(io_req));
if(n != sizeof(io_req))
printk("write to io thread failed, "
"errno = %d\n", -n);
if(dev->end_sg == 0){
struct request *req = elv_next_request(q);
if(req == NULL)
return;

dev->request = req;
blkdev_dequeue_request(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}

req = dev->request;
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];

prepare_request(req, &io_req,
(unsigned long long) req->sector << 9,
sg->offset, sg->length, sg->page);

n = os_write_file_k(thread_fd, (char *) &io_req,
sizeof(io_req));
if(n != sizeof(io_req)){
if(n != -EAGAIN)
printk("write to io thread failed, "
"errno = %d\n", -n);
else if(list_empty(&dev->restart))
list_add(&dev->restart, &restart);
return;
}

req->sector += sg->length >> 9;
dev->start_sg++;
}
dev->end_sg = 0;
dev->request = NULL;
}
}
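
Note on the change above: the diff replaces the old one-request-at-a-time handshake with the I/O thread (the removed `active` flag) with per-scatterlist-segment submission. do_ubd_request() dequeues a request, maps it with blk_rq_map_sg(), writes one io_thread_req per segment to thread_fd, and parks the device on a `restart` list when the write would block; ubd_handler() later drains completed descriptors and ends the request once all of its sectors are accounted for. What follows is a minimal user-space sketch of that submit/complete flow, not the kernel code: the pipe transport, the pthread worker, and names such as io_req, submit_fd, and done_fd are assumptions made purely for illustration.

/*
 * Minimal user-space sketch of the submit/complete flow introduced by this
 * commit.  The pipe transport, the pthread worker, and every name below
 * (io_req, submit_fd, done_fd, ...) are illustrative assumptions.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct io_req {                  /* stand-in for struct io_thread_req */
	int req_id;              /* stand-in for the struct request pointer */
	long long offset;        /* byte offset of this segment */
	int length;              /* segment length in bytes */
};

static int submit_fd[2];         /* device -> I/O thread (thread_fd analogue) */
static int done_fd[2];           /* I/O thread -> "interrupt handler" */

/* I/O thread analogue: read a descriptor, perform the I/O (elided here),
 * and report the same descriptor back as a completion. */
static void *io_thread(void *arg)
{
	struct io_req r;

	(void) arg;
	while (read(submit_fd[0], &r, sizeof(r)) == sizeof(r))
		write(done_fd[1], &r, sizeof(r));
	return NULL;
}

int main(void)
{
	/* One 8-sector request split into two 2 KiB segments, the way
	 * blk_rq_map_sg() would hand back a scatterlist. */
	struct io_req segs[2] = {
		{ .req_id = 1, .offset = 0,    .length = 2048 },
		{ .req_id = 1, .offset = 2048, .length = 2048 },
	};
	int remaining = 8 << 9;          /* nr_sectors bookkeeping, in bytes */
	struct io_req done = { 0 };
	pthread_t tid;
	int i;

	pipe(submit_fd);
	pipe(done_fd);
	pthread_create(&tid, NULL, io_thread, NULL);

	/* do_ubd_request() analogue: queue every segment of the request. */
	for (i = 0; i < 2; i++)
		write(submit_fd[1], &segs[i], sizeof(segs[i]));

	/* ubd_handler() analogue: drain completions and finish the request
	 * only once all of its sectors have come back. */
	while (remaining > 0 &&
	       read(done_fd[0], &done, sizeof(done)) == sizeof(done)) {
		remaining -= done.length;
		printf("segment done: offset %lld, %d bytes, %d left\n",
		       done.offset, done.length, remaining);
	}
	printf("request %d complete\n", done.req_id);

	close(submit_fd[1]);             /* lets the worker's read() return 0 */
	pthread_join(tid, NULL);
	return 0;
}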

