Skip to content

Commit

Permalink
[PATCH] sg: use compound pages
Browse files Browse the repository at this point in the history
sg increments the refcount of the constituent pages of its higher-order memory
allocations when they are about to be mapped by userspace.  This is done so
that the subsequent get_page/put_page calls made while mapping and unmapping
do not free the pages.

Move over to the preferred way, that is, using compound pages instead.  This
fixes a whole class of possible obscure bugs where a get_user_pages on a
constituent page may outlast the user mappings or even the driver.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Douglas Gilbert <dougg@torque.net>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
  • Loading branch information
Nick Piggin authored and Linus Torvalds committed Mar 22, 2006
1 parent a6f563d commit f9aed0e
Showing 1 changed file with 3 additions and 34 deletions.
37 changes: 3 additions & 34 deletions drivers/scsi/sg.c
Original file line number Diff line number Diff line change
Expand Up @@ -1140,32 +1140,6 @@ sg_fasync(int fd, struct file *filp, int mode)
return (retval < 0) ? retval : 0;
}

/* sg_rb_correct4mmap - bias the page refcounts of a reserve buffer so its
 * pages survive userspace mmap/munmap cycles.
 *
 * When startFinish==1, takes an extra reference (get_page) on every page of
 * each scatter-gather element EXCEPT the first page of each alloc_pages()
 * allocation; when startFinish==0, drops those extra references again.
 * The base page is skipped because it already carries the allocation's own
 * refcount (see the N.B. below).
 *
 * NOTE(review): this manual correction is what __GFP_COMP compound pages
 * replace — with compound pages the whole higher-order allocation shares a
 * single refcount, so no per-constituent-page biasing is needed.
 */
static void
sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
struct scatterlist *sg = rsv_schp->buffer; /* head of the reserve buffer's sg list */
struct page *page;
int k, m; /* k: sg element index; m: byte offset within the element */

SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
startFinish, rsv_schp->k_use_sg));
/* N.B. correction _not_ applied to base page of each allocation */
for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
/* start at PAGE_SIZE so the base page (offset 0) is never touched */
for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
page = sg->page;
if (startFinish)
get_page(page);
else {
/* guard presumably defends against an already-zero count;
 * __put_page drops the ref without the full free path —
 * TODO(review) confirm against the era's mm internals */
if (page_count(page) > 0)
__put_page(page);
}
}
}
}

static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
Expand Down Expand Up @@ -1237,10 +1211,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
sa += len;
}

if (0 == sfp->mmap_called) {
sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
sfp->mmap_called = 1;
}
sfp->mmap_called = 1;
vma->vm_flags |= VM_RESERVED;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
Expand Down Expand Up @@ -2395,8 +2366,6 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
SCSI_LOG_TIMEOUT(6,
printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
if (sfp->mmap_called)
sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
sg_remove_scat(&sfp->reserve);
}
sfp->parentdp = NULL;
Expand Down Expand Up @@ -2478,9 +2447,9 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
return resp;

if (lowDma)
page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
else
page_mask = GFP_ATOMIC | __GFP_NOWARN;
page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
order++, a_size <<= 1) ;
Expand Down

0 comments on commit f9aed0e

Please sign in to comment.