Commit d28d8c3

---
r: 65023
b: refs/heads/master
c: 4191ba2
h: refs/heads/master
i:
  65021: 0b51e1a
  65019: 69ac8ba
  65015: 00c0e03
  65007: 98ca4dc
  64991: 996eb09
  64959: f6de346
  64895: da6a811
  64767: 05c18eb
  64511: c3008c1
v: v3
Cliff Wickman authored and Linus Torvalds committed Sep 19, 2007
1 parent 169f04f commit d28d8c3
Showing 2 changed files with 49 additions and 22 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 49cc886aea1d79cdb0ea409554866238b07fe26f
+refs/heads/master: 4191ba26dae8338892e73f6e67bd18068b4344e9
69 changes: 48 additions & 21 deletions trunk/drivers/char/mspec.c
@@ -67,7 +67,7 @@
 /*
  * Page types allocated by the device.
  */
-enum {
+enum mspec_page_type {
         MSPEC_FETCHOP = 1,
         MSPEC_CACHED,
         MSPEC_UNCACHED
@@ -83,15 +83,25 @@ static int is_sn2;
  * One of these structures is allocated when an mspec region is mmaped. The
  * structure is pointed to by the vma->vm_private_data field in the vma struct.
  * This structure is used to record the addresses of the mspec pages.
+ * This structure is shared by all vma's that are split off from the
+ * original vma when split_vma()'s are done.
+ *
+ * The refcnt is incremented atomically because mm->mmap_sem does not
+ * protect in fork case where multiple tasks share the vma_data.
  */
 struct vma_data {
         atomic_t refcnt;        /* Number of vmas sharing the data. */
-        spinlock_t lock;        /* Serialize access to the vma. */
+        spinlock_t lock;        /* Serialize access to this structure. */
         int count;              /* Number of pages allocated. */
-        int type;               /* Type of pages allocated. */
+        enum mspec_page_type type; /* Type of pages allocated. */
+        int flags;              /* See VMD_xxx below. */
+        unsigned long vm_start; /* Original (unsplit) base. */
+        unsigned long vm_end;   /* Original (unsplit) end. */
         unsigned long maddr[0]; /* Array of MSPEC addresses. */
 };
 
+#define VMD_VMALLOCED 0x1       /* vmalloc'd rather than kmalloc'd */
+
 /* used on shub2 to clear FOP cache in the HUB */
 static unsigned long scratch_page[MAX_NUMNODES];
 #define SH2_AMO_CACHE_ENTRIES   4
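
The new vm_start/vm_end fields record the bounds of the vma as originally mmapped, because a later partial munmap() splits the vma while every fragment keeps pointing at the same shared vma_data. The userspace sketch below shows the triggering scenario; the device path is an assumption for illustration (mspec registers several device nodes), not something this diff specifies.

/*
 * Minimal userspace sketch of the case this hunk defends against.
 * Assumes an SGI SN2 box with the mspec driver loaded; the path
 * "/dev/mspec_cached" is an assumed device node for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        int fd = open("/dev/mspec_cached", O_RDWR);     /* assumed path */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* One vma, four pages; the driver allocates one vma_data for it. */
        char *p = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /*
         * Unmapping the middle page splits the vma in two.  Both halves
         * keep the same vm_private_data, so without the recorded original
         * vm_start each half would compute page indexes from its own,
         * shifted, base address.
         */
        munmap(p + psz, psz);

        munmap(p, 4 * psz);
        close(fd);
        return 0;
}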
@@ -129,8 +139,8 @@ mspec_zero_block(unsigned long addr, int len)
  * mspec_open
  *
  * Called when a device mapping is created by a means other than mmap
- * (via fork, etc.).  Increments the reference count on the underlying
- * mspec data so it is not freed prematurely.
+ * (via fork, munmap, etc.).  Increments the reference count on the
+ * underlying mspec data so it is not freed prematurely.
  */
 static void
 mspec_open(struct vm_area_struct *vma)
@@ -151,34 +161,44 @@ static void
 mspec_close(struct vm_area_struct *vma)
 {
         struct vma_data *vdata;
-        int i, pages, result, vdata_size;
+        int index, last_index, result;
+        unsigned long my_page;
 
         vdata = vma->vm_private_data;
-        if (!atomic_dec_and_test(&vdata->refcnt))
-                return;
 
-        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
-        for (i = 0; i < pages; i++) {
-                if (vdata->maddr[i] == 0)
+        BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+
+        spin_lock(&vdata->lock);
+        index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
+        last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+        for (; index < last_index; index++) {
+                if (vdata->maddr[index] == 0)
                         continue;
                 /*
                  * Clear the page before sticking it back
                  * into the pool.
                  */
-                result = mspec_zero_block(vdata->maddr[i], PAGE_SIZE);
+                my_page = vdata->maddr[index];
+                vdata->maddr[index] = 0;
+                spin_unlock(&vdata->lock);
+                result = mspec_zero_block(my_page, PAGE_SIZE);
                 if (!result)
-                        uncached_free_page(vdata->maddr[i]);
+                        uncached_free_page(my_page);
                 else
                         printk(KERN_WARNING "mspec_close(): "
                                "failed to zero page %i\n",
                                result);
+                spin_lock(&vdata->lock);
         }
+        spin_unlock(&vdata->lock);
 
-        if (vdata_size <= PAGE_SIZE)
-                kfree(vdata);
-        else
+        if (!atomic_dec_and_test(&vdata->refcnt))
+                return;
+
+        if (vdata->flags & VMD_VMALLOCED)
                 vfree(vdata);
+        else
+                kfree(vdata);
 }
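
Note the shape of the reworked loop: an entry is claimed and cleared under vdata->lock, the lock is dropped for the slow mspec_zero_block() call, then retaken before the next iteration, so a concurrent close on a sibling vma can never free the same page twice. Below is a minimal, self-contained sketch of that claim-then-drop-the-lock pattern (plain pthreads, not driver code; all names are illustrative):

/* Generic illustration of the locking pattern used by mspec_close(). */
#include <pthread.h>
#include <stdio.h>

#define NSLOTS 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long slots[NSLOTS] = { 11, 0, 22, 33, 0, 44, 0, 55 };

/* Stand-in for mspec_zero_block(): slow work that must not hold the lock. */
static void slow_scrub(unsigned long page)
{
        printf("scrubbing %lu\n", page);
}

static void drain_range(int first, int last)
{
        unsigned long mine;

        pthread_mutex_lock(&lock);
        for (int i = first; i < last; i++) {
                if (slots[i] == 0)
                        continue;
                mine = slots[i];        /* claim the entry... */
                slots[i] = 0;           /* ...so no other closer frees it */
                pthread_mutex_unlock(&lock);
                slow_scrub(mine);       /* slow path, lock not held */
                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        drain_range(0, NSLOTS);
        return 0;
}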


@@ -195,7 +215,8 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
         int index;
         struct vma_data *vdata = vma->vm_private_data;
 
-        index = (address - vma->vm_start) >> PAGE_SHIFT;
+        BUG_ON(address < vdata->vm_start || address >= vdata->vm_end);
+        index = (address - vdata->vm_start) >> PAGE_SHIFT;
         maddr = (volatile unsigned long) vdata->maddr[index];
         if (maddr == 0) {
                 maddr = uncached_alloc_page(numa_node_id());
@@ -237,10 +258,11 @@ static struct vm_operations_struct mspec_vm_ops = {
  * underlying pages.
  */
 static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
+mspec_mmap(struct file *file, struct vm_area_struct *vma,
+                                        enum mspec_page_type type)
 {
         struct vma_data *vdata;
-        int pages, vdata_size;
+        int pages, vdata_size, flags = 0;
 
         if (vma->vm_pgoff != 0)
                 return -EINVAL;
@@ -255,12 +277,17 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
         vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
         if (vdata_size <= PAGE_SIZE)
                 vdata = kmalloc(vdata_size, GFP_KERNEL);
-        else
+        else {
                 vdata = vmalloc(vdata_size);
+                flags = VMD_VMALLOCED;
+        }
         if (!vdata)
                 return -ENOMEM;
         memset(vdata, 0, vdata_size);
 
+        vdata->vm_start = vma->vm_start;
+        vdata->vm_end = vma->vm_end;
+        vdata->flags = flags;
         vdata->type = type;
         spin_lock_init(&vdata->lock);
         vdata->refcnt = ATOMIC_INIT(1);
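
This allocation-side change pairs with the new free logic in mspec_close(): the allocator choice is recorded in vdata->flags at allocation time instead of being re-derived from the mapping size at close, which no longer works once the vma may have shrunk. A userspace analogue of the same pattern, with illustrative names and an assumed size threshold (not the driver's values):

/* Pick an allocator by size, remember the choice, free accordingly. */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define VD_MMAPPED  0x1         /* analogue of VMD_VMALLOCED */
#define SMALL_LIMIT 4096        /* analogue of PAGE_SIZE; assumed value */

struct blob {
        int flags;
        size_t size;
        unsigned long payload[];        /* analogue of maddr[] */
};

static struct blob *blob_alloc(size_t entries)
{
        size_t size = sizeof(struct blob) + entries * sizeof(unsigned long);
        struct blob *b;

        if (size <= SMALL_LIMIT) {
                b = malloc(size);       /* analogue of kmalloc() */
                if (!b)
                        return NULL;
                memset(b, 0, size);
        } else {
                b = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (b == MAP_FAILED)    /* analogue of vmalloc() */
                        return NULL;
                b->flags = VD_MMAPPED;  /* remember how we allocated */
        }
        b->size = size;
        return b;
}

static void blob_free(struct blob *b)
{
        /* Mirror of the VMD_VMALLOCED check in mspec_close(). */
        if (b->flags & VD_MMAPPED)
                munmap(b, b->size);
        else
                free(b);
}

int main(void)
{
        struct blob *small = blob_alloc(16);            /* malloc path */
        struct blob *big = blob_alloc(1 << 16);         /* mmap path */

        if (small)
                blob_free(small);
        if (big)
                blob_free(big);
        return 0;
}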
