Skip to content

Commit

Permalink
amd-iommu: move page table allocation code to separate function
Browse files Browse the repository at this point in the history
This patch makes page table allocation usable for dma_ops code.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
  • Loading branch information
Joerg Roedel committed May 28, 2009
1 parent c323956 commit 8bda309
Showing 1 changed file with 61 additions and 25 deletions.
86 changes: 61 additions & 25 deletions arch/x86/kernel/amd_iommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,9 @@ struct iommu_cmd {
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
struct unity_map_entry *e);
static struct dma_ops_domain *find_protection_domain(u16 devid);

static u64* alloc_pte(struct protection_domain *dom,
unsigned long address, u64
**pte_page, gfp_t gfp);

#ifdef CONFIG_AMD_IOMMU_STATS

Expand Down Expand Up @@ -468,7 +470,7 @@ static int iommu_map_page(struct protection_domain *dom,
unsigned long phys_addr,
int prot)
{
u64 __pte, *pte, *page;
u64 __pte, *pte;

bus_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(phys_addr);
Expand All @@ -477,27 +479,7 @@ static int iommu_map_page(struct protection_domain *dom,
if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
return -EINVAL;

pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

if (!IOMMU_PTE_PRESENT(*pte)) {
page = (u64 *)get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
*pte = IOMMU_L2_PDE(virt_to_phys(page));
}

pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

if (!IOMMU_PTE_PRESENT(*pte)) {
page = (u64 *)get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
*pte = IOMMU_L1_PDE(virt_to_phys(page));
}

pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];
pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL);

if (IOMMU_PTE_PRESENT(*pte))
return -EBUSY;
Expand Down Expand Up @@ -1139,6 +1121,61 @@ static int get_device_resources(struct device *dev,
return 1;
}

/*
* If the pte_page is not yet allocated this function is called
*/
static u64* alloc_pte(struct protection_domain *dom,
unsigned long address, u64 **pte_page, gfp_t gfp)
{
u64 *pte, *page;

pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];

if (!IOMMU_PTE_PRESENT(*pte)) {
page = (u64 *)get_zeroed_page(gfp);
if (!page)
return NULL;
*pte = IOMMU_L2_PDE(virt_to_phys(page));
}

pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[IOMMU_PTE_L1_INDEX(address)];

if (!IOMMU_PTE_PRESENT(*pte)) {
page = (u64 *)get_zeroed_page(gfp);
if (!page)
return NULL;
*pte = IOMMU_L1_PDE(virt_to_phys(page));
}

pte = IOMMU_PTE_PAGE(*pte);

if (pte_page)
*pte_page = pte;

pte = &pte[IOMMU_PTE_L0_INDEX(address)];

return pte;
}

/*
* This function fetches the PTE for a given address in the aperture
*/
static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
unsigned long address)
{
struct aperture_range *aperture = &dom->aperture;
u64 *pte, *pte_page;

pte = aperture->pte_pages[IOMMU_PTE_L1_INDEX(address)];
if (!pte) {
pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC);
aperture->pte_pages[IOMMU_PTE_L1_INDEX(address)] = pte_page;
}

return pte;
}

/*
* This is the generic map function. It maps one 4kb page at paddr to
* the given address in the DMA address space for the domain.
Expand All @@ -1155,8 +1192,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,

paddr &= PAGE_MASK;

pte = dom->aperture.pte_pages[IOMMU_PTE_L1_INDEX(address)];
pte += IOMMU_PTE_L0_INDEX(address);
pte = dma_ops_get_pte(dom, address);

__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

Expand Down

0 comments on commit 8bda309

Please sign in to comment.