Skip to content

Commit

Permalink
drm/i915/gtt: Introduce kmap|kunmap for dma page
Browse files Browse the repository at this point in the history
As there is cache flushing involved after a CPU write, add dedicated
functions for mapping paging structures into CPU address space. Add
macros to map any type of paging structure.

v2: Make it clear that flushing kunmap is only for ppgtt (Ville)
v3: Flushing fixed (Ville, Michel). Removed superfluous semicolon

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
  • Loading branch information
Mika Kuoppala authored and Daniel Vetter committed Jun 26, 2015
1 parent 73eeea5 commit d1c54ac
Showing 1 changed file with 40 additions and 37 deletions.
77 changes: 40 additions & 37 deletions drivers/gpu/drm/i915/i915_gem_gtt.c
Original file line number Diff line number Diff line change
Expand Up @@ -330,15 +330,16 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
memset(p, 0, sizeof(*p));
}

static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
const uint64_t val)
static void *kmap_page_dma(struct i915_page_dma *p)
{
int i;
uint64_t * const vaddr = kmap_atomic(p->page);

for (i = 0; i < 512; i++)
vaddr[i] = val;
return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
* page directories, page tables and scratch pages.
*/
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
Expand All @@ -348,6 +349,21 @@ static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(&(px)->base)
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))

static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
const uint64_t val)
{
int i;
uint64_t * const vaddr = kmap_page_dma(p);

for (i = 0; i < 512; i++)
vaddr[i] = val;

kunmap_page_dma(dev, vaddr);
}

static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
const uint32_t val32)
{
Expand Down Expand Up @@ -504,7 +520,6 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
while (num_entries) {
struct i915_page_directory *pd;
struct i915_page_table *pt;
struct page *page_table;

if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
continue;
Expand All @@ -519,22 +534,18 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
if (WARN_ON(!pt->base.page))
continue;

page_table = pt->base.page;

last_pte = pte + num_entries;
if (last_pte > GEN8_PTES)
last_pte = GEN8_PTES;

pt_vaddr = kmap_atomic(page_table);
pt_vaddr = kmap_px(pt);

for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
num_entries--;
}

if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt);

pte = 0;
if (++pde == I915_PDES) {
Expand Down Expand Up @@ -566,18 +577,14 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (pt_vaddr == NULL) {
struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
struct i915_page_table *pt = pd->page_table[pde];
struct page *page_table = pt->base.page;

pt_vaddr = kmap_atomic(page_table);
pt_vaddr = kmap_px(pt);
}

pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
if (++pte == GEN8_PTES) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
pdpe++;
Expand All @@ -586,11 +593,9 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
pte = 0;
}
}
if (pt_vaddr) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
}

if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr);
}

static void __gen8_do_map_pt(gen8_pde_t * const pde,
Expand Down Expand Up @@ -870,7 +875,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
gen8_pde_t *const page_directory = kmap_atomic(pd->base.page);
gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = gen8_clamp_pd(start, length);
uint64_t pd_start = start;
Expand Down Expand Up @@ -900,10 +905,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
* point we're still relying on insert_entries() */
}

if (!HAS_LLC(vm->dev))
drm_clflush_virt_range(page_directory, PAGE_SIZE);

kunmap_atomic(page_directory);
kunmap_px(ppgtt, page_directory);

set_bit(pdpe, ppgtt->pdp.used_pdpes);
}
Expand Down Expand Up @@ -992,7 +994,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);

pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->base.page);
pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * GEN6_PTES) +
Expand All @@ -1014,7 +1017,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
seq_puts(m, "\n");
}
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);
}
}

Expand Down Expand Up @@ -1221,12 +1224,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
if (last_pte > GEN6_PTES)
last_pte = GEN6_PTES;

pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;

kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);

num_entries -= last_pte - first_pte;
first_pte = 0;
Expand All @@ -1250,21 +1253,21 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->base.page);
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);

if (++act_pte == GEN6_PTES) {
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
act_pt++;
act_pte = 0;
}
}
if (pt_vaddr)
kunmap_atomic(pt_vaddr);
kunmap_px(ppgtt, pt_vaddr);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
Expand Down

0 comments on commit d1c54ac

Please sign in to comment.