---
yaml
---
r: 311571
b: refs/heads/master
c: 0d200ae
h: refs/heads/master
i:
  311569: f5bc08a
  311567: 6b700bb
v: v3
Joe Thornber authored and Alasdair G Kergon committed Jul 3, 2012
1 parent 68511a6 commit ca2bfea
Showing 18 changed files with 70 additions and 278 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 73e6080547429a3cf16f2cceba54891d345f44c2
refs/heads/master: 0d200aefd4ac51787b6b80de1bb7ce93bccd59f6
50 changes: 0 additions & 50 deletions trunk/Documentation/prctl/no_new_privs.txt

This file was deleted.

2 changes: 0 additions & 2 deletions trunk/arch/arm/kernel/vmlinux.lds.S
@@ -183,9 +183,7 @@ SECTIONS
}
#endif

#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
74 changes: 0 additions & 74 deletions trunk/arch/arm/mm/mmu.c
@@ -791,79 +791,6 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
}
}

#ifndef CONFIG_ARM_LPAE

/*
* The Linux PMD is made of two consecutive section entries covering 2MB
* (see definition in include/asm/pgtable-2level.h). However a call to
* create_mapping() may optimize static mappings by using individual
* 1MB section mappings. This leaves the actual PMD potentially half
* initialized if the top or bottom section entry isn't used, leaving it
* open to problems if a subsequent ioremap() or vmalloc() tries to use
* the virtual space left free by that unused section entry.
*
* Let's avoid the issue by inserting dummy vm entries covering the unused
* PMD halves once the static mappings are in place.
*/

static void __init pmd_empty_section_gap(unsigned long addr)
{
struct vm_struct *vm;

vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
struct vm_struct *vm;
unsigned long addr, next = 0;
pmd_t *pmd;

/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
if (!(vm->flags & VM_ARM_STATIC_MAPPING))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
continue;

/*
* Check if this vm starts on an odd section boundary.
* If so and the first section entry for this PMD is free
* then we block the corresponding virtual address.
*/
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
pmd = pmd_off_k(addr);
if (pmd_none(*pmd))
pmd_empty_section_gap(addr & PMD_MASK);
}

/*
* Then check if this vm ends on an odd section boundary.
* If so and the second section entry for this PMD is empty
* then we block the corresponding virtual address.
*/
addr += vm->size;
if ((addr & ~PMD_MASK) == SECTION_SIZE) {
pmd = pmd_off_k(addr) + 1;
if (pmd_none(*pmd))
pmd_empty_section_gap(addr);
}

/* no need to look at any vm entry until we hit the next PMD */
next = (addr + PMD_SIZE - 1) & PMD_MASK;
}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

@@ -1145,7 +1072,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
if (mdesc->map_io)
mdesc->map_io();
fill_pmd_gaps();

/*
* Finally flush the caches and tlb to ensure that we're in a
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
lwz r3,VCORE_NAPPING_THREADS(r5)
lwz r4,VCPU_PTID(r9)
li r0,1
sld r0,r0,r4
sldi r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
/* print cpus waiting or in xmon */
printf("cpus stopped:");
count = 0;
for_each_possible_cpu(cpu) {
for (cpu = 0; cpu < NR_CPUS; ++cpu) {
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
if (count == 0)
printf(" %x", cpu);
27 changes: 3 additions & 24 deletions trunk/drivers/gpu/drm/drm_edid.c
@@ -1039,24 +1039,6 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}

static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_display_mode *m;
bool ok = false;

list_for_each_entry(m, &connector->probed_modes, head) {
if (mode->hdisplay == m->hdisplay &&
mode->vdisplay == m->vdisplay &&
drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
return false; /* duplicated */
if (mode->hdisplay <= m->hdisplay &&
mode->vdisplay <= m->vdisplay)
ok = true;
}
return ok;
}

static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
@@ -1066,8 +1048,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;

for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -1107,8 +1088,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;

fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
if (!mode_in_range(newmode, edid, timing)) {
drm_mode_destroy(dev, newmode);
continue;
}
@@ -1136,8 +1116,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;

fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
if (!mode_in_range(newmode, edid, timing)) {
drm_mode_destroy(dev, newmode);
continue;
}
37 changes: 7 additions & 30 deletions trunk/drivers/gpu/drm/i915/i915_dma.c
@@ -1401,27 +1401,6 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
}
}

static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;

ap = alloc_apertures(1);
if (!ap)
return;

ap->ranges[0].base = dev_priv->dev->agp->base;
ap->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

kfree(ap);
}

/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1467,15 +1446,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}

dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto put_bridge;
}

i915_kick_out_firmware_fb(dev_priv);

pci_set_master(dev->pdev);

/* overlay on gen2 is broken and can't address above 1G */
@@ -1501,6 +1471,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}

dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto out_rmmap;
}

aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

dev_priv->mm.gtt_mapping =
13 changes: 2 additions & 11 deletions trunk/drivers/gpu/drm/radeon/radeon_gart.c
@@ -289,9 +289,8 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;

/* mark first vm as always in use, it's the system one */
/* allocate enough for 2 full VM pts */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
rdev->vm_manager.max_pfn * 8 * 2,
rdev->vm_manager.max_pfn * 8,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -634,15 +633,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
/* SI requires equal sized PTs for all VMs, so always set
* last_pfn to max_pfn. cayman allows variable sized
* pts so we can grow then as needed. Once we switch
* to two level pts we can unify this again.
*/
if (rdev->family >= CHIP_TAHITI)
vm->last_pfn = rdev->vm_manager.max_pfn;
else
vm->last_pfn = 0;
vm->last_pfn = 0;
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
10 changes: 4 additions & 6 deletions trunk/drivers/gpu/drm/radeon/radeon_gem.c
@@ -292,7 +292,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -318,14 +317,13 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -338,10 +336,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(rdev, robj);
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}

4 changes: 2 additions & 2 deletions trunk/drivers/gpu/drm/radeon/si.c
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);

/* empty context1-15 */
/* FIXME start with 4G, once using 2 level pt switch to full
/* FIXME start with 1G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
7 changes: 7 additions & 0 deletions trunk/drivers/md/dm-thin.c
@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;

r = dm_pool_commit_metadata(pool->pmd);
if (r) {
DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
__func__, r);
return r;
}

r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
DMWARN("reserve_metadata_snap message failed.");