
Commit 7c5fa64

---
yaml
---
r: 222161
b: refs/heads/master
c: 8c05cd0
h: refs/heads/master
i:
  222159: ba32f2a
v: v3
Darrick J. Wong authored and Jesse Barnes committed Nov 16, 2010
1 parent 8b14ad7 commit 7c5fa64
Showing 11 changed files with 47 additions and 170 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 2ebc8ec86fe0f3f3acf9ba9b41a368f819e7807e
refs/heads/master: 8c05cd08a7504b855c265263e84af61aabafa329
12 changes: 0 additions & 12 deletions trunk/arch/s390/Kconfig.debug
@@ -6,18 +6,6 @@ config TRACE_IRQFLAGS_SUPPORT

source "lib/Kconfig.debug"

config STRICT_DEVMEM
def_bool y
prompt "Filter access to /dev/mem"
---help---
This option restricts access to /dev/mem. If this option is
disabled, you allow userspace access to all memory, including
kernel and userspace memory. Accidental memory access is likely
to be disastrous.
Memory access is required for experts who want to debug the kernel.

If you are unsure, say Y.

config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict user copy size checks"
---help---
5 changes: 0 additions & 5 deletions trunk/arch/s390/include/asm/page.h
@@ -130,11 +130,6 @@ struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);

static inline int devmem_is_allowed(unsigned long pfn)
{
return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

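The two s390 hunks above are related: STRICT_DEVMEM is the Kconfig switch that filters /dev/mem access, and devmem_is_allowed() is the per-pfn hook the /dev/mem driver consults when that filter is built in (the variant shown here denies every pfn). Below is a small user-space sketch of that page-by-page filtering idiom; range_is_allowed() is modeled on the idea in drivers/char/mem.c, not copied from it, and all values are illustrative.

/*
 * Illustrative user-space sketch of STRICT_DEVMEM-style /dev/mem filtering.
 * devmem_is_allowed() mirrors the s390 stub in the hunk above;
 * range_is_allowed() is modeled on the idea in drivers/char/mem.c.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* The s390 variant shown above: no pfn is ever exposed through /dev/mem. */
static int devmem_is_allowed(unsigned long pfn)
{
        return 0;
}

/* Refuse the whole request if any page in the range is filtered out. */
static int range_is_allowed(unsigned long pfn, unsigned long size)
{
        unsigned long end_pfn = pfn + ((size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        for (; pfn < end_pfn; pfn++)
                if (!devmem_is_allowed(pfn))
                        return 0;
        return 1;
}

int main(void)
{
        printf("map 2 pages at pfn 0x100: %s\n",
               range_is_allowed(0x100, 2 * PAGE_SIZE) ? "allowed" : "denied");
        return 0;
}
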
70 changes: 17 additions & 53 deletions trunk/arch/s390/kernel/kprobes.c
@@ -30,7 +30,6 @@
#include <asm/sections.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -213,7 +212,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
/* Set the PER control regs, turns on single step for this address */
__ctl_load(kprobe_per_regs, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -240,7 +239,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__get_cpu_var(current_kprobe) = p;
/* Save the interrupt and per flags */
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
/* Save the control regs that govern PER */
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}
@@ -317,6 +316,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;

ss_probe:
if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
local_irq_disable();
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -349,7 +350,6 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;

INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
@@ -372,32 +372,10 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
/* another task is sharing our hash bucket */
continue;

orig_ret_address = (unsigned long)ri->ret_addr;

if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}

kretprobe_assert(ri, orig_ret_address, trampoline_address);

correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;

orig_ret_address = (unsigned long)ri->ret_addr;

if (ri->rp && ri->rp->handler) {
ri->ret_addr = correct_ret_addr;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
}

orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);

if (orig_ret_address != trampoline_address) {
@@ -409,7 +387,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
break;
}
}

kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

reset_current_kprobe();
@@ -487,6 +465,8 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
goto out;
}
reset_current_kprobe();
if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
local_irq_enable();
out:
preempt_enable_no_resched();

@@ -502,7 +482,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
return 1;
}

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -528,6 +508,8 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else {
reset_current_kprobe();
if (regs->psw.mask & (PSW_MASK_PER | PSW_MASK_IO))
local_irq_enable();
}
preempt_enable_no_resched();
break;
@@ -571,31 +553,15 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
int ret;

if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_disable();
ret = kprobe_trap_handler(regs, trapnr);
if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
return ret;
}

/*
* Wrapper routine to for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
struct pt_regs *regs = args->regs;
int ret = NOTIFY_DONE;

if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_disable();

switch (val) {
case DIE_BPT:
if (kprobe_handler(args->regs))
@@ -606,17 +572,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_TRAP:
if (!preemptible() && kprobe_running() &&
kprobe_trap_handler(args->regs, args->trapnr))
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
preempt_enable();
break;
default:
break;
}

if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

return ret;
}

@@ -630,7 +595,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)

/* setup return addr to the jprobe handler routine */
regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

/* r14 is the function return address */
kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
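
The kprobes hunks above mostly concern which PSW interrupt bits are saved and masked off while a probed instruction is single-stepped (PER plus I/O, external and, on one side of the diff, machine-check), and where the kretprobe trampoline handler resolves the original return address. A minimal user-space sketch of the save/mask/restore idiom on the PSW mask follows; the bit values are illustrative assumptions rather than the real s390 PSW_MASK_* definitions, and the restore step is an assumed counterpart that is not shown in the hunks.

/*
 * Minimal user-space sketch of the save/mask/restore idiom applied to the
 * PSW mask in the kprobes hunks above. Bit values are illustrative; the
 * restore half is an assumption, not quoted kernel code.
 */
#include <stdio.h>

#define PSW_MASK_PER    (1UL << 0)
#define PSW_MASK_IO     (1UL << 1)
#define PSW_MASK_EXT    (1UL << 2)
#define PSW_MASK_MCHECK (1UL << 3)

int main(void)
{
        unsigned long psw_mask = PSW_MASK_IO | PSW_MASK_EXT;    /* pretend state */
        unsigned long saved_imask;

        /* Save the interrupt-related bits, then mask them off and turn on PER
         * for the duration of the single step (compare set_current_kprobe()
         * and prepare_singlestep() above). */
        saved_imask = psw_mask & (PSW_MASK_PER | PSW_MASK_IO |
                                  PSW_MASK_EXT | PSW_MASK_MCHECK);
        psw_mask |= PSW_MASK_PER;
        psw_mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
        printf("stepping with mask 0x%lx\n", psw_mask);

        /* Assumed restore after the step: drop PER, fold the saved bits back. */
        psw_mask &= ~PSW_MASK_PER;
        psw_mask |= saved_imask;
        printf("restored mask 0x%lx\n", psw_mask);
        return 0;
}
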
7 changes: 4 additions & 3 deletions trunk/arch/s390/mm/gup.c
@@ -20,17 +20,18 @@
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask;
unsigned long mask, result;
pte_t *ptep, pte;
struct page *page;

mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
result = write ? 0 : _PAGE_RO;
mask = result | _PAGE_INVALID | _PAGE_SPECIAL;

ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do {
pte = *ptep;
barrier();
if ((pte_val(pte) & mask) != 0)
if ((pte_val(pte) & mask) != result)
return 0;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
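
The two sides of this hunk differ in how a pte is accepted: one rejects it when any bit in mask is set, the other compares the masked bits against an expected pattern result, which lets a flag be required to be set rather than merely tolerated. A small stand-alone sketch of the two predicates follows; the flag values are illustrative assumptions, not the real s390 pte definitions, and neither function is the kernel's code.

/*
 * Sketch of the two pte-acceptance predicates visible in the
 * gup_pte_range() hunk above, with illustrative flag values.
 */
#include <stdio.h>

#define _PAGE_RO      (1UL << 0)
#define _PAGE_INVALID (1UL << 1)
#define _PAGE_SPECIAL (1UL << 2)

/* One side: accept only if every bit in mask is clear. */
static int ok_mask_vs_zero(unsigned long pte, int write)
{
        unsigned long mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

        return (pte & mask) == 0;
}

/* Other side: the masked bits must equal the expected pattern "result", so a
 * flag named in result has to be set while the rest of mask stays clear. */
static int ok_mask_vs_result(unsigned long pte, int write)
{
        unsigned long result = write ? 0 : _PAGE_RO;
        unsigned long mask = result | _PAGE_INVALID | _PAGE_SPECIAL;

        return (pte & mask) == result;
}

int main(void)
{
        unsigned long pte = 0;  /* all illustrative flags clear */

        printf("read, mask-vs-zero:   %d\n", ok_mask_vs_zero(pte, 0));
        printf("read, mask-vs-result: %d\n", ok_mask_vs_result(pte, 0));
        return 0;
}
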
2 changes: 1 addition & 1 deletion trunk/drivers/pci/pci-sysfs.c
@@ -715,7 +715,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
pci_start = (mmap_api == PCI_MMAP_SYSFS) ?
pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
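
The hunk above decides whether a requested mapping fits inside the PCI resource: start and nr come from the vma (in pages), size is the resource length rounded up to whole pages, and pci_start is non-zero for only one of the two mmap interfaces; which interface that is, is exactly what the one-line change toggles. Below is a stand-alone sketch of that fit check, assuming 4 KiB pages and illustrative values; only the names visible in the hunk are taken from it, and this is not the kernel function.

/*
 * Stand-alone sketch of the range check visible in the pci_mmap_fits()
 * hunk above. Values and the helper name are illustrative.
 */
#include <stdio.h>

enum pci_mmap_api { PCI_MMAP_SYSFS, PCI_MMAP_PROCFS };

/* start and nr are in pages (vm_pgoff and vma length); res_start and res_len
 * describe the resource in bytes. */
static int mmap_fits(unsigned long start, unsigned long nr,
                     unsigned long long res_start, unsigned long long res_len,
                     enum pci_mmap_api mmap_api)
{
        const unsigned int page_shift = 12;     /* assume 4 KiB pages */
        unsigned long size = (unsigned long)(((res_len - 1) >> page_shift) + 1);
        /* Per the hunk, the resource's start page is used as the offset base
         * for only one of the two mmap APIs; the other maps relative to the
         * start of the BAR. */
        unsigned long pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
                        (unsigned long)(res_start >> page_shift) : 0;

        return start >= pci_start && start < pci_start + size &&
               start + nr <= pci_start + size;
}

int main(void)
{
        /* 64 KiB BAR at 0xf0000000: a 4-page mapping at page offset 0 fits
         * with BAR-relative offsets but not with absolute page offsets. */
        printf("relative offsets: %d\n",
               mmap_fits(0, 4, 0xf0000000ULL, 0x10000, PCI_MMAP_SYSFS));
        printf("absolute offsets: %d\n",
               mmap_fits(0, 4, 0xf0000000ULL, 0x10000, PCI_MMAP_PROCFS));
        return 0;
}
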
68 changes: 9 additions & 59 deletions trunk/drivers/s390/char/tape_core.c
@@ -209,79 +209,29 @@ tape_state_set(struct tape_device *device, enum tape_state newstate)
wake_up(&device->state_change_wq);
}

struct tape_med_state_work_data {
struct tape_device *device;
enum tape_medium_state state;
struct work_struct work;
};

static void
tape_med_state_work_handler(struct work_struct *work)
{
static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
struct tape_med_state_work_data *p =
container_of(work, struct tape_med_state_work_data, work);
struct tape_device *device = p->device;
char *envp[] = { NULL, NULL };

switch (p->state) {
case MS_UNLOADED:
pr_info("%s: The tape cartridge has been successfully "
"unloaded\n", dev_name(&device->cdev->dev));
envp[0] = env_state_unloaded;
kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
break;
case MS_LOADED:
pr_info("%s: A tape cartridge has been mounted\n",
dev_name(&device->cdev->dev));
envp[0] = env_state_loaded;
kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
break;
default:
break;
}
tape_put_device(device);
kfree(p);
}

static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
struct tape_med_state_work_data *p;

p = kzalloc(sizeof(*p), GFP_ATOMIC);
if (p) {
INIT_WORK(&p->work, tape_med_state_work_handler);
p->device = tape_get_device(device);
p->state = state;
schedule_work(&p->work);
}
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
enum tape_medium_state oldstate;

oldstate = device->medium_state;
if (oldstate == newstate)
if (device->medium_state == newstate)
return;
device->medium_state = newstate;
switch(newstate){
case MS_UNLOADED:
device->tape_generic_status |= GMT_DR_OPEN(~0);
if (oldstate == MS_LOADED)
tape_med_state_work(device, MS_UNLOADED);
if (device->medium_state == MS_LOADED)
pr_info("%s: The tape cartridge has been successfully "
"unloaded\n", dev_name(&device->cdev->dev));
break;
case MS_LOADED:
device->tape_generic_status &= ~GMT_DR_OPEN(~0);
if (oldstate == MS_UNLOADED)
tape_med_state_work(device, MS_LOADED);
if (device->medium_state == MS_UNLOADED)
pr_info("%s: A tape cartridge has been mounted\n",
dev_name(&device->cdev->dev));
break;
default:
// print nothing
break;
}
device->medium_state = newstate;
wake_up(&device->state_change_wq);
}

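One side of this hunk routes the medium-state message and uevent through a deferred work item — a tape_med_state_work_data is allocated with GFP_ATOMIC, queued with INIT_WORK()/schedule_work(), and the handler recovers its data with container_of() before dropping the device reference — while the other side prints directly from tape_med_state_set(). Below is a user-space sketch of that container_of deferral pattern; schedule_work() is simulated by a direct call and every name here is illustrative, not the driver's code.

/*
 * User-space sketch of the "wrap state in a work item, recover it with
 * container_of in the handler" pattern used by tape_med_state_work() above.
 * schedule_work() is simulated by calling the handler directly.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
        void (*func)(struct work_struct *work);
};

enum medium_state { MS_UNLOADED, MS_LOADED };

struct med_state_work_data {
        enum medium_state state;
        struct work_struct work;
};

static void med_state_work_handler(struct work_struct *work)
{
        struct med_state_work_data *p =
                container_of(work, struct med_state_work_data, work);

        printf("medium state changed to %s\n",
               p->state == MS_LOADED ? "LOADED" : "UNLOADED");
        free(p);
}

static void med_state_work(enum medium_state state)
{
        struct med_state_work_data *p = calloc(1, sizeof(*p));

        if (!p)
                return;
        p->work.func = med_state_work_handler;
        p->state = state;
        p->work.func(&p->work);         /* stand-in for schedule_work(&p->work) */
}

int main(void)
{
        med_state_work(MS_LOADED);
        med_state_work(MS_UNLOADED);
        return 0;
}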
