atomic: Replace atomic_{set,clear}_mask() usage
Replace the deprecated atomic_{set,clear}_mask() usage with the now
ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Peter Zijlstra authored and Thomas Gleixner committed Jul 27, 2015
1 parent de9e432 commit 805de8f
Showing 14 changed files with 97 additions and 97 deletions.
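For anyone converting similar call sites, the mapping is one-to-one. A minimal sketch of the intended semantics follows (the legacy_* names are illustrative, not the kernel's actual per-arch definitions, which are frequently inline assembly):

#include <linux/atomic.h>

/* Sketch of the equivalences this commit relies on:
 *
 *   atomic_set_mask(mask, v)    ->  atomic_or(mask, v)       v |= mask
 *   atomic_clear_mask(mask, v)  ->  atomic_andnot(mask, v)   v &= ~mask
 */
static inline void legacy_atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);	/* atomically set the bits in mask */
}

static inline void legacy_atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);	/* atomically clear the bits in mask */
}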
2 changes: 1 addition & 1 deletion arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+atomic_or((1 << msg), &bfin_ipi_data->bits);
atomic_inc(&bfin_ipi_data->count);
}
local_irq_restore(flags);
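The send side above ORs a per-message bit into a per-cpu word and bumps a counter; a hypothetical receive side would drain those bits with a fetch-and-clear, along these lines (sketch only — the handler name and struct ipi_data layout are assumptions, not code from this commit):

static irqreturn_t ipi_handler_sketch(int irq, void *dev_id)
{
	struct ipi_data *data = this_cpu_ptr(&bfin_ipi);
	/* Atomically fetch and clear all pending message bits. */
	unsigned long pending = atomic_xchg(&data->bits, 0);

	while (pending) {
		unsigned long msg = __ffs(pending);

		pending &= ~(1UL << msg);
		/* dispatch 'msg': reschedule, call-function, stop, ... */
	}
	return IRQ_HANDLED;
}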
4 changes: 2 additions & 2 deletions arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock);
mask=cpumask_bits(&cpumask);
-atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_vma = vma;
flush_va = va;
mask=cpumask_bits(&cpumask);
-atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+atomic_or(*mask, (atomic_t *)&flush_cpumask);

/*
* We have to send the IPI only to
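The sender above spins until flushcache_cpumask drains back to zero; the matching acknowledge step on each flushing CPU would clear that CPU's bit, roughly as below (hypothetical helper, mirroring the atomic_t cast used in the hunk):

static void flushcache_ack_sketch(int cpu_id)
{
	/* Clear this CPU's bit so the sender's wait loop can exit. */
	atomic_andnot(1 << cpu_id, (atomic_t *)&flushcache_cpumask);
}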
2 changes: 1 addition & 1 deletion arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_mm = mm;
flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
-atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
#else
#error Not supported.
#endif
4 changes: 2 additions & 2 deletions arch/s390/kernel/time.c
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
* increase the "sequence" counter to avoid the race of an
* etr event and the complete recovery against get_sync_clock.
*/
-atomic_clear_mask(0x80000000, sw_ptr);
+atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}

@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
static void enable_sync_clock(void)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-atomic_set_mask(0x80000000, sw_ptr);
+atomic_or(0x80000000, sw_ptr);
}

/*
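The comment in disable_sync_clock() explains the scheme: bit 31 of the clock-sync word says the clock is usable, and the remaining bits form a sequence counter bumped on every disable, so a reader can detect a disable that raced with its clock access. A hypothetical check modeled on that description (sketch only):

static int clock_sync_check_sketch(void)
{
	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
	int word = atomic_read(sw_ptr);

	if (!(word & 0x80000000))
		return 0;			/* clock not in sync */
	/* ... read the TOD clock here ... */
	return atomic_read(sw_ptr) == word;	/* no disable raced with us */
}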
30 changes: 15 additions & 15 deletions arch/s390/kvm/interrupt.c
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
-atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
-atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
-atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-	&vcpu->arch.sie_block->cpuflags);
+atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+	&vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
-atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
spin_unlock(&li->lock);

/* clear pending external calls set by sigp interpretation facility */
-atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)

li->irq.ext = irq->u.ext;
set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}

@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
/* another external call is pending */
return -EBUSY;
}
-atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
return 0;
}

@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY;
*extcall = irq->u.extcall;
-atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}

@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,

set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}

@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
0, 0, 2);

set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}

@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
0, 0, 2);

set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0;
}

@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
spin_lock(&li->lock);
switch (type) {
case KVM_S390_MCHK:
-atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+atomic_or(CPUSTAT_IO_INT, li->cpuflags);
break;
default:
-atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
break;
}
spin_unlock(&li->lock);
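All the injection helpers in this file share one two-step shape: record the pending interrupt, then raise the matching CPUSTAT_* bit so the vCPU leaves SIE and delivery runs. Condensed into one sketch (illustrative name and parameter, not from the file):

static int inject_ext_irq_sketch(struct kvm_s390_local_interrupt *li,
				 unsigned long irq_pend_bit)
{
	set_bit(irq_pend_bit, &li->pending_irqs);	/* record what is pending */
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);	/* make SIE notice it */
	return 0;
}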
32 changes: 16 additions & 16 deletions arch/s390/kvm/kvm-s390.c
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap);
-atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
-atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap);
if (test_kvm_facility(vcpu->kvm, 129)) {
save_fp_ctl(&vcpu->run->s.regs.fpc);
@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_STOPPED);

if (test_kvm_facility(vcpu->kvm, 78))
-atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
else if (test_kvm_facility(vcpu->kvm, 8))
-atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

kvm_s390_vcpu_setup_model(vcpu);

@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
-atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
-atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
-atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
-atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
-atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
/* enforce guest PER */
-atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg);
} else {
-atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
vcpu->arch.guestdbg.last_bp = 0;
}

if (rc) {
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
-atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
}

return rc;
@@ -1771,7 +1771,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
if (!ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-atomic_set_mask(CPUSTAT_IBS,
+atomic_or(CPUSTAT_IBS,
&vcpu->arch.sie_block->cpuflags);
}
goto retry;
@@ -1780,7 +1780,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
if (ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-atomic_clear_mask(CPUSTAT_IBS,
+atomic_andnot(CPUSTAT_IBS,
&vcpu->arch.sie_block->cpuflags);
}
goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
__disable_ibs_on_all_vcpus(vcpu->kvm);
}

-atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
/*
* Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu);

-atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
__disable_ibs_on_vcpu(vcpu);

for (i = 0; i < online_vcpus; i++) {
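kvm_s390_vcpu_request() and kvm_s390_vcpu_request_handled() are meant to bracket updates the running vCPU must not race with: set PROG_REQUEST and kick the vCPU out of SIE, do the work, then clear the flag. A hypothetical caller (sketch only; the actual callers live elsewhere in this file):

static void update_vcpu_sketch(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_request(vcpu);	/* set PROG_REQUEST, force a SIE exit */
	/* ... modify state the vCPU must not observe mid-update ... */
	kvm_s390_vcpu_request_handled(vcpu);	/* clear PROG_REQUEST again */
}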
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_drv.c
@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);

2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_gem.c
@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev)
* for all other failure, such as an allocation failure, bail.
*/
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0;
}

4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/i915_irq.c
@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
-atomic_set_mask(I915_WEDGED, &error->reset_counter);
+atomic_or(I915_WEDGED, &error->reset_counter);
}

/*
@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_report_and_clear_eir(dev);

if (wedged) {
-atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);

/*
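The i915 reset_counter doubles as a state word: low flag bits mark a reset in progress or a terminally wedged GPU, while the remaining bits count completed resets. The read side is then a plain atomic_read() plus mask test, roughly as below (assumed helpers modeled on i915's accessors, not taken from this commit):

static inline bool reset_in_progress_sketch(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) &
	       I915_RESET_IN_PROGRESS_FLAG;
}

static inline bool terminally_wedged_sketch(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}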
2 changes: 1 addition & 1 deletion drivers/s390/scsi/zfcp_aux.c
@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
list_add_tail(&port->list, &adapter->port_list);
write_unlock_irq(&adapter->port_list_lock);

-atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);

return port;
