diff --git a/Documentation/ABI/testing/sysfs-devices-platform-dock b/Documentation/ABI/testing/sysfs-devices-platform-dock new file mode 100644 index 0000000000000..1d8c18f905c7d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-platform-dock @@ -0,0 +1,39 @@ +What: /sys/devices/platform/dock.N/docked +Date: Dec, 2006 +KernelVersion: 2.6.19 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Value 1 or 0 indicates whether the software believes the + laptop is docked in a docking station. + +What: /sys/devices/platform/dock.N/undock +Date: Dec, 2006 +KernelVersion: 2.6.19 +Contact: linux-acpi@vger.kernel.org +Description: + (WO) Writing to this file causes the software to initiate an + undock request to the firmware. + +What: /sys/devices/platform/dock.N/uid +Date: Feb, 2007 +KernelVersion: v2.6.21 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Displays the docking station the laptop is docked to. + +What: /sys/devices/platform/dock.N/flags +Date: May, 2007 +KernelVersion: v2.6.21 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Show dock station flags, useful for checking if undock + request has been made by the user (from the immediate_undock + option). + +What: /sys/devices/platform/dock.N/type +Date: Aug, 2008 +KernelVersion: v2.6.27 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Display the dock station type- dock_station, ata_bay or + battery_bay. diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index bfd29bc8d37af..4ed63b6cfb155 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -108,6 +108,8 @@ Description: CPU topology files that describe a logical CPU's relationship What: /sys/devices/system/cpu/cpuidle/current_driver /sys/devices/system/cpu/cpuidle/current_governer_ro + /sys/devices/system/cpu/cpuidle/available_governors + /sys/devices/system/cpu/cpuidle/current_governor Date: September 2007 Contact: Linux kernel mailing list Description: Discover cpuidle policy and mechanism @@ -119,13 +121,84 @@ Description: Discover cpuidle policy and mechanism Idle policy (governor) is differentiated from idle mechanism (driver) - current_driver: displays current idle mechanism + current_driver: (RO) displays current idle mechanism - current_governor_ro: displays current idle policy + current_governor_ro: (RO) displays current idle policy + + With the cpuidle_sysfs_switch boot option enabled (meant for + developer testing), the following three attributes are visible + instead: + + current_driver: same as described above + + available_governors: (RO) displays a space separated list of + available governors + + current_governor: (RW) displays current idle policy. Users can + switch the governor at runtime by writing to this file. See files in Documentation/cpuidle/ for more information. +What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/name + /sys/devices/system/cpu/cpuX/cpuidle/stateN/latency + /sys/devices/system/cpu/cpuX/cpuidle/stateN/power + /sys/devices/system/cpu/cpuX/cpuidle/stateN/time + /sys/devices/system/cpu/cpuX/cpuidle/stateN/usage +Date: September 2007 +KernelVersion: v2.6.24 +Contact: Linux power management list +Description: + The directory /sys/devices/system/cpu/cpuX/cpuidle contains per + logical CPU specific cpuidle information for each online cpu X. + The processor idle states which are available for use have the + following attributes: + + name: (RO) Name of the idle state (string). 
+ + latency: (RO) The latency to exit out of this idle state (in + microseconds). + + power: (RO) The power consumed while in this idle state (in + milliwatts). + + time: (RO) The total time spent in this idle state (in microseconds). + + usage: (RO) Number of times this state was entered (a count). + + +What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/desc +Date: February 2008 +KernelVersion: v2.6.25 +Contact: Linux power management list +Description: + (RO) A small description about the idle state (string). + + +What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/disable +Date: March 2012 +KernelVersion: v3.10 +Contact: Linux power management list +Description: + (RW) Option to disable this idle state (bool). The behavior and + the effect of the disable variable depends on the implementation + of a particular governor. In the ladder governor, for example, + it is not coherent, i.e. if one is disabling a light state, then + all deeper states are disabled as well, but the disable variable + does not reflect it. Likewise, if one enables a deep state but a + lighter state still is disabled, then this has no effect. + + +What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/residency +Date: March 2014 +KernelVersion: v3.15 +Contact: Linux power management list +Description: + (RO) Display the target residency i.e. the minimum amount of + time (in microseconds) this cpu should spend in this idle state + to make the transition worth the effort. + + What: /sys/devices/system/cpu/cpu#/cpufreq/* Date: pre-git history Contact: linux-pm@vger.kernel.org diff --git a/Documentation/ABI/testing/sysfs-platform-dptf b/Documentation/ABI/testing/sysfs-platform-dptf new file mode 100644 index 0000000000000..325dc0667dbb8 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-dptf @@ -0,0 +1,40 @@ +What: /sys/bus/platform/devices/INT3407:00/dptf_power/charger_type +Date: Jul, 2016 +KernelVersion: v4.10 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) The charger type - Traditional, Hybrid or NVDC. + +What: /sys/bus/platform/devices/INT3407:00/dptf_power/adapter_rating_mw +Date: Jul, 2016 +KernelVersion: v4.10 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Adapter rating in milliwatts (the maximum Adapter power). + Must be 0 if no AC Adaptor is plugged in. + +What: /sys/bus/platform/devices/INT3407:00/dptf_power/max_platform_power_mw +Date: Jul, 2016 +KernelVersion: v4.10 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Maximum platform power that can be supported by the battery + in milliwatts. + +What: /sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source +Date: Jul, 2016 +KernelVersion: v4.10 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) Display the platform power source + 0x00 = DC + 0x01 = AC + 0x02 = USB + 0x03 = Wireless Charger + +What: /sys/bus/platform/devices/INT3407:00/dptf_power/battery_steady_power +Date: Jul, 2016 +KernelVersion: v4.10 +Contact: linux-acpi@vger.kernel.org +Description: + (RO) The maximum sustained power for battery in milliwatts. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 39ac9d4fad7ff..1d1d53f85ddd7 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -931,9 +931,12 @@ earlycon= [KNL] Output early console device and options. - When used with no options, the early console is - determined by the stdout-path property in device - tree's chosen node. 
+ [ARM64] The early console is determined by the + stdout-path property in device tree's chosen node, + or by the ACPI SPCR table. + + [X86] When used with no options, the early console is + determined by the ACPI SPCR table. cdns,<addr>[,options] Start an early, polled-mode console on a Cadence diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt index 5550bfdcce5f1..be70b32c95d91 100644 --- a/Documentation/atomic_bitops.txt +++ b/Documentation/atomic_bitops.txt @@ -58,7 +58,12 @@ Like with atomic_t, the rule of thumb is: - RMW operations that have a return value are fully ordered. -Except for test_and_set_bit_lock() which has ACQUIRE semantics and + - RMW operations that are conditional are unordered on FAILURE, + otherwise the above rules apply. In the case of test_and_{}_bit() operations, + if the bit in memory is unchanged by the operation then it is deemed to have + failed. + +Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and clear_bit_unlock() which has RELEASE semantics. Since a platform only has a single means of achieving atomic operations diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt index 434c49cc7330a..61546ac578d60 100644 --- a/Documentation/cpu-freq/cpu-drivers.txt +++ b/Documentation/cpu-freq/cpu-drivers.txt @@ -291,3 +291,7 @@ For example: /* Do something with pos */ pos->frequency = ... } + +If you need to work with the position of pos within driver_freq_table, +do not subtract the pointers, as it is quite costly. Instead, use the +macros cpufreq_for_each_entry_idx() and cpufreq_for_each_valid_entry_idx(). diff --git a/Documentation/devicetree/bindings/power/mti,mips-cpc.txt b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt new file mode 100644 index 0000000000000..c6b82511ae8a0 --- /dev/null +++ b/Documentation/devicetree/bindings/power/mti,mips-cpc.txt @@ -0,0 +1,8 @@ +Binding for MIPS Cluster Power Controller (CPC). + +This binding allows a system to specify where the CPC registers are +located. + +Required properties: +compatible : Should be "mti,mips-cpc". +reg : Should describe the address & size of the CPC register region. diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst index ecf8851d35651..30e6aa7e160b7 100644 --- a/Documentation/driver-api/s390-drivers.rst +++ b/Documentation/driver-api/s390-drivers.rst @@ -22,9 +22,28 @@ While most I/O devices on a s390 system are typically driven through the channel I/O mechanism described here, there are various other methods (like the diag interface). These are out of the scope of this document. +The s390 common I/O layer also provides access to some devices that are +not strictly considered I/O devices. They are considered here as well, +although they are not the focus of this document. + Some additional information can also be found in the kernel source under Documentation/s390/driver-model.txt. +The css bus +=========== + +The css bus contains the subchannels available on the system. They fall +into several categories: + +* Standard I/O subchannels, for use by the system. They have a child + device on the ccw bus and are described below. +* I/O subchannels bound to the vfio-ccw driver. See + Documentation/s390/vfio-ccw.txt. +* Message subchannels. No Linux driver currently exists. +* CHSC subchannels (at most one). The chsc subchannel driver can be used + to send asynchronous chsc commands. +* eADM subchannels. Used for talking to storage class memory.
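To make the conditional-ordering rule from the atomic_bitops.txt hunk above concrete: a successful test_and_set_bit_lock() acts as an ACQUIRE and clear_bit_unlock() as a RELEASE, which is enough to build a simple lock bit. A minimal sketch (the mydev structure and MYDEV_LOCKED bit are hypothetical, for illustration only)::

    #include <linux/bitops.h>

    #define MYDEV_LOCKED	0	/* bit 0 of a driver-private word */

    struct mydev {
    	unsigned long flags;
    };

    static bool mydev_trylock(struct mydev *d)
    {
    	/* ACQUIRE semantics on success; a failed attempt (bit was
    	 * already set, i.e. memory unchanged) is unordered. */
    	return !test_and_set_bit_lock(MYDEV_LOCKED, &d->flags);
    }

    static void mydev_unlock(struct mydev *d)
    {
    	/* RELEASE semantics: stores made while the bit was held are
    	 * visible before the bit is observed clear. */
    	clear_bit_unlock(MYDEV_LOCKED, &d->flags);
    }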
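Likewise, for the cpu-drivers.txt note above, the index-aware iterators hand back the entry's position directly, so no pointer subtraction is needed. A sketch, assuming a driver-owned driver_freq_table::

    struct cpufreq_frequency_table *pos;
    int idx;

    cpufreq_for_each_valid_entry_idx(pos, driver_freq_table, idx) {
    	/* idx is pos's position within driver_freq_table */
    	pr_debug("entry %d: %u kHz\n", idx, pos->frequency);
    }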
+ The ccw bus =========== @@ -102,10 +121,15 @@ ccw group devices Generic interfaces ================== -Some interfaces are available to other drivers that do not necessarily -have anything to do with the busses described above, but still are -indirectly using basic infrastructure in the common I/O layer. One -example is the support for adapter interrupts. +The following section contains interfaces in use not only by drivers +dealing with ccw devices, but also by drivers for various other s390 +hardware. + +Adapter interrupts +------------------ + +The common I/O layer provides helper functions for dealing with adapter +interrupts and interrupt vectors. .. kernel-doc:: drivers/s390/cio/airq.c :export: diff --git a/Documentation/locking/mutex-design.txt b/Documentation/locking/mutex-design.txt index 60c482df1a38d..818aca19612f4 100644 --- a/Documentation/locking/mutex-design.txt +++ b/Documentation/locking/mutex-design.txt @@ -21,37 +21,23 @@ Implementation -------------- Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h -and implemented in kernel/locking/mutex.c. These locks use a three -state atomic counter (->count) to represent the different possible -transitions that can occur during the lifetime of a lock: - - 1: unlocked - 0: locked, no waiters - negative: locked, with potential waiters - -In its most basic form it also includes a wait-queue and a spinlock -that serializes access to it. CONFIG_SMP systems can also include -a pointer to the lock task owner (->owner) as well as a spinner MCS -lock (->osq), both described below in (ii). +and implemented in kernel/locking/mutex.c. These locks use an atomic variable +(->owner) to keep track of the lock state during its lifetime. The owner field +holds a 'struct task_struct *' pointing to the current lock owner, and is +therefore NULL if the lock is not currently owned. Since task_struct pointers +are aligned to at least L1_CACHE_BYTES, the three low bits are used to store +extra state (e.g., whether the waiter list is non-empty). In its most basic +form it also includes a wait-queue and a spinlock that serializes access to +it. Furthermore, CONFIG_MUTEX_SPIN_ON_OWNER=y systems use a spinner MCS lock +(->osq), described below in (ii). When acquiring a mutex, there are three possible paths that can be taken, depending on the state of the lock: -(i) fastpath: tries to atomically acquire the lock by decrementing the - counter. If it was already taken by another task it goes to the next - possible path. This logic is architecture specific. On x86-64, the - locking fastpath is 2 instructions: - - 0000000000000e10 <mutex_lock>: - e21: f0 ff 0b lock decl (%rbx) - e24: 79 08 jns e2e <mutex_lock+0x1e> - - the unlocking fastpath is equally tight: - - 0000000000000bc0 <mutex_unlock>: - bc8: f0 ff 07 lock incl (%rdi) - bcb: 7f 0a jg bd7 <mutex_unlock+0x17> - +(i) fastpath: tries to atomically acquire the lock by cmpxchg()ing the owner with + the current task. This only works in the uncontended case (cmpxchg() checks + against 0UL, so all 3 state bits above have to be 0). If the lock is + contended it goes to the next possible path. (ii) midpath: aka optimistic spinning, tries to spin for acquisition while the lock owner is running and there are no other tasks ready @@ -143,11 +129,10 @@ Test if the mutex is taken: Disadvantages ------------- -Unlike its original design and purpose, 'struct mutex' is larger than most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice as large as 'struct semaphore' (24 bytes) and tied, along with rwsems, for the largest lock in the kernel.
Larger structure sizes mean more -CPU cache and memory footprint. +Unlike its original design and purpose, 'struct mutex' is among the largest +locks in the kernel. E.g: on x86-64 it is 32 bytes, where 'struct semaphore' +is 24 bytes and rw_semaphore is 40 bytes. Larger structure sizes mean more CPU +cache and memory footprint. When to use mutexes ------------------- diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX index 69fe1a8b7ad16..3da73aabff5ac 100644 --- a/Documentation/virtual/kvm/00-INDEX +++ b/Documentation/virtual/kvm/00-INDEX @@ -26,3 +26,6 @@ s390-diag.txt - Diagnose hypercall description (for IBM S/390) timekeeping.txt - timekeeping virtualization for x86-based architectures. +amd-memory-encryption.txt + - notes on AMD Secure Encrypted Virtualization feature and SEV firmware + command description diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst new file mode 100644 index 0000000000000..71d6d257074ff --- /dev/null +++ b/Documentation/virtual/kvm/amd-memory-encryption.rst @@ -0,0 +1,247 @@ +====================================== +Secure Encrypted Virtualization (SEV) +====================================== + +Overview +======== + +Secure Encrypted Virtualization (SEV) is a feature found on AMD processors. + +SEV is an extension to the AMD-V architecture which supports running +virtual machines (VMs) under the control of a hypervisor. When enabled, +the memory contents of a VM will be transparently encrypted with a key +unique to that VM. + +The hypervisor can determine the SEV support through the CPUID +instruction. The CPUID function 0x8000001f reports information related +to SEV:: + + 0x8000001f[eax]: + Bit[1] indicates support for SEV + ... + [ecx]: + Bits[31:0] Number of encrypted guests supported simultaneously + +If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015 +(MSR_K7_HWCR) can be used to determine if it can be enabled:: + + 0xc001_0010: + Bit[23] 1 = memory encryption can be enabled + 0 = memory encryption can not be enabled + + 0xc001_0015: + Bit[0] 1 = memory encryption can be enabled + 0 = memory encryption can not be enabled + +When SEV support is available, it can be enabled in a specific VM by +setting the SEV bit before executing VMRUN.:: + + VMCB[0x90]: + Bit[1] 1 = SEV is enabled + 0 = SEV is disabled + +SEV hardware uses ASIDs to associate a memory encryption key with a VM. +Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value +defined in the CPUID 0x8000001f[ecx] field. + +SEV Key Management +================== + +The SEV guest key management is handled by a separate processor called the AMD +Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure +key management interface to perform common hypervisor activities such as +encrypting bootstrap code, snapshot, migrating and debugging the guest. For more +information, see the SEV Key Management spec [api-spec]_ + +KVM implements the following commands to support common lifecycle events of SEV +guests, such as launching, running, snapshotting, migrating and decommissioning. + +1. KVM_SEV_INIT +--------------- + +The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform +context. In a typical workflow, this command should be the first command issued. + +Returns: 0 on success, -negative on error + +2. 
KVM_SEV_LAUNCH_START +----------------------- + +The KVM_SEV_LAUNCH_START command is used for creating the memory encryption +context. To create the encryption context, user must provide a guest policy, +the owner's public Diffie-Hellman (PDH) key and session information. + +Parameters: struct kvm_sev_launch_start (in/out) + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_start { + __u32 handle; /* if zero then firmware creates a new handle */ + __u32 policy; /* guest's policy */ + + __u64 dh_uaddr; /* userspace address pointing to the guest owner's PDH key */ + __u32 dh_len; + + __u64 session_addr; /* userspace address which points to the guest session information */ + __u32 session_len; + }; + +On success, the 'handle' field contains a new handle and on error, a negative value. + +For more details, see SEV spec Section 6.2. + +3. KVM_SEV_LAUNCH_UPDATE_DATA +----------------------------- + +The KVM_SEV_LAUNCH_UPDATE_DATA is used for encrypting a memory region. It also +calculates a measurement of the memory contents. The measurement is a signature +of the memory contents that can be sent to the guest owner as an attestation +that the memory was encrypted correctly by the firmware. + +Parameters (in): struct kvm_sev_launch_update_data + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_update { + __u64 uaddr; /* userspace address to be encrypted (must be 16-byte aligned) */ + __u32 len; /* length of the data to be encrypted (must be 16-byte aligned) */ + }; + +For more details, see SEV spec Section 6.3. + +4. KVM_SEV_LAUNCH_MEASURE +------------------------- + +The KVM_SEV_LAUNCH_MEASURE command is used to retrieve the measurement of the +data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may +wait to provide the guest with confidential information until it can verify the +measurement. Since the guest owner knows the initial contents of the guest at +boot, the measurement can be verified by comparing it to what the guest owner +expects. + +Parameters (in): struct kvm_sev_launch_measure + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_measure { + __u64 uaddr; /* where to copy the measurement */ + __u32 len; /* length of measurement blob */ + }; + +For more details on the measurement verification flow, see SEV spec Section 6.4. + +5. KVM_SEV_LAUNCH_FINISH +------------------------ + +After completion of the launch flow, the KVM_SEV_LAUNCH_FINISH command can be +issued to make the guest ready for the execution. + +Returns: 0 on success, -negative on error + +6. KVM_SEV_GUEST_STATUS +----------------------- + +The KVM_SEV_GUEST_STATUS command is used to retrieve status information about a +SEV-enabled guest. + +Parameters (out): struct kvm_sev_guest_status + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_guest_status { + __u32 handle; /* guest handle */ + __u32 policy; /* guest policy */ + __u8 state; /* guest state (see enum below) */ + }; + +SEV guest state: + +:: + + enum { + SEV_STATE_INVALID = 0; + SEV_STATE_LAUNCHING, /* guest is currently being launched */ + SEV_STATE_SECRET, /* guest is being launched and ready to accept the ciphertext data */ + SEV_STATE_RUNNING, /* guest is fully launched and running */ + SEV_STATE_RECEIVING, /* guest is being migrated in from another SEV machine */ + SEV_STATE_SENDING /* guest is getting migrated out to another SEV machine */ + }; + +7. 
KVM_SEV_DBG_DECRYPT +---------------------- + +The KVM_SEV_DEBUG_DECRYPT command can be used by the hypervisor to request the +firmware to decrypt the data at the given memory region. + +Parameters (in): struct kvm_sev_dbg + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_dbg { + __u64 src_uaddr; /* userspace address of data to decrypt */ + __u64 dst_uaddr; /* userspace address of destination */ + __u32 len; /* length of memory region to decrypt */ + }; + +The command returns an error if the guest policy does not allow debugging. + +8. KVM_SEV_DBG_ENCRYPT +---------------------- + +The KVM_SEV_DEBUG_ENCRYPT command can be used by the hypervisor to request the +firmware to encrypt the data at the given memory region. + +Parameters (in): struct kvm_sev_dbg + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_dbg { + __u64 src_uaddr; /* userspace address of data to encrypt */ + __u64 dst_uaddr; /* userspace address of destination */ + __u32 len; /* length of memory region to encrypt */ + }; + +The command returns an error if the guest policy does not allow debugging. + +9. KVM_SEV_LAUNCH_SECRET +------------------------ + +The KVM_SEV_LAUNCH_SECRET command can be used by the hypervisor to inject secret +data after the measurement has been validated by the guest owner. + +Parameters (in): struct kvm_sev_launch_secret + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_secret { + __u64 hdr_uaddr; /* userspace address containing the packet header */ + __u32 hdr_len; + + __u64 guest_uaddr; /* the guest memory region where the secret should be injected */ + __u32 guest_len; + + __u64 trans_uaddr; /* the hypervisor memory region which contains the secret */ + __u32 trans_len; + }; + +References +========== + +.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf +.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf +.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) +.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index fc3ae951bc07e..792fa8717d133 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1841,6 +1841,7 @@ registers, find a list below: PPC | KVM_REG_PPC_DBSR | 32 PPC | KVM_REG_PPC_TIDR | 64 PPC | KVM_REG_PPC_PSSCR | 64 + PPC | KVM_REG_PPC_DEC_EXPIRY | 64 PPC | KVM_REG_PPC_TM_GPR0 | 64 ... PPC | KVM_REG_PPC_TM_GPR31 | 64 @@ -3403,7 +3404,7 @@ invalid, if invalid pages are written to (e.g. after the end of memory) or if no page table is present for the addresses (e.g. when using hugepages). -4.108 KVM_PPC_GET_CPU_CHAR +4.109 KVM_PPC_GET_CPU_CHAR Capability: KVM_CAP_PPC_GET_CPU_CHAR Architectures: powerpc @@ -3449,6 +3450,57 @@ array bounds check and the array access. These fields use the same bit definitions as the new H_GET_CPU_CHARACTERISTICS hypercall. +4.110 KVM_MEMORY_ENCRYPT_OP + +Capability: basic +Architectures: x86 +Type: system +Parameters: an opaque platform specific structure (in/out) +Returns: 0 on success; -1 on error + +If the platform supports creating encrypted VMs then this ioctl can be used +for issuing platform-specific memory encryption commands to manage those +encrypted VMs. + +Currently, this ioctl is used for issuing Secure Encrypted Virtualization +(SEV) commands on AMD Processors. 
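+
+For example, a VMM could initialize the SEV context for a VM like this
+(a sketch, not part of the ABI text: error handling is elided, vm_fd is
+an assumed, already-created VM file descriptor, and the open() of
+/dev/sev mirrors what existing VMMs do; struct kvm_sev_cmd comes from
+the uapi <linux/kvm.h>):
+
+    struct kvm_sev_cmd cmd = {};
+
+    cmd.id = KVM_SEV_INIT;
+    cmd.sev_fd = open("/dev/sev", O_RDWR);   /* SEV firmware device */
+
+    if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0)
+        err(1, "KVM_SEV_INIT (fw error %u)", cmd.error);
+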
The SEV commands are defined in +Documentation/virtual/kvm/amd-memory-encryption.txt. + +4.111 KVM_MEMORY_ENCRYPT_REG_REGION + +Capability: basic +Architectures: x86 +Type: system +Parameters: struct kvm_enc_region (in) +Returns: 0 on success; -1 on error + +This ioctl can be used to register a guest memory region which may +contain encrypted data (e.g. guest RAM, SMRAM etc). + +It is used in the SEV-enabled guest. When encryption is enabled, a guest +memory region may contain encrypted data. The SEV memory encryption +engine uses a tweak such that two identical plaintext pages, each at +different locations will have differing ciphertexts. So swapping or +moving ciphertext of those pages will not result in plaintext being +swapped. So relocating (or migrating) physical backing pages for the SEV +guest will require some additional steps. + +Note: The current SEV key management spec does not provide commands to +swap or migrate (move) ciphertext pages. Hence, for now we pin the guest +memory region registered with the ioctl. + +4.112 KVM_MEMORY_ENCRYPT_UNREG_REGION + +Capability: basic +Architectures: x86 +Type: system +Parameters: struct kvm_enc_region (in) +Returns: 0 on success; -1 on error + +This ioctl can be used to unregister the guest memory region registered +with KVM_MEMORY_ENCRYPT_REG_REGION ioctl above. + + 5. The kvm_run structure ------------------------ diff --git a/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt b/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt deleted file mode 100644 index 38bca2835278c..0000000000000 --- a/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt +++ /dev/null @@ -1,187 +0,0 @@ -KVM/ARM VGIC Forwarded Physical Interrupts -========================================== - -The KVM/ARM code implements software support for the ARM Generic -Interrupt Controller's (GIC's) hardware support for virtualization by -allowing software to inject virtual interrupts to a VM, which the guest -OS sees as regular interrupts. The code is famously known as the VGIC. - -Some of these virtual interrupts, however, correspond to physical -interrupts from real physical devices. One example could be the -architected timer, which itself supports virtualization, and therefore -lets a guest OS program the hardware device directly to raise an -interrupt at some point in time. When such an interrupt is raised, the -host OS initially handles the interrupt and must somehow signal this -event as a virtual interrupt to the guest. Another example could be a -passthrough device, where the physical interrupts are initially handled -by the host, but the device driver for the device lives in the guest OS -and KVM must therefore somehow inject a virtual interrupt on behalf of -the physical one to the guest OS. - -These virtual interrupts corresponding to a physical interrupt on the -host are called forwarded physical interrupts, but are also sometimes -referred to as 'virtualized physical interrupts' and 'mapped interrupts'. - -Forwarded physical interrupts are handled slightly differently compared -to virtual interrupts generated purely by a software emulated device. - - -The HW bit ----------- -Virtual interrupts are signalled to the guest by programming the List -Registers (LRs) on the GIC before running a VCPU. The LR is programmed -with the virtual IRQ number and the state of the interrupt (Pending, -Active, or Pending+Active). When the guest ACKs and EOIs a virtual -interrupt, the LR state moves from Pending to Active, and finally to -inactive. 
- -The LRs include an extra bit, called the HW bit. When this bit is set, -KVM must also program an additional field in the LR, the physical IRQ -number, to link the virtual with the physical IRQ. - -When the HW bit is set, KVM must EITHER set the Pending OR the Active -bit, never both at the same time. - -Setting the HW bit causes the hardware to deactivate the physical -interrupt on the physical distributor when the guest deactivates the -corresponding virtual interrupt. - - -Forwarded Physical Interrupts Life Cycle ----------------------------------------- - -The state of forwarded physical interrupts is managed in the following way: - - - The physical interrupt is acked by the host, and becomes active on - the physical distributor (*). - - KVM sets the LR.Pending bit, because this is the only way the GICV - interface is going to present it to the guest. - - LR.Pending will stay set as long as the guest has not acked the interrupt. - - LR.Pending transitions to LR.Active on the guest read of the IAR, as - expected. - - On guest EOI, the *physical distributor* active bit gets cleared, - but the LR.Active is left untouched (set). - - KVM clears the LR on VM exits when the physical distributor - active state has been cleared. - -(*): The host handling is slightly more complicated. For some forwarded -interrupts (shared), KVM directly sets the active state on the physical -distributor before entering the guest, because the interrupt is never actually -handled on the host (see details on the timer as an example below). For other -forwarded interrupts (non-shared) the host does not deactivate the interrupt -when the host ISR completes, but leaves the interrupt active until the guest -deactivates it. Leaving the interrupt active is allowed, because Linux -configures the physical GIC with EOIMode=1, which causes EOI operations to -perform a priority drop allowing the GIC to receive other interrupts of the -default priority. - - -Forwarded Edge and Level Triggered PPIs and SPIs ------------------------------------------------- -Forwarded physical interrupts injected should always be active on the -physical distributor when injected to a guest. - -Level-triggered interrupts will keep the interrupt line to the GIC -asserted, typically until the guest programs the device to deassert the -line. This means that the interrupt will remain pending on the physical -distributor until the guest has reprogrammed the device. Since we -always run the VM with interrupts enabled on the CPU, a pending -interrupt will exit the guest as soon as we switch into the guest, -preventing the guest from ever making progress as the process repeats -over and over. Therefore, the active state on the physical distributor -must be set when entering the guest, preventing the GIC from forwarding -the pending interrupt to the CPU. As soon as the guest deactivates the -interrupt, the physical line is sampled by the hardware again and the host -takes a new interrupt if and only if the physical line is still asserted. - -Edge-triggered interrupts do not exhibit the same problem with -preventing guest execution that level-triggered interrupts do. One -option is to not use HW bit at all, and inject edge-triggered interrupts -from a physical device as pure virtual interrupts. But that would -potentially slow down handling of the interrupt in the guest, because a -physical interrupt occurring in the middle of the guest ISR would -preempt the guest for the host to handle the interrupt. 
Additionally, -if you configure the system to handle interrupts on a separate physical -core from that running your VCPU, you still have to interrupt the VCPU -to queue the pending state onto the LR, even though the guest won't use -this information until the guest ISR completes. Therefore, the HW -bit should always be set for forwarded edge-triggered interrupts. With -the HW bit set, the virtual interrupt is injected and additional -physical interrupts occurring before the guest deactivates the interrupt -simply mark the state on the physical distributor as Pending+Active. As -soon as the guest deactivates the interrupt, the host takes another -interrupt if and only if there was a physical interrupt between injecting -the forwarded interrupt to the guest and the guest deactivating the -interrupt. - -Consequently, whenever we schedule a VCPU with one or more LRs with the -HW bit set, the interrupt must also be active on the physical -distributor. - - -Forwarded LPIs --------------- -LPIs, introduced in GICv3, are always edge-triggered and do not have an -active state. They become pending when a device signal them, and as -soon as they are acked by the CPU, they are inactive again. - -It therefore doesn't make sense, and is not supported, to set the HW bit -for physical LPIs that are forwarded to a VM as virtual interrupts, -typically virtual SPIs. - -For LPIs, there is no other choice than to preempt the VCPU thread if -necessary, and queue the pending state onto the LR. - - -Putting It Together: The Architected Timer ------------------------------------------- -The architected timer is a device that signals interrupts with level -triggered semantics. The timer hardware is directly accessed by VCPUs -which program the timer to fire at some point in time. Each VCPU on a -system programs the timer to fire at different times, and therefore the -hardware is multiplexed between multiple VCPUs. This is implemented by -context-switching the timer state along with each VCPU thread. - -However, this means that a scenario like the following is entirely -possible, and in fact, typical: - -1. KVM runs the VCPU -2. The guest programs the time to fire in T+100 -3. The guest is idle and calls WFI (wait-for-interrupts) -4. The hardware traps to the host -5. KVM stores the timer state to memory and disables the hardware timer -6. KVM schedules a soft timer to fire in T+(100 - time since step 2) -7. KVM puts the VCPU thread to sleep (on a waitqueue) -8. The soft timer fires, waking up the VCPU thread -9. KVM reprograms the timer hardware with the VCPU's values -10. KVM marks the timer interrupt as active on the physical distributor -11. KVM injects a forwarded physical interrupt to the guest -12. KVM runs the VCPU - -Notice that KVM injects a forwarded physical interrupt in step 11 without -the corresponding interrupt having actually fired on the host. That is -exactly why we mark the timer interrupt as active in step 10, because -the active state on the physical distributor is part of the state -belonging to the timer hardware, which is context-switched along with -the VCPU thread. - -If the guest does not idle because it is busy, the flow looks like this -instead: - -1. KVM runs the VCPU -2. The guest programs the time to fire in T+100 -4. At T+100 the timer fires and a physical IRQ causes the VM to exit - (note that this initially only traps to EL2 and does not run the host ISR - until KVM has returned to the host). -5. 
With interrupts still disabled on the CPU coming back from the guest, KVM - stores the virtual timer state to memory and disables the virtual hw timer. -6. KVM looks at the timer state (in memory) and injects a forwarded physical - interrupt because it concludes the timer has expired. -7. KVM marks the timer interrupt as active on the physical distributor -7. KVM enables the timer, enables interrupts, and runs the VCPU - -Notice that again the forwarded physical interrupt is injected to the -guest without having actually been handled on the host. In this case it -is because the physical interrupt is never actually seen by the host because the -timer is disabled upon guest return, and the virtual forwarded interrupt is -injected on the KVM guest entry path. diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt index 3c65feb830101..dcab6dc11e3b0 100644 --- a/Documentation/virtual/kvm/cpuid.txt +++ b/Documentation/virtual/kvm/cpuid.txt @@ -54,6 +54,10 @@ KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit || || before enabling paravirtualized || || spinlock support. ------------------------------------------------------------------------------ +KVM_FEATURE_PV_TLB_FLUSH || 9 || guest checks this feature bit + || || before enabling paravirtualized + || || tlb flush. +------------------------------------------------------------------------------ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side || || per-cpu warps are expected in || || kvmclock. diff --git a/MAINTAINERS b/MAINTAINERS index e6c26cb47d02d..9a7f76eadae9a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7748,7 +7748,9 @@ F: arch/powerpc/kernel/kvm* KERNEL VIRTUAL MACHINE for s390 (KVM/s390) M: Christian Borntraeger -M: Cornelia Huck +M: Janosch Frank +R: David Hildenbrand +R: Cornelia Huck L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git @@ -9204,6 +9206,7 @@ MIPS GENERIC PLATFORM M: Paul Burton L: linux-mips@linux-mips.org S: Supported +F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt F: arch/mips/generic/ F: arch/mips/tools/generic-board-config.sh @@ -9943,6 +9946,7 @@ F: drivers/nfc/nxp-nci OBJTOOL M: Josh Poimboeuf +M: Peter Zijlstra S: Supported F: tools/objtool/ @@ -12026,6 +12030,7 @@ F: drivers/pci/hotplug/s390_pci_hpc.c S390 VFIO-CCW DRIVER M: Cornelia Huck M: Dong Jia Shi +M: Halil Pasic L: linux-s390@vger.kernel.org L: kvm@vger.kernel.org S: Supported diff --git a/Makefile b/Makefile index d192dd826ccea..d9cf3a40eda9d 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 -PATCHLEVEL = 15 +PATCHLEVEL = 16 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc2 NAME = Fearless Coyote # *DOCUMENTATION* @@ -729,7 +729,6 @@ endif ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) @@ -747,9 +746,9 @@ else # These warnings generated too much noise in a regular build. 
# Use make W=1 to enable them (see scripts/Makefile.extrawarn) KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) -KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) endif +KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) ifdef CONFIG_FRAME_POINTER KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls else diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 3d22eb87f919a..9003bd19cb701 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -131,7 +131,7 @@ static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) { unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; - return cpsr_mode > USR_MODE;; + return cpsr_mode > USR_MODE; } static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ef54013b5b9f1..248b930563e5a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -48,6 +48,8 @@ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); + u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index ab20ffa8b9e76..1ab8329e9ff75 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -21,7 +21,6 @@ #include #include #include -#include #include #define __hyp_text __section(.hyp.text) notrace @@ -69,6 +68,8 @@ #define HIFAR __ACCESS_CP15(c6, 4, c0, 2) #define HPFAR __ACCESS_CP15(c6, 4, c0, 4) #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6) +#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1) #define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) #define TLBIALL __ACCESS_CP15(c8, 0, c7, 0) diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index a2d176a308bd6..de1b919404e43 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -37,6 +37,8 @@ #include #include +#include +#include #include #include @@ -83,6 +85,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd) return pmd; } +static inline pte_t kvm_s2pte_mkexec(pte_t pte) +{ + pte_val(pte) &= ~L_PTE_XN; + return pte; +} + +static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd) +{ + pmd_val(pmd) &= ~PMD_SECT_XN; + return pmd; +} + static inline void kvm_set_s2pte_readonly(pte_t *pte) { pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY; @@ -93,6 +107,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte) return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY; } +static inline bool kvm_s2pte_exec(pte_t *pte) +{ + return !(pte_val(*pte) & L_PTE_XN); +} + static inline void kvm_set_s2pmd_readonly(pmd_t *pmd) { pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY; @@ -103,6 +122,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY; } +static inline bool kvm_s2pmd_exec(pmd_t *pmd) +{ + return !(pmd_val(*pmd) & PMD_SECT_XN); +} + static inline bool kvm_page_empty(void *ptr) { struct page *ptr_page = virt_to_page(ptr); @@ -126,10 +150,36 @@ static inline bool vcpu_has_cache_enabled(struct 
kvm_vcpu *vcpu) return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101; } -static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, - kvm_pfn_t pfn, - unsigned long size) +static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) +{ + /* + * Clean the dcache to the Point of Coherency. + * + * We need to do this through a kernel mapping (using the + * user-space mapping has proved to be the wrong + * solution). For that, we need to kmap one page at a time, + * and iterate over the range. + */ + + VM_BUG_ON(size & ~PAGE_MASK); + + while (size) { + void *va = kmap_atomic_pfn(pfn); + + kvm_flush_dcache_to_poc(va, PAGE_SIZE); + + size -= PAGE_SIZE; + pfn++; + + kunmap_atomic(va); + } +} + +static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn, + unsigned long size) { + u32 iclsz; + /* * If we are going to insert an instruction page and the icache is * either VIPT or PIPT, there is a potential problem where the host @@ -141,23 +191,40 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, * * VIVT caches are tagged using both the ASID and the VMID and doesn't * need any kind of flushing (DDI 0406C.b - Page B3-1392). - * - * We need to do this through a kernel mapping (using the - * user-space mapping has proved to be the wrong - * solution). For that, we need to kmap one page at a time, - * and iterate over the range. */ VM_BUG_ON(size & ~PAGE_MASK); + if (icache_is_vivt_asid_tagged()) + return; + + if (!icache_is_pipt()) { + /* any kind of VIPT cache */ + __flush_icache_all(); + return; + } + + /* + * CTR IminLine contains Log2 of the number of words in the + * cache line, so we can get the number of words as + * 2 << (IminLine - 1). To get the number of bytes, we + * multiply by 4 (the number of bytes in a 32-bit word), and + * get 4 << (IminLine). 
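+ * For example, IminLine == 2 means 2 << 1 = 4 words per cache line,
+ * i.e. iclsz = 4 << 2 = 16 bytes.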
+ */ + iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf); + while (size) { void *va = kmap_atomic_pfn(pfn); + void *end = va + PAGE_SIZE; + void *addr = va; - kvm_flush_dcache_to_poc(va, PAGE_SIZE); + do { + write_sysreg(addr, ICIMVAU); + addr += iclsz; + } while (addr < end); - if (icache_is_pipt()) - __cpuc_coherent_user_range((unsigned long)va, - (unsigned long)va + PAGE_SIZE); + dsb(ishst); + isb(); size -= PAGE_SIZE; pfn++; @@ -165,9 +232,11 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, kunmap_atomic(va); } - if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { - /* any kind of VIPT cache */ - __flush_icache_all(); + /* Check if we need to invalidate the BTB */ + if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) { + write_sysreg(0, BPIALLIS); + dsb(ishst); + isb(); } } diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 150ece66ddf34..a757401129f95 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -102,8 +102,8 @@ extern pgprot_t pgprot_s2_device; #define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY) #define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN) #define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) -#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) -#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY) +#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN) +#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN) #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index 330c9ce34ba5f..ae45ae96aac28 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -18,6 +18,7 @@ #include #include +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index 6d810af2d9fd7..c0edd450e1045 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -19,6 +19,7 @@ */ #include +#include /** * Flush per-VMID TLBs diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig index 1156a585dafc9..8841199058ea8 100644 --- a/arch/arm/mach-vt8500/Kconfig +++ b/arch/arm/mach-vt8500/Kconfig @@ -13,7 +13,6 @@ config ARCH_WM8505 depends on ARCH_MULTI_V5 select ARCH_VT8500 select CPU_ARM926T - help config ARCH_WM8750 bool "WonderMedia WM8750" diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 1241fb211293b..3c78835bba944 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -435,6 +435,27 @@ alternative_endif dsb \domain .endm +/* + * Macro to perform an instruction cache maintenance for the interval + * [start, end) + * + * start, end: virtual addresses describing the region + * label: A label to branch to on user fault. 
+ * Corrupts: tmp1, tmp2 + */ + .macro invalidate_icache_by_line start, end, tmp1, tmp2, label + icache_line_size \tmp1, \tmp2 + sub \tmp2, \tmp1, #1 + bic \tmp2, \start, \tmp2 +9997: +USER(\label, ic ivau, \tmp2) // invalidate I line PoU + add \tmp2, \tmp2, \tmp1 + cmp \tmp2, \end + b.lo 9997b + dsb ish + isb + .endm + /* * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present */ diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 955130762a3c6..bef9f418f0898 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -52,6 +52,12 @@ * - start - virtual start address * - end - virtual end address * + * invalidate_icache_range(start, end) + * + * Invalidate the I-cache in the region described by start, end. + * - start - virtual start address + * - end - virtual end address + * * __flush_cache_user_range(start, end) * * Ensure coherency between the I-cache and the D-cache in the @@ -66,6 +72,7 @@ * - size - region size */ extern void flush_icache_range(unsigned long start, unsigned long end); +extern int invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); extern void __inval_dcache_area(void *addr, size_t len); extern void __clean_dcache_area_poc(void *addr, size_t len); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index be7bd19c87ec2..eda8c5f629fc8 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -20,7 +20,7 @@ #define MPIDR_UP_BITMASK (0x1 << 30) #define MPIDR_MT_BITMASK (0x1 << 24) -#define MPIDR_HWID_BITMASK 0xff00ffffff +#define MPIDR_HWID_BITMASK 0xff00ffffffUL #define MPIDR_LEVEL_BITS_SHIFT 3 #define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT) diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 1dca41bea16ad..e73f685696246 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -22,7 +22,7 @@ static inline pte_t huge_ptep_get(pte_t *ptep) { - return *ptep; + return READ_ONCE(*ptep); } diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a73f63aca68e9..596f8e414a4c7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -48,6 +48,8 @@ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); + int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext); diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 08d3bb66c8b75..f26f9cd70c721 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -20,7 +20,6 @@ #include #include -#include #include #define __hyp_text __section(.hyp.text) notrace diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 72e279dbae5f4..7faed6e48b462 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -173,32 +173,54 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd) return pmd; } -static inline void kvm_set_s2pte_readonly(pte_t *pte) +static inline pte_t kvm_s2pte_mkexec(pte_t pte) +{ + pte_val(pte) &= ~PTE_S2_XN; + return pte; +} + +static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd) +{ + pmd_val(pmd) &= ~PMD_S2_XN; + return pmd; +} + +static inline void 
kvm_set_s2pte_readonly(pte_t *ptep) { pteval_t old_pteval, pteval; - pteval = READ_ONCE(pte_val(*pte)); + pteval = READ_ONCE(pte_val(*ptep)); do { old_pteval = pteval; pteval &= ~PTE_S2_RDWR; pteval |= PTE_S2_RDONLY; - pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval); + pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval); } while (pteval != old_pteval); } -static inline bool kvm_s2pte_readonly(pte_t *pte) +static inline bool kvm_s2pte_readonly(pte_t *ptep) { - return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY; + return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY; } -static inline void kvm_set_s2pmd_readonly(pmd_t *pmd) +static inline bool kvm_s2pte_exec(pte_t *ptep) { - kvm_set_s2pte_readonly((pte_t *)pmd); + return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN); } -static inline bool kvm_s2pmd_readonly(pmd_t *pmd) +static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp) { - return kvm_s2pte_readonly((pte_t *)pmd); + kvm_set_s2pte_readonly((pte_t *)pmdp); +} + +static inline bool kvm_s2pmd_readonly(pmd_t *pmdp) +{ + return kvm_s2pte_readonly((pte_t *)pmdp); +} + +static inline bool kvm_s2pmd_exec(pmd_t *pmdp) +{ + return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN); } static inline bool kvm_page_empty(void *ptr) @@ -230,21 +252,25 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; } -static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, - kvm_pfn_t pfn, - unsigned long size) +static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) { void *va = page_address(pfn_to_page(pfn)); kvm_flush_dcache_to_poc(va, size); +} +static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn, + unsigned long size) +{ if (icache_is_aliasing()) { /* any kind of VIPT cache */ __flush_icache_all(); } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) { /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */ - flush_icache_range((unsigned long)va, - (unsigned long)va + size); + void *va = page_address(pfn_to_page(pfn)); + + invalidate_icache_range((unsigned long)va, + (unsigned long)va + size); } } diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 8d3331985d2e3..39ec0b8a689ee 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -141,13 +141,13 @@ static inline void cpu_install_idmap(void) * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, * avoiding the possibility of conflicting TLB entries being allocated. 
*/ -static inline void cpu_replace_ttbr1(pgd_t *pgd) +static inline void cpu_replace_ttbr1(pgd_t *pgdp) { typedef void (ttbr_replace_func)(phys_addr_t); extern ttbr_replace_func idmap_cpu_replace_ttbr1; ttbr_replace_func *replace_phys; - phys_addr_t pgd_phys = virt_to_phys(pgd); + phys_addr_t pgd_phys = virt_to_phys(pgdp); replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1); diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index e9d9f1b006efe..2e05bcd944c83 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -36,23 +36,23 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) return (pmd_t *)__get_free_page(PGALLOC_GFP); } -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp) { - BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - free_page((unsigned long)pmd); + BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1)); + free_page((unsigned long)pmdp); } -static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot) +static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot) { - set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot)); + set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot)); } -static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp) { - __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE); + __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE); } #else -static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot) +static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot) { BUILD_BUG(); } @@ -65,30 +65,30 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) return (pud_t *)__get_free_page(PGALLOC_GFP); } -static inline void pud_free(struct mm_struct *mm, pud_t *pud) +static inline void pud_free(struct mm_struct *mm, pud_t *pudp) { - BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - free_page((unsigned long)pud); + BUG_ON((unsigned long)pudp & (PAGE_SIZE-1)); + free_page((unsigned long)pudp); } -static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot) +static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot) { - set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot)); + set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot)); } -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) +static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp) { - __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE); + __pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE); } #else -static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot) +static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot) { BUILD_BUG(); } #endif /* CONFIG_PGTABLE_LEVELS > 3 */ extern pgd_t *pgd_alloc(struct mm_struct *mm); -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp); static inline pte_t * pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) @@ -114,10 +114,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) /* * Free a PTE table. 
*/ -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep) { - if (pte) - free_page((unsigned long)pte); + if (ptep) + free_page((unsigned long)ptep); } static inline void pte_free(struct mm_struct *mm, pgtable_t pte) @@ -126,10 +126,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) __free_page(pte); } -static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, +static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep, pmdval_t prot) { - set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot)); + set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot)); } /* diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index f42836da8723a..cdfe3e657a9e9 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -187,9 +187,11 @@ */ #define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */ #define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ +#define PTE_S2_XN (_AT(pteval_t, 2) << 53) /* XN[1:0] */ #define PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[2:1] */ #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ +#define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */ /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 2db84df5eb422..108ecad7acc56 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -67,8 +67,8 @@ #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) -#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) -#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) +#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN) +#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN) #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 094374c82db08..7e2c27e63cd89 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -218,7 +218,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) static inline void set_pte(pte_t *ptep, pte_t pte) { - *ptep = pte; + WRITE_ONCE(*ptep, pte); /* * Only if the new pte is valid and kernel, otherwise TLB maintenance @@ -250,6 +250,8 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { + pte_t old_pte; + if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) __sync_icache_dcache(pte, addr); @@ -258,14 +260,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, * hardware updates of the pte (ptep_set_access_flags safely changes * valid ptes without going through an invalid entry). 
*/ - if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) && + old_pte = READ_ONCE(*ptep); + if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) && (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) { VM_WARN_ONCE(!pte_young(pte), "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", - __func__, pte_val(*ptep), pte_val(pte)); - VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte), + __func__, pte_val(old_pte), pte_val(pte)); + VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", - __func__, pte_val(*ptep), pte_val(pte)); + __func__, pte_val(old_pte), pte_val(pte)); } set_pte(ptep, pte); @@ -431,7 +434,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); dsb(ishst); isb(); } @@ -482,7 +485,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd) static inline void set_pud(pud_t *pudp, pud_t pud) { - *pudp = pud; + WRITE_ONCE(*pudp, pud); dsb(ishst); isb(); } @@ -500,7 +503,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud) /* Find an entry in the second-level page table. */ #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) -#define pmd_offset_phys(dir, addr) (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t)) +#define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t)) #define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr)))) #define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr)) @@ -535,7 +538,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud) static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { - *pgdp = pgd; + WRITE_ONCE(*pgdp, pgd); dsb(ishst); } @@ -552,7 +555,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd) /* Find an entry in the first-level page table.
*/ #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) -#define pud_offset_phys(dir, addr) (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t)) +#define pud_offset_phys(dir, addr) (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t)) #define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr)))) #define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr)) diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 252396a96c78f..7b09487ff8fb6 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -230,10 +230,10 @@ void __init acpi_boot_table_init(void) done: if (acpi_disabled) { - if (earlycon_init_is_deferred) + if (earlycon_acpi_spcr_enable) early_init_dt_scan_chosen_stdout(); } else { - parse_spcr(earlycon_init_is_deferred); + acpi_parse_spcr(earlycon_acpi_spcr_enable, true); if (IS_ENABLED(CONFIG_ACPI_BGRT)) acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); } diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 07823595b7f01..52f15cd896e11 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), }, + { + .capability = ARM64_HARDEN_BRANCH_PREDICTOR, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + .enable = qcom_enable_link_stack_sanitization, + }, + { + .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), + }, { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index f85ac58d08a35..a8bf1c892b906 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -90,7 +90,7 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token, unsigned long addr, void *data) { efi_memory_desc_t *md = data; - pte_t pte = *ptep; + pte_t pte = READ_ONCE(*ptep); if (md->attribute & EFI_MEMORY_RO) pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index f20cf7e992495..1ec5f28c39fc5 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -202,10 +202,10 @@ static int create_safe_exec_page(void *src_start, size_t length, gfp_t mask) { int rc = 0; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; unsigned long dst = (unsigned long)allocator(mask); if (!dst) { @@ -216,38 +216,38 @@ static int create_safe_exec_page(void *src_start, size_t length, memcpy((void *)dst, src_start, length); flush_icache_range(dst, dst + length); - pgd = pgd_offset_raw(allocator(mask), dst_addr); - if (pgd_none(*pgd)) { - pud = allocator(mask); - if (!pud) { + pgdp = pgd_offset_raw(allocator(mask), dst_addr); + if (pgd_none(READ_ONCE(*pgdp))) { + pudp = allocator(mask); + if (!pudp) { rc = -ENOMEM; goto out; } - pgd_populate(&init_mm, pgd, pud); + pgd_populate(&init_mm, pgdp, pudp); } - pud = pud_offset(pgd, dst_addr); - if (pud_none(*pud)) { - pmd = allocator(mask); - if (!pmd) { + pudp = pud_offset(pgdp, dst_addr); + if (pud_none(READ_ONCE(*pudp))) { + pmdp = allocator(mask); + if (!pmdp) { rc = -ENOMEM; goto out; } - pud_populate(&init_mm, pud, pmd); + pud_populate(&init_mm, pudp, pmdp); } - pmd = pmd_offset(pud, dst_addr); - if (pmd_none(*pmd)) { - pte = allocator(mask); - if (!pte) { + pmdp = pmd_offset(pudp, dst_addr); + if 
(pmd_none(READ_ONCE(*pmdp))) { + ptep = allocator(mask); + if (!ptep) { rc = -ENOMEM; goto out; } - pmd_populate_kernel(&init_mm, pmd, pte); + pmd_populate_kernel(&init_mm, pmdp, ptep); } - pte = pte_offset_kernel(pmd, dst_addr); - set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC)); + ptep = pte_offset_kernel(pmdp, dst_addr); + set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC)); /* * Load our new page tables. A strict BBM approach requires that we @@ -263,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length, */ cpu_set_reserved_ttbr0(); local_flush_tlb_all(); - write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1); + write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1); isb(); *phys_dst_addr = virt_to_phys((void *)dst); @@ -320,9 +320,9 @@ int swsusp_arch_suspend(void) return ret; } -static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr) +static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) { - pte_t pte = *src_pte; + pte_t pte = READ_ONCE(*src_ptep); if (pte_valid(pte)) { /* @@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr) * read only (code, rodata). Clear the RDONLY bit from * the temporary mappings we use during restore. */ - set_pte(dst_pte, pte_mkwrite(pte)); + set_pte(dst_ptep, pte_mkwrite(pte)); } else if (debug_pagealloc_enabled() && !pte_none(pte)) { /* * debug_pagealloc will have removed the PTE_VALID bit if @@ -343,112 +343,116 @@ */ BUG_ON(!pfn_valid(pte_pfn(pte))); - set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte))); + set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte))); } } -static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, +static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start, unsigned long end) { - pte_t *src_pte; - pte_t *dst_pte; + pte_t *src_ptep; + pte_t *dst_ptep; unsigned long addr = start; - dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC); - if (!dst_pte) + dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC); + if (!dst_ptep) return -ENOMEM; - pmd_populate_kernel(&init_mm, dst_pmd, dst_pte); - dst_pte = pte_offset_kernel(dst_pmd, start); + pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep); + dst_ptep = pte_offset_kernel(dst_pmdp, start); - src_pte = pte_offset_kernel(src_pmd, start); + src_ptep = pte_offset_kernel(src_pmdp, start); do { - _copy_pte(dst_pte, src_pte, addr); - } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); + _copy_pte(dst_ptep, src_ptep, addr); + } while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end); return 0; } -static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start, +static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start, unsigned long end) { - pmd_t *src_pmd; - pmd_t *dst_pmd; + pmd_t *src_pmdp; + pmd_t *dst_pmdp; unsigned long next; unsigned long addr = start; - if (pud_none(*dst_pud)) { - dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); - if (!dst_pmd) + if (pud_none(READ_ONCE(*dst_pudp))) { + dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pmdp) return -ENOMEM; - pud_populate(&init_mm, dst_pud, dst_pmd); + pud_populate(&init_mm, dst_pudp, dst_pmdp); } - dst_pmd = pmd_offset(dst_pud, start); + dst_pmdp = pmd_offset(dst_pudp, start); - src_pmd = pmd_offset(src_pud, start); + src_pmdp = pmd_offset(src_pudp, start); do { + pmd_t pmd = READ_ONCE(*src_pmdp); + next = pmd_addr_end(addr, end); - if (pmd_none(*src_pmd)) + if (pmd_none(pmd)) continue; -
if (pmd_table(*src_pmd)) { - if (copy_pte(dst_pmd, src_pmd, addr, next)) + if (pmd_table(pmd)) { + if (copy_pte(dst_pmdp, src_pmdp, addr, next)) return -ENOMEM; } else { - set_pmd(dst_pmd, - __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY)); + set_pmd(dst_pmdp, + __pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY)); } - } while (dst_pmd++, src_pmd++, addr = next, addr != end); + } while (dst_pmdp++, src_pmdp++, addr = next, addr != end); return 0; } -static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start, +static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, unsigned long end) { - pud_t *dst_pud; - pud_t *src_pud; + pud_t *dst_pudp; + pud_t *src_pudp; unsigned long next; unsigned long addr = start; - if (pgd_none(*dst_pgd)) { - dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC); - if (!dst_pud) + if (pgd_none(READ_ONCE(*dst_pgdp))) { + dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC); + if (!dst_pudp) return -ENOMEM; - pgd_populate(&init_mm, dst_pgd, dst_pud); + pgd_populate(&init_mm, dst_pgdp, dst_pudp); } - dst_pud = pud_offset(dst_pgd, start); + dst_pudp = pud_offset(dst_pgdp, start); - src_pud = pud_offset(src_pgd, start); + src_pudp = pud_offset(src_pgdp, start); do { + pud_t pud = READ_ONCE(*src_pudp); + next = pud_addr_end(addr, end); - if (pud_none(*src_pud)) + if (pud_none(pud)) continue; - if (pud_table(*(src_pud))) { - if (copy_pmd(dst_pud, src_pud, addr, next)) + if (pud_table(pud)) { + if (copy_pmd(dst_pudp, src_pudp, addr, next)) return -ENOMEM; } else { - set_pud(dst_pud, - __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY)); + set_pud(dst_pudp, + __pud(pud_val(pud) & ~PMD_SECT_RDONLY)); } - } while (dst_pud++, src_pud++, addr = next, addr != end); + } while (dst_pudp++, src_pudp++, addr = next, addr != end); return 0; } -static int copy_page_tables(pgd_t *dst_pgd, unsigned long start, +static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start, unsigned long end) { unsigned long next; unsigned long addr = start; - pgd_t *src_pgd = pgd_offset_k(start); + pgd_t *src_pgdp = pgd_offset_k(start); - dst_pgd = pgd_offset_raw(dst_pgd, start); + dst_pgdp = pgd_offset_raw(dst_pgdp, start); do { next = pgd_addr_end(addr, end); - if (pgd_none(*src_pgd)) + if (pgd_none(READ_ONCE(*src_pgdp))) continue; - if (copy_pud(dst_pgd, src_pgd, addr, next)) + if (copy_pud(dst_pgdp, src_pgdp, addr, next)) return -ENOMEM; - } while (dst_pgd++, src_pgd++, addr = next, addr != end); + } while (dst_pgdp++, src_pgdp++, addr = next, addr != end); return 0; } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 5c7f657dd2074..d7e3299a77346 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -361,10 +361,16 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { + int ret = 0; + + vcpu_load(vcpu); + trace_kvm_set_guest_debug(vcpu, dbg->control); - if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) - return -EINVAL; + if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) { + ret = -EINVAL; + goto out; + } if (dbg->control & KVM_GUESTDBG_ENABLE) { vcpu->guest_debug = dbg->control; @@ -378,7 +384,10 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, /* If not enabled clear all flags */ vcpu->guest_debug = 0; } - return 0; + +out: + vcpu_put(vcpu); + return ret; } int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index f4363d40e2cd7..dabb5cc7b087c 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ 
b/arch/arm64/kvm/hyp/debug-sr.c @@ -21,6 +21,7 @@ #include #include #include +#include #define read_debug(r,n) read_sysreg(r##n##_el1) #define write_debug(v,r,n) write_sysreg(v, r##n##_el1) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index cac6a05001624..870f4b1587f97 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -406,8 +407,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) u32 midr = read_cpuid_id(); /* Apply BTAC predictors mitigation to all Falkor chips */ - if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1) + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { __qcom_hyp_sanitize_btac_predictors(); + } } fp_enabled = __fpsimd_enabled(); diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index 73464a96c3657..131c7772703c2 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -16,6 +16,7 @@ */ #include +#include #include static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 91464e7f77cc3..758bde7e2fa68 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -60,16 +60,7 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE b.lo 1b dsb ish - icache_line_size x2, x3 - sub x3, x2, #1 - bic x4, x0, x3 -1: -USER(9f, ic ivau, x4 ) // invalidate I line PoU - add x4, x4, x2 - cmp x4, x1 - b.lo 1b - dsb ish - isb + invalidate_icache_by_line x0, x1, x2, x3, 9f mov x0, #0 1: uaccess_ttbr0_disable x1, x2 @@ -80,6 +71,27 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU ENDPROC(flush_icache_range) ENDPROC(__flush_cache_user_range) +/* + * invalidate_icache_range(start,end) + * + * Ensure that the I-cache is invalidated within the specified region.
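+ * Returns 0 on success, or -EFAULT if a fault is taken on a user space
+ * address while the lines are being invalidated (the fixup at label 2f
+ * below).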
+ * + * - start - virtual start address of region + * - end - virtual end address of region + */ +ENTRY(invalidate_icache_range) + uaccess_ttbr0_enable x2, x3, x4 + + invalidate_icache_by_line x0, x1, x2, x3, 2f + mov x0, xzr +1: + uaccess_ttbr0_disable x1, x2 + ret +2: + mov x0, #-EFAULT + b 1b +ENDPROC(invalidate_icache_range) + /* * __flush_dcache_area(kaddr, size) * diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 7b60d62ac5939..65dfc8571bf83 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, } -static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) +static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) { - pte_t *pte = pte_offset_kernel(pmd, 0UL); + pte_t *ptep = pte_offset_kernel(pmdp, 0UL); unsigned long addr; unsigned i; - for (i = 0; i < PTRS_PER_PTE; i++, pte++) { + for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { addr = start + i * PAGE_SIZE; - note_page(st, addr, 4, pte_val(*pte)); + note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); } } -static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) +static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) { - pmd_t *pmd = pmd_offset(pud, 0UL); + pmd_t *pmdp = pmd_offset(pudp, 0UL); unsigned long addr; unsigned i; - for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { + for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { + pmd_t pmd = READ_ONCE(*pmdp); + addr = start + i * PMD_SIZE; - if (pmd_none(*pmd) || pmd_sect(*pmd)) { - note_page(st, addr, 3, pmd_val(*pmd)); + if (pmd_none(pmd) || pmd_sect(pmd)) { + note_page(st, addr, 3, pmd_val(pmd)); } else { - BUG_ON(pmd_bad(*pmd)); - walk_pte(st, pmd, addr); + BUG_ON(pmd_bad(pmd)); + walk_pte(st, pmdp, addr); } } } -static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) +static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) { - pud_t *pud = pud_offset(pgd, 0UL); + pud_t *pudp = pud_offset(pgdp, 0UL); unsigned long addr; unsigned i; - for (i = 0; i < PTRS_PER_PUD; i++, pud++) { + for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { + pud_t pud = READ_ONCE(*pudp); + addr = start + i * PUD_SIZE; - if (pud_none(*pud) || pud_sect(*pud)) { - note_page(st, addr, 2, pud_val(*pud)); + if (pud_none(pud) || pud_sect(pud)) { + note_page(st, addr, 2, pud_val(pud)); } else { - BUG_ON(pud_bad(*pud)); - walk_pmd(st, pud, addr); + BUG_ON(pud_bad(pud)); + walk_pmd(st, pudp, addr); } } } @@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) { - pgd_t *pgd = pgd_offset(mm, 0UL); + pgd_t *pgdp = pgd_offset(mm, 0UL); unsigned i; unsigned long addr; - for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { + for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { + pgd_t pgd = READ_ONCE(*pgdp); + addr = start + i * PGDIR_SIZE; - if (pgd_none(*pgd)) { - note_page(st, addr, 1, pgd_val(*pgd)); + if (pgd_none(pgd)) { + note_page(st, addr, 1, pgd_val(pgd)); } else { - BUG_ON(pgd_bad(*pgd)); - walk_pud(st, pgd, addr); + BUG_ON(pgd_bad(pgd)); + walk_pud(st, pgdp, addr); } } } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index f76bb2c3c9434..bff11553eb050 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -130,7 +130,8 @@ static void mem_abort_decode(unsigned int esr) void show_pte(unsigned long addr) { struct mm_struct *mm; - pgd_t *pgd; + pgd_t *pgdp; + pgd_t pgd; if 
(addr < TASK_SIZE) { /* TTBR0 */ @@ -149,33 +150,37 @@ void show_pte(unsigned long addr) return; } - pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n", + pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n", mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K, VA_BITS, mm->pgd); - pgd = pgd_offset(mm, addr); - pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd)); + pgdp = pgd_offset(mm, addr); + pgd = READ_ONCE(*pgdp); + pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); do { - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + pte_t *ptep, pte; - if (pgd_none(*pgd) || pgd_bad(*pgd)) + if (pgd_none(pgd) || pgd_bad(pgd)) break; - pud = pud_offset(pgd, addr); - pr_cont(", *pud=%016llx", pud_val(*pud)); - if (pud_none(*pud) || pud_bad(*pud)) + pudp = pud_offset(pgdp, addr); + pud = READ_ONCE(*pudp); + pr_cont(", pud=%016llx", pud_val(pud)); + if (pud_none(pud) || pud_bad(pud)) break; - pmd = pmd_offset(pud, addr); - pr_cont(", *pmd=%016llx", pmd_val(*pmd)); - if (pmd_none(*pmd) || pmd_bad(*pmd)) + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + pr_cont(", pmd=%016llx", pmd_val(pmd)); + if (pmd_none(pmd) || pmd_bad(pmd)) break; - pte = pte_offset_map(pmd, addr); - pr_cont(", *pte=%016llx", pte_val(*pte)); - pte_unmap(pte); + ptep = pte_offset_map(pmdp, addr); + pte = READ_ONCE(*ptep); + pr_cont(", pte=%016llx", pte_val(pte)); + pte_unmap(ptep); } while(0); pr_cont("\n"); @@ -196,8 +201,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma, pte_t entry, int dirty) { pteval_t old_pteval, pteval; + pte_t pte = READ_ONCE(*ptep); - if (pte_same(*ptep, entry)) + if (pte_same(pte, entry)) return 0; /* only preserve the access flags and write permission */ @@ -210,7 +216,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, * (calculated as: a & b == ~(~a | ~b)). 
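 * The update loop below implements exactly that identity: PTE_RDONLY
 * is inverted in both the old and the new value, the two are OR-ed
 * together, and the bit is inverted back, so the stored pte ends up
 * read-only only if both values were read-only.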
*/ pte_val(entry) ^= PTE_RDONLY; - pteval = READ_ONCE(pte_val(*ptep)); + pteval = pte_val(pte); do { old_pteval = pteval; pteval ^= PTE_RDONLY; diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 6cb0fa92a6516..ecc6818191df9 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -54,14 +54,14 @@ static inline pgprot_t pte_pgprot(pte_t pte) static int find_num_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, size_t *pgsize) { - pgd_t *pgd = pgd_offset(mm, addr); - pud_t *pud; - pmd_t *pmd; + pgd_t *pgdp = pgd_offset(mm, addr); + pud_t *pudp; + pmd_t *pmdp; *pgsize = PAGE_SIZE; - pud = pud_offset(pgd, addr); - pmd = pmd_offset(pud, addr); - if ((pte_t *)pmd == ptep) { + pudp = pud_offset(pgdp, addr); + pmdp = pmd_offset(pudp, addr); + if ((pte_t *)pmdp == ptep) { *pgsize = PMD_SIZE; return CONT_PMDS; } @@ -181,11 +181,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, clear_flush(mm, addr, ptep, pgsize, ncontig); - for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) { - pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep, - pte_val(pfn_pte(pfn, hugeprot))); + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); - } } void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, @@ -203,20 +200,20 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { - pgd_t *pgd; - pud_t *pud; - pte_t *pte = NULL; - - pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz); - pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); - if (!pud) + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep = NULL; + + pgdp = pgd_offset(mm, addr); + pudp = pud_alloc(mm, pgdp, addr); + if (!pudp) return NULL; if (sz == PUD_SIZE) { - pte = (pte_t *)pud; + ptep = (pte_t *)pudp; } else if (sz == (PAGE_SIZE * CONT_PTES)) { - pmd_t *pmd = pmd_alloc(mm, pud, addr); + pmdp = pmd_alloc(mm, pudp, addr); WARN_ON(addr & (sz - 1)); /* @@ -226,60 +223,55 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, * will be no pte_unmap() to correspond with this * pte_alloc_map(). */ - pte = pte_alloc_map(mm, pmd, addr); + ptep = pte_alloc_map(mm, pmdp, addr); } else if (sz == PMD_SIZE) { if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(*pud)) - pte = huge_pmd_share(mm, addr, pud); + pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, addr, pudp); else - pte = (pte_t *)pmd_alloc(mm, pud, addr); + ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (PMD_SIZE * CONT_PMDS)) { - pmd_t *pmd; - - pmd = pmd_alloc(mm, pud, addr); + pmdp = pmd_alloc(mm, pudp, addr); WARN_ON(addr & (sz - 1)); - return (pte_t *)pmd; + return (pte_t *)pmdp; } - pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr, - sz, pte, pte_val(*pte)); - return pte; + return ptep; } pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; + pgd_t *pgdp; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; - pgd = pgd_offset(mm, addr); - pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd); - if (!pgd_present(*pgd)) + pgdp = pgd_offset(mm, addr); + if (!pgd_present(READ_ONCE(*pgdp))) return NULL; - pud = pud_offset(pgd, addr); - if (sz != PUD_SIZE && pud_none(*pud)) + pudp = pud_offset(pgdp, addr); + pud = READ_ONCE(*pudp); + if (sz != PUD_SIZE && pud_none(pud)) return NULL; /* hugepage or swap? 
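 * (a swap entry for a hugepage is a non-present entry at this level,
 * so both cases are handled by returning the entry itself rather than
 * descending further)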
*/ - if (pud_huge(*pud) || !pud_present(*pud)) - return (pte_t *)pud; + if (pud_huge(pud) || !pud_present(pud)) + return (pte_t *)pudp; /* table; check the next level */ if (sz == CONT_PMD_SIZE) addr &= CONT_PMD_MASK; - pmd = pmd_offset(pud, addr); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && - pmd_none(*pmd)) + pmd_none(pmd)) return NULL; - if (pmd_huge(*pmd) || !pmd_present(*pmd)) - return (pte_t *)pmd; + if (pmd_huge(pmd) || !pmd_present(pmd)) + return (pte_t *)pmdp; - if (sz == CONT_PTE_SIZE) { - pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK)); - return pte; - } + if (sz == CONT_PTE_SIZE) + return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK)); return NULL; } @@ -367,7 +359,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, size_t pgsize; pte_t pte; - if (!pte_cont(*ptep)) { + if (!pte_cont(READ_ONCE(*ptep))) { ptep_set_wrprotect(mm, addr, ptep); return; } @@ -391,7 +383,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma, size_t pgsize; int ncontig; - if (!pte_cont(*ptep)) { + if (!pte_cont(READ_ONCE(*ptep))) { ptep_clear_flush(vma, addr, ptep); return; } diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 6e02e6fb4c7b9..dabfc1ecda3d3 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -44,92 +44,92 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) return __pa(p); } -static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node, +static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early) { - if (pmd_none(*pmd)) { + if (pmd_none(READ_ONCE(*pmdp))) { phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte) : kasan_alloc_zeroed_page(node); - __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE); + __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); } - return early ? pte_offset_kimg(pmd, addr) - : pte_offset_kernel(pmd, addr); + return early ? pte_offset_kimg(pmdp, addr) + : pte_offset_kernel(pmdp, addr); } -static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node, +static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early) { - if (pud_none(*pud)) { + if (pud_none(READ_ONCE(*pudp))) { phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd) : kasan_alloc_zeroed_page(node); - __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE); + __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE); } - return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr); + return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr); } -static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node, +static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node, bool early) { - if (pgd_none(*pgd)) { + if (pgd_none(READ_ONCE(*pgdp))) { phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud) : kasan_alloc_zeroed_page(node); - __pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE); + __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE); } - return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr); + return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr); } -static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr, +static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, unsigned long end, int node, bool early) { unsigned long next; - pte_t *pte = kasan_pte_offset(pmd, addr, node, early); + pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early); do { phys_addr_t page_phys = early ? 
__pa_symbol(kasan_zero_page) : kasan_alloc_zeroed_page(node); next = addr + PAGE_SIZE; - set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (pte++, addr = next, addr != end && pte_none(*pte)); + set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); + } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); } -static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr, +static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, unsigned long end, int node, bool early) { unsigned long next; - pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early); + pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early); do { next = pmd_addr_end(addr, end); - kasan_pte_populate(pmd, addr, next, node, early); - } while (pmd++, addr = next, addr != end && pmd_none(*pmd)); + kasan_pte_populate(pmdp, addr, next, node, early); + } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp))); } -static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr, +static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr, unsigned long end, int node, bool early) { unsigned long next; - pud_t *pud = kasan_pud_offset(pgd, addr, node, early); + pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early); do { next = pud_addr_end(addr, end); - kasan_pmd_populate(pud, addr, next, node, early); - } while (pud++, addr = next, addr != end && pud_none(*pud)); + kasan_pmd_populate(pudp, addr, next, node, early); + } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp))); } static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, int node, bool early) { unsigned long next; - pgd_t *pgd; + pgd_t *pgdp; - pgd = pgd_offset_k(addr); + pgdp = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); - kasan_pud_populate(pgd, addr, next, node, early); - } while (pgd++, addr = next, addr != end); + kasan_pud_populate(pgdp, addr, next, node, early); + } while (pgdp++, addr = next, addr != end); } /* The early shadow maps everything to a single page of zeroes */ @@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end, */ void __init kasan_copy_shadow(pgd_t *pgdir) { - pgd_t *pgd, *pgd_new, *pgd_end; + pgd_t *pgdp, *pgdp_new, *pgdp_end; - pgd = pgd_offset_k(KASAN_SHADOW_START); - pgd_end = pgd_offset_k(KASAN_SHADOW_END); - pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START); + pgdp = pgd_offset_k(KASAN_SHADOW_START); + pgdp_end = pgd_offset_k(KASAN_SHADOW_END); + pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START); do { - set_pgd(pgd_new, *pgd); - } while (pgd++, pgd_new++, pgd != pgd_end); + set_pgd(pgdp_new, READ_ONCE(*pgdp)); + } while (pgdp++, pgdp_new++, pgdp != pgdp_end); } static void __init clear_pgds(unsigned long start, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 4694cda823c95..3161b853f29e1 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -125,45 +125,48 @@ static bool pgattr_change_is_safe(u64 old, u64 new) return ((old ^ new) & ~mask) == 0; } -static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, +static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot) { - pte_t *pte; + pte_t *ptep; - pte = pte_set_fixmap_offset(pmd, addr); + ptep = pte_set_fixmap_offset(pmdp, addr); do { - pte_t old_pte = *pte; + pte_t old_pte = READ_ONCE(*ptep); - set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot)); + set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); /* * After the PTE entry has 
been populated once, we * only allow updates to the permission attributes. */ - BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte))); + BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), + READ_ONCE(pte_val(*ptep)))); phys += PAGE_SIZE; - } while (pte++, addr += PAGE_SIZE, addr != end); + } while (ptep++, addr += PAGE_SIZE, addr != end); pte_clear_fixmap(); } -static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr, +static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void), int flags) { unsigned long next; + pmd_t pmd = READ_ONCE(*pmdp); - BUG_ON(pmd_sect(*pmd)); - if (pmd_none(*pmd)) { + BUG_ON(pmd_sect(pmd)); + if (pmd_none(pmd)) { phys_addr_t pte_phys; BUG_ON(!pgtable_alloc); pte_phys = pgtable_alloc(); - __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE); + __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); + pmd = READ_ONCE(*pmdp); } - BUG_ON(pmd_bad(*pmd)); + BUG_ON(pmd_bad(pmd)); do { pgprot_t __prot = prot; @@ -175,67 +178,69 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr, (flags & NO_CONT_MAPPINGS) == 0) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); - init_pte(pmd, addr, next, phys, __prot); + init_pte(pmdp, addr, next, phys, __prot); phys += next - addr; } while (addr = next, addr != end); } -static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end, +static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void), int flags) { unsigned long next; - pmd_t *pmd; + pmd_t *pmdp; - pmd = pmd_set_fixmap_offset(pud, addr); + pmdp = pmd_set_fixmap_offset(pudp, addr); do { - pmd_t old_pmd = *pmd; + pmd_t old_pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); /* try section mapping first */ if (((addr | next | phys) & ~SECTION_MASK) == 0 && (flags & NO_BLOCK_MAPPINGS) == 0) { - pmd_set_huge(pmd, phys, prot); + pmd_set_huge(pmdp, phys, prot); /* * After the PMD entry has been populated once, we * only allow updates to the permission attributes. */ BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), - pmd_val(*pmd))); + READ_ONCE(pmd_val(*pmdp)))); } else { - alloc_init_cont_pte(pmd, addr, next, phys, prot, + alloc_init_cont_pte(pmdp, addr, next, phys, prot, pgtable_alloc, flags); BUG_ON(pmd_val(old_pmd) != 0 && - pmd_val(old_pmd) != pmd_val(*pmd)); + pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp))); } phys += next - addr; - } while (pmd++, addr = next, addr != end); + } while (pmdp++, addr = next, addr != end); pmd_clear_fixmap(); } -static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr, +static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void), int flags) { unsigned long next; + pud_t pud = READ_ONCE(*pudp); /* * Check for initial section mappings in the pgd/pud. 
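 * (turning an existing section mapping into a table would require
 * break-before-make, which is not attempted here, hence the BUG_ON
 * below)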
*/ - BUG_ON(pud_sect(*pud)); - if (pud_none(*pud)) { + BUG_ON(pud_sect(pud)); + if (pud_none(pud)) { phys_addr_t pmd_phys; BUG_ON(!pgtable_alloc); pmd_phys = pgtable_alloc(); - __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE); + __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); + pud = READ_ONCE(*pudp); } - BUG_ON(pud_bad(*pud)); + BUG_ON(pud_bad(pud)); do { pgprot_t __prot = prot; @@ -247,7 +252,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr, (flags & NO_CONT_MAPPINGS) == 0) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); - init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags); + init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags); phys += next - addr; } while (addr = next, addr != end); @@ -265,25 +270,27 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next, return true; } -static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, - phys_addr_t phys, pgprot_t prot, - phys_addr_t (*pgtable_alloc)(void), - int flags) +static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(void), + int flags) { - pud_t *pud; unsigned long next; + pud_t *pudp; + pgd_t pgd = READ_ONCE(*pgdp); - if (pgd_none(*pgd)) { + if (pgd_none(pgd)) { phys_addr_t pud_phys; BUG_ON(!pgtable_alloc); pud_phys = pgtable_alloc(); - __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE); + __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE); + pgd = READ_ONCE(*pgdp); } - BUG_ON(pgd_bad(*pgd)); + BUG_ON(pgd_bad(pgd)); - pud = pud_set_fixmap_offset(pgd, addr); + pudp = pud_set_fixmap_offset(pgdp, addr); do { - pud_t old_pud = *pud; + pud_t old_pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); @@ -292,23 +299,23 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, */ if (use_1G_block(addr, next, phys) && (flags & NO_BLOCK_MAPPINGS) == 0) { - pud_set_huge(pud, phys, prot); + pud_set_huge(pudp, phys, prot); /* * After the PUD entry has been populated once, we * only allow updates to the permission attributes. 
*/ BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), - pud_val(*pud))); + READ_ONCE(pud_val(*pudp)))); } else { - alloc_init_cont_pmd(pud, addr, next, phys, prot, + alloc_init_cont_pmd(pudp, addr, next, phys, prot, pgtable_alloc, flags); BUG_ON(pud_val(old_pud) != 0 && - pud_val(old_pud) != pud_val(*pud)); + pud_val(old_pud) != READ_ONCE(pud_val(*pudp))); } phys += next - addr; - } while (pud++, addr = next, addr != end); + } while (pudp++, addr = next, addr != end); pud_clear_fixmap(); } @@ -320,7 +327,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, int flags) { unsigned long addr, length, end, next; - pgd_t *pgd = pgd_offset_raw(pgdir, virt); + pgd_t *pgdp = pgd_offset_raw(pgdir, virt); /* * If the virtual and physical address don't have the same offset @@ -336,10 +343,10 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, end = addr + length; do { next = pgd_addr_end(addr, end); - alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc, + alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc, flags); phys += next - addr; - } while (pgd++, addr = next, addr != end); + } while (pgdp++, addr = next, addr != end); } static phys_addr_t pgd_pgtable_alloc(void) @@ -401,10 +408,10 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt, flush_tlb_kernel_range(virt, virt + size); } -static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, +static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start, phys_addr_t end, pgprot_t prot, int flags) { - __create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start, + __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, prot, early_pgtable_alloc, flags); } @@ -418,7 +425,7 @@ void __init mark_linear_text_alias_ro(void) PAGE_KERNEL_RO); } -static void __init map_mem(pgd_t *pgd) +static void __init map_mem(pgd_t *pgdp) { phys_addr_t kernel_start = __pa_symbol(_text); phys_addr_t kernel_end = __pa_symbol(__init_begin); @@ -451,7 +458,7 @@ static void __init map_mem(pgd_t *pgd) if (memblock_is_nomap(reg)) continue; - __map_memblock(pgd, start, end, PAGE_KERNEL, flags); + __map_memblock(pgdp, start, end, PAGE_KERNEL, flags); } /* @@ -464,7 +471,7 @@ static void __init map_mem(pgd_t *pgd) * Note that contiguous mappings cannot be remapped in this way, * so we should avoid them here. */ - __map_memblock(pgd, kernel_start, kernel_end, + __map_memblock(pgdp, kernel_start, kernel_end, PAGE_KERNEL, NO_CONT_MAPPINGS); memblock_clear_nomap(kernel_start, kernel_end - kernel_start); @@ -475,7 +482,7 @@ static void __init map_mem(pgd_t *pgd) * through /sys/kernel/kexec_crash_size interface. 
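 * Page granularity depends on the NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS
 * flags passed below: block or contiguous entries could not later be
 * unmapped a page at a time without break-before-make.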
*/ if (crashk_res.end) { - __map_memblock(pgd, crashk_res.start, crashk_res.end + 1, + __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1, PAGE_KERNEL, NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); memblock_clear_nomap(crashk_res.start, @@ -499,7 +506,7 @@ void mark_rodata_ro(void) debug_checkwx(); } -static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, +static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, pgprot_t prot, struct vm_struct *vma, int flags, unsigned long vm_flags) { @@ -509,7 +516,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end, BUG_ON(!PAGE_ALIGNED(pa_start)); BUG_ON(!PAGE_ALIGNED(size)); - __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot, + __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot, early_pgtable_alloc, flags); if (!(vm_flags & VM_NO_GUARD)) @@ -562,7 +569,7 @@ core_initcall(map_entry_trampoline); /* * Create fine-grained mappings for the kernel. */ -static void __init map_kernel(pgd_t *pgd) +static void __init map_kernel(pgd_t *pgdp) { static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext, vmlinux_initdata, vmlinux_data; @@ -578,24 +585,24 @@ static void __init map_kernel(pgd_t *pgd) * Only rodata will be remapped with different permissions later on, * all other segments are allowed to use contiguous mappings. */ - map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0, + map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0, VM_NO_GUARD); - map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL, + map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL, &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); - map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot, + map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot, &vmlinux_inittext, 0, VM_NO_GUARD); - map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL, + map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL, &vmlinux_initdata, 0, VM_NO_GUARD); - map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); + map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); - if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) { + if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) { /* * The fixmap falls in a separate pgd to the kernel, and doesn't * live in the carveout for the swapper_pg_dir. We can simply * re-use the existing dir for the fixmap. */ - set_pgd(pgd_offset_raw(pgd, FIXADDR_START), - *pgd_offset_k(FIXADDR_START)); + set_pgd(pgd_offset_raw(pgdp, FIXADDR_START), + READ_ONCE(*pgd_offset_k(FIXADDR_START))); } else if (CONFIG_PGTABLE_LEVELS > 3) { /* * The fixmap shares its top level pgd entry with the kernel @@ -604,14 +611,15 @@ static void __init map_kernel(pgd_t *pgd) * entry instead. 
*/ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); - pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START), + pud_populate(&init_mm, + pud_set_fixmap_offset(pgdp, FIXADDR_START), lm_alias(bm_pmd)); pud_clear_fixmap(); } else { BUG(); } - kasan_copy_shadow(pgd); + kasan_copy_shadow(pgdp); } /* @@ -621,10 +629,10 @@ static void __init map_kernel(pgd_t *pgd) void __init paging_init(void) { phys_addr_t pgd_phys = early_pgtable_alloc(); - pgd_t *pgd = pgd_set_fixmap(pgd_phys); + pgd_t *pgdp = pgd_set_fixmap(pgd_phys); - map_kernel(pgd); - map_mem(pgd); + map_kernel(pgdp); + map_mem(pgdp); /* * We want to reuse the original swapper_pg_dir so we don't have to @@ -635,7 +643,7 @@ void __init paging_init(void) * To do this we need to go via a temporary pgd. */ cpu_replace_ttbr1(__va(pgd_phys)); - memcpy(swapper_pg_dir, pgd, PGD_SIZE); + memcpy(swapper_pg_dir, pgdp, PGD_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); pgd_clear_fixmap(); @@ -655,37 +663,40 @@ void __init paging_init(void) */ int kern_addr_valid(unsigned long addr) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pgd_t *pgdp; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + pte_t *ptep, pte; if ((((long)addr) >> VA_BITS) != -1UL) return 0; - pgd = pgd_offset_k(addr); - if (pgd_none(*pgd)) + pgdp = pgd_offset_k(addr); + if (pgd_none(READ_ONCE(*pgdp))) return 0; - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) + pudp = pud_offset(pgdp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) return 0; - if (pud_sect(*pud)) - return pfn_valid(pud_pfn(*pud)); + if (pud_sect(pud)) + return pfn_valid(pud_pfn(pud)); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) return 0; - if (pmd_sect(*pmd)) - return pfn_valid(pmd_pfn(*pmd)); + if (pmd_sect(pmd)) + return pfn_valid(pmd_pfn(pmd)); - pte = pte_offset_kernel(pmd, addr); - if (pte_none(*pte)) + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + if (pte_none(pte)) return 0; - return pfn_valid(pte_pfn(*pte)); + return pfn_valid(pte_pfn(pte)); } #ifdef CONFIG_SPARSEMEM_VMEMMAP #if !ARM64_SWAPPER_USES_SECTION_MAPS @@ -700,32 +711,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, { unsigned long addr = start; unsigned long next; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; do { next = pmd_addr_end(addr, end); - pgd = vmemmap_pgd_populate(addr, node); - if (!pgd) + pgdp = vmemmap_pgd_populate(addr, node); + if (!pgdp) return -ENOMEM; - pud = vmemmap_pud_populate(pgd, addr, node); - if (!pud) + pudp = vmemmap_pud_populate(pgdp, addr, node); + if (!pudp) return -ENOMEM; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { + pmdp = pmd_offset(pudp, addr); + if (pmd_none(READ_ONCE(*pmdp))) { void *p = NULL; p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (!p) return -ENOMEM; - pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL)); + pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); } else - vmemmap_verify((pte_t *)pmd, node, addr, next); + vmemmap_verify((pte_t *)pmdp, node, addr, next); } while (addr = next, addr != end); return 0; @@ -739,20 +750,22 @@ void vmemmap_free(unsigned long start, unsigned long end, static inline pud_t * fixmap_pud(unsigned long addr) { - pgd_t *pgd = pgd_offset_k(addr); + pgd_t *pgdp = pgd_offset_k(addr); + pgd_t pgd = READ_ONCE(*pgdp); - BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); + BUG_ON(pgd_none(pgd) || pgd_bad(pgd)); - return pud_offset_kimg(pgd, addr); + return 
pud_offset_kimg(pgdp, addr); } static inline pmd_t * fixmap_pmd(unsigned long addr) { - pud_t *pud = fixmap_pud(addr); + pud_t *pudp = fixmap_pud(addr); + pud_t pud = READ_ONCE(*pudp); - BUG_ON(pud_none(*pud) || pud_bad(*pud)); + BUG_ON(pud_none(pud) || pud_bad(pud)); - return pmd_offset_kimg(pud, addr); + return pmd_offset_kimg(pudp, addr); } static inline pte_t * fixmap_pte(unsigned long addr) @@ -768,30 +781,31 @@ static inline pte_t * fixmap_pte(unsigned long addr) */ void __init early_fixmap_init(void) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; + pgd_t *pgdp, pgd; + pud_t *pudp; + pmd_t *pmdp; unsigned long addr = FIXADDR_START; - pgd = pgd_offset_k(addr); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); if (CONFIG_PGTABLE_LEVELS > 3 && - !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) { + !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) { /* * We only end up here if the kernel mapping and the fixmap * share the top level pgd entry, which should only happen on * 16k/4 levels configurations. */ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); - pud = pud_offset_kimg(pgd, addr); + pudp = pud_offset_kimg(pgdp, addr); } else { - if (pgd_none(*pgd)) - __pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE); - pud = fixmap_pud(addr); + if (pgd_none(pgd)) + __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE); + pudp = fixmap_pud(addr); } - if (pud_none(*pud)) - __pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); - pmd = fixmap_pmd(addr); - __pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE); + if (pud_none(READ_ONCE(*pudp))) + __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); + pmdp = fixmap_pmd(addr); + __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE); /* * The boot-ioremap range spans multiple pmds, for which @@ -800,11 +814,11 @@ void __init early_fixmap_init(void) BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); - if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) - || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { + if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) + || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { WARN_ON(1); - pr_warn("pmd %p != %p, %p\n", - pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), + pr_warn("pmdp %p != %p, %p\n", + pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", fix_to_virt(FIX_BTMAP_BEGIN)); @@ -824,16 +838,16 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { unsigned long addr = __fix_to_virt(idx); - pte_t *pte; + pte_t *ptep; BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); - pte = fixmap_pte(addr); + ptep = fixmap_pte(addr); if (pgprot_val(flags)) { - set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); + set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); } else { - pte_clear(&init_mm, addr, pte); + pte_clear(&init_mm, addr, ptep); flush_tlb_kernel_range(addr, addr+PAGE_SIZE); } } @@ -915,36 +929,36 @@ int __init arch_ioremap_pmd_supported(void) return 1; } -int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot) +int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) { pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))); BUG_ON(phys & ~PUD_MASK); - set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot)); + set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot)); return 1; } -int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot) +int 
pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) { pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))); BUG_ON(phys & ~PMD_MASK); - set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot)); + set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot)); return 1; } -int pud_clear_huge(pud_t *pud) +int pud_clear_huge(pud_t *pudp) { - if (!pud_sect(*pud)) + if (!pud_sect(READ_ONCE(*pudp))) return 0; - pud_clear(pud); + pud_clear(pudp); return 1; } -int pmd_clear_huge(pmd_t *pmd) +int pmd_clear_huge(pmd_t *pmdp) { - if (!pmd_sect(*pmd)) + if (!pmd_sect(READ_ONCE(*pmdp))) return 0; - pmd_clear(pmd); + pmd_clear(pmdp); return 1; } diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index a682a0a2a0fa4..a56359373d8b3 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -29,7 +29,7 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr, void *data) { struct page_change_data *cdata = data; - pte_t pte = *ptep; + pte_t pte = READ_ONCE(*ptep); pte = clear_pte_bit(pte, cdata->clear_mask); pte = set_pte_bit(pte, cdata->set_mask); @@ -156,30 +156,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) */ bool kernel_page_present(struct page *page) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; + pgd_t *pgdp; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); - pgd = pgd_offset_k(addr); - if (pgd_none(*pgd)) + pgdp = pgd_offset_k(addr); + if (pgd_none(READ_ONCE(*pgdp))) return false; - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) + pudp = pud_offset(pgdp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) return false; - if (pud_sect(*pud)) + if (pud_sect(pud)) return true; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) return false; - if (pmd_sect(*pmd)) + if (pmd_sect(pmd)) return true; - pte = pte_offset_kernel(pmd, addr); - return pte_valid(*pte); + ptep = pte_offset_kernel(pmdp, addr); + return pte_valid(READ_ONCE(*ptep)); } #endif /* CONFIG_HIBERNATION */ #endif /* CONFIG_DEBUG_PAGEALLOC */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 71baed7e592a4..c0af476172998 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -205,7 +205,8 @@ ENDPROC(idmap_cpu_replace_ttbr1) dc cvac, cur_\()\type\()p // Ensure any existing dirty dmb sy // lines are written back before ldr \type, [cur_\()\type\()p] // loading the entry - tbz \type, #0, next_\()\type // Skip invalid entries + tbz \type, #0, skip_\()\type // Skip invalid and + tbnz \type, #11, skip_\()\type // non-global entries .endm .macro __idmap_kpti_put_pgtable_ent_ng, type @@ -265,8 +266,9 @@ ENTRY(idmap_kpti_install_ng_mappings) add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) do_pgd: __idmap_kpti_get_pgtable_ent pgd tbnz pgd, #1, walk_puds - __idmap_kpti_put_pgtable_ent_ng pgd next_pgd: + __idmap_kpti_put_pgtable_ent_ng pgd +skip_pgd: add cur_pgdp, cur_pgdp, #8 cmp cur_pgdp, end_pgdp b.ne do_pgd @@ -294,8 +296,9 @@ walk_puds: add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) do_pud: __idmap_kpti_get_pgtable_ent pud tbnz pud, #1, walk_pmds - __idmap_kpti_put_pgtable_ent_ng pud next_pud: + __idmap_kpti_put_pgtable_ent_ng pud +skip_pud: add cur_pudp, cur_pudp, 8 cmp cur_pudp, end_pudp b.ne do_pud @@ -314,8 +317,9 @@ walk_pmds: add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) do_pmd: __idmap_kpti_get_pgtable_ent pmd tbnz pmd, #1, walk_ptes - __idmap_kpti_put_pgtable_ent_ng pmd 
next_pmd: + __idmap_kpti_put_pgtable_ent_ng pmd +skip_pmd: add cur_pmdp, cur_pmdp, #8 cmp cur_pmdp, end_pmdp b.ne do_pmd @@ -333,7 +337,7 @@ walk_ptes: add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) do_pte: __idmap_kpti_get_pgtable_ent pte __idmap_kpti_put_pgtable_ent_ng pte -next_pte: +skip_pte: add cur_ptep, cur_ptep, #8 cmp cur_ptep, end_ptep b.ne do_pte diff --git a/arch/blackfin/include/uapi/asm/poll.h b/arch/blackfin/include/uapi/asm/poll.h index 3b162f2d29703..cd2f1a78aba57 100644 --- a/arch/blackfin/include/uapi/asm/poll.h +++ b/arch/blackfin/include/uapi/asm/poll.h @@ -9,25 +9,8 @@ #ifndef _UAPI__BFIN_POLL_H #define _UAPI__BFIN_POLL_H -#ifndef __KERNEL__ #define POLLWRNORM POLLOUT -#define POLLWRBAND (__force __poll_t)256 -#else -#define __ARCH_HAS_MANGLED_POLL -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; - /* bit 9 -> bit 8, bit 8 -> bit 2 */ - return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6); -} - -static inline __poll_t demangle_poll(__u16 v) -{ - /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */ - return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) | - ((v & 4) << 6)); -} -#endif +#define POLLWRBAND 256 #include diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c index a2986c60aaac2..cd0e05d89d42f 100644 --- a/arch/cris/arch-v10/drivers/gpio.c +++ b/arch/cris/arch-v10/drivers/gpio.c @@ -173,7 +173,7 @@ static __poll_t gpio_poll(struct file *file, poll_table *wait) if ((data & priv->highalarm) || (~data & priv->lowalarm)) { - mask = POLLIN|POLLRDNORM; + mask = EPOLLIN|EPOLLRDNORM; } out: diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c index 177843c640715..ed1a568a72170 100644 --- a/arch/cris/arch-v10/drivers/sync_serial.c +++ b/arch/cris/arch-v10/drivers/sync_serial.c @@ -666,16 +666,16 @@ static __poll_t sync_serial_poll(struct file *file, poll_table *wait) poll_wait(file, &port->in_wait_q, wait); /* Some room to write */ if (port->out_count < OUT_BUFFER_SIZE) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; /* At least an inbufchunk of data */ if (sync_data_avail(port) >= port->inbufchunk) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; DEBUGPOLL(if (mask != prev_mask) printk(KERN_DEBUG "sync_serial_poll: mask 0x%08X %s %s\n", mask, - mask & POLLOUT ? "POLLOUT" : "", - mask & POLLIN ? "POLLIN" : ""); + mask & EPOLLOUT ? "POLLOUT" : "", + mask & EPOLLIN ? "POLLIN" : ""); prev_mask = mask; ); return mask; diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c index e20e0b9a3a5ce..1b0ce8a8af167 100644 --- a/arch/cris/arch-v32/drivers/sync_serial.c +++ b/arch/cris/arch-v32/drivers/sync_serial.c @@ -574,24 +574,24 @@ static __poll_t sync_serial_poll(struct file *file, poll_table *wait) /* No active transfer, descriptors are available */ if (port->output && !port->tr_running) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; /* Descriptor and buffer space available. */ if (port->output && port->active_tr_descr != port->catch_tr_descr && port->out_buf_count < OUT_BUFFER_SIZE) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; /* At least an inbufchunk of data */ if (port->input && sync_data_avail(port) >= port->inbufchunk) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; DEBUGPOLL( if (mask != prev_mask) pr_info("sync_serial_poll: mask 0x%08X %s %s\n", mask, - mask & POLLOUT ? "POLLOUT" : "", - mask & POLLIN ? 
"POLLIN" : ""); + mask & EPOLLOUT ? "POLLOUT" : "", + mask & EPOLLIN ? "POLLIN" : ""); prev_mask = mask; ); return mask; diff --git a/arch/frv/include/uapi/asm/poll.h b/arch/frv/include/uapi/asm/poll.h index a44c8f0ebee79..f55b45f475eca 100644 --- a/arch/frv/include/uapi/asm/poll.h +++ b/arch/frv/include/uapi/asm/poll.h @@ -2,25 +2,8 @@ #ifndef _ASM_POLL_H #define _ASM_POLL_H -#ifndef __KERNEL__ #define POLLWRNORM POLLOUT -#define POLLWRBAND (__force __poll_t)256 -#else -#define __ARCH_HAS_MANGLED_POLL -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; - /* bit 9 -> bit 8, bit 8 -> bit 2 */ - return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6); -} - -static inline __poll_t demangle_poll(__u16 v) -{ - /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */ - return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) | - ((v & 4) << 6)); -} -#endif +#define POLLWRBAND 256 #include #undef POLLREMOVE diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 0b4c65a1af25f..498f3da3f225d 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -41,7 +41,6 @@ ifneq ($(CONFIG_IA64_ESI),) obj-y += esi_stub.o # must be in kernel proper endif obj-$(CONFIG_INTEL_IOMMU) += pci-dma.o -obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_BINFMT_ELF) += elfcore.o diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 8586024940963..8fb280e331141 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -1670,7 +1670,7 @@ pfm_poll(struct file *filp, poll_table * wait) PROTECT_CTX(ctx, flags); if (PFM_CTXQ_EMPTY(ctx) == 0) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; UNPROTECT_CTX(ctx, flags); diff --git a/arch/m68k/include/uapi/asm/poll.h b/arch/m68k/include/uapi/asm/poll.h index d8be239e81417..c3e3fcc15e1dc 100644 --- a/arch/m68k/include/uapi/asm/poll.h +++ b/arch/m68k/include/uapi/asm/poll.h @@ -2,25 +2,8 @@ #ifndef __m68k_POLL_H #define __m68k_POLL_H -#ifndef __KERNEL__ #define POLLWRNORM POLLOUT -#define POLLWRBAND (__force __poll_t)256 -#else -#define __ARCH_HAS_MANGLED_POLL -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; - /* bit 9 -> bit 8, bit 8 -> bit 2 */ - return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6); -} - -static inline __poll_t demangle_poll(__u16 v) -{ - /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */ - return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) | - ((v & 4) << 6)); -} -#endif +#define POLLWRBAND 256 #include diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 449397c60b56e..8128c3b68d6b0 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2333,7 +2333,6 @@ config MIPS_VPE_LOADER_TOM config MIPS_VPE_APSP_API bool "Enable support for AP/SP API (RTLX)" depends on MIPS_VPE_LOADER - help config MIPS_VPE_APSP_API_CMP bool diff --git a/arch/mips/bcm63xx/boards/Kconfig b/arch/mips/bcm63xx/boards/Kconfig index 6ff0a74810818..f60d96610ace9 100644 --- a/arch/mips/bcm63xx/boards/Kconfig +++ b/arch/mips/bcm63xx/boards/Kconfig @@ -7,6 +7,5 @@ choice config BOARD_BCM963XX bool "Generic Broadcom 963xx boards" select SSB - help endchoice diff --git a/arch/mips/include/uapi/asm/poll.h b/arch/mips/include/uapi/asm/poll.h index 3173f89171284..ad289d7b74340 100644 --- a/arch/mips/include/uapi/asm/poll.h +++ b/arch/mips/include/uapi/asm/poll.h @@ -2,25 +2,8 @@ #ifndef __ASM_POLL_H #define __ASM_POLL_H -#ifndef __KERNEL__ #define POLLWRNORM POLLOUT -#define POLLWRBAND (__force __poll_t)0x0100 -#else -#define 
__ARCH_HAS_MANGLED_POLL -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; - /* bit 9 -> bit 8, bit 8 -> bit 2 */ - return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6); -} - -static inline __poll_t demangle_poll(__u16 v) -{ - /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */ - return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) | - ((v & 4) << 6)); -} -#endif +#define POLLWRBAND 0x0100 #include diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c index 19c88d7700546..fcf9af492d602 100644 --- a/arch/mips/kernel/mips-cpc.c +++ b/arch/mips/kernel/mips-cpc.c @@ -10,6 +10,8 @@ #include #include +#include +#include #include #include @@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); phys_addr_t __weak mips_cpc_default_phys_base(void) { + struct device_node *cpc_node; + struct resource res; + int err; + + cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc"); + if (cpc_node) { + err = of_address_to_resource(cpc_node, 0, &res); + if (!err) + return res.start; + } + return 0; } diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index bbb0f4770c0d8..18c509c59f338 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -349,11 +349,11 @@ static __poll_t file_poll(struct file *file, poll_table *wait) /* data available to read? */ if (rtlx_read_poll(minor, 0)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* space to write */ if (rtlx_write_poll(minor)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 85bc601e9a0d4..5f8b0a9e30b3d 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -375,6 +375,7 @@ static void __init bootmem_init(void) unsigned long reserved_end; unsigned long mapstart = ~0UL; unsigned long bootmap_size; + phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; bool bootmap_valid = false; int i; @@ -395,7 +396,8 @@ static void __init bootmem_init(void) max_low_pfn = 0; /* - * Find the highest page frame number we have available. 
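For reference, the per-architecture mangle_poll()/demangle_poll() pairs deleted from the blackfin, frv, m68k and MIPS poll.h headers above all implemented the same remapping (bit 9 -> bit 8, bit 8 -> bit 2, and back), now handled by generic code. The following stand-alone userspace restatement of the removed helpers can be used to sanity-check the mapping; it is plain C for illustration, not kernel API:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t mangle_poll(uint32_t val)
    {
            uint16_t v = (uint16_t)val;
            /* bit 9 -> bit 8, bit 8 -> bit 2 (same body as the deleted helpers) */
            return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
    }

    static uint32_t demangle_poll(uint16_t v)
    {
            /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
            return (uint32_t)((v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6));
    }

    int main(void)
    {
            assert(mangle_poll(0x100) == 0x004);   /* POLLWRNORM -> POLLOUT bit */
            assert(mangle_poll(0x200) == 0x100);   /* POLLWRBAND -> legacy 256  */
            assert(demangle_poll(0x100) == 0x200); /* and back again            */
            return 0;
    }

Demangling bit 2 deliberately sets bits 2 and 8, because the legacy ABI on these architectures could not tell POLLOUT and POLLWRNORM apart.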
+ * Find the highest page frame number we have available + * and the lowest used RAM address */ for (i = 0; i < boot_mem_map.nr_map; i++) { unsigned long start, end; @@ -407,6 +409,8 @@ static void __init bootmem_init(void) end = PFN_DOWN(boot_mem_map.map[i].addr + boot_mem_map.map[i].size); + ramstart = min(ramstart, boot_mem_map.map[i].addr); + #ifndef CONFIG_HIGHMEM /* * Skip highmem here so we get an accurate max_low_pfn if low @@ -436,6 +440,13 @@ static void __init bootmem_init(void) mapstart = max(reserved_end, start); } + /* + * Reserve any memory between the start of RAM and PHYS_OFFSET + */ + if (ramstart > PHYS_OFFSET) + add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET, + BOOT_MEM_RESERVED); + if (min_low_pfn >= max_low_pfn) panic("Incorrect memory mapping !!!"); if (min_low_pfn > ARCH_PFN_OFFSET) { @@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p) add_memory_region(start, size, BOOT_MEM_RAM); - if (start && start > PHYS_OFFSET) - add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET, - BOOT_MEM_RESERVED); return 0; } early_param("mem", early_parse_mem); diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 87dcac2447c8d..9d41732a9146a 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -572,7 +572,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void) */ } -void __init bmips_cpu_setup(void) +void bmips_cpu_setup(void) { void __iomem __maybe_unused *cbr = BMIPS_GET_CBR(); u32 __maybe_unused cfg; diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index b17447ce88731..76b93a9c8c9b2 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -22,6 +22,7 @@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select HAVE_KVM_VCPU_ASYNC_IOCTL select KVM_MMIO select MMU_NOTIFIER select SRCU diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 75fdeaa8c62f2..2549fdd27ee16 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -446,6 +446,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = -EINTR; + vcpu_load(vcpu); + kvm_sigset_activate(vcpu); if (vcpu->mmio_needed) { @@ -480,6 +482,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) out: kvm_sigset_deactivate(vcpu); + vcpu_put(vcpu); return r; } @@ -900,6 +903,26 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return r; } +long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, + unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + + if (ioctl == KVM_INTERRUPT) { + struct kvm_mips_interrupt irq; + + if (copy_from_user(&irq, argp, sizeof(irq))) + return -EFAULT; + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, + irq.irq); + + return kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + + return -ENOIOCTLCMD; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -907,56 +930,54 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, void __user *argp = (void __user *)arg; long r; + vcpu_load(vcpu); + switch (ioctl) { case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + r = -EFAULT; if (copy_from_user(®, argp, sizeof(reg))) - return -EFAULT; + break; if (ioctl == KVM_SET_ONE_REG) - return kvm_mips_set_reg(vcpu, ®); + r = kvm_mips_set_reg(vcpu, ®); else - return kvm_mips_get_reg(vcpu, ®); + r = kvm_mips_get_reg(vcpu, ®); + break; } case KVM_GET_REG_LIST: { struct kvm_reg_list __user 
*user_list = argp; struct kvm_reg_list reg_list; unsigned n; + r = -EFAULT; if (copy_from_user(®_list, user_list, sizeof(reg_list))) - return -EFAULT; + break; n = reg_list.n; reg_list.n = kvm_mips_num_regs(vcpu); if (copy_to_user(user_list, ®_list, sizeof(reg_list))) - return -EFAULT; + break; + r = -E2BIG; if (n < reg_list.n) - return -E2BIG; - return kvm_mips_copy_reg_indices(vcpu, user_list->reg); - } - case KVM_INTERRUPT: - { - struct kvm_mips_interrupt irq; - - if (copy_from_user(&irq, argp, sizeof(irq))) - return -EFAULT; - kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, - irq.irq); - - r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; - } + r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); + break; + } case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; + r = -EFAULT; if (copy_from_user(&cap, argp, sizeof(cap))) - return -EFAULT; + break; r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } default: r = -ENOIOCTLCMD; } + + vcpu_put(vcpu); return r; } @@ -1145,6 +1166,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) vcpu->arch.gprs[i] = regs->gpr[i]; vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ @@ -1152,6 +1175,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) vcpu->arch.lo = regs->lo; vcpu->arch.pc = regs->pc; + vcpu_put(vcpu); return 0; } @@ -1159,6 +1183,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) regs->gpr[i] = vcpu->arch.gprs[i]; @@ -1166,6 +1192,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->lo = vcpu->arch.lo; regs->pc = vcpu->arch.pc; + vcpu_put(vcpu); return 0; } diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 60fae03dac79a..3d4ec88f1db1c 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -152,7 +152,6 @@ menu "Advanced setup" config ADVANCED_OPTIONS bool "Prompt for advanced kernel configuration options" - help comment "Default settings for advanced configuration options are used" depends on !ADVANCED_OPTIONS diff --git a/arch/nios2/boot/dts/3c120_devboard.dts b/arch/nios2/boot/dts/3c120_devboard.dts index 36ccdf05837de..56f4b5df6d650 100644 --- a/arch/nios2/boot/dts/3c120_devboard.dts +++ b/arch/nios2/boot/dts/3c120_devboard.dts @@ -29,7 +29,7 @@ #address-cells = <1>; #size-cells = <0>; - cpu: cpu@0x0 { + cpu: cpu@0 { device_type = "cpu"; compatible = "altr,nios2-1.0"; reg = <0x00000000>; @@ -69,7 +69,7 @@ compatible = "altr,avalon", "simple-bus"; bus-frequency = <125000000>; - pb_cpu_to_io: bridge@0x8000000 { + pb_cpu_to_io: bridge@8000000 { compatible = "simple-bus"; reg = <0x08000000 0x00800000>; #address-cells = <1>; @@ -83,7 +83,7 @@ <0x00008000 0x08008000 0x00000020>, <0x00400000 0x08400000 0x00000020>; - timer_1ms: timer@0x400000 { + timer_1ms: timer@400000 { compatible = "altr,timer-1.0"; reg = <0x00400000 0x00000020>; interrupt-parent = <&cpu>; @@ -91,7 +91,7 @@ clock-frequency = <125000000>; }; - timer_0: timer@0x8000 { + timer_0: timer@8000 { compatible = "altr,timer-1.0"; reg = < 0x00008000 0x00000020 >; interrupt-parent = < &cpu >; @@ -99,14 +99,14 @@ clock-frequency = < 125000000 >; }; - jtag_uart: serial@0x4d50 { + jtag_uart: serial@4d50 { compatible = "altr,juart-1.0"; reg = <0x00004d50 0x00000008>; interrupt-parent = <&cpu>; interrupts = <1>; }; - tse_mac: ethernet@0x4000 { + tse_mac: ethernet@4000 { 
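The kvm_arch_vcpu_ioctl() rework above converts early returns into "r = ...; break;" so that every path, including error paths, flows through the vcpu_put() added at the bottom, while KVM_INTERRUPT moves to the new kvm_arch_vcpu_async_ioctl() so it does not have to wait for vcpu_load(). A minimal userspace mock of the single-exit pattern; none of these names are the real KVM API:

    #include <errno.h>
    #include <stdio.h>

    /* Mock objects for illustration only. */
    struct vcpu { int loaded; };

    static void vcpu_load(struct vcpu *v) { v->loaded = 1; }
    static void vcpu_put(struct vcpu *v)  { v->loaded = 0; }

    #define CMD_SET_REG 1

    static long vcpu_ioctl(struct vcpu *v, unsigned int cmd, const void *argp)
    {
            long r;

            vcpu_load(v);                   /* taken once, up front */
            switch (cmd) {
            case CMD_SET_REG:
                    r = -EFAULT;
                    if (!argp)
                            break;          /* error path still reaches vcpu_put() */
                    r = 0;
                    break;
            default:
                    r = -ENOTTY;            /* stands in for -ENOIOCTLCMD */
            }
            vcpu_put(v);                    /* single exit: never bypassed */
            return r;
    }

    int main(void)
    {
            struct vcpu v = { 0 };

            printf("bad arg -> %ld, still loaded? %d\n",
                   vcpu_ioctl(&v, CMD_SET_REG, NULL), v.loaded);
            return 0;
    }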
compatible = "altr,tse-1.0"; reg = <0x00004000 0x00000400>, <0x00004400 0x00000040>, @@ -133,7 +133,7 @@ }; }; - uart: serial@0x4c80 { + uart: serial@4c80 { compatible = "altr,uart-1.0"; reg = <0x00004c80 0x00000020>; interrupt-parent = <&cpu>; @@ -143,7 +143,7 @@ }; }; - cfi_flash_64m: flash@0x0 { + cfi_flash_64m: flash@0 { compatible = "cfi-flash"; reg = <0x00000000 0x04000000>; bank-width = <2>; diff --git a/arch/nios2/configs/10m50_defconfig b/arch/nios2/configs/10m50_defconfig index 8b2a30b3b34fe..c601c8ff1ae68 100644 --- a/arch/nios2/configs/10m50_defconfig +++ b/arch/nios2/configs/10m50_defconfig @@ -33,7 +33,6 @@ CONFIG_IP_PNP_RARP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" diff --git a/arch/nios2/configs/3c120_defconfig b/arch/nios2/configs/3c120_defconfig index 9451940678a03..fce33588d55cd 100644 --- a/arch/nios2/configs/3c120_defconfig +++ b/arch/nios2/configs/3c120_defconfig @@ -35,7 +35,6 @@ CONFIG_IP_PNP_RARP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 30a155c0a6b07..c615abdce119e 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -16,6 +16,7 @@ #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) #define PMD_CACHE_INDEX PMD_INDEX_SIZE +#define PUD_CACHE_INDEX PUD_INDEX_SIZE #ifndef __ASSEMBLY__ #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index 949d691094a46..67c5475311ee6 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd) * keeping the prototype consistent across the two formats. */ static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte, - unsigned int subpg_index, unsigned long hidx) + unsigned int subpg_index, unsigned long hidx, + int offset) { return (hidx << H_PAGE_F_GIX_SHIFT) & (H_PAGE_F_SECOND | H_PAGE_F_GIX); diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h index 338b7da468cef..3bcf269f8f554 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-64k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h @@ -45,7 +45,7 @@ * generic accessors and iterators here */ #define __real_pte __real_pte -static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset) { real_pte_t rpte; unsigned long *hidxp; @@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) */ smp_rmb(); - hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); + hidxp = (unsigned long *)(ptep + offset); rpte.hidx = *hidxp; return rpte; } @@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) * expected to modify the PTE bits accordingly and commit the PTE to memory. 
*/ static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte, - unsigned int subpg_index, unsigned long hidx) + unsigned int subpg_index, + unsigned long hidx, int offset) { - unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE); + unsigned long *hidxp = (unsigned long *)(ptep + offset); rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index); *hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index); @@ -140,13 +141,18 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a } #define H_PTE_TABLE_SIZE PTE_FRAG_SIZE -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE) #define H_PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + \ (sizeof(unsigned long) << PMD_INDEX_SIZE)) #else #define H_PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) #endif +#ifdef CONFIG_HUGETLB_PAGE +#define H_PUD_TABLE_SIZE ((sizeof(pud_t) << PUD_INDEX_SIZE) + \ + (sizeof(unsigned long) << PUD_INDEX_SIZE)) +#else #define H_PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) +#endif #define H_PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) #ifdef CONFIG_TRANSPARENT_HUGEPAGE diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 0920eff731b38..935adcd92a816 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -23,7 +23,8 @@ H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT) #define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE) -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES) +#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \ + defined(CONFIG_PPC_64K_PAGES) /* * only with hash 64k we need to use the second half of pmd page table * to store pointer to deposited pgtable_t @@ -32,6 +33,16 @@ #else #define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE #endif +/* + * We store the slot details in the second half of page table. + * Increase the pud level table so that hugetlb ptes can be stored + * at pud level. 
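Threading the new "offset" argument through __real_pte() and pte_set_hidx() above lets the caller state how far away the slot-hint words sit, instead of hard-coding PTRS_PER_PTE; that is what allows the same helpers to serve the PUD-level tables introduced next. A toy model of the "hint words in the second half of the allocation" layout, with an illustrative table size:

    #include <stdio.h>

    #define ENTRIES 8   /* toy table size; the kernel uses PTRS_PER_PTE etc. */

    int main(void)
    {
            /* one allocation: ENTRIES pte words followed by ENTRIES hint words */
            unsigned long table[2 * ENTRIES] = { 0 };
            unsigned long *ptep = &table[3];    /* some PTE inside the table */
            int offset = ENTRIES;               /* caller-supplied distance  */
                                                /* to the matching hint word */

            *(ptep + offset) = 0xf;             /* pte_set_hidx(), in miniature */
            printf("hint word for entry 3: 0x%lx\n", table[3 + ENTRIES]);
            return 0;
    }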
+ */ +#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) +#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE + 1) +#else +#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE) +#endif /* * Define the address range of the kernel non-linear virtual area */ diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 1fcfa425cefaf..4746bc68d446d 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { + pgd_t *pgd; + if (radix_enabled()) return radix__pgd_alloc(mm); - return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), - pgtable_gfp_flags(mm, GFP_KERNEL)); + + pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), + pgtable_gfp_flags(mm, GFP_KERNEL)); + memset(pgd, 0, PGD_TABLE_SIZE); + + return pgd; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) @@ -93,13 +99,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { - return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), + return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX), pgtable_gfp_flags(mm, GFP_KERNEL)); } static inline void pud_free(struct mm_struct *mm, pud_t *pud) { - kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud); + kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud); } static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) @@ -115,7 +121,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, * ahead and flush the page walk cache */ flush_tlb_pgtable(tlb, address); - pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE); + pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX); } static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 51017726d4953..a6b9f1d746002 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size; extern unsigned long __pud_index_size; extern unsigned long __pgd_index_size; extern unsigned long __pmd_cache_index; +extern unsigned long __pud_cache_index; #define PTE_INDEX_SIZE __pte_index_size #define PMD_INDEX_SIZE __pmd_index_size #define PUD_INDEX_SIZE __pud_index_size #define PGD_INDEX_SIZE __pgd_index_size #define PMD_CACHE_INDEX __pmd_cache_index +#define PUD_CACHE_INDEX __pud_cache_index /* * Because of use of pte fragments and THP, size of page table * are not always derived out of index size above. 
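Since a table that carries the slot details in its second half is twice as large, the kmem cache used for PUD tables has to be chosen by the padded size, which is why the pgalloc hunks above switch from PGT_CACHE(PUD_INDEX_SIZE) to PGT_CACHE(PUD_CACHE_INDEX). A small arithmetic sketch of why H_PUD_CACHE_INDEX can simply be H_PUD_INDEX_SIZE + 1; the index value 9 is an assumption for illustration, not the real constant:

    #include <stdio.h>

    int main(void)
    {
            unsigned int idx = 9;   /* stand-in for H_PUD_INDEX_SIZE */
            unsigned long plain  = (unsigned long)sizeof(void *) << idx;
            unsigned long padded = plain + (sizeof(unsigned long) << idx);

            /* doubling a power-of-two sized table == bumping the shift by one */
            printf("plain %lu bytes, padded %lu bytes, (8 << (idx+1)) = %lu\n",
                   plain, padded, (unsigned long)sizeof(void *) << (idx + 1));
            return 0;
    }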
@@ -348,7 +350,7 @@ extern unsigned long pci_io_base; */ #ifndef __real_pte -#define __real_pte(e,p) ((real_pte_t){(e)}) +#define __real_pte(e, p, o) ((real_pte_t){(e)}) #define __rpte_to_pte(r) ((r).pte) #define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 176dfb73d42c0..471b2274fbeba 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -645,7 +645,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) EXC_HV, SOFTEN_TEST_HV, bitmask) #define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask) \ - MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\ + MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\ EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV) /* diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 88e5e8f17e989..855e17d158b11 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -29,6 +29,16 @@ #define PACA_IRQ_HMI 0x20 #define PACA_IRQ_PMI 0x40 +/* + * Some soft-masked interrupts must be hard masked until they are replayed + * (e.g., because the soft-masked handler does not clear the exception). + */ +#ifdef CONFIG_PPC_BOOK3S +#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI) +#else +#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE) +#endif + /* * flags for paca->irq_soft_mask */ @@ -244,7 +254,7 @@ static inline bool lazy_irq_pending(void) static inline void may_hard_irq_enable(void) { get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS; - if (!(get_paca()->irq_happened & PACA_IRQ_EE)) + if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) __hard_irq_enable(); } diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 9dcbfa6bbb91e..d8b1e8e7e035b 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -140,6 +140,12 @@ static inline bool kdump_in_progress(void) return false; } +static inline void crash_ipi_callback(struct pt_regs *regs) { } + +static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) +{ +} + #endif /* CONFIG_KEXEC_CORE */ #endif /* ! 
__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 9a667007bff81..376ae803b69c6 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm); extern int kvmppc_hcall_impl_pr(unsigned long cmd); extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd); -extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, - struct kvm_vcpu *vcpu); -extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, - struct kvmppc_book3s_shadow_vcpu *svcpu); +extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu); +extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu); extern int kvm_irq_bypass; static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 735cfa35298ac..998f7b7aaa9e5 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -122,13 +122,13 @@ static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l) lphi = (l >> 16) & 0xf; switch ((l >> 12) & 0xf) { case 0: - return !lphi ? 24 : -1; /* 16MB */ + return !lphi ? 24 : 0; /* 16MB */ break; case 1: return 16; /* 64kB */ break; case 3: - return !lphi ? 34 : -1; /* 16GB */ + return !lphi ? 34 : 0; /* 16GB */ break; case 7: return (16 << 8) + 12; /* 64kB in 4kB */ @@ -140,7 +140,7 @@ static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l) return (24 << 8) + 12; /* 16MB in 4kB */ break; } - return -1; + return 0; } static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l) @@ -159,7 +159,11 @@ static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r) { - return 1ul << kvmppc_hpte_actual_page_shift(v, r); + int shift = kvmppc_hpte_actual_page_shift(v, r); + + if (shift) + return 1ul << shift; + return 0; } static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift) @@ -232,7 +236,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, va_low ^= v >> (SID_SHIFT_1T - 16); va_low &= 0x7ff; - if (b_pgshift == 12) { + if (b_pgshift <= 12) { if (a_pgshift > 12) { sllp = (a_pgshift == 16) ? 5 : 4; rb |= sllp << 5; /* AP field */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 3aa5b577cd609..1f53b562726fd 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -690,6 +690,7 @@ struct kvm_vcpu_arch { u8 mmio_vsx_offset; u8 mmio_vsx_copy_type; u8 mmio_vsx_tx_sx_enabled; + u8 mmio_vmx_copy_nums; u8 osi_needed; u8 osi_enabled; u8 papr_enabled; @@ -709,6 +710,7 @@ struct kvm_vcpu_arch { u8 ceded; u8 prodded; u8 doorbell_request; + u8 irq_pending; /* Used by XIVE to signal pending guest irqs */ u32 last_inst; struct swait_queue_head *wqp; @@ -738,8 +740,11 @@ struct kvm_vcpu_arch { struct kvmppc_icp *icp; /* XICS presentation controller */ struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */ __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */ - u32 xive_pushed; /* Is the VP pushed on the physical CPU ? */ + u8 xive_pushed; /* Is the VP pushed on the physical CPU ? 
*/ + u8 xive_esc_on; /* Is the escalation irq enabled ? */ union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */ + u64 xive_esc_raddr; /* Escalation interrupt ESB real addr */ + u64 xive_esc_vaddr; /* Escalation interrupt ESB virt addr */ #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -800,6 +805,7 @@ struct kvm_vcpu_arch { #define KVM_MMIO_REG_QPR 0x0040 #define KVM_MMIO_REG_FQPR 0x0060 #define KVM_MMIO_REG_VSX 0x0080 +#define KVM_MMIO_REG_VMX 0x00c0 #define __KVM_HAVE_ARCH_WQP #define __KVM_HAVE_CREATE_DEVICE diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9db18287b5f44..7765a800ddaea 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian, int mmio_sign_extend); +extern int kvmppc_handle_load128_by2x64(struct kvm_run *run, + struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian); +extern int kvmppc_handle_store128_by2x64(struct kvm_run *run, + struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian); extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, u64 val, unsigned int bytes, int is_default_endian); diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 504a3c36ce5c9..03bbd1149530d 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -24,6 +24,7 @@ extern int icache_44x_need_flush; #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) #define PMD_CACHE_INDEX PMD_INDEX_SIZE +#define PUD_CACHE_INDEX PUD_INDEX_SIZE #ifndef __ASSEMBLY__ #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index abddf5830ad55..5c5f75d005ada 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -27,6 +27,7 @@ #else #define PMD_CACHE_INDEX PMD_INDEX_SIZE #endif +#define PUD_CACHE_INDEX PUD_INDEX_SIZE /* * Define the address range of the kernel non-linear virtual area diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h index 24c73f5575ee3..94bd1bf2c8732 100644 --- a/arch/powerpc/include/asm/opal-api.h +++ b/arch/powerpc/include/asm/opal-api.h @@ -1076,6 +1076,7 @@ enum { /* Flags for OPAL_XIVE_GET/SET_VP_INFO */ enum { OPAL_XIVE_VP_ENABLED = 0x00000001, + OPAL_XIVE_VP_SINGLE_ESCALATION = 0x00000002, }; /* "Any chip" replacement for chip ID for allocation functions */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index ab5c1588b487a..f1083bcf449c5 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -156,6 +156,12 @@ #define OP_31_XOP_LFDX 599 #define OP_31_XOP_LFDUX 631 +/* VMX Vector Load Instructions */ +#define OP_31_XOP_LVX 103 + +/* VMX Vector Store Instructions */ +#define OP_31_XOP_STVX 231 + #define OP_LWZ 32 #define OP_STFS 52 #define OP_STFSU 53 diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 88187c285c70d..9f421641a35c8 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid); extern void 
sysfs_remove_device_from_node(struct device *dev, int nid); extern int numa_update_cpu_topology(bool cpus_locked); +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) +{ + numa_cpu_lookup_table[cpu] = node; +} + static inline int early_cpu_to_node(int cpu) { int nid; @@ -76,12 +81,16 @@ static inline int numa_update_cpu_topology(bool cpus_locked) { return 0; } + +static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {} + #endif /* CONFIG_NUMA */ #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) extern int start_topology_update(void); extern int stop_topology_update(void); extern int prrn_is_enabled(void); +extern int find_and_online_cpu_nid(int cpu); #else static inline int start_topology_update(void) { @@ -95,6 +104,10 @@ static inline int prrn_is_enabled(void) { return 0; } +static inline int find_and_online_cpu_nid(int cpu) +{ + return 0; +} #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES) diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 7624e22f5045d..8d1a2792484f4 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -111,9 +111,10 @@ extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio); extern void xive_native_sync_source(u32 hw_irq); extern bool is_xive_irq(struct irq_chip *chip); -extern int xive_native_enable_vp(u32 vp_id); +extern int xive_native_enable_vp(u32 vp_id, bool single_escalation); extern int xive_native_disable_vp(u32 vp_id); extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id); +extern bool xive_native_has_single_escalation(void); #else diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 637b7263cb867..833ed9a16adfd 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -632,6 +632,8 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc) #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd) +#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe) + /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 88b84ac76b532..ea5eb91b836e4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -520,6 +520,7 @@ int main(void) OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions); OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded); OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded); + OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending); OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request); OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); @@ -739,6 +740,9 @@ int main(void) DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu, arch.xive_cam_word)); DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed)); + DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on)); + DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr)); + DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr)); #endif #ifdef CONFIG_KVM_EXIT_TIMING diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index ee832d344a5a2..9b6e653e501a1 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -943,6 +943,8 @@ 
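The VCPU_XIVE_ESC_ON/RADDR/VADDR constants added to asm-offsets.c above exist only so the HV entry/exit assembly later in the series can address those struct fields: the mechanism boils down to emitting offsetof() values at build time for the .S files to include. A userspace illustration with a hypothetical struct whose layout is made up for the example:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for part of kvm_vcpu_arch. */
    struct vcpu_arch_toy {
            unsigned char      xive_pushed;
            unsigned char      xive_esc_on;
            unsigned long long xive_esc_raddr;
            unsigned long long xive_esc_vaddr;
    };

    int main(void)
    {
            /* asm-offsets emits lines of this shape for assembly to consume */
            printf("#define VCPU_XIVE_ESC_ON\t%zu\n",
                   offsetof(struct vcpu_arch_toy, xive_esc_on));
            printf("#define VCPU_XIVE_ESC_RADDR\t%zu\n",
                   offsetof(struct vcpu_arch_toy, xive_esc_raddr));
            return 0;
    }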
kernel_dbg_exc: /* * An interrupt came in while soft-disabled; We mark paca->irq_happened * accordingly and if the interrupt is level sensitive, we hard disable + * hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so + * keep these in synch. */ .macro masked_interrupt_book3e paca_irq full_mask diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 243d072a225aa..3ac87e53b3da0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1426,7 +1426,7 @@ EXC_COMMON_BEGIN(soft_nmi_common) * triggered and won't automatically refire. * - If it was a HMI we return immediately since we handled it in realmode * and it won't refire. - * - else we hard disable and return. + * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return. * This is called with r10 containing the value to OR to the paca field. */ #define MASKED_INTERRUPT(_H) \ @@ -1441,8 +1441,8 @@ masked_##_H##interrupt: \ ori r10,r10,0xffff; \ mtspr SPRN_DEC,r10; \ b MASKED_DEC_HANDLER_LABEL; \ -1: andi. r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI); \ - bne 2f; \ +1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK; \ + beq 2f; \ mfspr r10,SPRN_##_H##SRR1; \ xori r10,r10,MSR_EE; /* clear MSR_EE */ \ mtspr SPRN_##_H##SRR1,r10; \ diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index ae2ede4de6be7..446c79611d56c 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -362,7 +362,7 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) */ static int pci_read_irq_line(struct pci_dev *pci_dev) { - unsigned int virq = 0; + int virq; pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); @@ -370,7 +370,8 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) memset(&oirq, 0xff, sizeof(oirq)); #endif /* Try to get a mapping from the device-tree */ - if (!of_irq_parse_and_map_pci(pci_dev, 0, 0)) { + virq = of_irq_parse_and_map_pci(pci_dev, 0, 0); + if (virq <= 0) { u8 line, pin; /* If that fails, lets fallback to what is in the config diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index fc600a8b1e77b..f915db93cd429 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -392,7 +392,7 @@ static __poll_t rtas_log_poll(struct file *file, poll_table * wait) { poll_wait(file, &rtas_log_wait, wait); if (rtas_log_size) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 5a8bfee6e1877..04d0bbd7a1dd0 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu) if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2)) device_create_file(s, &dev_attr_pir); - if (cpu_has_feature(CPU_FTR_ARCH_206)) + if (cpu_has_feature(CPU_FTR_ARCH_206) && + !firmware_has_feature(FW_FEATURE_LPAR)) device_create_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ @@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu) if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2)) device_remove_file(s, &dev_attr_pir); - if (cpu_has_feature(CPU_FTR_ARCH_206)) + if (cpu_has_feature(CPU_FTR_ARCH_206) && + !firmware_has_feature(FW_FEATURE_LPAR)) device_remove_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index b12b8eb39c297..68a0e9d5b4402 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -22,6 +22,7 
@@ config KVM select PREEMPT_NOTIFIERS select ANON_INODES select HAVE_KVM_EVENTFD + select HAVE_KVM_VCPU_ASYNC_IOCTL select SRCU select KVM_VFIO select IRQ_BYPASS_MANAGER @@ -68,7 +69,7 @@ config KVM_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE - select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV) + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV) ---help--- Support running unmodified book3s_64 and book3s_32 guest kernels in virtual machines on book3s_64 host processors. diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 72d977e309523..234531d1bee1e 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -484,19 +484,33 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); + int ret; + + vcpu_load(vcpu); + ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); + vcpu_put(vcpu); + + return ret; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); + int ret; + + vcpu_load(vcpu); + ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); + vcpu_put(vcpu); + + return ret; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + regs->pc = kvmppc_get_pc(vcpu); regs->cr = kvmppc_get_cr(vcpu); regs->ctr = kvmppc_get_ctr(vcpu); @@ -518,6 +532,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); + vcpu_put(vcpu); return 0; } @@ -525,6 +540,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + kvmppc_set_pc(vcpu, regs->pc); kvmppc_set_cr(vcpu, regs->cr); kvmppc_set_ctr(vcpu, regs->ctr); @@ -545,6 +562,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); + vcpu_put(vcpu); return 0; } @@ -737,7 +755,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { + vcpu_load(vcpu); vcpu->guest_debug = dbg->control; + vcpu_put(vcpu); return 0; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index b73dbc9e797da..ef243fed2f2b6 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1269,6 +1269,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, /* Nothing to do */ goto out; + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + rpte = be64_to_cpu(hptep[1]); + vpte = hpte_new_to_old_v(vpte, rpte); + } + /* Unmap */ rev = &old->rev[idx]; guest_rpte = rev->guest_rpte; @@ -1298,7 +1303,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, /* Reload PTE after unmap */ vpte = be64_to_cpu(hptep[0]); - BUG_ON(vpte & HPTE_V_VALID); BUG_ON(!(vpte & HPTE_V_ABSENT)); @@ -1307,6 +1311,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, goto out; rpte = be64_to_cpu(hptep[1]); + + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + vpte = hpte_new_to_old_v(vpte, rpte); + rpte = hpte_new_to_old_r(rpte); + } + pshift = kvmppc_hpte_base_page_shift(vpte, rpte); avpn = 
HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23); pteg = idx / HPTES_PER_GROUP; @@ -1337,17 +1347,17 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, } new_pteg = hash & new_hash_mask; - if (vpte & HPTE_V_SECONDARY) { - BUG_ON(~pteg != (hash & old_hash_mask)); - new_pteg = ~new_pteg; - } else { - BUG_ON(pteg != (hash & old_hash_mask)); - } + if (vpte & HPTE_V_SECONDARY) + new_pteg = ~hash & new_hash_mask; new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP); new_hptep = (__be64 *)(new->virt + (new_idx << 4)); replace_vpte = be64_to_cpu(new_hptep[0]); + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + unsigned long replace_rpte = be64_to_cpu(new_hptep[1]); + replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte); + } if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { BUG_ON(new->order >= old->order); @@ -1363,6 +1373,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, /* Discard the previous HPTE */ } + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + rpte = hpte_old_to_new_r(vpte, rpte); + vpte = hpte_old_to_new_v(vpte); + } + new_hptep[1] = cpu_to_be64(rpte); new->rev[new_idx].guest_rpte = guest_rpte; /* No need for a barrier, since new HPT isn't active */ @@ -1380,12 +1395,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize) unsigned long i; int rc; - /* - * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs - * that POWER9 uses, and could well hit a BUG_ON on POWER9. - */ - if (cpu_has_feature(CPU_FTR_ARCH_300)) - return -EIO; for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { rc = resize_hpt_rehash_hpte(resize, i); if (rc != 0) @@ -1416,6 +1425,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) synchronize_srcu_expedited(&kvm->srcu); + if (cpu_has_feature(CPU_FTR_ARCH_300)) + kvmppc_setup_partition_table(kvm); + resize_hpt_debug(resize, "resize_hpt_pivot() done\n"); } diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c index 58618f644c56b..0c854816e653e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -573,7 +573,7 @@ long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, j = i + 1; if (npages) { set_dirty_bits(map, i, npages); - i = j + npages; + j = i + npages; } } return 0; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index e4f70c33fbc7d..89707354c2efd 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -116,6 +116,9 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644); MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core"); #endif +/* If set, the threads on each CPU core have to be in the same MMU mode */ +static bool no_mixing_hpt_and_radix; + static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); @@ -1003,8 +1006,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *tvcpu; - if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return EMULATE_FAIL; if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) return RESUME_GUEST; if (get_op(inst) != 31) @@ -1054,6 +1055,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* Called with vcpu->arch.vcore->lock held */ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, struct task_struct *tsk) { @@ -1174,7 +1176,10 @@ static int kvmppc_handle_exit_hv(struct 
kvm_run *run, struct kvm_vcpu *vcpu, swab32(vcpu->arch.emul_inst) : vcpu->arch.emul_inst; if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_debug_inst(run, vcpu); + spin_lock(&vcpu->arch.vcore->lock); } else { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1189,8 +1194,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, */ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: r = EMULATE_FAIL; - if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) + if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && + cpu_has_feature(CPU_FTR_ARCH_300)) { + /* Need vcore unlocked to call kvmppc_get_last_inst */ + spin_unlock(&vcpu->arch.vcore->lock); r = kvmppc_emulate_doorbell_instr(vcpu); + spin_lock(&vcpu->arch.vcore->lock); + } if (r == EMULATE_FAIL) { kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; @@ -1495,6 +1505,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_ARCH_COMPAT: *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); break; + case KVM_REG_PPC_DEC_EXPIRY: + *val = get_reg_val(id, vcpu->arch.dec_expires + + vcpu->arch.vcore->tb_offset); + break; default: r = -EINVAL; break; @@ -1722,6 +1736,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_ARCH_COMPAT: r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); break; + case KVM_REG_PPC_DEC_EXPIRY: + vcpu->arch.dec_expires = set_reg_val(id, *val) - + vcpu->arch.vcore->tb_offset; + break; default: r = -EINVAL; break; @@ -2376,8 +2394,8 @@ static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) static bool subcore_config_ok(int n_subcores, int n_threads) { /* - * POWER9 "SMT4" cores are permanently in what is effectively a 4-way split-core - * mode, with one thread per subcore. + * POWER9 "SMT4" cores are permanently in what is effectively a 4-way + * split-core mode, with one thread per subcore. */ if (cpu_has_feature(CPU_FTR_ARCH_300)) return n_subcores <= 4 && n_threads == 1; @@ -2413,8 +2431,8 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return false; - /* POWER9 currently requires all threads to be in the same MMU mode */ - if (cpu_has_feature(CPU_FTR_ARCH_300) && + /* Some POWER9 chips require all threads to be in the same MMU mode */ + if (no_mixing_hpt_and_radix && kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm)) return false; @@ -2677,9 +2695,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * threads are offline. Also check if the number of threads in this * guest are greater than the current system threads per guest. * On POWER9, we need to be not in independent-threads mode if - * this is a HPT guest on a radix host. + * this is a HPT guest on a radix host machine where the + * CPU threads may not be in different MMU modes. 
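The new KVM_REG_PPC_DEC_EXPIRY register above exposes the decrementer deadline in guest-timebase units: reads add the vcore's tb_offset and writes subtract it, so a save/restore cycle across migration preserves the host-side value. A toy round trip with made-up numbers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* tb_offset is the vcore's guest-vs-host timebase delta */
            uint64_t tb_offset   = 1000;
            uint64_t dec_expires = 5000;    /* host-timebase deadline */

            uint64_t user_view = dec_expires + tb_offset;   /* GET_ONE_REG */
            uint64_t restored  = user_view - tb_offset;     /* SET_ONE_REG */

            assert(restored == dec_expires);    /* migration round-trips */
            return 0;
    }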
*/ - hpt_on_radix = radix_enabled() && !kvm_is_radix(vc->kvm); + hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() && + !kvm_is_radix(vc->kvm); if (((controlled_threads > 1) && ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) || (hpt_on_radix && vc->kvm->arch.threads_indep)) { @@ -2829,7 +2849,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) */ if (!thr0_done) kvmppc_start_thread(NULL, pvc); - thr += pvc->num_threads; } /* @@ -2932,13 +2951,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); + preempt_enable(); + for (sub = 0; sub < core_info.n_subcores; ++sub) { pvc = core_info.vc[sub]; post_guest_process(pvc, pvc == vc); } spin_lock(&vc->lock); - preempt_enable(); out: vc->vcore_state = VCORE_INACTIVE; @@ -2985,7 +3005,7 @@ static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) { if (!xive_enabled()) return false; - return vcpu->arch.xive_saved_state.pipr < + return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < vcpu->arch.xive_saved_state.cppr; } #else @@ -3174,17 +3194,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) * this thread straight away and have it join in. */ if (!signal_pending(current)) { - if (vc->vcore_state == VCORE_PIGGYBACK) { - if (spin_trylock(&vc->lock)) { - if (vc->vcore_state == VCORE_RUNNING && - !VCORE_IS_EXITING(vc)) { - kvmppc_create_dtl_entry(vcpu, vc); - kvmppc_start_thread(vcpu, vc); - trace_kvm_guest_enter(vcpu); - } - spin_unlock(&vc->lock); - } - } else if (vc->vcore_state == VCORE_RUNNING && + if ((vc->vcore_state == VCORE_PIGGYBACK || + vc->vcore_state == VCORE_RUNNING) && !VCORE_IS_EXITING(vc)) { kvmppc_create_dtl_entry(vcpu, vc); kvmppc_start_thread(vcpu, vc); @@ -4446,6 +4457,19 @@ static int kvmppc_book3s_init_hv(void) if (kvmppc_radix_possible()) r = kvmppc_radix_init(); + + /* + * POWER9 chips before version 2.02 can't have some threads in + * HPT mode and some in radix mode on the same core. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + unsigned int pvr = mfspr(SPRN_PVR); + if ((pvr >> 16) == PVR_POWER9 && + (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) || + ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101))) + no_mixing_hpt_and_radix = true; + } + return r; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 7886b313d135f..f31f357b8c5ae 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -413,10 +413,11 @@ FTR_SECTION_ELSE /* On P9 we use the split_info for coordinating LPCR changes */ lwz r4, KVM_SPLIT_DO_SET(r6) cmpwi r4, 0 - beq 63f + beq 1f mr r3, r6 bl kvmhv_p9_set_lpcr nop +1: ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 63: /* Order load of vcpu after load of vcore */ @@ -617,13 +618,6 @@ kvmppc_hv_entry: lbz r0, KVM_RADIX(r9) cmpwi cr7, r0, 0 - /* Clear out SLB if hash */ - bne cr7, 2f - li r6,0 - slbmte r6,r6 - slbia - ptesync -2: /* * POWER7/POWER8 host -> guest partition switch code. * We don't have to lock against concurrent tlbies, @@ -738,19 +732,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 10: cmpdi r4, 0 beq kvmppc_primary_no_guest kvmppc_got_guest: - - /* Load up guest SLB entries (N.B. 
slb_max will be 0 for radix) */ - lwz r5,VCPU_SLB_MAX(r4) - cmpwi r5,0 - beq 9f - mtctr r5 - addi r6,r4,VCPU_SLB -1: ld r8,VCPU_SLB_E(r6) - ld r9,VCPU_SLB_V(r6) - slbmte r9,r8 - addi r6,r6,VCPU_SLB_SIZE - bdnz 1b -9: /* Increment yield count if they have a VPA */ ld r3, VCPU_VPA(r4) cmpdi r3, 0 @@ -957,7 +938,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) mftb r7 subf r3,r7,r8 mtspr SPRN_DEC,r3 - std r3,VCPU_DEC(r4) ld r5, VCPU_SPRG0(r4) ld r6, VCPU_SPRG1(r4) @@ -1018,6 +998,29 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) cmpdi r3, 512 /* 1 microsecond */ blt hdec_soon + /* For hash guest, clear out and reload the SLB */ + ld r6, VCPU_KVM(r4) + lbz r0, KVM_RADIX(r6) + cmpwi r0, 0 + bne 9f + li r6, 0 + slbmte r6, r6 + slbia + ptesync + + /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */ + lwz r5,VCPU_SLB_MAX(r4) + cmpwi r5,0 + beq 9f + mtctr r5 + addi r6,r4,VCPU_SLB +1: ld r8,VCPU_SLB_E(r6) + ld r9,VCPU_SLB_V(r6) + slbmte r9,r8 + addi r6,r6,VCPU_SLB_SIZE + bdnz 1b +9: + #ifdef CONFIG_KVM_XICS /* We are entering the guest on that thread, push VCPU to XIVE */ ld r10, HSTATE_XIVE_TIMA_PHYS(r13) @@ -1031,8 +1034,53 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) li r9, TM_QW1_OS + TM_WORD2 stwcix r11,r9,r10 li r9, 1 - stw r9, VCPU_XIVE_PUSHED(r4) + stb r9, VCPU_XIVE_PUSHED(r4) eieio + + /* + * We clear the irq_pending flag. There is a small chance of a + * race vs. the escalation interrupt happening on another + * processor setting it again, but the only consequence is to + * cause a spurrious wakeup on the next H_CEDE which is not an + * issue. + */ + li r0,0 + stb r0, VCPU_IRQ_PENDING(r4) + + /* + * In single escalation mode, if the escalation interrupt is + * on, we mask it. + */ + lbz r0, VCPU_XIVE_ESC_ON(r4) + cmpwi r0,0 + beq 1f + ld r10, VCPU_XIVE_ESC_RADDR(r4) + li r9, XIVE_ESB_SET_PQ_01 + ldcix r0, r10, r9 + sync + + /* We have a possible subtle race here: The escalation interrupt might + * have fired and be on its way to the host queue while we mask it, + * and if we unmask it early enough (re-cede right away), there is + * a theorical possibility that it fires again, thus landing in the + * target queue more than once which is a big no-no. + * + * Fortunately, solving this is rather easy. If the above load setting + * PQ to 01 returns a previous value where P is set, then we know the + * escalation interrupt is somewhere on its way to the host. In that + * case we simply don't clear the xive_esc_on flag below. It will be + * eventually cleared by the handler for the escalation interrupt. + * + * Then, when doing a cede, we check that flag again before re-enabling + * the escalation interrupt, and if set, we abort the cede. + */ + andi. 
r0, r0, XIVE_ESB_VAL_P + bne- 1f + + /* Now P is 0, we can clear the flag */ + li r0, 0 + stb r0, VCPU_XIVE_ESC_ON(r4) +1: no_xive: #endif /* CONFIG_KVM_XICS */ @@ -1193,7 +1241,7 @@ hdec_soon: addi r3, r4, VCPU_TB_RMEXIT bl kvmhv_accumulate_time #endif - b guest_exit_cont + b guest_bypass /****************************************************************************** * * @@ -1423,15 +1471,35 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) blt deliver_guest_interrupt guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ + /* Save more register state */ + mfdar r6 + mfdsisr r7 + std r6, VCPU_DAR(r9) + stw r7, VCPU_DSISR(r9) + /* don't overwrite fault_dar/fault_dsisr if HDSI */ + cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE + beq mc_cont + std r6, VCPU_FAULT_DAR(r9) + stw r7, VCPU_FAULT_DSISR(r9) + + /* See if it is a machine check */ + cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK + beq machine_check_realmode +mc_cont: +#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING + addi r3, r9, VCPU_TB_RMEXIT + mr r4, r9 + bl kvmhv_accumulate_time +#endif #ifdef CONFIG_KVM_XICS /* We are exiting, pull the VP from the XIVE */ - lwz r0, VCPU_XIVE_PUSHED(r9) + lbz r0, VCPU_XIVE_PUSHED(r9) cmpwi cr0, r0, 0 beq 1f li r7, TM_SPC_PULL_OS_CTX li r6, TM_QW1_OS mfmsr r0 - andi. r0, r0, MSR_IR /* in real mode? */ + andi. r0, r0, MSR_DR /* in real mode? */ beq 2f ld r10, HSTATE_XIVE_TIMA_VIRT(r13) cmpldi cr0, r10, 0 @@ -1454,33 +1522,42 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ /* Fixup some of the state for the next load */ li r10, 0 li r0, 0xff - stw r10, VCPU_XIVE_PUSHED(r9) + stb r10, VCPU_XIVE_PUSHED(r9) stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) eieio 1: #endif /* CONFIG_KVM_XICS */ - /* Save more register state */ - mfdar r6 - mfdsisr r7 - std r6, VCPU_DAR(r9) - stw r7, VCPU_DSISR(r9) - /* don't overwrite fault_dar/fault_dsisr if HDSI */ - cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE - beq mc_cont - std r6, VCPU_FAULT_DAR(r9) - stw r7, VCPU_FAULT_DSISR(r9) - /* See if it is a machine check */ - cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK - beq machine_check_realmode -mc_cont: -#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING - addi r3, r9, VCPU_TB_RMEXIT - mr r4, r9 - bl kvmhv_accumulate_time -#endif + /* For hash guest, read the guest SLB and save it away */ + ld r5, VCPU_KVM(r9) + lbz r0, KVM_RADIX(r5) + li r5, 0 + cmpwi r0, 0 + bne 3f /* for radix, save 0 entries */ + lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ + mtctr r0 + li r6,0 + addi r7,r9,VCPU_SLB +1: slbmfee r8,r6 + andis. r0,r8,SLB_ESID_V@h + beq 2f + add r8,r8,r6 /* put index in */ + slbmfev r3,r6 + std r8,VCPU_SLB_E(r7) + std r3,VCPU_SLB_V(r7) + addi r7,r7,VCPU_SLB_SIZE + addi r5,r5,1 +2: addi r6,r6,1 + bdnz 1b + /* Finally clear out the SLB */ + li r0,0 + slbmte r0,r0 + slbia + ptesync +3: stw r5,VCPU_SLB_MAX(r9) +guest_bypass: mr r3, r12 /* Increment exit count, poke other threads to exit */ bl kvmhv_commence_exit @@ -1501,31 +1578,6 @@ mc_cont: ori r6,r6,1 mtspr SPRN_CTRLT,r6 4: - /* Check if we are running hash or radix and store it in cr2 */ - ld r5, VCPU_KVM(r9) - lbz r0, KVM_RADIX(r5) - cmpwi cr2,r0,0 - - /* Read the guest SLB and save it away */ - li r5, 0 - bne cr2, 3f /* for radix, save 0 entries */ - lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ - mtctr r0 - li r6,0 - addi r7,r9,VCPU_SLB -1: slbmfee r8,r6 - andis. 
r0,r8,SLB_ESID_V@h - beq 2f - add r8,r8,r6 /* put index in */ - slbmfev r3,r6 - std r8,VCPU_SLB_E(r7) - std r3,VCPU_SLB_V(r7) - addi r7,r7,VCPU_SLB_SIZE - addi r5,r5,1 -2: addi r6,r6,1 - bdnz 1b -3: stw r5,VCPU_SLB_MAX(r9) - /* * Save the guest PURR/SPURR */ @@ -1803,7 +1855,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) ld r5, VCPU_KVM(r9) lbz r0, KVM_RADIX(r5) cmpwi cr2, r0, 0 - beq cr2, 3f + beq cr2, 4f /* Radix: Handle the case where the guest used an illegal PID */ LOAD_REG_ADDR(r4, mmu_base_pid) @@ -1839,15 +1891,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) BEGIN_FTR_SECTION PPC_INVALIDATE_ERAT END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1) - b 4f +4: #endif /* CONFIG_PPC_RADIX_MMU */ - /* Hash: clear out SLB */ -3: li r5,0 - slbmte r5,r5 - slbia - ptesync -4: /* * POWER7/POWER8 guest -> host partition switch code. * We don't have to lock against tlbies but we do @@ -2745,7 +2791,32 @@ kvm_cede_prodded: /* we've ceded but we want to give control to the host */ kvm_cede_exit: ld r9, HSTATE_KVM_VCPU(r13) - b guest_exit_cont +#ifdef CONFIG_KVM_XICS + /* Abort if we still have a pending escalation */ + lbz r5, VCPU_XIVE_ESC_ON(r9) + cmpwi r5, 0 + beq 1f + li r0, 0 + stb r0, VCPU_CEDED(r9) +1: /* Enable XIVE escalation */ + li r5, XIVE_ESB_SET_PQ_00 + mfmsr r0 + andi. r0, r0, MSR_DR /* in real mode? */ + beq 1f + ld r10, VCPU_XIVE_ESC_VADDR(r9) + cmpdi r10, 0 + beq 3f + ldx r0, r10, r5 + b 2f +1: ld r10, VCPU_XIVE_ESC_RADDR(r9) + cmpdi r10, 0 + beq 3f + ldcix r0, r10, r5 +2: sync + li r0, 1 + stb r0, VCPU_XIVE_ESC_ON(r9) +#endif /* CONFIG_KVM_XICS */ +3: b guest_exit_cont /* Try to handle a machine check in real mode */ machine_check_realmode: diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 901e6fe00c39c..c18e845019ec5 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -96,7 +96,7 @@ kvm_start_entry: kvm_start_lightweight: /* Copy registers into shadow vcpu so we can access them in real mode */ - GET_SHADOW_VCPU(r3) + mr r3, r4 bl FUNC(kvmppc_copy_to_svcpu) nop REST_GPR(4, r1) @@ -165,9 +165,7 @@ after_sprg3_load: stw r12, VCPU_TRAP(r3) /* Transfer reg values from shadow vcpu back to vcpu struct */ - /* On 64-bit, interrupts are still off at this point */ - GET_SHADOW_VCPU(r4) bl FUNC(kvmppc_copy_from_svcpu) nop diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 7deaeeb14b935..3ae752314b349 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -121,7 +121,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); if (svcpu->in_use) { - kvmppc_copy_from_svcpu(vcpu, svcpu); + kvmppc_copy_from_svcpu(vcpu); } memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; @@ -143,9 +143,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) } /* Copy data needed by real-mode code from vcpu to shadow vcpu */ -void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, - struct kvm_vcpu *vcpu) +void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) { + struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); + svcpu->gpr[0] = vcpu->arch.gpr[0]; svcpu->gpr[1] = vcpu->arch.gpr[1]; svcpu->gpr[2] = vcpu->arch.gpr[2]; @@ -177,17 +178,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, if (cpu_has_feature(CPU_FTR_ARCH_207S)) vcpu->arch.entry_ic = mfspr(SPRN_IC); svcpu->in_use = true; + 
+	svcpu_put(svcpu);
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
-			    struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * vcpu_put would just call us again because in_use hasn't
-	 * been updated yet.
-	 */
-	preempt_disable();
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	/*
 	 * Maybe we were already preempted and synced the svcpu from
@@ -233,7 +231,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	svcpu->in_use = false;
 
 out:
-	preempt_enable();
+	svcpu_put(svcpu);
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 6882bc94eba81..f0f5cd4d2fe7c 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -84,12 +84,22 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
 {
 	struct kvm_vcpu *vcpu = data;
 
-	/* We use the existing H_PROD mechanism to wake up the target */
-	vcpu->arch.prodded = 1;
+	vcpu->arch.irq_pending = 1;
 	smp_mb();
 	if (vcpu->arch.ceded)
 		kvmppc_fast_vcpu_kick(vcpu);
 
+	/* Since we have the no-EOI flag, the interrupt is effectively
+	 * disabled now. Clearing xive_esc_on means we won't bother
+	 * doing so on the next entry.
+	 *
+	 * This also allows the entry code to know that if a PQ combination
+	 * of 10 is observed while xive_esc_on is true, it means the queue
+	 * contains an unprocessed escalation interrupt. We don't make use of
+	 * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
+	 */
+	vcpu->arch.xive_esc_on = false;
+
 	return IRQ_HANDLED;
 }
 
@@ -112,19 +122,21 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
 		return -EIO;
 	}
 
-	/*
-	 * Future improvement: start with them disabled
-	 * and handle DD2 and later scheme of merged escalation
-	 * interrupts
-	 */
-	name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
-			 vcpu->kvm->arch.lpid, xc->server_num, prio);
+	if (xc->xive->single_escalation)
+		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
+				 vcpu->kvm->arch.lpid, xc->server_num);
+	else
+		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+				 vcpu->kvm->arch.lpid, xc->server_num, prio);
 	if (!name) {
 		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
 		       prio, xc->server_num);
 		rc = -ENOMEM;
 		goto error;
 	}
+
+	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
+
 	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
 			 IRQF_NO_THREAD, name, vcpu);
 	if (rc) {
@@ -133,6 +145,25 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
 		goto error;
 	}
 	xc->esc_virq_names[prio] = name;
+
+	/* In single escalation mode, we grab the ESB MMIO of the
+	 * interrupt and mask it. Also populate the VCPU v/raddr
+	 * of the ESB page for use by asm entry/exit code. Finally
+	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
+	 * core code from performing an EOI on the escalation
+	 * interrupt, thus leaving it effectively masked after
+	 * it fires once.
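For readers unfamiliar with XIVE, the masking described here works by writing the source's PQ state bits through its ESB MMIO page. A minimal sketch of that idea, assuming the xive_vm_esb_load() accessor and the XIVE_ESB_SET_PQ_* offsets used elsewhere in this patch (the helper name itself is hypothetical):

/* Illustrative only: gate an escalation source via its ESB PQ bits. */
static void kvmppc_xive_esc_set_masked(struct xive_irq_data *xd, bool masked)
{
	/*
	 * A load from the SET_PQ_01 offset moves the source to the
	 * "off" state so it will not fire again, while SET_PQ_00
	 * re-enables it; either load returns the previous PQ bits.
	 */
	xive_vm_esb_load(xd, masked ? XIVE_ESB_SET_PQ_01 : XIVE_ESB_SET_PQ_00);
}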
+ */ + if (xc->xive->single_escalation) { + struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); + struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + + xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); + vcpu->arch.xive_esc_raddr = xd->eoi_page; + vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; + xd->flags |= XIVE_IRQ_NO_EOI; + } + return 0; error: irq_dispose_mapping(xc->esc_virq[prio]); @@ -191,12 +222,12 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio) pr_devel("Provisioning prio... %d\n", prio); - /* Provision each VCPU and enable escalations */ + /* Provision each VCPU and enable escalations if needed */ kvm_for_each_vcpu(i, vcpu, kvm) { if (!vcpu->arch.xive_vcpu) continue; rc = xive_provision_queue(vcpu, prio); - if (rc == 0) + if (rc == 0 && !xive->single_escalation) xive_attach_escalation(vcpu, prio); if (rc) return rc; @@ -1082,6 +1113,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, /* Allocate IPI */ xc->vp_ipi = xive_native_alloc_irq(); if (!xc->vp_ipi) { + pr_err("Failed to allocate xive irq for VCPU IPI\n"); r = -EIO; goto bail; } @@ -1091,19 +1123,34 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, if (r) goto bail; + /* + * Enable the VP first as the single escalation mode will + * affect escalation interrupts numbering + */ + r = xive_native_enable_vp(xc->vp_id, xive->single_escalation); + if (r) { + pr_err("Failed to enable VP in OPAL, err %d\n", r); + goto bail; + } + /* * Initialize queues. Initially we set them all for no queueing * and we enable escalation for queue 0 only which we'll use for * our mfrr change notifications. If the VCPU is hot-plugged, we - * do handle provisioning however. + * do handle provisioning however based on the existing "map" + * of enabled queues. */ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { struct xive_q *q = &xc->queues[i]; + /* Single escalation, no queue 7 */ + if (i == 7 && xive->single_escalation) + break; + /* Is queue already enabled ? 
Provision it */
 		if (xive->qmap & (1 << i)) {
 			r = xive_provision_queue(vcpu, i);
-			if (r == 0)
+			if (r == 0 && !xive->single_escalation)
 				xive_attach_escalation(vcpu, i);
 			if (r)
 				goto bail;
@@ -1123,11 +1170,6 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
 	if (r)
 		goto bail;
 
-	/* Enable the VP */
-	r = xive_native_enable_vp(xc->vp_id);
-	if (r)
-		goto bail;
-
 	/* Route the IPI */
 	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
 	if (!r)
@@ -1474,6 +1516,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 	pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
 		 val, server, guest_prio);
+
 	/*
 	 * If the source doesn't already have an IPI, allocate
 	 * one and get the corresponding data
@@ -1762,6 +1805,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
 	if (xive->vp_base == XIVE_INVALID_VP)
 		ret = -ENOMEM;
 
+	xive->single_escalation = xive_native_has_single_escalation();
+
 	if (ret) {
 		kfree(xive);
 		return ret;
@@ -1795,6 +1840,7 @@ static int xive_debug_show(struct seq_file *m, void *private)
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+		unsigned int i;
 
 		if (!xc)
 			continue;
@@ -1804,6 +1850,33 @@ static int xive_debug_show(struct seq_file *m, void *private)
 			   xc->server_num, xc->cppr, xc->hw_cppr,
 			   xc->mfrr, xc->pending,
 			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
+
+		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+			struct xive_q *q = &xc->queues[i];
+			u32 i0, i1, idx;
+
+			if (!q->qpage && !xc->esc_virq[i])
+				continue;
+
+			seq_printf(m, " [q%d]: ", i);
+
+			if (q->qpage) {
+				idx = q->idx;
+				i0 = be32_to_cpup(q->qpage + idx);
+				idx = (idx + 1) & q->msk;
+				i1 = be32_to_cpup(q->qpage + idx);
+				seq_printf(m, "T=%d %08x %08x...\n", q->toggle, i0, i1);
+			}
+			if (xc->esc_virq[i]) {
+				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
+				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
+				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
+					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
+					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
+					   xc->esc_virq[i], pq, xd->eoi_page);
+				seq_printf(m, "\n");
+			}
+		}
 
 		t_rm_h_xirr += xc->stat_rm_h_xirr;
 		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 6ba63f8e8a614..a08ae6fd4c51f 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -120,6 +120,8 @@ struct kvmppc_xive {
 	u32	q_order;
 	u32	q_page_order;
 
+	/* Flags */
+	u8	single_escalation;
 };
 
 #define KVMPPC_XIVE_Q_COUNT	8
@@ -201,25 +203,20 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp
 * is as follows.
 *
 * Guest requests for priorities 0...6 are honored. A guest request for anything
- * higher results in a priority of 7 being applied.
- *
- * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb
- * in order to match AIX expectations
+ * higher results in a priority of 6 being applied.
* * Similar mapping is done for CPPR values */ static inline u8 xive_prio_from_guest(u8 prio) { - if (prio == 0xff || prio < 8) + if (prio == 0xff || prio < 6) return prio; - return 7; + return 6; } static inline u8 xive_prio_to_guest(u8 prio) { - if (prio == 0xff || prio < 7) - return prio; - return 0xb; + return prio; } static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 83b485810aea2..6038e2e7aee03 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1431,6 +1431,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + regs->pc = vcpu->arch.pc; regs->cr = kvmppc_get_cr(vcpu); regs->ctr = vcpu->arch.ctr; @@ -1452,6 +1454,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); + vcpu_put(vcpu); return 0; } @@ -1459,6 +1462,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + vcpu->arch.pc = regs->pc; kvmppc_set_cr(vcpu, regs->cr); vcpu->arch.ctr = regs->ctr; @@ -1480,6 +1485,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); + vcpu_put(vcpu); return 0; } @@ -1607,30 +1613,42 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + int ret; + + vcpu_load(vcpu); + sregs->pvr = vcpu->arch.pvr; get_sregs_base(vcpu, sregs); get_sregs_arch206(vcpu, sregs); - return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); + ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); + + vcpu_put(vcpu); + return ret; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - int ret; + int ret = -EINVAL; + vcpu_load(vcpu); if (vcpu->arch.pvr != sregs->pvr) - return -EINVAL; + goto out; ret = set_sregs_base(vcpu, sregs); if (ret < 0) - return ret; + goto out; ret = set_sregs_arch206(vcpu, sregs); if (ret < 0) - return ret; + goto out; - return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); + ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); + +out: + vcpu_put(vcpu); + return ret; } int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, @@ -1773,7 +1791,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, { int r; + vcpu_load(vcpu); r = kvmppc_core_vcpu_translate(vcpu, tr); + vcpu_put(vcpu); return r; } @@ -1996,12 +2016,15 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, { struct debug_reg *dbg_reg; int n, b = 0, w = 0; + int ret = 0; + + vcpu_load(vcpu); if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { vcpu->arch.dbg_reg.dbcr0 = 0; vcpu->guest_debug = 0; kvm_guest_protect_msr(vcpu, MSR_DE, false); - return 0; + goto out; } kvm_guest_protect_msr(vcpu, MSR_DE, true); @@ -2033,8 +2056,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, #endif if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) - return 0; + goto out; + ret = -EINVAL; for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) { uint64_t addr = dbg->arch.bp[n].addr; uint32_t type = dbg->arch.bp[n].type; @@ -2045,21 +2069,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, if (type & ~(KVMPPC_DEBUG_WATCH_READ | KVMPPC_DEBUG_WATCH_WRITE | KVMPPC_DEBUG_BREAKPOINT)) - return -EINVAL; + goto out; 
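The booke.c conversions above all follow one shape: take the vcpu context at the top of the ioctl handler, funnel every error path through a single exit label, and drop the context there. A hedged sketch of that pattern (the example_* helpers are placeholders, not kernel APIs):

static bool example_args_valid(void __user *argp);	/* placeholder */
static long example_do_work(struct kvm_vcpu *vcpu);	/* placeholder */

long kvm_arch_vcpu_ioctl_example(struct kvm_vcpu *vcpu, void __user *argp)
{
	long ret = -EINVAL;

	vcpu_load(vcpu);		/* pin the vcpu context for the ioctl */

	if (!example_args_valid(argp))
		goto out;

	ret = example_do_work(vcpu);
out:
	vcpu_put(vcpu);			/* always balances vcpu_load() */
	return ret;
}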
if (type & KVMPPC_DEBUG_BREAKPOINT) { /* Setting H/W breakpoint */ if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++)) - return -EINVAL; + goto out; } else { /* Setting H/W watchpoint */ if (kvmppc_booke_add_watchpoint(dbg_reg, addr, type, w++)) - return -EINVAL; + goto out; } } - return 0; + ret = 0; +out: + vcpu_put(vcpu); + return ret; } void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index af833531af319..a382e15135e6d 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) } #endif /* CONFIG_VSX */ +#ifdef CONFIG_ALTIVEC +static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) +{ + if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) { + kvmppc_core_queue_vec_unavail(vcpu); + return true; + } + + return false; +} +#endif /* CONFIG_ALTIVEC */ + /* * XXX to do: * lfiwax, lfiwzx @@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; vcpu->arch.mmio_sp64_extend = 0; vcpu->arch.mmio_sign_extend = 0; + vcpu->arch.mmio_vmx_copy_nums = 0; switch (get_op(inst)) { case 31: @@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) rs, 4, 1); break; #endif /* CONFIG_VSX */ + +#ifdef CONFIG_ALTIVEC + case OP_31_XOP_LVX: + if (kvmppc_check_altivec_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.vaddr_accessed &= ~0xFULL; + vcpu->arch.paddr_accessed &= ~0xFULL; + vcpu->arch.mmio_vmx_copy_nums = 2; + emulated = kvmppc_handle_load128_by2x64(run, vcpu, + KVM_MMIO_REG_VMX|rt, 1); + break; + + case OP_31_XOP_STVX: + if (kvmppc_check_altivec_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.vaddr_accessed &= ~0xFULL; + vcpu->arch.paddr_accessed &= ~0xFULL; + vcpu->arch.mmio_vmx_copy_nums = 2; + emulated = kvmppc_handle_store128_by2x64(run, vcpu, + rs, 1); + break; +#endif /* CONFIG_ALTIVEC */ + default: emulated = EMULATE_FAIL; break; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 0a7c88786ec0e..403e642c78f51 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -638,8 +638,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 1; break; case KVM_CAP_SPAPR_RESIZE_HPT: - /* Disable this on POWER9 until code handles new HPTE format */ - r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300); + r = !!hv_enabled; break; #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -763,7 +762,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; - vcpu->arch.dec_expires = ~(u64)0; + vcpu->arch.dec_expires = get_tb(); #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); @@ -930,6 +929,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, } #endif /* CONFIG_VSX */ +#ifdef CONFIG_ALTIVEC +static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu, + u64 gpr) +{ + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + u32 hi, lo; + u32 di; + +#ifdef __BIG_ENDIAN + hi = gpr >> 32; + lo = gpr & 0xffffffff; +#else + lo = gpr >> 32; + hi = gpr & 0xffffffff; +#endif + + di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ + if (di > 1) + return; + + if (vcpu->arch.mmio_host_swabbed) + di = 1 - di; + + VCPU_VSX_VR(vcpu, index).u[di * 2] = hi; + VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo; +} +#endif /* 
CONFIG_ALTIVEC */ + #ifdef CONFIG_PPC_FPU static inline u64 sp_to_dp(u32 fprs) { @@ -1032,6 +1059,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) kvmppc_set_vsr_dword_dump(vcpu, gpr); break; +#endif +#ifdef CONFIG_ALTIVEC + case KVM_MMIO_REG_VMX: + kvmppc_set_vmx_dword(vcpu, gpr); + break; #endif default: BUG(); @@ -1106,11 +1138,9 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, { enum emulation_result emulated = EMULATE_DONE; - /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */ - if ( (vcpu->arch.mmio_vsx_copy_nums > 4) || - (vcpu->arch.mmio_vsx_copy_nums < 0) ) { + /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ + if (vcpu->arch.mmio_vsx_copy_nums > 4) return EMULATE_FAIL; - } while (vcpu->arch.mmio_vsx_copy_nums) { emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, @@ -1252,11 +1282,9 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.io_gpr = rs; - /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */ - if ( (vcpu->arch.mmio_vsx_copy_nums > 4) || - (vcpu->arch.mmio_vsx_copy_nums < 0) ) { + /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */ + if (vcpu->arch.mmio_vsx_copy_nums > 4) return EMULATE_FAIL; - } while (vcpu->arch.mmio_vsx_copy_nums) { if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) @@ -1312,6 +1340,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, } #endif /* CONFIG_VSX */ +#ifdef CONFIG_ALTIVEC +/* handle quadword load access in two halves */ +int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, int is_default_endian) +{ + enum emulation_result emulated; + + while (vcpu->arch.mmio_vmx_copy_nums) { + emulated = __kvmppc_handle_load(run, vcpu, rt, 8, + is_default_endian, 0); + + if (emulated != EMULATE_DONE) + break; + + vcpu->arch.paddr_accessed += run->mmio.len; + vcpu->arch.mmio_vmx_copy_nums--; + } + + return emulated; +} + +static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val) +{ + vector128 vrs = VCPU_VSX_VR(vcpu, rs); + u32 di; + u64 w0, w1; + + di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */ + if (di > 1) + return -1; + + if (vcpu->arch.mmio_host_swabbed) + di = 1 - di; + + w0 = vrs.u[di * 2]; + w1 = vrs.u[di * 2 + 1]; + +#ifdef __BIG_ENDIAN + *val = (w0 << 32) | w1; +#else + *val = (w1 << 32) | w0; +#endif + return 0; +} + +/* handle quadword store in two halves */ +int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rs, int is_default_endian) +{ + u64 val = 0; + enum emulation_result emulated = EMULATE_DONE; + + vcpu->arch.io_gpr = rs; + + while (vcpu->arch.mmio_vmx_copy_nums) { + if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1) + return EMULATE_FAIL; + + emulated = kvmppc_handle_store(run, vcpu, val, 8, + is_default_endian); + if (emulated != EMULATE_DONE) + break; + + vcpu->arch.paddr_accessed += run->mmio.len; + vcpu->arch.mmio_vmx_copy_nums--; + } + + return emulated; +} + +static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, + struct kvm_run *run) +{ + enum emulation_result emulated = EMULATE_FAIL; + int r; + + vcpu->arch.paddr_accessed += run->mmio.len; + + if (!vcpu->mmio_is_write) { + emulated = kvmppc_handle_load128_by2x64(run, vcpu, + vcpu->arch.io_gpr, 1); + } else { + emulated = kvmppc_handle_store128_by2x64(run, vcpu, + vcpu->arch.io_gpr, 1); + } + + switch (emulated) { + case EMULATE_DO_MMIO: + run->exit_reason = 
KVM_EXIT_MMIO; + r = RESUME_HOST; + break; + case EMULATE_FAIL: + pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + r = RESUME_HOST; + break; + default: + r = RESUME_GUEST; + break; + } + return r; +} +#endif /* CONFIG_ALTIVEC */ + int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r = 0; @@ -1413,6 +1546,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r; + vcpu_load(vcpu); + if (vcpu->mmio_needed) { vcpu->mmio_needed = 0; if (!vcpu->mmio_is_write) @@ -1427,7 +1562,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); if (r == RESUME_HOST) { vcpu->mmio_needed = 1; - return r; + goto out; + } + } +#endif +#ifdef CONFIG_ALTIVEC + if (vcpu->arch.mmio_vmx_copy_nums > 0) + vcpu->arch.mmio_vmx_copy_nums--; + + if (vcpu->arch.mmio_vmx_copy_nums > 0) { + r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); + if (r == RESUME_HOST) { + vcpu->mmio_needed = 1; + goto out; } } #endif @@ -1461,6 +1608,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_sigset_deactivate(vcpu); +out: + vcpu_put(vcpu); return r; } @@ -1608,23 +1757,31 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, return -EINVAL; } -long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; - long r; - switch (ioctl) { - case KVM_INTERRUPT: { + if (ioctl == KVM_INTERRUPT) { struct kvm_interrupt irq; - r = -EFAULT; if (copy_from_user(&irq, argp, sizeof(irq))) - goto out; - r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); - goto out; + return -EFAULT; + return kvm_vcpu_ioctl_interrupt(vcpu, &irq); } + return -ENOIOCTLCMD; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + long r; + + vcpu_load(vcpu); + switch (ioctl) { case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; @@ -1664,6 +1821,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } out: + vcpu_put(vcpu); return r; } diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c index e44d2b2ea97e3..1c03c978eb184 100644 --- a/arch/powerpc/kvm/timing.c +++ b/arch/powerpc/kvm/timing.c @@ -143,8 +143,7 @@ static int kvmppc_exit_timing_show(struct seq_file *m, void *private) int i; u64 min, max, sum, sum_quad; - seq_printf(m, "%s", "type count min max sum sum_squared\n"); - + seq_puts(m, "type count min max sum sum_squared\n"); for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c index 1604110c42386..916844f99c64e 100644 --- a/arch/powerpc/mm/drmem.c +++ b/arch/powerpc/mm/drmem.c @@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, u32 i, n_lmbs; n_lmbs = of_read_number(prop++, 1); + if (n_lmbs == 0) + return; for (i = 0; i < n_lmbs; i++) { read_drconf_v1_cell(&lmb, &prop); @@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, u32 i, j, lmb_sets; lmb_sets = of_read_number(prop++, 1); + if (lmb_sets == 0) + return; for (i = 0; i < lmb_sets; i++) { read_drconf_v2_cell(&dr_cell, &prop); @@ -354,6 +358,8 @@ static void __init 
init_drmem_v1_lmbs(const __be32 *prop) struct drmem_lmb *lmb; drmem_info->n_lmbs = of_read_number(prop++, 1); + if (drmem_info->n_lmbs == 0) + return; drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb), GFP_KERNEL); @@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop) int lmb_index; lmb_sets = of_read_number(prop++, 1); + if (lmb_sets == 0) + return; /* first pass, calculate the number of LMBs */ p = prop; diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c index 5a69b51d08a36..d573d7d07f25f 100644 --- a/arch/powerpc/mm/hash64_4k.c +++ b/arch/powerpc/mm/hash64_4k.c @@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, * need to add in 0x1 if it's a read-only user page */ rflags = htab_convert_pte_flags(new_pte); - rpte = __real_pte(__pte(old_pte), ptep); + rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE); if (cpu_has_feature(CPU_FTR_NOEXECUTE) && !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) @@ -117,7 +117,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, return -1; } new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; - new_pte |= pte_set_hidx(ptep, rpte, 0, slot); + new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE); } *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c index 2253bbc6a599d..e601d95c3b202 100644 --- a/arch/powerpc/mm/hash64_64k.c +++ b/arch/powerpc/mm/hash64_64k.c @@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, subpg_index = (ea & (PAGE_SIZE - 1)) >> shift; vpn = hpt_vpn(ea, vsid, ssize); - rpte = __real_pte(__pte(old_pte), ptep); + rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE); /* *None of the sub 4k page is hashed */ @@ -214,7 +214,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, return -1; } - new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot); + new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE); new_pte |= H_PAGE_HASHPTE; *ptep = __pte(new_pte & ~H_PAGE_BUSY); @@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access, } while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); rflags = htab_convert_pte_flags(new_pte); - rpte = __real_pte(__pte(old_pte), ptep); + rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE); if (cpu_has_feature(CPU_FTR_NOEXECUTE) && !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) @@ -327,7 +327,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access, } new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE; - new_pte |= pte_set_hidx(ptep, rpte, 0, slot); + new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE); } *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 7d07c7e17db67..cf290d415dcd8 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void) __pmd_index_size = H_PMD_INDEX_SIZE; __pud_index_size = H_PUD_INDEX_SIZE; __pgd_index_size = H_PGD_INDEX_SIZE; + __pud_cache_index = H_PUD_CACHE_INDEX; __pmd_cache_index = H_PMD_CACHE_INDEX; __pte_table_size = H_PTE_TABLE_SIZE; __pmd_table_size = H_PMD_TABLE_SIZE; diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index 12511f5a015fc..b320f5097a061 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ 
-27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, unsigned long vpn; unsigned long old_pte, new_pte; unsigned long rflags, pa, sz; - long slot; + long slot, offset; BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); @@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, } while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte))); rflags = htab_convert_pte_flags(new_pte); - rpte = __real_pte(__pte(old_pte), ptep); + if (unlikely(mmu_psize == MMU_PAGE_16G)) + offset = PTRS_PER_PUD; + else + offset = PTRS_PER_PMD; + rpte = __real_pte(__pte(old_pte), ptep, offset); sz = ((1UL) << shift); if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) @@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, return -1; } - new_pte |= pte_set_hidx(ptep, rpte, 0, slot); + new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset); } /* diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c index eb8c6c8c4851a..2b656e67f2eaa 100644 --- a/arch/powerpc/mm/init-common.c +++ b/arch/powerpc/mm/init-common.c @@ -100,6 +100,6 @@ void pgtable_cache_init(void) * same size as either the pgd or pmd index except with THP enabled * on book3s 64 */ - if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) - pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); + if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX)) + pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor); } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 314d19ab9385e..edd8d0bc9364f 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -143,11 +143,6 @@ static void reset_numa_cpu_lookup_table(void) numa_cpu_lookup_table[cpu] = -1; } -static void update_numa_cpu_lookup_table(unsigned int cpu, int node) -{ - numa_cpu_lookup_table[cpu] = node; -} - static void map_cpu_to_node(int cpu, int node) { update_numa_cpu_lookup_table(cpu, node); diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 573a9a2ee4555..2e10a964e2908 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -17,9 +17,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -333,6 +335,22 @@ static void __init radix_init_pgtable(void) "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); asm volatile("eieio; tlbsync; ptesync" : : : "memory"); trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1); + + /* + * The init_mm context is given the first available (non-zero) PID, + * which is the "guard PID" and contains no page table. PIDR should + * never be set to zero because that duplicates the kernel address + * space at the 0x0... offset (quadrant 0)! + * + * An arbitrary PID that may later be allocated by the PID allocator + * for userspace processes must not be used either, because that + * would cause stale user mappings for that PID on CPUs outside of + * the TLB invalidation scheme (because it won't be in mm_cpumask). + * + * So permanently carve out one PID for the purpose of a guard PID. 
+	 */
+	init_mm.context.id = mmu_base_pid;
+	mmu_base_pid++;
 }
 
 static void __init radix_init_partition_table(void)
@@ -535,6 +553,7 @@ void __init radix__early_init_mmu(void)
 	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
 	__pud_index_size = RADIX_PUD_INDEX_SIZE;
 	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
+	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
 	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
 	__pte_table_size = RADIX_PTE_TABLE_SIZE;
 	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
@@ -579,7 +598,8 @@ void __init radix__early_init_mmu(void)
 
 	radix_init_iamr();
 	radix_init_pgtable();
-
+	/* Switch to the guard PID before turning on MMU */
+	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
 }
@@ -604,6 +624,7 @@ void radix__early_init_mmu_secondary(void)
 	}
 
 	radix_init_iamr();
+	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
 }
@@ -666,6 +687,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 	pud_clear(pud);
 }
 
+struct change_mapping_params {
+	pte_t *pte;
+	unsigned long start;
+	unsigned long end;
+	unsigned long aligned_start;
+	unsigned long aligned_end;
+};
+
+static int stop_machine_change_mapping(void *data)
+{
+	struct change_mapping_params *params =
+			(struct change_mapping_params *)data;
+
+	if (!data)
+		return -1;
+
+	spin_unlock(&init_mm.page_table_lock);
+	pte_clear(&init_mm, params->aligned_start, params->pte);
+	create_physical_mapping(params->aligned_start, params->start);
+	create_physical_mapping(params->end, params->aligned_end);
+	spin_lock(&init_mm.page_table_lock);
+	return 0;
+}
+
 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 			     unsigned long end)
 {
@@ -694,6 +739,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 	}
 }
 
+/*
+ * Helper to clear the pte and potentially split the mapping
+ */
+static void split_kernel_mapping(unsigned long addr, unsigned long end,
+		unsigned long size, pte_t *pte)
+{
+	unsigned long mask = ~(size - 1);
+	unsigned long aligned_start = addr & mask;
+	unsigned long aligned_end = addr + size;
+	struct change_mapping_params params;
+	bool split_region = false;
+
+	if ((end - addr) < size) {
+		/*
+		 * We're going to clear the PTE, but we have not yet
+		 * flushed the mapping, so it is time to remap and
+		 * flush. If the effects are visible outside the
+		 * processor, or if we are running in code close to
+		 * the mapping we cleared, we are in trouble.
+ */ + if (overlaps_kernel_text(aligned_start, addr) || + overlaps_kernel_text(end, aligned_end)) { + /* + * Hack, just return, don't pte_clear + */ + WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel " + "text, not splitting\n", addr, end); + return; + } + split_region = true; + } + + if (split_region) { + params.pte = pte; + params.start = addr; + params.end = end; + params.aligned_start = addr & ~(size - 1); + params.aligned_end = min_t(unsigned long, aligned_end, + (unsigned long)__va(memblock_end_of_DRAM())); + stop_machine(stop_machine_change_mapping, ¶ms, NULL); + return; + } + + pte_clear(&init_mm, addr, pte); +} + static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end) { @@ -709,13 +800,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr, continue; if (pmd_huge(*pmd)) { - if (!IS_ALIGNED(addr, PMD_SIZE) || - !IS_ALIGNED(next, PMD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pmd); + split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd); continue; } @@ -740,13 +825,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr, continue; if (pud_huge(*pud)) { - if (!IS_ALIGNED(addr, PUD_SIZE) || - !IS_ALIGNED(next, PUD_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pud); + split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud); continue; } @@ -772,13 +851,7 @@ static void remove_pagetable(unsigned long start, unsigned long end) continue; if (pgd_huge(*pgd)) { - if (!IS_ALIGNED(addr, PGDIR_SIZE) || - !IS_ALIGNED(next, PGDIR_SIZE)) { - WARN_ONCE(1, "%s: unaligned range\n", __func__); - continue; - } - - pte_clear(&init_mm, addr, (pte_t *)pgd); + split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd); continue; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index c9a623c2d8a27..28c980eb44222 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -82,6 +82,8 @@ unsigned long __pgd_index_size; EXPORT_SYMBOL(__pgd_index_size); unsigned long __pmd_cache_index; EXPORT_SYMBOL(__pmd_cache_index); +unsigned long __pud_cache_index; +EXPORT_SYMBOL(__pud_cache_index); unsigned long __pte_table_size; EXPORT_SYMBOL(__pte_table_size); unsigned long __pmd_table_size; @@ -471,6 +473,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0, if (old & PATB_HR) { asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : : "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid)); trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1); } else { asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : : diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 881ebd53ffc27..9b23f12e863cc 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c @@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, unsigned int psize; int ssize; real_pte_t rpte; - int i; + int i, offset; i = batch->index; @@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, psize = get_slice_psize(mm, addr); /* Mask the address for the correct page size */ addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1); + if (unlikely(psize == MMU_PAGE_16G)) + offset = PTRS_PER_PUD; + else + offset = PTRS_PER_PMD; #else BUG(); psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ @@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, 
unsigned long addr, * support 64k pages, this might be different from the * hardware page size encoded in the slice table. */ addr &= PAGE_MASK; + offset = PTRS_PER_PTE; } @@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, } WARN_ON(vsid == 0); vpn = hpt_vpn(addr, vsid, ssize); - rpte = __real_pte(__pte(pte), ptep); + rpte = __real_pte(__pte(pte), ptep, offset); /* * Check if we have an active batch on this CPU. If not, just diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c index 1a9a756b0b2f7..857580a78bbd8 100644 --- a/arch/powerpc/platforms/cell/spufs/backing_ops.c +++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c @@ -101,9 +101,9 @@ static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx, but first mark any pending interrupts as done so we don't get woken up unnecessarily */ - if (events & (POLLIN | POLLRDNORM)) { + if (events & (EPOLLIN | EPOLLRDNORM)) { if (stat & 0xff0000) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; else { ctx->csa.priv1.int_stat_class2_RW &= ~CLASS2_MAILBOX_INTR; @@ -111,9 +111,9 @@ static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx, CLASS2_ENABLE_MAILBOX_INTR; } } - if (events & (POLLOUT | POLLWRNORM)) { + if (events & (EPOLLOUT | EPOLLWRNORM)) { if (stat & 0x00ff00) - ret = POLLOUT | POLLWRNORM; + ret = EPOLLOUT | EPOLLWRNORM; else { ctx->csa.priv1.int_stat_class2_RW &= ~CLASS2_MAILBOX_THRESHOLD_INTR; diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index c1be486da8993..469bdd0b748f7 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -774,7 +774,7 @@ static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait) * that poll should not sleep. Will be fixed later. */ mutex_lock(&ctx->state_mutex); - mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); + mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM); spu_release(ctx); return mask; @@ -910,7 +910,7 @@ static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait) * that poll should not sleep. Will be fixed later. 
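The spufs hunks here are a mechanical rename from the legacy POLL* constants to the typed EPOLL* values that __poll_t expects. As a sketch of the resulting idiom in a generic poll handler (the device structure and readiness flags are hypothetical):

struct example_dev {				/* hypothetical device state */
	wait_queue_head_t waitq;
	bool data_ready;
	bool space_free;
};

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	struct example_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);

	if (dev->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable */
	if (dev->space_free)
		mask |= EPOLLOUT | EPOLLWRNORM;	/* writable */

	return mask;
}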
*/ mutex_lock(&ctx->state_mutex); - mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); + mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM); spu_release(ctx); return mask; @@ -1710,9 +1710,9 @@ static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait) mask = 0; if (free_elements & 0xffff) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; if (tagstatus & ctx->tagwait) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, free_elements, tagstatus, ctx->tagwait); @@ -2469,7 +2469,7 @@ static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait) return rc; if (spufs_switch_log_used(ctx) > 0) - mask |= POLLIN; + mask |= EPOLLIN; spu_release(ctx); diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c index fff58198b5b6e..ae9d24d31eed8 100644 --- a/arch/powerpc/platforms/cell/spufs/hw_ops.c +++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c @@ -70,17 +70,17 @@ static __poll_t spu_hw_mbox_stat_poll(struct spu_context *ctx, __poll_t events) but first mark any pending interrupts as done so we don't get woken up unnecessarily */ - if (events & (POLLIN | POLLRDNORM)) { + if (events & (EPOLLIN | EPOLLRDNORM)) { if (stat & 0xff0000) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; else { spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR); spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); } } - if (events & (POLLOUT | POLLWRNORM)) { + if (events & (EPOLLOUT | EPOLLWRNORM)) { if (stat & 0x00ff00) - ret = POLLOUT | POLLWRNORM; + ret = EPOLLOUT | EPOLLWRNORM; else { spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_THRESHOLD_INTR); diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c index dd4c9b8b8a81e..f6f55ab4980e7 100644 --- a/arch/powerpc/platforms/powernv/opal-imc.c +++ b/arch/powerpc/platforms/powernv/opal-imc.c @@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void) const struct cpumask *l_cpumask; get_online_cpus(); - for_each_online_node(nid) { + for_each_node_with_cpus(nid) { l_cpumask = cpumask_of_node(nid); - cpu = cpumask_first(l_cpumask); + cpu = cpumask_first_and(l_cpumask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + continue; opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, get_hard_smp_processor_id(cpu)); } diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c index c18de0a9b1bdb..4070bb4e9da4a 100644 --- a/arch/powerpc/platforms/powernv/opal-prd.c +++ b/arch/powerpc/platforms/powernv/opal-prd.c @@ -153,7 +153,7 @@ static __poll_t opal_prd_poll(struct file *file, poll_wait(file, &opal_prd_msg_wait, wait); if (!opal_msg_queue_empty()) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c index 2b3eb01ab1107..b7c53a51c31bb 100644 --- a/arch/powerpc/platforms/powernv/vas-window.c +++ b/arch/powerpc/platforms/powernv/vas-window.c @@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, rc = PTR_ERR(txwin->paste_kaddr); goto free_window; } + } else { + /* + * A user mapping must ensure that context switch issues + * CP_ABORT for this thread. + */ + rc = set_thread_uses_vas(); + if (rc) + goto free_window; } - /* - * Now that we have a send window, ensure context switch issues - * CP_ABORT for this thread. 
-	 */
-	rc = -EINVAL;
-	if (set_thread_uses_vas() < 0)
-		goto free_window;
-
 	set_vinst_win(vinst, txwin);
 
 	return txwin;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index dceb51454d8d2..652d3e96b812b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -36,6 +36,7 @@
 #include
 #include
 #include
+#include
 #include "pseries.h"
 #include "offline_states.h"
@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
 		BUG_ON(cpu_online(cpu));
 		set_cpu_present(cpu, false);
 		set_hard_smp_processor_id(cpu, -1);
+		update_numa_cpu_lookup_table(cpu, -1);
 		break;
 	}
 	if (cpu >= nr_cpu_ids)
@@ -340,8 +342,6 @@ static void pseries_remove_processor(struct device_node *np)
 	cpu_maps_update_done();
 }
 
-extern int find_and_online_cpu_nid(int cpu);
-
 static int dlpar_online_cpu(struct device_node *dn)
 {
 	int rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 81d8614e73790..5e1ef91501820 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
+/*
+ * Enable the hotplug interrupts late because processing them may touch other
+ * devices or systems (e.g. hugepages) that have not been initialized at the
+ * subsys stage.
+ */
+int __init init_ras_hotplug_IRQ(void)
+{
+	struct device_node *np;
+
+	/* Hotplug Events */
+	np = of_find_node_by_path("/event-sources/hot-plug-events");
+	if (np != NULL) {
+		if (dlpar_workqueue_init() == 0)
+			request_event_sources_irqs(np, ras_hotplug_interrupt,
+						   "RAS_HOTPLUG");
+		of_node_put(np);
+	}
+
+	return 0;
+}
+machine_late_initcall(pseries, init_ras_hotplug_IRQ);
+
 /*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
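The move works because initcall levels order boot-time setup: the remaining init_ras_IRQ() work keeps running at subsys time, while the hotplug-event wiring is deferred to the late level, by which point workqueues and hugepages are available. A hedged sketch of the two registration points (generic initcall macros shown; the pseries code above uses the machine_late_initcall variant):

static int __init example_subsys_setup(void)
{
	/* runs early, before many subsystems are fully initialized */
	return 0;
}
subsys_initcall(example_subsys_setup);

static int __init example_late_setup(void)
{
	/* runs near the end of boot, after subsystem initialization */
	return 0;
}
late_initcall(example_late_setup);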
@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void) of_node_put(np); } - /* Hotplug Events */ - np = of_find_node_by_path("/event-sources/hot-plug-events"); - if (np != NULL) { - if (dlpar_workqueue_init() == 0) - request_event_sources_irqs(np, ras_hotplug_interrupt, - "RAS_HOTPLUG"); - of_node_put(np); - } - /* EPOW Events */ np = of_find_node_by_path("/event-sources/epow-events"); if (np != NULL) { diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index ebc244b08d674..d22aeb0b69e10 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -42,6 +42,7 @@ static u32 xive_provision_chip_count; static u32 xive_queue_shift; static u32 xive_pool_vps = XIVE_INVALID_VP; static struct kmem_cache *xive_provision_cache; +static bool xive_has_single_esc; int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) { @@ -571,6 +572,10 @@ bool __init xive_native_init(void) break; } + /* Do we support single escalation */ + if (of_get_property(np, "single-escalation-support", NULL) != NULL) + xive_has_single_esc = true; + /* Configure Thread Management areas for KVM */ for_each_possible_cpu(cpu) kvmppc_set_xive_tima(cpu, r.start, tima); @@ -667,12 +672,15 @@ void xive_native_free_vp_block(u32 vp_base) } EXPORT_SYMBOL_GPL(xive_native_free_vp_block); -int xive_native_enable_vp(u32 vp_id) +int xive_native_enable_vp(u32 vp_id, bool single_escalation) { s64 rc; + u64 flags = OPAL_XIVE_VP_ENABLED; + if (single_escalation) + flags |= OPAL_XIVE_VP_SINGLE_ESCALATION; for (;;) { - rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0); + rc = opal_xive_set_vp_info(vp_id, flags, 0); if (rc != OPAL_BUSY) break; msleep(1); @@ -710,3 +718,9 @@ int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id) return 0; } EXPORT_SYMBOL_GPL(xive_native_get_vp_info); + +bool xive_native_has_single_escalation(void) +{ + return xive_has_single_esc; +} +EXPORT_SYMBOL_GPL(xive_native_has_single_escalation); diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index d9c4c93660491..091f1d0d0af19 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size); if (rc) { - pr_err("Error %lld getting queue info prio %d\n", rc, prio); + pr_err("Error %lld getting queue info CPU %d prio %d\n", rc, + target, prio); rc = -EIO; goto fail; } @@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio, /* Configure and enable the queue in HW */ rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order); if (rc) { - pr_err("Error %lld setting queue for prio %d\n", rc, prio); + pr_err("Error %lld setting queue for CPU %d prio %d\n", rc, + target, prio); rc = -EIO; } else { q->qpage = qpage; @@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc, if (IS_ERR(qpage)) return PTR_ERR(qpage); - return xive_spapr_configure_queue(cpu, q, prio, qpage, - xive_queue_shift); + return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu), + q, prio, qpage, xive_queue_shift); } static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, @@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, struct xive_q *q = &xc->queue[prio]; unsigned int alloc_order; long rc; + int hw_cpu = 
get_hard_smp_processor_id(cpu); - rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0); + rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0); if (rc) - pr_err("Error %ld setting queue for prio %d\n", rc, prio); + pr_err("Error %ld setting queue for CPU %d prio %d\n", rc, + hw_cpu, prio); alloc_order = xive_alloc_order(xive_queue_shift); free_pages((unsigned long)q->qpage, alloc_order); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0105ce28e246a..eaee7087886fa 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -112,7 +112,6 @@ config S390 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANTS_DYNAMIC_TASK_STRUCT - select ARCH_WANTS_PROT_NUMA_PROT_NONE select ARCH_WANTS_UBSAN_NO_NULL select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT @@ -540,6 +539,51 @@ config ARCH_RANDOM If unsure, say Y. +config KERNEL_NOBP + def_bool n + prompt "Enable modified branch prediction for the kernel by default" + help + If this option is selected the kernel will switch to a modified + branch prediction mode if the firmware interface is available. + The modified branch prediction mode improves the behaviour in + regard to speculative execution. + + With the option enabled the kernel parameter "nobp=0" or "nospec" + can be used to run the kernel in the normal branch prediction mode. + + With the option disabled the modified branch prediction mode is + enabled with the "nobp=1" kernel parameter. + + If unsure, say N. + +config EXPOLINE + def_bool n + prompt "Avoid speculative indirect branches in the kernel" + help + Compile the kernel with the expoline compiler options to guard + against kernel-to-user data leaks by avoiding speculative indirect + branches. + Requires a compiler with -mindirect-branch=thunk support for full + protection. The kernel may run slower. + + If unsure, say N. 
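For context, an expoline is the s390 analogue of a retpoline: with CONFIG_EXPOLINE and a compiler that supports -mindirect-branch=thunk, an ordinary indirect call such as the one sketched below is emitted through an execute-type thunk rather than a bare branch-to-register, so the branch target is not exposed to speculation. Illustrative C only; the structure is hypothetical and the transformation happens entirely in the compiler:

struct example_ops {				/* hypothetical ops table */
	int (*handler)(int arg);
};

static int example_dispatch(const struct example_ops *ops, int arg)
{
	/*
	 * This indirect call is the kind of branch the expoline
	 * option rewrites; the C source itself does not change.
	 */
	return ops->handler(arg);
}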
+ +choice + prompt "Expoline default" + depends on EXPOLINE + default EXPOLINE_FULL + +config EXPOLINE_OFF + bool "spectre_v2=off" + +config EXPOLINE_MEDIUM + bool "spectre_v2=auto" + +config EXPOLINE_FULL + bool "spectre_v2=on" + +endchoice + endmenu menu "Memory setup" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index fd691c4ff89ec..2ced3239cb847 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -78,6 +78,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack endif +ifdef CONFIG_EXPOLINE + ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y) + CC_FLAGS_EXPOLINE := -mindirect-branch=thunk + CC_FLAGS_EXPOLINE += -mfunction-return=thunk + CC_FLAGS_EXPOLINE += -mindirect-branch-table + export CC_FLAGS_EXPOLINE + cflags-y += $(CC_FLAGS_EXPOLINE) + endif +endif + ifdef CONFIG_FUNCTION_TRACER # make use of hotpatch feature if the compiler supports it cc_hotpatch := -mhotpatch=0,3 diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 10432607a5736..f9eddbca79d28 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -49,6 +49,30 @@ do { \ #define __smp_mb__before_atomic() barrier() #define __smp_mb__after_atomic() barrier() +/** + * array_index_mask_nospec - generate a mask for array_idx() that is + * ~0UL when the bounds check succeeds and 0 otherwise + * @index: array element index + * @size: number of elements in array + */ +#define array_index_mask_nospec array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + unsigned long mask; + + if (__builtin_constant_p(size) && size > 0) { + asm(" clgr %2,%1\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size-1), "d" (index) :"cc"); + return mask; + } + asm(" clgr %1,%2\n" + " slbgr %0,%0\n" + :"=d" (mask) : "d" (size), "d" (index) :"cc"); + return ~mask; +} + #include #endif /* __ASM_BARRIER_H */ diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 31e400c1a1f35..86e5b2fdee3c8 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -261,6 +261,11 @@ static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); } +static inline int test_and_clear_bit_inv(unsigned long nr, volatile unsigned long *ptr) +{ + return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr); +} + static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr) { return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr); diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h index a478eb61aaf7f..fb56fa3283a2c 100644 --- a/arch/s390/include/asm/css_chars.h +++ b/arch/s390/include/asm/css_chars.h @@ -20,7 +20,9 @@ struct css_general_char { u32 aif_tdd : 1; /* bit 56 */ u32 : 1; u32 qebsm : 1; /* bit 58 */ - u32 : 8; + u32 : 2; + u32 aiv : 1; /* bit 61 */ + u32 : 5; u32 aif_osa : 1; /* bit 67 */ u32 : 12; u32 eadm_rf : 1; /* bit 80 */ diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index eb5323161f11e..bb63b2afdf6fc 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h @@ -4,7 +4,7 @@ #include #include -#include +#include struct arqb { u64 data; diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h index fbe0c4be3cd8f..99c8ce30b3cd1 100644 --- a/arch/s390/include/asm/facility.h +++ b/arch/s390/include/asm/facility.h @@ -15,6 +15,24 @@ 
#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8) +static inline void __set_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] |= 0x80 >> (nr & 7); +} + +static inline void __clear_facility(unsigned long nr, void *facilities) +{ + unsigned char *ptr = (unsigned char *) facilities; + + if (nr >= MAX_FACILITY_BIT) + return; + ptr[nr >> 3] &= ~(0x80 >> (nr & 7)); +} + static inline int __test_facility(unsigned long nr, void *facilities) { unsigned char *ptr; diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index c1b0a9ac1dc81..afb0f08b80214 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -2,7 +2,7 @@ /* * definition for kernel virtual machines on s390 * - * Copyright IBM Corp. 2008, 2009 + * Copyright IBM Corp. 2008, 2018 * * Author(s): Carsten Otte */ @@ -183,6 +183,7 @@ struct kvm_s390_sie_block { #define ECA_IB 0x40000000 #define ECA_SIGPI 0x10000000 #define ECA_MVPGI 0x01000000 +#define ECA_AIV 0x00200000 #define ECA_VX 0x00020000 #define ECA_PROTEXCI 0x00002000 #define ECA_SII 0x00000001 @@ -228,7 +229,9 @@ struct kvm_s390_sie_block { __u8 epdx; /* 0x0069 */ __u8 reserved6a[2]; /* 0x006a */ __u32 todpr; /* 0x006c */ - __u8 reserved70[16]; /* 0x0070 */ +#define GISA_FORMAT1 0x00000001 + __u32 gd; /* 0x0070 */ + __u8 reserved74[12]; /* 0x0074 */ __u64 mso; /* 0x0080 */ __u64 msl; /* 0x0088 */ psw_t gpsw; /* 0x0090 */ @@ -317,18 +320,30 @@ struct kvm_vcpu_stat { u64 deliver_program_int; u64 deliver_io_int; u64 exit_wait_state; + u64 instruction_epsw; + u64 instruction_gs; + u64 instruction_io_other; + u64 instruction_lpsw; + u64 instruction_lpswe; u64 instruction_pfmf; + u64 instruction_ptff; + u64 instruction_sck; + u64 instruction_sckpf; u64 instruction_stidp; u64 instruction_spx; u64 instruction_stpx; u64 instruction_stap; - u64 instruction_storage_key; + u64 instruction_iske; + u64 instruction_ri; + u64 instruction_rrbe; + u64 instruction_sske; u64 instruction_ipte_interlock; - u64 instruction_stsch; - u64 instruction_chsc; u64 instruction_stsi; u64 instruction_stfl; + u64 instruction_tb; + u64 instruction_tpi; u64 instruction_tprot; + u64 instruction_tsch; u64 instruction_sie; u64 instruction_essa; u64 instruction_sthyi; @@ -354,6 +369,7 @@ struct kvm_vcpu_stat { u64 diagnose_258; u64 diagnose_308; u64 diagnose_500; + u64 diagnose_other; }; #define PGM_OPERATION 0x01 @@ -410,35 +426,35 @@ struct kvm_vcpu_stat { #define PGM_PER 0x80 #define PGM_CRYPTO_OPERATION 0x119 -/* irq types in order of priority */ +/* irq types in ascend order of priorities */ enum irq_types { - IRQ_PEND_MCHK_EX = 0, - IRQ_PEND_SVC, - IRQ_PEND_PROG, - IRQ_PEND_MCHK_REP, - IRQ_PEND_EXT_IRQ_KEY, - IRQ_PEND_EXT_MALFUNC, - IRQ_PEND_EXT_EMERGENCY, - IRQ_PEND_EXT_EXTERNAL, - IRQ_PEND_EXT_CLOCK_COMP, - IRQ_PEND_EXT_CPU_TIMER, - IRQ_PEND_EXT_TIMING, - IRQ_PEND_EXT_SERVICE, - IRQ_PEND_EXT_HOST, - IRQ_PEND_PFAULT_INIT, - IRQ_PEND_PFAULT_DONE, - IRQ_PEND_VIRTIO, - IRQ_PEND_IO_ISC_0, - IRQ_PEND_IO_ISC_1, - IRQ_PEND_IO_ISC_2, - IRQ_PEND_IO_ISC_3, - IRQ_PEND_IO_ISC_4, - IRQ_PEND_IO_ISC_5, - IRQ_PEND_IO_ISC_6, - IRQ_PEND_IO_ISC_7, - IRQ_PEND_SIGP_STOP, + IRQ_PEND_SET_PREFIX = 0, IRQ_PEND_RESTART, - IRQ_PEND_SET_PREFIX, + IRQ_PEND_SIGP_STOP, + IRQ_PEND_IO_ISC_7, + IRQ_PEND_IO_ISC_6, + IRQ_PEND_IO_ISC_5, + IRQ_PEND_IO_ISC_4, + IRQ_PEND_IO_ISC_3, + IRQ_PEND_IO_ISC_2, + IRQ_PEND_IO_ISC_1, + IRQ_PEND_IO_ISC_0, + IRQ_PEND_VIRTIO, + 
 	IRQ_PEND_PFAULT_DONE,
+	IRQ_PEND_PFAULT_INIT,
+	IRQ_PEND_EXT_HOST,
+	IRQ_PEND_EXT_SERVICE,
+	IRQ_PEND_EXT_TIMING,
+	IRQ_PEND_EXT_CPU_TIMER,
+	IRQ_PEND_EXT_CLOCK_COMP,
+	IRQ_PEND_EXT_EXTERNAL,
+	IRQ_PEND_EXT_EMERGENCY,
+	IRQ_PEND_EXT_MALFUNC,
+	IRQ_PEND_EXT_IRQ_KEY,
+	IRQ_PEND_MCHK_REP,
+	IRQ_PEND_PROG,
+	IRQ_PEND_SVC,
+	IRQ_PEND_MCHK_EX,
 	IRQ_PEND_COUNT
 };
@@ -516,9 +532,6 @@ struct kvm_s390_irq_payload {
 
 struct kvm_s390_local_interrupt {
 	spinlock_t lock;
-	struct kvm_s390_float_interrupt *float_int;
-	struct swait_queue_head *wq;
-	atomic_t *cpuflags;
 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_irq_payload irq;
 	unsigned long pending_irqs;
@@ -707,14 +720,50 @@ struct kvm_s390_crypto_cb {
 	struct kvm_s390_apcb1 apcb1;		/* 0x0080 */
 };
 
+struct kvm_s390_gisa {
+	union {
+		struct { /* common to all formats */
+			u32 next_alert;
+			u8 ipm;
+			u8 reserved01[2];
+			u8 iam;
+		};
+		struct { /* format 0 */
+			u32 next_alert;
+			u8 ipm;
+			u8 reserved01;
+			u8 : 6;
+			u8 g : 1;
+			u8 c : 1;
+			u8 iam;
+			u8 reserved02[4];
+			u32 airq_count;
+		} g0;
+		struct { /* format 1 */
+			u32 next_alert;
+			u8 ipm;
+			u8 simm;
+			u8 nimm;
+			u8 iam;
+			u8 aism[8];
+			u8 : 6;
+			u8 g : 1;
+			u8 c : 1;
+			u8 reserved03[11];
+			u32 airq_count;
+		} g1;
+	};
+};
+
 /*
- * sie_page2 has to be allocated as DMA because fac_list and crycb need
- * 31bit addresses in the sie control block.
+ * sie_page2 has to be allocated as DMA because fac_list, crycb and
+ * gisa need 31bit addresses in the sie control block.
  */
 struct sie_page2 {
 	__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64];	/* 0x0000 */
 	struct kvm_s390_crypto_cb crycb;		/* 0x0800 */
-	u8 reserved900[0x1000 - 0x900];			/* 0x0900 */
+	struct kvm_s390_gisa gisa;			/* 0x0900 */
+	u8 reserved920[0x1000 - 0x920];			/* 0x0920 */
 };
 
 struct kvm_s390_vsie {
@@ -761,6 +810,7 @@ struct kvm_arch{
 	struct kvm_s390_migration_state *migration_state;
 	/* subset of available cpu features enabled by user space */
 	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
+	struct kvm_s390_gisa *gisa;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index ec6592e8ba36e..5bc888841eafe 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -136,7 +136,11 @@ struct lowcore {
 	__u64	vdso_per_cpu_data;		/* 0x03b8 */
 	__u64	machine_flags;			/* 0x03c0 */
 	__u64	gmap;				/* 0x03c8 */
-	__u8	pad_0x03d0[0x0e00-0x03d0];	/* 0x03d0 */
+	__u8	pad_0x03d0[0x0400-0x03d0];	/* 0x03d0 */
+
+	/* br %r1 trampoline */
+	__u16	br_r1_trampoline;		/* 0x0400 */
+	__u8	pad_0x0402[0x0e00-0x0402];	/* 0x0402 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
@@ -151,7 +155,8 @@ struct lowcore {
 	__u8	pad_0x0e20[0x0f00-0x0e20];	/* 0x0e20 */
 
 	/* Extended facility list */
-	__u64	stfle_fac_list[32];		/* 0x0f00 */
+	__u64	stfle_fac_list[16];		/* 0x0f00 */
+	__u64	alt_stfle_fac_list[16];		/* 0x0f80 */
 	__u8	pad_0x1000[0x11b0-0x1000];	/* 0x1000 */
 
 	/* Pointer to the machine check extended save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 0000000000000..7df48e5cf36f7
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_call_disable;
+extern int nospec_return_disable;
+
+void nospec_init_branches(void);
+void nospec_call_revert(s32 *start, s32 *end);
+void nospec_return_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bfbfad4822897..7f2953c15c37b 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,6 +91,7 @@ void cpu_detect_mhz_feature(void);
 extern const struct seq_operations cpuinfo_op;
 extern int sysctl_ieee_emulation_warnings;
 extern void execve_tail(void);
+extern void __bpon(void);
 
 /*
  * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -377,6 +378,9 @@ extern void memcpy_absolute(void *, void *, size_t);
 	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
 } while (0)
 
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 6b1540337ed6c..0e1605538cd47 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -2,75 +2,10 @@
 #ifndef _RUNTIME_INSTR_H
 #define _RUNTIME_INSTR_H
 
-#define S390_RUNTIME_INSTR_START	0x1
-#define S390_RUNTIME_INSTR_STOP		0x2
-
-struct runtime_instr_cb {
-	__u64 rca;
-	__u64 roa;
-	__u64 rla;
-
-	__u32 v : 1;
-	__u32 s : 1;
-	__u32 k : 1;
-	__u32 h : 1;
-	__u32 a : 1;
-	__u32 reserved1 : 3;
-	__u32 ps : 1;
-	__u32 qs : 1;
-	__u32 pc : 1;
-	__u32 qc : 1;
-	__u32 reserved2 : 1;
-	__u32 g : 1;
-	__u32 u : 1;
-	__u32 l : 1;
-	__u32 key : 4;
-	__u32 reserved3 : 8;
-	__u32 t : 1;
-	__u32 rgs : 3;
-
-	__u32 m : 4;
-	__u32 n : 1;
-	__u32 mae : 1;
-	__u32 reserved4 : 2;
-	__u32 c : 1;
-	__u32 r : 1;
-	__u32 b : 1;
-	__u32 j : 1;
-	__u32 e : 1;
-	__u32 x : 1;
-	__u32 reserved5 : 2;
-	__u32 bpxn : 1;
-	__u32 bpxt : 1;
-	__u32 bpti : 1;
-	__u32 bpni : 1;
-	__u32 reserved6 : 2;
-
-	__u32 d : 1;
-	__u32 f : 1;
-	__u32 ic : 4;
-	__u32 dc : 4;
-
-	__u64 reserved7;
-	__u64 sf;
-	__u64 rsic;
-	__u64 reserved8;
-} __packed __aligned(8);
+#include <uapi/asm/runtime_instr.h>
 
 extern struct runtime_instr_cb runtime_instr_empty_cb;
 
-static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
-{
-	asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
-		: : "Q" (*cb));
-}
-
-static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
-{
-	asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
-		: "=Q" (*cb) : : "cc");
-}
-
 static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
 {
 	if (cb_prev)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index d3c1a8a2e3ad4..3cae9168f63c4 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -77,6 +77,7 @@ struct sclp_info {
 	unsigned char has_ibs : 1;
 	unsigned char has_skey : 1;
 	unsigned char has_kss : 1;
+	unsigned char has_gisaf : 1;
 	unsigned int ibc;
 	unsigned int mtid;
 	unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 25057c118d563..fe7b3f8f07913 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -21,7 +21,8 @@ struct sysinfo_1_1_1 {
 	unsigned char :8;
 	unsigned char ccr;
 	unsigned char cai;
-	char reserved_0[28];
+	char reserved_0[20];
+	unsigned long lic;
 	char manufacturer[16];
 	char type[4];
 	char reserved_1[12];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 25d6ec3aaddda..83ba57533ce6f 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -58,6 +58,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define TIF_GUARDED_STORAGE	4	/* load guarded storage control block */
 #define TIF_PATCH_PENDING	5	/* pending live patching update */
 #define TIF_PGSTE		6	/* New mm's will use 4K page tables */
+#define TIF_ISOLATE_BP		8	/* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST	9	/* Run KVM guests with isolated BP */
 
 #define TIF_31BIT		16	/* 32bit process */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
@@ -78,6 +80,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define _TIF_UPROBE		_BITUL(TIF_UPROBE)
 #define _TIF_GUARDED_STORAGE	_BITUL(TIF_GUARDED_STORAGE)
 #define _TIF_PATCH_PENDING	_BITUL(TIF_PATCH_PENDING)
+#define _TIF_ISOLATE_BP		_BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST	_BITUL(TIF_ISOLATE_BP_GUEST)
 
 #define _TIF_31BIT		_BITUL(TIF_31BIT)
 #define _TIF_SINGLE_STEP	_BITUL(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/uapi/asm/runtime_instr.h b/arch/s390/include/uapi/asm/runtime_instr.h
new file mode 100644
index 0000000000000..45c9ec984e6bd
--- /dev/null
+++ b/arch/s390/include/uapi/asm/runtime_instr.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _S390_UAPI_RUNTIME_INSTR_H
+#define _S390_UAPI_RUNTIME_INSTR_H
+
+#include <linux/types.h>
+
+#define S390_RUNTIME_INSTR_START	0x1
+#define S390_RUNTIME_INSTR_STOP		0x2
+
+struct runtime_instr_cb {
+	__u64 rca;
+	__u64 roa;
+	__u64 rla;
+
+	__u32 v : 1;
+	__u32 s : 1;
+	__u32 k : 1;
+	__u32 h : 1;
+	__u32 a : 1;
+	__u32 reserved1 : 3;
+	__u32 ps : 1;
+	__u32 qs : 1;
+	__u32 pc : 1;
+	__u32 qc : 1;
+	__u32 reserved2 : 1;
+	__u32 g : 1;
+	__u32 u : 1;
+	__u32 l : 1;
+	__u32 key : 4;
+	__u32 reserved3 : 8;
+	__u32 t : 1;
+	__u32 rgs : 3;
+
+	__u32 m : 4;
+	__u32 n : 1;
+	__u32 mae : 1;
+	__u32 reserved4 : 2;
+	__u32 c : 1;
+	__u32 r : 1;
+	__u32 b : 1;
+	__u32 j : 1;
+	__u32 e : 1;
+	__u32 x : 1;
+	__u32 reserved5 : 2;
+	__u32 bpxn : 1;
+	__u32 bpxt : 1;
+	__u32 bpti : 1;
+	__u32 bpni : 1;
+	__u32 reserved6 : 2;
+
+	__u32 d : 1;
+	__u32 f : 1;
+	__u32 ic : 4;
+	__u32 dc : 4;
+
+	__u64 reserved7;
+	__u64 sf;
+	__u64 rsic;
+	__u64 reserved8;
+} __packed __aligned(8);
+
+static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
+{
+	asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
+		: : "Q" (*cb));
+}
+
+static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
+{
+	asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
+		: "=Q" (*cb) : : "cc");
+}
+
+#endif /* _S390_UAPI_RUNTIME_INSTR_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 909bce65cb2bd..7f27e3da9709c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -29,6 +29,7 @@ UBSAN_SANITIZE_early.o	:= n
 #
 ifneq ($(CC_FLAGS_MARCH),-march=z900)
 CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_EXPOLINE)
 CFLAGS_als.o		+= -march=z900
 AFLAGS_REMOVE_head.o	+= $(CC_FLAGS_MARCH)
 AFLAGS_head.o		+= -march=z900
@@ -63,6 +64,9 @@ obj-y	+= entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 
 extra-y				+= head.o head64.o vmlinux.lds
 
+obj-$(CONFIG_EXPOLINE)		+= nospec-branch.o
+CFLAGS_REMOVE_expoline.o	+= $(CC_FLAGS_EXPOLINE)
+
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SCHED_TOPOLOGY)	+= topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 574e77622c049..22476135f7384 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -15,6 +15,29 @@ static int __init disable_alternative_instructions(char *str)
 
 early_param("noaltinstr", disable_alternative_instructions);
 
+static int __init nobp_setup_early(char *str)
+{
+	bool enabled;
+	int rc;
+
+	rc = kstrtobool(str, &enabled);
+	if (rc)
+		return rc;
+	if (enabled && test_facility(82))
+		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
+	else
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+	return 0;
+}
+early_param("nospec", nospec_setup_early);
+
 struct brcl_insn {
 	u16 opc;
 	s32 disp;
@@ -75,7 +98,8 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
 		instr = (u8 *)&a->instr_offset + a->instr_offset;
 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
 
-		if (!test_facility(a->facility))
+		if (!__test_facility(a->facility,
+				     S390_lowcore.alt_stfle_fac_list))
 			continue;
 
 		if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 497a920475918..ac707a9f729ea 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -193,6 +193,11 @@ static noinline __init void setup_facility_list(void)
 {
 	stfle(S390_lowcore.stfle_fac_list,
 	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+	memcpy(S390_lowcore.alt_stfle_fac_list,
+	       S390_lowcore.stfle_fac_list,
+	       sizeof(S390_lowcore.alt_stfle_fac_list));
+	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
 }
 
 static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6cd444d25545c..13a133a6015c9 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -107,6 +107,7 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	3f
1:	UPDATE_VTIME %r14,%r15,\timer
+	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
@@ -159,6 +160,130 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 		tm	off+\addr, \mask
 	.endm
 
+	.macro BPOFF
+	.pushsection .altinstr_replacement, "ax"
+660:	.long	0xb2e8c000
+	.popsection
+661:	.long	0x47000000
+	.pushsection .altinstructions, "a"
+	.long 661b - .
+	.long 660b - .
+	.word 82
+	.byte 4
+	.byte 4
+	.popsection
+	.endm
+
+	.macro BPON
+	.pushsection .altinstr_replacement, "ax"
+662:	.long	0xb2e8d000
+	.popsection
+663:	.long	0x47000000
+	.pushsection .altinstructions, "a"
+	.long 663b - .
+	.long 662b - .
+	.word 82
+	.byte 4
+	.byte 4
+	.popsection
+	.endm
+
+	.macro BPENTER tif_ptr,tif_mask
+	.pushsection .altinstr_replacement, "ax"
+662:	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
+	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
+	.popsection
+664:	TSTMSK	\tif_ptr,\tif_mask
+	jz	. + 8
+	.long	0xb2e8d000
+	.pushsection .altinstructions, "a"
+	.long 664b - .
+	.long 662b - .
+	.word 82
+	.byte 12
+	.byte 12
+	.popsection
+	.endm
+
+	.macro BPEXIT tif_ptr,tif_mask
+	TSTMSK	\tif_ptr,\tif_mask
+	.pushsection .altinstr_replacement, "ax"
+662:	jnz	. + 8
+	.long	0xb2e8d000
+	.popsection
+664:	jz	. + 8
+	.long	0xb2e8c000
+	.pushsection .altinstructions, "a"
+	.long 664b - .
+	.long 662b - .
+	.word 82
+	.byte 8
+	.byte 8
+	.popsection
+	.endm
+
+#ifdef CONFIG_EXPOLINE
+
+	.macro GEN_BR_THUNK name,reg,tmp
+	.section .text.\name,"axG",@progbits,\name,comdat
+	.globl \name
+	.hidden \name
+	.type \name,@function
+\name:
+	.cfi_startproc
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+	exrl	0,0f
+#else
+	larl	\tmp,0f
+	ex	0,0(\tmp)
+#endif
+	j	.
+0:	br	\reg
+	.cfi_endproc
+	.endm
+
+	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+	GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+	GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+
+	.macro BASR_R14_R9
+0:	brasl	%r14,__s390x_indirect_jump_r1use_r9
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	0b-.
+	.popsection
+	.endm
+
+	.macro BR_R1USE_R14
+0:	jg	__s390x_indirect_jump_r1use_r14
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	0b-.
+	.popsection
+	.endm
+
+	.macro BR_R11USE_R14
+0:	jg	__s390x_indirect_jump_r11use_r14
+	.pushsection .s390_indirect_branches,"a",@progbits
+	.long	0b-.
+	.popsection
+	.endm
+
+#else	/* CONFIG_EXPOLINE */
+
+	.macro BASR_R14_R9
+	basr	%r14,%r9
+	.endm
+
+	.macro BR_R1USE_R14
+	br	%r14
+	.endm
+
+	.macro BR_R11USE_R14
+	br	%r14
+	.endm
+
+#endif	/* CONFIG_EXPOLINE */
+
+
 	.section .kprobes.text, "ax"
.Ldummy:
 	/*
@@ -171,6 +296,11 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 	 */
 	nop	0
 
+ENTRY(__bpon)
+	.globl __bpon
+	BPON
+	BR_R1USE_R14
+
 /*
  * Scheduler resume function, called by switch_to
  *  gpr2 = (task_struct *) prev
@@ -193,9 +323,9 @@ ENTRY(__switch_to)
 	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
-	bzr	%r14
+	jz	0f
 	.insn	s,0xb2800000,__LC_LPP		# set program parameter
-	br	%r14
+0:	BR_R1USE_R14
 
.L__critical_start:
 
@@ -207,9 +337,11 @@ ENTRY(__switch_to)
 */
ENTRY(sie64a)
 	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
+	lg	%r12,__LC_CURRENT
 	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
 	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
 	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+	mvc	__SF_EMPTY+24(8,%r15),__TI_flags(%r12)	# copy thread flags
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
 	jno	.Lsie_load_guest_gprs
 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
@@ -226,8 +358,12 @@ ENTRY(sie64a)
 	jnz	.Lsie_skip
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	jo	.Lsie_skip			# exit if fp/vx regs changed
+	BPEXIT	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
 	sie	0(%r14)
+.Lsie_exit:
+	BPOFF
+	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
 	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
@@ -248,9 +384,15 @@ ENTRY(sie64a)
sie_exit:
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
+	xgr	%r0,%r0				# clear guest registers to
+	xgr	%r1,%r1				# prevent speculative use
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
 	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
-	br	%r14
+	BR_R1USE_R14
.Lsie_fault:
 	lghi	%r14,-EFAULT
 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
@@ -273,6 +415,7 @@ ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
+	BPOFF
 	lg	%r12,__LC_CURRENT
 	lghi	%r13,__TASK_thread
 	lghi	%r14,_PIF_SYSCALL
@@ -281,7 +424,10 @@ ENTRY(system_call)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
.Lsysc_vtime:
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled register to prevent speculative use
+	xgr	%r0,%r0
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
@@ -305,7 +451,7 @@ ENTRY(system_call)
 	lgf	%r9,0(%r8,%r10)			# get system call add.
 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
 	jnz	.Lsysc_tracesys
-	basr	%r14,%r9			# call sys_xxxx
+	BASR_R14_R9				# call sys_xxxx
 	stg	%r2,__PT_R2(%r11)		# store return value
 
.Lsysc_return:
@@ -317,6 +463,7 @@ ENTRY(system_call)
 	jnz	.Lsysc_work			# check for work
 	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
 	jnz	.Lsysc_work
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
@@ -489,7 +636,7 @@ ENTRY(system_call)
 	lmg	%r3,%r7,__PT_R3(%r11)
 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
 	lg	%r2,__PT_ORIG_GPR2(%r11)
-	basr	%r14,%r9		# call sys_xxx
+	BASR_R14_R9			# call sys_xxx
 	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
@@ -513,7 +660,7 @@ ENTRY(ret_from_fork)
 	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
 	la	%r2,0(%r10)
-	basr	%r14,%r9
+	BASR_R14_R9
 	j	.Lsysc_tracenogo
 
/*
@@ -522,6 +669,7 @@ ENTRY(kernel_thread_starter)
 
ENTRY(pgm_check_handler)
 	stpt	__LC_SYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_CURRENT
@@ -550,6 +698,7 @@ ENTRY(pgm_check_handler)
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
 	lg	%r15,__LC_KERNEL_STACK
 	lgr	%r14,%r12
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
@@ -561,6 +710,15 @@ ENTRY(pgm_check_handler)
4:	lgr	%r13,%r11
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -582,9 +740,9 @@ ENTRY(pgm_check_handler)
 	nill	%r10,0x007f
 	sll	%r10,2
 	je	.Lpgm_return
-	lgf	%r1,0(%r10,%r1)		# load address of handler routine
+	lgf	%r9,0(%r10,%r1)		# load address of handler routine
 	lgr	%r2,%r11		# pass pointer to pt_regs
-	basr	%r14,%r1		# branch to interrupt-handler
+	BASR_R14_R9			# branch to interrupt-handler
.Lpgm_return:
 	LOCKDEP_SYS_EXIT
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
@@ -620,12 +778,23 @@ ENTRY(io_int_handler)
 	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -660,9 +829,13 @@ ENTRY(io_int_handler)
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	.Lio_exit_kernel
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
 	lmg	%r11,%r15,__PT_R11(%r11)
 	lpswe	__LC_RETURN_PSW
.Lio_done:
@@ -833,12 +1006,23 @@ ENTRY(io_int_handler)
ENTRY(ext_int_handler)
 	STCK	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
+	BPOFF
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	lghi	%r1,__LC_EXT_PARAMS2
@@ -871,11 +1055,12 @@ ENTRY(psw_idle)
.Lpsw_idle_stcctm:
 #endif
 	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
+	BPON
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
 	lpswe	__SF_EMPTY(%r15)
-	br	%r14
+	BR_R1USE_R14
.Lpsw_idle_end:
 
/*
@@ -889,7 +1074,7 @@ ENTRY(save_fpu_regs)
 	lg	%r2,__LC_CURRENT
 	aghi	%r2,__TASK_thread
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bor	%r14
+	jo	.Lsave_fpu_regs_exit
 	stfpc	__THREAD_FPU_fpc(%r2)
 	lg	%r3,__THREAD_FPU_regs(%r2)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
@@ -916,7 +1101,8 @@ ENTRY(save_fpu_regs)
 	std	15,120(%r3)
.Lsave_fpu_regs_done:
 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
-	br	%r14
+.Lsave_fpu_regs_exit:
+	BR_R1USE_R14
.Lsave_fpu_regs_end:
 EXPORT_SYMBOL(save_fpu_regs)
 
@@ -934,7 +1120,7 @@ load_fpu_regs:
 	lg	%r4,__LC_CURRENT
 	aghi	%r4,__TASK_thread
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
-	bnor	%r14
+	jno	.Lload_fpu_regs_exit
 	lfpc	__THREAD_FPU_fpc(%r4)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
@@ -961,7 +1147,8 @@ load_fpu_regs:
 	ld	15,120(%r4)
.Lload_fpu_regs_done:
 	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
-	br	%r14
+.Lload_fpu_regs_exit:
+	BR_R1USE_R14
.Lload_fpu_regs_end:
.L__critical_end:
 
/*
@@ -971,6 +1158,7 @@ load_fpu_regs:
 */
ENTRY(mcck_int_handler)
 	STCK	__LC_MCCK_CLOCK
+	BPOFF
 	la	%r1,4095		# validate r1
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
 	sckc	__LC_CLOCK_COMPARATOR			# validate comparator
@@ -1046,6 +1234,16 @@ ENTRY(mcck_int_handler)
.Lmcck_skip:
 	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 	stmg	%r0,%r7,__PT_R0(%r11)
+	# clear user controlled registers to prevent speculative use
+	xgr	%r0,%r0
+	xgr	%r1,%r1
+	xgr	%r2,%r2
+	xgr	%r3,%r3
+	xgr	%r4,%r4
+	xgr	%r5,%r5
+	xgr	%r6,%r6
+	xgr	%r7,%r7
+	xgr	%r10,%r10
 	mvc	__PT_R8(64,%r11),0(%r14)
 	stmg	%r8,%r9,__PT_PSW(%r11)
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -1071,6 +1269,7 @@ ENTRY(mcck_int_handler)
 	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
+	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1166,7 +1365,7 @@ cleanup_critical:
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
 	jl	.Lcleanup_load_fpu_regs
-0:	br	%r14
+0:	BR_R11USE_R14
 
 	.align	8
.Lcleanup_table:
@@ -1197,11 +1396,12 @@ cleanup_critical:
 	clg	%r9,BASED(.Lsie_crit_mcck_length)
 	jh	1f
 	oi	__LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-1:	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
+1:	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	larl	%r9,sie_exit			# skip forward to sie_exit
-	br	%r14
+	BR_R11USE_R14
 #endif
 
.Lcleanup_system_call:
@@ -1254,7 +1454,7 @@ cleanup_critical:
 	stg	%r15,56(%r11)		# r15 stack pointer
 	# set new psw address and exit
 	larl	%r9,.Lsysc_do_svc
-	br	%r14
+	BR_R11USE_R14
.Lcleanup_system_call_insn:
 	.quad	system_call
 	.quad	.Lsysc_stmg
@@ -1266,7 +1466,7 @@ cleanup_critical:
 
.Lcleanup_sysc_tif:
 	larl	%r9,.Lsysc_tif
-	br	%r14
+	BR_R11USE_R14
 
.Lcleanup_sysc_restore:
 	# check if stpt has been executed
@@ -1283,14 +1483,14 @@ cleanup_critical:
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
-	br	%r14
+	BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
 	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
.Lcleanup_io_tif:
 	larl	%r9,.Lio_tif
-	br	%r14
+	BR_R11USE_R14
 
.Lcleanup_io_restore:
 	# check if stpt has been executed
@@ -1304,7 +1504,7 @@ cleanup_critical:
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
-	br	%r14
+	BR_R11USE_R14
.Lcleanup_io_restore_insn:
 	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
@@ -1357,17 +1557,17 @@ cleanup_critical:
 	# prepare return psw
 	nihh	%r8,0xfcfd		# clear irq & wait state bits
 	lg	%r9,48(%r11)		# return from psw_idle
-	br	%r14
+	BR_R11USE_R14
.Lcleanup_idle_insn:
 	.quad	.Lpsw_idle_lpsw
 
.Lcleanup_save_fpu_regs:
 	larl	%r9,save_fpu_regs
-	br	%r14
+	BR_R11USE_R14
 
.Lcleanup_load_fpu_regs:
 	larl	%r9,load_fpu_regs
-	br	%r14
+	BR_R11USE_R14
 
/*
 * Integer constants
 */
@@ -1387,7 +1587,6 @@ cleanup_critical:
.Lsie_crit_mcck_length:
 	.quad	.Lsie_skip - .Lsie_entry
 #endif
-
 	.section .rodata, "a"
 #define SYSCALL(esame,emu)	.long esame
 	.globl	sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index da5cc3b469aa1..34477c1aee6df 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -543,6 +543,7 @@ static struct kset *ipl_kset;
 
 static void __ipl_run(void *unused)
 {
+	__bpon();
 	diag308(DIAG308_LOAD_CLEAR, NULL);
 	if (MACHINE_IS_VM)
 		__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 943d13e90c980..60f60afa645c1 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -281,7 +281,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
 	 * is a BUG. The code path resides in the .kprobes.text
 	 * section and is executed with interrupts disabled.
 	 */
-	printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
+	pr_err("Invalid kprobe detected.\n");
 	dump_kprobe(p);
 	BUG();
 }
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b7abfad4fd7df..1fc6d1ff92d3e 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -19,6 +19,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #if 0
 #define DEBUGP printk
@@ -156,7 +158,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 	me->arch.got_offset = me->core_layout.size;
 	me->core_layout.size += me->arch.got_size;
 	me->arch.plt_offset = me->core_layout.size;
-	me->core_layout.size += me->arch.plt_size;
+	if (me->arch.plt_size) {
+		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable)
+			me->arch.plt_size += PLT_ENTRY_SIZE;
+		me->core_layout.size += me->arch.plt_size;
+	}
 	return 0;
 }
 
@@ -310,9 +316,21 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
 			unsigned int *ip;
 			ip = me->core_layout.base + me->arch.plt_offset +
 				info->plt_offset;
-			ip[0] = 0x0d10e310;	/* basr 1,0; lg 1,10(1); br 1 */
-			ip[1] = 0x100a0004;
-			ip[2] = 0x07f10000;
+			ip[0] = 0x0d10e310;	/* basr 1,0  */
+			ip[1] = 0x100a0004;	/* lg 1,10(1) */
+			if (IS_ENABLED(CONFIG_EXPOLINE) &&
+			    !nospec_call_disable) {
+				unsigned int *ij;
+				ij = me->core_layout.base +
+					me->arch.plt_offset +
+					me->arch.plt_size - PLT_ENTRY_SIZE;
+				ip[2] = 0xa7f40000 +	/* j __jump_r1 */
+					(unsigned int)(u16)
+					(((unsigned long) ij - 8 -
+					  (unsigned long) ip) / 2);
+			} else {
+				ip[2] = 0x07f10000;	/* br %r1 */
+			}
 			ip[3] = (unsigned int) (val >> 32);
 			ip[4] = (unsigned int) val;
 			info->plt_initialized = 1;
@@ -418,16 +436,42 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    struct module *me)
 {
 	const Elf_Shdr *s;
-	char *secstrings;
+	char *secstrings, *secname;
+	void *aseg;
+
+	if (IS_ENABLED(CONFIG_EXPOLINE) &&
+	    !nospec_call_disable && me->arch.plt_size) {
+		unsigned int *ij;
+
+		ij = me->core_layout.base + me->arch.plt_offset +
+			me->arch.plt_size - PLT_ENTRY_SIZE;
+		if (test_facility(35)) {
+			ij[0] = 0xc6000000;	/* exrl %r0,.+10	*/
+			ij[1] = 0x0005a7f4;	/* j .			*/
+			ij[2] = 0x000007f1;	/* br %r1		*/
+		} else {
+			ij[0] = 0x44000000 | (unsigned int)
+				offsetof(struct lowcore, br_r1_trampoline);
+			ij[1] = 0xa7f40000;	/* j .			*/
+		}
+	}
 
 	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
-		if (!strcmp(".altinstructions", secstrings + s->sh_name)) {
-			/* patch .altinstructions */
-			void *aseg = (void *)s->sh_addr;
+		aseg = (void *) s->sh_addr;
+		secname = secstrings + s->sh_name;
+
+		if (!strcmp(".altinstructions", secname))
+			/* patch .altinstructions */
 			apply_alternatives(aseg, aseg + s->sh_size);
-		}
+
+		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+		    (!strcmp(".nospec_call_table", secname)))
+			nospec_call_revert(aseg, aseg + s->sh_size);
+
+		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+		    (!strcmp(".nospec_return_table", secname)))
+			nospec_return_revert(aseg, aseg + s->sh_size);
 	}
 
 	jump_label_apply_nops(me);
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 0000000000000..69d7fcf481588
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <asm/nospec-branch.h>
+
+int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF);
+int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+	nospec_call_disable = 1;
+	nospec_return_disable = 1;
+	return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+static int __init spectre_v2_setup_early(char *str)
+{
+	if (str && !strncmp(str, "on", 2)) {
+		nospec_call_disable = 0;
+		nospec_return_disable = 0;
+	}
+	if (str && !strncmp(str, "off", 3)) {
+		nospec_call_disable = 1;
+		nospec_return_disable = 1;
+	}
+	if (str && !strncmp(str, "auto", 4)) {
+		nospec_call_disable = 0;
+		nospec_return_disable = 1;
+	}
+	return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+	enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+	u8 *instr, *thunk, *br;
+	u8 insnbuf[6];
+	s32 *epo;
+
+	/* Second part of the instruction replace is always a nop */
+	memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+	for (epo = start; epo < end; epo++) {
+		instr = (u8 *) epo + *epo;
+		if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+			type = BRCL_EXPOLINE;	/* brcl instruction */
+		else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+			type = BRASL_EXPOLINE;	/* brasl instruction */
+		else
+			continue;
+		thunk = instr + (*(int *)(instr + 2)) * 2;
+		if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+			/* exrl %r0,<target> */
+			br = thunk + (*(int *)(thunk + 2)) * 2;
+		else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+			 thunk[6] == 0x44 && thunk[7] == 0x00 &&
+			 (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+			 (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+			/* larl %rx,<target> + ex %r0,0(%rx) */
+			br = thunk + (*(int *)(thunk + 2)) * 2;
+		else
+			continue;
+		if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+			continue;
+		switch (type) {
+		case BRCL_EXPOLINE:
+			/* brcl to thunk, replace with br + nop */
+			insnbuf[0] = br[0];
+			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			break;
+		case BRASL_EXPOLINE:
+			/* brasl to thunk, replace with basr + nop */
+			insnbuf[0] = 0x0d;
+			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+			break;
+		}
+
+		s390_kernel_write(instr, insnbuf, 6);
+	}
+}
+
+void __init_or_module nospec_call_revert(s32 *start, s32 *end)
+{
+	if (nospec_call_disable)
+		__nospec_revert(start, end);
+}
+
+void __init_or_module nospec_return_revert(s32 *start, s32 *end)
+{
+	if (nospec_return_disable)
+		__nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+	nospec_call_revert(__nospec_call_start, __nospec_call_end);
+	nospec_return_revert(__nospec_return_start, __nospec_return_end);
+}
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 94f90cefbffcb..c5bc3f209652e 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -226,7 +226,7 @@ CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
 CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
 CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1);
 CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2);
-CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e9);
+CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e8);
 CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3);
 CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
 CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 5362fd868d0d4..6fe2e1875058b 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -197,3 +197,21 @@ const struct seq_operations cpuinfo_op = {
 	.stop	= c_stop,
 	.show	= show_cpuinfo,
 };
+
+int s390_isolate_bp(void)
+{
+	if (!test_facility(82))
+		return -EOPNOTSUPP;
+	set_thread_flag(TIF_ISOLATE_BP);
+	return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+	if (!test_facility(82))
+		return -EOPNOTSUPP;
+	set_thread_flag(TIF_ISOLATE_BP_GUEST);
+	return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 09f5bf0d5c0c8..125c7f6e87150 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,6 +18,8 @@
 #include
 #include
 
+#include "entry.h"
+
 /* empty control block to disable RI by loading it */
 struct runtime_instr_cb runtime_instr_empty_cb;
 
@@ -59,7 +61,13 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
 	cb->v = 1;
 }
 
-SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+/*
+ * The signum argument is unused. In older kernels it was used to
+ * specify a real-time signal. For backwards compatibility user space
+ * should pass a valid real-time signal number (the signum argument
+ * was checked in older kernels).
+ */
+SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
 {
 	struct runtime_instr_cb *cb;
 
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 793da97f9a6e5..a6a91f01a17a3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -68,6 +68,7 @@
 #include
 #include
 #include
+#include <asm/nospec-branch.h>
 #include "entry.h"
 
 /*
@@ -340,7 +341,9 @@ static void __init setup_lowcore(void)
 	lc->preempt_count = S390_lowcore.preempt_count;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       MAX_FACILITY_BIT/8);
+	       sizeof(lc->stfle_fac_list));
+	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+	       sizeof(lc->alt_stfle_fac_list));
 	nmi_alloc_boot_cpu(lc);
 	vdso_alloc_boot_cpu(lc);
 	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -377,6 +380,7 @@ static void __init setup_lowcore(void)
 	lc->spinlock_index = 0;
 	arch_spin_lock_setup(0);
 #endif
+	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
 
@@ -952,6 +956,8 @@ void __init setup_arch(char **cmdline_p)
 	set_preferred_console();
 
 	apply_alternative_instructions();
+	if (IS_ENABLED(CONFIG_EXPOLINE))
+		nospec_init_branches();
 
 	/* Setup zfcpdump support */
 	setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a919b2f0141da..a4a9fe1934e9f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -214,6 +214,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 	lc->spinlock_index = 0;
+	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	if (nmi_alloc_per_cpu(lc))
 		goto out;
 	if (vdso_alloc_per_cpu(lc))
@@ -266,7 +267,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
-	       MAX_FACILITY_BIT/8);
+	       sizeof(lc->stfle_fac_list));
+	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+	       sizeof(lc->alt_stfle_fac_list));
 	arch_spin_lock_setup(cpu);
 }
 
@@ -317,6 +320,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
 	mem_assign_absolute(lc->restart_data, (unsigned long) data);
 	mem_assign_absolute(lc->restart_source, source_cpu);
+	__bpon();
 	asm volatile(
 		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
 		"	brc	2,0b	# busy, try again\n"
@@ -901,6 +905,7 @@ void __cpu_die(unsigned int cpu)
 void __noreturn cpu_die(void)
 {
 	idle_task_exit();
+	__bpon();
 	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 	for (;;) ;
 }
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index a441cba8d165c..fc7e04c2195bb 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -89,6 +89,8 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
 	EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
 	seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
 	seq_printf(m, "Type: %-4.4s\n", info->type);
+	if (info->lic)
+		seq_printf(m, "LIC Identifier: %016lx\n", info->lic);
 	/*
 	 * Sigh: the model field has been renamed with System z9
 	 * to model_capacity and a new model field has been added
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 608cf2987d196..08d12cfaf0918 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -123,6 +123,20 @@ SECTIONS
 		*(.altinstr_replacement)
 	}
 
+	/*
+	 * Table with the patch locations to undo expolines
+	 */
+	.nospec_call_table : {
+		__nospec_call_start = . ;
+		*(.s390_indirect*)
+		__nospec_call_end = . ;
+	}
+	.nospec_return_table : {
+		__nospec_return_start = . ;
+		*(.s390_return*)
+		__nospec_return_end = . ;
+	}
+
 	/* early.c uses stsi, which requires page aligned data. */
 	. = ALIGN(PAGE_SIZE);
 	INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 9a4594e0a1ffe..a3dbd459cce91 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
+	select HAVE_KVM_VCPU_ASYNC_IOCTL
 	select HAVE_KVM_EVENTFD
 	select KVM_ASYNC_PF
 	select KVM_ASYNC_PF_SYNC
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 89aa114a2cbad..45634b3d2e0ae 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -257,6 +257,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
 	case 0x500:
 		return __diag_virtio_hypercall(vcpu);
 	default:
+		vcpu->stat.diagnose_other++;
 		return -EOPNOTSUPP;
 	}
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 024ad8bcc5165..aabf46f5f883d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -36,7 +36,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
 {
 	int c, scn;
 
-	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
 		return 0;
 
 	BUG_ON(!kvm_s390_use_sca_entries());
@@ -101,18 +101,17 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
 	return 0;
 }
 
 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc, expect;
 
 	if (!kvm_s390_use_sca_entries())
 		return;
-	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
 		struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -190,8 +189,8 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
 
 static inline int is_ioirq(unsigned long irq_type)
 {
-	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
-		(irq_type <= IRQ_PEND_IO_ISC_7));
+	return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
+		(irq_type <= IRQ_PEND_IO_ISC_0));
 }
 
 static uint64_t isc_to_isc_bits(int isc)
@@ -199,25 +198,59 @@ static uint64_t isc_to_isc_bits(int isc)
 	return (0x80 >> isc) << 24;
 }
 
+static inline u32 isc_to_int_word(u8 isc)
+{
+	return ((u32)isc << 27) | 0x80000000;
+}
+
 static inline u8 int_word_to_isc(u32 int_word)
 {
 	return (int_word & 0x38000000) >> 27;
 }
 
+/*
+ * To use atomic bitmap functions, we have to provide a bitmap address
+ * that is u64 aligned. However, the ipm might be u32 aligned.
+ * Therefore, we logically start the bitmap at the very beginning of the
+ * struct and fixup the bit number.
+ */
+#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
+
+static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+{
+	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
+}
+
+static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
+{
+	return READ_ONCE(gisa->ipm);
+}
+
+static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+{
+	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
+}
+
+static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+{
+	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
+}
+
 static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
 {
 	return vcpu->kvm->arch.float_int.pending_irqs |
-	       vcpu->arch.local_int.pending_irqs;
+	       vcpu->arch.local_int.pending_irqs |
+	       kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
 }
 
 static inline int isc_to_irq_type(unsigned long isc)
 {
-	return IRQ_PEND_IO_ISC_0 + isc;
+	return IRQ_PEND_IO_ISC_0 - isc;
 }
 
 static inline int irq_type_to_isc(unsigned long irq_type)
 {
-	return irq_type - IRQ_PEND_IO_ISC_0;
+	return IRQ_PEND_IO_ISC_0 - irq_type;
 }
 
 static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
@@ -278,20 +311,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
-	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
+	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
-	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
+	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-		      &vcpu->arch.sie_block->cpuflags);
+	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
+				      CPUSTAT_STOP_INT);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -302,17 +335,12 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
-{
-	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
-}
-
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
 	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
 		return;
 	else if (psw_ioint_disabled(vcpu))
-		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
+		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
 	else
 		vcpu->arch.sie_block->lctl |= LCTL_CR6;
 }
@@ -322,7 +350,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
 	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
 		return;
 	if (psw_extint_disabled(vcpu))
-		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
 	else
 		vcpu->arch.sie_block->lctl |= LCTL_CR0;
 }
@@ -340,7 +368,7 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
 {
 	if (kvm_s390_is_stop_irq_pending(vcpu))
-		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
 }
 
 /* Set interception request for non-deliverable interrupts */
@@ -897,18 +925,38 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
 	return rc ? -EFAULT : 0;
 }
 
+static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
+{
+	int rc;
+
+	rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
+	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
+	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
+	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
+	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+			     &vcpu->arch.sie_block->gpsw,
+			     sizeof(psw_t));
+	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+			    &vcpu->arch.sie_block->gpsw,
+			    sizeof(psw_t));
+	return rc ? -EFAULT : 0;
+}
+
 static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 				     unsigned long irq_type)
 {
 	struct list_head *isc_list;
 	struct kvm_s390_float_interrupt *fi;
 	struct kvm_s390_interrupt_info *inti = NULL;
+	struct kvm_s390_io_info io;
+	u32 isc;
 	int rc = 0;
 
 	fi = &vcpu->kvm->arch.float_int;
 
 	spin_lock(&fi->lock);
-	isc_list = &fi->lists[irq_type_to_isc(irq_type)];
+	isc = irq_type_to_isc(irq_type);
+	isc_list = &fi->lists[isc];
 	inti = list_first_entry_or_null(isc_list,
 					struct kvm_s390_interrupt_info,
 					list);
@@ -936,24 +984,31 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
 	spin_unlock(&fi->lock);
 
 	if (inti) {
-		rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
-				(u16 *)__LC_SUBCHANNEL_ID);
-		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
-				(u16 *)__LC_SUBCHANNEL_NR);
-		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
-				(u32 *)__LC_IO_INT_PARM);
-		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
-				(u32 *)__LC_IO_INT_WORD);
-		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
-				&vcpu->arch.sie_block->gpsw,
-				sizeof(psw_t));
-		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
-				&vcpu->arch.sie_block->gpsw,
-				sizeof(psw_t));
+		rc = __do_deliver_io(vcpu, &(inti->io));
 		kfree(inti);
+		goto out;
 	}
-	return rc ? -EFAULT : 0;
+
+	if (vcpu->kvm->arch.gisa &&
+	    kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
+		/*
+		 * in case an adapter interrupt was not delivered
+		 * in SIE context KVM will handle the delivery
+		 */
+		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
+		memset(&io, 0, sizeof(io));
+		io.io_int_word = isc_to_int_word(isc);
+		vcpu->stat.deliver_io_int++;
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+			KVM_S390_INT_IO(1, 0, 0, 0),
+			((__u32)io.subchannel_id << 16) |
+			io.subchannel_nr,
+			((__u64)io.io_int_parm << 32) |
+			io.io_int_word);
+		rc = __do_deliver_io(vcpu, &io);
+	}
+out:
+	return rc;
 }
 
 typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
@@ -1155,8 +1210,8 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
 
 	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
-		/* bits are in the order of interrupt priority */
-		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
+		/* bits are in the reverse order of interrupt priority */
+		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
 		if (is_ioirq(irq_type)) {
 			rc = __deliver_io(vcpu, irq_type);
 		} else {
@@ -1228,7 +1283,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1253,7 +1308,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1297,7 +1352,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
 		return -EBUSY;
 	stop->flags = irq->u.stop.flags;
-	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
 	return 0;
 }
 
@@ -1329,7 +1384,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1373,7 +1428,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1386,7 +1441,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
 	return 0;
 }
 
@@ -1416,20 +1471,86 @@ static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
 	return NULL;
 }
 
+static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
+						      u64 isc_mask, u32 schid)
+{
+	struct kvm_s390_interrupt_info *inti = NULL;
+	int isc;
+
+	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
+		if (isc_mask & isc_to_isc_bits(isc))
+			inti = get_io_int(kvm, isc, schid);
+	}
+	return inti;
+}
+
+static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
+{
+	unsigned long active_mask;
+	int isc;
+
+	if (schid)
+		goto out;
+	if (!kvm->arch.gisa)
+		goto out;
+
+	active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
+	while (active_mask) {
+		isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
+		if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
+			return isc;
+		clear_bit_inv(isc, &active_mask);
+	}
+out:
+	return -EINVAL;
+}
+
 /*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
+ * Take into account the interrupts pending in the interrupt list and in GISA.
+ *
+ * Note that for a guest that does not enable I/O interrupts
+ * but relies on TPI, a flood of classic interrupts may starve
+ * out adapter interrupts on the same isc. Linux does not do
+ * that, and it is possible to work around the issue by configuring
+ * different iscs for classic and adapter interrupts in the guest,
+ * but we may want to revisit this in the future.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
-	struct kvm_s390_interrupt_info *inti = NULL;
+	struct kvm_s390_interrupt_info *inti, *tmp_inti;
 	int isc;
 
-	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
-		if (isc_mask & isc_to_isc_bits(isc))
-			inti = get_io_int(kvm, isc, schid);
+	inti = get_top_io_int(kvm, isc_mask, schid);
+
+	isc = get_top_gisa_isc(kvm, isc_mask, schid);
+	if (isc < 0)
+		/* no AI in GISA */
+		goto out;
+
+	if (!inti)
+		/* AI in GISA but no classical IO int */
+		goto gisa_out;
+
+	/* both types of interrupts present */
+	if (int_word_to_isc(inti->io.io_int_word) <= isc) {
+		/* classical IO int with higher priority */
+		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+		goto out;
 	}
+gisa_out:
+	tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+	if (tmp_inti) {
+		tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
+		tmp_inti->io.io_int_word = isc_to_int_word(isc);
+		if (inti)
+			kvm_s390_reinject_io_int(kvm, inti);
+		inti = tmp_inti;
+	} else
+		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+out:
 	return inti;
 }
 
@@ -1517,6 +1638,15 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 	struct list_head *list;
 	int isc;
 
+	isc = int_word_to_isc(inti->io.io_int_word);
+
+	if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
+		VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
+		kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+		kfree(inti);
+		return 0;
+	}
+
 	fi = &kvm->arch.float_int;
 	spin_lock(&fi->lock);
 	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
@@ -1532,7 +1662,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 			inti->io.subchannel_id >> 8,
 			inti->io.subchannel_id >> 1 & 0x3,
 			inti->io.subchannel_nr);
-		isc = int_word_to_isc(inti->io.io_int_word);
 		list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
 		list_add_tail(&inti->list, list);
 		set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
@@ -1546,7 +1675,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
 	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
-	struct kvm_s390_local_interrupt *li;
 	struct kvm_vcpu *dst_vcpu;
 	int sigcpu, online_vcpus, nr_tries = 0;
 
@@ -1568,20 +1696,17 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 
 	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
-	li = &dst_vcpu->arch.local_int;
-	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
+		kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT); break; default: - atomic_or(CPUSTAT_EXT_INT, li->cpuflags); + kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT); break; } - spin_unlock(&li->lock); kvm_s390_vcpu_wakeup(dst_vcpu); } @@ -1820,6 +1945,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) for (i = 0; i < FIRQ_MAX_COUNT; i++) fi->counters[i] = 0; spin_unlock(&fi->lock); + kvm_s390_gisa_clear(kvm); }; static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) @@ -1847,6 +1973,22 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) max_irqs = len / sizeof(struct kvm_s390_irq); + if (kvm->arch.gisa && + kvm_s390_gisa_get_ipm(kvm->arch.gisa)) { + for (i = 0; i <= MAX_ISC; i++) { + if (n == max_irqs) { + /* signal userspace to try again */ + ret = -ENOMEM; + goto out_nolock; + } + if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) { + irq = (struct kvm_s390_irq *) &buf[n]; + irq->type = KVM_S390_INT_IO(1, 0, 0, 0); + irq->u.io.io_int_word = isc_to_int_word(i); + n++; + } + } + } fi = &kvm->arch.float_int; spin_lock(&fi->lock); for (i = 0; i < FIRQ_LIST_COUNT; i++) { @@ -1885,6 +2027,7 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) out: spin_unlock(&fi->lock); +out_nolock: if (!ret && n > 0) { if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) ret = -EFAULT; @@ -2245,7 +2388,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm, struct kvm_s390_interrupt s390int = { .type = KVM_S390_INT_IO(1, 0, 0, 0), .parm = 0, - .parm64 = (adapter->isc << 27) | 0x80000000, + .parm64 = isc_to_int_word(adapter->isc), }; int ret = 0; @@ -2687,3 +2830,28 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len) return n; } + +void kvm_s390_gisa_clear(struct kvm *kvm) +{ + if (kvm->arch.gisa) { + memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa)); + kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa; + VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa); + } +} + +void kvm_s390_gisa_init(struct kvm *kvm) +{ + if (css_general_characteristics.aiv) { + kvm->arch.gisa = &kvm->arch.sie_page2->gisa; + VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa); + kvm_s390_gisa_clear(kvm); + } +} + +void kvm_s390_gisa_destroy(struct kvm *kvm) +{ + if (!kvm->arch.gisa) + return; + kvm->arch.gisa = NULL; +} diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 1371dff2b90d1..ba4c7092335ad 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2,7 +2,7 @@ /* * hosting IBM Z kernel virtual machines (s390x) * - * Copyright IBM Corp. 2008, 2017 + * Copyright IBM Corp. 
2008, 2018 * * Author(s): Carsten Otte * Christian Borntraeger @@ -87,19 +87,31 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, { "exit_wait_state", VCPU_STAT(exit_wait_state) }, + { "instruction_epsw", VCPU_STAT(instruction_epsw) }, + { "instruction_gs", VCPU_STAT(instruction_gs) }, + { "instruction_io_other", VCPU_STAT(instruction_io_other) }, + { "instruction_lpsw", VCPU_STAT(instruction_lpsw) }, + { "instruction_lpswe", VCPU_STAT(instruction_lpswe) }, { "instruction_pfmf", VCPU_STAT(instruction_pfmf) }, + { "instruction_ptff", VCPU_STAT(instruction_ptff) }, { "instruction_stidp", VCPU_STAT(instruction_stidp) }, + { "instruction_sck", VCPU_STAT(instruction_sck) }, + { "instruction_sckpf", VCPU_STAT(instruction_sckpf) }, { "instruction_spx", VCPU_STAT(instruction_spx) }, { "instruction_stpx", VCPU_STAT(instruction_stpx) }, { "instruction_stap", VCPU_STAT(instruction_stap) }, - { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, + { "instruction_iske", VCPU_STAT(instruction_iske) }, + { "instruction_ri", VCPU_STAT(instruction_ri) }, + { "instruction_rrbe", VCPU_STAT(instruction_rrbe) }, + { "instruction_sske", VCPU_STAT(instruction_sske) }, { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) }, - { "instruction_stsch", VCPU_STAT(instruction_stsch) }, - { "instruction_chsc", VCPU_STAT(instruction_chsc) }, { "instruction_essa", VCPU_STAT(instruction_essa) }, { "instruction_stsi", VCPU_STAT(instruction_stsi) }, { "instruction_stfl", VCPU_STAT(instruction_stfl) }, + { "instruction_tb", VCPU_STAT(instruction_tb) }, + { "instruction_tpi", VCPU_STAT(instruction_tpi) }, { "instruction_tprot", VCPU_STAT(instruction_tprot) }, + { "instruction_tsch", VCPU_STAT(instruction_tsch) }, { "instruction_sthyi", VCPU_STAT(instruction_sthyi) }, { "instruction_sie", VCPU_STAT(instruction_sie) }, { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, @@ -118,12 +130,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) }, { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) }, { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) }, - { "diagnose_10", VCPU_STAT(diagnose_10) }, - { "diagnose_44", VCPU_STAT(diagnose_44) }, - { "diagnose_9c", VCPU_STAT(diagnose_9c) }, - { "diagnose_258", VCPU_STAT(diagnose_258) }, - { "diagnose_308", VCPU_STAT(diagnose_308) }, - { "diagnose_500", VCPU_STAT(diagnose_500) }, + { "instruction_diag_10", VCPU_STAT(diagnose_10) }, + { "instruction_diag_44", VCPU_STAT(diagnose_44) }, + { "instruction_diag_9c", VCPU_STAT(diagnose_9c) }, + { "instruction_diag_258", VCPU_STAT(diagnose_258) }, + { "instruction_diag_308", VCPU_STAT(diagnose_308) }, + { "instruction_diag_500", VCPU_STAT(diagnose_500) }, + { "instruction_diag_other", VCPU_STAT(diagnose_other) }, { NULL } }; @@ -576,7 +589,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_S390_GS: r = -EINVAL; mutex_lock(&kvm->lock); - if (atomic_read(&kvm->online_vcpus)) { + if (kvm->created_vcpus) { r = -EBUSY; } else if (test_facility(133)) { set_kvm_facility(kvm->arch.model.fac_mask, 133); @@ -1088,7 +1101,6 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm, struct kvm_device_attr *attr) { struct kvm_s390_vm_cpu_feat data; - int ret = -EBUSY; if (copy_from_user(&data, (void __user *)attr->addr, 
sizeof(data))) return -EFAULT; @@ -1098,13 +1110,18 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm, return -EINVAL; mutex_lock(&kvm->lock); - if (!atomic_read(&kvm->online_vcpus)) { - bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, - KVM_S390_VM_CPU_FEAT_NR_BITS); - ret = 0; + if (kvm->created_vcpus) { + mutex_unlock(&kvm->lock); + return -EBUSY; } + bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, + KVM_S390_VM_CPU_FEAT_NR_BITS); mutex_unlock(&kvm->lock); - return ret; + VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", + data.feat[0], + data.feat[1], + data.feat[2]); + return 0; } static int kvm_s390_set_processor_subfunc(struct kvm *kvm, @@ -1206,6 +1223,10 @@ static int kvm_s390_get_processor_feat(struct kvm *kvm, KVM_S390_VM_CPU_FEAT_NR_BITS); if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) return -EFAULT; + VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", + data.feat[0], + data.feat[1], + data.feat[2]); return 0; } @@ -1219,6 +1240,10 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm, KVM_S390_VM_CPU_FEAT_NR_BITS); if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) return -EFAULT; + VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", + data.feat[0], + data.feat[1], + data.feat[2]); return 0; } @@ -1911,6 +1936,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.dbf) goto out_err; + BUILD_BUG_ON(sizeof(struct sie_page2) != 4096); kvm->arch.sie_page2 = (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!kvm->arch.sie_page2) @@ -1981,6 +2007,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) spin_lock_init(&kvm->arch.start_stop_lock); kvm_s390_vsie_init(kvm); + kvm_s390_gisa_init(kvm); KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); return 0; @@ -2043,6 +2070,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_vcpus(kvm); sca_dispose(kvm); debug_unregister(kvm->arch.dbf); + kvm_s390_gisa_destroy(kvm); free_page((unsigned long)kvm->arch.sie_page2); if (!kvm_is_ucontrol(kvm)) gmap_remove(kvm->arch.gmap); @@ -2314,7 +2342,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { gmap_enable(vcpu->arch.enabled_gmap); - atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING); if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) __start_cpu_timer_accounting(vcpu); vcpu->cpu = cpu; @@ -2325,7 +2353,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) __stop_cpu_timer_accounting(vcpu); - atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING); vcpu->arch.enabled_gmap = gmap_get_enabled(); gmap_disable(vcpu->arch.enabled_gmap); @@ -2422,9 +2450,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_STOPPED); if (test_kvm_facility(vcpu->kvm, 78)) - atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2); else if (test_kvm_facility(vcpu->kvm, 8)) - atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED); kvm_s390_vcpu_setup_model(vcpu); @@ -2456,12 +2484,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) if (test_kvm_facility(vcpu->kvm, 139)) vcpu->arch.sie_block->ecd |= ECD_MEF; + if (vcpu->arch.sie_block->gd) { + vcpu->arch.sie_block->eca |= ECA_AIV; + VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", 
+ vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); + } vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) | SDNXC; vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; if (sclp.has_kss) - atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS); else vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; @@ -2508,9 +2541,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block->icpua = id; spin_lock_init(&vcpu->arch.local_int.lock); - vcpu->arch.local_int.float_int = &kvm->arch.float_int; - vcpu->arch.local_int.wq = &vcpu->wq; - vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; + vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa; + if (vcpu->arch.sie_block->gd && sclp.has_gisaf) + vcpu->arch.sie_block->gd |= GISA_FORMAT1; seqcount_init(&vcpu->arch.cputm_seqcount); rc = kvm_vcpu_init(vcpu, kvm, id); @@ -2567,7 +2600,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) * return immediately. */ void exit_sie(struct kvm_vcpu *vcpu) { - atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT); while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) cpu_relax(); } @@ -2720,47 +2753,70 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { + vcpu_load(vcpu); memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { + vcpu_load(vcpu); memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + vcpu_load(vcpu); + memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); + + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + vcpu_load(vcpu); + memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); + + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - if (test_fp_ctl(fpu->fpc)) - return -EINVAL; + int ret = 0; + + vcpu_load(vcpu); + + if (test_fp_ctl(fpu->fpc)) { + ret = -EINVAL; + goto out; + } vcpu->run->s.regs.fpc = fpu->fpc; if (MACHINE_HAS_VX) convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, (freg_t *) fpu->fprs); else memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); - return 0; + +out: + vcpu_put(vcpu); + return ret; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { + vcpu_load(vcpu); + /* make sure we have the latest values */ save_fpu_regs(); if (MACHINE_HAS_VX) @@ -2769,6 +2825,8 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) else memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); fpu->fpc = vcpu->run->s.regs.fpc; + + vcpu_put(vcpu); return 0; } @@ -2800,41 +2858,56 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, { int rc = 0; + vcpu_load(vcpu); + vcpu->guest_debug = 0; kvm_s390_clear_bp_data(vcpu); - if (dbg->control & ~VALID_GUESTDBG_FLAGS) - return -EINVAL; - if (!sclp.has_gpere) - return -EINVAL; + if (dbg->control & ~VALID_GUESTDBG_FLAGS) { + rc = -EINVAL; + goto out; + } + if 
(!sclp.has_gpere) { + rc = -EINVAL; + goto out; + } if (dbg->control & KVM_GUESTDBG_ENABLE) { vcpu->guest_debug = dbg->control; /* enforce guest PER */ - atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_P); if (dbg->control & KVM_GUESTDBG_USE_HW_BP) rc = kvm_s390_import_bp_data(vcpu, dbg); } else { - atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P); vcpu->arch.guestdbg.last_bp = 0; } if (rc) { vcpu->guest_debug = 0; kvm_s390_clear_bp_data(vcpu); - atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P); } +out: + vcpu_put(vcpu); return rc; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { + int ret; + + vcpu_load(vcpu); + /* CHECK_STOP and LOAD are not supported yet */ - return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : - KVM_MP_STATE_OPERATING; + ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : + KVM_MP_STATE_OPERATING; + + vcpu_put(vcpu); + return ret; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, @@ -2842,6 +2915,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, { int rc = 0; + vcpu_load(vcpu); + /* user space knows about this interface - let it control the state */ vcpu->kvm->arch.user_cpu_state_ctrl = 1; @@ -2859,12 +2934,13 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, rc = -ENXIO; } + vcpu_put(vcpu); return rc; } static bool ibs_enabled(struct kvm_vcpu *vcpu) { - return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; + return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS); } static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) @@ -2900,8 +2976,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { if (!ibs_enabled(vcpu)) { trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); - atomic_or(CPUSTAT_IBS, - &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS); } goto retry; } @@ -2909,8 +2984,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { if (ibs_enabled(vcpu)) { trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); - atomic_andnot(CPUSTAT_IBS, - &vcpu->arch.sie_block->cpuflags); + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS); } goto retry; } @@ -3390,9 +3464,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (kvm_run->immediate_exit) return -EINTR; + vcpu_load(vcpu); + if (guestdbg_exit_pending(vcpu)) { kvm_s390_prepare_debug_exit(vcpu); - return 0; + rc = 0; + goto out; } kvm_sigset_activate(vcpu); @@ -3402,7 +3479,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) } else if (is_vcpu_stopped(vcpu)) { pr_err_ratelimited("can't run stopped vcpu %d\n", vcpu->vcpu_id); - return -EINVAL; + rc = -EINVAL; + goto out; } sync_regs(vcpu, kvm_run); @@ -3432,6 +3510,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) kvm_sigset_deactivate(vcpu); vcpu->stat.exit_userspace++; +out: + vcpu_put(vcpu); return rc; } @@ -3560,7 +3640,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) __disable_ibs_on_all_vcpus(vcpu->kvm); } - atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED); /* * Another VCPU might have used IBS while we were offline. * Let's play safe and flush the VCPU at startup. 
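
The vcpu ioctl conversions above all follow one shape: each handler now brackets its body with vcpu_load()/vcpu_put() (the generic ioctl path already holds the vcpu mutex at this point) and funnels every exit through a single out: label so the put is never skipped. Below is a condensed sketch of that shape; kvm_s390_example_vcpu_ioctl is an invented name, not part of the patch:

#include <linux/kvm_host.h>

static int kvm_s390_example_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg)
{
	int rc = 0;

	vcpu_load(vcpu);	/* make vcpu state resident on this CPU */

	if (!arg) {		/* validate only after the load... */
		rc = -EINVAL;
		goto out;	/* ...so every exit reaches vcpu_put() */
	}
	/* work that touches vcpu->run or vcpu->arch goes here */
out:
	vcpu_put(vcpu);
	return rc;
}

kvm_arch_vcpu_ioctl_set_fpu() and kvm_arch_vcpu_ioctl_set_guest_debug() above are fully worked instances of this pattern; handlers that cannot fail, such as the gpr copies, simply pair the load and put with no label.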
@@ -3586,7 +3666,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ kvm_s390_clear_stop_irq(vcpu); - atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { @@ -3693,36 +3773,45 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, return r; } -long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; - int idx; - long r; switch (ioctl) { case KVM_S390_IRQ: { struct kvm_s390_irq s390irq; - r = -EFAULT; if (copy_from_user(&s390irq, argp, sizeof(s390irq))) - break; - r = kvm_s390_inject_vcpu(vcpu, &s390irq); - break; + return -EFAULT; + return kvm_s390_inject_vcpu(vcpu, &s390irq); } case KVM_S390_INTERRUPT: { struct kvm_s390_interrupt s390int; struct kvm_s390_irq s390irq; - r = -EFAULT; if (copy_from_user(&s390int, argp, sizeof(s390int))) - break; + return -EFAULT; if (s390int_to_s390irq(&s390int, &s390irq)) return -EINVAL; - r = kvm_s390_inject_vcpu(vcpu, &s390irq); - break; + return kvm_s390_inject_vcpu(vcpu, &s390irq); + } } + return -ENOIOCTLCMD; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + int idx; + long r; + + vcpu_load(vcpu); + + switch (ioctl) { case KVM_S390_STORE_STATUS: idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_s390_vcpu_store_status(vcpu, arg); @@ -3847,6 +3936,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp, default: r = -ENOTTY; } + + vcpu_put(vcpu); return r; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 5e46ba429bcb4..bd31b37b0e6f8 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -47,14 +47,29 @@ do { \ d_args); \ } while (0) +static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags) +{ + atomic_or(flags, &vcpu->arch.sie_block->cpuflags); +} + +static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags) +{ + atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags); +} + +static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags) +{ + return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags; +} + static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) { - return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; + return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED); } static inline int is_vcpu_idle(struct kvm_vcpu *vcpu) { - return test_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); + return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask); } static inline int kvm_is_ucontrol(struct kvm *kvm) @@ -367,6 +382,9 @@ int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *buf, int len); int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len); +void kvm_s390_gisa_init(struct kvm *kvm); +void kvm_s390_gisa_clear(struct kvm *kvm); +void kvm_s390_gisa_destroy(struct kvm *kvm); /* implemented in guestdbg.c */ void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 0714bfa56da0f..c4c4e157c0363 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -2,7 +2,7 @@ /* * handling privileged 
instructions * - * Copyright IBM Corp. 2008, 2013 + * Copyright IBM Corp. 2008, 2018 * * Author(s): Carsten Otte * Christian Borntraeger @@ -34,6 +34,8 @@ static int handle_ri(struct kvm_vcpu *vcpu) { + vcpu->stat.instruction_ri++; + if (test_kvm_facility(vcpu->kvm, 64)) { VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); vcpu->arch.sie_block->ecb3 |= ECB3_RI; @@ -53,6 +55,8 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu) static int handle_gs(struct kvm_vcpu *vcpu) { + vcpu->stat.instruction_gs++; + if (test_kvm_facility(vcpu->kvm, 133)) { VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)"); preempt_disable(); @@ -85,6 +89,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) u8 ar; u64 op2, val; + vcpu->stat.instruction_sck++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -203,14 +209,14 @@ int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu) trace_kvm_s390_skey_related_inst(vcpu); if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) && - !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)) + !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) return rc; rc = s390_enable_skey(); VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc); if (!rc) { - if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS) - atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags); + if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS)) + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS); else sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); @@ -222,7 +228,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu) { int rc; - vcpu->stat.instruction_storage_key++; rc = kvm_s390_skey_check_enable(vcpu); if (rc) return rc; @@ -242,6 +247,8 @@ static int handle_iske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + vcpu->stat.instruction_iske++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -274,6 +281,8 @@ static int handle_rrbe(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + vcpu->stat.instruction_rrbe++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -312,6 +321,8 @@ static int handle_sske(struct kvm_vcpu *vcpu) int reg1, reg2; int rc; + vcpu->stat.instruction_sske++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -392,6 +403,8 @@ static int handle_test_block(struct kvm_vcpu *vcpu) gpa_t addr; int reg2; + vcpu->stat.instruction_tb++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -424,6 +437,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu) u64 addr; u8 ar; + vcpu->stat.instruction_tpi++; + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -484,6 +499,8 @@ static int handle_tsch(struct kvm_vcpu *vcpu) struct kvm_s390_interrupt_info *inti = NULL; const u64 isc_mask = 0xffUL << 24; /* all iscs set */ + vcpu->stat.instruction_tsch++; + /* a valid schid has at least one bit set */ if (vcpu->run->s.regs.gprs[1]) inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, @@ -527,6 +544,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->ipa == 0xb235) return handle_tsch(vcpu); /* Handle in userspace. 
*/ + vcpu->stat.instruction_io_other++; return -EOPNOTSUPP; } else { /* @@ -592,6 +610,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) int rc; u8 ar; + vcpu->stat.instruction_lpsw++; + if (gpsw->mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -619,6 +639,8 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) int rc; u8 ar; + vcpu->stat.instruction_lpswe++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -828,6 +850,8 @@ static int handle_epsw(struct kvm_vcpu *vcpu) { int reg1, reg2; + vcpu->stat.instruction_epsw++; + kvm_s390_get_regs_rre(vcpu, ®1, ®2); /* This basically extracts the mask half of the psw. */ @@ -1332,6 +1356,8 @@ static int handle_sckpf(struct kvm_vcpu *vcpu) { u32 value; + vcpu->stat.instruction_sckpf++; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -1347,6 +1373,8 @@ static int handle_sckpf(struct kvm_vcpu *vcpu) static int handle_ptff(struct kvm_vcpu *vcpu) { + vcpu->stat.instruction_ptff++; + /* we don't emulate any control instructions yet */ kvm_s390_set_psw_cc(vcpu, 3); return 0; diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index c1f5cde2c878e..683036c1c92a8 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -20,22 +20,18 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u64 *reg) { - struct kvm_s390_local_interrupt *li; - int cpuflags; + const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED); int rc; int ext_call_pending; - li = &dst_vcpu->arch.local_int; - - cpuflags = atomic_read(li->cpuflags); ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu); - if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending) + if (!stopped && !ext_call_pending) rc = SIGP_CC_ORDER_CODE_ACCEPTED; else { *reg &= 0xffffffff00000000UL; if (ext_call_pending) *reg |= SIGP_STATUS_EXT_CALL_PENDING; - if (cpuflags & CPUSTAT_STOPPED) + if (stopped) *reg |= SIGP_STATUS_STOPPED; rc = SIGP_CC_STATUS_STORED; } @@ -208,11 +204,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u32 addr, u64 *reg) { - int flags; int rc; - flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); - if (!(flags & CPUSTAT_STOPPED)) { + if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; return SIGP_CC_STATUS_STORED; @@ -231,7 +225,6 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, static int __sigp_sense_running(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u64 *reg) { - struct kvm_s390_local_interrupt *li; int rc; if (!test_kvm_facility(vcpu->kvm, 9)) { @@ -240,8 +233,7 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, return SIGP_CC_STATUS_STORED; } - li = &dst_vcpu->arch.local_int; - if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) { + if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) { /* running */ rc = SIGP_CC_ORDER_CODE_ACCEPTED; } else { diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 7513483484778..ec772700ff965 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -28,13 +28,23 @@ struct vsie_page { * the same offset as that in struct sie_page! */ struct mcck_volatile_info mcck_info; /* 0x0200 */ - /* the pinned originial scb */ + /* + * The pinned original scb. Be aware that other VCPUs can modify + * it while we read from it. 
Values that are used for conditions or + * are reused conditionally, should be accessed via READ_ONCE. + */ struct kvm_s390_sie_block *scb_o; /* 0x0218 */ /* the shadow gmap in use by the vsie_page */ struct gmap *gmap; /* 0x0220 */ /* address of the last reported fault to guest2 */ unsigned long fault_addr; /* 0x0228 */ - __u8 reserved[0x0700 - 0x0230]; /* 0x0230 */ + /* calculated guest addresses of satellite control blocks */ + gpa_t sca_gpa; /* 0x0230 */ + gpa_t itdba_gpa; /* 0x0238 */ + gpa_t gvrd_gpa; /* 0x0240 */ + gpa_t riccbd_gpa; /* 0x0248 */ + gpa_t sdnx_gpa; /* 0x0250 */ + __u8 reserved[0x0700 - 0x0258]; /* 0x0258 */ struct kvm_s390_crypto_cb crycb; /* 0x0700 */ __u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */ }; @@ -140,12 +150,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; - u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U; + const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd); + const u32 crycb_addr = crycbd_o & 0x7ffffff8U; unsigned long *b1, *b2; u8 ecb3_flags; scb_s->crycbd = 0; - if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) + if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1)) return 0; /* format-1 is supported with message-security-assist extension 3 */ if (!test_kvm_facility(vcpu->kvm, 76)) @@ -183,12 +194,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; + /* READ_ONCE does not work on bitfields - use a temporary variable */ + const uint32_t __new_ibc = scb_o->ibc; + const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU; __u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU; scb_s->ibc = 0; /* ibc installed in g2 and requested for g3 */ - if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) { - scb_s->ibc = scb_o->ibc & 0x0fffU; + if (vcpu->kvm->arch.model.ibc && new_ibc) { + scb_s->ibc = new_ibc; /* takte care of the minimum ibc level of the machine */ if (scb_s->ibc < min_ibc) scb_s->ibc = min_ibc; @@ -259,6 +273,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; + /* READ_ONCE does not work on bitfields - use a temporary variable */ + const uint32_t __new_prefix = scb_o->prefix; + const uint32_t new_prefix = READ_ONCE(__new_prefix); + const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE; bool had_tx = scb_s->ecb & ECB_TE; unsigned long new_mso = 0; int rc; @@ -306,14 +324,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->icpua = scb_o->icpua; if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM)) - new_mso = scb_o->mso & 0xfffffffffff00000UL; + new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL; /* if the hva of the prefix changes, we have to remap the prefix */ - if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix) + if (scb_s->mso != new_mso || scb_s->prefix != new_prefix) prefix_unmapped(vsie_page); /* SIE will do mso/msl validity and exception checks for us */ scb_s->msl = scb_o->msl & 0xfffffffffff00000UL; scb_s->mso = new_mso; - scb_s->prefix = scb_o->prefix; + scb_s->prefix = new_prefix; /* We have to definetly flush the tlb if this scb never ran */ if (scb_s->ihcpu != 0xffffU) @@ -325,11 +343,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page 
*vsie_page) if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP)) scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT; /* transactional execution */ - if (test_kvm_facility(vcpu->kvm, 73)) { + if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) { /* remap the prefix is tx is toggled on */ - if ((scb_o->ecb & ECB_TE) && !had_tx) + if (!had_tx) prefix_unmapped(vsie_page); - scb_s->ecb |= scb_o->ecb & ECB_TE; + scb_s->ecb |= ECB_TE; } /* branch prediction */ if (test_kvm_facility(vcpu->kvm, 82)) @@ -473,46 +491,42 @@ static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa) /* unpin all blocks previously pinned by pin_blocks(), marking them dirty */ static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { - struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; hpa_t hpa; - gpa_t gpa; hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol; if (hpa) { - gpa = scb_o->scaol & ~0xfUL; - if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO)) - gpa |= (u64) scb_o->scaoh << 32; - unpin_guest_page(vcpu->kvm, gpa, hpa); + unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa); + vsie_page->sca_gpa = 0; scb_s->scaol = 0; scb_s->scaoh = 0; } hpa = scb_s->itdba; if (hpa) { - gpa = scb_o->itdba & ~0xffUL; - unpin_guest_page(vcpu->kvm, gpa, hpa); + unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa); + vsie_page->itdba_gpa = 0; scb_s->itdba = 0; } hpa = scb_s->gvrd; if (hpa) { - gpa = scb_o->gvrd & ~0x1ffUL; - unpin_guest_page(vcpu->kvm, gpa, hpa); + unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa); + vsie_page->gvrd_gpa = 0; scb_s->gvrd = 0; } hpa = scb_s->riccbd; if (hpa) { - gpa = scb_o->riccbd & ~0x3fUL; - unpin_guest_page(vcpu->kvm, gpa, hpa); + unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa); + vsie_page->riccbd_gpa = 0; scb_s->riccbd = 0; } hpa = scb_s->sdnxo; if (hpa) { - gpa = scb_o->sdnxo; - unpin_guest_page(vcpu->kvm, gpa, hpa); + unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa); + vsie_page->sdnx_gpa = 0; scb_s->sdnxo = 0; } } @@ -539,9 +553,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) gpa_t gpa; int rc = 0; - gpa = scb_o->scaol & ~0xfUL; + gpa = READ_ONCE(scb_o->scaol) & ~0xfUL; if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO)) - gpa |= (u64) scb_o->scaoh << 32; + gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32; if (gpa) { if (!(gpa & ~0x1fffUL)) rc = set_validity_icpt(scb_s, 0x0038U); @@ -557,11 +571,12 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) } if (rc) goto unpin; + vsie_page->sca_gpa = gpa; scb_s->scaoh = (u32)((u64)hpa >> 32); scb_s->scaol = (u32)(u64)hpa; } - gpa = scb_o->itdba & ~0xffUL; + gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; if (gpa && (scb_s->ecb & ECB_TE)) { if (!(gpa & ~0x1fffU)) { rc = set_validity_icpt(scb_s, 0x0080U); @@ -573,10 +588,11 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) rc = set_validity_icpt(scb_s, 0x0080U); goto unpin; } + vsie_page->itdba_gpa = gpa; scb_s->itdba = hpa; } - gpa = scb_o->gvrd & ~0x1ffUL; + gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL; if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x1310U); @@ -591,10 +607,11 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) rc = set_validity_icpt(scb_s, 0x1310U); goto unpin; } + vsie_page->gvrd_gpa = gpa; scb_s->gvrd = hpa; } - gpa = scb_o->riccbd & ~0x3fUL; + gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL; if 
(gpa && (scb_s->ecb3 & ECB3_RI)) { if (!(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x0043U); @@ -607,13 +624,14 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) goto unpin; } /* Validity 0x0044 will be checked by SIE */ + vsie_page->riccbd_gpa = gpa; scb_s->riccbd = hpa; } if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { unsigned long sdnxc; - gpa = scb_o->sdnxo & ~0xfUL; - sdnxc = scb_o->sdnxo & 0xfUL; + gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL; + sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL; if (!gpa || !(gpa & ~0x1fffUL)) { rc = set_validity_icpt(scb_s, 0x10b0U); goto unpin; @@ -634,6 +652,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) rc = set_validity_icpt(scb_s, 0x10b0U); goto unpin; } + vsie_page->sdnx_gpa = gpa; scb_s->sdnxo = hpa | sdnxc; } return 0; @@ -778,7 +797,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page) static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) { struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; - __u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U; + __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U; if (fac && test_kvm_facility(vcpu->kvm, 7)) { retry_vsie_icpt(vsie_page); @@ -904,7 +923,7 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu, * External calls have to lead to a kick of the vcpu and * therefore the vsie -> Simulate Wait state. */ - atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); + kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); /* * We have to adjust the g3 epoch by the g2 epoch. The epoch will * automatically be adjusted on tod clock changes via kvm_sync_clock. @@ -926,7 +945,7 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu, */ static void unregister_shadow_scb(struct kvm_vcpu *vcpu) { - atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); WRITE_ONCE(vcpu->arch.vsie_block, NULL); } diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 05d459b638f55..2c55a2b9d6c65 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -815,27 +815,17 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap, * @ptl: pointer to the spinlock pointer * * Returns a pointer to the locked pte for a guest address, or NULL - * - * Note: Can also be called for shadow gmaps. */ static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr, spinlock_t **ptl) { unsigned long *table; - if (gmap_is_shadow(gmap)) - spin_lock(&gmap->guest_table_lock); + BUG_ON(gmap_is_shadow(gmap)); /* Walk the gmap page table, lock and get pte pointer */ table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */ - if (!table || *table & _SEGMENT_ENTRY_INVALID) { - if (gmap_is_shadow(gmap)) - spin_unlock(&gmap->guest_table_lock); + if (!table || *table & _SEGMENT_ENTRY_INVALID) return NULL; - } - if (gmap_is_shadow(gmap)) { - *ptl = &gmap->guest_table_lock; - return pte_offset_map((pmd_t *) table, gaddr); - } return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl); } @@ -889,8 +879,6 @@ static void gmap_pte_op_end(spinlock_t *ptl) * -EFAULT if gaddr is invalid (or mapping for shadows is missing). * * Called with sg->mm->mmap_sem in read. - * - * Note: Can also be called for shadow gmaps. 
*/ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr, unsigned long len, int prot, unsigned long bits) @@ -900,6 +888,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr, pte_t *ptep; int rc; + BUG_ON(gmap_is_shadow(gmap)); while (len) { rc = -EAGAIN; ptep = gmap_pte_op_walk(gmap, gaddr, &ptl); @@ -960,7 +949,8 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify); * @val: pointer to the unsigned long value to return * * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT - * if reading using the virtual address failed. + * if reading using the virtual address failed. -EINVAL if called on a gmap + * shadow. * * Called with gmap->mm->mmap_sem in read. */ @@ -971,6 +961,9 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val) pte_t *ptep, pte; int rc; + if (gmap_is_shadow(gmap)) + return -EINVAL; + while (1) { rc = -EAGAIN; ptep = gmap_pte_op_walk(gmap, gaddr, &ptl); @@ -1028,18 +1021,17 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr, } /** - * gmap_protect_rmap - modify access rights to memory and create an rmap + * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap * @sg: pointer to the shadow guest address space structure * @raddr: rmap address in the shadow gmap * @paddr: address in the parent guest address space * @len: length of the memory area to protect - * @prot: indicates access rights: none, read-only or read-write * * Returns 0 if successfully protected and the rmap was created, -ENOMEM * if out of memory and -EFAULT if paddr is invalid. */ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr, - unsigned long paddr, unsigned long len, int prot) + unsigned long paddr, unsigned long len) { struct gmap *parent; struct gmap_rmap *rmap; @@ -1067,7 +1059,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr, ptep = gmap_pte_op_walk(parent, paddr, &ptl); if (ptep) { spin_lock(&sg->guest_table_lock); - rc = ptep_force_prot(parent->mm, paddr, ptep, prot, + rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ, PGSTE_VSIE_BIT); if (!rc) gmap_insert_rmap(sg, vmaddr, rmap); @@ -1077,7 +1069,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr, radix_tree_preload_end(); if (rc) { kfree(rmap); - rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot); + rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ); if (rc) return rc; continue; @@ -1616,7 +1608,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t, origin = r2t & _REGION_ENTRY_ORIGIN; offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; - rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ); + rc = gmap_protect_rmap(sg, raddr, origin + offset, len); spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 4); @@ -1699,7 +1691,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t, origin = r3t & _REGION_ENTRY_ORIGIN; offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; - rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ); + rc = gmap_protect_rmap(sg, raddr, origin + offset, len); spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 3); @@ -1783,7 +1775,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt, origin = sgt & _REGION_ENTRY_ORIGIN; offset = ((sgt & 
_REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE; len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset; - rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ); + rc = gmap_protect_rmap(sg, raddr, origin + offset, len); spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 2); @@ -1902,7 +1894,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt, /* Make pgt read-only in parent gmap page table (not the pgste) */ raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT; origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK; - rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ); + rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE); spin_lock(&sg->guest_table_lock); if (!rc) { table = gmap_table_walk(sg, saddr, 1); @@ -2005,7 +1997,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_page); * Called with sg->parent->shadow_lock. */ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr, - unsigned long gaddr, pte_t *pte) + unsigned long gaddr) { struct gmap_rmap *rmap, *rnext, *head; unsigned long start, end, bits, raddr; @@ -2090,7 +2082,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, spin_lock(&gmap->shadow_lock); list_for_each_entry_safe(sg, next, &gmap->children, list) - gmap_shadow_notify(sg, vmaddr, gaddr, pte); + gmap_shadow_notify(sg, vmaddr, gaddr); spin_unlock(&gmap->shadow_lock); } if (bits & PGSTE_IN_BIT) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 6bf594ace663e..8767e45f1b2b7 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -430,6 +430,8 @@ config SPARC_LEON depends on SPARC32 select USB_EHCI_BIG_ENDIAN_MMIO select USB_EHCI_BIG_ENDIAN_DESC + select USB_UHCI_BIG_ENDIAN_MMIO + select USB_UHCI_BIG_ENDIAN_DESC ---help--- If you say Y here if you are running on a SPARC-LEON processor. 
The LEON processor is a synthesizable VHDL model of the diff --git a/arch/sparc/include/uapi/asm/poll.h b/arch/sparc/include/uapi/asm/poll.h index 2a81e79aa3ea6..72356c9991257 100644 --- a/arch/sparc/include/uapi/asm/poll.h +++ b/arch/sparc/include/uapi/asm/poll.h @@ -2,31 +2,11 @@ #ifndef __SPARC_POLL_H #define __SPARC_POLL_H -#ifndef __KERNEL__ #define POLLWRNORM POLLOUT -#define POLLWRBAND (__force __poll_t)256 -#define POLLMSG (__force __poll_t)512 -#define POLLREMOVE (__force __poll_t)1024 -#define POLLRDHUP (__force __poll_t)2048 -#else -#define __ARCH_HAS_MANGLED_POLL -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; - /* bit 9 -> bit 8, bit 8 -> bit 2, bit 13 -> bit 11 */ - return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6) | - ((v & 0x2000) >> 2); - - -} - -static inline __poll_t demangle_poll(__u16 v) -{ - /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */ - return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) | - ((v & 4) << 6) | ((v & 0x800) << 2)); -} -#endif +#define POLLWRBAND 256 +#define POLLMSG 512 +#define POLLREMOVE 1024 +#define POLLRDHUP 2048 #include diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index c4d162a94be9d..d5f9a2d1da1ba 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -130,6 +130,7 @@ void mconsole_proc(struct mc_request *req) struct file *file; int first_chunk = 1; char *ptr = req->request.data; + loff_t pos = 0; ptr += strlen("proc"); ptr = skip_spaces(ptr); @@ -148,7 +149,7 @@ void mconsole_proc(struct mc_request *req) } do { - len = kernel_read(file, buf, PAGE_SIZE - 1, &file->f_pos); + len = kernel_read(file, buf, PAGE_SIZE - 1, &pos); if (len < 0) { mconsole_reply(req, "Read of file failed", 1, 0); goto out_free; diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore index aff152c87cf4b..5a82bac5e0bc7 100644 --- a/arch/x86/.gitignore +++ b/arch/x86/.gitignore @@ -1,6 +1,7 @@ boot/compressed/vmlinux tools/test_get_len tools/insn_sanity +tools/insn_decoder_test purgatory/kexec-purgatory.c purgatory/purgatory.ro diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 63bf349b2b24a..c1236b187824e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -423,12 +423,6 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -config X86_BIGSMP - bool "Support for big SMP systems with more than 8 CPUs" - depends on X86_32 && SMP - ---help--- - This option is needed for the systems that have more than 8 CPUs - config GOLDFISH def_bool y depends on X86_GOLDFISH @@ -460,6 +454,12 @@ config INTEL_RDT Say N if unsure. if X86_32 +config X86_BIGSMP + bool "Support for big SMP systems with more than 8 CPUs" + depends on SMP + ---help--- + This option is needed for the systems that have more than 8 CPUs + config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" default y @@ -949,25 +949,66 @@ config MAXSMP Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. +# +# The maximum number of CPUs supported: +# +# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT, +# and which can be configured interactively in the +# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range. +# +# The ranges are different on 32-bit and 64-bit kernels, depending on +# hardware capabilities and scalability features of the kernel. 
+# +# ( If MAXSMP is enabled we just use the highest possible value and disable +# interactive configuration. ) +# + +config NR_CPUS_RANGE_BEGIN + int + default NR_CPUS_RANGE_END if MAXSMP + default 1 if !SMP + default 2 + +config NR_CPUS_RANGE_END + int + depends on X86_32 + default 64 if SMP && X86_BIGSMP + default 8 if SMP && !X86_BIGSMP + default 1 if !SMP + +config NR_CPUS_RANGE_END + int + depends on X86_64 + default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK) + default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK) + default 1 if !SMP + +config NR_CPUS_DEFAULT + int + depends on X86_32 + default 32 if X86_BIGSMP + default 8 if SMP + default 1 if !SMP + +config NR_CPUS_DEFAULT + int + depends on X86_64 + default 8192 if MAXSMP + default 64 if SMP + default 1 if !SMP + config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP - range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 64 if SMP && X86_32 && X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 - range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 - default "1" if !SMP - default "8192" if MAXSMP - default "32" if SMP && X86_BIGSMP - default "8" if SMP && X86_32 - default "64" if SMP + range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END + default NR_CPUS_DEFAULT ---help--- This allows you to specify the maximum number of CPUs which this kernel will support. If CPUMASK_OFFSTACK is enabled, the maximum supported value is 8192, otherwise the maximum value is 512. The minimum value which makes sense is 2. - This is purely to save memory - each supported CPU adds - approximately eight kilobytes to the kernel image. + This is purely to save memory: each supported CPU adds about 8KB + to the kernel image. config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" @@ -1363,7 +1404,7 @@ config HIGHMEM4G config HIGHMEM64G bool "64GB" - depends on !M486 + depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6 select X86_PAE ---help--- Select this if you have a 32-bit processor and more than 4 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 65a9a4716e34f..8b8d2297d4867 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -374,7 +374,7 @@ config X86_TSC config X86_CMPXCHG64 def_bool y - depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM + depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 # this should be set for all -march=.. options where the compiler # generates cmov. 
@@ -385,7 +385,7 @@ config X86_CMOV config X86_MINIMUM_CPU_FAMILY int default "64" if X86_64 - default "6" if X86_32 && X86_P6_NOP + default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8) default "5" if X86_32 && X86_CMPXCHG64 default "4" diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c index 36870b26067a7..d08805032f019 100644 --- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c +++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c @@ -57,10 +57,12 @@ void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state) { unsigned int j; - state->lens[0] = 0; - state->lens[1] = 1; - state->lens[2] = 2; - state->lens[3] = 3; + /* initially all lanes are unused */ + state->lens[0] = 0xFFFFFFFF00000000; + state->lens[1] = 0xFFFFFFFF00000001; + state->lens[2] = 0xFFFFFFFF00000002; + state->lens[3] = 0xFFFFFFFF00000003; + state->unused_lanes = 0xFF03020100; for (j = 0; j < 4; j++) state->ldata[j].job_in_lane = NULL; diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 3f48f695d5e6a..dce7092ab24a2 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -97,80 +97,69 @@ For 32-bit we have the following conventions - kernel is built with #define SIZEOF_PTREGS 21*8 - .macro ALLOC_PT_GPREGS_ON_STACK - addq $-(15*8), %rsp - .endm +.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax + /* + * Push registers and sanitize registers of values that a + * speculation attack might otherwise want to exploit. The + * lower registers are likely clobbered well before they + * could be put to use in a speculative execution gadget. + * Interleave XOR with PUSH for better uop scheduling: + */ + pushq %rdi /* pt_regs->di */ + pushq %rsi /* pt_regs->si */ + pushq \rdx /* pt_regs->dx */ + pushq %rcx /* pt_regs->cx */ + pushq \rax /* pt_regs->ax */ + pushq %r8 /* pt_regs->r8 */ + xorq %r8, %r8 /* nospec r8 */ + pushq %r9 /* pt_regs->r9 */ + xorq %r9, %r9 /* nospec r9 */ + pushq %r10 /* pt_regs->r10 */ + xorq %r10, %r10 /* nospec r10 */ + pushq %r11 /* pt_regs->r11 */ + xorq %r11, %r11 /* nospec r11*/ + pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx*/ + pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp*/ + pushq %r12 /* pt_regs->r12 */ + xorq %r12, %r12 /* nospec r12*/ + pushq %r13 /* pt_regs->r13 */ + xorq %r13, %r13 /* nospec r13*/ + pushq %r14 /* pt_regs->r14 */ + xorq %r14, %r14 /* nospec r14*/ + pushq %r15 /* pt_regs->r15 */ + xorq %r15, %r15 /* nospec r15*/ + UNWIND_HINT_REGS +.endm - .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 - .if \r11 - movq %r11, 6*8+\offset(%rsp) - .endif - .if \r8910 - movq %r10, 7*8+\offset(%rsp) - movq %r9, 8*8+\offset(%rsp) - movq %r8, 9*8+\offset(%rsp) - .endif - .if \rax - movq %rax, 10*8+\offset(%rsp) - .endif - .if \rcx - movq %rcx, 11*8+\offset(%rsp) - .endif - movq %rdx, 12*8+\offset(%rsp) - movq %rsi, 13*8+\offset(%rsp) - movq %rdi, 14*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset extra=0 - .endm - .macro SAVE_C_REGS offset=0 - SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0 - SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1 - .endm - .macro SAVE_C_REGS_EXCEPT_R891011 - SAVE_C_REGS_HELPER 0, 1, 1, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RCX_R891011 - SAVE_C_REGS_HELPER 0, 1, 0, 0, 0 - .endm - .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11 - SAVE_C_REGS_HELPER 0, 0, 0, 1, 0 - .endm - - .macro 
SAVE_EXTRA_REGS offset=0 - movq %r15, 0*8+\offset(%rsp) - movq %r14, 1*8+\offset(%rsp) - movq %r13, 2*8+\offset(%rsp) - movq %r12, 3*8+\offset(%rsp) - movq %rbp, 4*8+\offset(%rsp) - movq %rbx, 5*8+\offset(%rsp) - UNWIND_HINT_REGS offset=\offset - .endm - - .macro POP_EXTRA_REGS +.macro POP_REGS pop_rdi=1 skip_r11rcx=0 popq %r15 popq %r14 popq %r13 popq %r12 popq %rbp popq %rbx - .endm - - .macro POP_C_REGS + .if \skip_r11rcx + popq %rsi + .else popq %r11 + .endif popq %r10 popq %r9 popq %r8 popq %rax + .if \skip_r11rcx + popq %rsi + .else popq %rcx + .endif popq %rdx popq %rsi + .if \pop_rdi popq %rdi - .endm - - .macro icebp - .byte 0xf1 - .endm + .endif +.endm /* * This is a sneaky trick to help the unwinder find pt_regs on the stack. The @@ -178,7 +167,7 @@ For 32-bit we have the following conventions - kernel is built with * is just setting the LSB, which makes it an invalid stack address and is also * a signal to the unwinder that it's a pt_regs pointer in disguise. * - * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts + * NOTE: This macro must be used *after* PUSH_AND_CLEAR_REGS because it corrupts * the original rbp. */ .macro ENCODE_FRAME_POINTER ptregs_offset=0 diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index abee6d2b9311e..16c2c022540d4 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -900,6 +900,9 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, hyperv_vector_handler) +BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR, + hyperv_reenlightenment_intr) + #endif /* CONFIG_HYPERV */ ENTRY(page_fault) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 4a9bef6aca346..8971bd64d515c 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -213,7 +213,7 @@ ENTRY(entry_SYSCALL_64) swapgs /* - * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it + * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it * is not required to switch CR3. */ movq %rsp, PER_CPU_VAR(rsp_scratch) @@ -227,22 +227,8 @@ ENTRY(entry_SYSCALL_64) pushq %rcx /* pt_regs->ip */ GLOBAL(entry_SYSCALL_64_after_hwframe) pushq %rax /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq %rdx /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq $-ENOSYS /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - pushq %r11 /* pt_regs->r11 */ - pushq %rbx /* pt_regs->rbx */ - pushq %rbp /* pt_regs->rbp */ - pushq %r12 /* pt_regs->r12 */ - pushq %r13 /* pt_regs->r13 */ - pushq %r14 /* pt_regs->r14 */ - pushq %r15 /* pt_regs->r15 */ - UNWIND_HINT_REGS + + PUSH_AND_CLEAR_REGS rax=$-ENOSYS TRACE_IRQS_OFF @@ -321,15 +307,7 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) syscall_return_via_sysret: /* rcx and r11 are already restored (see code above) */ UNWIND_HINT_EMPTY - POP_EXTRA_REGS - popq %rsi /* skip r11 */ - popq %r10 - popq %r9 - popq %r8 - popq %rax - popq %rsi /* skip rcx */ - popq %rdx - popq %rsi + POP_REGS pop_rdi=0 skip_r11rcx=1 /* * Now all regs are restored except RSP and RDI. 
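
PUSH_AND_CLEAR_REGS above does two jobs at once: it builds the GPR portion of the pt_regs frame and it sanitizes the just-saved registers, so no stale, possibly user-controlled value survives into kernel code where a speculative-execution gadget could consume it. A rough C analogue of what the macro achieves (illustrative only; the real code is the assembly macro in calling.h, and the field set here is a stand-in):

#include <string.h>

/* Stand-in for the GPR portion of pt_regs. */
struct gpr_frame {
	unsigned long di, si, dx, cx, ax, r8, r9, r10, r11;
	unsigned long bx, bp, r12, r13, r14, r15;
};

static void push_and_clear_regs(struct gpr_frame *frame,
				struct gpr_frame *live)
{
	*frame = *live;			/* the pushq sequence */
	memset(live, 0, sizeof(*live));	/* the interleaved xorq/xorl */
}

The interleaving of XOR with PUSH in the real macro is purely a uop-scheduling optimization; the zeroing itself, not its placement, is what closes the speculation window. The rdx=/rax= macro arguments exist so callers can push something other than the live register, as in the NMI path (rdx=(%rdx)) and the syscall path (rax=$-ENOSYS). POP_REGS is the symmetric teardown, with pop_rdi/skip_r11rcx knobs for the sysret path, which restores rcx and r11 separately.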
@@ -559,9 +537,7 @@ END(irq_entries_start) call switch_to_thread_stack 1: - ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER testb $3, CS(%rsp) @@ -622,15 +598,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) ud2 1: #endif - POP_EXTRA_REGS - popq %r11 - popq %r10 - popq %r9 - popq %r8 - popq %rax - popq %rcx - popq %rdx - popq %rsi + POP_REGS pop_rdi=0 /* * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. @@ -688,8 +656,7 @@ GLOBAL(restore_regs_and_return_to_kernel) ud2 1: #endif - POP_EXTRA_REGS - POP_C_REGS + POP_REGS addq $8, %rsp /* skip regs->orig_ax */ /* * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization @@ -908,7 +875,9 @@ ENTRY(\sym) pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif - ALLOC_PT_GPREGS_ON_STACK + /* Save all registers in pt_regs */ + PUSH_AND_CLEAR_REGS + ENCODE_FRAME_POINTER .if \paranoid < 2 testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ @@ -1121,9 +1090,7 @@ ENTRY(xen_failsafe_callback) addq $0x30, %rsp UNWIND_HINT_IRET_REGS pushq $-1 /* orig_ax = -1 => not a system call */ - ALLOC_PT_GPREGS_ON_STACK - SAVE_C_REGS - SAVE_EXTRA_REGS + PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit END(xen_failsafe_callback) @@ -1136,6 +1103,9 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ #if IS_ENABLED(CONFIG_HYPERV) apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ hyperv_callback_vector hyperv_vector_handler + +apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \ + hyperv_reenlightenment_vector hyperv_reenlightenment_intr #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK @@ -1160,16 +1130,13 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 #endif /* - * Save all registers in pt_regs, and switch gs if needed. + * Switch gs if needed. * Use slow, but surefire "are we in kernel?" check. * Return: ebx=0: need swapgs on exit, ebx=1: otherwise */ ENTRY(paranoid_entry) UNWIND_HINT_FUNC cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 - ENCODE_FRAME_POINTER 8 movl $1, %ebx movl $MSR_GS_BASE, %ecx rdmsr @@ -1208,21 +1175,18 @@ ENTRY(paranoid_exit) jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 .Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel END(paranoid_exit) /* - * Save all registers in pt_regs, and switch gs if needed. + * Switch gs if needed. * Return: EBX=0: came from user mode; EBX=1: otherwise */ ENTRY(error_entry) - UNWIND_HINT_FUNC + UNWIND_HINT_REGS offset=8 cld - SAVE_C_REGS 8 - SAVE_EXTRA_REGS 8 - ENCODE_FRAME_POINTER 8 - xorl %ebx, %ebx testb $3, CS+8(%rsp) jz .Lerror_kernelspace @@ -1403,22 +1367,7 @@ ENTRY(nmi) pushq 1*8(%rdx) /* pt_regs->rip */ UNWIND_HINT_IRET_REGS pushq $-1 /* pt_regs->orig_ax */ - pushq %rdi /* pt_regs->di */ - pushq %rsi /* pt_regs->si */ - pushq (%rdx) /* pt_regs->dx */ - pushq %rcx /* pt_regs->cx */ - pushq %rax /* pt_regs->ax */ - pushq %r8 /* pt_regs->r8 */ - pushq %r9 /* pt_regs->r9 */ - pushq %r10 /* pt_regs->r10 */ - pushq %r11 /* pt_regs->r11 */ - pushq %rbx /* pt_regs->rbx */ - pushq %rbp /* pt_regs->rbp */ - pushq %r12 /* pt_regs->r12 */ - pushq %r13 /* pt_regs->r13 */ - pushq %r14 /* pt_regs->r14 */ - pushq %r15 /* pt_regs->r15 */ - UNWIND_HINT_REGS + PUSH_AND_CLEAR_REGS rdx=(%rdx) ENCODE_FRAME_POINTER /* @@ -1628,7 +1577,8 @@ end_repeat_nmi: * frame to point back to repeat_nmi. 
*/ pushq $-1 /* ORIG_RAX: no syscall to restart */ - ALLOC_PT_GPREGS_ON_STACK + PUSH_AND_CLEAR_REGS + ENCODE_FRAME_POINTER /* * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit @@ -1652,8 +1602,7 @@ end_repeat_nmi: nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: - POP_EXTRA_REGS - POP_C_REGS + POP_REGS /* * Skip orig_ax and the "outermost" frame to point RSP at the "iret" diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 98d5358e4041a..fd65e016e4133 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -85,15 +85,25 @@ ENTRY(entry_SYSENTER_compat) pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorq %r8, %r8 /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorq %r9, %r9 /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorq %r10, %r10 /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorq %r12, %r12 /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorq %r13, %r13 /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorq %r14, %r14 /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorq %r15, %r15 /* nospec r15 */ cld /* @@ -214,15 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq %rbp /* pt_regs->cx (stashed in bp) */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorq %r8, %r8 /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorq %r9, %r9 /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorq %r10, %r10 /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp (will be overwritten) */ + xorl %ebp, %ebp /* nospec rbp */ pushq $0 /* pt_regs->r12 = 0 */ + xorq %r12, %r12 /* nospec r12 */ pushq $0 /* pt_regs->r13 = 0 */ + xorq %r13, %r13 /* nospec r13 */ pushq $0 /* pt_regs->r14 = 0 */ + xorq %r14, %r14 /* nospec r14 */ pushq $0 /* pt_regs->r15 = 0 */ + xorq %r15, %r15 /* nospec r15 */ /* * User mode is traced as though IRQs are on, and SYSENTER @@ -338,15 +358,25 @@ ENTRY(entry_INT80_compat) pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ + xorq %r8, %r8 /* nospec r8 */ pushq $0 /* pt_regs->r9 = 0 */ + xorq %r9, %r9 /* nospec r9 */ pushq $0 /* pt_regs->r10 = 0 */ + xorq %r10, %r10 /* nospec r10 */ pushq $0 /* pt_regs->r11 = 0 */ + xorq %r11, %r11 /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ + xorl %ebx, %ebx /* nospec rbx */ pushq %rbp /* pt_regs->rbp */ + xorl %ebp, %ebp /* nospec rbp */ pushq %r12 /* pt_regs->r12 */ + xorq %r12, %r12 /* nospec r12 */ pushq %r13 /* pt_regs->r13 */ + xorq %r13, %r13 /* nospec r13 */ pushq %r14 /* pt_regs->r14 */ + xorq %r14, %r14 /* nospec r14 */ pushq %r15 /* pt_regs->r15 */ + xorq %r15, %r15 /* nospec r15 */ cld /* diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 731153a4681e7..56457cb73448b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3559,7 +3559,7 @@ static int intel_snb_pebs_broken(int cpu) break; case INTEL_FAM6_SANDYBRIDGE_X: - switch (cpu_data(cpu).x86_mask) { + switch (cpu_data(cpu).x86_stepping) { case 6: rev = 0x618; break; case 7: rev = 0x70c; break; } diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 
ae64d0b69729d..cf372b90557ed 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1186,7 +1186,7 @@ void __init intel_pmu_lbr_init_atom(void) * on PMU interrupt */ if (boot_cpu_data.x86_model == 28 - && boot_cpu_data.x86_mask < 10) { + && boot_cpu_data.x86_stepping < 10) { pr_cont("LBR disabled due to erratum"); return; } diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c index a5604c3529308..408879b0c0d4e 100644 --- a/arch/x86/events/intel/p6.c +++ b/arch/x86/events/intel/p6.c @@ -234,7 +234,7 @@ static __initconst const struct x86_pmu p6_pmu = { static __init void p6_pmu_rdpmc_quirk(void) { - if (boot_cpu_data.x86_mask < 9) { + if (boot_cpu_data.x86_stepping < 9) { /* * PPro erratum 26; fixed in stepping 9 and above. */ diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index a0a206556919e..2edc49e7409ba 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -18,6 +18,8 @@ */ #include +#include +#include #include #include #include @@ -37,6 +39,7 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void) { return tsc_pg; } +EXPORT_SYMBOL_GPL(hv_get_tsc_page); static u64 read_hv_clock_tsc(struct clocksource *arg) { @@ -101,6 +104,115 @@ static int hv_cpu_init(unsigned int cpu) return 0; } +static void (*hv_reenlightenment_cb)(void); + +static void hv_reenlightenment_notify(struct work_struct *dummy) +{ + struct hv_tsc_emulation_status emu_status; + + rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); + + /* Don't issue the callback if TSC accesses are not emulated */ + if (hv_reenlightenment_cb && emu_status.inprogress) + hv_reenlightenment_cb(); +} +static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify); + +void hyperv_stop_tsc_emulation(void) +{ + u64 freq; + struct hv_tsc_emulation_status emu_status; + + rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); + emu_status.inprogress = 0; + wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status); + + rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq); + tsc_khz = div64_u64(freq, 1000); +} +EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation); + +static inline bool hv_reenlightenment_available(void) +{ + /* + * Check for required features and privileges to make TSC frequency + * change notifications work. 
+ */ + return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS && + ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE && + ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT; +} + +__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs) +{ + entering_ack_irq(); + + inc_irq_stat(irq_hv_reenlightenment_count); + + schedule_delayed_work(&hv_reenlightenment_work, HZ/10); + + exiting_irq(); +} + +void set_hv_tscchange_cb(void (*cb)(void)) +{ + struct hv_reenlightenment_control re_ctrl = { + .vector = HYPERV_REENLIGHTENMENT_VECTOR, + .enabled = 1, + .target_vp = hv_vp_index[smp_processor_id()] + }; + struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1}; + + if (!hv_reenlightenment_available()) { + pr_warn("Hyper-V: reenlightenment support is unavailable\n"); + return; + } + + hv_reenlightenment_cb = cb; + + /* Make sure callback is registered before we write to MSRs */ + wmb(); + + wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); + wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl)); +} +EXPORT_SYMBOL_GPL(set_hv_tscchange_cb); + +void clear_hv_tscchange_cb(void) +{ + struct hv_reenlightenment_control re_ctrl; + + if (!hv_reenlightenment_available()) + return; + + rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); + re_ctrl.enabled = 0; + wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl); + + hv_reenlightenment_cb = NULL; +} +EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb); + +static int hv_cpu_die(unsigned int cpu) +{ + struct hv_reenlightenment_control re_ctrl; + unsigned int new_cpu; + + if (hv_reenlightenment_cb == NULL) + return 0; + + rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); + if (re_ctrl.target_vp == hv_vp_index[cpu]) { + /* Reassign to some other online CPU */ + new_cpu = cpumask_any_but(cpu_online_mask, cpu); + + re_ctrl.target_vp = hv_vp_index[new_cpu]; + wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl)); + } + + return 0; +} + /* * This function is to be invoked early in the boot sequence after the * hypervisor has been detected. 
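
The reenlightenment machinery added above is consumed through three exports: set_hv_tscchange_cb() registers a callback, clear_hv_tscchange_cb() tears it down, and hyperv_stop_tsc_emulation() is for the callback to invoke once the guest has requeued its clocks. A sketch of a consumer, assuming the declarations live in asm/mshyperv.h; the module and callback names are invented, only the three calls come from the patch:

#include <linux/module.h>
#include <asm/mshyperv.h>

/* Hypothetical callback: fires after migration, while TSC accesses are
 * still being emulated at the old frequency. */
static void example_tsc_change_cb(void)
{
	/* ...recompute anything derived from tsc_khz here... */
	hyperv_stop_tsc_emulation();	/* rereads HV_X64_MSR_TSC_FREQUENCY */
}

static int __init example_init(void)
{
	set_hv_tscchange_cb(example_tsc_change_cb);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	clear_hv_tscchange_cb();
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

Note that hv_cpu_die() above keeps such a registration alive across CPU offlining by retargeting the reenlightenment MSR at another online CPU, so a consumer does not need its own hotplug handling.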
@@ -110,12 +222,19 @@ static int hv_cpu_init(unsigned int cpu) */ void hyperv_init(void) { - u64 guest_id; + u64 guest_id, required_msrs; union hv_x64_msr_hypercall_contents hypercall_msr; if (x86_hyper_type != X86_HYPER_MS_HYPERV) return; + /* Absolutely required MSRs */ + required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE | + HV_X64_MSR_VP_INDEX_AVAILABLE; + + if ((ms_hyperv.features & required_msrs) != required_msrs) + return; + /* Allocate percpu VP index */ hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index), GFP_KERNEL); @@ -123,7 +242,7 @@ void hyperv_init(void) return; if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online", - hv_cpu_init, NULL) < 0) + hv_cpu_init, hv_cpu_die) < 0) goto free_vp_index; /* diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 44f5d79d51056..11881726ed372 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -94,7 +94,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) if (boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86_model <= 0x05 && - boot_cpu_data.x86_mask < 0x0A) + boot_cpu_data.x86_stepping < 0x0A) return 1; else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E)) return 1; diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 30d4061460164..e1259f043ae99 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -40,7 +40,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, asm ("cmp %1,%2; sbb %0,%0;" :"=r" (mask) - :"r"(size),"r" (index) + :"g"(size),"r" (index) :"cc"); return mask; } diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 34d99af439944..6804d66427673 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -5,23 +5,20 @@ #include /* - * Since some emulators terminate on UD2, we cannot use it for WARN. - * Since various instruction decoders disagree on the length of UD1, - * we cannot use it either. So use UD0 for WARN. + * Although some emulators terminate on UD2, we use it for WARN(). * - * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas - * our kernel decoder thinks it takes a ModRM byte, which seems consistent - * with various things like the Intel SDM instruction encoding rules) + * Various instruction decoders/specs disagree on the encoding of + * UD0/UD1, so we avoid those.
*/ -#define ASM_UD0 ".byte 0x0f, 0xff" +#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */ #define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */ #define ASM_UD2 ".byte 0x0f, 0x0b" #define INSN_UD0 0xff0f #define INSN_UD2 0x0b0f -#define LEN_UD0 2 +#define LEN_UD2 2 #ifdef CONFIG_GENERIC_BUG @@ -77,7 +74,11 @@ do { \ unreachable(); \ } while (0) -#define __WARN_FLAGS(flags) _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags)) +#define __WARN_FLAGS(flags) \ +do { \ + _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \ + annotate_reachable(); \ +} while (0) #include diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 70eddb3922ff7..736771c9822ef 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -148,45 +148,46 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline __pure bool _static_cpu_has(u16 bit) { - asm_volatile_goto("1: jmp 6f\n" - "2:\n" - ".skip -(((5f-4f) - (2b-1b)) > 0) * " - "((5f-4f) - (2b-1b)),0x90\n" - "3:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 4f - .\n" /* repl offset */ - " .word %P1\n" /* always replace */ - " .byte 3b - 1b\n" /* src len */ - " .byte 5f - 4f\n" /* repl len */ - " .byte 3b - 2b\n" /* pad len */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "4: jmp %l[t_no]\n" - "5:\n" - ".previous\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 0\n" /* no replacement */ - " .word %P0\n" /* feature bit */ - " .byte 3b - 1b\n" /* src len */ - " .byte 0\n" /* repl len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .altinstr_aux,\"ax\"\n" - "6:\n" - " testb %[bitnum],%[cap_byte]\n" - " jnz %l[t_yes]\n" - " jmp %l[t_no]\n" - ".previous\n" - : : "i" (bit), "i" (X86_FEATURE_ALWAYS), - [bitnum] "i" (1 << (bit & 7)), - [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) - : : t_yes, t_no); - t_yes: - return true; - t_no: - return false; + asm_volatile_goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 4f - .\n" /* repl offset */ + " .word %P[always]\n" /* always replace */ + " .byte 3b - 1b\n" /* src len */ + " .byte 5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "4: jmp %l[t_no]\n" + "5:\n" + ".previous\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 0\n" /* no replacement */ + " .word %P[feature]\n" /* feature bit */ + " .byte 3b - 1b\n" /* src len */ + " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ + ".previous\n" + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" + : : [feature] "i" (bit), + [always] "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), + [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) + : : t_yes, t_no); +t_yes: + return true; +t_no: + return false; } #define static_cpu_has(bit) \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 1d9199e1c2ad4..0dfe4d3f74e24 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -210,6 +210,7 @@ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context 
switches */ +#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h index 51cc979dd3642..7c341a74ec8c4 100644 --- a/arch/x86/include/asm/hardirq.h +++ b/arch/x86/include/asm/hardirq.h @@ -38,6 +38,9 @@ typedef struct { #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN) unsigned int irq_hv_callback_count; #endif +#if IS_ENABLED(CONFIG_HYPERV) + unsigned int irq_hv_reenlightenment_count; +#endif } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 67421f649cfa1..e71c1120426bb 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -103,7 +103,12 @@ #endif #define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef -#define LOCAL_TIMER_VECTOR 0xee + +#if IS_ENABLED(CONFIG_HYPERV) +#define HYPERV_REENLIGHTENMENT_VECTOR 0xee +#endif + +#define LOCAL_TIMER_VECTOR 0xed #define NR_VECTORS 256 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 5167984313282..dd6f57a54a262 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -86,7 +86,7 @@ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ - | X86_CR4_SMAP | X86_CR4_PKE)) + | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) @@ -504,6 +504,7 @@ struct kvm_vcpu_arch { int mp_state; u64 ia32_misc_enable_msr; u64 smbase; + u64 smi_count; bool tpr_access_reporting; u64 ia32_xss; @@ -760,6 +761,15 @@ enum kvm_irqchip_mode { KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */ }; +struct kvm_sev_info { + bool active; /* SEV enabled guest */ + unsigned int asid; /* ASID used for this guest */ + unsigned int handle; /* SEV firmware handle */ + int fd; /* SEV device fd */ + unsigned long pages_locked; /* Number of pages locked */ + struct list_head regions_list; /* List of registered regions */ +}; + struct kvm_arch { unsigned int n_used_mmu_pages; unsigned int n_requested_mmu_pages; @@ -847,6 +857,8 @@ struct kvm_arch { bool x2apic_format; bool x2apic_broadcast_quirk_disabled; + + struct kvm_sev_info sev_info; }; struct kvm_vm_stat { @@ -883,7 +895,6 @@ struct kvm_vcpu_stat { u64 request_irq_exits; u64 irq_exits; u64 host_state_reload; - u64 efer_reload; u64 fpu_reload; u64 insn_emulation; u64 insn_emulation_fail; @@ -965,7 +976,7 @@ struct kvm_x86_ops { unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); - void (*tlb_flush)(struct kvm_vcpu *vcpu); + void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa); void (*run)(struct kvm_vcpu *vcpu); int (*handle_exit)(struct kvm_vcpu *vcpu); @@ -1017,6 +1028,7 @@ struct kvm_x86_ops { void (*handle_external_intr)(struct kvm_vcpu *vcpu); bool (*mpx_supported)(void); bool (*xsaves_supported)(void); + bool (*umip_emulated)(void); int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); @@ -1079,6 +1091,10 @@ struct kvm_x86_ops { int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate); int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase); int (*enable_smi_window)(struct kvm_vcpu *vcpu); + + int (*mem_enc_op)(struct kvm *kvm, void __user *argp); + int 
(*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); }; struct kvm_arch_async_pf { diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index b52af150cbd8e..25283f7eb299e 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h @@ -160,6 +160,7 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type) #define hv_set_synint_state(int_num, val) wrmsrl(int_num, val) void hyperv_callback_vector(void); +void hyperv_reenlightenment_vector(void); #ifdef CONFIG_TRACING #define trace_hyperv_callback_vector hyperv_callback_vector #endif @@ -316,18 +317,27 @@ void hyper_alloc_mmu(void); void hyperv_report_panic(struct pt_regs *regs, long err); bool hv_is_hyperv_initialized(void); void hyperv_cleanup(void); + +void hyperv_reenlightenment_intr(struct pt_regs *regs); +void set_hv_tscchange_cb(void (*cb)(void)); +void clear_hv_tscchange_cb(void); +void hyperv_stop_tsc_emulation(void); #else /* CONFIG_HYPERV */ static inline void hyperv_init(void) {} static inline bool hv_is_hyperv_initialized(void) { return false; } static inline void hyperv_cleanup(void) {} static inline void hyperv_setup_mmu_ops(void) {} +static inline void set_hv_tscchange_cb(void (*cb)(void)) {} +static inline void clear_hv_tscchange_cb(void) {} +static inline void hyperv_stop_tsc_emulation(void) {}; #endif /* CONFIG_HYPERV */ #ifdef CONFIG_HYPERV_TSCPAGE struct ms_hyperv_tsc_page *hv_get_tsc_page(void); -static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) +static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, + u64 *cur_tsc) { - u64 scale, offset, cur_tsc; + u64 scale, offset; u32 sequence; /* @@ -358,7 +368,7 @@ static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) scale = READ_ONCE(tsc_pg->tsc_scale); offset = READ_ONCE(tsc_pg->tsc_offset); - cur_tsc = rdtsc_ordered(); + *cur_tsc = rdtsc_ordered(); /* * Make sure we read sequence after we read all other values @@ -368,7 +378,14 @@ static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) } while (READ_ONCE(tsc_pg->tsc_sequence) != sequence); - return mul_u64_u64_shr(cur_tsc, scale, 64) + offset; + return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset; +} + +static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg) +{ + u64 cur_tsc; + + return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc); } #else @@ -376,5 +393,12 @@ static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void) { return NULL; } + +static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, + u64 *cur_tsc) +{ + BUG(); + return U64_MAX; +} #endif #endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index e520a1e6fc114..c9084dedfcfa2 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -397,6 +397,8 @@ #define MSR_K7_PERFCTR3 0xc0010007 #define MSR_K7_CLK_CTL 0xc001001b #define MSR_K7_HWCR 0xc0010015 +#define MSR_K7_HWCR_SMMLOCK_BIT 0 +#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT) #define MSR_K7_FID_VID_CTL 0xc0010041 #define MSR_K7_FID_VID_STATUS 0xc0010042 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4d57894635f24..76b058533e473 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -6,6 +6,7 @@ #include #include #include +#include #ifdef __ASSEMBLY__ @@ 
-164,10 +165,15 @@ static inline void vmexit_fill_RSB(void) static inline void indirect_branch_prediction_barrier(void) { - alternative_input("", - "call __ibp_barrier", - X86_FEATURE_USE_IBPB, - ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory")); + asm volatile(ALTERNATIVE("", + "movl %[msr], %%ecx\n\t" + "movl %[val], %%eax\n\t" + "movl $0, %%edx\n\t" + "wrmsr", + X86_FEATURE_USE_IBPB) + : : [msr] "i" (MSR_IA32_PRED_CMD), + [val] "i" (PRED_CMD_IBPB) + : "eax", "ecx", "edx", "memory"); } #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb2325..d652a38080659 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -52,10 +52,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); -#ifdef CONFIG_X86_MCE -#define arch_unmap_kpfn arch_unmap_kpfn -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 892df375b6155..554841fab717a 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void) { PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel); } -static inline void __flush_tlb_single(unsigned long addr) +static inline void __flush_tlb_one_user(unsigned long addr) { - PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr); + PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr); } static inline void flush_tlb_others(const struct cpumask *cpumask, diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 6ec54d01972dc..f624f1f10316c 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -217,7 +217,7 @@ struct pv_mmu_ops { /* TLB operations */ void (*flush_tlb_user)(void); void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); + void (*flush_tlb_one_user)(unsigned long addr); void (*flush_tlb_others)(const struct cpumask *cpus, const struct flush_tlb_info *info); diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index 8a3ee355b4222..92015c65fa2ac 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h @@ -22,4 +22,6 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end, void io_free_memtype(resource_size_t start, resource_size_t end); +bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn); + #endif /* _ASM_X86_PAT_H */ diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index e67c0620aec2a..e55466760ff8e 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -61,7 +61,7 @@ void paging_init(void); #define kpte_clear_flush(ptep, vaddr) \ do { \ pte_clear(&init_mm, (vaddr), (ptep)); \ - __flush_tlb_one((vaddr)); \ + __flush_tlb_one_kernel((vaddr)); \ } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 793bae7e7ce36..1bd9ed87606f4 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -91,7 +91,7 @@ struct cpuinfo_x86 { __u8 x86; /* CPU family */ __u8 x86_vendor; /* CPU vendor */ __u8 x86_model; - __u8 x86_mask; + __u8 x86_stepping; #ifdef CONFIG_X86_64 /* Number of 4K pages in DTLB/ITLB combined(in pages): */ int x86_tlbsize; @@ -109,7 +109,7 @@ struct cpuinfo_x86 { char x86_vendor_id[16]; char x86_model_id[64]; /* in KB - valid for CPUS which support this call: */ - int x86_cache_size; + 
unsigned int x86_cache_size; int x86_cache_alignment; /* In bytes */ /* Cache QoS architectural values: */ int x86_cache_max_rmid; /* max index */ @@ -977,7 +977,4 @@ bool xen_set_default_idle(void); void stop_this_cpu(void *dummy); void df_debug(struct pt_regs *regs, long error_code); - -void __ibp_barrier(void); - #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 461f53d27708a..a4189762b2667 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) void cpu_disable_common(void); void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); +void calculate_max_logical_packages(void); void native_smp_cpus_done(unsigned int max_cpus); void common_cpu_up(unsigned int cpunum, struct task_struct *tidle); int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 78dd9df881577..0487ac0548704 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -146,6 +146,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL #define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL +#define SVM_NESTED_CTL_NP_ENABLE BIT(0) +#define SVM_NESTED_CTL_SEV_ENABLE BIT(1) + struct __attribute__ ((__packed__)) vmcb_seg { u16 selector; u16 attrib; diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 2b8f18ca58747..84137c22fdfad 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) #else #define __flush_tlb() __native_flush_tlb() #define __flush_tlb_global() __native_flush_tlb_global() -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) +#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr) #endif static inline bool tlb_defer_switch_to_init_mm(void) @@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void) /* * flush one page in the user mapping */ -static inline void __native_flush_tlb_single(unsigned long addr) +static inline void __native_flush_tlb_one_user(unsigned long addr) { u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); @@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void) /* * flush one page in the kernel mapping */ -static inline void __flush_tlb_one(unsigned long addr) +static inline void __flush_tlb_one_kernel(unsigned long addr) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); - __flush_tlb_single(addr); + + /* + * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its + * paravirt equivalent. Even with PCID, this is sufficient: we only + * use PCID if we also use global PTEs for the kernel mapping, and + * INVLPG flushes global translations across all address spaces. + * + * If PTI is on, then the kernel is mapped with non-global PTEs, and + * __flush_tlb_one_user() will flush the given address for the current + * kernel address space and for its usermode counterpart, but it does + * not flush it for other address spaces. + */ + __flush_tlb_one_user(addr); if (!static_cpu_has(X86_FEATURE_PTI)) return; /* - * __flush_tlb_single() will have cleared the TLB entry for this ASID, - * but since kernel space is replicated across all, we must also - * invalidate all others. + * See above. We need to propagate the flush to all other address + * spaces. 
In principle, we only need to propagate it to kernelmode + * address spaces, but the extra bookkeeping we would need is not + * worth it. */ invalidate_other_asid(); } diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 1a5bfead93b41..197c2e6c73765 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -40,6 +40,9 @@ */ #define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11) +/* AccessReenlightenmentControls privilege */ +#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13) + /* * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available @@ -234,6 +237,30 @@ #define HV_X64_MSR_CRASH_PARAMS \ (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0)) +/* TSC emulation after migration */ +#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 + +struct hv_reenlightenment_control { + u64 vector:8; + u64 reserved1:8; + u64 enabled:1; + u64 reserved2:15; + u64 target_vp:32; +}; + +#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 +#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 + +struct hv_tsc_emulation_control { + u64 enabled:1; + u64 reserved:63; +}; + +struct hv_tsc_emulation_status { + u64 inprogress:1; + u64 reserved:63; +}; + #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12 #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \ diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 09cc06483bed4..7a2ade4aa2353 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -25,6 +25,7 @@ #define KVM_FEATURE_STEAL_TIME 5 #define KVM_FEATURE_PV_EOI 6 #define KVM_FEATURE_PV_UNHALT 7 +#define KVM_FEATURE_PV_TLB_FLUSH 9 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. 
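KVM_FEATURE_PV_TLB_FLUSH is advertised through KVM's paravirtual CPUID feature leaf, so a guest kernel can probe it with the existing kvm_para helpers before switching flush_tlb_others over to the paravirtual path (wired up later in this series in kvm_guest_init()). A sketch under that assumption; pv_tlb_flush_supported() is an illustrative name:

#include <linux/kvm_para.h>

/* Guest-side probe for the new feature bit (bit 9 of the feature leaf) */
static bool __init pv_tlb_flush_supported(void)
{
	return kvm_para_available() &&
	       kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH);
}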
@@ -51,6 +52,9 @@ struct kvm_steal_time { __u32 pad[11]; }; +#define KVM_VCPU_PREEMPTED (1 << 0) +#define KVM_VCPU_FLUSH_TLB (1 << 1) + #define KVM_CLOCK_PAIRING_WALLCLOCK 0 struct kvm_clock_pairing { __s64 sec; diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ec3a286163c37..2aa92094b59d4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -1625,6 +1626,8 @@ int __init acpi_boot_init(void) if (!acpi_noirq) x86_init.pci.init = pci_acpi_init; + /* Do not enable ACPI SPCR console by default */ + acpi_parse_spcr(earlycon_acpi_spcr_enable, false); return 0; } diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6db28f17ff288..c88e0b127810f 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -235,7 +235,7 @@ int amd_cache_northbridges(void) if (boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model >= 0x8 && (boot_cpu_data.x86_model > 0x9 || - boot_cpu_data.x86_mask >= 0x1)) + boot_cpu_data.x86_stepping >= 0x1)) amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE; if (boot_cpu_data.x86 == 0x15) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 25ddf02598d20..b203af0855b57 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -546,7 +546,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); static u32 hsx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x3a; /* EP */ case 0x04: return 0x0f; /* EX */ } @@ -556,7 +556,7 @@ static u32 hsx_deadline_rev(void) static u32 bdx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x02: return 0x00000011; case 0x03: return 0x0700000e; case 0x04: return 0x0f00000c; @@ -568,7 +568,7 @@ static u32 bdx_deadline_rev(void) static u32 skx_deadline_rev(void) { - switch (boot_cpu_data.x86_mask) { + switch (boot_cpu_data.x86_stepping) { case 0x03: return 0x01000136; case 0x04: return 0x02000014; } diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 46b675aaf20b8..f11910b44638c 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) uv_gre_table = gre; for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { + unsigned long size = ((unsigned long)(gre->limit - lgre) + << UV_GAM_RANGE_SHFT); + int order = 0; + char suffix[] = " KMGTPE"; + + while (size > 9999 && order < sizeof(suffix)) { + size /= 1024; + order++; + } + if (!index) { pr_info("UV: GAM Range Table...\n"); pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); } - pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", + pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, - ((unsigned long)(gre->limit - lgre)) >> - (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ + size, suffix[order], gre->type, gre->nasid, gre->sockid, gre->pnode); lgre = gre->limit; diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index ab18653420029..dfcbe6924eafa 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -1515,7 +1515,7 @@ static __poll_t do_poll(struct file *fp, poll_table *wait) return 0; poll_wait(fp, &apm_waitqueue, wait); if 
(!queue_empty(as)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } @@ -2389,6 +2389,7 @@ static int __init apm_init(void) if (HZ != 100) idle_period = (idle_period * HZ) / 100; if (idle_threshold < 100) { + cpuidle_poll_state_init(&apm_idle_driver); if (!cpuidle_register_driver(&apm_idle_driver)) if (cpuidle_register_device(&apm_cpuidle_device)) cpuidle_unregister_driver(&apm_idle_driver); diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index fa1261eefa16e..f91ba53e06c8b 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -18,7 +18,7 @@ void foo(void) OFFSET(CPUINFO_x86, cpuinfo_x86, x86); OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor); OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model); - OFFSET(CPUINFO_x86_mask, cpuinfo_x86, x86_mask); + OFFSET(CPUINFO_x86_stepping, cpuinfo_x86, x86_stepping); OFFSET(CPUINFO_cpuid_level, cpuinfo_x86, cpuid_level); OFFSET(CPUINFO_x86_capability, cpuinfo_x86, x86_capability); OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ea831c8581958..f0e6456ca7d3c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -119,7 +119,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if (c->x86_model == 6 && c->x86_mask == 1) { + if (c->x86_model == 6 && c->x86_stepping == 1) { const int K6_BUG_LOOP = 1000000; int n; void (*f_vide)(void); @@ -149,7 +149,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) /* K6 with old style WHCR */ if (c->x86_model < 8 || - (c->x86_model == 8 && c->x86_mask < 8)) { + (c->x86_model == 8 && c->x86_stepping < 8)) { /* We can only write allocate on the low 508Mb */ if (mbytes > 508) mbytes = 508; @@ -168,7 +168,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c) return; } - if ((c->x86_model == 8 && c->x86_mask > 7) || + if ((c->x86_model == 8 && c->x86_stepping > 7) || c->x86_model == 9 || c->x86_model == 13) { /* The more serious chips .. */ @@ -221,7 +221,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx * As per AMD technical note 27212 0.2 */ - if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { + if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) { rdmsr(MSR_K7_CLK_CTL, l, h); if ((l & 0xfff00000) != 0x20000000) { pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", @@ -241,12 +241,12 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * but they are not certified as MP capable. */ /* Athlon 660/661 is valid. */ - if ((c->x86_model == 6) && ((c->x86_mask == 0) || - (c->x86_mask == 1))) + if ((c->x86_model == 6) && ((c->x86_stepping == 0) || + (c->x86_stepping == 1))) return; /* Duron 670 is valid */ - if ((c->x86_model == 7) && (c->x86_mask == 0)) + if ((c->x86_model == 7) && (c->x86_stepping == 0)) return; /* @@ -256,8 +256,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c) * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for * more. */ - if (((c->x86_model == 6) && (c->x86_mask >= 2)) || - ((c->x86_model == 7) && (c->x86_mask >= 1)) || + if (((c->x86_model == 6) && (c->x86_stepping >= 2)) || + ((c->x86_model == 7) && (c->x86_stepping >= 1)) || (c->x86_model > 7)) if (cpu_has(c, X86_FEATURE_MP)) return; @@ -556,6 +556,51 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) } } +static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) +{ + u64 msr; + + /* + * BIOS support is required for SME and SEV. 
+ * For SME: If BIOS has enabled SME then adjust x86_phys_bits by + * the SME physical address space reduction value. + * If BIOS has not enabled SME then don't advertise the + * SME feature (set in scattered.c). + * For SEV: If BIOS has not enabled SEV then don't advertise the + * SEV feature (set in scattered.c). + * + * In all cases, since support for SME and SEV requires long mode, + * don't advertise the feature under CONFIG_X86_32. + */ + if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) { + /* Check if memory encryption is enabled */ + rdmsrl(MSR_K8_SYSCFG, msr); + if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) + goto clear_all; + + /* + * Always adjust physical address bits. Even though this + * will be a value above 32-bits this is still done for + * CONFIG_X86_32 so that accurate values are reported. + */ + c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; + + if (IS_ENABLED(CONFIG_X86_32)) + goto clear_all; + + rdmsrl(MSR_K7_HWCR, msr); + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_sev; + + return; + +clear_all: + clear_cpu_cap(c, X86_FEATURE_SME); +clear_sev: + clear_cpu_cap(c, X86_FEATURE_SEV); + } +} + static void early_init_amd(struct cpuinfo_x86 *c) { u32 dummy; @@ -583,7 +628,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) /* Set MTRR capability flag if appropriate */ if (c->x86 == 5) if (c->x86_model == 13 || c->x86_model == 9 || - (c->x86_model == 8 && c->x86_mask >= 8)) + (c->x86_model == 8 && c->x86_stepping >= 8)) set_cpu_cap(c, X86_FEATURE_K6_MTRR); #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI) @@ -627,26 +672,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (cpu_has_amd_erratum(c, amd_erratum_400)) set_cpu_bug(c, X86_BUG_AMD_E400); - /* - * BIOS support is required for SME. If BIOS has enabled SME then - * adjust x86_phys_bits by the SME physical address space reduction - * value. If BIOS has not enabled SME then don't advertise the - * feature (set in scattered.c). Also, since the SME support requires - * long mode, don't advertise the feature under CONFIG_X86_32. - */ - if (cpu_has(c, X86_FEATURE_SME)) { - u64 msr; - - /* Check if SME is enabled */ - rdmsrl(MSR_K8_SYSCFG, msr); - if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) { - c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; - if (IS_ENABLED(CONFIG_X86_32)) - clear_cpu_cap(c, X86_FEATURE_SME); - } else { - clear_cpu_cap(c, X86_FEATURE_SME); - } - } + early_detect_mem_encrypt(c); } static void init_amd_k8(struct cpuinfo_x86 *c) @@ -769,7 +795,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c) * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects * all up to and including B1. 
*/ - if (c->x86_model <= 1 && c->x86_mask <= 1) + if (c->x86_model <= 1 && c->x86_stepping <= 1) set_cpu_cap(c, X86_FEATURE_CPB); } @@ -880,11 +906,11 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) /* AMD errata T13 (order #21922) */ if ((c->x86 == 6)) { /* Duron Rev A0 */ - if (c->x86_model == 3 && c->x86_mask == 0) + if (c->x86_model == 3 && c->x86_stepping == 0) size = 64; /* Tbird rev A1/A2 */ if (c->x86_model == 4 && - (c->x86_mask == 0 || c->x86_mask == 1)) + (c->x86_stepping == 0 || c->x86_stepping == 1)) size = 256; } return size; @@ -1021,7 +1047,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) } /* OSVW unavailable or ID unknown, match family-model-stepping range */ - ms = (cpu->x86_model << 4) | cpu->x86_mask; + ms = (cpu->x86_model << 4) | cpu->x86_stepping; while ((range = *erratum++)) if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && (ms >= AMD_MODEL_RANGE_START(range)) && diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 71949bf2de5ad..d71c8b54b696d 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -162,8 +162,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) return SPECTRE_V2_CMD_NONE; else { - ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, - sizeof(arg)); + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); if (ret < 0) return SPECTRE_V2_CMD_AUTO; @@ -175,8 +174,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) } if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", - mitigation_options[i].option); + pr_err("unknown option (%s). Switching to AUTO select\n", arg); return SPECTRE_V2_CMD_AUTO; } } @@ -185,8 +183,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && !IS_ENABLED(CONFIG_RETPOLINE)) { - pr_err("%s selected but not compiled in. Switching to AUTO select\n", - mitigation_options[i].option); + pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } @@ -256,14 +253,14 @@ static void __init spectre_v2_select_mitigation(void) goto retpoline_auto; break; } - pr_err("kernel not compiled with retpoline; no mitigation available!"); + pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!"); return; retpoline_auto: if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { retpoline_amd: if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { - pr_err("LFENCE not serializing. Switching to generic retpoline\n"); + pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); goto retpoline_generic; } mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : @@ -281,7 +278,7 @@ static void __init spectre_v2_select_mitigation(void) pr_info("%s\n", spectre_v2_strings[mode]); /* - * If neither SMEP or KPTI are available, there is a risk of + * If neither SMEP nor PTI are available, there is a risk of * hitting userspace addresses in the RSB after a context switch * from a shallow call stack to a deeper one. To prevent this fill * the entire RSB, even when using IBRS. 
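The mitigation decisions made above are what the sysfs handlers reshuffled in the next hunks report back to userspace. A userspace sketch of reading that state from the standard vulnerabilities directory; the sample output is illustrative, as the exact text comes from spectre_v2_strings[]:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (f && fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "Mitigation: Full generic retpoline" */
	if (f)
		fclose(f);
	return 0;
}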
@@ -295,21 +292,20 @@ static void __init spectre_v2_select_mitigation(void) if ((!boot_cpu_has(X86_FEATURE_PTI) && !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) { setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); - pr_info("Filling RSB on context switch\n"); + pr_info("Spectre v2 mitigation: Filling RSB on context switch\n"); } /* Initialize Indirect Branch Prediction Barrier if supported */ if (boot_cpu_has(X86_FEATURE_IBPB)) { setup_force_cpu_cap(X86_FEATURE_USE_IBPB); - pr_info("Enabling Indirect Branch Prediction Barrier\n"); + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); } } #undef pr_fmt #ifdef CONFIG_SYSFS -ssize_t cpu_show_meltdown(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) return sprintf(buf, "Not affected\n"); @@ -318,16 +314,14 @@ ssize_t cpu_show_meltdown(struct device *dev, return sprintf(buf, "Vulnerable\n"); } -ssize_t cpu_show_spectre_v1(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) return sprintf(buf, "Not affected\n"); return sprintf(buf, "Mitigation: __user pointer sanitization\n"); } -ssize_t cpu_show_spectre_v2(struct device *dev, - struct device_attribute *attr, char *buf) +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) return sprintf(buf, "Not affected\n"); @@ -337,9 +331,3 @@ ssize_t cpu_show_spectre_v2(struct device *dev, spectre_v2_module_string()); } #endif - -void __ibp_barrier(void) -{ - __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0); -} -EXPORT_SYMBOL_GPL(__ibp_barrier); diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index c578cd29c2d2c..e5ec0f11c0de7 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -140,7 +140,7 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, X86_FEATURE_TSC); break; case 8: - switch (c->x86_mask) { + switch (c->x86_stepping) { default: name = "2"; break; @@ -215,7 +215,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) * - Note, it seems this may only be in engineering samples. */ if ((c->x86 == 6) && (c->x86_model == 9) && - (c->x86_mask == 1) && (size == 65)) + (c->x86_stepping == 1) && (size == 65)) size -= 1; return size; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index d63f4b5706e4d..824aee0117bb5 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -731,7 +731,7 @@ void cpu_detect(struct cpuinfo_x86 *c) cpuid(0x00000001, &tfms, &misc, &junk, &cap0); c->x86 = x86_family(tfms); c->x86_model = x86_model(tfms); - c->x86_mask = x86_stepping(tfms); + c->x86_stepping = x86_stepping(tfms); if (cap0 & (1<<19)) { c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; @@ -1184,9 +1184,9 @@ static void identify_cpu(struct cpuinfo_x86 *c) int i; c->loops_per_jiffy = loops_per_jiffy; - c->x86_cache_size = -1; + c->x86_cache_size = 0; c->x86_vendor = X86_VENDOR_UNKNOWN; - c->x86_model = c->x86_mask = 0; /* So far unknown... */ + c->x86_model = c->x86_stepping = 0; /* So far unknown... 
*/ c->x86_vendor_id[0] = '\0'; /* Unset */ c->x86_model_id[0] = '\0'; /* Unset */ c->x86_max_cores = 1; @@ -1378,8 +1378,8 @@ void print_cpu_info(struct cpuinfo_x86 *c) pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); - if (c->x86_mask || c->cpuid_level >= 0) - pr_cont(", stepping: 0x%x)\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + pr_cont(", stepping: 0x%x)\n", c->x86_stepping); else pr_cont(")\n"); } diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 6b4bb335641f3..8949b7ae6d925 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -215,7 +215,7 @@ static void init_cyrix(struct cpuinfo_x86 *c) /* common case step number/rev -- exceptions handled below */ c->x86_model = (dir1 >> 4) + 1; - c->x86_mask = dir1 & 0xf; + c->x86_stepping = dir1 & 0xf; /* Now cook; the original recipe is by Channing Corn, from Cyrix. * We do the same thing for each generation: we work out diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 319bf989fad1e..d19e903214b40 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -116,14 +116,13 @@ struct sku_microcode { u32 microcode; }; static const struct sku_microcode spectre_bad_microcodes[] = { - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 }, - { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 }, - { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 }, + { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 }, { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, - { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 }, { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, @@ -136,8 +135,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = { { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, - /* Updated in the 20180108 release; blacklist until we know otherwise */ - { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 }, /* Observed in the wild */ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, @@ -149,7 +146,7 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c) for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { if (c->x86_model == spectre_bad_microcodes[i].model && - c->x86_mask == spectre_bad_microcodes[i].stepping) + c->x86_stepping == spectre_bad_microcodes[i].stepping) return (c->microcode <= spectre_bad_microcodes[i].microcode); } return false; @@ -196,7 +193,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) * need the microcode to have already been loaded... so if it is * not, recommend a BIOS update and disable large pages. 
*/ - if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && + if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && c->microcode < 0x20e) { pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); clear_cpu_cap(c, X86_FEATURE_PSE); @@ -212,7 +209,7 @@ static void early_init_intel(struct cpuinfo_x86 *c) /* CPUID workaround for 0F33/0F34 CPU */ if (c->x86 == 0xF && c->x86_model == 0x3 - && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) + && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) c->x86_phys_bits = 36; /* @@ -310,7 +307,7 @@ int ppro_with_ram_bug(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask < 8) { + boot_cpu_data.x86_stepping < 8) { pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); return 1; } @@ -327,7 +324,7 @@ static void intel_smp_check(struct cpuinfo_x86 *c) * Mask B, Pentium, but not Pentium MMX */ if (c->x86 == 5 && - c->x86_mask >= 1 && c->x86_mask <= 4 && + c->x86_stepping >= 1 && c->x86_stepping <= 4 && c->x86_model <= 3) { /* * Remember we have B step Pentia with bugs @@ -370,7 +367,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until * model 3 mask 3 */ - if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) + if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) clear_cpu_cap(c, X86_FEATURE_SEP); /* @@ -388,7 +385,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * P4 Xeon erratum 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. */ - if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { + if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { if (msr_set_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { pr_info("CPU: C0 stepping P4 Xeon detected.\n"); @@ -403,7 +400,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) * Specification Update"). 
*/ if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && - (c->x86_mask < 0x6 || c->x86_mask == 0xb)) + (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) set_cpu_bug(c, X86_BUG_11AP); @@ -650,7 +647,7 @@ static void init_intel(struct cpuinfo_x86 *c) case 6: if (l2 == 128) p = "Celeron (Mendocino)"; - else if (c->x86_mask == 0 || c->x86_mask == 5) + else if (c->x86_stepping == 0 || c->x86_stepping == 5) p = "Celeron-A"; break; diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c index 410629f10ad37..589b948e6e01f 100644 --- a/arch/x86/kernel/cpu/intel_rdt.c +++ b/arch/x86/kernel/cpu/intel_rdt.c @@ -819,7 +819,7 @@ static __init void rdt_quirks(void) cache_alloc_hsw_probe(); break; case INTEL_FAM6_SKYLAKE_X: - if (boot_cpu_data.x86_mask <= 4) + if (boot_cpu_data.x86_stepping <= 4) set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); } } diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c index 213e8c2ca702f..97685a0c31751 100644 --- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c +++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c @@ -247,9 +247,9 @@ static __poll_t mce_chrdev_poll(struct file *file, poll_table *wait) { poll_wait(file, &mce_chrdev_wait, wait); if (READ_ONCE(mcelog.next)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; if (!mce_apei_read_done && apei_check_mce()) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index aa0d5df9dc60e..e956eb2670619 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } extern struct mca_config mca_cfg; +#ifndef CONFIG_X86_64 +/* + * On 32-bit systems it would be difficult to safely unmap a poison page + * from the kernel 1:1 map because there are no non-canonical addresses that + * we can use to refer to the address without risking a speculative access. + * However, this isn't much of an issue because: + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which + * are only mapped into the kernel as needed + * 2) Few people would run a 32-bit kernel on a machine that supports + * recoverable errors because they have too much memory to boot 32-bit. + */ +static inline void mce_unmap_kpfn(unsigned long pfn) {} +#define mce_unmap_kpfn mce_unmap_kpfn +#endif + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3a8e88a611ebf..8ff94d1e2dce5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -105,6 +105,10 @@ static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn); +#endif + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
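The "#define mce_unmap_kpfn mce_unmap_kpfn" line above uses a common kernel idiom for optional per-config overrides: the header defines a macro with the same name as the inline stub, so the .c file can test for it with #ifndef and compile the real implementation only when no stub exists. Reduced to its skeleton (illustrative, mirroring the hunks above and below):

/* Header, 32-bit only: supply a stub and mark the symbol as provided */
#ifndef CONFIG_X86_64
static inline void mce_unmap_kpfn(unsigned long pfn) {}
#define mce_unmap_kpfn mce_unmap_kpfn
#endif

/* mce.c: fall back to a real definition only when the stub is absent */
#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn)
{
	/* 64-bit decoy-address implementation, shown in the hunk below */
}
#endif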
@@ -234,7 +238,7 @@ static void __print_mce(struct mce *m) m->cs, m->ip); if (m->cs == __KERNEL_CS) - pr_cont("{%pS}", (void *)m->ip); + pr_cont("{%pS}", (void *)(unsigned long)m->ip); pr_cont("\n"); } @@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - memory_failure(pfn, 0); + if (!memory_failure(pfn, 0)) + mce_unmap_kpfn(pfn); } return NOTIFY_OK; @@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m) ret = memory_failure(m->addr >> PAGE_SHIFT, flags); if (ret) pr_err("Memory error not recovered"); + else + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); return ret; } -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) - -void arch_unmap_kpfn(unsigned long pfn) +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn) { unsigned long decoy_addr; @@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn) * We would like to just call: * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a - * speculative access to the posion page because we'd have + * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address @@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn) if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); - } #endif diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index f7c55b0e753ad..a15db2b4e0d66 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -921,7 +921,7 @@ static bool is_blacklisted(unsigned int cpu) */ if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X && - c->x86_mask == 0x01 && + c->x86_stepping == 0x01 && llc_size_per_core > 2621440 && c->microcode < 0x0b000021) { pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); @@ -944,7 +944,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, return UCODE_NFOUND; sprintf(name, "intel-ucode/%02x-%02x-%02x", - c->x86, c->x86_model, c->x86_mask); + c->x86, c->x86_model, c->x86_stepping); if (request_firmware_direct(&firmware, name, device)) { pr_debug("data file %s load failed\n", name); @@ -982,7 +982,7 @@ static struct microcode_ops microcode_intel_ops = { static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) { - u64 llc_size = c->x86_cache_size * 1024; + u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 85eb5fc180c81..9340f41ce8d3d 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -251,6 +251,12 @@ static void __init ms_hyperv_init_platform(void) hyperv_setup_mmu_ops(); /* Setup the IDT for hypervisor callback */ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); + + /* Setup the IDT for reenlightenment notifications */ + if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT) + alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR, + hyperv_reenlightenment_vector); + #endif } diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index fdc55215d44d0..e12ee86906c62 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ 
b/arch/x86/kernel/cpu/mtrr/generic.c @@ -859,7 +859,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, */ if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask <= 7) { + boot_cpu_data.x86_stepping <= 7) { if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); return -EINVAL; diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 40d5a8a752125..7468de4290873 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -711,8 +711,8 @@ void __init mtrr_bp_init(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 0xF && boot_cpu_data.x86_model == 0x3 && - (boot_cpu_data.x86_mask == 0x3 || - boot_cpu_data.x86_mask == 0x4)) + (boot_cpu_data.x86_stepping == 0x3 || + boot_cpu_data.x86_stepping == 0x4)) phys_addr = 36; size_or_mask = SIZE_OR_MASK_BITS(phys_addr); diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index e7ecedafa1c8f..2c8522a39ed5d 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -72,8 +72,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) c->x86_model, c->x86_model_id[0] ? c->x86_model_id : "unknown"); - if (c->x86_mask || c->cpuid_level >= 0) - seq_printf(m, "stepping\t: %d\n", c->x86_mask); + if (c->x86_stepping || c->cpuid_level >= 0) + seq_printf(m, "stepping\t: %d\n", c->x86_stepping); else seq_puts(m, "stepping\t: unknown\n"); if (c->microcode) @@ -91,8 +91,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) } /* Cache size */ - if (c->x86_cache_size >= 0) - seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); + if (c->x86_cache_size) + seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); show_cpuinfo_core(m, c, cpu); show_cpuinfo_misc(m, c); diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 4075d2be5357e..772c219b68898 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -30,6 +30,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 }, + { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 }, { 0, 0, 0, 0, 0 } }; diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c29020907886a..b59e4fb40fd99 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -37,7 +37,7 @@ #define X86 new_cpu_data+CPUINFO_x86 #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor #define X86_MODEL new_cpu_data+CPUINFO_x86_model -#define X86_MASK new_cpu_data+CPUINFO_x86_mask +#define X86_STEPPING new_cpu_data+CPUINFO_x86_stepping #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability @@ -332,7 +332,7 @@ ENTRY(startup_32_smp) shrb $4,%al movb %al,X86_MODEL andb $0x0f,%cl # mask mask revision - movb %cl,X86_MASK + movb %cl,X86_STEPPING movl %edx,X86_CAPABILITY .Lis486: diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 68e1867cca804..45fb4d2565f80 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -141,6 +141,15 @@ int arch_show_interrupts(struct seq_file *p, int prec) irq_stats(j)->irq_hv_callback_count); seq_puts(p, " Hypervisor callback interrupts\n"); } +#endif +#if IS_ENABLED(CONFIG_HYPERV) + if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, 
system_vectors)) { + seq_printf(p, "%*s: ", prec, "HRE"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", + irq_stats(j)->irq_hv_reenlightenment_count); + seq_puts(p, " Hyper-V reenlightenment interrupts\n"); + } #endif seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b40ffbf156c18..4e37d1a851a62 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -498,6 +498,34 @@ static void __init kvm_apf_trap_init(void) update_intr_gate(X86_TRAP_PF, async_page_fault); } +static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask); + +static void kvm_flush_tlb_others(const struct cpumask *cpumask, + const struct flush_tlb_info *info) +{ + u8 state; + int cpu; + struct kvm_steal_time *src; + struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask); + + cpumask_copy(flushmask, cpumask); + /* + * We have to call flush only on online vCPUs. And + * queue flush_on_enter for pre-empted vCPUs + */ + for_each_cpu(cpu, flushmask) { + src = &per_cpu(steal_time, cpu); + state = READ_ONCE(src->preempted); + if ((state & KVM_VCPU_PREEMPTED)) { + if (try_cmpxchg(&src->preempted, &state, + state | KVM_VCPU_FLUSH_TLB)) + __cpumask_clear_cpu(cpu, flushmask); + } + } + + native_flush_tlb_others(flushmask, info); +} + static void __init kvm_guest_init(void) { int i; @@ -517,6 +545,9 @@ static void __init kvm_guest_init(void) pv_time_ops.steal_clock = kvm_steal_clock; } + if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) + pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; + if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) apic_set_eoi_write(kvm_guest_apic_eoi_write); @@ -598,6 +629,22 @@ static __init int activate_jump_labels(void) } arch_initcall(activate_jump_labels); +static __init int kvm_setup_pv_tlb_flush(void) +{ + int cpu; + + if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) { + for_each_possible_cpu(cpu) { + zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + } + pr_info("KVM setup pv remote TLB flush\n"); + } + + return 0; +} +arch_initcall(kvm_setup_pv_tlb_flush); + #ifdef CONFIG_PARAVIRT_SPINLOCKS /* Kick a cpu by its apicid. Used to wake up a halted vcpu */ @@ -643,7 +690,7 @@ __visible bool __kvm_vcpu_is_preempted(long cpu) { struct kvm_steal_time *src = &per_cpu(steal_time, cpu); - return !!src->preempted; + return !!(src->preempted & KVM_VCPU_PREEMPTED); } PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted); diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 27d0a17126636..f1c5eb99d4454 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -410,7 +410,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type) processor.apicver = mpc_default_type > 4 ? 
0x10 : 0x01; processor.cpuflag = CPU_ENABLED; processor.cpufeature = (boot_cpu_data.x86 << 8) | - (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_stepping; processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX]; processor.reserved[0] = 0; processor.reserved[1] = 0; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 041096bdef860..99dc79e76bdc5 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -200,9 +200,9 @@ static void native_flush_tlb_global(void) __native_flush_tlb_global(); } -static void native_flush_tlb_single(unsigned long addr) +static void native_flush_tlb_one_user(unsigned long addr) { - __native_flush_tlb_single(addr); + __native_flush_tlb_one_user(addr); } struct static_key paravirt_steal_enabled; @@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = { .flush_tlb_user = native_flush_tlb, .flush_tlb_kernel = native_flush_tlb_global, - .flush_tlb_single = native_flush_tlb_single, + .flush_tlb_one_user = native_flush_tlb_one_user, .flush_tlb_others = native_flush_tlb_others, .pgd_alloc = __paravirt_pgd_alloc, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6f27facbaa9b0..9eee25d07586c 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1281,11 +1281,10 @@ void __init native_smp_prepare_boot_cpu(void) cpu_set_state_online(me); } -void __init native_smp_cpus_done(unsigned int max_cpus) +void __init calculate_max_logical_packages(void) { int ncpus; - pr_debug("Boot done\n"); /* * Today neither Intel nor AMD support heterogenous systems so * extrapolate the boot cpu's data to all packages. @@ -1293,6 +1292,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus) ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); pr_info("Max logical packages: %u\n", __max_logical_packages); +} + +void __init native_smp_cpus_done(unsigned int max_cpus) +{ + pr_debug("Boot done\n"); + + calculate_max_logical_packages(); if (x86_has_numa_in_package) set_sched_topology(x86_numa_in_package_topology); @@ -1430,7 +1436,6 @@ static void remove_siblinginfo(int cpu) cpumask_clear(cpu_llc_shared_mask(cpu)); cpumask_clear(topology_sibling_cpumask(cpu)); cpumask_clear(topology_core_cpumask(cpu)); - c->phys_proc_id = 0; c->cpu_core_id = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); recompute_smt_state(); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 446c9ef8cfc32..3d9b2308e7fad 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -181,7 +181,7 @@ int fixup_bug(struct pt_regs *regs, int trapnr) break; case BUG_TRAP_TYPE_WARN: - regs->ip += LEN_UD0; + regs->ip += LEN_UD2; return 1; } diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 3df51c2878442..92fd433c50b9b 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -81,6 +81,14 @@ config KVM_AMD To compile this as a module, choose M here: the module will be called kvm-amd. +config KVM_AMD_SEV + def_bool y + bool "AMD Secure Encrypted Virtualization (SEV) support" + depends on KVM_AMD && X86_64 + depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP + ---help--- + Provides support for launching Encrypted VMs on AMD processors. 
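The new Kconfig entry gates the SEV code reached through the mem_enc_* hooks added to kvm_x86_ops earlier in this series; struct kvm_enc_region is the uapi type those hooks take. A sketch of how a VMM might register guest RAM for an encrypted guest, assuming the companion uapi ioctl name KVM_MEMORY_ENCRYPT_REG_REGION; error handling and the SEV launch flow are omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pin_encrypted_region(int vm_fd, void *hva, unsigned long long size)
{
	struct kvm_enc_region region = {
		.addr = (uintptr_t)hva,
		.size = size,
	};

	/* Dispatched to kvm_x86_ops->mem_enc_reg_region() in the kernel */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
}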
+ config KVM_MMU_AUDIT bool "Audit KVM MMU" depends on KVM && TRACEPOINTS diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 13f5d4217e4f1..a0c5a69bc7c4a 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -291,13 +291,18 @@ static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, { switch (func) { case 0: - entry->eax = 1; /* only one leaf currently */ + entry->eax = 7; ++*nent; break; case 1: entry->ecx = F(MOVBE); ++*nent; break; + case 7: + entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + if (index == 0) + entry->ecx = F(RDPID); + ++*nent; default: break; } @@ -325,6 +330,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0; unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; + unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -363,7 +369,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) | - 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM); + 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) | + F(TOPOEXT); /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = @@ -389,8 +396,9 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 7.0.ecx*/ const u32 kvm_cpuid_7_0_ecx_x86_features = - F(AVX512VBMI) | F(LA57) | F(PKU) | - 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ); + F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | + F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | + F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG); /* cpuid 7.0.edx*/ const u32 kvm_cpuid_7_0_edx_x86_features = @@ -476,6 +484,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ebx |= F(TSC_ADJUST); entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; cpuid_mask(&entry->ecx, CPUID_7_ECX); + entry->ecx |= f_umip; /* PKU is not yet implemented for shadow paging. 
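* It is therefore masked out below unless TDP (EPT/NPT) is enabled and the host itself has OSPKE.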
*/ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); @@ -597,7 +606,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | - (1 << KVM_FEATURE_PV_UNHALT); + (1 << KVM_FEATURE_PV_UNHALT) | + (1 << KVM_FEATURE_PV_TLB_FLUSH); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); @@ -607,7 +617,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->edx = 0; break; case 0x80000000: - entry->eax = min(entry->eax, 0x8000001a); + entry->eax = min(entry->eax, 0x8000001f); break; case 0x80000001: entry->edx &= kvm_cpuid_8000_0001_edx_x86_features; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 290ecf711aec2..d91eaeb010347 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3533,6 +3533,16 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_rdpid(struct x86_emulate_ctxt *ctxt) +{ + u64 tsc_aux = 0; + + if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) + return emulate_gp(ctxt, 0); + ctxt->dst.val = tsc_aux; + return X86EMUL_CONTINUE; +} + static int em_rdtsc(struct x86_emulate_ctxt *ctxt) { u64 tsc = 0; @@ -3652,17 +3662,27 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) +static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment) { - if (ctxt->modrm_reg > VCPU_SREG_GS) - return emulate_ud(ctxt); + if (segment > VCPU_SREG_GS && + (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && + ctxt->ops->cpl(ctxt) > 0) + return emulate_gp(ctxt, 0); - ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg); + ctxt->dst.val = get_segment_selector(ctxt, segment); if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; return X86EMUL_CONTINUE; } +static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt) +{ + if (ctxt->modrm_reg > VCPU_SREG_GS) + return emulate_ud(ctxt); + + return em_store_sreg(ctxt, ctxt->modrm_reg); +} + static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; @@ -3678,6 +3698,11 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt) return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); } +static int em_sldt(struct x86_emulate_ctxt *ctxt) +{ + return em_store_sreg(ctxt, VCPU_SREG_LDTR); +} + static int em_lldt(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; @@ -3687,6 +3712,11 @@ static int em_lldt(struct x86_emulate_ctxt *ctxt) return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR); } +static int em_str(struct x86_emulate_ctxt *ctxt) +{ + return em_store_sreg(ctxt, VCPU_SREG_TR); +} + static int em_ltr(struct x86_emulate_ctxt *ctxt) { u16 sel = ctxt->src.val; @@ -3739,6 +3769,10 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, { struct desc_ptr desc_ptr; + if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && + ctxt->ops->cpl(ctxt) > 0) + return emulate_gp(ctxt, 0); + if (ctxt->mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; get(ctxt, &desc_ptr); @@ -3798,6 +3832,10 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt) static int em_smsw(struct x86_emulate_ctxt *ctxt) { + if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) && + ctxt->ops->cpl(ctxt) > 0) + return emulate_gp(ctxt, 0); + if (ctxt->dst.type == OP_MEM) ctxt->dst.bytes = 2; ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); @@ -4383,8 +4421,8 @@ static const struct opcode group5[] = 
{ }; static const struct opcode group6[] = { - DI(Prot | DstMem, sldt), - DI(Prot | DstMem, str), + II(Prot | DstMem, em_sldt, sldt), + II(Prot | DstMem, em_str, str), II(Prot | Priv | SrcMem16, em_lldt, lldt), II(Prot | Priv | SrcMem16, em_ltr, ltr), N, N, N, N, @@ -4415,10 +4453,20 @@ static const struct opcode group8[] = { F(DstMem | SrcImmByte | Lock | PageTable, em_btc), }; +/* + * The "memory" destination is actually always a register, since we come + * from the register case of group9. + */ +static const struct gprefix pfx_0f_c7_7 = { + N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp), +}; + + static const struct group_dual group9 = { { N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, }, { - N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, + GP(0, &pfx_0f_c7_7), } }; static const struct opcode group11[] = { diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 5c24811e8b0bc..f171051eecf34 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -79,7 +79,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) if (kvm_cpu_has_extint(v)) return 1; - if (kvm_vcpu_apicv_active(v)) + if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v)) return 0; return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e2c1fb8d35cea..924ac8ce9d500 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -364,32 +364,41 @@ static u8 count_vectors(void *bitmap) return count; } -int __kvm_apic_update_irr(u32 *pir, void *regs) +bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr) { u32 i, vec; - u32 pir_val, irr_val; - int max_irr = -1; + u32 pir_val, irr_val, prev_irr_val; + int max_updated_irr; + + max_updated_irr = -1; + *max_irr = -1; for (i = vec = 0; i <= 7; i++, vec += 32) { pir_val = READ_ONCE(pir[i]); irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10)); if (pir_val) { + prev_irr_val = irr_val; irr_val |= xchg(&pir[i], 0); *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val; + if (prev_irr_val != irr_val) { + max_updated_irr = + __fls(irr_val ^ prev_irr_val) + vec; + } } if (irr_val) - max_irr = __fls(irr_val) + vec; + *max_irr = __fls(irr_val) + vec; } - return max_irr; + return ((max_updated_irr != -1) && + (max_updated_irr == *max_irr)); } EXPORT_SYMBOL_GPL(__kvm_apic_update_irr); -int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) +bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr) { struct kvm_lapic *apic = vcpu->arch.apic; - return __kvm_apic_update_irr(pir, apic->regs); + return __kvm_apic_update_irr(pir, apic->regs, max_irr); } EXPORT_SYMBOL_GPL(kvm_apic_update_irr); @@ -581,7 +590,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr) { int highest_irr; - if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active) + if (apic->vcpu->arch.apicv_active) highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu); else highest_irr = apic_find_highest_irr(apic); diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 4b9935a383479..56c36014f7b76 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -75,8 +75,8 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len, bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, unsigned int dest, int dest_mode); -int __kvm_apic_update_irr(u32 *pir, void *regs); -int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir); +bool __kvm_apic_update_irr(u32 *pir, void *regs, int 
*max_irr); +bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr); void kvm_apic_update_ppr(struct kvm_vcpu *vcpu); int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, struct dest_map *dest_map); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2b8eb4da4d082..46ff304140c71 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -42,6 +42,7 @@ #include #include +#include #include #include #include @@ -381,7 +382,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, } EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); -void kvm_mmu_clear_all_pte_masks(void) +static void kvm_mmu_clear_all_pte_masks(void) { shadow_user_mask = 0; shadow_accessed_mask = 0; @@ -2708,7 +2709,18 @@ static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, static bool kvm_is_mmio_pfn(kvm_pfn_t pfn) { if (pfn_valid(pfn)) - return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)); + return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) && + /* + * Some reserved pages, such as those from NVDIMM + * DAX devices, are not for MMIO, and can be mapped + * with cached memory type for better performance. + * However, the above check misconceives those pages + * as MMIO, and results in KVM mapping them with UC + * memory type, which would hurt the performance. + * Therefore, we check the host memory type in addition + * and only treat UC/UC-/WC pages as MMIO. + */ + (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn)); return true; } @@ -4951,6 +4963,16 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, if (mmio_info_in_cache(vcpu, cr2, direct)) emulation_type = 0; emulate: + /* + * On AMD platforms, under certain conditions insn_len may be zero on #NPF. + * This can happen if a guest gets a page-fault on data access but the HW + * table walker is not able to read the instruction page (e.g instruction + * page is not present in memory). In those cases we simply restart the + * guest. + */ + if (unlikely(insn && !insn_len)) + return 1; + er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); switch (er) { @@ -5058,7 +5080,7 @@ void kvm_mmu_uninit_vm(struct kvm *kvm) typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head); /* The caller should hold mmu-lock before calling this function. 
*/ -static bool +static __always_inline bool slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) @@ -5088,7 +5110,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, return flush; } -static bool +static __always_inline bool slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, bool lock_flush_tlb) @@ -5099,7 +5121,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5107,7 +5129,7 @@ slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { @@ -5115,7 +5137,7 @@ slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb); } -static bool +static __always_inline bool slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) { diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c index d22ddbdf5e6ed..1272861e77b9e 100644 --- a/arch/x86/kvm/mmu_audit.c +++ b/arch/x86/kvm/mmu_audit.c @@ -19,7 +19,7 @@ #include -char const *audit_point_name[] = { +static char const *audit_point_name[] = { "pre page fault", "post page fault", "pre pte write", diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4e3c795305263..b3e488a748281 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -37,6 +37,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -214,6 +218,9 @@ struct vcpu_svm { */ struct list_head ir_list; spinlock_t ir_list_lock; + + /* which host CPU was used for running this vcpu */ + unsigned int last_cpu; }; /* @@ -289,8 +296,12 @@ module_param(vls, int, 0444); static int vgif = true; module_param(vgif, int, 0444); +/* enable/disable SEV support */ +static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); +module_param(sev, int, 0444); + static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); -static void svm_flush_tlb(struct kvm_vcpu *vcpu); +static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa); static void svm_complete_interrupts(struct vcpu_svm *svm); static int nested_svm_exit_handled(struct vcpu_svm *svm); @@ -324,6 +335,38 @@ enum { #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL +static unsigned int max_sev_asid; +static unsigned int min_sev_asid; +static unsigned long *sev_asid_bitmap; +#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT) + +struct enc_region { + struct list_head list; + unsigned long npages; + struct page **pages; + unsigned long uaddr; + unsigned long size; +}; + +static inline bool svm_sev_enabled(void) +{ + return max_sev_asid; +} + +static inline bool sev_guest(struct kvm *kvm) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + + return sev->active; +} + +static inline int sev_get_asid(struct kvm *kvm) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + + return sev->asid; +} + static inline void mark_all_dirty(struct vmcb *vmcb) { vmcb->control.clean = 0; @@ -530,10 +573,14 @@ struct svm_cpu_data { u64 
asid_generation; u32 max_asid; u32 next_asid; + u32 min_asid; struct kvm_ldttss_desc *tss_desc; struct page *save_area; struct vmcb *current_vmcb; + + /* index = sev_asid, value = vmcb pointer */ + struct vmcb **sev_vmcbs; }; static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data); @@ -788,6 +835,7 @@ static int svm_hardware_enable(void) sd->asid_generation = 1; sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; sd->next_asid = sd->max_asid + 1; + sd->min_asid = max_sev_asid + 1; gdt = get_current_gdt_rw(); sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); @@ -846,6 +894,7 @@ static void svm_cpu_uninit(int cpu) return; per_cpu(svm_data, raw_smp_processor_id()) = NULL; + kfree(sd->sev_vmcbs); __free_page(sd->save_area); kfree(sd); } @@ -859,11 +908,18 @@ static int svm_cpu_init(int cpu) if (!sd) return -ENOMEM; sd->cpu = cpu; - sd->save_area = alloc_page(GFP_KERNEL); r = -ENOMEM; + sd->save_area = alloc_page(GFP_KERNEL); if (!sd->save_area) goto err_1; + if (svm_sev_enabled()) { + r = -ENOMEM; + sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL); + if (!sd->sev_vmcbs) + goto err_1; + } + per_cpu(svm_data, cpu) = sd; return 0; @@ -1070,6 +1126,48 @@ static int avic_ga_log_notifier(u32 ga_tag) return 0; } +static __init int sev_hardware_setup(void) +{ + struct sev_user_data_status *status; + int rc; + + /* Maximum number of encrypted guests supported simultaneously */ + max_sev_asid = cpuid_ecx(0x8000001F); + + if (!max_sev_asid) + return 1; + + /* Minimum ASID value that should be used for SEV guest */ + min_sev_asid = cpuid_edx(0x8000001F); + + /* Initialize SEV ASID bitmap */ + sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid), + sizeof(unsigned long), GFP_KERNEL); + if (!sev_asid_bitmap) + return 1; + + status = kmalloc(sizeof(*status), GFP_KERNEL); + if (!status) + return 1; + + /* + * Check SEV platform status. + * + * PLATFORM_STATUS can be called in any state, if we failed to query + * the PLATFORM status then either PSP firmware does not support SEV + * feature or SEV firmware is dead. 
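+ * A non-zero return here makes svm_hardware_setup() clear the "sev" + * module parameter, so the driver falls back to plain (non-SEV) SVM.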
+ */ + rc = sev_platform_status(status, NULL); + if (rc) + goto err; + + pr_info("SEV supported\n"); + +err: + kfree(status); + return rc; +} + static __init int svm_hardware_setup(void) { int cpu; @@ -1105,6 +1203,17 @@ static __init int svm_hardware_setup(void) kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); } + if (sev) { + if (boot_cpu_has(X86_FEATURE_SEV) && + IS_ENABLED(CONFIG_KVM_AMD_SEV)) { + r = sev_hardware_setup(); + if (r) + sev = false; + } else { + sev = false; + } + } + for_each_possible_cpu(cpu) { r = svm_cpu_init(cpu); if (r) @@ -1166,6 +1275,9 @@ static __exit void svm_hardware_unsetup(void) { int cpu; + if (svm_sev_enabled()) + kfree(sev_asid_bitmap); + for_each_possible_cpu(cpu) svm_cpu_uninit(cpu); @@ -1318,7 +1430,7 @@ static void init_vmcb(struct vcpu_svm *svm) if (npt_enabled) { /* Setup VMCB for Nested Paging */ - control->nested_ctl = 1; + control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; clr_intercept(svm, INTERCEPT_INVLPG); clr_exception_intercept(svm, PF_VECTOR); clr_cr_intercept(svm, INTERCEPT_CR3_READ); @@ -1356,6 +1468,11 @@ static void init_vmcb(struct vcpu_svm *svm) svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; } + if (sev_guest(svm->vcpu.kvm)) { + svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; + clr_exception_intercept(svm, UD_VECTOR); + } + mark_all_dirty(svm->vmcb); enable_gif(svm); @@ -1438,6 +1555,179 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) return 0; } +static void __sev_asid_free(int asid) +{ + struct svm_cpu_data *sd; + int cpu, pos; + + pos = asid - 1; + clear_bit(pos, sev_asid_bitmap); + + for_each_possible_cpu(cpu) { + sd = per_cpu(svm_data, cpu); + sd->sev_vmcbs[pos] = NULL; + } +} + +static void sev_asid_free(struct kvm *kvm) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + + __sev_asid_free(sev->asid); +} + +static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) +{ + struct sev_data_decommission *decommission; + struct sev_data_deactivate *data; + + if (!handle) + return; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + /* deactivate handle */ + data->handle = handle; + sev_guest_deactivate(data, NULL); + + wbinvd_on_all_cpus(); + sev_guest_df_flush(NULL); + kfree(data); + + decommission = kzalloc(sizeof(*decommission), GFP_KERNEL); + if (!decommission) + return; + + /* decommission handle */ + decommission->handle = handle; + sev_guest_decommission(decommission, NULL); + + kfree(decommission); +} + +static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, + unsigned long ulen, unsigned long *n, + int write) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + unsigned long npages, npinned, size; + unsigned long locked, lock_limit; + struct page **pages; + int first, last; + + /* Calculate number of pages. */ + first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; + last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; + npages = (last - first + 1); + + locked = sev->pages_locked + npages; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { + pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit); + return NULL; + } + + /* Avoid using vmalloc for smaller buffers. */ + size = npages * sizeof(struct page *); + if (size > PAGE_SIZE) + pages = vmalloc(size); + else + pages = kmalloc(size, GFP_KERNEL); + + if (!pages) + return NULL; + + /* Pin the user virtual address. */ + npinned = get_user_pages_fast(uaddr, npages, write ? 
FOLL_WRITE : 0, pages); + if (npinned != npages) { + pr_err("SEV: Failure locking %lu pages.\n", npages); + goto err; + } + + *n = npages; + sev->pages_locked = locked; + + return pages; + +err: + if (npinned > 0) + release_pages(pages, npinned); + + kvfree(pages); + return NULL; +} + +static void sev_unpin_memory(struct kvm *kvm, struct page **pages, + unsigned long npages) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + + release_pages(pages, npages); + kvfree(pages); + sev->pages_locked -= npages; +} + +static void sev_clflush_pages(struct page *pages[], unsigned long npages) +{ + uint8_t *page_virtual; + unsigned long i; + + if (npages == 0 || pages == NULL) + return; + + for (i = 0; i < npages; i++) { + page_virtual = kmap_atomic(pages[i]); + clflush_cache_range(page_virtual, PAGE_SIZE); + kunmap_atomic(page_virtual); + } +} + +static void __unregister_enc_region_locked(struct kvm *kvm, + struct enc_region *region) +{ + /* + * The guest may change the memory encryption attribute from C=0 -> C=1 + * or vice versa for this memory range. Lets make sure caches are + * flushed to ensure that guest data gets written into memory with + * correct C-bit. + */ + sev_clflush_pages(region->pages, region->npages); + + sev_unpin_memory(kvm, region->pages, region->npages); + list_del(®ion->list); + kfree(region); +} + +static void sev_vm_destroy(struct kvm *kvm) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct list_head *head = &sev->regions_list; + struct list_head *pos, *q; + + if (!sev_guest(kvm)) + return; + + mutex_lock(&kvm->lock); + + /* + * if userspace was terminated before unregistering the memory regions + * then lets unpin all the registered memory. + */ + if (!list_empty(head)) { + list_for_each_safe(pos, q, head) { + __unregister_enc_region_locked(kvm, + list_entry(pos, struct enc_region, list)); + } + } + + mutex_unlock(&kvm->lock); + + sev_unbind_asid(kvm, sev->handle); + sev_asid_free(kvm); +} + static void avic_vm_destroy(struct kvm *kvm) { unsigned long flags; @@ -1456,6 +1746,12 @@ static void avic_vm_destroy(struct kvm *kvm) spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags); } +static void svm_vm_destroy(struct kvm *kvm) +{ + avic_vm_destroy(kvm); + sev_vm_destroy(kvm); +} + static int avic_vm_init(struct kvm *kvm) { unsigned long flags; @@ -2066,7 +2362,7 @@ static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) - svm_flush_tlb(vcpu); + svm_flush_tlb(vcpu, true); vcpu->arch.cr4 = cr4; if (!npt_enabled) @@ -2125,7 +2421,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) { if (sd->next_asid > sd->max_asid) { ++sd->asid_generation; - sd->next_asid = 1; + sd->next_asid = sd->min_asid; svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; } @@ -2173,22 +2469,24 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) static int pf_interception(struct vcpu_svm *svm) { - u64 fault_address = svm->vmcb->control.exit_info_2; + u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); u64 error_code = svm->vmcb->control.exit_info_1; return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, - svm->vmcb->control.insn_bytes, + static_cpu_has(X86_FEATURE_DECODEASSISTS) ? 
+ svm->vmcb->control.insn_bytes : NULL, svm->vmcb->control.insn_len); } static int npf_interception(struct vcpu_svm *svm) { - u64 fault_address = svm->vmcb->control.exit_info_2; + u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); u64 error_code = svm->vmcb->control.exit_info_1; trace_kvm_page_fault(fault_address, error_code); return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, - svm->vmcb->control.insn_bytes, + static_cpu_has(X86_FEATURE_DECODEASSISTS) ? + svm->vmcb->control.insn_bytes : NULL, svm->vmcb->control.insn_len); } @@ -2415,7 +2713,7 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu, svm->vmcb->control.nested_cr3 = __sme_set(root); mark_dirty(svm->vmcb, VMCB_NPT); - svm_flush_tlb(vcpu); + svm_flush_tlb(vcpu, true); } static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, @@ -2957,7 +3255,8 @@ static bool nested_vmcb_checks(struct vmcb *vmcb) if (vmcb->control.asid == 0) return false; - if (vmcb->control.nested_ctl && !npt_enabled) + if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && + !npt_enabled) return false; return true; @@ -2971,7 +3270,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, else svm->vcpu.arch.hflags &= ~HF_HIF_MASK; - if (nested_vmcb->control.nested_ctl) { + if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) { kvm_mmu_unload(&svm->vcpu); svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; nested_svm_init_mmu_context(&svm->vcpu); @@ -3019,7 +3318,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; svm->nested.intercept = nested_vmcb->control.intercept; - svm_flush_tlb(&svm->vcpu); + svm_flush_tlb(&svm->vcpu, true); svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) svm->vcpu.arch.hflags |= HF_VINTR_MASK; @@ -4442,12 +4741,39 @@ static void reload_tss(struct kvm_vcpu *vcpu) load_TR_desc(); } +static void pre_sev_run(struct vcpu_svm *svm, int cpu) +{ + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + int asid = sev_get_asid(svm->vcpu.kvm); + + /* Assign the asid allocated with this SEV guest */ + svm->vmcb->control.asid = asid; + + /* + * Flush guest TLB: + * + * 1) when different VMCB for the same ASID is to be run on the same host CPU. + * 2) or this VMCB was executed on different host CPU in previous VMRUNs. 
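+ * + * If neither case holds (this same VMCB ran last on this very CPU), the + * ASID's TLB entries are still valid and the flush below is skipped.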
+ */ + if (sd->sev_vmcbs[asid] == svm->vmcb && + svm->last_cpu == cpu) + return; + + svm->last_cpu = cpu; + sd->sev_vmcbs[asid] = svm->vmcb; + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; + mark_dirty(svm->vmcb, VMCB_ASID); +} + static void pre_svm_run(struct vcpu_svm *svm) { int cpu = raw_smp_processor_id(); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + if (sev_guest(svm->vcpu.kvm)) + return pre_sev_run(svm, cpu); + /* FIXME: handle wraparound of asid_generation */ if (svm->asid_generation != sd->asid_generation) new_asid(svm, sd); @@ -4865,7 +5191,7 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) return 0; } -static void svm_flush_tlb(struct kvm_vcpu *vcpu) +static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) { struct vcpu_svm *svm = to_svm(vcpu); @@ -5208,7 +5534,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) svm->vmcb->save.cr3 = __sme_set(root); mark_dirty(svm->vmcb, VMCB_CR); - svm_flush_tlb(vcpu); + svm_flush_tlb(vcpu, true); } static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) @@ -5222,7 +5548,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root) svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); mark_dirty(svm->vmcb, VMCB_CR); - svm_flush_tlb(vcpu); + svm_flush_tlb(vcpu, true); } static int is_disabled(void) @@ -5308,6 +5634,12 @@ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) entry->edx |= SVM_FEATURE_NPT; break; + case 0x8000001F: + /* Support memory encryption cpuid if host supports it */ + if (boot_cpu_has(X86_FEATURE_SEV)) + cpuid(0x8000001f, &entry->eax, &entry->ebx, + &entry->ecx, &entry->edx); + } } @@ -5336,6 +5668,11 @@ static bool svm_xsaves_supported(void) return false; } +static bool svm_umip_emulated(void) +{ + return false; +} + static bool svm_has_wbinvd_exit(void) { return true; @@ -5637,6 +5974,828 @@ static int enable_smi_window(struct kvm_vcpu *vcpu) return 0; } +static int sev_asid_new(void) +{ + int pos; + + /* + * SEV-enabled guest must use asid from min_sev_asid to max_sev_asid. 
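+ * Plain SVM guests take their ASIDs from max_sev_asid + 1 upwards + * instead; see the sd->min_asid setup in svm_hardware_enable().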
+ */ + pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1); + if (pos >= max_sev_asid) + return -EBUSY; + + set_bit(pos, sev_asid_bitmap); + return pos + 1; +} + +static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + int asid, ret; + + ret = -EBUSY; + asid = sev_asid_new(); + if (asid < 0) + return ret; + + ret = sev_platform_init(&argp->error); + if (ret) + goto e_free; + + sev->active = true; + sev->asid = asid; + INIT_LIST_HEAD(&sev->regions_list); + + return 0; + +e_free: + __sev_asid_free(asid); + return ret; +} + +static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) +{ + struct sev_data_activate *data; + int asid = sev_get_asid(kvm); + int ret; + + wbinvd_on_all_cpus(); + + ret = sev_guest_df_flush(error); + if (ret) + return ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* activate ASID on the given handle */ + data->handle = handle; + data->asid = asid; + ret = sev_guest_activate(data, error); + kfree(data); + + return ret; +} + +static int __sev_issue_cmd(int fd, int id, void *data, int *error) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = sev_issue_cmd_external_user(f.file, id, data, error); + + fdput(f); + return ret; +} + +static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + + return __sev_issue_cmd(sev->fd, id, data, error); +} + +static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct sev_data_launch_start *start; + struct kvm_sev_launch_start params; + void *dh_blob, *session_blob; + int *error = &argp->error; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + start = kzalloc(sizeof(*start), GFP_KERNEL); + if (!start) + return -ENOMEM; + + dh_blob = NULL; + if (params.dh_uaddr) { + dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len); + if (IS_ERR(dh_blob)) { + ret = PTR_ERR(dh_blob); + goto e_free; + } + + start->dh_cert_address = __sme_set(__pa(dh_blob)); + start->dh_cert_len = params.dh_len; + } + + session_blob = NULL; + if (params.session_uaddr) { + session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len); + if (IS_ERR(session_blob)) { + ret = PTR_ERR(session_blob); + goto e_free_dh; + } + + start->session_address = __sme_set(__pa(session_blob)); + start->session_len = params.session_len; + } + + start->handle = params.handle; + start->policy = params.policy; + + /* create memory encryption context */ + ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error); + if (ret) + goto e_free_session; + + /* Bind ASID to this guest */ + ret = sev_bind_asid(kvm, start->handle, error); + if (ret) + goto e_free_session; + + /* return handle to userspace */ + params.handle = start->handle; + if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) { + sev_unbind_asid(kvm, start->handle); + ret = -EFAULT; + goto e_free_session; + } + + sev->handle = start->handle; + sev->fd = argp->sev_fd; + +e_free_session: + kfree(session_blob); +e_free_dh: + kfree(dh_blob); +e_free: + kfree(start); + return ret; +} + +static int get_num_contig_pages(int idx, struct page **inpages, + unsigned long npages) +{ + unsigned long paddr, next_paddr; + int i = idx + 1, pages = 1; + + /* find 
the number of contiguous pages starting from idx */ + paddr = __sme_page_pa(inpages[idx]); + while (i < npages) { + next_paddr = __sme_page_pa(inpages[i++]); + if ((paddr + PAGE_SIZE) == next_paddr) { + pages++; + paddr = next_paddr; + continue; + } + break; + } + + return pages; +} + +static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + unsigned long vaddr, vaddr_end, next_vaddr, npages, size; + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct kvm_sev_launch_update_data params; + struct sev_data_launch_update_data *data; + struct page **inpages; + int i, ret, pages; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + vaddr = params.uaddr; + size = params.len; + vaddr_end = vaddr + size; + + /* Lock the user memory. */ + inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1); + if (!inpages) { + ret = -ENOMEM; + goto e_free; + } + + /* + * The LAUNCH_UPDATE command will perform in-place encryption of the + * memory content (i.e it will write the same memory region with C=1). + * It's possible that the cache may contain the data with C=0, i.e., + * unencrypted so invalidate it first. + */ + sev_clflush_pages(inpages, npages); + + for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) { + int offset, len; + + /* + * If the user buffer is not page-aligned, calculate the offset + * within the page. + */ + offset = vaddr & (PAGE_SIZE - 1); + + /* Calculate the number of pages that can be encrypted in one go. */ + pages = get_num_contig_pages(i, inpages, npages); + + len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size); + + data->handle = sev->handle; + data->len = len; + data->address = __sme_page_pa(inpages[i]) + offset; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error); + if (ret) + goto e_unpin; + + size -= len; + next_vaddr = vaddr + len; + } + +e_unpin: + /* content of memory is updated, mark pages dirty */ + for (i = 0; i < npages; i++) { + set_page_dirty_lock(inpages[i]); + mark_page_accessed(inpages[i]); + } + /* unlock the user pages */ + sev_unpin_memory(kvm, inpages, npages); +e_free: + kfree(data); + return ret; +} + +static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct sev_data_launch_measure *data; + struct kvm_sev_launch_measure params; + void *blob = NULL; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* User wants to query the blob length */ + if (!params.len) + goto cmd; + + if (params.uaddr) { + if (params.len > SEV_FW_BLOB_MAX_SIZE) { + ret = -EINVAL; + goto e_free; + } + + if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) { + ret = -EFAULT; + goto e_free; + } + + ret = -ENOMEM; + blob = kmalloc(params.len, GFP_KERNEL); + if (!blob) + goto e_free; + + data->address = __psp_pa(blob); + data->len = params.len; + } + +cmd: + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error); + + /* + * If we query the session length, FW responded with expected data. 
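+ * A zero params.len meant we only asked for the required blob size, + * so skip the error path and copy data->len back via params.len.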
+ */ + if (!params.len) + goto done; + + if (ret) + goto e_free_blob; + + if (blob) { + if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len)) + ret = -EFAULT; + } + +done: + params.len = data->len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) + ret = -EFAULT; +e_free_blob: + kfree(blob); +e_free: + kfree(data); + return ret; +} + +static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct sev_data_launch_finish *data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error); + + kfree(data); + return ret; +} + +static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct kvm_sev_guest_status params; + struct sev_data_guest_status *data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error); + if (ret) + goto e_free; + + params.policy = data->policy; + params.state = data->state; + params.handle = data->handle; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) + ret = -EFAULT; +e_free: + kfree(data); + return ret; +} + +static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src, + unsigned long dst, int size, + int *error, bool enc) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct sev_data_dbg *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + data->dst_addr = dst; + data->src_addr = src; + data->len = size; + + ret = sev_issue_cmd(kvm, + enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT, + data, error); + kfree(data); + return ret; +} + +static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr, + unsigned long dst_paddr, int sz, int *err) +{ + int offset; + + /* + * Its safe to read more than we are asked, caller should ensure that + * destination has enough space. 
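+ * The debug commands expect 16-byte aligned addresses and lengths + * (see the IS_ALIGNED checks in the callers), hence the rounding of + * src_paddr and sz below.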
+ */ + src_paddr = round_down(src_paddr, 16); + offset = src_paddr & 15; + sz = round_up(sz + offset, 16); + + return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false); +} + +static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, + unsigned long __user dst_uaddr, + unsigned long dst_paddr, + int size, int *err) +{ + struct page *tpage = NULL; + int ret, offset; + + /* if inputs are not 16-byte then use intermediate buffer */ + if (!IS_ALIGNED(dst_paddr, 16) || + !IS_ALIGNED(paddr, 16) || + !IS_ALIGNED(size, 16)) { + tpage = (void *)alloc_page(GFP_KERNEL); + if (!tpage) + return -ENOMEM; + + dst_paddr = __sme_page_pa(tpage); + } + + ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err); + if (ret) + goto e_free; + + if (tpage) { + offset = paddr & 15; + if (copy_to_user((void __user *)(uintptr_t)dst_uaddr, + page_address(tpage) + offset, size)) + ret = -EFAULT; + } + +e_free: + if (tpage) + __free_page(tpage); + + return ret; +} + +static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, + unsigned long __user vaddr, + unsigned long dst_paddr, + unsigned long __user dst_vaddr, + int size, int *error) +{ + struct page *src_tpage = NULL; + struct page *dst_tpage = NULL; + int ret, len = size; + + /* If source buffer is not aligned then use an intermediate buffer */ + if (!IS_ALIGNED(vaddr, 16)) { + src_tpage = alloc_page(GFP_KERNEL); + if (!src_tpage) + return -ENOMEM; + + if (copy_from_user(page_address(src_tpage), + (void __user *)(uintptr_t)vaddr, size)) { + __free_page(src_tpage); + return -EFAULT; + } + + paddr = __sme_page_pa(src_tpage); + } + + /* + * If destination buffer or length is not aligned then do read-modify-write: + * - decrypt destination in an intermediate buffer + * - copy the source buffer in an intermediate buffer + * - use the intermediate buffer as source buffer + */ + if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) { + int dst_offset; + + dst_tpage = alloc_page(GFP_KERNEL); + if (!dst_tpage) { + ret = -ENOMEM; + goto e_free; + } + + ret = __sev_dbg_decrypt(kvm, dst_paddr, + __sme_page_pa(dst_tpage), size, error); + if (ret) + goto e_free; + + /* + * If source is kernel buffer then use memcpy() otherwise + * copy_from_user(). 
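+ * (A kernel source buffer here means the unaligned user source was + * already bounced into src_tpage above.)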
+ */ + dst_offset = dst_paddr & 15; + + if (src_tpage) + memcpy(page_address(dst_tpage) + dst_offset, + page_address(src_tpage), size); + else { + if (copy_from_user(page_address(dst_tpage) + dst_offset, + (void __user *)(uintptr_t)vaddr, size)) { + ret = -EFAULT; + goto e_free; + } + } + + paddr = __sme_page_pa(dst_tpage); + dst_paddr = round_down(dst_paddr, 16); + len = round_up(size, 16); + } + + ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true); + +e_free: + if (src_tpage) + __free_page(src_tpage); + if (dst_tpage) + __free_page(dst_tpage); + return ret; +} + +static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) +{ + unsigned long vaddr, vaddr_end, next_vaddr; + unsigned long dst_vaddr, dst_vaddr_end; + struct page **src_p, **dst_p; + struct kvm_sev_dbg debug; + unsigned long n; + int ret, size; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) + return -EFAULT; + + vaddr = debug.src_uaddr; + size = debug.len; + vaddr_end = vaddr + size; + dst_vaddr = debug.dst_uaddr; + dst_vaddr_end = dst_vaddr + size; + + for (; vaddr < vaddr_end; vaddr = next_vaddr) { + int len, s_off, d_off; + + /* lock userspace source and destination page */ + src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0); + if (!src_p) + return -EFAULT; + + dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1); + if (!dst_p) { + sev_unpin_memory(kvm, src_p, n); + return -EFAULT; + } + + /* + * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the + * memory content (i.e it will write the same memory region with C=1). + * It's possible that the cache may contain the data with C=0, i.e., + * unencrypted so invalidate it first. + */ + sev_clflush_pages(src_p, 1); + sev_clflush_pages(dst_p, 1); + + /* + * Since user buffer may not be page aligned, calculate the + * offset within the page. + */ + s_off = vaddr & ~PAGE_MASK; + d_off = dst_vaddr & ~PAGE_MASK; + len = min_t(size_t, (PAGE_SIZE - s_off), size); + + if (dec) + ret = __sev_dbg_decrypt_user(kvm, + __sme_page_pa(src_p[0]) + s_off, + dst_vaddr, + __sme_page_pa(dst_p[0]) + d_off, + len, &argp->error); + else + ret = __sev_dbg_encrypt_user(kvm, + __sme_page_pa(src_p[0]) + s_off, + vaddr, + __sme_page_pa(dst_p[0]) + d_off, + dst_vaddr, + len, &argp->error); + + sev_unpin_memory(kvm, src_p, 1); + sev_unpin_memory(kvm, dst_p, 1); + + if (ret) + goto err; + + next_vaddr = vaddr + len; + dst_vaddr = dst_vaddr + len; + size -= len; + } +err: + return ret; +} + +static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct sev_data_launch_secret *data; + struct kvm_sev_launch_secret params; + struct page **pages; + void *blob, *hdr; + unsigned long n; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); + if (!pages) + return -ENOMEM; + + /* + * The secret must be copied into contiguous memory region, lets verify + * that userspace memory pages are contiguous before we issue command. 
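+ * get_num_contig_pages() must count all n pinned pages as a single + * physically contiguous run, otherwise we fail with -EINVAL.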
+ */ + if (get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_unpin_memory; + + blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto e_free; + } + + data->trans_address = __psp_pa(blob); + data->trans_len = params.trans_len; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) { + ret = PTR_ERR(hdr); + goto e_free_blob; + } + data->trans_address = __psp_pa(blob); + data->trans_len = params.trans_len; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); + + kfree(hdr); + +e_free_blob: + kfree(blob); +e_free: + kfree(data); +e_unpin_memory: + sev_unpin_memory(kvm, pages, n); + return ret; +} + +static int svm_mem_enc_op(struct kvm *kvm, void __user *argp) +{ + struct kvm_sev_cmd sev_cmd; + int r; + + if (!svm_sev_enabled()) + return -ENOTTY; + + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) + return -EFAULT; + + mutex_lock(&kvm->lock); + + switch (sev_cmd.id) { + case KVM_SEV_INIT: + r = sev_guest_init(kvm, &sev_cmd); + break; + case KVM_SEV_LAUNCH_START: + r = sev_launch_start(kvm, &sev_cmd); + break; + case KVM_SEV_LAUNCH_UPDATE_DATA: + r = sev_launch_update_data(kvm, &sev_cmd); + break; + case KVM_SEV_LAUNCH_MEASURE: + r = sev_launch_measure(kvm, &sev_cmd); + break; + case KVM_SEV_LAUNCH_FINISH: + r = sev_launch_finish(kvm, &sev_cmd); + break; + case KVM_SEV_GUEST_STATUS: + r = sev_guest_status(kvm, &sev_cmd); + break; + case KVM_SEV_DBG_DECRYPT: + r = sev_dbg_crypt(kvm, &sev_cmd, true); + break; + case KVM_SEV_DBG_ENCRYPT: + r = sev_dbg_crypt(kvm, &sev_cmd, false); + break; + case KVM_SEV_LAUNCH_SECRET: + r = sev_launch_secret(kvm, &sev_cmd); + break; + default: + r = -EINVAL; + goto out; + } + + if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) + r = -EFAULT; + +out: + mutex_unlock(&kvm->lock); + return r; +} + +static int svm_register_enc_region(struct kvm *kvm, + struct kvm_enc_region *range) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct enc_region *region; + int ret = 0; + + if (!sev_guest(kvm)) + return -ENOTTY; + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return -ENOMEM; + + region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); + if (!region->pages) { + ret = -ENOMEM; + goto e_free; + } + + /* + * The guest may change the memory encryption attribute from C=0 -> C=1 + * or vice versa for this memory range. Lets make sure caches are + * flushed to ensure that guest data gets written into memory with + * correct C-bit. 
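+ * sev_clflush_pages() below does that by CLFLUSHing every pinned + * page via clflush_cache_range().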
+ */ + sev_clflush_pages(region->pages, region->npages); + + region->uaddr = range->addr; + region->size = range->size; + + mutex_lock(&kvm->lock); + list_add_tail(®ion->list, &sev->regions_list); + mutex_unlock(&kvm->lock); + + return ret; + +e_free: + kfree(region); + return ret; +} + +static struct enc_region * +find_enc_region(struct kvm *kvm, struct kvm_enc_region *range) +{ + struct kvm_sev_info *sev = &kvm->arch.sev_info; + struct list_head *head = &sev->regions_list; + struct enc_region *i; + + list_for_each_entry(i, head, list) { + if (i->uaddr == range->addr && + i->size == range->size) + return i; + } + + return NULL; +} + + +static int svm_unregister_enc_region(struct kvm *kvm, + struct kvm_enc_region *range) +{ + struct enc_region *region; + int ret; + + mutex_lock(&kvm->lock); + + if (!sev_guest(kvm)) { + ret = -ENOTTY; + goto failed; + } + + region = find_enc_region(kvm, range); + if (!region) { + ret = -EINVAL; + goto failed; + } + + __unregister_enc_region_locked(kvm, region); + + mutex_unlock(&kvm->lock); + return 0; + +failed: + mutex_unlock(&kvm->lock); + return ret; +} + static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, @@ -5653,7 +6812,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .vcpu_reset = svm_vcpu_reset, .vm_init = avic_vm_init, - .vm_destroy = avic_vm_destroy, + .vm_destroy = svm_vm_destroy, .prepare_guest_switch = svm_prepare_guest_switch, .vcpu_load = svm_vcpu_load, @@ -5713,6 +6872,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .load_eoi_exitmap = svm_load_eoi_exitmap, .hwapic_irr_update = svm_hwapic_irr_update, .hwapic_isr_update = svm_hwapic_isr_update, + .sync_pir_to_irr = kvm_lapic_find_highest_irr, .apicv_post_state_restore = avic_post_state_restore, .set_tss_addr = svm_set_tss_addr, @@ -5729,6 +6889,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .invpcid_supported = svm_invpcid_supported, .mpx_supported = svm_mpx_supported, .xsaves_supported = svm_xsaves_supported, + .umip_emulated = svm_umip_emulated, .set_supported_cpuid = svm_set_supported_cpuid, @@ -5752,6 +6913,10 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .pre_enter_smm = svm_pre_enter_smm, .pre_leave_smm = svm_pre_leave_smm, .enable_smi_window = enable_smi_window, + + .mem_enc_op = svm_mem_enc_op, + .mem_enc_reg_region = svm_register_enc_region, + .mem_enc_unreg_region = svm_unregister_enc_region, }; static int __init svm_init(void) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index bee4c49f6dd08..3dec126aa3022 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -418,6 +418,12 @@ struct __packed vmcs12 { */ #define VMCS12_SIZE 0x1000 +/* + * VMCS12_MAX_FIELD_INDEX is the highest index value used in any + * supported VMCS12 field encoding. + */ +#define VMCS12_MAX_FIELD_INDEX 0x17 + /* * The nested_vmx structure is part of vcpu_vmx, and holds information we need * for correct emulation of VMX (i.e., nested VMX) on this vcpu. @@ -441,6 +447,7 @@ struct nested_vmx { * data hold by vmcs12 */ bool sync_shadow_vmcs; + bool dirty_vmcs12; bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ @@ -664,6 +671,8 @@ struct vcpu_vmx { u32 host_pkru; + unsigned long host_debugctlmsr; + /* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. 
FEATURE_CONTROL_LOCKED is always included @@ -692,67 +701,24 @@ static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) return &(to_vmx(vcpu)->pi_desc); } +#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n))))) #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x) -#define FIELD(number, name) [number] = VMCS12_OFFSET(name) -#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \ - [number##_HIGH] = VMCS12_OFFSET(name)+4 +#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name) +#define FIELD64(number, name) \ + FIELD(number, name), \ + [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32) -static unsigned long shadow_read_only_fields[] = { - /* - * We do NOT shadow fields that are modified when L0 - * traps and emulates any vmx instruction (e.g. VMPTRLD, - * VMXON...) executed by L1. - * For example, VM_INSTRUCTION_ERROR is read - * by L1 if a vmx instruction fails (part of the error path). - * Note the code assumes this logic. If for some reason - * we start shadowing these fields then we need to - * force a shadow sync when L0 emulates vmx instructions - * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified - * by nested_vmx_failValid) - */ - VM_EXIT_REASON, - VM_EXIT_INTR_INFO, - VM_EXIT_INSTRUCTION_LEN, - IDT_VECTORING_INFO_FIELD, - IDT_VECTORING_ERROR_CODE, - VM_EXIT_INTR_ERROR_CODE, - EXIT_QUALIFICATION, - GUEST_LINEAR_ADDRESS, - GUEST_PHYSICAL_ADDRESS +static u16 shadow_read_only_fields[] = { +#define SHADOW_FIELD_RO(x) x, +#include "vmx_shadow_fields.h" }; static int max_shadow_read_only_fields = ARRAY_SIZE(shadow_read_only_fields); -static unsigned long shadow_read_write_fields[] = { - TPR_THRESHOLD, - GUEST_RIP, - GUEST_RSP, - GUEST_CR0, - GUEST_CR3, - GUEST_CR4, - GUEST_INTERRUPTIBILITY_INFO, - GUEST_RFLAGS, - GUEST_CS_SELECTOR, - GUEST_CS_AR_BYTES, - GUEST_CS_LIMIT, - GUEST_CS_BASE, - GUEST_ES_BASE, - GUEST_BNDCFGS, - CR0_GUEST_HOST_MASK, - CR0_READ_SHADOW, - CR4_READ_SHADOW, - TSC_OFFSET, - EXCEPTION_BITMAP, - CPU_BASED_VM_EXEC_CONTROL, - VM_ENTRY_EXCEPTION_ERROR_CODE, - VM_ENTRY_INTR_INFO_FIELD, - VM_ENTRY_INSTRUCTION_LEN, - VM_ENTRY_EXCEPTION_ERROR_CODE, - HOST_FS_BASE, - HOST_GS_BASE, - HOST_FS_SELECTOR, - HOST_GS_SELECTOR +static u16 shadow_read_write_fields[] = { +#define SHADOW_FIELD_RW(x) x, +#include "vmx_shadow_fields.h" }; static int max_shadow_read_write_fields = ARRAY_SIZE(shadow_read_write_fields); @@ -905,13 +871,17 @@ static inline short vmcs_field_to_offset(unsigned long field) { const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table); unsigned short offset; + unsigned index; + + if (field >> 15) + return -ENOENT; - BUILD_BUG_ON(size > SHRT_MAX); - if (field >= size) + index = ROL16(field, 6); + if (index >= size) return -ENOENT; - field = array_index_nospec(field, size); - offset = vmcs_field_to_offset_table[field]; + index = array_index_nospec(index, size); + offset = vmcs_field_to_offset_table[index]; if (offset == 0) return -ENOENT; return offset; @@ -957,8 +927,6 @@ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); enum { - VMX_IO_BITMAP_A, - VMX_IO_BITMAP_B, VMX_VMREAD_BITMAP, VMX_VMWRITE_BITMAP, VMX_BITMAP_NR @@ -966,8 +934,6 @@ enum { static unsigned long *vmx_bitmap[VMX_BITMAP_NR]; -#define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A]) -#define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B]) #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP]) #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP]) @@ -2373,6 
+2339,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vmx_vcpu_pi_load(vcpu, cpu); vmx->host_pkru = read_pkru(); + vmx->host_debugctlmsr = get_debugctlmsr(); } static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) @@ -2930,7 +2897,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1); /* highest index: VMX_PREEMPTION_TIMER_VALUE */ - vmx->nested.nested_vmx_vmcs_enum = 0x2e; + vmx->nested.nested_vmx_vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; } /* @@ -3266,6 +3233,7 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, */ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { + struct vcpu_vmx *vmx = to_vmx(vcpu); struct shared_msr_entry *msr; switch (msr_info->index) { @@ -3277,8 +3245,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vmcs_readl(GUEST_GS_BASE); break; case MSR_KERNEL_GS_BASE: - vmx_load_host_state(to_vmx(vcpu)); - msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base; + vmx_load_host_state(vmx); + msr_info->data = vmx->msr_guest_kernel_gs_base; break; #endif case MSR_EFER: @@ -3318,13 +3286,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; case MSR_IA32_MCG_EXT_CTL: if (!msr_info->host_initiated && - !(to_vmx(vcpu)->msr_ia32_feature_control & + !(vmx->msr_ia32_feature_control & FEATURE_CONTROL_LMCE)) return 1; msr_info->data = vcpu->arch.mcg_ext_ctl; break; case MSR_IA32_FEATURE_CONTROL: - msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control; + msr_info->data = vmx->msr_ia32_feature_control; break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) @@ -3341,7 +3309,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; /* Otherwise falls through */ default: - msr = find_msr_entry(to_vmx(vcpu), msr_info->index); + msr = find_msr_entry(vmx, msr_info->index); if (msr) { msr_info->data = msr->data; break; @@ -3727,7 +3695,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) #endif CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | - CPU_BASED_USE_IO_BITMAPS | + CPU_BASED_UNCOND_IO_EXITING | CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_INVLPG_EXITING | @@ -3757,6 +3725,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_PAUSE_LOOP_EXITING | + SECONDARY_EXEC_DESC | SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_ENABLE_INVPCID | SECONDARY_EXEC_APIC_REGISTER_VIRT | @@ -3982,17 +3951,17 @@ static void free_kvm_area(void) } } -enum vmcs_field_type { - VMCS_FIELD_TYPE_U16 = 0, - VMCS_FIELD_TYPE_U64 = 1, - VMCS_FIELD_TYPE_U32 = 2, - VMCS_FIELD_TYPE_NATURAL_WIDTH = 3 +enum vmcs_field_width { + VMCS_FIELD_WIDTH_U16 = 0, + VMCS_FIELD_WIDTH_U64 = 1, + VMCS_FIELD_WIDTH_U32 = 2, + VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3 }; -static inline int vmcs_field_type(unsigned long field) +static inline int vmcs_field_width(unsigned long field) { if (0x1 & field) /* the *_HIGH fields are all 32 bit */ - return VMCS_FIELD_TYPE_U32; + return VMCS_FIELD_WIDTH_U32; return (field >> 13) & 0x3 ; } @@ -4005,43 +3974,66 @@ static void init_vmcs_shadow_fields(void) { int i, j; - /* No checks for read only fields yet */ + for (i = j = 0; i < max_shadow_read_only_fields; i++) { + u16 field = shadow_read_only_fields[i]; + if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && + (i + 1 == 
max_shadow_read_only_fields || + shadow_read_only_fields[i + 1] != field + 1)) + pr_err("Missing field from shadow_read_only_field %x\n", + field + 1); + + clear_bit(field, vmx_vmread_bitmap); +#ifdef CONFIG_X86_64 + if (field & 1) + continue; +#endif + if (j < i) + shadow_read_only_fields[j] = field; + j++; + } + max_shadow_read_only_fields = j; for (i = j = 0; i < max_shadow_read_write_fields; i++) { - switch (shadow_read_write_fields[i]) { - case GUEST_BNDCFGS: - if (!kvm_mpx_supported()) + u16 field = shadow_read_write_fields[i]; + if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 && + (i + 1 == max_shadow_read_write_fields || + shadow_read_write_fields[i + 1] != field + 1)) + pr_err("Missing field from shadow_read_write_field %x\n", + field + 1); + + /* + * PML and the preemption timer can be emulated, but the + * processor cannot vmwrite to fields that don't exist + * on bare metal. + */ + switch (field) { + case GUEST_PML_INDEX: + if (!cpu_has_vmx_pml()) + continue; + break; + case VMX_PREEMPTION_TIMER_VALUE: + if (!cpu_has_vmx_preemption_timer()) + continue; + break; + case GUEST_INTR_STATUS: + if (!cpu_has_vmx_apicv()) continue; break; default: break; } + clear_bit(field, vmx_vmwrite_bitmap); + clear_bit(field, vmx_vmread_bitmap); +#ifdef CONFIG_X86_64 + if (field & 1) + continue; +#endif if (j < i) - shadow_read_write_fields[j] = - shadow_read_write_fields[i]; + shadow_read_write_fields[j] = field; j++; } max_shadow_read_write_fields = j; - - /* shadowed fields guest access without vmexit */ - for (i = 0; i < max_shadow_read_write_fields; i++) { - unsigned long field = shadow_read_write_fields[i]; - - clear_bit(field, vmx_vmwrite_bitmap); - clear_bit(field, vmx_vmread_bitmap); - if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) { - clear_bit(field + 1, vmx_vmwrite_bitmap); - clear_bit(field + 1, vmx_vmread_bitmap); - } - } - for (i = 0; i < max_shadow_read_only_fields; i++) { - unsigned long field = shadow_read_only_fields[i]; - - clear_bit(field, vmx_vmread_bitmap); - if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) - clear_bit(field + 1, vmx_vmread_bitmap); - } } static __init int alloc_kvm_area(void) @@ -4254,9 +4246,10 @@ static void exit_lmode(struct kvm_vcpu *vcpu) #endif -static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) +static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid, + bool invalidate_gpa) { - if (enable_ept) { + if (enable_ept && (invalidate_gpa || !enable_vpid)) { if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa)); @@ -4265,15 +4258,15 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid) } } -static void vmx_flush_tlb(struct kvm_vcpu *vcpu) +static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) { - __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); + __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); } static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) { if (enable_ept) - vmx_flush_tlb(vcpu); + vmx_flush_tlb(vcpu, true); } static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) @@ -4471,7 +4464,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) ept_load_pdptrs(vcpu); } - vmx_flush_tlb(vcpu); + vmx_flush_tlb(vcpu, true); vmcs_writel(GUEST_CR3, guest_cr3); } @@ -4488,6 +4481,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) (to_vmx(vcpu)->rmode.vm86_active ? 
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); + if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) { + vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_DESC); + hw_cr4 &= ~X86_CR4_UMIP; + } else + vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, + SECONDARY_EXEC_DESC); + if (cr4 & X86_CR4_VMXE) { /* * To use VMXON (and later other VMX instructions), a guest @@ -5119,11 +5120,6 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1, { int f = sizeof(unsigned long); - if (!cpu_has_vmx_msr_bitmap()) { - WARN_ON(1); - return; - } - /* * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals * have the write-low and read-high bitmap offsets the wrong way round. @@ -5263,7 +5259,8 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); if (max_irr != 256) { vapic_page = kmap(vmx->nested.virtual_apic_page); - __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); + __kvm_apic_update_irr(vmx->nested.pi_desc->pir, + vapic_page, &max_irr); kunmap(vmx->nested.virtual_apic_page); status = vmcs_read16(GUEST_INTR_STATUS); @@ -5323,14 +5320,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu, if (is_guest_mode(vcpu) && vector == vmx->nested.posted_intr_nv) { - /* the PIR and ON have been set by L1. */ - kvm_vcpu_trigger_posted_interrupt(vcpu, true); /* * If a posted intr is not recognized by hardware, * we will accomplish it in the next vmentry. */ vmx->nested.pi_pending = true; kvm_make_request(KVM_REQ_EVENT, vcpu); + /* the PIR and ON have been set by L1. */ + if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true)) + kvm_vcpu_kick(vcpu); return 0; } return -1; @@ -5509,6 +5507,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) struct kvm_vcpu *vcpu = &vmx->vcpu; u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; + if (!cpu_need_virtualize_apic_accesses(vcpu)) exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) @@ -5527,6 +5526,11 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY); exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; + + /* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP, + * in vmx_set_cr4. */ + exec_control &= ~SECONDARY_EXEC_DESC; + /* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD (handle_vmptrld). 
We can NOT enable shadow_vmcs here because we don't have yet @@ -5646,10 +5650,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx) #endif int i; - /* I/O */ - vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); - vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); - if (enable_shadow_vmcs) { vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); } @@ -6304,6 +6304,12 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val) return kvm_set_cr4(vcpu, val); } +static int handle_desc(struct kvm_vcpu *vcpu) +{ + WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); + return emulate_instruction(vcpu, 0) == EMULATE_DONE; +} + static int handle_cr(struct kvm_vcpu *vcpu) { unsigned long exit_qualification, val; @@ -6760,7 +6766,21 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) if (!is_guest_mode(vcpu) && !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) { trace_kvm_fast_mmio(gpa); - return kvm_skip_emulated_instruction(vcpu); + /* + * Doing kvm_skip_emulated_instruction() depends on undefined + * behavior: Intel's manual doesn't mandate + * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG + * occurs. While on real hardware it was observed to be set, + * other hypervisors (namely Hyper-V) don't set it; we would end + * up advancing IP by some random value. Disable fast mmio when + * running nested and keep it for real hardware in the hope that + * VM_EXIT_INSTRUCTION_LEN will always be set correctly. + */ + if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) + return kvm_skip_emulated_instruction(vcpu); + else + return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, + NULL, 0) == EMULATE_DONE; } ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); @@ -6957,10 +6977,6 @@ static __init int hardware_setup(void) memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); - memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); - - memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); - if (setup_vmcs_config(&vmcs_config) < 0) { r = -EIO; goto out; } @@ -6973,11 +6989,6 @@ static __init int hardware_setup(void) !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) enable_vpid = 0; - if (!cpu_has_vmx_shadow_vmcs()) - enable_shadow_vmcs = 0; - if (enable_shadow_vmcs) - init_vmcs_shadow_fields(); - if (!cpu_has_vmx_ept() || !cpu_has_vmx_ept_4levels() || !cpu_has_vmx_ept_mt_wb() || @@ -7063,6 +7074,11 @@ static __init int hardware_setup(void) kvm_x86_ops->cancel_hv_timer = NULL; } + if (!cpu_has_vmx_shadow_vmcs()) + enable_shadow_vmcs = 0; + if (enable_shadow_vmcs) + init_vmcs_shadow_fields(); + kvm_set_posted_intr_wakeup_handler(wakeup_handler); kvm_mce_cap_supported |= MCG_LMCE_P; @@ -7593,17 +7609,17 @@ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu, p = ((char *)(get_vmcs12(vcpu))) + offset; - switch (vmcs_field_type(field)) { - case VMCS_FIELD_TYPE_NATURAL_WIDTH: + switch (vmcs_field_width(field)) { + case VMCS_FIELD_WIDTH_NATURAL_WIDTH: *ret = *((natural_width *)p); return 0; - case VMCS_FIELD_TYPE_U16: + case VMCS_FIELD_WIDTH_U16: *ret = *((u16 *)p); return 0; - case VMCS_FIELD_TYPE_U32: + case VMCS_FIELD_WIDTH_U32: *ret = *((u32 *)p); return 0; - case VMCS_FIELD_TYPE_U64: + case VMCS_FIELD_WIDTH_U64: *ret = *((u64 *)p); return 0; default: @@ -7620,17 +7636,17 @@ static inline int vmcs12_write_any(struct kvm_vcpu *vcpu, if (offset < 0) return offset; - switch (vmcs_field_type(field)) { - case VMCS_FIELD_TYPE_U16: + switch (vmcs_field_width(field)) { + case VMCS_FIELD_WIDTH_U16: *(u16 *)p =
field_value; return 0; - case VMCS_FIELD_TYPE_U32: + case VMCS_FIELD_WIDTH_U32: *(u32 *)p = field_value; return 0; - case VMCS_FIELD_TYPE_U64: + case VMCS_FIELD_WIDTH_U64: *(u64 *)p = field_value; return 0; - case VMCS_FIELD_TYPE_NATURAL_WIDTH: + case VMCS_FIELD_WIDTH_NATURAL_WIDTH: *(natural_width *)p = field_value; return 0; default: @@ -7646,7 +7662,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) unsigned long field; u64 field_value; struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; - const unsigned long *fields = shadow_read_write_fields; + const u16 *fields = shadow_read_write_fields; const int num_fields = max_shadow_read_write_fields; preempt_disable(); @@ -7655,23 +7671,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) for (i = 0; i < num_fields; i++) { field = fields[i]; - switch (vmcs_field_type(field)) { - case VMCS_FIELD_TYPE_U16: - field_value = vmcs_read16(field); - break; - case VMCS_FIELD_TYPE_U32: - field_value = vmcs_read32(field); - break; - case VMCS_FIELD_TYPE_U64: - field_value = vmcs_read64(field); - break; - case VMCS_FIELD_TYPE_NATURAL_WIDTH: - field_value = vmcs_readl(field); - break; - default: - WARN_ON(1); - continue; - } + field_value = __vmcs_readl(field); vmcs12_write_any(&vmx->vcpu, field, field_value); } @@ -7683,7 +7683,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) { - const unsigned long *fields[] = { + const u16 *fields[] = { shadow_read_write_fields, shadow_read_only_fields }; @@ -7702,24 +7702,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) for (i = 0; i < max_fields[q]; i++) { field = fields[q][i]; vmcs12_read_any(&vmx->vcpu, field, &field_value); - - switch (vmcs_field_type(field)) { - case VMCS_FIELD_TYPE_U16: - vmcs_write16(field, (u16)field_value); - break; - case VMCS_FIELD_TYPE_U32: - vmcs_write32(field, (u32)field_value); - break; - case VMCS_FIELD_TYPE_U64: - vmcs_write64(field, (u64)field_value); - break; - case VMCS_FIELD_TYPE_NATURAL_WIDTH: - vmcs_writel(field, (long)field_value); - break; - default: - WARN_ON(1); - break; - } + __vmcs_writel(field, field_value); } } @@ -7788,8 +7771,10 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) { unsigned long field; gva_t gva; + struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); + /* The value to write might be 32 or 64 bits, depending on L1's long * mode, and eventually we need to write that into a field of several * possible lengths. The code below first zero-extends the value to 64 @@ -7832,6 +7817,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } + switch (field) { +#define SHADOW_FIELD_RW(x) case x: +#include "vmx_shadow_fields.h" + /* + * The fields that can be updated by L1 without a vmexit are + * always updated in the vmcs02, the others go down the slow + * path of prepare_vmcs02. 
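The switch that follows this comment is generated with an X-macro: including vmx_shadow_fields.h under a local SHADOW_FIELD_RW definition expands the shadow-field list into case labels. A self-contained sketch of the same technique; the field list and names here are illustrative, not the kernel's:

```c
#include <stdbool.h>

/* Stand-in for vmx_shadow_fields.h: one list, expanded several ways. */
#define SHADOW_FIELDS(F) \
	F(GUEST_RIP)     \
	F(GUEST_RSP)     \
	F(GUEST_CR0)

enum field {
#define AS_ENUM(x) x,
	SHADOW_FIELDS(AS_ENUM)
#undef AS_ENUM
};

/* Shadowed fields take the fast path; everything else dirties vmcs12. */
static bool is_shadowed(enum field f)
{
	switch (f) {
#define AS_CASE(x) case x:
	SHADOW_FIELDS(AS_CASE)
#undef AS_CASE
		return true;
	default:
		return false;
	}
}
```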
+ */ + break; + default: + vmx->nested.dirty_vmcs12 = true; + break; + } + nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); } @@ -7846,6 +7845,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) __pa(vmx->vmcs01.shadow_vmcs)); vmx->nested.sync_shadow_vmcs = true; } + vmx->nested.dirty_vmcs12 = true; } /* Emulate the VMPTRLD instruction */ @@ -8066,7 +8066,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return kvm_skip_emulated_instruction(vcpu); } - __vmx_flush_tlb(vcpu, vmx->nested.vpid02); + __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); nested_vmx_succeed(vcpu); return kvm_skip_emulated_instruction(vcpu); @@ -8260,6 +8260,8 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { [EXIT_REASON_XSETBV] = handle_xsetbv, [EXIT_REASON_TASK_SWITCH] = handle_task_switch, [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, + [EXIT_REASON_GDTR_IDTR] = handle_desc, + [EXIT_REASON_LDTR_TR] = handle_desc, [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, @@ -9069,36 +9071,23 @@ static void vmx_set_rvi(int vector) static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) { - if (!is_guest_mode(vcpu)) { - vmx_set_rvi(max_irr); - return; - } - - if (max_irr == -1) - return; - - /* - * In guest mode. If a vmexit is needed, vmx_check_nested_events - * handles it. - */ - if (nested_exit_on_intr(vcpu)) - return; - /* - * Else, fall back to pre-APICv interrupt injection since L2 - * is run without virtual interrupt delivery. + * When running L2, updating RVI is only relevant when + * vmcs12 virtual-interrupt-delivery is enabled. + * However, it can be enabled only when L1 also + * intercepts external-interrupts, and in that case + * we should not update vmcs02 RVI but instead intercept the + * interrupt. Therefore, do nothing when running L2. */ - if (!kvm_event_needs_reinjection(vcpu) && - vmx_interrupt_allowed(vcpu)) { - kvm_queue_interrupt(vcpu, max_irr, false); - vmx_inject_irq(vcpu); - } + if (!is_guest_mode(vcpu)) + vmx_set_rvi(max_irr); } static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); int max_irr; + bool max_irr_updated; WARN_ON(!vcpu->arch.apicv_active); if (pi_test_on(&vmx->pi_desc)) { @@ -9108,7 +9097,23 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu) * But on x86 this is just a compiler barrier anyway. */ smp_mb__after_atomic(); - max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); + max_irr_updated = + kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); + + /* + * If we are running L2 and L1 has a new pending interrupt + * which can be injected, we should re-evaluate + * what should be done with this new L1 interrupt. + * If L1 intercepts external-interrupts, we should + * exit from L2 to L1. Otherwise, the interrupt should be + * delivered directly to L2.
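For reference, a minimal userspace sketch of what the kvm_apic_update_irr() call above does conceptually: drain the 256-bit posted-interrupt request descriptor into the IRR and report the highest pending vector. The types, the atomic builtin, and the names are stand-ins, not the kernel's:

```c
#include <stdbool.h>
#include <stdint.h>

#define PIR_WORDS (256 / 64)

static bool pir_fold(uint64_t pir[PIR_WORDS], uint64_t irr[PIR_WORDS],
		     int *max_irr)
{
	bool updated = false;
	int w, bit;

	*max_irr = -1;
	for (w = PIR_WORDS - 1; w >= 0; w--) {
		/* Atomically claim this word so the poster never races us. */
		uint64_t val = __atomic_exchange_n(&pir[w], 0,
						   __ATOMIC_SEQ_CST);

		if (val)
			updated = true;
		irr[w] |= val;
		if (*max_irr < 0 && irr[w])
			for (bit = 63; bit >= 0; bit--)
				if (irr[w] & (1ULL << bit)) {
					*max_irr = w * 64 + bit;
					break;
				}
	}
	return updated;
}
```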
+ */ + if (is_guest_mode(vcpu) && max_irr_updated) { + if (nested_exit_on_intr(vcpu)) + kvm_vcpu_exiting_guest_mode(vcpu); + else + kvm_make_request(KVM_REQ_EVENT, vcpu); + } } else { max_irr = kvm_lapic_find_highest_irr(vcpu); } @@ -9223,6 +9228,12 @@ static bool vmx_xsaves_supported(void) SECONDARY_EXEC_XSAVES; } +static bool vmx_umip_emulated(void) +{ + return vmcs_config.cpu_based_2nd_exec_ctrl & + SECONDARY_EXEC_DESC; +} + static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) { u32 exit_intr_info; @@ -9378,7 +9389,7 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long debugctlmsr, cr3, cr4; + unsigned long cr3, cr4; /* Record the guest's net vcpu time for enforced NMI injections. */ if (unlikely(!enable_vnmi && @@ -9431,7 +9442,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) __write_pkru(vcpu->arch.pkru); atomic_switch_perf_msrs(vmx); - debugctlmsr = get_debugctlmsr(); vmx_arm_hv_timer(vcpu); @@ -9587,8 +9597,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmexit_fill_RSB(); /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ - if (debugctlmsr) - update_debugctlmsr(debugctlmsr); + if (vmx->host_debugctlmsr) + update_debugctlmsr(vmx->host_debugctlmsr); #ifndef CONFIG_X86_64 /* @@ -9668,10 +9678,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - int r; - r = vcpu_load(vcpu); - BUG_ON(r); + vcpu_load(vcpu); vmx_switch_vmcs(vcpu, &vmx->vmcs01); free_nested(vmx); vcpu_put(vcpu); @@ -9871,7 +9879,8 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl) u32 mask = SECONDARY_EXEC_SHADOW_VMCS | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_DESC; u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); @@ -10037,8 +10046,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, } } -static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12); +static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12); static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) @@ -10127,10 +10136,9 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu, (unsigned long)(vmcs12->posted_intr_desc_addr & (PAGE_SIZE - 1))); } - if (cpu_has_vmx_msr_bitmap() && - nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) && - nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) - ; + if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) + vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, + CPU_BASED_USE_MSR_BITMAPS); else vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_USE_MSR_BITMAPS); @@ -10199,8 +10207,8 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu, * Merge L0's and L1's MSR bitmap, return false to indicate that * we do not use the hardware. */ -static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, - struct vmcs12 *vmcs12) +static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) { int msr; struct page *page; @@ -10219,8 +10227,13 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, * updated to reflect this when L1 (or its L2s) actually write to * the MSR. 
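The msr_write_intercepted_l01() checks above consult the VMX MSR bitmap. A hedged sketch of that 4 KiB layout: four 1 KiB quadrants (read-low, read-high, write-low, write-high), one bit per MSR, bit set meaning intercept. Offsets follow the SDM; the helper name is illustrative:

```c
#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

static bool msr_write_intercepted(unsigned long *bitmap, uint32_t msr)
{
	if (msr <= 0x1fff)	/* low range: 0x00000000 - 0x00001fff */
		return !!(bitmap[(0x800 / sizeof(long)) + msr / BITS_PER_LONG] &
			  (1UL << (msr % BITS_PER_LONG)));
	if (msr >= 0xc0000000 && msr <= 0xc0001fff) {	/* high range */
		uint32_t idx = msr & 0x1fff;

		return !!(bitmap[(0xc00 / sizeof(long)) + idx / BITS_PER_LONG] &
			  (1UL << (idx % BITS_PER_LONG)));
	}
	return true;	/* MSRs outside both ranges always exit */
}
```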
*/ - bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); - bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); + bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD); + bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL); + + /* Nothing to do if the MSR bitmap is not in use. */ + if (!cpu_has_vmx_msr_bitmap() || + !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) + return false; if (!nested_cpu_has_virt_x2apic_mode(vmcs12) && !pred_cmd && !spec_ctrl) @@ -10229,32 +10242,41 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap); if (is_error_page(page)) return false; - msr_bitmap_l1 = (unsigned long *)kmap(page); - memset(msr_bitmap_l0, 0xff, PAGE_SIZE); + msr_bitmap_l1 = (unsigned long *)kmap(page); + if (nested_cpu_has_apic_reg_virt(vmcs12)) { + /* + * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it + * just lets the processor take the value from the virtual-APIC page; + * take those 256 bits directly from the L1 bitmap. + */ + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap_l0[word] = msr_bitmap_l1[word]; + msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; + } + } else { + for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { + unsigned word = msr / BITS_PER_LONG; + msr_bitmap_l0[word] = ~0; + msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0; + } + } - if (nested_cpu_has_virt_x2apic_mode(vmcs12)) { - if (nested_cpu_has_apic_reg_virt(vmcs12)) - for (msr = 0x800; msr <= 0x8ff; msr++) - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - msr, MSR_TYPE_R); + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_TASKPRI), + MSR_TYPE_W); + if (nested_cpu_has_vid(vmcs12)) { nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - APIC_BASE_MSR + (APIC_TASKPRI >> 4), - MSR_TYPE_R | MSR_TYPE_W); - - if (nested_cpu_has_vid(vmcs12)) { - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - APIC_BASE_MSR + (APIC_EOI >> 4), - MSR_TYPE_W); - nested_vmx_disable_intercept_for_msr( - msr_bitmap_l1, msr_bitmap_l0, - APIC_BASE_MSR + (APIC_SELF_IPI >> 4), - MSR_TYPE_W); - } + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_EOI), + MSR_TYPE_W); + nested_vmx_disable_intercept_for_msr( + msr_bitmap_l1, msr_bitmap_l0, + X2APIC_MSR(APIC_SELF_IPI), + MSR_TYPE_W); } if (spec_ctrl) @@ -10534,25 +10556,12 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne return 0; } -/* - * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested - * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it - * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 - * guest in a way that will both be appropriate to L1's requests, and our - * needs. In addition to modifying the active vmcs (which is vmcs02), this - * function also has additional necessary side-effects, like setting various - * vcpu->arch fields. - * Returns 0 on success, 1 on failure. Invalid state exit qualification code - * is assigned to entry_failure_code on failure. 
- */ -static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, - bool from_vmentry, u32 *entry_failure_code) +static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + bool from_vmentry) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u32 exec_control, vmcs12_exec_ctrl; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); - vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); @@ -10560,7 +10569,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); - vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); @@ -10570,15 +10578,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); - vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); - vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); - vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); @@ -10588,6 +10593,125 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); + vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs12->guest_pending_dbg_exceptions); + vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); + vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); + + if (nested_cpu_has_xsaves(vmcs12)) + vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); + vmcs_write64(VMCS_LINK_POINTER, -1ull); + + if (cpu_has_vmx_posted_intr()) + vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); + + /* + * Whether page-faults are trapped is determined by a combination of + * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. + * If enable_ept, L0 doesn't care about page faults and we should + * set all of these to L1's desires. However, if !enable_ept, L0 does + * care about (at least some) page faults, and because it is not easy + * (if at all possible?) to merge L0 and L1's desires, we simply ask + * to exit on each and every L2 page fault. This is done by setting + * MASK=MATCH=0 and (see below) EB.PF=1. 
+ * Note that below we don't need special code to set EB.PF beyond the + * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, + * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when + * !enable_ept, EB.PF is 1, so the "or" will always be 1. + */ + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, + enable_ept ? vmcs12->page_fault_error_code_mask : 0); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, + enable_ept ? vmcs12->page_fault_error_code_match : 0); + + /* All VMFUNCs are currently emulated through L0 vmexits. */ + if (cpu_has_vmx_vmfunc()) + vmcs_write64(VM_FUNCTION_CONTROL, 0); + + if (cpu_has_vmx_apicv()) { + vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); + vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); + vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); + vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); + } + + /* + * Set host-state according to L0's settings (vmcs12 is irrelevant here) + * Some constant fields are set here by vmx_set_constant_host_state(). + * Other fields are different per CPU, and will be set later when + * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. + */ + vmx_set_constant_host_state(vmx); + + /* + * Set the MSR load/store lists to match L0's settings. + */ + vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); + vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); + vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); + + set_cr4_guest_host_mask(vmx); + + if (vmx_mpx_supported()) + vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); + + if (enable_vpid) { + if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); + else + vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); + } + + /* + * L1 may access the L2's PDPTR, so save them to construct vmcs12 + */ + if (enable_ept) { + vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); + vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); + vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); + vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); + } + + if (cpu_has_vmx_msr_bitmap()) + vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); +} + +/* + * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested + * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it + * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 + * guest in a way that will both be appropriate to L1's requests, and our + * needs. In addition to modifying the active vmcs (which is vmcs02), this + * function also has additional necessary side-effects, like setting various + * vcpu->arch fields. + * Returns 0 on success, 1 on failure. Invalid state exit qualification code + * is assigned to entry_failure_code on failure. + */ +static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + bool from_vmentry, u32 *entry_failure_code) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 exec_control, vmcs12_exec_ctrl; + + /* + * First, the fields that are shadowed. This must be kept in sync + * with vmx_shadow_fields.h. 
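The mask/match rule described in the comment above reduces to a one-line predicate: a #PF leads to a vmexit iff the PFEC mask/match test agrees with the exception bitmap's PF bit, so MASK = MATCH = 0 plus EB.PF = 1 exits on every page fault. A sketch with illustrative names:

```c
#include <stdbool.h>
#include <stdint.h>

static bool pf_causes_vmexit(uint32_t error_code, uint32_t pfec_mask,
			     uint32_t pfec_match, bool eb_pf)
{
	bool match = (error_code & pfec_mask) == pfec_match;

	return eb_pf == match;
}
```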
+ */ + + vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); + vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); + vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); + vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); + vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); + + /* + * Not in vmcs02: GUEST_PML_INDEX, HOST_FS_SELECTOR, HOST_GS_SELECTOR, + * HOST_FS_BASE, HOST_GS_BASE. + */ + if (from_vmentry && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); @@ -10610,16 +10734,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } else { vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); } - vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); vmx_set_rflags(vcpu, vmcs12->guest_rflags); - vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, - vmcs12->guest_pending_dbg_exceptions); - vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); - vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); - - if (nested_cpu_has_xsaves(vmcs12)) - vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); - vmcs_write64(VMCS_LINK_POINTER, -1ull); exec_control = vmcs12->pin_based_vm_exec_control; @@ -10633,7 +10748,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_posted_intr(vmcs12)) { vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; vmx->nested.pi_pending = false; - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); } else { exec_control &= ~PIN_BASED_POSTED_INTR; } @@ -10644,25 +10758,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_preemption_timer(vmcs12)) vmx_start_preemption_timer(vcpu); - /* - * Whether page-faults are trapped is determined by a combination of - * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. - * If enable_ept, L0 doesn't care about page faults and we should - * set all of these to L1's desires. However, if !enable_ept, L0 does - * care about (at least some) page faults, and because it is not easy - * (if at all possible?) to merge L0 and L1's desires, we simply ask - * to exit on each and every L2 page fault. This is done by setting - * MASK=MATCH=0 and (see below) EB.PF=1. - * Note that below we don't need special code to set EB.PF beyond the - * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, - * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when - * !enable_ept, EB.PF is 1, so the "or" will always be 1. - */ - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, - enable_ept ? vmcs12->page_fault_error_code_mask : 0); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, - enable_ept ? vmcs12->page_fault_error_code_match : 0); - if (cpu_has_secondary_exec_ctrls()) { exec_control = vmx->secondary_exec_control; @@ -10681,22 +10776,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, exec_control |= vmcs12_exec_ctrl; } - /* All VMFUNCs are currently emulated through L0 vmexits. 
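Around here prepare_vmcs02 merges secondary execution controls for vmcs02. A minimal sketch of the shape of that merge, assuming L1's value was already validated against the allowed-1 settings; parameter names are illustrative:

```c
#include <stdint.h>

static uint32_t merge_secondary_controls(uint32_t l0_ctls,
					 uint32_t l0_owned_mask,
					 uint32_t vmcs12_ctls)
{
	/* Start from L0's controls, drop bits L0 must own, add L1's. */
	return (l0_ctls & ~l0_owned_mask) | vmcs12_ctls;
}
```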
*/ - if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC) - vmcs_write64(VM_FUNCTION_CONTROL, 0); - - if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) { - vmcs_write64(EOI_EXIT_BITMAP0, - vmcs12->eoi_exit_bitmap0); - vmcs_write64(EOI_EXIT_BITMAP1, - vmcs12->eoi_exit_bitmap1); - vmcs_write64(EOI_EXIT_BITMAP2, - vmcs12->eoi_exit_bitmap2); - vmcs_write64(EOI_EXIT_BITMAP3, - vmcs12->eoi_exit_bitmap3); + if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) vmcs_write16(GUEST_INTR_STATUS, vmcs12->guest_intr_status); - } /* * Write an illegal value to APIC_ACCESS_ADDR. Later, @@ -10709,24 +10791,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } - - /* - * Set host-state according to L0's settings (vmcs12 is irrelevant here) - * Some constant fields are set here by vmx_set_constant_host_state(). - * Other fields are different per CPU, and will be set later when - * vmx_vcpu_load() is called, and when vmx_save_host_state() is called. - */ - vmx_set_constant_host_state(vmx); - - /* - * Set the MSR load/store lists to match L0's settings. - */ - vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); - vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); - vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr); - vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); - /* * HOST_RSP is normally set correctly in vmx_vcpu_run() just before * entry, but only if the current (host) sp changed from the value @@ -10758,8 +10822,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } /* - * Merging of IO bitmap not currently supported. - * Rather, exit every time. + * A vmexit (to either L1 hypervisor or L0 userspace) is always needed + * for I/O port accesses. */ exec_control &= ~CPU_BASED_USE_IO_BITMAPS; exec_control |= CPU_BASED_UNCOND_IO_EXITING; @@ -10796,12 +10860,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); } - set_cr4_guest_host_mask(vmx); - - if (from_vmentry && - vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) - vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset + vmcs12->tsc_offset); @@ -10810,9 +10868,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); - if (cpu_has_vmx_msr_bitmap()) - vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); - if (enable_vpid) { /* * There is no direct mapping between vpid02 and vpid12, the @@ -10823,16 +10878,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, * even if spawn a lot of nested vCPUs. 
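A compact sketch of the vpid02 reuse policy this comment describes: vpid02 is a fixed per-vCPU tag, so when L1 hands L2 a different vpid12 the stale vpid02-tagged translations must be flushed. The struct and flush callback are illustrative stand-ins, not kernel APIs:

```c
#include <stdint.h>

struct nested_vpid_state {
	uint16_t vpid02;	/* allocated once by L0 */
	uint16_t last_vpid12;	/* last value seen from vmcs12 */
};

static void nested_vpid_sync(struct nested_vpid_state *s, uint16_t vpid12,
			     void (*flush_by_vpid)(uint16_t))
{
	if (vpid12 != s->last_vpid12) {
		s->last_vpid12 = vpid12;
		flush_by_vpid(s->vpid02);	/* invalidate old mappings */
	}
}
```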
*/ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { vmx->nested.last_vpid = vmcs12->virtual_processor_id; - __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); + __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true); } } else { - vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); - vmx_flush_tlb(vcpu); + vmx_flush_tlb(vcpu, true); } - } if (enable_pml) { @@ -10881,6 +10933,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); + if (vmx->nested.dirty_vmcs12) { + prepare_vmcs02_full(vcpu, vmcs12, from_vmentry); + vmx->nested.dirty_vmcs12 = false; + } + /* Shadow page tables on either EPT or shadow page tables. */ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) @@ -10889,16 +10946,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; - /* - * L1 may access the L2's PDPTR, so save them to construct vmcs12 - */ - if (enable_ept) { - vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); - vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); - vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); - vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); - } - kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); return 0; @@ -11254,7 +11301,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) if (block_nested_events) return -EBUSY; nested_vmx_inject_exception_vmexit(vcpu, exit_qual); - vcpu->arch.exception.pending = false; return 0; } @@ -11535,11 +11581,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, * L1's vpid. TODO: move to a more elaborate solution, giving * each L2 its own vpid and exposing the vpid feature to L1. */ - vmx_flush_tlb(vcpu); + vmx_flush_tlb(vcpu, true); } - /* Restore posted intr vector. */ - if (nested_cpu_has_posted_intr(vmcs12)) - vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR); vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); @@ -11800,6 +11843,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage) { + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; + + /* + * RDPID causes #UD if disabled through secondary execution controls. + * Because it is marked as EmulateOnUD, we need to intercept it here. + */ + if (info->intercept == x86_intercept_rdtscp && + !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) { + ctxt->exception.vector = UD_VECTOR; + ctxt->exception.error_code_valid = false; + return X86EMUL_PROPAGATE_FAULT; + } + + /* TODO: check more intercepts... 
*/ return X86EMUL_CONTINUE; } @@ -12313,6 +12371,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .handle_external_intr = vmx_handle_external_intr, .mpx_supported = vmx_mpx_supported, .xsaves_supported = vmx_xsaves_supported, + .umip_emulated = vmx_umip_emulated, .check_nested_events = vmx_check_nested_events, diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h new file mode 100644 index 0000000000000..cd0c75f6d037a --- /dev/null +++ b/arch/x86/kvm/vmx_shadow_fields.h @@ -0,0 +1,77 @@ +#ifndef SHADOW_FIELD_RO +#define SHADOW_FIELD_RO(x) +#endif +#ifndef SHADOW_FIELD_RW +#define SHADOW_FIELD_RW(x) +#endif + +/* + * We do NOT shadow fields that are modified when L0 + * traps and emulates any vmx instruction (e.g. VMPTRLD, + * VMXON...) executed by L1. + * For example, VM_INSTRUCTION_ERROR is read + * by L1 if a vmx instruction fails (part of the error path). + * Note the code assumes this logic. If for some reason + * we start shadowing these fields then we need to + * force a shadow sync when L0 emulates vmx instructions + * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified + * by nested_vmx_failValid) + * + * When adding or removing fields here, note that shadowed + * fields must always be synced by prepare_vmcs02, not just + * prepare_vmcs02_full. + */ + +/* + * Keeping the fields ordered by size is an attempt at improving + * branch prediction in vmcs_read_any and vmcs_write_any. + */ + +/* 16-bits */ +SHADOW_FIELD_RW(GUEST_CS_SELECTOR) +SHADOW_FIELD_RW(GUEST_INTR_STATUS) +SHADOW_FIELD_RW(GUEST_PML_INDEX) +SHADOW_FIELD_RW(HOST_FS_SELECTOR) +SHADOW_FIELD_RW(HOST_GS_SELECTOR) + +/* 32-bits */ +SHADOW_FIELD_RO(VM_EXIT_REASON) +SHADOW_FIELD_RO(VM_EXIT_INTR_INFO) +SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN) +SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD) +SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE) +SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE) +SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL) +SHADOW_FIELD_RW(EXCEPTION_BITMAP) +SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE) +SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD) +SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN) +SHADOW_FIELD_RW(TPR_THRESHOLD) +SHADOW_FIELD_RW(GUEST_CS_LIMIT) +SHADOW_FIELD_RW(GUEST_CS_AR_BYTES) +SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO) +SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE) + +/* Natural width */ +SHADOW_FIELD_RO(EXIT_QUALIFICATION) +SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS) +SHADOW_FIELD_RW(GUEST_RIP) +SHADOW_FIELD_RW(GUEST_RSP) +SHADOW_FIELD_RW(GUEST_CR0) +SHADOW_FIELD_RW(GUEST_CR3) +SHADOW_FIELD_RW(GUEST_CR4) +SHADOW_FIELD_RW(GUEST_RFLAGS) +SHADOW_FIELD_RW(GUEST_CS_BASE) +SHADOW_FIELD_RW(GUEST_ES_BASE) +SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK) +SHADOW_FIELD_RW(CR0_READ_SHADOW) +SHADOW_FIELD_RW(CR4_READ_SHADOW) +SHADOW_FIELD_RW(HOST_FS_BASE) +SHADOW_FIELD_RW(HOST_GS_BASE) + +/* 64-bit */ +SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS) +SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH) + +#undef SHADOW_FIELD_RO +#undef SHADOW_FIELD_RW diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f9c5171dad2b9..c8a0b545ac20c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -67,6 +67,8 @@ #include #include #include +#include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -177,7 +179,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, - { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", 
VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, @@ -702,7 +703,8 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && !vcpu->guest_xcr0_loaded) { /* kvm_set_xcr() also depends on this */ - xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); vcpu->guest_xcr0_loaded = 1; } } @@ -794,6 +796,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57)) return 1; + if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP)) + return 1; + if (is_long_mode(vcpu)) { if (!(cr4 & X86_CR4_PAE)) return 1; @@ -1037,6 +1042,7 @@ static u32 emulated_msrs[] = { MSR_IA32_MCG_CTL, MSR_IA32_MCG_EXT_CTL, MSR_IA32_SMBASE, + MSR_SMI_COUNT, MSR_PLATFORM_INFO, MSR_MISC_FEATURES_ENABLES, }; @@ -1378,6 +1384,11 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) return tsc; } +static inline int gtod_is_based_on_tsc(int mode) +{ + return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK; +} + static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 @@ -1397,7 +1408,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) * perform request to enable masterclock. */ if (ka->use_master_clock || - (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched)) + (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, @@ -1460,6 +1471,19 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) vcpu->arch.tsc_offset = offset; } +static inline bool kvm_check_tsc_unstable(void) +{ +#ifdef CONFIG_X86_64 + /* + * TSC is marked unstable when we're running on Hyper-V, + * but the 'TSC page' clocksource is still good.
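The TSC-based modes handled below (raw TSC and the Hyper-V TSC page) feed the same clocksource arithmetic that do_monotonic_boot() applies. A sketch of that computation with illustrative names:

```c
#include <stdint.h>

static uint64_t sample_ns(uint64_t cycles, uint64_t cycle_last,
			  uint64_t mask, uint32_t mult, uint32_t shift,
			  uint64_t nsec_base, uint64_t boot_ns)
{
	uint64_t ns = nsec_base;	/* accumulated, still in shifted units */

	ns += ((cycles - cycle_last) & mask) * mult;
	ns >>= shift;			/* scale cycles*mult down to ns */
	return ns + boot_ns;		/* boot offset is plain nanoseconds */
}
```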
+ */ + if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK) + return false; +#endif + return check_tsc_unstable(); +} + void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; @@ -1505,7 +1529,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) */ if (synchronizing && vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { - if (!check_tsc_unstable()) { + if (!kvm_check_tsc_unstable()) { offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { @@ -1605,18 +1629,43 @@ static u64 read_tsc(void) return last; } -static inline u64 vgettsc(u64 *cycle_now) +static inline u64 vgettsc(u64 *tsc_timestamp, int *mode) { long v; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; + u64 tsc_pg_val; + + switch (gtod->clock.vclock_mode) { + case VCLOCK_HVCLOCK: + tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(), + tsc_timestamp); + if (tsc_pg_val != U64_MAX) { + /* TSC page valid */ + *mode = VCLOCK_HVCLOCK; + v = (tsc_pg_val - gtod->clock.cycle_last) & + gtod->clock.mask; + } else { + /* TSC page invalid */ + *mode = VCLOCK_NONE; + } + break; + case VCLOCK_TSC: + *mode = VCLOCK_TSC; + *tsc_timestamp = read_tsc(); + v = (*tsc_timestamp - gtod->clock.cycle_last) & + gtod->clock.mask; + break; + default: + *mode = VCLOCK_NONE; + } - *cycle_now = read_tsc(); + if (*mode == VCLOCK_NONE) + *tsc_timestamp = v = 0; - v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask; return v * gtod->clock.mult; } -static int do_monotonic_boot(s64 *t, u64 *cycle_now) +static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; unsigned long seq; @@ -1625,9 +1674,8 @@ static int do_monotonic_boot(s64 *t, u64 *cycle_now) do { seq = read_seqcount_begin(>od->seq); - mode = gtod->clock.vclock_mode; ns = gtod->nsec_base; - ns += vgettsc(cycle_now); + ns += vgettsc(tsc_timestamp, &mode); ns >>= gtod->clock.shift; ns += gtod->boot_ns; } while (unlikely(read_seqcount_retry(>od->seq, seq))); @@ -1636,7 +1684,7 @@ static int do_monotonic_boot(s64 *t, u64 *cycle_now) return mode; } -static int do_realtime(struct timespec *ts, u64 *cycle_now) +static int do_realtime(struct timespec *ts, u64 *tsc_timestamp) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; unsigned long seq; @@ -1645,10 +1693,9 @@ static int do_realtime(struct timespec *ts, u64 *cycle_now) do { seq = read_seqcount_begin(>od->seq); - mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ns = gtod->nsec_base; - ns += vgettsc(cycle_now); + ns += vgettsc(tsc_timestamp, &mode); ns >>= gtod->clock.shift; } while (unlikely(read_seqcount_retry(>od->seq, seq))); @@ -1658,25 +1705,26 @@ static int do_realtime(struct timespec *ts, u64 *cycle_now) return mode; } -/* returns true if host is using tsc clocksource */ -static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now) +/* returns true if host is using TSC based clocksource */ +static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp) { /* checked again under seqlock below */ - if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) + if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) return false; - return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC; + return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns, + tsc_timestamp)); } -/* returns true if host is using tsc clocksource */ +/* returns true if host is using TSC based clocksource */ static bool kvm_get_walltime_and_clockread(struct timespec 
*ts, - u64 *cycle_now) + u64 *tsc_timestamp) { /* checked again under seqlock below */ - if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) + if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode)) return false; - return do_realtime(ts, cycle_now) == VCLOCK_TSC; + return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp)); } #endif @@ -2119,6 +2167,12 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu) vcpu->arch.pv_time_enabled = false; } +static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) +{ + ++vcpu->stat.tlb_flush; + kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa); +} + static void record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) @@ -2128,7 +2182,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu) &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; - vcpu->arch.st.steal.preempted = 0; + /* + * Doing a TLB flush here, on the guest's behalf, can avoid + * expensive IPIs. + */ + if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB) + kvm_vcpu_flush_tlb(vcpu, false); if (vcpu->arch.st.steal.version & 1) vcpu->arch.st.steal.version += 1; /* first time write, random junk */ @@ -2229,6 +2288,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vcpu->arch.smbase = data; break; + case MSR_SMI_COUNT: + if (!msr_info->host_initiated) + return 1; + vcpu->arch.smi_count = data; + break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; @@ -2503,6 +2567,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; msr_info->data = vcpu->arch.smbase; break; + case MSR_SMI_COUNT: + msr_info->data = vcpu->arch.smi_count; + break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ msr_info->data = 1000ULL; @@ -2870,13 +2937,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); } - if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { + if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : rdtsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); - if (check_tsc_unstable()) { + if (kvm_check_tsc_unstable()) { u64 offset = kvm_compute_tsc_offset(vcpu, vcpu->arch.last_guest_tsc); kvm_vcpu_write_tsc_offset(vcpu, offset); @@ -2905,7 +2972,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; - vcpu->arch.st.steal.preempted = 1; + vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED; kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal.preempted, @@ -2939,12 +3006,18 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) pagefault_enable(); kvm_x86_ops->vcpu_put(vcpu); vcpu->arch.last_host_tsc = rdtsc(); + /* + * If userspace has set any breakpoints or watchpoints, dr6 is restored + * on every vmexit, but if not, we might have a stale dr6 from the + * guest. do_debug expects dr6 to be cleared after it runs, do the same. 
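record_steal_time() above consumes host-published flags with an atomic exchange so each request is acted on exactly once, even against a concurrent setter. A userspace sketch of that handshake; the callback is an illustrative stand-in:

```c
#include <stdatomic.h>
#include <stdint.h>

#define KVM_VCPU_PREEMPTED	(1 << 0)
#define KVM_VCPU_FLUSH_TLB	(1 << 1)

static void consume_steal_flags(_Atomic uint8_t *preempted,
				void (*flush_tlb)(void))
{
	/* Swap the field to 0 and act on whatever flags we observed. */
	uint8_t flags = atomic_exchange(preempted, 0);

	if (flags & KVM_VCPU_FLUSH_TLB)
		flush_tlb();	/* flush requested on the guest's behalf */
}
```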
+ */ + set_debugreg(0, 6); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { - if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active) + if (vcpu->arch.apicv_active) kvm_x86_ops->sync_pir_to_irr(vcpu); return kvm_apic_get_state(vcpu, s); @@ -3473,6 +3546,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp, void *buffer; } u; + vcpu_load(vcpu); + u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { @@ -3498,8 +3573,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (!lapic_in_kernel(vcpu)) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); - if (IS_ERR(u.lapic)) - return PTR_ERR(u.lapic); + if (IS_ERR(u.lapic)) { + r = PTR_ERR(u.lapic); + goto out_nofree; + } r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); break; @@ -3673,8 +3750,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } case KVM_SET_XSAVE: { u.xsave = memdup_user(argp, sizeof(*u.xsave)); - if (IS_ERR(u.xsave)) - return PTR_ERR(u.xsave); + if (IS_ERR(u.xsave)) { + r = PTR_ERR(u.xsave); + goto out_nofree; + } r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; @@ -3696,8 +3775,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } case KVM_SET_XCRS: { u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); - if (IS_ERR(u.xcrs)) - return PTR_ERR(u.xcrs); + if (IS_ERR(u.xcrs)) { + r = PTR_ERR(u.xcrs); + goto out_nofree; + } r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; @@ -3741,6 +3822,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp, } out: kfree(u.buffer); +out_nofree: + vcpu_put(vcpu); return r; } @@ -4297,6 +4380,36 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_vm_ioctl_enable_cap(kvm, &cap); break; } + case KVM_MEMORY_ENCRYPT_OP: { + r = -ENOTTY; + if (kvm_x86_ops->mem_enc_op) + r = kvm_x86_ops->mem_enc_op(kvm, argp); + break; + } + case KVM_MEMORY_ENCRYPT_REG_REGION: { + struct kvm_enc_region region; + + r = -EFAULT; + if (copy_from_user(®ion, argp, sizeof(region))) + goto out; + + r = -ENOTTY; + if (kvm_x86_ops->mem_enc_reg_region) + r = kvm_x86_ops->mem_enc_reg_region(kvm, ®ion); + break; + } + case KVM_MEMORY_ENCRYPT_UNREG_REGION: { + struct kvm_enc_region region; + + r = -EFAULT; + if (copy_from_user(®ion, argp, sizeof(region))) + goto out; + + r = -ENOTTY; + if (kvm_x86_ops->mem_enc_unreg_region) + r = kvm_x86_ops->mem_enc_unreg_region(kvm, ®ion); + break; + } default: r = -ENOTTY; } @@ -5705,7 +5818,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, * handle watchpoints yet, those would be handled in * the emulate_ops. 
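The ioctl conversions above all follow one bracketing pattern: take the vCPU with vcpu_load() on entry and funnel every return, including error paths, through a single exit label so vcpu_put() is never skipped. A minimal kernel-style sketch; the function and argument names are illustrative:

```c
static int vcpu_ioctl_example(struct kvm_vcpu *vcpu, void __user *argp)
{
	int r = -EINVAL;

	vcpu_load(vcpu);
	if (!argp)
		goto out;	/* early failure still reaches vcpu_put() */
	r = 0;
out:
	vcpu_put(vcpu);
	return r;
}
```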
*/ - if (kvm_vcpu_check_breakpoint(vcpu, &r)) + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_breakpoint(vcpu, &r)) return r; ctxt->interruptibility = 0; @@ -5891,6 +6005,43 @@ static void tsc_khz_changed(void *data) __this_cpu_write(cpu_tsc_khz, khz); } +#ifdef CONFIG_X86_64 +static void kvm_hyperv_tsc_notifier(void) +{ + struct kvm *kvm; + struct kvm_vcpu *vcpu; + int cpu; + + spin_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) + kvm_make_mclock_inprogress_request(kvm); + + hyperv_stop_tsc_emulation(); + + /* TSC frequency always matches when on Hyper-V */ + for_each_present_cpu(cpu) + per_cpu(cpu_tsc_khz, cpu) = tsc_khz; + kvm_max_guest_tsc_khz = tsc_khz; + + list_for_each_entry(kvm, &vm_list, vm_list) { + struct kvm_arch *ka = &kvm->arch; + + spin_lock(&ka->pvclock_gtod_sync_lock); + + pvclock_update_vm_gtod_copy(kvm); + + kvm_for_each_vcpu(cpu, vcpu, kvm) + kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); + + kvm_for_each_vcpu(cpu, vcpu, kvm) + kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); + + spin_unlock(&ka->pvclock_gtod_sync_lock); + } + spin_unlock(&kvm_lock); +} +#endif + static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { @@ -6112,9 +6263,9 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, update_pvclock_gtod(tk); /* disable master clock if host does not trust, or does not - * use, TSC clocksource + * use, TSC based clocksource. */ - if (gtod->clock.vclock_mode != VCLOCK_TSC && + if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && atomic_read(&kvm_guest_has_master_clock) != 0) queue_work(system_long_wq, &pvclock_gtod_work); @@ -6176,6 +6327,9 @@ int kvm_arch_init(void *opaque) kvm_lapic_init(); #ifdef CONFIG_X86_64 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); + + if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) + set_hv_tscchange_cb(kvm_hyperv_tsc_notifier); #endif return 0; @@ -6188,6 +6342,10 @@ int kvm_arch_init(void *opaque) void kvm_arch_exit(void) { +#ifdef CONFIG_X86_64 + if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) + clear_hv_tscchange_cb(); +#endif kvm_lapic_exit(); perf_unregister_guest_info_callbacks(&kvm_guest_cbs); @@ -6450,6 +6608,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) kvm_x86_ops->queue_exception(vcpu); } else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) { vcpu->arch.smi_pending = false; + ++vcpu->arch.smi_count; enter_smm(vcpu); } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; @@ -6751,7 +6910,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) if (irqchip_split(vcpu->kvm)) kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); else { - if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active) + if (vcpu->arch.apicv_active) kvm_x86_ops->sync_pir_to_irr(vcpu); kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); } @@ -6760,12 +6919,6 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); } -static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) -{ - ++vcpu->stat.tlb_flush; - kvm_x86_ops->tlb_flush(vcpu); -} - void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end) { @@ -6834,7 +6987,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) - kvm_vcpu_flush_tlb(vcpu); + 
kvm_vcpu_flush_tlb(vcpu, true); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; @@ -6983,10 +7136,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * This handles the case where a posted interrupt was * notified with kvm_vcpu_kick. */ - if (kvm_lapic_enabled(vcpu)) { - if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active) - kvm_x86_ops->sync_pir_to_irr(vcpu); - } + if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) + kvm_x86_ops->sync_pir_to_irr(vcpu); if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || need_resched() || signal_pending(current)) { @@ -7007,7 +7158,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) } trace_kvm_entry(vcpu->vcpu_id); - wait_lapic_expire(vcpu); + if (lapic_timer_advance_ns) + wait_lapic_expire(vcpu); guest_enter_irqoff(); if (unlikely(vcpu->arch.switch_db_regs)) { @@ -7268,8 +7420,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; + vcpu_load(vcpu); kvm_sigset_activate(vcpu); - kvm_load_guest_fpu(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { @@ -7316,11 +7468,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) post_kvm_run_save(vcpu); kvm_sigset_deactivate(vcpu); + vcpu_put(vcpu); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { + vcpu_load(vcpu); + if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of @@ -7354,11 +7509,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->rip = kvm_rip_read(vcpu); regs->rflags = kvm_get_rflags(vcpu); + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { + vcpu_load(vcpu); + vcpu->arch.emulate_regs_need_sync_from_vcpu = true; vcpu->arch.emulate_regs_need_sync_to_vcpu = false; @@ -7388,6 +7546,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) kvm_make_request(KVM_REQ_EVENT, vcpu); + vcpu_put(vcpu); return 0; } @@ -7406,6 +7565,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, { struct desc_ptr dt; + vcpu_load(vcpu); + kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); @@ -7437,12 +7598,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, set_bit(vcpu->arch.interrupt.nr, (unsigned long *)sregs->interrupt_bitmap); + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { + vcpu_load(vcpu); + kvm_apic_accept_events(vcpu); if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && vcpu->arch.pv.pv_unhalted) @@ -7450,21 +7614,26 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, else mp_state->mp_state = vcpu->arch.mp_state; + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { + int ret = -EINVAL; + + vcpu_load(vcpu); + if (!lapic_in_kernel(vcpu) && mp_state->mp_state != KVM_MP_STATE_RUNNABLE) - return -EINVAL; + goto out; /* INITs are latched while in SMM */ if ((is_smm(vcpu) || vcpu->arch.smi_pending) && (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) - return -EINVAL; + goto out; if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; @@ -7472,7 +7641,11 
@@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, } else vcpu->arch.mp_state = mp_state->mp_state; kvm_make_request(KVM_REQ_EVENT, vcpu); - return 0; + + ret = 0; +out: + vcpu_put(vcpu); + return ret; } int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, @@ -7526,18 +7699,21 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; + int ret = -EINVAL; + + vcpu_load(vcpu); if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (sregs->cr4 & X86_CR4_OSXSAVE)) - return -EINVAL; + goto out; if (kvm_valid_sregs(vcpu, sregs)) - return -EINVAL; + goto out; apic_base_msr.data = sregs->apic_base; apic_base_msr.host_initiated = true; if (kvm_set_apic_base(vcpu, &apic_base_msr)) - return -EINVAL; + goto out; dt.size = sregs->idt.limit; dt.address = sregs->idt.base; @@ -7603,7 +7779,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, kvm_make_request(KVM_REQ_EVENT, vcpu); - return 0; + ret = 0; +out: + vcpu_put(vcpu); + return ret; } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, @@ -7612,6 +7791,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, unsigned long rflags; int i, r; + vcpu_load(vcpu); + if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { r = -EBUSY; if (vcpu->arch.exception.pending) @@ -7657,7 +7838,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, r = 0; out: - + vcpu_put(vcpu); return r; } @@ -7671,6 +7852,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, gpa_t gpa; int idx; + vcpu_load(vcpu); + idx = srcu_read_lock(&vcpu->kvm->srcu); gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); srcu_read_unlock(&vcpu->kvm->srcu, idx); @@ -7679,14 +7862,17 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, tr->writeable = 1; tr->usermode = 0; + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - struct fxregs_state *fxsave = - &vcpu->arch.guest_fpu.state.fxsave; + struct fxregs_state *fxsave; + + vcpu_load(vcpu); + fxsave = &vcpu->arch.guest_fpu.state.fxsave; memcpy(fpu->fpr, fxsave->st_space, 128); fpu->fcw = fxsave->cwd; fpu->fsw = fxsave->swd; @@ -7696,13 +7882,17 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) fpu->last_dp = fxsave->rdp; memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space); + vcpu_put(vcpu); return 0; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - struct fxregs_state *fxsave = - &vcpu->arch.guest_fpu.state.fxsave; + struct fxregs_state *fxsave; + + vcpu_load(vcpu); + + fxsave = &vcpu->arch.guest_fpu.state.fxsave; memcpy(fxsave->st_space, fpu->fpr, 128); fxsave->cwd = fpu->fcw; @@ -7713,6 +7903,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) fxsave->rdp = fpu->last_dp; memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space); + vcpu_put(vcpu); return 0; } @@ -7769,7 +7960,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, { struct kvm_vcpu *vcpu; - if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) + if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) printk_once(KERN_WARNING "kvm: SMP vm created on host with unstable TSC; " "guest TSC will not be reliable\n"); @@ -7781,16 +7972,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - int r; - kvm_vcpu_mtrr_init(vcpu); - r = vcpu_load(vcpu); - if (r) - 
return r; + vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); kvm_mmu_setup(vcpu); vcpu_put(vcpu); - return r; + return 0; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -7800,13 +7987,15 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) kvm_hv_vcpu_postcreate(vcpu); - if (vcpu_load(vcpu)) + if (mutex_lock_killable(&vcpu->mutex)) return; + vcpu_load(vcpu); msr.data = 0x0; msr.index = MSR_IA32_TSC; msr.host_initiated = true; kvm_write_tsc(vcpu, &msr); vcpu_put(vcpu); + mutex_unlock(&vcpu->mutex); if (!kvmclock_periodic_sync) return; @@ -7817,11 +8006,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { - int r; vcpu->arch.apf.msr_val = 0; - r = vcpu_load(vcpu); - BUG_ON(r); + vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); @@ -7833,6 +8020,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vcpu->arch.hflags = 0; vcpu->arch.smi_pending = 0; + vcpu->arch.smi_count = 0; atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; @@ -7926,7 +8114,7 @@ int kvm_arch_hardware_enable(void) return ret; local_tsc = rdtsc(); - stable = !check_tsc_unstable(); + stable = !kvm_check_tsc_unstable(); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (!stable && vcpu->cpu == smp_processor_id()) @@ -8192,9 +8380,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) { - int r; - r = vcpu_load(vcpu); - BUG_ON(r); + vcpu_load(vcpu); kvm_mmu_unload(vcpu); vcpu_put(vcpu); } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index d0b95b7a90b4e..b91215d1fd80d 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -12,6 +12,7 @@ static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) { + vcpu->arch.exception.pending = false; vcpu->arch.exception.injected = false; } @@ -265,36 +266,8 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) static inline bool kvm_mwait_in_guest(void) { - unsigned int eax, ebx, ecx, edx; - - if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT)) - return false; - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - /* All AMD CPUs have a working MWAIT implementation */ - return true; - case X86_VENDOR_INTEL: - /* Handle Intel below */ - break; - default: - return false; - } - - /* - * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as - * they would allow guest to stop the CPU completely by disabling - * interrupts then invoking MWAIT. 
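For reference, the open-coded MWAIT-leaf probe removed here can be reproduced from user space. A minimal sketch, assuming GCC or Clang on x86; the two constants mirror the kernel's asm/mwait.h definitions, everything else is illustrative:

/*
 * User-space sketch of the check the old kvm_mwait_in_guest() performed.
 * CPUID leaf 5 is the MWAIT leaf; ECX bit 1 reports whether interrupts
 * act as break events for MWAIT even while interrupts are disabled.
 */
#include <cpuid.h>
#include <stdio.h>

#define CPUID_MWAIT_LEAF		5
#define CPUID5_ECX_INTERRUPT_BREAK	0x2

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 for out-of-range leaves, which stands
	 * in for the cpuid_level bound check in the removed code. */
	if (!__get_cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("MWAIT interrupt break-event: %s\n",
	       (ecx & CPUID5_ECX_INTERRUPT_BREAK) ? "yes" : "no");
	return 0;
}

The rewritten helper, visible in the hunk that follows, keeps only the two conditions that matter for exposing MWAIT to a guest: the feature is present and the CPU is not affected by the known MONITOR erratum (X86_BUG_MONITOR).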
- */ - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return false; - - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); - - if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK)) - return false; - - return true; + return boot_cpu_has(X86_FEATURE_MWAIT) && + !boot_cpu_has_bug(X86_BUG_MONITOR); } #endif diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c index d6f848d1211d4..2dd1fe13a37b3 100644 --- a/arch/x86/lib/cpu.c +++ b/arch/x86/lib/cpu.c @@ -18,7 +18,7 @@ unsigned int x86_model(unsigned int sig) { unsigned int fam, model; - fam = x86_family(sig); + fam = x86_family(sig); model = (sig >> 4) & 0xf; diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c index 7b881d03d0ddd..3cdf06128d13c 100644 --- a/arch/x86/lib/error-inject.c +++ b/arch/x86/lib/error-inject.c @@ -7,6 +7,7 @@ asmlinkage void just_return_func(void); asm( ".type just_return_func, @function\n" + ".globl just_return_func\n" "just_return_func:\n" " ret\n" ".size just_return_func, .-just_return_func\n" diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1ab42c8520693..8b72923f1d35c 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte) @@ -1193,8 +1193,8 @@ void __init mem_init(void) register_page_bootmem_info(); /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, - PAGE_SIZE, KCORE_OTHER); + if (get_gate_vma(&init_mm)) + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index c45b6ec5357bc..e2db83bebc3b7 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx, set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); else pte_clear(&init_mm, addr, pte); - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 58477ec3d66d0..7c86867096361 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear) return -1; } - __flush_tlb_one(f->addr); + __flush_tlb_one_kernel(f->addr); return 0; } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index fe7d57a8fb600..1555bd7d34493 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -677,6 +677,25 @@ static enum page_cache_mode lookup_memtype(u64 paddr) return rettype; } +/** + * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type + * of @pfn cannot be overridden by UC MTRR memory type. + * + * Only to be called when PAT is enabled. + * + * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC. + * Returns false in other cases. 
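The body of pat_pfn_immune_to_uc_mtrr() follows just below and reduces to a three-way comparison on lookup_memtype(). As a hedged usage sketch, with a hypothetical caller name; pat_enabled() is the existing predicate from asm/pat.h:

/* Hypothetical caller, not part of this patch: decide whether a pfn's
 * PAT type already wins against any overlapping UC MTRR. */
static bool can_ignore_uc_mtrr(unsigned long pfn)
{
	/* The helper's contract requires PAT to be enabled. */
	return pat_enabled() && pat_pfn_immune_to_uc_mtrr(pfn);
}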
+ */ +bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn) +{ + enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn)); + + return cm == _PAGE_CACHE_MODE_UC || + cm == _PAGE_CACHE_MODE_UC_MINUS || + cm == _PAGE_CACHE_MODE_WC; +} +EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr); + /** * io_reserve_memtype - Request a memory type mapping for a region of memory * @start: start (physical address) of the region diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index c3c5274410a90..9bb7f0ab9fe62 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval) * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ - __flush_tlb_one(vaddr); + __flush_tlb_one_kernel(vaddr); } unsigned long __FIXADDR_TOP = 0xfffff000; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 8dcc0607f8058..7f1a51399674b 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -498,7 +498,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, * flush that changes context.tlb_gen from 2 to 3. If they get * processed on this CPU in reverse order, we'll see * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL. - * If we were to use __flush_tlb_single() and set local_tlb_gen to + * If we were to use __flush_tlb_one_user() and set local_tlb_gen to * 3, we'd be break the invariant: we'd update local_tlb_gen above * 1 without the full flush that's needed for tlb_gen 2. * @@ -519,7 +519,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, addr = f->start; while (addr < f->end) { - __flush_tlb_single(addr); + __flush_tlb_one_user(addr); addr += PAGE_SIZE; } if (local) @@ -666,7 +666,7 @@ static void do_kernel_range_flush(void *info) /* flush range by one by one 'invlpg' */ for (addr = f->start; addr < f->end; addr += PAGE_SIZE) - __flush_tlb_one(addr); + __flush_tlb_one_kernel(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index c2e9285d1bf11..db77e087adaf8 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, local_flush_tlb(); stat->d_alltlb++; } else { - __flush_tlb_single(msg->address); + __flush_tlb_one_user(msg->address); stat->d_onetlb++; } stat->d_requestee++; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index d85076223a696..aae88fec9941a 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void) preempt_enable(); } -static void xen_flush_tlb_single(unsigned long addr) +static void xen_flush_tlb_one_user(unsigned long addr) { struct mmuext_op *op; struct multicall_space mcs; - trace_xen_mmu_flush_tlb_single(addr); + trace_xen_mmu_flush_tlb_one_user(addr); preempt_disable(); @@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .flush_tlb_user = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb, - .flush_tlb_single = xen_flush_tlb_single, + .flush_tlb_one_user = xen_flush_tlb_one_user, .flush_tlb_others = xen_flush_tlb_others, .pgd_alloc = xen_pgd_alloc, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 13b4f19b91313..159a897151d64 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int i, ret = 0; pte_t *pte; + if 
(xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + if (kmap_ops) { ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, kmap_ops, count); @@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, { int i, ret = 0; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 77c959cf81e7c..7a43b2ae19f12 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus) if (xen_hvm_domain()) native_smp_cpus_done(max_cpus); + else + calculate_max_logical_packages(); if (xen_have_vcpu_info_placement) return; diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index 497cc55a0c16c..96f26e026783d 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -9,7 +9,9 @@ #include #include +#include #include +#include #include #include @@ -35,6 +37,20 @@ ENTRY(startup_xen) mov %_ASM_SI, xen_start_info mov $init_thread_union+THREAD_SIZE, %_ASM_SP +#ifdef CONFIG_X86_64 + /* Set up %gs. + * + * The base of %gs always points to the bottom of the irqstack + * union. If the stack protector canary is enabled, it is + * located at %gs:40. Note that, on SMP, the boot cpu uses + * init data section till per cpu areas are set up. + */ + movl $MSR_GS_BASE,%ecx + movq $INIT_PER_CPU_VAR(irq_stack_union),%rax + cdq + wrmsr +#endif + jmp xen_start_kernel END(startup_xen) __FINIT diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h index 54be80876e578..216b6f32c3759 100644 --- a/arch/xtensa/include/asm/kasan.h +++ b/arch/xtensa/include/asm/kasan.h @@ -10,6 +10,8 @@ #include #include +#define KASAN_SHADOW_SCALE_SHIFT 3 + /* Start of area covered by KASAN */ #define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000) /* Start of the shadow map */ diff --git a/arch/xtensa/include/uapi/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h index e3246d41182c2..4d249040b33d2 100644 --- a/arch/xtensa/include/uapi/asm/poll.h +++ b/arch/xtensa/include/uapi/asm/poll.h @@ -12,26 +12,9 @@ #ifndef _XTENSA_POLL_H #define _XTENSA_POLL_H -#ifndef __KERNEL__ #define POLLWRNORM POLLOUT -#define POLLWRBAND (__force __poll_t)0x0100 -#define POLLREMOVE (__force __poll_t)0x0800 -#else -#define __ARCH_HAS_MANGLED_POLL -static inline __u16 mangle_poll(__poll_t val) -{ - __u16 v = (__force __u16)val; - /* bit 9 -> bit 8, bit 8 -> bit 2 */ - return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6); -} - -static inline __poll_t demangle_poll(__u16 v) -{ - /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */ - return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) | - ((v & 4) << 6)); -} -#endif +#define POLLWRBAND 0x0100 +#define POLLREMOVE 0x0800 #include diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 47e6ec7427c44..aeca22d911010 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -3823,24 +3823,26 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) } /* - * We exploit the bfq_finish_request hook to decrement - * rq_in_driver, but bfq_finish_request will not be - * invoked on this request. So, to avoid unbalance, - * just start this request, without incrementing - * rq_in_driver. As a negative consequence, - * rq_in_driver is deceptively lower than it should be - * while this request is in service. 
This may cause - * bfq_schedule_dispatch to be invoked uselessly. + * We exploit the bfq_finish_requeue_request hook to + * decrement rq_in_driver, but + * bfq_finish_requeue_request will not be invoked on + * this request. So, to avoid unbalance, just start + * this request, without incrementing rq_in_driver. As + * a negative consequence, rq_in_driver is deceptively + * lower than it should be while this request is in + * service. This may cause bfq_schedule_dispatch to be + * invoked uselessly. * * As for implementing an exact solution, the - * bfq_finish_request hook, if defined, is probably - * invoked also on this request. So, by exploiting - * this hook, we could 1) increment rq_in_driver here, - * and 2) decrement it in bfq_finish_request. Such a - * solution would let the value of the counter be - * always accurate, but it would entail using an extra - * interface function. This cost seems higher than the - * benefit, being the frequency of non-elevator-private + * bfq_finish_requeue_request hook, if defined, is + * probably invoked also on this request. So, by + * exploiting this hook, we could 1) increment + * rq_in_driver here, and 2) decrement it in + * bfq_finish_requeue_request. Such a solution would + * let the value of the counter be always accurate, + * but it would entail using an extra interface + * function. This cost seems higher than the benefit, + * being the frequency of non-elevator-private * requests very low. */ goto start_rq; @@ -4515,6 +4517,8 @@ static inline void bfq_update_insert_stats(struct request_queue *q, unsigned int cmd_flags) {} #endif +static void bfq_prepare_request(struct request *rq, struct bio *bio); + static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) { @@ -4541,6 +4545,18 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, else list_add_tail(&rq->queuelist, &bfqd->dispatch); } else { + if (WARN_ON_ONCE(!bfqq)) { + /* + * This should never happen. Most likely rq is + * a requeued regular request, being + * re-inserted without being first + * re-prepared. Do a prepare, to avoid + * failure. + */ + bfq_prepare_request(rq, rq->bio); + bfqq = RQ_BFQQ(rq); + } + idle_timer_disabled = __bfq_insert_request(bfqd, rq); /* * Update bfqq, because, if a queue merge has occurred @@ -4697,22 +4713,44 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) bfq_schedule_dispatch(bfqd); } -static void bfq_finish_request_body(struct bfq_queue *bfqq) +static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq) { bfqq->allocated--; bfq_put_queue(bfqq); } -static void bfq_finish_request(struct request *rq) +/* + * Handle either a requeue or a finish for rq. The things to do are + * the same in both cases: all references to rq are to be dropped. In + * particular, rq is considered completed from the point of view of + * the scheduler. + */ +static void bfq_finish_requeue_request(struct request *rq) { - struct bfq_queue *bfqq; + struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd; - if (!rq->elv.icq) + /* + * Requeue and finish hooks are invoked in blk-mq without + * checking whether the involved request is actually still + * referenced in the scheduler. To handle this fact, the + * following two checks make this function exit in case of + * spurious invocations, for which there is nothing to do. + * + * First, check whether rq has nothing to do with an elevator. 
+ */ + if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) + return; + + /* + * rq either is not associated with any icq, or is an already + * requeued request that has not (yet) been re-inserted into + * a bfq_queue. + */ + if (!rq->elv.icq || !bfqq) return; - bfqq = RQ_BFQQ(rq); bfqd = bfqq->bfqd; if (rq->rq_flags & RQF_STARTED) @@ -4727,13 +4765,14 @@ static void bfq_finish_request(struct request *rq) spin_lock_irqsave(&bfqd->lock, flags); bfq_completed_request(bfqq, bfqd); - bfq_finish_request_body(bfqq); + bfq_finish_requeue_request_body(bfqq); spin_unlock_irqrestore(&bfqd->lock, flags); } else { /* * Request rq may be still/already in the scheduler, - * in which case we need to remove it. And we cannot + * in which case we need to remove it (this should + * never happen in case of requeue). And we cannot * defer such a check and removal, to avoid * inconsistencies in the time interval from the end * of this function to the start of the deferred work. @@ -4748,9 +4787,26 @@ static void bfq_finish_request(struct request *rq) bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); } - bfq_finish_request_body(bfqq); + bfq_finish_requeue_request_body(bfqq); } + /* + * Reset private fields. In case of a requeue, this allows + * this function to correctly do nothing if it is spuriously + * invoked again on this same request (see the check at the + * beginning of the function). Probably, a better general + * design would be to prevent blk-mq from invoking the requeue + * or finish hooks of an elevator, for a request that is not + * referred by that elevator. + * + * Resetting the following fields would break the + * request-insertion logic if rq is re-inserted into a bfq + * internal queue, without a re-preparation. Here we assume + * that re-insertions of requeued requests, without + * re-preparation, can happen only for pass_through or at_head + * requests (which are not re-inserted into bfq internal + * queues). + */ rq->elv.priv[0] = NULL; rq->elv.priv[1] = NULL; } @@ -5426,7 +5482,8 @@ static struct elevator_type iosched_bfq_mq = { .ops.mq = { .limit_depth = bfq_limit_depth, .prepare_request = bfq_prepare_request, - .finish_request = bfq_finish_request, + .requeue_request = bfq_finish_requeue_request, + .finish_request = bfq_finish_requeue_request, .exit_icq = bfq_exit_icq, .insert_requests = bfq_insert_requests, .dispatch_request = bfq_dispatch_request, diff --git a/block/blk-core.c b/block/blk-core.c index d0d104268f1a9..2d1a7bbe06343 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -34,6 +34,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -2083,6 +2084,14 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) return false; } +static noinline int should_fail_bio(struct bio *bio) +{ + if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) + return -EIO; + return 0; +} +ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO); + /* * Remap block n of partition p to block n+start(p) of the disk. 
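Looking back at the blk-core.c hunk above: should_fail_bio() is deliberately noinline and tagged ALLOW_ERROR_INJECTION() so that a BPF program may override its return value. A hedged libbpf-style sketch of such a program; it assumes CONFIG_BPF_KPROBE_OVERRIDE and is not part of the patch itself:

// SPDX-License-Identifier: GPL-2.0
/* Force should_fail_bio() to return -EIO via bpf_override_return(),
 * which the kernel only permits on functions whitelisted for error
 * injection. Build with: clang -O2 -target bpf -c fail_bio.c */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/should_fail_bio")
int override_should_fail_bio(struct pt_regs *ctx)
{
	bpf_override_return(ctx, -EIO);	/* short-circuit to -EIO */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";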
*/ @@ -2174,7 +2183,7 @@ generic_make_request_checks(struct bio *bio) if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) goto not_supported; - if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size)) + if (should_fail_bio(bio)) goto end_io; if (!bio->bi_partno) { diff --git a/block/blk-mq.c b/block/blk-mq.c index df93102e21494..357492712b0ea 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3164,6 +3164,7 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq) cpu_relax(); } + __set_current_state(TASK_RUNNING); return false; } diff --git a/block/blk-wbt.c b/block/blk-wbt.c index ae8de9780085a..f92fc84b5e2c4 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c @@ -697,7 +697,15 @@ u64 wbt_default_latency_nsec(struct request_queue *q) static int wbt_data_dir(const struct request *rq) { - return rq_data_dir(rq); + const int op = req_op(rq); + + if (op == REQ_OP_READ) + return READ; + else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH) + return WRITE; + + /* don't account */ + return -1; } int wbt_init(struct request_queue *q) diff --git a/block/bsg.c b/block/bsg.c index 2e2c1e2222097..06dc96e1f6700 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -849,9 +849,9 @@ static __poll_t bsg_poll(struct file *file, poll_table *wait) spin_lock_irq(&bd->lock); if (!list_empty(&bd->done_list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (bd->queued_cmds < bd->max_queue) - mask |= POLLOUT; + mask |= EPOLLOUT; spin_unlock_irq(&bd->lock); return mask; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 0f8d8d5523c31..c49766b03165c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -735,9 +735,9 @@ void af_alg_wmem_wakeup(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | - POLLRDBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | + EPOLLRDNORM | + EPOLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } @@ -800,9 +800,9 @@ void af_alg_data_wakeup(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLRDNORM | - POLLRDBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | + EPOLLRDNORM | + EPOLLRDBAND); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } @@ -1076,10 +1076,10 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock, mask = 0; if (!ctx->more || ctx->used) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (af_alg_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index a965b9d805598..ded1487833033 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c @@ -20,6 +20,20 @@ #include #include +/* + * On some 32-bit architectures (mn10300 and h8300), GCC ends up using + * over 1 KB of stack if we inline the round calculation into the loop + * in keccakf(). On the other hand, on 64-bit architectures with plenty + * of [64-bit wide] general purpose registers, not inlining it severely + * hurts performance. So let's use 64-bitness as a heuristic to decide + * whether to inline or not. 
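The SHA3_INLINE macro that encodes this heuristic comes right after this comment. Outside the kernel, the same trick looks roughly as follows; a sketch using plain GCC attribute spellings and pointer width as a stand-in for CONFIG_64BIT:

#include <stdint.h>

#if UINTPTR_MAX > 0xffffffffu			/* ~ CONFIG_64BIT */
#define ROUND_INLINE static inline __attribute__((always_inline))
#else
#define ROUND_INLINE static __attribute__((noinline))
#endif

/* Placeholder for a large, register-hungry round body. */
ROUND_INLINE uint64_t round_body(uint64_t st)
{
	return (st << 1) ^ (st >> 63) ^ 0x8000000080008008ULL;
}

uint64_t permute(uint64_t st)
{
	int i;

	for (i = 0; i < 24; i++)
		st = round_body(st) ^ (uint64_t)i;
	return st;
}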
+ */ +#ifdef CONFIG_64BIT +#define SHA3_INLINE inline +#else +#define SHA3_INLINE noinline +#endif + #define KECCAK_ROUNDS 24 static const u64 keccakf_rndc[24] = { @@ -35,111 +49,115 @@ static const u64 keccakf_rndc[24] = { /* update the state with given number of rounds */ -static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) +static SHA3_INLINE void keccakf_round(u64 st[25]) { u64 t[5], tt, bc[5]; - int round; - for (round = 0; round < KECCAK_ROUNDS; round++) { + /* Theta */ + bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; + bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; + bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; + bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; + bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; + + t[0] = bc[4] ^ rol64(bc[1], 1); + t[1] = bc[0] ^ rol64(bc[2], 1); + t[2] = bc[1] ^ rol64(bc[3], 1); + t[3] = bc[2] ^ rol64(bc[4], 1); + t[4] = bc[3] ^ rol64(bc[0], 1); + + st[0] ^= t[0]; + + /* Rho Pi */ + tt = st[1]; + st[ 1] = rol64(st[ 6] ^ t[1], 44); + st[ 6] = rol64(st[ 9] ^ t[4], 20); + st[ 9] = rol64(st[22] ^ t[2], 61); + st[22] = rol64(st[14] ^ t[4], 39); + st[14] = rol64(st[20] ^ t[0], 18); + st[20] = rol64(st[ 2] ^ t[2], 62); + st[ 2] = rol64(st[12] ^ t[2], 43); + st[12] = rol64(st[13] ^ t[3], 25); + st[13] = rol64(st[19] ^ t[4], 8); + st[19] = rol64(st[23] ^ t[3], 56); + st[23] = rol64(st[15] ^ t[0], 41); + st[15] = rol64(st[ 4] ^ t[4], 27); + st[ 4] = rol64(st[24] ^ t[4], 14); + st[24] = rol64(st[21] ^ t[1], 2); + st[21] = rol64(st[ 8] ^ t[3], 55); + st[ 8] = rol64(st[16] ^ t[1], 45); + st[16] = rol64(st[ 5] ^ t[0], 36); + st[ 5] = rol64(st[ 3] ^ t[3], 28); + st[ 3] = rol64(st[18] ^ t[3], 21); + st[18] = rol64(st[17] ^ t[2], 15); + st[17] = rol64(st[11] ^ t[1], 10); + st[11] = rol64(st[ 7] ^ t[2], 6); + st[ 7] = rol64(st[10] ^ t[0], 3); + st[10] = rol64( tt ^ t[1], 1); + + /* Chi */ + bc[ 0] = ~st[ 1] & st[ 2]; + bc[ 1] = ~st[ 2] & st[ 3]; + bc[ 2] = ~st[ 3] & st[ 4]; + bc[ 3] = ~st[ 4] & st[ 0]; + bc[ 4] = ~st[ 0] & st[ 1]; + st[ 0] ^= bc[ 0]; + st[ 1] ^= bc[ 1]; + st[ 2] ^= bc[ 2]; + st[ 3] ^= bc[ 3]; + st[ 4] ^= bc[ 4]; + + bc[ 0] = ~st[ 6] & st[ 7]; + bc[ 1] = ~st[ 7] & st[ 8]; + bc[ 2] = ~st[ 8] & st[ 9]; + bc[ 3] = ~st[ 9] & st[ 5]; + bc[ 4] = ~st[ 5] & st[ 6]; + st[ 5] ^= bc[ 0]; + st[ 6] ^= bc[ 1]; + st[ 7] ^= bc[ 2]; + st[ 8] ^= bc[ 3]; + st[ 9] ^= bc[ 4]; + + bc[ 0] = ~st[11] & st[12]; + bc[ 1] = ~st[12] & st[13]; + bc[ 2] = ~st[13] & st[14]; + bc[ 3] = ~st[14] & st[10]; + bc[ 4] = ~st[10] & st[11]; + st[10] ^= bc[ 0]; + st[11] ^= bc[ 1]; + st[12] ^= bc[ 2]; + st[13] ^= bc[ 3]; + st[14] ^= bc[ 4]; + + bc[ 0] = ~st[16] & st[17]; + bc[ 1] = ~st[17] & st[18]; + bc[ 2] = ~st[18] & st[19]; + bc[ 3] = ~st[19] & st[15]; + bc[ 4] = ~st[15] & st[16]; + st[15] ^= bc[ 0]; + st[16] ^= bc[ 1]; + st[17] ^= bc[ 2]; + st[18] ^= bc[ 3]; + st[19] ^= bc[ 4]; + + bc[ 0] = ~st[21] & st[22]; + bc[ 1] = ~st[22] & st[23]; + bc[ 2] = ~st[23] & st[24]; + bc[ 3] = ~st[24] & st[20]; + bc[ 4] = ~st[20] & st[21]; + st[20] ^= bc[ 0]; + st[21] ^= bc[ 1]; + st[22] ^= bc[ 2]; + st[23] ^= bc[ 3]; + st[24] ^= bc[ 4]; +} - /* Theta */ - bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; - bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; - bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; - bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; - bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; - - t[0] = bc[4] ^ rol64(bc[1], 1); - t[1] = bc[0] ^ rol64(bc[2], 1); - t[2] = bc[1] ^ rol64(bc[3], 1); - t[3] = bc[2] ^ rol64(bc[4], 1); - t[4] = bc[3] ^ rol64(bc[0], 1); 
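For orientation while reading this large hunk: keccakf_round() above is one round of Keccak-f[1600] minus the Iota step, which stays in the keccakf() loop further down. Writing the lanes as A[x][y] == st[x + 5*y], with x and y reduced mod 5, the theta and chi sections compute:

	C[x]     = A[x][0] ^ A[x][1] ^ A[x][2] ^ A[x][3] ^ A[x][4]
	D[x]     = C[x-1] ^ rol64(C[x+1], 1)
	A[x][y] ^= D[x]					/* theta: bc[] is C, t[] is D */
	A[x][y] ^= ~A[x+1][y] & A[x+2][y]		/* chi */

The rho and pi steps in between are the fixed per-lane rotations plus the lane permutation. The removed lines that follow are the same computation, previously nested inside the round loop.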
- - st[0] ^= t[0]; - - /* Rho Pi */ - tt = st[1]; - st[ 1] = rol64(st[ 6] ^ t[1], 44); - st[ 6] = rol64(st[ 9] ^ t[4], 20); - st[ 9] = rol64(st[22] ^ t[2], 61); - st[22] = rol64(st[14] ^ t[4], 39); - st[14] = rol64(st[20] ^ t[0], 18); - st[20] = rol64(st[ 2] ^ t[2], 62); - st[ 2] = rol64(st[12] ^ t[2], 43); - st[12] = rol64(st[13] ^ t[3], 25); - st[13] = rol64(st[19] ^ t[4], 8); - st[19] = rol64(st[23] ^ t[3], 56); - st[23] = rol64(st[15] ^ t[0], 41); - st[15] = rol64(st[ 4] ^ t[4], 27); - st[ 4] = rol64(st[24] ^ t[4], 14); - st[24] = rol64(st[21] ^ t[1], 2); - st[21] = rol64(st[ 8] ^ t[3], 55); - st[ 8] = rol64(st[16] ^ t[1], 45); - st[16] = rol64(st[ 5] ^ t[0], 36); - st[ 5] = rol64(st[ 3] ^ t[3], 28); - st[ 3] = rol64(st[18] ^ t[3], 21); - st[18] = rol64(st[17] ^ t[2], 15); - st[17] = rol64(st[11] ^ t[1], 10); - st[11] = rol64(st[ 7] ^ t[2], 6); - st[ 7] = rol64(st[10] ^ t[0], 3); - st[10] = rol64( tt ^ t[1], 1); - - /* Chi */ - bc[ 0] = ~st[ 1] & st[ 2]; - bc[ 1] = ~st[ 2] & st[ 3]; - bc[ 2] = ~st[ 3] & st[ 4]; - bc[ 3] = ~st[ 4] & st[ 0]; - bc[ 4] = ~st[ 0] & st[ 1]; - st[ 0] ^= bc[ 0]; - st[ 1] ^= bc[ 1]; - st[ 2] ^= bc[ 2]; - st[ 3] ^= bc[ 3]; - st[ 4] ^= bc[ 4]; - - bc[ 0] = ~st[ 6] & st[ 7]; - bc[ 1] = ~st[ 7] & st[ 8]; - bc[ 2] = ~st[ 8] & st[ 9]; - bc[ 3] = ~st[ 9] & st[ 5]; - bc[ 4] = ~st[ 5] & st[ 6]; - st[ 5] ^= bc[ 0]; - st[ 6] ^= bc[ 1]; - st[ 7] ^= bc[ 2]; - st[ 8] ^= bc[ 3]; - st[ 9] ^= bc[ 4]; - - bc[ 0] = ~st[11] & st[12]; - bc[ 1] = ~st[12] & st[13]; - bc[ 2] = ~st[13] & st[14]; - bc[ 3] = ~st[14] & st[10]; - bc[ 4] = ~st[10] & st[11]; - st[10] ^= bc[ 0]; - st[11] ^= bc[ 1]; - st[12] ^= bc[ 2]; - st[13] ^= bc[ 3]; - st[14] ^= bc[ 4]; - - bc[ 0] = ~st[16] & st[17]; - bc[ 1] = ~st[17] & st[18]; - bc[ 2] = ~st[18] & st[19]; - bc[ 3] = ~st[19] & st[15]; - bc[ 4] = ~st[15] & st[16]; - st[15] ^= bc[ 0]; - st[16] ^= bc[ 1]; - st[17] ^= bc[ 2]; - st[18] ^= bc[ 3]; - st[19] ^= bc[ 4]; - - bc[ 0] = ~st[21] & st[22]; - bc[ 1] = ~st[22] & st[23]; - bc[ 2] = ~st[23] & st[24]; - bc[ 3] = ~st[24] & st[20]; - bc[ 4] = ~st[20] & st[21]; - st[20] ^= bc[ 0]; - st[21] ^= bc[ 1]; - st[22] ^= bc[ 2]; - st[23] ^= bc[ 3]; - st[24] ^= bc[ 4]; +static void __optimize("O3") keccakf(u64 st[25]) +{ + int round; + for (round = 0; round < KECCAK_ROUNDS; round++) { + keccakf_round(st); /* Iota */ st[0] ^= keccakf_rndc[round]; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index d650c5b6ec90c..f505e9a01b2d0 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -79,7 +79,12 @@ config ACPI_DEBUGGER_USER endif config ACPI_SPCR_TABLE - bool + bool "ACPI Serial Port Console Redirection Support" + default y if X86 + help + Enable support for Serial Port Console Redirection (SPCR) Table. + This table provides information about the configuration of the + earlycon console. 
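With ACPI_SPCR_TABLE now a user-visible symbol, it can be pinned in a configuration. A hypothetical fragment; the behavior described is taken from the help text above:

# Keep SPCR support built in so that a bare "earlycon" kernel parameter
# can resolve the console from the firmware-provided ACPI SPCR table.
CONFIG_ACPI_SPCR_TABLE=y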
config ACPI_LPIT bool diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index 2ff5c8c04e3b9..f21c99ec46ee0 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -724,9 +724,9 @@ static __poll_t acpi_aml_poll(struct file *file, poll_table *wait) poll_wait(file, &acpi_aml_io.wait, wait); if (acpi_aml_user_readable()) - masks |= POLLIN | POLLRDNORM; + masks |= EPOLLIN | EPOLLRDNORM; if (acpi_aml_user_writable()) - masks |= POLLOUT | POLLWRNORM; + masks |= EPOLLOUT | EPOLLWRNORM; return masks; } diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index f53ccc6802381..76fb96966f7b1 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -53,7 +53,7 @@ MODULE_AUTHOR("Bruno Ducrot"); MODULE_DESCRIPTION("ACPI Video Driver"); MODULE_LICENSE("GPL"); -static bool brightness_switch_enabled = 1; +static bool brightness_switch_enabled = true; module_param(brightness_switch_enabled, bool, 0644); /* diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index 2243c8164b341..e65478593f9ae 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -51,7 +51,7 @@ /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" -#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2017 Intel Corporation" +#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2018 Intel Corporation" #if ACPI_MACHINE_WIDTH == 64 #define ACPI_WIDTH " (64-bit version)" diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 49bf47ca54777..c349ffdf55578 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index c84223b60b356..ce6e8db83e274 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 54b8d9df9423c..8b2cca5a717b7 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index f8f3a6e74128a..fab590bc5fd3e 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. 
* All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index a2adfd42f85cc..1b0269f6ac2dd 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 45ef3f5dc9ad2..27f322b2fed1a 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index cd722d8edacbb..3569aa3bf5ee8 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index 29555c8789a31..744374ab92853 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index a56675f0661eb..3ba3ff0f1c042 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 128a3d71b5986..6463340c45220 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 2fb1bb78d85c6..6c8f364fe2fc5 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index 5226146190bf9..a1f4d3f385c82 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index cbd59a302679e..36c2c58259866 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index c23c47328060b..e25634951d038 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index cdfcad8eb74c8..7c27bcee6ac75 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 438f3098a093a..20f36949928ac 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 62134bdbeda65..0338ac32f9c6a 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 84a3ceb6e3842..15b23414245aa 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index b6b29d7178246..00d21d2f766ea 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index f54dc5a34bdc4..b0e9492a6297e 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -7,7 +7,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index 1236e9a414e4d..b680c229ddd59 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c index 5984b90eb5907..4112c85f2aab3 100644 --- a/drivers/acpi/acpica/dbcmds.c +++ b/drivers/acpi/acpica/dbcmds.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c index 32d546f0db2f5..27236a6c51ff3 100644 --- a/drivers/acpi/acpica/dbconvert.c +++ b/drivers/acpi/acpica/dbconvert.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c index 5a606eac0c224..7df920cda77dd 100644 --- a/drivers/acpi/acpica/dbdisply.c +++ b/drivers/acpi/acpica/dbdisply.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c index ed088fceb18d3..8ad9e6d9e54b3 100644 --- a/drivers/acpi/acpica/dbexec.c +++ b/drivers/acpi/acpica/dbexec.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c index cf9607945704c..084bb332f8e26 100644 --- a/drivers/acpi/acpica/dbfileio.c +++ b/drivers/acpi/acpica/dbfileio.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index 7d08974c64c2c..55c0f2742339e 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index 954ca3b981a73..f7c661e06f379 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c index df62c9245efc2..2cda0bff6f2cd 100644 --- a/drivers/acpi/acpica/dbmethod.c +++ b/drivers/acpi/acpica/dbmethod.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index 8c207c7725179..8796fc1e0360e 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c index e7b415c20aa88..d2063cbab39af 100644 --- a/drivers/acpi/acpica/dbobject.c +++ b/drivers/acpi/acpica/dbobject.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c index 99fb0160b8fb5..d6aaef54e3693 100644 --- a/drivers/acpi/acpica/dbstats.c +++ b/drivers/acpi/acpica/dbstats.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c index c6bee6143266b..56e446b89d18b 100644 --- a/drivers/acpi/acpica/dbtest.c +++ b/drivers/acpi/acpica/dbtest.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c index bfa972b641719..cd40854ee9be6 100644 --- a/drivers/acpi/acpica/dbutils.c +++ b/drivers/acpi/acpica/dbutils.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c index b6985323e7eb9..77bbfa97cf913 100644 --- a/drivers/acpi/acpica/dbxface.c +++ b/drivers/acpi/acpica/dbxface.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 2873455c986d7..04a9f60e7ad10 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 4b6ebc2a28510..606697e741a51 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index d1f457eda9805..14ec52eba4087 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 0cab34a593d50..95ea639a94242 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index b1842dd4edf7e..946ff2e130d93 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index d7fc36917c671..b9c460c2d7636 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c index 27a7de95f7b0a..157f1645d91a5 100644 --- a/drivers/acpi/acpica/dsmthdat.c +++ b/drivers/acpi/acpica/dsmthdat.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index b21fe084ffc8c..4fa3400a95ba0 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 0336df7ac47dd..0181cd3177511 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c index 5a602b75084ef..902bee78036c2 100644 --- a/drivers/acpi/acpica/dspkginit.c +++ b/drivers/acpi/acpica/dspkginit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c index 4c5faf629a831..a4ce0b4a55a61 100644 --- a/drivers/acpi/acpica/dsutils.c +++ b/drivers/acpi/acpica/dsutils.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 22f45d0907332..2c07d220a50fe 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 5771e4e4a99af..fa4ef9229e171 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index b3d0aaec8203a..3b1313ba60d0a 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 3e081983d2ee6..8b5c3613c0608 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index da111a1f5bfbc..ee002d17526e0 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index d3b6b314fa507..4b2b0b44a16b6 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 0ce33b0f430c4..012b80de15013 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 263d8fc4a9e2f..410a3907c0518 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index 3a3cb8624f419..7ce756cc28abb 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 8649c6242478e..8ad4816c99500 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index c8adb400330af..729a8960a3af4 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 2db61ef1b4a3b..20fb51c06b8dd 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index 4f6bb3f016abd..40d0b1f541a04 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index bb58419f0d610..de196c8e3f304 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 93ec528bcd9a9..4187f563fede2 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c index 8ce73b962006f..d5594f79f877a 100644 --- a/drivers/acpi/acpica/evsci.c +++ b/drivers/acpi/acpica/evsci.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index dd1b9dd64cef8..9b3c01bf1438e 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index c773ac4892cb7..96c2520f95706 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 67c7c4ce276c7..cbb1598df9dcf 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index beba9d56a0d87..705fcd86151ae 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index 59b8de2f07d39..ea20e10dd1f2b 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 61813bd43f9e4..827f47b726632 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index 23ebadb06a95b..9abcc41a573bf 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index b8adb11f1b075..3dece45dd997e 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index a8191d2ca5e36..d931a66a16e3c 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index b2ff61bdb9a8f..4989ce9591aef 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 5fda981f64988..e3b0650e5bb63 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index a656608dca849..3d0f274be88b2 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index dbad3ebd7df50..1518fcb22ae12 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index ecd95b3f35f19..24c9741dee482 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index caa5ed1f65eca..c7b249cda5c0e 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index f787651348c11..dae01c93e4800 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 57980b7d35940..3cafa1d6f31a5 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index ce857addc8dbd..f16c655121ffd 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 688032b58a213..8b39fffce6dcd 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 8de060664204e..1d1040f2e3f87 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 7bcc9d809b7e9..387c438aa4853 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index 91c1de046442f..77fa8d9aa5bf7 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 5e1854ea85f60..b104bc3ca809b 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index 1c7c9962b0de7..2643d34f194d9 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index bdd43cde8f364..8f106bdcad5fb 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 56f59cf5da293..3d458d1996b0f 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 4ba7fcbf23b05..905443a3c28f7 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index ad3b610057f3d..420d9b145d2ed 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index ae9df8672d9ef..9a67d507a132f 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 34d608358eaf0..fb80d3f55d63f 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index fad249e774b41..68e958d4c25f2 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index 12626d021a9b5..64855b62a5aeb 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 09b6822aa5cc2..c1c54af148d0b 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c index 283819930be6b..faa2fa45eb1c8 100644 --- a/drivers/acpi/acpica/hwpci.c +++ b/drivers/acpi/acpica/hwpci.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index aa6e000819155..f3e7b7851a3a1 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index 1fe7387a00e67..c85c373ecbc4e 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 5b4282902a839..511e3b8ffc6d9 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index d1679035d5f31..65d82e6add0b1 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index b3c6e439933c5..d320b129b7d7d 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index e5c095ca6083a..589c774bbf9a7 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c index 33e652a12fca5..07f672b5a1d1b 100644 --- a/drivers/acpi/acpica/nsaccess.c +++ b/drivers/acpi/acpica/nsaccess.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c index 8ba5b32c9f717..ce57ccf4c1bf1 100644 --- a/drivers/acpi/acpica/nsalloc.c +++ b/drivers/acpi/acpica/nsalloc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index 67b7370dcae52..ce296ac14cf06 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index d55dcc82f4348..2f9d5d190fa96 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 4123b5077a7d4..e2ac16818dc3b 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 5026594763eaf..09ac00dee4506 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c index d22167cbd0ca6..c2d883b8c45e9 100644 --- a/drivers/acpi/acpica/nseval.c +++ b/drivers/acpi/acpica/nseval.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index 9c62979497122..c17af4a3ab679 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index d2915e186ae14..fdfe9309bd330 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index 22c92d1a24d8f..c686eda7ca66f 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c index 707b2aa501e1b..757e44555ec30 100644 --- a/drivers/acpi/acpica/nsobject.c +++ b/drivers/acpi/acpica/nsobject.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index 2fc33a5203f40..c5b22ea5b3695 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 9d14b509529e3..4f1f6d6d9ddff 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index a8ea8fb1d2994..7805d5ce81272 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index 418ef2ac82abe..7b6b6d281f1cb 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 06037e0446941..29c3973c78153 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c index e91dbee9235f3..a469447f5c02d 100644 --- a/drivers/acpi/acpica/nssearch.c +++ b/drivers/acpi/acpica/nssearch.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index b43fe5fce64ba..0487fdb59b0ee 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index 6b6e6f498cffb..dd7ae1bc8af8c 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c index 9b51f65823b29..1075bd9541f56 100644 --- a/drivers/acpi/acpica/nsxfeval.c +++ b/drivers/acpi/acpica/nsxfeval.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index 1069662358050..e9603fc9586ce 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -6,7 +6,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c index 47f689ec3fcbc..ac1fbf767cac3 100644 --- a/drivers/acpi/acpica/nsxfobj.c +++ b/drivers/acpi/acpica/nsxfobj.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 171e2faa7c506..dbc51bc5fdd67 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index bb04dec168ad7..7dca287d7690a 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index c0b179883ff25..b18f1e0489854 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index a402ad772a1e5..d31f3eb232254 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index eff22950232b6..1dc1fc79297eb 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index ac88319dc1114..2474ff9612940 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index 22d7f1d6849b3..f49cdcc65700a 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index c06d6e2fc7a5d..f9fa88c79b32d 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index cd59dfe6a47d9..fe151f42de3a5 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index 22a37c82af19a..bc5c779e54e80 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index c88a681586bf3..d2270ade5cf82 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c index a131a28bb09d6..213bad89675b7 100644 --- a/drivers/acpi/acpica/rsaddr.c +++ b/drivers/acpi/acpica/rsaddr.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c index 659fb718504a6..576f7aae162b4 100644 --- a/drivers/acpi/acpica/rscalc.c +++ b/drivers/acpi/acpica/rscalc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c index f72ff0b54a639..fe07001ea8651 100644 --- a/drivers/acpi/acpica/rscreate.c +++ b/drivers/acpi/acpica/rscreate.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c index 55fd1880efbec..bc4c4755aeb9e 100644 --- a/drivers/acpi/acpica/rsdump.c +++ b/drivers/acpi/acpica/rsdump.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c index da150e17795b9..c4a2a08e31acc 100644 --- a/drivers/acpi/acpica/rsdumpinfo.c +++ b/drivers/acpi/acpica/rsdumpinfo.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c index b0e50518d7666..e819bb0f45af5 100644 --- a/drivers/acpi/acpica/rsinfo.c +++ b/drivers/acpi/acpica/rsinfo.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c index b7a47fbc519b6..eafd993592f6a 100644 --- a/drivers/acpi/acpica/rsio.c +++ b/drivers/acpi/acpica/rsio.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c index 092a733c42b80..aabd73298eb8b 100644 --- a/drivers/acpi/acpica/rsirq.c +++ b/drivers/acpi/acpica/rsirq.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c index 36a6657dd34d2..11214780ea8fd 100644 --- a/drivers/acpi/acpica/rslist.c +++ b/drivers/acpi/acpica/rslist.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c index 273eecb3001b4..05e375abc6b56 100644 --- a/drivers/acpi/acpica/rsmemory.c +++ b/drivers/acpi/acpica/rsmemory.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c index cc4b5486c4bcc..7b4627181cc6f 100644 --- a/drivers/acpi/acpica/rsmisc.c +++ b/drivers/acpi/acpica/rsmisc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c index 14d12d6eb716f..87dac2812072b 100644 --- a/drivers/acpi/acpica/rsserial.c +++ b/drivers/acpi/acpica/rsserial.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index b2aeca01204a0..49ff7f851d58c 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c index be65e65e216eb..3b481f0b81c57 100644 --- a/drivers/acpi/acpica/rsxface.c +++ b/drivers/acpi/acpica/rsxface.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index b19a2f0ea331d..ec69267f1447a 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 5f051d82188d1..d1763c5e4e913 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index fea89c8d305cb..999a64a48e1a2 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 0dfc0ac3c141f..00be16da1ee2c 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index edfd7b10be19f..8cdcdd2c46975 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 0c6768d203958..30d40ff8992b4 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 5ecb8d2e68347..dca91b6f8cc2b 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index d81f442228b8b..e09b4b26300e7 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index f9f9a7da2cade..abf3c62e1e800 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index 26a0633115be3..d8540f380ae5f 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index 5594a359dbf17..12fbaddbfb0db 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c index 909bdb1986515..95565e46a6956 100644 --- a/drivers/acpi/acpica/utascii.c +++ b/drivers/acpi/acpica/utascii.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index f17eaa009dde7..2c5a14c2f46bd 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 531493306dee9..08e6944404b3e 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index e9382255d6c6c..01434af99035d 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index cff7154b7feea..2201be1bf4c29 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index 55debbad487dc..1a3f316a18a8e 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index c6eb9fae70f9a..7d8d0208f0a35 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index 42388dcb5ccc9..ce5e891291bf8 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index 3fce7519c6902..b8be0b82a130e 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c index eb6dcab33d2f8..e3dbad8b73e56 100644 --- a/drivers/acpi/acpica/utexcep.c +++ b/drivers/acpi/acpica/utexcep.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index 230a50c82f22c..933595b0e5942 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index fb406daf47fac..f5886d557a942 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index a6eb580ee21d7..db3c3c1d33da1 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 45eeb0dcf283b..a2005b0303474 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index db2d9910866ee..0636074a4c23f 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c index 2055a858e5f59..eddf719904331 100644 --- a/drivers/acpi/acpica/utmath.c +++ b/drivers/acpi/acpica/utmath.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c index 45c78c2adbf0f..a331313ad5fa2 100644 --- a/drivers/acpi/acpica/utmisc.c +++ b/drivers/acpi/acpica/utmisc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 524ba931d5e83..6767bd1626f70 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c index 33a0970646df5..94219610e259e 100644 --- a/drivers/acpi/acpica/utnonansi.c +++ b/drivers/acpi/acpica/utnonansi.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index cb3db9fed50d1..375901c0a596e 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index f6b8dd24b006c..00ea104f6a0a9 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c index 1b3ee74a87ebf..9923dfa708be8 100644 --- a/drivers/acpi/acpica/utownerid.c +++ b/drivers/acpi/acpica/utownerid.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 350709f23e4cf..ae6fef02b692b 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index c008589b41bd9..ac07700f5b799 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utresdecode.c b/drivers/acpi/acpica/utresdecode.c index e15a2538558b3..93fa3450ca884 100644 --- a/drivers/acpi/acpica/utresdecode.c +++ b/drivers/acpi/acpica/utresdecode.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index f9801d13547fe..4d289d9c734cf 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c index eafabcd2fada3..7750c48739d8c 100644 --- a/drivers/acpi/acpica/utstate.c +++ b/drivers/acpi/acpica/utstate.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c index 9eacbcb9e4f45..a9507d1976ffa 100644 --- a/drivers/acpi/acpica/utstring.c +++ b/drivers/acpi/acpica/utstring.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c index 97f48d71f9e60..6fc76f0b60e9d 100644 --- a/drivers/acpi/acpica/utstrsuppt.c +++ b/drivers/acpi/acpica/utstrsuppt.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c index e2067dcb93893..9f7cef1de34aa 100644 --- a/drivers/acpi/acpica/utstrtoul64.c +++ b/drivers/acpi/acpica/utstrtoul64.c @@ -6,7 +6,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 633b4e2c669f6..8cc70ca4e0fb8 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 5028e06718b1c..95946fdb55d5b 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index 6b9ba4029f8e9..25ef2ce646036 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 9da4f8ef2e77b..a78861ded8940 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index 6d5180601cf2a..e727db52a55ed 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -5,7 +5,7 @@ *****************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c index 0b85f113f7262..764782fcf1bda 100644 --- a/drivers/acpi/acpica/utxfmutex.c +++ b/drivers/acpi/acpica/utxfmutex.c @@ -5,7 +5,7 @@ ******************************************************************************/ /* - * Copyright (C) 2000 - 2017, Intel Corp. + * Copyright (C) 2000 - 2018, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 19bc440820e64..7128488a3a728 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -1209,6 +1209,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"), }, }, + { + .callback = battery_full_discharging_quirk, + .ident = "ASUS UX360UA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"), + }, + }, + { + .callback = battery_full_discharging_quirk, + .ident = "ASUS UX410UAK", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"), + }, + }, {}, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index f87ed3be779ae..0dad0bd9327b5 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id) return 0; } #endif +static int set_gbl_term_list(const struct dmi_system_id *id) +{ + acpi_gbl_parse_table_as_term_list = 1; + return 0; +} -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = { + /* + * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C + * mode. + * https://bugzilla.kernel.org/show_bug.cgi?id=198515 + */ + { + .callback = set_gbl_term_list, + .ident = "Dell Precision M5530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"), + }, + }, + { + .callback = set_gbl_term_list, + .ident = "Dell XPS 15 9570", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"), + }, + }, /* * Invoke DSDT corruption work-around on all Toshiba Satellite. + * DSDT will be copied to memory. 
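The battery.c and bus.c hunks here both extend DMI quirk tables: each entry pairs a set of DMI_MATCH() keys with a callback that flips a behavioral flag when the firmware identification strings match, and the table is walked once at init time. A minimal sketch of the same pattern follows; the flag, table, and machine names are illustrative, not taken from the patch.

    #include <linux/dmi.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    static bool example_quirk_enabled;  /* hypothetical behavioral flag */

    static int example_enable_quirk(const struct dmi_system_id *id)
    {
            pr_info("applying quirk for %s\n", id->ident);
            example_quirk_enabled = true;
            return 0;  /* 0 lets dmi_check_system() keep scanning later entries */
    }

    static const struct dmi_system_id example_quirk_table[] __initconst = {
            {
                    .callback = example_enable_quirk,
                    .ident = "Example Laptop 1234",  /* hypothetical machine */
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 1234"),
                    },
            },
            {}  /* zeroed terminator ends the table */
    };

    static int __init example_quirk_init(void)
    {
            /* Every DMI_MATCH key in an entry must match for its callback to run. */
            dmi_check_system(example_quirk_table);
            return 0;
    }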
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 */ { @@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = { {} }; #else -static const struct dmi_system_id dsdt_dmi_table[] __initconst = { +static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = { {} }; #endif @@ -108,6 +135,7 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle, } return status; } +EXPORT_SYMBOL_GPL(acpi_bus_get_status_handle); int acpi_bus_get_status(struct acpi_device *device) { @@ -119,6 +147,12 @@ int acpi_bus_get_status(struct acpi_device *device) return 0; } + /* Battery devices must have their deps met before calling _STA */ + if (acpi_device_is_battery(device) && device->dep_unmet) { + acpi_set_device_status(device, 0); + return 0; + } + status = acpi_bus_get_status_handle(device->handle, &sta); if (ACPI_FAILURE(status)) return -ENODEV; @@ -626,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev) * acpi_of_match_device - Match device object using the "compatible" property. * @adev: ACPI device object to match. * @of_match_table: List of device IDs to match against. + * @of_id: OF ID if matched * * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of * identifiers and a _DSD object with the "compatible" property, use that * property to match against the given list of identifiers. */ static bool acpi_of_match_device(struct acpi_device *adev, - const struct of_device_id *of_match_table) + const struct of_device_id *of_match_table, + const struct of_device_id **of_id) { const union acpi_object *of_compatible, *obj; int i, nval; @@ -656,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev, const struct of_device_id *id; for (id = of_match_table; id->compatible[0]; id++) - if (!strcasecmp(obj->string.pointer, id->compatible)) + if (!strcasecmp(obj->string.pointer, id->compatible)) { + if (of_id) + *of_id = id; return true; + } } return false; @@ -728,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id, return true; } -static const struct acpi_device_id *__acpi_match_device( - struct acpi_device *device, - const struct acpi_device_id *ids, - const struct of_device_id *of_ids) +static bool __acpi_match_device(struct acpi_device *device, + const struct acpi_device_id *acpi_ids, + const struct of_device_id *of_ids, + const struct acpi_device_id **acpi_id, + const struct of_device_id **of_id) { const struct acpi_device_id *id; struct acpi_hardware_id *hwid; @@ -741,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device( * driver for it. */ if (!device || !device->status.present) - return NULL; + return false; list_for_each_entry(hwid, &device->pnp.ids, list) { /* First, check the ACPI/PNP IDs provided by the caller. */ - for (id = ids; id->id[0] || id->cls; id++) { - if (id->id[0] && !strcmp((char *) id->id, hwid->id)) - return id; - else if (id->cls && __acpi_match_device_cls(id, hwid)) - return id; + if (acpi_ids) { + for (id = acpi_ids; id->id[0] || id->cls; id++) { + if (id->id[0] && !strcmp((char *)id->id, hwid->id)) + goto out_acpi_match; + if (id->cls && __acpi_match_device_cls(id, hwid)) + goto out_acpi_match; + } } /* * Next, check ACPI_DT_NAMESPACE_HID and try to match the * "compatible" property if found. - * - * The id returned by the below is not valid, but the only - * caller passing non-NULL of_ids here is only interested in - * whether or not the return value is NULL. 
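The __acpi_match_device() rework running through this hunk replaces a function that returned a possibly invalid table pointer with one that returns bool and fills optional out-pointers for the matched ACPI and OF IDs, so callers that only need a yes/no answer can pass NULL. A toy matcher showing the same shape, with hypothetical names:

    #include <linux/string.h>
    #include <linux/types.h>

    struct example_id {
            const char *id;
            unsigned long driver_data;
    };

    /* Returns true on a match; *out is filled only when the caller asks for it. */
    static bool example_match(const char *hid, const struct example_id *ids,
                              const struct example_id **out)
    {
            const struct example_id *id;

            for (id = ids; id->id; id++) {
                    if (!strcmp(id->id, hid)) {
                            if (out)
                                    *out = id;  /* optional out-parameter */
                            return true;
                    }
            }
            return false;
    }

This is how acpi_match_device_ids() and acpi_driver_match_device() end up passing NULL for both out-pointers, while acpi_match_device() asks for the matched ACPI ID back.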
*/ - if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) - && acpi_of_match_device(device, of_ids)) - return id; + if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)) + return acpi_of_match_device(device, of_ids, of_id); } - return NULL; + return false; + +out_acpi_match: + if (acpi_id) + *acpi_id = id; + return true; } /** @@ -781,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device( const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev) { - return __acpi_match_device(acpi_companion_match(dev), ids, NULL); + const struct acpi_device_id *id = NULL; + + __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL); + return id; } EXPORT_SYMBOL_GPL(acpi_match_device); -void *acpi_get_match_data(const struct device *dev) +const void *acpi_device_get_match_data(const struct device *dev) { const struct acpi_device_id *match; - if (!dev->driver) - return NULL; - - if (!dev->driver->acpi_match_table) - return NULL; - match = acpi_match_device(dev->driver->acpi_match_table, dev); if (!match) return NULL; - return (void *)match->driver_data; + return (const void *)match->driver_data; } -EXPORT_SYMBOL_GPL(acpi_get_match_data); +EXPORT_SYMBOL_GPL(acpi_device_get_match_data); int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids) { - return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT; + return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT; } EXPORT_SYMBOL(acpi_match_device_ids); @@ -815,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev, { if (!drv->acpi_match_table) return acpi_of_match_device(ACPI_COMPANION(dev), - drv->of_match_table); + drv->of_match_table, + NULL); - return !!__acpi_match_device(acpi_companion_match(dev), - drv->acpi_match_table, drv->of_match_table); + return __acpi_match_device(acpi_companion_match(dev), + drv->acpi_match_table, drv->of_match_table, + NULL, NULL); } EXPORT_SYMBOL_GPL(acpi_driver_match_device); @@ -1019,11 +1060,8 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; - /* - * If the machine falls into the DMI check table, - * DSDT will be copied to memory - */ - dmi_check_system(dsdt_dmi_table); + /* Check machine-specific quirks */ + dmi_check_system(acpi_quirks_dmi_table); status = acpi_reallocate_root_table(); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 06ea4749ebd98..0afbb2658cbc0 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -119,7 +119,7 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); * to PCC commands. Keeping it high enough to cover emulators where * the processors run painfully slow. 
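With acpi_get_match_data() renamed to acpi_device_get_match_data() and constified here (device_get_match_data() in drivers/base/property.c follows the same change further down), a driver fetches its per-variant driver_data through the fwnode layer without caring whether it matched by ACPI or OF. A hedged sketch of a consumer; the driver and data structure are made up:

    #include <linux/platform_device.h>
    #include <linux/property.h>

    struct example_chip_info {          /* hypothetical per-variant data */
            unsigned int num_channels;
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* Returns the matched ID table entry's driver_data as const void *. */
            const struct example_chip_info *info = device_get_match_data(&pdev->dev);

            if (!info)
                    return -ENODEV;
            dev_info(&pdev->dev, "%u channels\n", info->num_channels);
            return 0;
    }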
*/ -#define NUM_RETRIES 500 +#define NUM_RETRIES 500ULL struct cppc_attr { struct attribute attr; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d9f38c645e4a1..30a5729565575 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev) ec->reference_count >= 1) acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); + if (acpi_sleep_no_ec_events()) + acpi_ec_enter_noirq(ec); + return 0; } @@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev) { struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); + if (acpi_sleep_no_ec_events()) + acpi_ec_leave_noirq(ec); + if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 5f0071c7e2e11..abb559cd28d79 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -292,6 +292,9 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr) pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; pr->power.states[ACPI_STATE_C1].valid = 1; pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT; + + snprintf(pr->power.states[ACPI_STATE_C1].desc, + ACPI_CX_DESC_LEN, "ACPI HLT"); } /* the C0 state only exists as a filler in our array */ pr->power.states[ACPI_STATE_C0].valid = 1; diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 18b72eec35076..c7cf48ad5cb9d 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -159,7 +159,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag) { int ret; - if (ignore_ppc) { + if (ignore_ppc || !pr->performance) { /* * Only when it is notification event, the _OST object * will be evaluated. Otherwise it is skipped. 
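The NUM_RETRIES change above and the atm/he.c change further down make the same point: an unsuffixed constant is an int, so an expression such as 272 * mult is evaluated in 32-bit arithmetic even when the result is later used as, or compared against, a 64-bit value, and large operands silently wrap. The ULL suffix forces the whole expression into unsigned 64-bit. A compact standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mult = 20000000;       /* large multiplier, e.g. a link rate */

            uint64_t wrong = 272 * mult;    /* computed in 32 bits, wraps */
            uint64_t right = 272ULL * mult; /* computed in 64 bits */

            /* Prints 1145032704 vs 5440000000 on a typical LP64 system. */
            printf("%llu vs %llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }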
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 466d1503aba0e..5815356ea6ad3 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, return 0; } -static void * +static const void * acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, const struct device *dev) { - return acpi_get_match_data(dev); + return acpi_device_get_match_data(dev); } #define DECLARE_ACPI_FWNODE_OPS(ops) \ diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 2fa8304171e09..7a3431018e0ab 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device) device->driver_data = hc; acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc); - printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n", - hc->ec, hc->offset, hc->query_bit); + dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n", + hc->offset, hc->query_bit); return 0; } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index b0fe5272c76aa..8e63d937babb0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1565,6 +1565,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device_initialize(&device->dev); dev_set_uevent_suppress(&device->dev, true); acpi_init_coherency(device); + /* Assume there are unmet deps until acpi_device_dep_initialize() runs */ + device->dep_unmet = 1; } void acpi_device_add_finalize(struct acpi_device *device) @@ -1588,6 +1590,14 @@ static int acpi_add_single_object(struct acpi_device **child, } acpi_init_device_object(device, handle, type, sta); + /* + * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so + * that we can call acpi_bus_get_status() and use its quirk handling. + * Note this must be done before the get power-/wakeup_dev-flags calls. + */ + if (type == ACPI_BUS_TYPE_DEVICE) + acpi_bus_get_status(device); + acpi_bus_get_power_flags(device); acpi_bus_get_wakeup_device_flags(device); @@ -1660,9 +1670,11 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type, return -ENODEV; *type = ACPI_BUS_TYPE_DEVICE; - status = acpi_bus_get_status_handle(handle, sta); - if (ACPI_FAILURE(status)) - *sta = 0; + /* + * acpi_add_single_object updates this once we've an acpi_device + * so that acpi_bus_get_status' quirk handling can be used. + */ + *sta = 0; break; case ACPI_TYPE_PROCESSOR: *type = ACPI_BUS_TYPE_PROCESSOR; @@ -1760,6 +1772,8 @@ static void acpi_device_dep_initialize(struct acpi_device *adev) acpi_status status; int i; + adev->dep_unmet = 0; + if (!acpi_has_method(adev->handle, "_DEP")) return; diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 324b35bfe781d..9d52743080a4f 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -21,7 +21,7 @@ * occasionally getting stuck as 1. To avoid the potential for a hang, check * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART * implementations, so only do so if an affected platform is detected in - * parse_spcr(). + * acpi_parse_spcr(). 
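The spcr.c hunks just below split the old single earlycon flag into acpi_parse_spcr(enable_earlycon, enable_console), so an architecture can request an early console from the SPCR table without also registering it as the preferred runtime console. A sketch of a call site under assumed arch policy flags (the flags are illustrative, not from this patch):

    #include <linux/acpi.h>

    /* Hypothetical decisions made earlier during arch boot. */
    extern bool want_earlycon;
    extern bool dt_chose_stdout;

    static void __init example_console_setup(void)
    {
            /*
             * Set up earlycon from SPCR when asked, and only add the SPCR
             * console as preferred if devicetree did not already pick one.
             */
            acpi_parse_spcr(want_earlycon, !dt_chose_stdout);
    }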
*/ bool qdf2400_e44_present; EXPORT_SYMBOL(qdf2400_e44_present); @@ -74,19 +74,21 @@ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb) } /** - * parse_spcr() - parse ACPI SPCR table and add preferred console + * acpi_parse_spcr() - parse ACPI SPCR table and add preferred console * - * @earlycon: set up earlycon for the console specified by the table + * @enable_earlycon: set up earlycon for the console specified by the table + * @enable_console: setup the console specified by the table. * * For the architectures with support for ACPI, CONFIG_ACPI_SPCR_TABLE may be * defined to parse ACPI SPCR table. As a result of the parsing preferred - * console is registered and if @earlycon is true, earlycon is set up. + * console is registered and if @enable_earlycon is true, earlycon is set up. + * If @enable_console is true the system console is also configured. * * When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called * from arch initialization code as soon as the DT/ACPI decision is made. * */ -int __init parse_spcr(bool earlycon) +int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) { static char opts[64]; struct acpi_table_spcr *table; @@ -105,17 +107,15 @@ int __init parse_spcr(bool earlycon) if (ACPI_FAILURE(status)) return -ENOENT; - if (table->header.revision < 2) { - err = -ENOENT; - pr_err("wrong table version\n"); - goto done; - } + if (table->header.revision < 2) + pr_info("SPCR table version %d\n", table->header.revision); if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { switch (ACPI_ACCESS_BIT_WIDTH(( table->serial_port.access_width))) { default: pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n"); + /* fall through */ case 8: iotype = "mmio"; break; @@ -185,7 +185,7 @@ int __init parse_spcr(bool earlycon) */ if (qdf2400_erratum_44_present(&table->header)) { qdf2400_e44_present = true; - if (earlycon) + if (enable_earlycon) uart = "qdf2400_e44"; } @@ -205,11 +205,13 @@ int __init parse_spcr(bool earlycon) pr_info("console: %s\n", opts); - if (earlycon) + if (enable_earlycon) setup_earlycon(opts); - err = add_preferred_console(uart, 0, opts + strlen(uart) + 1); - + if (enable_console) + err = add_preferred_console(uart, 0, opts + strlen(uart) + 1); + else + err = 0; done: acpi_put_table((struct acpi_table_header *)table); return err; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 80ce2a7d224b6..7bcb66ccccf3e 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -456,7 +456,8 @@ static const char * const table_sigs[] = { ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA, ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT, - ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL }; + ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, + NULL }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d21040c5d343f..15e3d3c2260dd 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -4371,7 +4371,7 @@ static int binder_thread_release(struct binder_proc *proc, */ if ((thread->looper & BINDER_LOOPER_STATE_POLL) && waitqueue_active(&thread->wait)) { - wake_up_poll(&thread->wait, POLLHUP | POLLFREE); + wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE); } binder_inner_proc_unlock(thread->proc); @@ -4401,7 +4401,7 @@ static __poll_t binder_poll(struct file *filp, poll_wait(filp, &thread->wait, wait); if 
(binder_has_work(thread, wait_for_proc_work)) - return POLLIN; + return EPOLLIN; return 0; } diff --git a/drivers/atm/he.c b/drivers/atm/he.c index e58538c293777..29f102dcfec49 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -738,13 +738,13 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev) #else /* this is pretty, but avoids _divdu3 and is mostly correct */ mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR; - if (rate_cps > (272 * mult)) + if (rate_cps > (272ULL * mult)) buf = 4; - else if (rate_cps > (204 * mult)) + else if (rate_cps > (204ULL * mult)) buf = 3; - else if (rate_cps > (136 * mult)) + else if (rate_cps > (136ULL * mult)) buf = 2; - else if (rate_cps > (68 * mult)) + else if (rate_cps > (68ULL * mult)) buf = 1; else buf = 0; diff --git a/drivers/base/core.c b/drivers/base/core.c index b2261f92f2f1c..5847364f25d96 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link) dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); + if (link->flags & DL_FLAG_PM_RUNTIME) + pm_runtime_drop_link(link->consumer); + list_del(&link->s_node); list_del(&link->c_node); device_link_free(link); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 528b24149bc70..1ea0e2502e8ed 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2290,6 +2290,38 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, return 0; } +static int genpd_iterate_idle_states(struct device_node *dn, + struct genpd_power_state *states) +{ + int ret; + struct of_phandle_iterator it; + struct device_node *np; + int i = 0; + + ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); + if (ret <= 0) + return ret; + + /* Loop over the phandles until all the requested entries are found */ + of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { + np = it.node; + if (!of_match_node(idle_state_match, np)) + continue; + if (states) { + ret = genpd_parse_state(&states[i], np); + if (ret) { + pr_err("Parsing idle state node %pOF failed with err %d\n", + np, ret); + of_node_put(np); + return ret; + } + } + i++; + } + + return i; +} + /** * of_genpd_parse_idle_states: Return array of idle states for the genpd. * @@ -2299,49 +2331,31 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, * * Returns the device states parsed from the OF node. The memory for the states * is allocated by this function and is the responsibility of the caller to - * free the memory after use. + * free the memory after use. If no domain idle states are found it returns + * -EINVAL, and in case of errors a negative error code. */ int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n) { struct genpd_power_state *st; - struct device_node *np; - int i = 0; - int err, ret; - int count; - struct of_phandle_iterator it; - const struct of_device_id *match_id; + int ret; - count = of_count_phandle_with_args(dn, "domain-idle-states", NULL); - if (count <= 0) - return -EINVAL; + ret = genpd_iterate_idle_states(dn, NULL); + if (ret <= 0) + return ret < 0 ?
ret : -EINVAL; - st = kcalloc(count, sizeof(*st), GFP_KERNEL); + st = kcalloc(ret, sizeof(*st), GFP_KERNEL); if (!st) return -ENOMEM; - /* Loop over the phandles until all the requested entry is found */ - of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) { - np = it.node; - match_id = of_match_node(idle_state_match, np); - if (!match_id) - continue; - ret = genpd_parse_state(&st[i++], np); - if (ret) { - pr_err - ("Parsing idle state node %pOF failed with err %d\n", - np, ret); - of_node_put(np); - kfree(st); - return ret; - } + ret = genpd_iterate_idle_states(dn, st); + if (ret <= 0) { + kfree(st); + return ret < 0 ? ret : -EINVAL; } - *n = i; - if (!i) - kfree(st); - else - *states = st; + *states = st; + *n = ret; return 0; } diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index a8ac86e4d79e7..6637fc319269b 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) return; if (device_may_wakeup(wirq->dev)) { - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && + !pm_runtime_status_suspended(wirq->dev)) enable_irq(wirq->irq); enable_irq_wake(wirq->irq); @@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) if (device_may_wakeup(wirq->dev)) { disable_irq_wake(wirq->irq); - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && + !pm_runtime_status_suspended(wirq->dev)) disable_irq_nosync(wirq->irq); } } diff --git a/drivers/base/property.c b/drivers/base/property.c index 302236281d830..8f205f6461ed8 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, } EXPORT_SYMBOL(fwnode_graph_parse_endpoint); -void *device_get_match_data(struct device *dev) +const void *device_get_match_data(struct device *dev) { - return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, - dev); + return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev); } EXPORT_SYMBOL_GPL(device_get_match_data); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 0521748a1972a..22f9145a426fd 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -306,9 +306,9 @@ static __poll_t vhci_poll(struct file *file, poll_table *wait) poll_wait(file, &data->read_wait, wait); if (!skb_queue_empty(&data->readq)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; - return POLLOUT | POLLWRNORM; + return EPOLLOUT | EPOLLWRNORM; } static void vhci_open_timeout(struct work_struct *work) diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index a2a1c1478cd08..a5e2f9e557eaa 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -241,7 +241,7 @@ static __poll_t apm_poll(struct file *fp, poll_table * wait) struct apm_user *as = fp->private_data; poll_wait(fp, &apm_waitqueue, wait); - return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM; + return queue_empty(&as->queue) ? 
0 : EPOLLIN | EPOLLRDNORM; } /* diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c index 2f92cc46698b2..06749e295adaf 100644 --- a/drivers/char/dsp56k.c +++ b/drivers/char/dsp56k.c @@ -414,7 +414,7 @@ static __poll_t dsp56k_poll(struct file *file, poll_table *wait) { case DSP56K_DEV_56001: /* poll_wait(file, ???, wait); */ - return POLLIN | POLLRDNORM | POLLOUT; + return EPOLLIN | EPOLLRDNORM | EPOLLOUT; default: printk("DSP56k driver: Unknown minor device: %d\n", dev); diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 2697c22e3be25..f882460b5a442 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -62,7 +62,7 @@ #include <linux/uaccess.h> /* for get_user, etc. */ #include <linux/wait.h> /* for wait_queue */ #include <linux/init.h> /* for __init, module_{init,exit} */ -#include <linux/poll.h> /* for POLLIN, etc. */ +#include <linux/poll.h> /* for EPOLLIN, etc. */ #include "dtlk.h" /* local header file for DoubleTalk values */ #ifdef TRACING @@ -244,11 +244,11 @@ static __poll_t dtlk_poll(struct file *file, poll_table * wait) if (dtlk_has_indexing && dtlk_readable()) { del_timer(&dtlk_timer); - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; } if (dtlk_writeable()) { del_timer(&dtlk_timer); - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } /* there are no exception conditions */ diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index dbed4953f86cd..be426eb2a3535 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -359,7 +359,7 @@ static __poll_t hpet_poll(struct file *file, poll_table * wait) spin_unlock_irq(&hpet_lock); if (v != 0) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d1f5bb534e0e3..6e9df558325be 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng) /* Enable secondary noise source on CPUs where it is present.
*/ /* Nehemiah stepping 8 and higher */ - if ((c->x86_model == 9) && (c->x86_mask > 7)) + if ((c->x86_model == 9) && (c->x86_stepping > 7)) lo |= VIA_NOISESRC2; /* Esther */ diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index 7992c870b0a21..c95b93b7598ba 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -349,10 +349,10 @@ static __poll_t bt_bmc_poll(struct file *file, poll_table *wait) ctrl = bt_inb(bt_bmc, BT_CTRL); if (ctrl & BT_CTRL_H2B_ATN) - mask |= POLLIN; + mask |= EPOLLIN; if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) - mask |= POLLOUT; + mask |= EPOLLOUT; return mask; } diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index a011a7739f5e7..5f1bc91747358 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -89,7 +89,7 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) spin_lock_irqsave(&priv->recv_msg_lock, flags); if (!list_empty(&(priv->recv_msgs))) - mask |= (POLLIN | POLLRDNORM); + mask |= (EPOLLIN | EPOLLRDNORM); spin_unlock_irqrestore(&priv->recv_msg_lock, flags); diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 34bc1f3ca4143..a58acdcf74146 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -895,7 +895,7 @@ static __poll_t ipmi_poll(struct file *file, poll_table *wait) spin_lock(&ipmi_read_lock); if (data_to_read) - mask |= (POLLIN | POLLRDNORM); + mask |= (EPOLLIN | EPOLLRDNORM); spin_unlock(&ipmi_read_lock); return mask; diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 819fe37a3683b..f80965407d3cc 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -423,9 +423,9 @@ static __poll_t cm4040_poll(struct file *filp, poll_table *wait) poll_wait(filp, &dev->poll_wait, wait); if (test_and_clear_bit(BS_READABLE, &dev->buffer_status)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (test_and_clear_bit(BS_WRITABLE, &dev->buffer_status)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; DEBUGP(2, dev, "<- cm4040_poll(%u)\n", mask); diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 7a56d1a13ec3a..1ae77b41050ab 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -776,7 +776,7 @@ static __poll_t pp_poll(struct file *file, poll_table *wait) poll_wait(file, &pp->irq_wait, wait); if (atomic_read(&pp->irqc)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 80f2c326db47d..e5b3d3ba46604 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1793,9 +1793,9 @@ random_poll(struct file *file, poll_table * wait) poll_wait(file, &random_write_wait, wait); mask = 0; if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index c6a317120a550..0c858d027bf3d 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -804,7 +804,7 @@ static __poll_t rtc_poll(struct file *file, poll_table *wait) spin_unlock_irq(&rtc_lock); if (l != 0) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } #endif diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index 7f49fa0f41d77..5918ea7499bb1 100644 
--- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c @@ -340,10 +340,10 @@ scdrv_poll(struct file *file, struct poll_table_struct *wait) if (status > 0) { if (status & SAL_IROUTER_INTR_RECV) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } if (status & SAL_IROUTER_INTR_XMIT) { - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } } diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index fc041c462aa4e..186689833231d 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c @@ -944,7 +944,7 @@ static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait) { poll_wait(file, &sonypi_device.fifo_proc_list, wait); if (kfifo_len(&sonypi_device.fifo)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 674218b50b13b..e4f79f9204507 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -180,15 +180,15 @@ static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait) poll_wait(filp, &proxy_dev->wq, wait); - ret = POLLOUT; + ret = EPOLLOUT; mutex_lock(&proxy_dev->buf_lock); if (proxy_dev->req_len) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; if (!(proxy_dev->state & STATE_OPENED_FLAG)) - ret |= POLLHUP; + ret |= EPOLLHUP; mutex_unlock(&proxy_dev->buf_lock); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 813a2e46824d4..468f061340126 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -992,15 +992,15 @@ static __poll_t port_fops_poll(struct file *filp, poll_table *wait) if (!port->guest_connected) { /* Port got unplugged */ - return POLLHUP; + return EPOLLHUP; } ret = 0; if (!will_read_block(port)) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; if (!will_write_block(port)) - ret |= POLLOUT; + ret |= EPOLLOUT; if (!port->host_connected) - ret |= POLLHUP; + ret |= EPOLLHUP; return ret; } diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c index 88e1cf475d3f5..a11af94e2e650 100644 --- a/drivers/char/xillybus/xillybus_core.c +++ b/drivers/char/xillybus/xillybus_core.c @@ -1758,15 +1758,15 @@ static __poll_t xillybus_poll(struct file *filp, poll_table *wait) spin_lock_irqsave(&channel->wr_spinlock, flags); if (!channel->wr_empty || channel->wr_ready) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (channel->wr_hangup) /* - * Not POLLHUP, because its behavior is in the - * mist, and POLLIN does what we want: Wake up + * Not EPOLLHUP, because its behavior is in the + * mist, and EPOLLIN does what we want: Wake up * the read file descriptor so it sees EOF. 
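The char-driver hunks through this stretch belong to the tree-wide conversion of poll handlers to the EPOLL* constants: a ->poll() method now returns __poll_t, a bitwise type, and the EPOLL* macros carry that type so sparse can flag places where they are mixed with plain integer masks. The canonical shape of a converted handler, with a hypothetical device:

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct example_dev {                    /* hypothetical device state */
            wait_queue_head_t wq;
            bool data_ready;
            bool space_free;
    };

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
            struct example_dev *dev = file->private_data;
            __poll_t mask = 0;

            poll_wait(file, &dev->wq, wait);        /* register for wakeups */
            if (dev->data_ready)
                    mask |= EPOLLIN | EPOLLRDNORM;  /* readable */
            if (dev->space_free)
                    mask |= EPOLLOUT | EPOLLWRNORM; /* writable */
            return mask;
    }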
*/ - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&channel->wr_spinlock, flags); } @@ -1781,12 +1781,12 @@ static __poll_t xillybus_poll(struct file *filp, poll_table *wait) spin_lock_irqsave(&channel->rd_spinlock, flags); if (!channel->rd_full) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&channel->rd_spinlock, flags); } if (channel->endpoint->fatal_error) - mask |= POLLERR; + mask |= EPOLLERR; return mask; } diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index d8addbce40bcc..608af20a34940 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -239,16 +239,6 @@ if PPC32 || PPC64 source "drivers/cpufreq/Kconfig.powerpc" endif -if AVR32 -config AVR32_AT32AP_CPUFREQ - bool "CPU frequency driver for AT32AP" - depends on PLATFORM_AT32AP - default n - help - This enables the CPU frequency driver for AT32AP processors. - If in doubt, say N. -endif - if IA64 config IA64_ACPI_CPUFREQ tristate "ACPI Processor P-States driver" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index e07715ce88440..c60c1e141d9d9 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -100,7 +100,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o ################################################################################## # Other platform drivers -obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o obj-$(CONFIG_BFIN_CPU_FREQ) += blackfin-cpufreq.o obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3a2ca0f79daf2..d0c34df0529c8 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) if (c->x86_vendor == X86_VENDOR_INTEL) { if ((c->x86 == 15) && (c->x86_model == 6) && - (c->x86_mask == 8)) { + (c->x86_stepping == 8)) { pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); return -ENODEV; } diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c index 042023bbbf621..be926d9a66e57 100644 --- a/drivers/cpufreq/amd_freq_sensitivity.c +++ b/drivers/cpufreq/amd_freq_sensitivity.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -109,12 +110,18 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy, static int __init amd_freq_sensitivity_init(void) { u64 val; + struct pci_dev *pcidev; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return -ENODEV; - if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK)) - return -ENODEV; + pcidev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL); + + if (!pcidev) { + if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK)) + return -ENODEV; + } if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val)) return -ENODEV; diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c deleted file mode 100644 index 7b612c8bb09ea..0000000000000 --- a/drivers/cpufreq/at32ap-cpufreq.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (C) 2004-2007 Atmel Corporation - * - * Based on MIPS implementation arch/mips/kernel/time.c - * Copyright 2001 MontaVista Software Inc. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/*#define DEBUG*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct cpufreq_frequency_table *freq_table; - -static unsigned int ref_freq; -static unsigned long loops_per_jiffy_ref; - -static int at32_set_target(struct cpufreq_policy *policy, unsigned int index) -{ - unsigned int old_freq, new_freq; - - old_freq = policy->cur; - new_freq = freq_table[index].frequency; - - if (!ref_freq) { - ref_freq = old_freq; - loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy; - } - - if (old_freq < new_freq) - boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, new_freq); - clk_set_rate(policy->clk, new_freq * 1000); - if (new_freq < old_freq) - boot_cpu_data.loops_per_jiffy = cpufreq_scale( - loops_per_jiffy_ref, ref_freq, new_freq); - - return 0; -} - -static int at32_cpufreq_driver_init(struct cpufreq_policy *policy) -{ - unsigned int frequency, rate, min_freq; - struct clk *cpuclk; - int retval, steps, i; - - if (policy->cpu != 0) - return -EINVAL; - - cpuclk = clk_get(NULL, "cpu"); - if (IS_ERR(cpuclk)) { - pr_debug("cpufreq: could not get CPU clk\n"); - retval = PTR_ERR(cpuclk); - goto out_err; - } - - min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000; - frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; - policy->cpuinfo.transition_latency = 0; - - /* - * AVR32 CPU frequency rate scales in power of two between maximum and - * minimum, also add space for the table end marker. - * - * Further validate that the frequency is usable, and append it to the - * frequency table. 
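The cpufreq table walks reworked below (exynos5440, freq_table, longhaul, pasemi) stop deriving an index by pointer arithmetic such as pos - table and instead use cpufreq_for_each_entry_idx(), a helper added alongside these changes in include/linux/cpufreq.h (its definition is not shown in this excerpt) that carries the index with the cursor. A minimal sketch, with a hypothetical dump helper:

    #include <linux/cpufreq.h>
    #include <linux/printk.h>

    static void example_dump_table(struct cpufreq_frequency_table *table)
    {
            struct cpufreq_frequency_table *pos;
            int idx;

            /* idx tracks pos, so no "pos - table" arithmetic is needed. */
            cpufreq_for_each_entry_idx(pos, table, idx)
                    pr_debug("entry %d: %u kHz\n", idx, pos->frequency);
    }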
- */ - steps = fls(frequency / min_freq) + 1; - freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table), - GFP_KERNEL); - if (!freq_table) { - retval = -ENOMEM; - goto out_err_put_clk; - } - - for (i = 0; i < (steps - 1); i++) { - rate = clk_round_rate(cpuclk, frequency * 1000) / 1000; - - if (rate != frequency) - freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; - else - freq_table[i].frequency = frequency; - - frequency /= 2; - } - - policy->clk = cpuclk; - freq_table[steps - 1].frequency = CPUFREQ_TABLE_END; - - retval = cpufreq_table_validate_and_show(policy, freq_table); - if (!retval) { - printk("cpufreq: AT32AP CPU frequency driver\n"); - return 0; - } - - kfree(freq_table); -out_err_put_clk: - clk_put(cpuclk); -out_err: - return retval; -} - -static struct cpufreq_driver at32_driver = { - .name = "at32ap", - .init = at32_cpufreq_driver_init, - .verify = cpufreq_generic_frequency_table_verify, - .target_index = at32_set_target, - .get = cpufreq_generic_get, - .flags = CPUFREQ_STICKY, -}; - -static int __init at32_cpufreq_init(void) -{ - return cpufreq_register_driver(&at32_driver); -} -late_initcall(at32_cpufreq_init); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 421f318c0e669..de33ebf008ada 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1686,6 +1686,9 @@ void cpufreq_resume(void) if (!cpufreq_driver) return; + if (unlikely(!cpufreq_suspended)) + return; + cpufreq_suspended = false; if (!has_target() && !cpufreq_driver->resume) diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c index b6b369c222726..932caa386ecec 100644 --- a/drivers/cpufreq/exynos5440-cpufreq.c +++ b/drivers/cpufreq/exynos5440-cpufreq.c @@ -115,10 +115,10 @@ static struct cpufreq_freqs freqs; static int init_div_table(void) { struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table; - unsigned int tmp, clk_div, ema_div, freq, volt_id; + unsigned int tmp, clk_div, ema_div, freq, volt_id, idx; struct dev_pm_opp *opp; - cpufreq_for_each_entry(pos, freq_tbl) { + cpufreq_for_each_entry_idx(pos, freq_tbl, idx) { opp = dev_pm_opp_find_freq_exact(dvfs_info->dev, pos->frequency * 1000, true); if (IS_ERR(opp)) { @@ -154,8 +154,7 @@ static int init_div_table(void) tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT) | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT)); - __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * - (pos - freq_tbl)); + __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * idx); dev_pm_opp_put(opp); } diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 3bbbf9e6960cd..6d007f824ca74 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -143,10 +143,9 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, break; } - cpufreq_for_each_valid_entry(pos, table) { + cpufreq_for_each_valid_entry_idx(pos, table, i) { freq = pos->frequency; - i = pos - table; if ((freq < policy->min) || (freq > policy->max)) continue; if (freq == target_freq) { @@ -211,15 +210,16 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq) { struct cpufreq_frequency_table *pos, *table = policy->freq_table; + int idx; if (unlikely(!table)) { pr_debug("%s: Unable to find frequency table\n", __func__); return -ENOENT; } - cpufreq_for_each_valid_entry(pos, table) + cpufreq_for_each_valid_entry_idx(pos, table, idx) if (pos->frequency == freq) - return pos - table; + return idx; return -EINVAL; } diff --git 
a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 741f22e5cee31..ff67859948b34 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -504,6 +504,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = { }; module_platform_driver(imx6q_cpufreq_platdrv); +MODULE_ALIAS("platform:imx6q-cpufreq"); MODULE_AUTHOR("Shawn Guo "); MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7edf7a0e5a96a..6d084c61ee253 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -779,6 +779,8 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) return 0; } +static void intel_pstate_hwp_enable(struct cpudata *cpudata); + static int intel_pstate_resume(struct cpufreq_policy *policy) { if (!hwp_active) @@ -786,6 +788,9 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) mutex_lock(&intel_pstate_limits_lock); + if (policy->cpu == 0) + intel_pstate_hwp_enable(all_cpu_data[policy->cpu]); + all_cpu_data[policy->cpu]->epp_policy = 0; intel_pstate_hwp_set(policy->cpu); diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 5faa37c5b0910..f730b6528c185 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -600,7 +600,7 @@ static void longhaul_setup_voltagescaling(void) /* Calculate kHz for one voltage step */ kHz_step = (highest_speed - min_vid_speed) / numvscales; - cpufreq_for_each_entry(freq_pos, longhaul_table) { + cpufreq_for_each_entry_idx(freq_pos, longhaul_table, j) { speed = freq_pos->frequency; if (speed > min_vid_speed) pos = (speed - min_vid_speed) / kHz_step + minvid.pos; @@ -609,7 +609,7 @@ static void longhaul_setup_voltagescaling(void) freq_pos->driver_data |= mV_vrm_table[pos] << 8; vid = vrm_mV_table[mV_vrm_table[pos]]; pr_info("f: %d kHz, index: %d, vid: %d mV\n", - speed, (int)(freq_pos - longhaul_table), vid.mV); + speed, j, vid.mV); } can_scale_voltage = 1; @@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 7: - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0: longhaul_version = TYPE_LONGHAUL_V1; cpu_model = CPU_SAMUEL2; @@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) break; case 1 ... 15: longhaul_version = TYPE_LONGHAUL_V2; - if (c->x86_mask < 8) { + if (c->x86_stepping < 8) { cpu_model = CPU_SAMUEL2; cpuname = "C3 'Samuel 2' [C5B]"; } else { @@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) numscales = 32; memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 0 ... 
1: cpu_model = CPU_NEHEMIAH; cpuname = "C3 'Nehemiah A' [C5XLOE]"; diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd77812313f3e..a25741b1281b4 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c @@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) #endif /* Errata workaround */ - cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; + cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; switch (cpuid) { case 0x0f07: case 0x0f0a: diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index b257fc7d52041..75dfbd2a58ea6 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c @@ -139,7 +139,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) struct cpufreq_frequency_table *pos; const u32 *max_freqp; u32 max_freq; - int cur_astate; + int cur_astate, idx; struct resource res; struct device_node *cpu, *dn; int err = -ENODEV; @@ -198,9 +198,9 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_debug("initializing frequency table\n"); /* initialize frequency table */ - cpufreq_for_each_entry(pos, pas_freqs) { + cpufreq_for_each_entry_idx(pos, pas_freqs, idx) { pos->frequency = get_astate_freq(pos->driver_data) * 100000; - pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency); + pr_debug("%d: %d\n", idx, pos->frequency); } cur_astate = get_cur_astate(policy->cpu); diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 80ac313e6c59c..302e9ce793a01 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -131,7 +131,7 @@ static int check_powernow(void) return 0; } - if ((c->x86_model == 6) && (c->x86_mask == 0)) { + if ((c->x86_model == 6) && (c->x86_stepping == 0)) { pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); have_a0 = 1; } diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 247fcbfa4cb5b..c32a833e1b005 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -145,6 +145,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) if (IS_ERR(priv->clk)) { dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n", __func__, cpu_dev->id); + ret = PTR_ERR(priv->clk); goto out_free_cpufreq_table; } @@ -197,11 +198,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) static void scpi_cpufreq_ready(struct cpufreq_policy *policy) { struct scpi_data *priv = policy->driver_data; - struct thermal_cooling_device *cdev; - cdev = of_cpufreq_cooling_register(policy); - if (!IS_ERR(cdev)) - priv->cdev = cdev; + priv->cdev = of_cpufreq_cooling_register(policy); } static struct cpufreq_driver scpi_cpufreq_driver = { diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f4bbb..4fa5adf16c701 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c @@ -37,7 +37,7 @@ struct cpu_id { __u8 x86; /* CPU family */ __u8 x86_model; /* model */ - __u8 x86_mask; /* stepping */ + __u8 x86_stepping; /* stepping */ }; enum { @@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, { if ((c->x86 == x->x86) && (c->x86_model == x->x86_model) && - (c->x86_mask == x->x86_mask)) + (c->x86_stepping == x->x86_stepping)) return 1; return 0; } diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 8085ec9000d19..e3a9962ee4109 100644 --- 
a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c @@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void) ebx = cpuid_ebx(0x00000001); ebx &= 0x000000FF; - pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); - switch (c->x86_mask) { + switch (c->x86_stepping) { case 4: /* * B-stepping [M-P4-M] @@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void) msr_lo, msr_hi); if ((msr_hi & (1<<18)) && (relaxed_check ? 1 : (msr_hi & (3<<24)))) { - if (c->x86_mask == 0x01) { + if (c->x86_stepping == 0x01) { pr_debug("early PIII version\n"); return SPEEDSTEP_CPU_PIII_C_EARLY; } else diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 75d280cb2dc05..e843cf4103736 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, * without any error (HW optimizations for later * CAAM eras), then try again. */ + if (ret) + break; + rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || - !(rdsta_val & (1 << sh_idx))) + !(rdsta_val & (1 << sh_idx))) { ret = -EAGAIN; - if (ret) break; + } + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); /* Clear the contents before recreating the descriptor */ memset(desc, 0x00, CAAM_CMD_SZ * 7); diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 6d626606b9c51..b9dfae47aefd8 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -1,5 +1,6 @@ config CRYPTO_DEV_CCP_DD tristate "Secure Processor device driver" + depends on CPU_SUP_AMD || ARM64 default m help Provides AMD Secure Processor device driver. @@ -32,3 +33,14 @@ config CRYPTO_DEV_CCP_CRYPTO Support for using the cryptographic API with the AMD Cryptographic Coprocessor. This module supports offload of SHA and AES algorithms. If you choose 'M' here, this module will be called ccp_crypto. + +config CRYPTO_DEV_SP_PSP + bool "Platform Security Processor (PSP) device" + default y + depends on CRYPTO_DEV_CCP_DD && X86_64 + help + Provide support for the AMD Platform Security Processor (PSP). + The PSP is a dedicated processor that provides support for key + management commands in Secure Encrypted Virtualization (SEV) mode, + along with software-based Trusted Execution Environment (TEE) to + enable third-party trusted applications. diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index c4ce726b931e3..51d1c0cf66c73 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -8,6 +8,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-dmaengine.o \ ccp-debugfs.o ccp-$(CONFIG_PCI) += sp-pci.o +ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c new file mode 100644 index 0000000000000..fcfa5b1eae616 --- /dev/null +++ b/drivers/crypto/ccp/psp-dev.c @@ -0,0 +1,805 @@ +/* + * AMD Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2017 Advanced Micro Devices, Inc. + * + * Author: Brijesh Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
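The new psp-dev.c below drives SEV commands with a classic interrupt-completion pattern: the submitter clears a flag, kicks the hardware, and sleeps in wait_event(); the interrupt handler sets the flag and calls wake_up(), exactly as psp_irq_handler() and sev_wait_cmd_ioc() do. Reduced to its skeleton with hypothetical names (the wait queue is assumed to have been set up with init_waitqueue_head() at probe time):

    #include <linux/interrupt.h>
    #include <linux/wait.h>

    struct example_dev {
            wait_queue_head_t ioc_queue;    /* init_waitqueue_head() at probe */
            int ioc_received;
    };

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct example_dev *d = data;

            d->ioc_received = 1;            /* publish completion... */
            wake_up(&d->ioc_queue);         /* ...and wake the sleeping submitter */
            return IRQ_HANDLED;
    }

    static void example_send_and_wait(struct example_dev *d)
    {
            d->ioc_received = 0;            /* must be cleared before kicking the HW */
            /* ... write the command registers here ... */
            wait_event(d->ioc_queue, d->ioc_received);
    }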
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sp-dev.h" +#include "psp-dev.h" + +#define DEVICE_NAME "sev" + +static DEFINE_MUTEX(sev_cmd_mutex); +static struct sev_misc_dev *misc_dev; +static struct psp_device *psp_master; + +static struct psp_device *psp_alloc_struct(struct sp_device *sp) +{ + struct device *dev = sp->dev; + struct psp_device *psp; + + psp = devm_kzalloc(dev, sizeof(*psp), GFP_KERNEL); + if (!psp) + return NULL; + + psp->dev = dev; + psp->sp = sp; + + snprintf(psp->name, sizeof(psp->name), "psp-%u", sp->ord); + + return psp; +} + +static irqreturn_t psp_irq_handler(int irq, void *data) +{ + struct psp_device *psp = data; + unsigned int status; + int reg; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS); + + /* Check if it is command completion: */ + if (!(status & BIT(PSP_CMD_COMPLETE_REG))) + goto done; + + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + PSP_CMDRESP); + if (reg & PSP_CMDRESP_RESP) { + psp->sev_int_rcvd = 1; + wake_up(&psp->sev_int_queue); + } + +done: + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS); + + return IRQ_HANDLED; +} + +static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) +{ + psp->sev_int_rcvd = 0; + + wait_event(psp->sev_int_queue, psp->sev_int_rcvd); + *reg = ioread32(psp->io_regs + PSP_CMDRESP); +} + +static int sev_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case SEV_CMD_INIT: return sizeof(struct sev_data_init); + case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); + case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); + case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); + case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export); + case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start); + case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data); + case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa); + case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish); + case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure); + case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate); + case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate); + case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission); + case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status); + case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg); + case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg); + case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start); + case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data); + case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa); + case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish); + case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start); + case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish); + case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); + case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); + case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); + default: return 0; + } + + return 0; +} + +static int 
__sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+        struct psp_device *psp = psp_master;
+        unsigned int phys_lsb, phys_msb;
+        unsigned int reg, ret = 0;
+
+        if (!psp)
+                return -ENODEV;
+
+        /* Get the physical address of the command buffer */
+        phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+        phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+        dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
+                cmd, phys_msb, phys_lsb);
+
+        print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+                             sev_cmd_buffer_len(cmd), false);
+
+        iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
+        iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+
+        reg = cmd;
+        reg <<= PSP_CMDRESP_CMD_SHIFT;
+        reg |= PSP_CMDRESP_IOC;
+        iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+
+        /* wait for command completion */
+        sev_wait_cmd_ioc(psp, &reg);
+
+        if (psp_ret)
+                *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+        if (reg & PSP_CMDRESP_ERR_MASK) {
+                dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
+                        cmd, reg & PSP_CMDRESP_ERR_MASK);
+                ret = -EIO;
+        }
+
+        print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+                             sev_cmd_buffer_len(cmd), false);
+
+        return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+        int rc;
+
+        mutex_lock(&sev_cmd_mutex);
+        rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+        mutex_unlock(&sev_cmd_mutex);
+
+        return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+        struct psp_device *psp = psp_master;
+        int rc = 0;
+
+        if (!psp)
+                return -ENODEV;
+
+        if (psp->sev_state == SEV_STATE_INIT)
+                return 0;
+
+        rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
+        if (rc)
+                return rc;
+
+        psp->sev_state = SEV_STATE_INIT;
+        dev_dbg(psp->dev, "SEV firmware initialized\n");
+
+        return rc;
+}
+
+int sev_platform_init(int *error)
+{
+        int rc;
+
+        mutex_lock(&sev_cmd_mutex);
+        rc = __sev_platform_init_locked(error);
+        mutex_unlock(&sev_cmd_mutex);
+
+        return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+        int ret;
+
+        ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error);
+        if (ret)
+                return ret;
+
+        psp_master->sev_state = SEV_STATE_UNINIT;
+        dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
+
+        return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+        int rc;
+
+        mutex_lock(&sev_cmd_mutex);
+        rc = __sev_platform_shutdown_locked(NULL);
+        mutex_unlock(&sev_cmd_mutex);
+
+        return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+        int rc;
+
+        rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+                                 &psp_master->status_cmd_buf, error);
+        if (rc)
+                return rc;
+
+        *state = psp_master->status_cmd_buf.state;
+        return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+        int state, rc;
+
+        /*
+         * The SEV spec requires that FACTORY_RESET must be issued in
+         * UNINIT state. Before we go further, let's check if any guest is
+         * active.
+         *
+         * If the FW is in the WORKING state, deny the request; otherwise
+         * issue the SHUTDOWN command to do the INIT -> UNINIT transition
+         * before issuing the FACTORY_RESET.
+ * + */ + rc = sev_get_platform_state(&state, &argp->error); + if (rc) + return rc; + + if (state == SEV_STATE_WORKING) + return -EBUSY; + + if (state == SEV_STATE_INIT) { + rc = __sev_platform_shutdown_locked(&argp->error); + if (rc) + return rc; + } + + return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error); +} + +static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) +{ + struct sev_user_data_status *data = &psp_master->status_cmd_buf; + int ret; + + ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error); + if (ret) + return ret; + + if (copy_to_user((void __user *)argp->data, data, sizeof(*data))) + ret = -EFAULT; + + return ret; +} + +static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp) +{ + int rc; + + if (psp_master->sev_state == SEV_STATE_UNINIT) { + rc = __sev_platform_init_locked(&argp->error); + if (rc) + return rc; + } + + return __sev_do_cmd_locked(cmd, 0, &argp->error); +} + +static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) +{ + struct sev_user_data_pek_csr input; + struct sev_data_pek_csr *data; + void *blob = NULL; + int ret; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* userspace wants to query CSR length */ + if (!input.address || !input.length) + goto cmd; + + /* allocate a physically contiguous buffer to store the CSR blob */ + if (!access_ok(VERIFY_WRITE, input.address, input.length) || + input.length > SEV_FW_BLOB_MAX_SIZE) { + ret = -EFAULT; + goto e_free; + } + + blob = kmalloc(input.length, GFP_KERNEL); + if (!blob) { + ret = -ENOMEM; + goto e_free; + } + + data->address = __psp_pa(blob); + data->len = input.length; + +cmd: + if (psp_master->sev_state == SEV_STATE_UNINIT) { + ret = __sev_platform_init_locked(&argp->error); + if (ret) + goto e_free_blob; + } + + ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error); + + /* If we query the CSR length, FW responded with expected data. 
*/
+        input.length = data->len;
+
+        if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+                ret = -EFAULT;
+                goto e_free_blob;
+        }
+
+        if (blob) {
+                if (copy_to_user((void __user *)input.address, blob, input.length))
+                        ret = -EFAULT;
+        }
+
+e_free_blob:
+        kfree(blob);
+e_free:
+        kfree(data);
+        return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+        void *data;
+
+        if (!uaddr || !len)
+                return ERR_PTR(-EINVAL);
+
+        /* verify that blob length does not exceed our limit */
+        if (len > SEV_FW_BLOB_MAX_SIZE)
+                return ERR_PTR(-EINVAL);
+
+        data = kmalloc(len, GFP_KERNEL);
+        if (!data)
+                return ERR_PTR(-ENOMEM);
+
+        if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
+                goto e_free;
+
+        return data;
+
+e_free:
+        kfree(data);
+        return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+        struct sev_user_data_pek_cert_import input;
+        struct sev_data_pek_cert_import *data;
+        void *pek_blob, *oca_blob;
+        int ret;
+
+        if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+                return -EFAULT;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        /* copy PEK certificate blob from userspace */
+        pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+        if (IS_ERR(pek_blob)) {
+                ret = PTR_ERR(pek_blob);
+                goto e_free;
+        }
+
+        data->pek_cert_address = __psp_pa(pek_blob);
+        data->pek_cert_len = input.pek_cert_len;
+
+        /* copy OCA certificate blob from userspace */
+        oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+        if (IS_ERR(oca_blob)) {
+                ret = PTR_ERR(oca_blob);
+                goto e_free_pek;
+        }
+
+        data->oca_cert_address = __psp_pa(oca_blob);
+        data->oca_cert_len = input.oca_cert_len;
+
+        /* If platform is not in INIT state then transition it to INIT */
+        if (psp_master->sev_state != SEV_STATE_INIT) {
+                ret = __sev_platform_init_locked(&argp->error);
+                if (ret)
+                        goto e_free_oca;
+        }
+
+        ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+        kfree(oca_blob);
+e_free_pek:
+        kfree(pek_blob);
+e_free:
+        kfree(data);
+        return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+        struct sev_user_data_pdh_cert_export input;
+        void *pdh_blob = NULL, *cert_blob = NULL;
+        struct sev_data_pdh_cert_export *data;
+        int ret;
+
+        if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+                return -EFAULT;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        /* Userspace wants to query the certificate length. */
+        if (!input.pdh_cert_address ||
+            !input.pdh_cert_len ||
+            !input.cert_chain_address)
+                goto cmd;
+
+        /* Allocate a physically contiguous buffer to store the PDH blob. */
+        if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+            !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) {
+                ret = -EFAULT;
+                goto e_free;
+        }
+
+        /* Allocate a physically contiguous buffer to store the cert chain blob.
*/ + if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) || + !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) { + ret = -EFAULT; + goto e_free; + } + + pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL); + if (!pdh_blob) { + ret = -ENOMEM; + goto e_free; + } + + data->pdh_cert_address = __psp_pa(pdh_blob); + data->pdh_cert_len = input.pdh_cert_len; + + cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL); + if (!cert_blob) { + ret = -ENOMEM; + goto e_free_pdh; + } + + data->cert_chain_address = __psp_pa(cert_blob); + data->cert_chain_len = input.cert_chain_len; + +cmd: + /* If platform is not in INIT state then transition it to INIT. */ + if (psp_master->sev_state != SEV_STATE_INIT) { + ret = __sev_platform_init_locked(&argp->error); + if (ret) + goto e_free_cert; + } + + ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error); + + /* If we query the length, FW responded with expected data. */ + input.cert_chain_len = data->cert_chain_len; + input.pdh_cert_len = data->pdh_cert_len; + + if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { + ret = -EFAULT; + goto e_free_cert; + } + + if (pdh_blob) { + if (copy_to_user((void __user *)input.pdh_cert_address, + pdh_blob, input.pdh_cert_len)) { + ret = -EFAULT; + goto e_free_cert; + } + } + + if (cert_blob) { + if (copy_to_user((void __user *)input.cert_chain_address, + cert_blob, input.cert_chain_len)) + ret = -EFAULT; + } + +e_free_cert: + kfree(cert_blob); +e_free_pdh: + kfree(pdh_blob); +e_free: + kfree(data); + return ret; +} + +static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sev_issue_cmd input; + int ret = -EFAULT; + + if (!psp_master) + return -ENODEV; + + if (ioctl != SEV_ISSUE_CMD) + return -EINVAL; + + if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) + return -EFAULT; + + if (input.cmd > SEV_MAX) + return -EINVAL; + + mutex_lock(&sev_cmd_mutex); + + switch (input.cmd) { + + case SEV_FACTORY_RESET: + ret = sev_ioctl_do_reset(&input); + break; + case SEV_PLATFORM_STATUS: + ret = sev_ioctl_do_platform_status(&input); + break; + case SEV_PEK_GEN: + ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input); + break; + case SEV_PDH_GEN: + ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input); + break; + case SEV_PEK_CSR: + ret = sev_ioctl_do_pek_csr(&input); + break; + case SEV_PEK_CERT_IMPORT: + ret = sev_ioctl_do_pek_import(&input); + break; + case SEV_PDH_CERT_EXPORT: + ret = sev_ioctl_do_pdh_export(&input); + break; + default: + ret = -EINVAL; + goto out; + } + + if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) + ret = -EFAULT; +out: + mutex_unlock(&sev_cmd_mutex); + + return ret; +} + +static const struct file_operations sev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = sev_ioctl, +}; + +int sev_platform_status(struct sev_user_data_status *data, int *error) +{ + return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); +} +EXPORT_SYMBOL_GPL(sev_platform_status); + +int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) +{ + return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); +} +EXPORT_SYMBOL_GPL(sev_guest_deactivate); + +int sev_guest_activate(struct sev_data_activate *data, int *error) +{ + return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); +} +EXPORT_SYMBOL_GPL(sev_guest_activate); + +int sev_guest_decommission(struct sev_data_decommission *data, int *error) +{ + return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); +} 
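To make the new /dev/sev interface concrete, here is a minimal userspace sketch of the command flow dispatched by sev_ioctl() above, including the two-call length-discovery pattern implemented by sev_ioctl_do_pek_csr(). This is not part of the patch: it assumes the linux/psp-sev.h UAPI header that accompanies this series (struct sev_issue_cmd, SEV_ISSUE_CMD, and the SEV_* command ids shown in the dispatcher), and error handling is trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
        struct sev_user_data_status status = {};
        struct sev_user_data_pek_csr csr_input = {};
        struct sev_issue_cmd arg = {};
        void *csr;
        int fd;

        fd = open("/dev/sev", O_RDWR);
        if (fd < 0)
                return 1;

        /* Plain query command: .data points at a response buffer. */
        arg.cmd = SEV_PLATFORM_STATUS;
        arg.data = (uintptr_t)&status;
        if (ioctl(fd, SEV_ISSUE_CMD, &arg) < 0) {
                /* arg.error carries the SEV firmware status code. */
                fprintf(stderr, "status failed, fw error %#x\n", arg.error);
                return 1;
        }
        printf("SEV API %u.%u build %u state %u\n", status.api_major,
               status.api_minor, status.build, status.state);

        /*
         * Call 1: a zeroed address/length makes the handler skip the blob
         * allocation, so the firmware reports the required CSR length
         * back through csr_input.length (the ioctl itself "fails").
         */
        arg.cmd = SEV_PEK_CSR;
        arg.data = (uintptr_t)&csr_input;
        ioctl(fd, SEV_ISSUE_CMD, &arg);
        if (!csr_input.length)
                return 1;

        /* Call 2: provide a buffer of that length and fetch the blob. */
        csr = malloc(csr_input.length);
        if (!csr)
                return 1;
        csr_input.address = (uintptr_t)csr;
        if (ioctl(fd, SEV_ISSUE_CMD, &arg) < 0)
                fprintf(stderr, "CSR export failed, fw error %#x\n", arg.error);

        return 0;
}

The same query-then-fetch shape applies to SEV_PDH_CERT_EXPORT above, just with two blobs (PDH certificate and certificate chain) instead of one.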
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+        return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+        struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+        misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct psp_device *psp)
+{
+        struct device *dev = psp->dev;
+        int ret;
+
+        /*
+         * SEV feature support can be detected on multiple devices but the SEV
+         * FW commands must be issued on the master. During probe, we do not
+         * know the master, hence we create /dev/sev on the first device probe.
+         * sev_do_cmd() finds the right master device through which to issue
+         * the command to the firmware.
+         */
+        if (!misc_dev) {
+                struct miscdevice *misc;
+
+                misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+                if (!misc_dev)
+                        return -ENOMEM;
+
+                misc = &misc_dev->misc;
+                misc->minor = MISC_DYNAMIC_MINOR;
+                misc->name = DEVICE_NAME;
+                misc->fops = &sev_fops;
+
+                ret = misc_register(misc);
+                if (ret)
+                        return ret;
+
+                kref_init(&misc_dev->refcount);
+        } else {
+                kref_get(&misc_dev->refcount);
+        }
+
+        init_waitqueue_head(&psp->sev_int_queue);
+        psp->sev_misc = misc_dev;
+        dev_dbg(dev, "registered SEV device\n");
+
+        return 0;
+}
+
+static int sev_init(struct psp_device *psp)
+{
+        /* Check if device supports SEV feature */
+        if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+                dev_dbg(psp->dev, "device does not support SEV\n");
+                return 1;
+        }
+
+        return sev_misc_init(psp);
+}
+
+int psp_dev_init(struct sp_device *sp)
+{
+        struct device *dev = sp->dev;
+        struct psp_device *psp;
+        int ret;
+
+        ret = -ENOMEM;
+        psp = psp_alloc_struct(sp);
+        if (!psp)
+                goto e_err;
+
+        sp->psp_data = psp;
+
+        psp->vdata = (struct psp_vdata *)sp->dev_vdata->psp_vdata;
+        if (!psp->vdata) {
+                ret = -ENODEV;
+                dev_err(dev, "missing driver data\n");
+                goto e_err;
+        }
+
+        psp->io_regs = sp->io_map + psp->vdata->offset;
+
+        /* Disable and clear interrupts until ready */
+        iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
+        iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+
+        /* Request an irq */
+        ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
+        if (ret) {
+                dev_err(dev, "psp: unable to allocate an IRQ\n");
+                goto e_err;
+        }
+
+        ret = sev_init(psp);
+        if (ret)
+                goto e_irq;
+
+        if (sp->set_psp_master_device)
+                sp->set_psp_master_device(sp);
+
+        /* Enable interrupt */
+        iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+
+        return 0;
+
+e_irq:
+        sp_free_psp_irq(psp->sp, psp);
+e_err:
+        sp->psp_data = NULL;
+
+        dev_notice(dev, "psp initialization failed\n");
+
+        return ret;
+}
+
+void psp_dev_destroy(struct sp_device *sp)
+{
+        struct psp_device *psp = sp->psp_data;
+
+        if (psp->sev_misc)
+                kref_put(&misc_dev->refcount, sev_exit);
+
+        sp_free_psp_irq(sp, psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+                                void *data, int *error)
+{
+        if (!filep || filep->f_op != &sev_fops)
+                return -EBADF;
+
+        return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void psp_pci_init(void)
+{
+        struct sev_user_data_status *status;
+        struct sp_device *sp;
+        int error, rc;
+
+        sp = sp_get_psp_master_device();
+        if (!sp)
+                return;
+
+        psp_master = sp->psp_data;
+
+        /* Initialize the platform */
+        rc = sev_platform_init(&error);
+        if (rc) {
+                dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
+                goto err;
+        }
+
+        /* Display SEV firmware version */
+        status = &psp_master->status_cmd_buf;
+
rc = sev_platform_status(status, &error); + if (rc) { + dev_err(sp->dev, "SEV: failed to get status error %#x\n", error); + goto err; + } + + dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major, + status->api_minor, status->build); + return; + +err: + psp_master = NULL; +} + +void psp_pci_exit(void) +{ + if (!psp_master) + return; + + sev_platform_shutdown(NULL); +} diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h new file mode 100644 index 0000000000000..c81f0b11287ae --- /dev/null +++ b/drivers/crypto/ccp/psp-dev.h @@ -0,0 +1,83 @@ +/* + * AMD Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2017 Advanced Micro Devices, Inc. + * + * Author: Brijesh Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __PSP_DEV_H__ +#define __PSP_DEV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sp-dev.h" + +#define PSP_C2PMSG(_num) ((_num) << 2) +#define PSP_CMDRESP PSP_C2PMSG(32) +#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56) +#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57) +#define PSP_FEATURE_REG PSP_C2PMSG(63) + +#define PSP_P2CMSG(_num) ((_num) << 2) +#define PSP_CMD_COMPLETE_REG 1 +#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG) + +#define PSP_P2CMSG_INTEN 0x0110 +#define PSP_P2CMSG_INTSTS 0x0114 + +#define PSP_C2PMSG_ATTR_0 0x0118 +#define PSP_C2PMSG_ATTR_1 0x011c +#define PSP_C2PMSG_ATTR_2 0x0120 +#define PSP_C2PMSG_ATTR_3 0x0124 +#define PSP_P2CMSG_ATTR_0 0x0128 + +#define PSP_CMDRESP_CMD_SHIFT 16 +#define PSP_CMDRESP_IOC BIT(0) +#define PSP_CMDRESP_RESP BIT(31) +#define PSP_CMDRESP_ERR_MASK 0xffff + +#define MAX_PSP_NAME_LEN 16 + +struct sev_misc_dev { + struct kref refcount; + struct miscdevice misc; +}; + +struct psp_device { + struct list_head entry; + + struct psp_vdata *vdata; + char name[MAX_PSP_NAME_LEN]; + + struct device *dev; + struct sp_device *sp; + + void __iomem *io_regs; + + int sev_state; + unsigned int sev_int_rcvd; + wait_queue_head_t sev_int_queue; + struct sev_misc_dev *sev_misc; + struct sev_user_data_status status_cmd_buf; + struct sev_data_init init_cmd_buf; +}; + +#endif /* __PSP_DEV_H */ diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index bef387c8abfd7..eb0da65727204 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ b/drivers/crypto/ccp/sp-dev.c @@ -198,6 +198,8 @@ int sp_init(struct sp_device *sp) if (sp->dev_vdata->ccp_vdata) ccp_dev_init(sp); + if (sp->dev_vdata->psp_vdata) + psp_dev_init(sp); return 0; } @@ -206,6 +208,9 @@ void sp_destroy(struct sp_device *sp) if (sp->dev_vdata->ccp_vdata) ccp_dev_destroy(sp); + if (sp->dev_vdata->psp_vdata) + psp_dev_destroy(sp); + sp_del_device(sp); } @@ -237,6 +242,27 @@ int sp_resume(struct sp_device *sp) } #endif +struct sp_device *sp_get_psp_master_device(void) +{ + struct sp_device *i, *ret = NULL; + unsigned long flags; + + write_lock_irqsave(&sp_unit_lock, flags); + if (list_empty(&sp_units)) + goto unlock; + + list_for_each_entry(i, &sp_units, entry) { + if (i->psp_data) + break; + } + + if (i->get_psp_master_device) + ret = i->get_psp_master_device(); +unlock: + write_unlock_irqrestore(&sp_unit_lock, flags); + return ret; +} + static int __init sp_mod_init(void) { #ifdef CONFIG_X86 @@ -246,6 +272,10 @@ static int __init sp_mod_init(void) if (ret) return ret; +#ifdef 
CONFIG_CRYPTO_DEV_SP_PSP + psp_pci_init(); +#endif + return 0; #endif @@ -265,6 +295,11 @@ static int __init sp_mod_init(void) static void __exit sp_mod_exit(void) { #ifdef CONFIG_X86 + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + psp_pci_exit(); +#endif + sp_pci_exit(); #endif diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 5ab486ade1ad9..acb197b66ced9 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -42,12 +42,17 @@ struct ccp_vdata { const unsigned int offset; const unsigned int rsamax; }; + +struct psp_vdata { + const unsigned int offset; +}; + /* Structure to hold SP device data */ struct sp_dev_vdata { const unsigned int bar; const struct ccp_vdata *ccp_vdata; - void *psp_vdata; + const struct psp_vdata *psp_vdata; }; struct sp_device { @@ -68,6 +73,10 @@ struct sp_device { /* DMA caching attribute support */ unsigned int axcache; + /* get and set master device */ + struct sp_device*(*get_psp_master_device)(void); + void (*set_psp_master_device)(struct sp_device *); + bool irq_registered; bool use_tasklet; @@ -103,6 +112,7 @@ void sp_free_ccp_irq(struct sp_device *sp, void *data); int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); void sp_free_psp_irq(struct sp_device *sp, void *data); +struct sp_device *sp_get_psp_master_device(void); #ifdef CONFIG_CRYPTO_DEV_SP_CCP @@ -130,4 +140,20 @@ static inline int ccp_dev_resume(struct sp_device *sp) } #endif /* CONFIG_CRYPTO_DEV_SP_CCP */ +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + +int psp_dev_init(struct sp_device *sp); +void psp_pci_init(void); +void psp_dev_destroy(struct sp_device *sp); +void psp_pci_exit(void); + +#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ + +static inline int psp_dev_init(struct sp_device *sp) { return 0; } +static inline void psp_pci_init(void) { } +static inline void psp_dev_destroy(struct sp_device *sp) { } +static inline void psp_pci_exit(void) { } + +#endif /* CONFIG_CRYPTO_DEV_SP_PSP */ + #endif diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 9859aa683a283..f5f43c50698ac 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -25,6 +25,7 @@ #include #include "ccp-dev.h" +#include "psp-dev.h" #define MSIX_VECTORS 2 @@ -32,6 +33,7 @@ struct sp_pci { int msix_count; struct msix_entry msix_entry[MSIX_VECTORS]; }; +static struct sp_device *sp_dev_master; static int sp_get_msix_irqs(struct sp_device *sp) { @@ -108,6 +110,45 @@ static void sp_free_irqs(struct sp_device *sp) sp->psp_irq = 0; } +static bool sp_pci_is_master(struct sp_device *sp) +{ + struct device *dev_cur, *dev_new; + struct pci_dev *pdev_cur, *pdev_new; + + dev_new = sp->dev; + dev_cur = sp_dev_master->dev; + + pdev_new = to_pci_dev(dev_new); + pdev_cur = to_pci_dev(dev_cur); + + if (pdev_new->bus->number < pdev_cur->bus->number) + return true; + + if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) + return true; + + if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) + return true; + + return false; +} + +static void psp_set_master(struct sp_device *sp) +{ + if (!sp_dev_master) { + sp_dev_master = sp; + return; + } + + if (sp_pci_is_master(sp)) + sp_dev_master = sp; +} + +static struct sp_device *psp_get_master(void) +{ + return sp_dev_master; +} + static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct sp_device *sp; @@ -166,6 +207,8 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto e_err; pci_set_master(pdev); + sp->set_psp_master_device 
= psp_set_master; + sp->get_psp_master_device = psp_get_master; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { @@ -225,6 +268,12 @@ static int sp_pci_resume(struct pci_dev *pdev) } #endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +static const struct psp_vdata psp_entry = { + .offset = 0x10500, +}; +#endif + static const struct sp_dev_vdata dev_vdata[] = { { .bar = 2, @@ -236,6 +285,9 @@ static const struct sp_dev_vdata dev_vdata[] = { .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &psp_entry #endif }, { diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 4b6642a25df51..1c6cbda56afe9 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -512,7 +512,7 @@ static int __init padlock_init(void) printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); - if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { + if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 0d01d16242527..63d636424161d 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c @@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; - spin_lock(&ss->slock); + spin_lock_bh(&ss->slock); writel(mode, ss->base + SS_CTL); @@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, } writel(0, ss->base + SS_CTL); - spin_unlock(&ss->slock); - return dlen; + spin_unlock_bh(&ss->slock); + return 0; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9c80e0cb16647..6882fa2f8badd 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, struct talitos_private *priv = dev_get_drvdata(dev); bool is_sec1 = has_ftr_sec1(priv); + if (!src) { + to_talitos_ptr(ptr, 0, 0, is_sec1); + return 1; + } if (sg_count == 1) { to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); return sg_count; diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 5394507138381..d78d5fc173dc3 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -135,10 +135,10 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) * Userspace can query the state of these implicitly tracked fences using poll() * and related system calls: * - * - Checking for POLLIN, i.e. read access, can be use to query the state of the + * - Checking for EPOLLIN, i.e. read access, can be use to query the state of the * most recent write or exclusive fence. * - * - Checking for POLLOUT, i.e. write access, can be used to query the state of + * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of * all attached fences, shared and exclusive ones. * * Note that this only signals the completion of the respective fences, i.e. 
the @@ -168,13 +168,13 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) dmabuf = file->private_data; if (!dmabuf || !dmabuf->resv) - return POLLERR; + return EPOLLERR; resv = dmabuf->resv; poll_wait(file, &dmabuf->poll, poll); - events = poll_requested_events(poll) & (POLLIN | POLLOUT); + events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT); if (!events) return 0; @@ -193,12 +193,12 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) goto retry; } - if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) { + if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) { struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; - __poll_t pevents = POLLIN; + __poll_t pevents = EPOLLIN; if (shared_count == 0) - pevents |= POLLOUT; + pevents |= EPOLLOUT; spin_lock_irq(&dmabuf->poll.lock); if (dcb->active) { @@ -228,19 +228,19 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) } } - if ((events & POLLOUT) && shared_count > 0) { + if ((events & EPOLLOUT) && shared_count > 0) { struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; int i; /* Only queue a new callback if no event has fired yet */ spin_lock_irq(&dmabuf->poll.lock); if (dcb->active) - events &= ~POLLOUT; + events &= ~EPOLLOUT; else - dcb->active = POLLOUT; + dcb->active = EPOLLOUT; spin_unlock_irq(&dmabuf->poll.lock); - if (!(events & POLLOUT)) + if (!(events & EPOLLOUT)) goto out; for (i = 0; i < shared_count; ++i) { @@ -253,14 +253,14 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll) * * call dma_buf_poll_cb and force a recheck! */ - events &= ~POLLOUT; + events &= ~EPOLLOUT; dma_buf_poll_cb(NULL, &dcb->cb); break; } if (!dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb)) { dma_fence_put(fence); - events &= ~POLLOUT; + events &= ~EPOLLOUT; break; } dma_fence_put(fence); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 8e8c4a12a0bc0..35dd06479867f 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -325,7 +325,7 @@ static __poll_t sync_file_poll(struct file *file, poll_table *wait) wake_up_all(&sync_file->wq); } - return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0; + return dma_fence_is_signaled(sync_file->fence) ? 
EPOLLIN : 0; } static long sync_file_ioctl_merge(struct sync_file *sync_file, diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 8b16ec595fa72..329cb96f886fd 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) struct amd64_family_type *fam_type = NULL; pvt->ext_model = boot_cpu_data.x86_model >> 4; - pvt->stepping = boot_cpu_data.x86_mask; + pvt->stepping = boot_cpu_data.x86_stepping; pvt->model = boot_cpu_data.x86_model; pvt->fam = boot_cpu_data.x86; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 523391bb3fbe3..f0587273940e4 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1792,9 +1792,9 @@ static __poll_t fw_device_op_poll(struct file *file, poll_table * pt) poll_wait(file, &client->wait, pt); if (fw_device_is_shutdown(client->device)) - mask |= POLLHUP | POLLERR; + mask |= EPOLLHUP | EPOLLERR; if (!list_empty(&client->event_list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index fee2e9e7ea20a..a128dd1126ae4 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -337,10 +337,10 @@ nosy_poll(struct file *file, poll_table *pt) poll_wait(file, &client->buffer.wait, pt); if (atomic_read(&client->buffer.size) > 0) - ret = POLLIN | POLLRDNORM; + ret = EPOLLIN | EPOLLRDNORM; if (list_empty(&client->lynx->link)) - ret |= POLLHUP; + ret |= EPOLLHUP; return ret; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 36ca5064486e8..d66de67ef307c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -630,7 +630,7 @@ static __poll_t lineevent_poll(struct file *filep, poll_wait(filep, &le->wait, wait); if (!kfifo_is_empty(&le->events)) - events = POLLIN | POLLRDNORM; + events = EPOLLIN | EPOLLRDNORM; return events; } @@ -775,7 +775,7 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) ret = kfifo_put(&le->events, ge); if (ret != 0) - wake_up_poll(&le->wait, POLLIN); + wake_up_poll(&le->wait, EPOLLIN); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index e2c3c5ec42d15..c53095b3b0fb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { /* HG _PR3 doesn't seem to work on this A+A weston board */ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 9a17bd3639d12..e394799979a6e 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -567,7 +567,7 @@ __poll_t drm_poll(struct file *filp, struct poll_table_struct *wait) poll_wait(filp, &file_priv->event_wait, wait); if (!list_empty(&file_priv->event_list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 909499b73d03a..021f722e24816 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, return ret == 0 
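For context on this run of POLLIN/EPOLLIN conversions (firewire, nosy, gpiolib, drm above): the change is confined to kernel-internal __poll_t masks, so the userspace ABI does not move. A hypothetical consumer of the gpiolib lineevent fd still polls exactly as before; sketch only, assuming fd came from GPIO_GET_LINEEVENT_IOCTL:

#include <poll.h>

static int wait_for_gpio_edge(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };

        /* > 0 means lineevent_poll() reported a queued edge event */
        return poll(&pfd, 1, timeout_ms);
}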
? count : ret; } +static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) +{ + struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct intel_gvt *gvt = vgpu->gvt; + int offset; + + /* Only allow MMIO GGTT entry access */ + if (index != PCI_BASE_ADDRESS_0) + return false; + + offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) - + intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0); + + return (offset >= gvt->device_info.gtt_start_offset && + offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ? + true : false; +} + static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos) { @@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, while (count) { size_t filled; - if (count >= 4 && !(*ppos % 4)) { + /* Only support GGTT entry 8 bytes read */ + if (count >= 8 && !(*ppos % 8) && + gtt_entry(mdev, ppos)) { + u64 val; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(val))) + goto read_err; + + filled = 8; + } else if (count >= 4 && !(*ppos % 4)) { u32 val; ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), @@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, while (count) { size_t filled; - if (count >= 4 && !(*ppos % 4)) { + /* Only support GGTT entry 8 bytes write */ + if (count >= 8 && !(*ppos % 8) && + gtt_entry(mdev, ppos)) { + u64 val; + + if (copy_from_user(&val, buf, sizeof(val))) + goto write_err; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, true); + if (ret <= 0) + goto write_err; + + filled = 8; + } else if (count >= 4 && !(*ppos % 4)) { u32 val; if (copy_from_user(&val, buf, sizeof(val))) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 73ad6e90e49db..256f1bb522b7a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ + {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index 7a2511538f340..736bd2bc5127f 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h @@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio, TP_PROTO(int old_id, int new_id, char *action, unsigned int reg, unsigned int old_val, unsigned int new_val), - TP_ARGS(old_id, new_id, action, reg, new_val, old_val), + TP_ARGS(old_id, new_id, action, reg, old_val, new_val), TP_STRUCT__entry( __field(int, old_id) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 173d0095e3b21..2f5209de03915 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev) intel_modeset_cleanup(dev); - /* - * free the memory space allocated for the child device - * config parsed from VBT - */ - if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { - kfree(dev_priv->vbt.child_dev); - dev_priv->vbt.child_dev = NULL; - 
dev_priv->vbt.child_dev_num = 0; - } - kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); - dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; - kfree(dev_priv->vbt.lfp_lvds_vbt_mode); - dev_priv->vbt.lfp_lvds_vbt_mode = NULL; + intel_bios_cleanup(dev_priv); vga_switcheroo_unregister_client(pdev); vga_client_register(pdev, NULL, NULL, NULL); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a42deebedb0f1..d307429a5ae0a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1349,6 +1349,7 @@ struct intel_vbt_data { u32 size; u8 *data; const u8 *sequence[MIPI_SEQ_MAX]; + u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ } dsi; int crt_ddc_pin; @@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv); /* intel_bios.c */ void intel_bios_init(struct drm_i915_private *dev_priv); +void intel_bios_cleanup(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 648e7536ff51e..0c963fcf31ffd 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_PRIORITY: { - int priority = args->value; + s64 priority = args->value; if (args->size) ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c index 42ff06fe54a3a..792facdb6702b 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c @@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) { - strncpy(dev_priv->perf.oa.test_config.uuid, + strlcpy(dev_priv->perf.oa.test_config.uuid, "577e8e2c-3fa0-4875-8743-3538d585e3b0", - UUID_STRING_LEN); + sizeof(dev_priv->perf.oa.test_config.uuid)); dev_priv->perf.oa.test_config.id = 1; dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c index ff0ac3627cc4b..ba9140c87cc0b 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/i915_oa_cnl.c @@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) { - strncpy(dev_priv->perf.oa.test_config.uuid, + strlcpy(dev_priv->perf.oa.test_config.uuid, "db41edd4-d8e7-4730-ad11-b9a2d6833503", - UUID_STRING_LEN); + sizeof(dev_priv->perf.oa.test_config.uuid)); dev_priv->perf.oa.test_config.id = 1; dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e42d9a4de322e..0be50e43507de 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -244,7 +244,7 @@ * The two separate pointers let us decouple read()s from tail pointer aging. 
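A note on the two strncpy() -> strlcpy() hunks in the OA config loaders above: with a bound of UUID_STRING_LEN (36) and a 36-character UUID literal, strncpy() fills the destination exactly and never writes the terminating NUL, whereas strlcpy() always terminates. A standalone illustration, not from the patch; strlcpy() here is the kernel/BSD helper (declared manually for the sketch), and the 37-byte buffer mirrors the uuid[UUID_STRING_LEN + 1] field of the i915 test config:

#include <string.h>

size_t strlcpy(char *dst, const char *src, size_t size);

void set_uuid(char uuid[37])
{
        /* copies exactly 36 bytes, leaves uuid unterminated */
        strncpy(uuid, "db41edd4-d8e7-4730-ad11-b9a2d6833503", 36);

        /* same copy, but guaranteed NUL-terminated within 37 bytes */
        strlcpy(uuid, "db41edd4-d8e7-4730-ad11-b9a2d6833503", 37);
}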
* * The tail pointers are checked and updated at a limited rate within a hrtimer - * callback (the same callback that is used for delivering POLLIN events) + * callback (the same callback that is used for delivering EPOLLIN events) * * Initially the tails are marked invalid with %INVALID_TAIL_PTR which * indicates that an updated tail pointer is needed. @@ -2292,13 +2292,13 @@ static ssize_t i915_perf_read(struct file *file, mutex_unlock(&dev_priv->perf.lock); } - /* We allow the poll checking to sometimes report false positive POLLIN + /* We allow the poll checking to sometimes report false positive EPOLLIN * events where we might actually report EAGAIN on read() if there's * not really any data available. In this situation though we don't - * want to enter a busy loop between poll() reporting a POLLIN event + * want to enter a busy loop between poll() reporting a EPOLLIN event * and read() returning -EAGAIN. Clearing the oa.pollin state here * effectively ensures we back off until the next hrtimer callback - * before reporting another POLLIN event. + * before reporting another EPOLLIN event. */ if (ret >= 0 || ret == -EAGAIN) { /* Maybe make ->pollin per-stream state if we support multiple @@ -2358,7 +2358,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, * samples to read. */ if (dev_priv->perf.oa.pollin) - events |= POLLIN; + events |= EPOLLIN; return events; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 55a8a1e294248..0e9b98c32b62b 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915) return sum; } -static void i915_pmu_event_destroy(struct perf_event *event) +static void engine_event_destroy(struct perf_event *event) { - WARN_ON(event->parent); + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct intel_engine_cs *engine; + + engine = intel_engine_lookup_user(i915, + engine_event_class(event), + engine_event_instance(event)); + if (WARN_ON_ONCE(!engine)) + return; + + if (engine_event_sample(event) == I915_SAMPLE_BUSY && + intel_engine_supports_stats(engine)) + intel_disable_engine_stats(engine); } -static int engine_event_init(struct perf_event *event) +static void i915_pmu_event_destroy(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + WARN_ON(event->parent); - if (!intel_engine_lookup_user(i915, engine_event_class(event), - engine_event_instance(event))) - return -ENODEV; + if (is_engine_event(event)) + engine_event_destroy(event); +} - switch (engine_event_sample(event)) { +static int +engine_event_status(struct intel_engine_cs *engine, + enum drm_i915_pmu_engine_sample sample) +{ + switch (sample) { case I915_SAMPLE_BUSY: case I915_SAMPLE_WAIT: break; case I915_SAMPLE_SEMA: - if (INTEL_GEN(i915) < 6) + if (INTEL_GEN(engine->i915) < 6) return -ENODEV; break; default: @@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event) return 0; } +static int engine_event_init(struct perf_event *event) +{ + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct intel_engine_cs *engine; + u8 sample; + int ret; + + engine = intel_engine_lookup_user(i915, engine_event_class(event), + engine_event_instance(event)); + if (!engine) + return -ENODEV; + + sample = engine_event_sample(event); + ret = engine_event_status(engine, sample); + if (ret) + return ret; 
+ + if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) + ret = intel_enable_engine_stats(engine); + + return ret; +} + static int i915_pmu_event_init(struct perf_event *event) { struct drm_i915_private *i915 = @@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event) return 0; } -static u64 __i915_pmu_event_read(struct perf_event *event) +static u64 __get_rc6(struct drm_i915_private *i915) +{ + u64 val; + + val = intel_rc6_residency_ns(i915, + IS_VALLEYVIEW(i915) ? + VLV_GT_RENDER_RC6 : + GEN6_GT_GFX_RC6); + + if (HAS_RC6p(i915)) + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); + + if (HAS_RC6pp(i915)) + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); + + return val; +} + +static u64 get_rc6(struct drm_i915_private *i915, bool locked) +{ +#if IS_ENABLED(CONFIG_PM) + unsigned long flags; + u64 val; + + if (intel_runtime_pm_get_if_in_use(i915)) { + val = __get_rc6(i915); + intel_runtime_pm_put(i915); + + /* + * If we are coming back from being runtime suspended we must + * be careful not to report a larger value than returned + * previously. + */ + + if (!locked) + spin_lock_irqsave(&i915->pmu.lock, flags); + + if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; + i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; + } else { + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } + + if (!locked) + spin_unlock_irqrestore(&i915->pmu.lock, flags); + } else { + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; + unsigned long flags2; + + /* + * We are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. + */ + if (!locked) + spin_lock_irqsave(&i915->pmu.lock, flags); + + spin_lock_irqsave(&kdev->power.lock, flags2); + + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_jiffies_last = + kdev->power.suspended_jiffies; + + val = kdev->power.suspended_jiffies - + i915->pmu.suspended_jiffies_last; + val += jiffies - kdev->power.accounting_timestamp; + + spin_unlock_irqrestore(&kdev->power.lock, flags2); + + val = jiffies_to_nsecs(val); + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + + if (!locked) + spin_unlock_irqrestore(&i915->pmu.lock, flags); + } + + return val; +#else + return __get_rc6(i915); +#endif +} + +static u64 __i915_pmu_event_read(struct perf_event *event, bool locked) { struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); @@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) if (WARN_ON_ONCE(!engine)) { /* Do nothing */ } else if (sample == I915_SAMPLE_BUSY && - engine->pmu.busy_stats) { + intel_engine_supports_stats(engine)) { val = ktime_to_ns(intel_engine_get_busy_time(engine)); } else { val = engine->pmu.sample[sample].cur; @@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) val = count_interrupts(i915); break; case I915_PMU_RC6_RESIDENCY: - intel_runtime_pm_get(i915); - val = intel_rc6_residency_ns(i915, - IS_VALLEYVIEW(i915) ? 
- VLV_GT_RENDER_RC6 : - GEN6_GT_GFX_RC6); - if (HAS_RC6p(i915)) - val += intel_rc6_residency_ns(i915, - GEN6_GT_GFX_RC6p); - if (HAS_RC6pp(i915)) - val += intel_rc6_residency_ns(i915, - GEN6_GT_GFX_RC6pp); - intel_runtime_pm_put(i915); + val = get_rc6(i915, locked); break; } } @@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event) again: prev = local64_read(&hwc->prev_count); - new = __i915_pmu_event_read(event); + new = __i915_pmu_event_read(event, false); if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) goto again; @@ -442,12 +557,6 @@ static void i915_pmu_event_read(struct perf_event *event) local64_add(new - prev, &event->count); } -static bool engine_needs_busy_stats(struct intel_engine_cs *engine) -{ - return intel_engine_supports_stats(engine) && - (engine->pmu.enable & BIT(I915_SAMPLE_BUSY)); -} - static void i915_pmu_enable(struct perf_event *event) { struct drm_i915_private *i915 = @@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event) GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); - if (engine->pmu.enable_count[sample]++ == 0) { - /* - * Enable engine busy stats tracking if needed or - * alternatively cancel the scheduled disable. - * - * If the delayed disable was pending, cancel it and - * in this case do not enable since it already is. - */ - if (engine_needs_busy_stats(engine) && - !engine->pmu.busy_stats) { - engine->pmu.busy_stats = true; - if (!cancel_delayed_work(&engine->pmu.disable_busy_stats)) - intel_enable_engine_stats(engine); - } - } + engine->pmu.enable_count[sample]++; } /* @@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event) * for all listeners. Even when the event was already enabled and has * an existing non-zero value. */ - local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); + local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true)); spin_unlock_irqrestore(&i915->pmu.lock, flags); } -static void __disable_busy_stats(struct work_struct *work) -{ - struct intel_engine_cs *engine = - container_of(work, typeof(*engine), pmu.disable_busy_stats.work); - - intel_disable_engine_stats(engine); -} - static void i915_pmu_disable(struct perf_event *event) { struct drm_i915_private *i915 = @@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event) * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. */ - if (--engine->pmu.enable_count[sample] == 0) { + if (--engine->pmu.enable_count[sample] == 0) engine->pmu.enable &= ~BIT(sample); - if (!engine_needs_busy_stats(engine) && - engine->pmu.busy_stats) { - engine->pmu.busy_stats = false; - /* - * We request a delayed disable to handle the - * rapid on/off cycles on events, which can - * happen when tools like perf stat start, in a - * nicer way. - * - * In addition, this also helps with busy stats - * accuracy with background CPU offline/online - * migration events. 
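The PMU rework above moves engine busy-stats enabling out of the delayed-work scheme and into engine_event_init()/engine_event_destroy(). For orientation, a rough sketch of how such an engine-busy counter is consumed through perf_event_open(); this is illustrative only: the PMU type id must really be read from /sys/bus/event_source/devices/i915/type at runtime (0 below is a placeholder argument), and the config macro is assumed to come from the uapi drm/i915_drm.h of this kernel:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <drm/i915_drm.h>

static uint64_t read_rcs0_busy_ns(unsigned int i915_pmu_type)
{
        struct perf_event_attr attr;
        uint64_t val = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = i915_pmu_type;
        attr.size = sizeof(attr);
        /* engine class 0 (render), instance 0, BUSY sample */
        attr.config = I915_PMU_ENGINE_BUSY(0, 0);

        /* i915 is an uncore PMU: system-wide counter, bound to CPU0 */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
                return 0;

        sleep(1);                       /* sample over one second */
        read(fd, &val, sizeof(val));    /* accumulated busy nanoseconds */
        close(fd);

        return val;
}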
- */ - queue_delayed_work(system_wq, - &engine->pmu.disable_busy_stats, - round_jiffies_up_relative(HZ)); - } - } } GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); @@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915) void i915_pmu_register(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; int ret; if (INTEL_GEN(i915) <= 2) { @@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); i915->pmu.timer.function = i915_sample; - for_each_engine(engine, i915, id) - INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats, - __disable_busy_stats); - ret = perf_pmu_register(&i915->pmu.base, "i915", -1); if (ret) goto err; @@ -843,9 +906,6 @@ void i915_pmu_register(struct drm_i915_private *i915) void i915_pmu_unregister(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - if (!i915->pmu.base.event_init) return; @@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915) hrtimer_cancel(&i915->pmu.timer); - for_each_engine(engine, i915, id) { - GEM_BUG_ON(engine->pmu.busy_stats); - flush_delayed_work(&engine->pmu.disable_busy_stats); - } - i915_pmu_unregister_cpuhp_state(i915); perf_pmu_unregister(&i915->pmu.base); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 40c154d13565a..bb62df15afa4f 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -27,6 +27,8 @@ enum { __I915_SAMPLE_FREQ_ACT = 0, __I915_SAMPLE_FREQ_REQ, + __I915_SAMPLE_RC6, + __I915_SAMPLE_RC6_ESTIMATED, __I915_NUM_PMU_SAMPLERS }; @@ -94,6 +96,10 @@ struct i915_pmu { * struct intel_engine_cs. */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; + /** + * @suspended_jiffies_last: Cached suspend time from PM core. + */ + unsigned long suspended_jiffies_last; }; #ifdef CONFIG_PERF_EVENTS diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index f7f771749e480..b49a2df444301 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) return 0; } +/* + * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, + * skip all delay + gpio operands and stop at the first DSI packet op. + */ +static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv) +{ + const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; + int index, len; + + if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1)) + return 0; + + /* index = 1 to skip sequence byte */ + for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) { + switch (data[index]) { + case MIPI_SEQ_ELEM_SEND_PKT: + return index == 1 ? 0 : index; + case MIPI_SEQ_ELEM_DELAY: + len = 5; /* 1 byte for operand + uint32 */ + break; + case MIPI_SEQ_ELEM_GPIO: + len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */ + break; + default: + return 0; + } + } + + return 0; +} + +/* + * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. + * The deassert must be done before calling intel_dsi_device_ready, so for + * these devices we split the init OTP sequence into a deassert sequence and + * the actual init OTP part. + */ +static void fixup_mipi_sequences(struct drm_i915_private *dev_priv) +{ + u8 *init_otp; + int len; + + /* Limit this to VLV for now. 
*/
+        if (!IS_VALLEYVIEW(dev_priv))
+                return;
+
+        /* Limit this to v1 vid-mode sequences */
+        if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+            dev_priv->vbt.dsi.seq_version != 1)
+                return;
+
+        /* Only do this if there are otp and assert seqs and no deassert seq */
+        if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+            !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+            dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+                return;
+
+        /* The deassert-sequence ends at the first DSI packet */
+        len = get_init_otp_deassert_fragment_len(dev_priv);
+        if (!len)
+                return;
+
+        DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+        /* Copy the fragment, update seq byte and terminate it */
+        init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+        dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+        if (!dev_priv->vbt.dsi.deassert_seq)
+                return;
+        dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+        dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+        /* Use the copy for deassert */
+        dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+                dev_priv->vbt.dsi.deassert_seq;
+        /* Replace the last byte of the fragment with init OTP seq byte */
+        init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+        /* And make MIPI_SEQ_INIT_OTP point to it */
+        dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
                     const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
         dev_priv->vbt.dsi.size = seq_size;
         dev_priv->vbt.dsi.seq_version = sequence->version;
 
+        fixup_mipi_sequences(dev_priv);
+
         DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
         return;
@@ -1588,6 +1670,29 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
         pci_unmap_rom(pdev, bios);
 }
 
+/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+        kfree(dev_priv->vbt.child_dev);
+        dev_priv->vbt.child_dev = NULL;
+        dev_priv->vbt.child_dev_num = 0;
+        kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+        kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+        dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+        kfree(dev_priv->vbt.dsi.data);
+        dev_priv->vbt.dsi.data = NULL;
+        kfree(dev_priv->vbt.dsi.pps);
+        dev_priv->vbt.dsi.pps = NULL;
+        kfree(dev_priv->vbt.dsi.config);
+        dev_priv->vbt.dsi.config = NULL;
+        kfree(dev_priv->vbt.dsi.deassert_seq);
+        dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
 /**
  * intel_bios_is_tv_present - is integrated TV present in VBT
  * @dev_priv: i915 device instance
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bd40fea16b4f1..f54ddda9fdada 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
         spin_unlock_irq(&b->rb_lock);
 }
 
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
-        return intel_wait_check_request(&request->signaling.wait, request);
-}
-
 static bool signal_complete(const struct drm_i915_gem_request *request)
 {
         if (!request)
                 return false;
 
-        /* If another process served as the bottom-half it may have already
-         * signalled that this wait is already completed.
- */ - if (intel_wait_complete(&request->signaling.wait)) - return signal_valid(request); - - /* Carefully check if the request is complete, giving time for the + /* + * Carefully check if the request is complete, giving time for the * seqno to be visible or if the GPU hung. */ - if (__i915_request_irq_complete(request)) - return true; - - return false; + return __i915_request_irq_complete(request); } static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) @@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg) request = i915_gem_request_get_rcu(request); rcu_read_unlock(); if (signal_complete(request)) { - local_bh_disable(); - dma_fence_signal(&request->fence); - local_bh_enable(); /* kick start the tasklets */ + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &request->fence.flags)) { + local_bh_disable(); + dma_fence_signal(&request->fence); + GEM_BUG_ON(!i915_gem_request_completed(request)); + local_bh_enable(); /* kick start the tasklets */ + } spin_lock_irq(&b->rb_lock); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 5dc118f26b51b..1704c8897afd0 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); + /* + * On Valleyview some DSI panels lose (v|h)sync when the clock is lower + * than 320000KHz. + */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && + IS_VALLEYVIEW(dev_priv)) + min_cdclk = max(320000, min_cdclk); + if (min_cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->max_cdclk_freq); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index d790bdc227ffb..fa960cfd2764f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; bool idle = true; - intel_runtime_pm_get(dev_priv); + /* If the whole device is asleep, the engine must be idle */ + if (!intel_runtime_pm_get_if_in_use(dev_priv)) + return true; /* First check that no commands are left in the ring */ if ((I915_READ_HEAD(engine) & HEAD_ADDR) != @@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) */ int intel_enable_engine_stats(struct intel_engine_cs *engine) { + struct intel_engine_execlists *execlists = &engine->execlists; unsigned long flags; + int err = 0; if (!intel_engine_supports_stats(engine)) return -ENODEV; + tasklet_disable(&execlists->tasklet); spin_lock_irqsave(&engine->stats.lock, flags); - if (engine->stats.enabled == ~0) - goto busy; + + if (unlikely(engine->stats.enabled == ~0)) { + err = -EBUSY; + goto unlock; + } + if (engine->stats.enabled++ == 0) { - struct intel_engine_execlists *execlists = &engine->execlists; const struct execlist_port *port = execlists->port; unsigned int num_ports = execlists_num_ports(execlists); @@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) if (engine->stats.active) engine->stats.start = engine->stats.enabled_at; } - spin_unlock_irqrestore(&engine->stats.lock, flags); - - return 0; -busy: +unlock: spin_unlock_irqrestore(&engine->stats.lock, flags); + tasklet_enable(&execlists->tasklet); - return -EBUSY; + return err; 
} static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index c5ff203e42d6a..a0e7a6c2a57cd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -366,20 +366,6 @@ struct intel_engine_cs { */ #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; - /** - * @busy_stats: Has enablement of engine stats tracking been - * requested. - */ - bool busy_stats; - /** - * @disable_busy_stats: Work item for busy stats disabling. - * - * Same as with @enable_busy_stats action, with the difference - * that we delay it in case there are rapid enable-disable - * actions, which can happen during tool startup (like perf - * stat). - */ - struct delayed_work disable_busy_stats; } pmu; /* diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index bf62303571b39..3695cde669f88 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c @@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm, void nvkm_therm_clkgate_enable(struct nvkm_therm *therm) { - if (!therm->func->clkgate_enable || !therm->clkgating_enabled) + if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled) return; nvkm_debug(&therm->subdev, @@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm) void nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend) { - if (!therm->func->clkgate_fini || !therm->clkgating_enabled) + if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled) return; nvkm_debug(&therm->subdev, @@ -395,7 +395,7 @@ void nvkm_therm_clkgate_init(struct nvkm_therm *therm, const struct nvkm_therm_clkgate_pack *p) { - if (!therm->func->clkgate_init || !therm->clkgating_enabled) + if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled) return; therm->func->clkgate_init(therm, p); diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index dfd8d0048980a..1c5e74cb9279b 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -1271,7 +1271,7 @@ static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait) pr_debug("%s\n", __func__); poll_wait(file, &vga_wait_queue, wait); - return POLLIN; + return EPOLLIN; } static int vga_arb_open(struct inode *inode, struct file *file) diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c783fd5ef8096..4f4e7a08a07be 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1185,9 +1185,9 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) poll_wait(file, &list->hdev->debug_wait, wait); if (list->head != list->tail) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; if (!list->hdev->debug) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; return 0; } diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index b7e86aba6f337..5be8de70c6517 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -142,9 +142,9 @@ static __poll_t roccat_poll(struct file *file, poll_table *wait) struct roccat_reader *reader = file->private_data; poll_wait(file, &reader->device->wait, wait); if (reader->cbuf_start != reader->device->cbuf_end) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; if (!reader->device->exist) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; return 
0; } diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 21ed6c55c40a4..e8a114157f87b 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -714,7 +714,7 @@ static __poll_t hid_sensor_custom_poll(struct file *file, poll_wait(file, &sensor_inst->wait, wait); if (!kfifo_is_empty(&sensor_inst->data_fifo)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index be210219f9829..fbfcc80094329 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -255,9 +255,9 @@ static __poll_t hidraw_poll(struct file *file, poll_table *wait) poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; if (!list->hidraw->exist) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; return 0; } diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index fc43850a155ee..4e0e7baf85136 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -760,7 +760,7 @@ static __poll_t uhid_char_poll(struct file *file, poll_table *wait) poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 0ff3e7e70c8df..e3ce233f8bdcc 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -428,9 +428,9 @@ static __poll_t hiddev_poll(struct file *file, poll_table *wait) poll_wait(file, &list->hiddev->wait, wait); if (list->head != list->tail) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; if (!list->hiddev->exist) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; return 0; } diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 8fbbacb0fe21e..324cb8ec94050 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c @@ -1132,9 +1132,9 @@ static __poll_t cs_char_poll(struct file *file, poll_table *wait) poll_wait(file, &cs_char_data.wait, wait); spin_lock_bh(&csdata->lock); if (!list_empty(&csdata->chardev_queue)) - ret = POLLIN | POLLRDNORM; + ret = EPOLLIN | EPOLLRDNORM; else if (!list_empty(&csdata->dataind_queue)) - ret = POLLIN | POLLRDNORM; + ret = EPOLLIN | EPOLLRDNORM; spin_unlock_bh(&csdata->lock); return ret; diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index 047959e74bb10..8327775279362 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c @@ -113,10 +113,10 @@ static __poll_t hvt_op_poll(struct file *file, poll_table *wait) poll_wait(file, &hvt->outmsg_q, wait); if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; if (hvt->outmsg_len > 0) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 4bdbf77f7197f..72c338eb5fae5 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { const struct tjmax_model *tm = &tjmax_model_table[i]; if (c->x86_model == tm->model && - (tm->mask == ANY || c->x86_mask == tm->mask)) + (tm->mask == ANY || c->x86_stepping == tm->mask)) return tm->tjmax; } /* Early chips have no MSR for TjMax */ - if (c->x86_model == 0xf && c->x86_mask < 4) + if (c->x86_model 
== 0xf && c->x86_stepping < 4) usemsr_ee = 0; if (c->x86_model > 0xe && usemsr_ee) { @@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu) * Readings might stop update when processor visited too deep sleep, * fixed for stepping D0 (6EC). */ - if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { + if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); return -ENODEV; } diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index ef91b8a675492..84e91286fc4fd 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c @@ -293,7 +293,7 @@ u8 vid_which_vrm(void) if (c->x86 < 6) /* Any CPU with family lower than 6 */ return 0; /* doesn't have VID */ - vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); + vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); if (vrm_ret == 134) vrm_ret = get_via_model_d_vrm(); if (vrm_ret == 0) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 06b4e1c78bd8f..051a72eecb245 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev, data->read_tempreg(data->pdev, ®val); temp = (regval >> 21) * 125; - temp -= data->temp_offset; + if (temp > data->temp_offset) + temp -= data->temp_offset; + else + temp = 0; return sprintf(buf, "%u\n", temp); } @@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev) * and AM3 formats, but that's the best we can do. */ return boot_cpu_data.x86_model < 4 || - (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); + (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } static int k10temp_probe(struct pci_dev *pdev, diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5a632bcf869bb..e59f9113fb93b 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev, return -ENOMEM; model = boot_cpu_data.x86_model; - stepping = boot_cpu_data.x86_mask; + stepping = boot_cpu_data.x86_stepping; /* feature available since SH-C0, exclude older revisions */ if ((model == 4 && stepping == 0) || diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index ff03324dee132..05e0c353e0898 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -222,7 +222,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) spin_unlock_irqrestore(&queue->list_lock, flags); iio_buffer_block_put_atomic(block); - wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); } EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); @@ -251,7 +251,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, } spin_unlock_irqrestore(&queue->list_lock, flags); - wake_up_interruptible_poll(&queue->buffer.pollq, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); } EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 6184c100a94a5..79abf70a126dd 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -166,7 +166,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, * @wait: Poll table 
structure pointer for which the driver adds * a wait queue * - * Return: (POLLIN | POLLRDNORM) if data is available for reading + * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading * or 0 for other cases */ __poll_t iio_buffer_poll(struct file *filp, @@ -180,7 +180,7 @@ __poll_t iio_buffer_poll(struct file *filp, poll_wait(filp, &rb->pollq, wait); if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } @@ -1396,7 +1396,7 @@ static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) * We can't just test for watermark to decide if we wake the poll queue * because read may request less samples than the watermark. */ - wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); return 0; } diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 0bcf073e46dbe..c6dfdf0aaac51 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -80,7 +80,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) copied = kfifo_put(&ev_int->det_events, ev); if (copied != 0) - wake_up_poll(&ev_int->wait, POLLIN); + wake_up_poll(&ev_int->wait, EPOLLIN); } return 0; @@ -92,7 +92,7 @@ EXPORT_SYMBOL(iio_push_event); * @filep: File structure pointer to identify the device * @wait: Poll table pointer to add the wait queue on * - * Return: (POLLIN | POLLRDNORM) if data is available for reading + * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading * or a negative error code on failure */ static __poll_t iio_event_poll(struct file *filep, @@ -108,7 +108,7 @@ static __poll_t iio_event_poll(struct file *filep, poll_wait(filep, &ev_int->wait, wait); if (!kfifo_is_empty(&ev_int->det_events)) - events = POLLIN | POLLRDNORM; + events = EPOLLIN | EPOLLRDNORM; return events; } diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 8ae636bb09e57..01702265c1e1a 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1144,7 +1144,7 @@ static __poll_t ib_ucm_poll(struct file *filp, poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->events)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 6ba4231f2b073..f015f1bf88c9c 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1639,7 +1639,7 @@ static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait) poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->event_list)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 78c77962422e0..bb98c9e4a7fd2 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -633,12 +633,12 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait) struct ib_umad_file *file = filp->private_data; /* we will always be able to post a MAD send */ - __poll_t mask = POLLOUT | POLLWRNORM; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; poll_wait(filp, &file->recv_wait, wait); if (!list_empty(&file->recv_list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 
5b811bf574d69..395a3b091229f 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -351,7 +351,7 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, spin_lock_irq(&ev_queue->lock); if (!list_empty(&ev_queue->event_list)) - pollflags = POLLIN | POLLRDNORM; + pollflags = EPOLLIN | EPOLLRDNORM; spin_unlock_irq(&ev_queue->lock); return pollflags; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a0f2590294b..41fafebe3b0d9 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -612,13 +612,13 @@ static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt) uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt; if (!uctxt) - pollflag = POLLERR; + pollflag = EPOLLERR; else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT) pollflag = poll_urgent(fp, pt); else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV) pollflag = poll_next(fp, pt); else /* invalid */ - pollflag = POLLERR; + pollflag = EPOLLERR; return pollflag; } @@ -1435,7 +1435,7 @@ static __poll_t poll_urgent(struct file *fp, spin_lock_irq(&dd->uctxt_lock); if (uctxt->urgent != uctxt->urgent_poll) { - pollflag = POLLIN | POLLRDNORM; + pollflag = EPOLLIN | EPOLLRDNORM; uctxt->urgent_poll = uctxt->urgent; } else { pollflag = 0; @@ -1462,7 +1462,7 @@ static __poll_t poll_next(struct file *fp, hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt); pollflag = 0; } else { - pollflag = POLLIN | POLLRDNORM; + pollflag = EPOLLIN | EPOLLRDNORM; } spin_unlock_irq(&dd->uctxt_lock); diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index f7593b5e2b761..52c29db3a2f4a 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1085,7 +1085,7 @@ static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd, spin_lock_irq(&dd->uctxt_lock); if (rcd->urgent != rcd->urgent_poll) { - pollflag = POLLIN | POLLRDNORM; + pollflag = EPOLLIN | EPOLLRDNORM; rcd->urgent_poll = rcd->urgent; } else { pollflag = 0; @@ -1111,7 +1111,7 @@ static __poll_t qib_poll_next(struct qib_ctxtdata *rcd, dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); pollflag = 0; } else - pollflag = POLLIN | POLLRDNORM; + pollflag = EPOLLIN | EPOLLRDNORM; spin_unlock_irq(&dd->uctxt_lock); return pollflag; @@ -1124,13 +1124,13 @@ static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt) rcd = ctxt_fp(fp); if (!rcd) - pollflag = POLLERR; + pollflag = EPOLLERR; else if (rcd->poll_type == QIB_POLL_TYPE_URGENT) pollflag = qib_poll_urgent(rcd, fp, pt); else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV) pollflag = qib_poll_next(rcd, fp, pt); else /* invalid */ - pollflag = POLLERR; + pollflag = EPOLLERR; return pollflag; } diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 19624e023ebd9..0336643c2ed65 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -874,7 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) iser_info("iser conn %p rc = %d\n", iser_conn, rc); if (rc > 0) - return 1; /* success, this is the equivalent of POLLOUT */ + return 1; /* success, this is the equivalent of EPOLLOUT */ else if (!rc) return 0; /* timeout */ else diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index 94049fdc583cc..c81c79d01d930 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -650,12 
+650,12 @@ static __poll_t evdev_poll(struct file *file, poll_table *wait) poll_wait(file, &evdev->wait, wait); if (evdev->exist && !client->revoked) - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; else - mask = POLLHUP | POLLERR; + mask = EPOLLHUP | EPOLLERR; if (client->packet_head != client->tail) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/input/input.c b/drivers/input/input.c index 0d0b2ab1bb6bc..9785546420a72 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1053,7 +1053,7 @@ static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait) poll_wait(file, &input_devices_poll_wait, wait); if (file->f_version != input_devices_state) { file->f_version = input_devices_state; - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } return 0; diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index fe3255572886c..4c1e427dfabb9 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -442,8 +442,8 @@ static __poll_t joydev_poll(struct file *file, poll_table *wait) struct joydev *joydev = client->joydev; poll_wait(file, &joydev->wait, wait); - return (joydev_data_pending(client) ? (POLLIN | POLLRDNORM) : 0) | - (joydev->exist ? 0 : (POLLHUP | POLLERR)); + return (joydev_data_pending(client) ? (EPOLLIN | EPOLLRDNORM) : 0) | + (joydev->exist ? 0 : (EPOLLHUP | EPOLLERR)); } static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c index 9c3f7ec3bd3d6..49b34de0aed4d 100644 --- a/drivers/input/misc/hp_sdc_rtc.c +++ b/drivers/input/misc/hp_sdc_rtc.c @@ -414,7 +414,7 @@ static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait) l = 0; if (l != 0) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index f640c591ef23f..96a887f336982 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -704,7 +704,7 @@ static __poll_t uinput_poll(struct file *file, poll_table *wait) poll_wait(file, &udev->waitq, wait); if (udev->head != udev->tail) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 731d84ae51017..e08228061bcdd 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -765,9 +765,9 @@ static __poll_t mousedev_poll(struct file *file, poll_table *wait) poll_wait(file, &mousedev->wait, wait); - mask = mousedev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR; + mask = mousedev->exist ? EPOLLOUT | EPOLLWRNORM : EPOLLHUP | EPOLLERR; if (client->ready || client->buffer) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index fccf55a380b2f..17b7fbecd9fe7 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -247,9 +247,9 @@ static __poll_t serio_raw_poll(struct file *file, poll_table *wait) poll_wait(file, &serio_raw->wait, wait); - mask = serio_raw->dead ? POLLHUP | POLLERR : POLLOUT | POLLWRNORM; + mask = serio_raw->dead ? 
EPOLLHUP | EPOLLERR : EPOLLOUT | EPOLLWRNORM; if (serio_raw->head != serio_raw->tail) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c index a63de06b08bcc..9ab5c45c3a9fe 100644 --- a/drivers/input/serio/userio.c +++ b/drivers/input/serio/userio.c @@ -255,7 +255,7 @@ static __poll_t userio_char_poll(struct file *file, poll_table *wait) poll_wait(file, &userio->waitq, wait); if (userio->head != userio->tail) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 55cfb986225be..faf734ff4cf3b 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn, goto out_unmap; } - pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n", - intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words); - return 0; out_unmap: diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 983640eba418e..8968e5e93fcb8 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, } } - pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n", - intc_name, data->map_base[0], data->num_parent_irqs); - return 0; out_free_domain: diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 691d20eb0bec1..0e65f609352ec 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, ct->chip.irq_set_wake = irq_gc_set_wake; } - pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", - base, parent_irq); - return 0; out_free_domain: diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 993a8426a4538..1ff38aff9f29f 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = { static struct msi_domain_info gicv2m_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), + MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), .chip = &gicv2m_msi_irq_chip, }; @@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, return 0; } -static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) +static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq, + int nr_irqs) { - int pos; - - pos = hwirq - v2m->spi_start; - if (pos < 0 || pos >= v2m->nr_spis) { - pr_err("Failed to teardown msi. 
Invalid hwirq %d\n", hwirq); - return; - } - spin_lock(&v2m_lock); - __clear_bit(pos, v2m->bm); + bitmap_release_region(v2m->bm, hwirq - v2m->spi_start, + get_count_order(nr_irqs)); spin_unlock(&v2m_lock); } @@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct v2m_data *v2m = NULL, *tmp; - int hwirq, offset, err = 0; + int hwirq, offset, i, err = 0; spin_lock(&v2m_lock); list_for_each_entry(tmp, &v2m_nodes, entry) { - offset = find_first_zero_bit(tmp->bm, tmp->nr_spis); - if (offset < tmp->nr_spis) { - __set_bit(offset, tmp->bm); + offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis, + get_count_order(nr_irqs)); + if (offset >= 0) { v2m = tmp; break; } @@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, hwirq = v2m->spi_start + offset; - err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); - if (err) { - gicv2m_unalloc_msi(v2m, hwirq); - return err; - } + for (i = 0; i < nr_irqs; i++) { + err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + goto fail; - irq_domain_set_hwirq_and_chip(domain, virq, hwirq, - &gicv2m_irq_chip, v2m); + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, + &gicv2m_irq_chip, v2m); + } return 0; + +fail: + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + gicv2m_unalloc_msi(v2m, hwirq, nr_irqs); + return err; } static void gicv2m_irq_domain_free(struct irq_domain *domain, @@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain, unsigned int virq, struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct v2m_data *v2m = irq_data_get_irq_chip_data(d); - BUG_ON(nr_irqs != 1); - gicv2m_unalloc_msi(v2m, d->hwirq); + gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs); irq_domain_free_irqs_parent(domain, virq, nr_irqs); } diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 14a8c0a7e095e..25a98de5cfb28 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void) for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) continue; diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 833a90fe33aed..8881a053c173e 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c @@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void) for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) continue; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 06f025fd5726f..1d3056f537472 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -3314,6 +3314,8 @@ static int __init its_of_probe(struct device_node *node) for (np = of_find_matching_node(node, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) { pr_warn("%pOF: no msi-controller property, ITS ignored\n", np); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 
a57c0fbbd34a4..d99cc07903ec4 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) MPIDR_TO_SGI_RS(cluster_id) | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); - pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); gic_write_sgi1r(val); } @@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) * Ensure that stores to Normal memory are visible to the * other CPUs before issuing the IPI. */ - smp_wmb(); + wmb(); for_each_cpu(cpu, mask) { u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index ef92a4d2038ee..d32268cc1174c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, spin_lock_irqsave(&gic_lock, flags); write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); - gic_clear_pcpu_masks(intr); - set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); irq_data_update_effective_affinity(data, cpumask_of(cpu)); spin_unlock_irqrestore(&gic_lock, flags); diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index e268811dc544b..19cd93783c87d 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -731,12 +731,12 @@ capi_poll(struct file *file, poll_table *wait) __poll_t mask = 0; if (!cdev->ap.applid) - return POLLERR; + return EPOLLERR; poll_wait(file, &(cdev->recvwait), wait); - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; if (!skb_queue_empty(&cdev->recvqueue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 34b7704042a4a..342585e04fd3f 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -125,9 +125,9 @@ isdn_divert_poll(struct file *file, poll_table *wait) __poll_t mask = 0; poll_wait(file, &(rd_queue), wait); - /* mask = POLLOUT | POLLWRNORM; */ + /* mask = EPOLLOUT | EPOLLWRNORM; */ if (*((struct divert_info **) file->private_data)) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } return mask; } /* isdn_divert_poll */ diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c index 70f16102a0010..5a95587b31172 100644 --- a/drivers/isdn/hardware/eicon/divamnt.c +++ b/drivers/isdn/hardware/eicon/divamnt.c @@ -103,9 +103,9 @@ static __poll_t maint_poll(struct file *file, poll_table *wait) __poll_t mask = 0; poll_wait(file, &msgwaitq, wait); - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; if (file->private_data || diva_dbg_q_length()) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } return (mask); } diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c index da5cc5ab7e2de..525518c945fe6 100644 --- a/drivers/isdn/hardware/eicon/divasi.c +++ b/drivers/isdn/hardware/eicon/divasi.c @@ -370,31 +370,31 @@ static __poll_t um_idi_poll(struct file *file, poll_table *wait) diva_um_idi_os_context_t *p_os; if (!file->private_data) { - return (POLLERR); + return (EPOLLERR); } if ((!(p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->private_data))) || p_os->aborted) { - return (POLLERR); 
+ return (EPOLLERR); } poll_wait(file, &p_os->read_wait, wait); if (p_os->aborted) { - return (POLLERR); + return (EPOLLERR); } switch (diva_user_mode_idi_ind_ready(file->private_data, file)) { case (-1): - return (POLLERR); + return (EPOLLERR); case 0: return (0); } - return (POLLIN | POLLRDNORM); + return (EPOLLIN | EPOLLRDNORM); } static int um_idi_open(struct inode *inode, struct file *file) diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index fbc788e6f0db9..b9980e84f9db5 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c @@ -653,9 +653,9 @@ static ssize_t divas_read(struct file *file, char __user *buf, static __poll_t divas_poll(struct file *file, poll_table *wait) { if (!file->private_data) { - return (POLLERR); + return (EPOLLERR); } - return (POLLIN | POLLRDNORM); + return (EPOLLIN | EPOLLRDNORM); } static const struct file_operations divas_fops = { diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c index 3478f6f099eba..f52f4622b10b0 100644 --- a/drivers/isdn/hardware/eicon/divasproc.c +++ b/drivers/isdn/hardware/eicon/divasproc.c @@ -101,7 +101,7 @@ divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off static __poll_t divas_poll(struct file *file, poll_table *wait) { - return (POLLERR); + return (EPOLLERR); } static int divas_open(struct inode *inode, struct file *file) diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 6abea6915f494..6e898b90e86e6 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c @@ -294,7 +294,7 @@ hysdn_log_poll(struct file *file, poll_table *wait) poll_wait(file, &(pd->rd_queue), wait); if (*((struct log_data **) file->private_data)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } /* hysdn_log_poll */ diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 0521c32949d47..7c6f3f5d9d9a2 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1237,22 +1237,22 @@ isdn_poll(struct file *file, poll_table *wait) mutex_lock(&isdn_mutex); if (minor == ISDN_MINOR_STATUS) { poll_wait(file, &(dev->info_waitq), wait); - /* mask = POLLOUT | POLLWRNORM; */ + /* mask = EPOLLOUT | EPOLLWRNORM; */ if (file->private_data) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } goto out; } if (minor >= ISDN_MINOR_CTRL && minor <= ISDN_MINOR_CTRLMAX) { if (drvidx < 0) { /* driver deregistered while file open */ - mask = POLLHUP; + mask = EPOLLHUP; goto out; } poll_wait(file, &(dev->drv[drvidx]->st_waitq), wait); - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; if (dev->drv[drvidx]->stavail) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } goto out; } @@ -1262,7 +1262,7 @@ isdn_poll(struct file *file, poll_table *wait) goto out; } #endif - mask = POLLERR; + mask = EPOLLERR; out: mutex_unlock(&isdn_mutex); return mask; diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 57884319b4b13..a7b275ea5de1d 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -704,12 +704,12 @@ isdn_ppp_poll(struct file *file, poll_table *wait) if (!(is->state & IPPP_OPEN)) { if (is->state == IPPP_CLOSEWAIT) - return POLLHUP; + return EPOLLHUP; printk(KERN_DEBUG "isdn_ppp: device not open\n"); - return POLLERR; + return EPOLLERR; } /* we're always ready to send .. 
*/ - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; spin_lock_irqsave(&is->buflock, flags); bl = is->last; @@ -719,7 +719,7 @@ isdn_ppp_poll(struct file *file, poll_table *wait) */ if (bf->next != bl || (is->state & IPPP_NOBLOCK)) { is->state &= ~IPPP_NOBLOCK; - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } spin_unlock_irqrestore(&is->buflock, flags); return mask; diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index f4272d4e0a26c..211ed6cffd10e 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -145,7 +145,7 @@ static __poll_t mISDN_poll(struct file *filep, poll_table *wait) { struct mISDNtimerdev *dev = filep->private_data; - __poll_t mask = POLLERR; + __poll_t mask = EPOLLERR; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait); @@ -153,7 +153,7 @@ mISDN_poll(struct file *filep, poll_table *wait) poll_wait(filep, &dev->wait, wait); mask = 0; if (dev->work || !list_empty(&dev->expired)) - mask |= (POLLIN | POLLRDNORM); + mask |= (EPOLLIN | EPOLLRDNORM); if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__, dev->work, list_empty(&dev->expired)); diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c index 5beacab05ed74..0c43bfac9598e 100644 --- a/drivers/leds/uleds.c +++ b/drivers/leds/uleds.c @@ -183,7 +183,7 @@ static __poll_t uleds_poll(struct file *file, poll_table *wait) poll_wait(file, &udev->waitq, wait); if (udev->new_data) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 62f541f968f6f..07074820a1674 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, dev->ofdev.dev.of_node = np; dev->ofdev.archdata.dma_mask = 0xffffffffUL; dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; + dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask; dev->ofdev.dev.parent = parent; dev->ofdev.dev.bus = &macio_bus_type; dev->ofdev.dev.release = macio_release_dev; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 346e6f5f77be7..e8ae2e54151cc 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -1259,7 +1259,7 @@ static __poll_t smu_fpoll(struct file *file, poll_table *wait) spin_lock_irqsave(&pp->lock, flags); if (pp->busy && pp->cmd.status != 1) - mask |= POLLIN; + mask |= EPOLLIN; spin_unlock_irqrestore(&pp->lock, flags); } if (pp->mode == smu_file_events) { diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 08849e33c5679..94c0f3f7df699 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -2169,7 +2169,7 @@ pmu_fpoll(struct file *filp, poll_table *wait) poll_wait(filp, &pp->wait, wait); spin_lock_irqsave(&pp->lock, flags); if (pp->rb_get != pp->rb_put) - mask |= POLLIN; + mask |= EPOLLIN; spin_unlock_irqrestore(&pp->lock, flags); return mask; } diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index f84730d63b1f2..58bfafc34bc46 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -243,7 +243,7 @@ mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait) poll_wait(filp, &tdev->waitq, wait); if (mbox_test_message_data_ready(tdev)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/md/bcache/alloc.c 
b/drivers/md/bcache/alloc.c index 6cc6c0f9c3a95..458e1d38577db 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -287,8 +287,10 @@ do { \ break; \ \ mutex_unlock(&(ca)->set->bucket_lock); \ - if (kthread_should_stop()) \ + if (kthread_should_stop()) { \ + set_current_state(TASK_RUNNING); \ return 0; \ + } \ \ schedule(); \ mutex_lock(&(ca)->set->bucket_lock); \ diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 5e2d4e80198e5..12e5197f186cd 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -658,10 +658,15 @@ struct cache_set { atomic_long_t writeback_keys_done; atomic_long_t writeback_keys_failed; + atomic_long_t reclaim; + atomic_long_t flush_write; + atomic_long_t retry_flush_write; + enum { ON_ERROR_UNREGISTER, ON_ERROR_PANIC, } on_error; +#define DEFAULT_IO_ERROR_LIMIT 8 unsigned error_limit; unsigned error_decay; @@ -675,6 +680,8 @@ struct cache_set { #define BUCKET_HASH_BITS 12 struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS]; + + DECLARE_HEAP(struct btree *, flush_btree); }; struct bbio { @@ -917,7 +924,7 @@ void bcache_write_super(struct cache_set *); int bch_flash_dev_create(struct cache_set *c, uint64_t size); -int bch_cached_dev_attach(struct cached_dev *, struct cache_set *); +int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *); void bch_cached_dev_detach(struct cached_dev *); void bch_cached_dev_run(struct cached_dev *); void bcache_device_stop(struct bcache_device *); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index bf3a48aa9a9a4..fad9fe8817eb1 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1869,14 +1869,17 @@ void bch_initial_gc_finish(struct cache_set *c) */ for_each_cache(ca, c, i) { for_each_bucket(b, ca) { - if (fifo_full(&ca->free[RESERVE_PRIO])) + if (fifo_full(&ca->free[RESERVE_PRIO]) && + fifo_full(&ca->free[RESERVE_BTREE])) break; if (bch_can_invalidate_bucket(ca, b) && !GC_MARK(b)) { __bch_invalidate_one_bucket(ca, b); - fifo_push(&ca->free[RESERVE_PRIO], - b - ca->buckets); + if (!fifo_push(&ca->free[RESERVE_PRIO], + b - ca->buckets)) + fifo_push(&ca->free[RESERVE_BTREE], + b - ca->buckets); } } } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index a87165c1d8e52..1b736b8607399 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -368,6 +368,12 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) } /* Journalling */ +#define journal_max_cmp(l, r) \ + (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \ + fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal)) +#define journal_min_cmp(l, r) \ + (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \ + fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal)) static void btree_flush_write(struct cache_set *c) { @@ -375,28 +381,41 @@ static void btree_flush_write(struct cache_set *c) * Try to find the btree node with that references the oldest journal * entry, best is our current candidate and is locked if non NULL: */ - struct btree *b, *best; - unsigned i; + struct btree *b; + int i; + + atomic_long_inc(&c->flush_write); + retry: - best = NULL; - - for_each_cached_btree(b, c, i) - if (btree_current_write(b)->journal) { - if (!best) - best = b; - else if (journal_pin_cmp(c, - btree_current_write(best)->journal, - btree_current_write(b)->journal)) { - best = b; + spin_lock(&c->journal.lock); + if (heap_empty(&c->flush_btree)) { + for_each_cached_btree(b, c, i) + if 
(btree_current_write(b)->journal) { + if (!heap_full(&c->flush_btree)) + heap_add(&c->flush_btree, b, + journal_max_cmp); + else if (journal_max_cmp(b, + heap_peek(&c->flush_btree))) { + c->flush_btree.data[0] = b; + heap_sift(&c->flush_btree, 0, + journal_max_cmp); + } } - } - b = best; + for (i = c->flush_btree.used / 2 - 1; i >= 0; --i) + heap_sift(&c->flush_btree, i, journal_min_cmp); + } + + b = NULL; + heap_pop(&c->flush_btree, b, journal_min_cmp); + spin_unlock(&c->journal.lock); + if (b) { mutex_lock(&b->write_lock); if (!btree_current_write(b)->journal) { mutex_unlock(&b->write_lock); /* We raced */ + atomic_long_inc(&c->retry_flush_write); goto retry; } @@ -476,6 +495,8 @@ static void journal_reclaim(struct cache_set *c) unsigned iter, n = 0; atomic_t p; + atomic_long_inc(&c->reclaim); + while (!atomic_read(&fifo_front(&c->journal.pin))) fifo_pop(&c->journal.pin, p); @@ -819,7 +840,8 @@ int bch_journal_alloc(struct cache_set *c) j->w[0].c = c; j->w[1].c = c; - if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || + if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) || + !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) return -ENOMEM; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 133b81225ea9c..3128957880367 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -957,7 +957,8 @@ void bch_cached_dev_detach(struct cached_dev *dc) cached_dev_put(dc); } -int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) +int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, + uint8_t *set_uuid) { uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; @@ -965,7 +966,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) bdevname(dc->bdev, buf); - if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)) + if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) || + (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))) return -ENOENT; if (dc->disk.c) { @@ -1194,7 +1196,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, list_add(&dc->list, &uncached_devices); list_for_each_entry(c, &bch_cache_sets, list) - bch_cached_dev_attach(dc, c); + bch_cached_dev_attach(dc, c, NULL); if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE || BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) @@ -1553,7 +1555,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->congested_read_threshold_us = 2000; c->congested_write_threshold_us = 20000; - c->error_limit = 8 << IO_ERROR_SHIFT; + c->error_limit = DEFAULT_IO_ERROR_LIMIT; return c; err: @@ -1716,7 +1718,7 @@ static void run_cache_set(struct cache_set *c) bcache_write_super(c); list_for_each_entry_safe(dc, t, &uncached_devices, list) - bch_cached_dev_attach(dc, c); + bch_cached_dev_attach(dc, c, NULL); flash_devs_run(c); @@ -1833,6 +1835,7 @@ void bch_cache_release(struct kobject *kobj) static int cache_alloc(struct cache *ca) { size_t free; + size_t btree_buckets; struct bucket *b; __module_get(THIS_MODULE); @@ -1840,9 +1843,19 @@ static int cache_alloc(struct cache *ca) bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8); + /* + * when ca->sb.njournal_buckets is not zero, journal exists, + * and in bch_journal_replay(), tree node may split, + * so bucket of RESERVE_BTREE type is needed, + * the worst situation is all journal buckets are valid journal, + * and all the keys need to replay, + * so the 
number of RESERVE_BTREE type buckets should be as much + * as journal buckets + */ + btree_buckets = ca->sb.njournal_buckets ?: 8; free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; - if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || + if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) || !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index b4184092c7279..78cd7bd50fddd 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -65,6 +65,9 @@ read_attribute(bset_tree_stats); read_attribute(state); read_attribute(cache_read_races); +read_attribute(reclaim); +read_attribute(flush_write); +read_attribute(retry_flush_write); read_attribute(writeback_keys_done); read_attribute(writeback_keys_failed); read_attribute(io_errors); @@ -195,7 +198,7 @@ STORE(__cached_dev) { struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj); - ssize_t v = size; + ssize_t v; struct cache_set *c; struct kobj_uevent_env *env; @@ -215,7 +218,9 @@ STORE(__cached_dev) sysfs_strtoul_clamp(writeback_rate, dc->writeback_rate.rate, 1, INT_MAX); - d_strtoul_nonzero(writeback_rate_update_seconds); + sysfs_strtoul_clamp(writeback_rate_update_seconds, + dc->writeback_rate_update_seconds, + 1, WRITEBACK_RATE_UPDATE_SECS_MAX); d_strtoul(writeback_rate_i_term_inverse); d_strtoul_nonzero(writeback_rate_p_term_inverse); @@ -267,17 +272,20 @@ STORE(__cached_dev) } if (attr == &sysfs_attach) { - if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16) + uint8_t set_uuid[16]; + + if (bch_parse_uuid(buf, set_uuid) < 16) return -EINVAL; + v = -ENOENT; list_for_each_entry(c, &bch_cache_sets, list) { - v = bch_cached_dev_attach(dc, c); + v = bch_cached_dev_attach(dc, c, set_uuid); if (!v) return size; } pr_err("Can't attach %s: cache set not found", buf); - size = v; + return v; } if (attr == &sysfs_detach && dc->disk.c) @@ -545,6 +553,15 @@ SHOW(__bch_cache_set) sysfs_print(cache_read_races, atomic_long_read(&c->cache_read_races)); + sysfs_print(reclaim, + atomic_long_read(&c->reclaim)); + + sysfs_print(flush_write, + atomic_long_read(&c->flush_write)); + + sysfs_print(retry_flush_write, + atomic_long_read(&c->retry_flush_write)); + sysfs_print(writeback_keys_done, atomic_long_read(&c->writeback_keys_done)); sysfs_print(writeback_keys_failed, @@ -556,7 +573,7 @@ SHOW(__bch_cache_set) /* See count_io_errors for why 88 */ sysfs_print(io_error_halflife, c->error_decay * 88); - sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT); + sysfs_print(io_error_limit, c->error_limit); sysfs_hprint(congested, ((uint64_t) bch_get_congested(c)) << 9); @@ -656,7 +673,7 @@ STORE(__bch_cache_set) } if (attr == &sysfs_io_error_limit) - c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT; + c->error_limit = strtoul_or_return(buf); /* See count_io_errors() for why 88 */ if (attr == &sysfs_io_error_halflife) @@ -731,6 +748,9 @@ static struct attribute *bch_cache_set_internal_files[] = { &sysfs_bset_tree_stats, &sysfs_cache_read_races, + &sysfs_reclaim, + &sysfs_flush_write, + &sysfs_retry_flush_write, &sysfs_writeback_keys_done, &sysfs_writeback_keys_failed, diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 4df4c5c1cab2e..a6763db7f061b 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -112,6 +112,8 @@ do { \ #define heap_full(h) ((h)->used == 
(h)->size) +#define heap_empty(h) ((h)->used == 0) + #define DECLARE_FIFO(type, name) \ struct { \ size_t front, back, size, mask; \ diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 51306a19ab032..f1d2fc15abcc0 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -564,18 +564,21 @@ static int bch_writeback_thread(void *arg) while (!kthread_should_stop()) { down_write(&dc->writeback_lock); + set_current_state(TASK_INTERRUPTIBLE); if (!atomic_read(&dc->has_dirty) || (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && !dc->writeback_running)) { up_write(&dc->writeback_lock); - set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); return 0; + } schedule(); continue; } + set_current_state(TASK_RUNNING); searched_full_index = refill_dirty(dc); @@ -652,7 +655,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate.rate = 1024; dc->writeback_rate_minimum = 8; - dc->writeback_rate_update_seconds = 5; + dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; dc->writeback_rate_p_term_inverse = 40; dc->writeback_rate_i_term_inverse = 10000; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 66f1c527fa243..587b255998568 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -8,6 +8,9 @@ #define MAX_WRITEBACKS_IN_PASS 5 #define MAX_WRITESIZE_IN_PASS 5000 /* *512b */ +#define WRITEBACK_RATE_UPDATE_SECS_MAX 60 +#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5 + /* * 14 (16384ths) is chosen here as something that each backing device * should be a reasonable fraction of the share, and not to blow up diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 3f6791afd3e45..a89fd8f44453e 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1937,7 +1937,7 @@ static __poll_t dm_poll(struct file *filp, poll_table *wait) poll_wait(filp, &dm_global_eventq, wait); if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0) - mask |= POLLIN; + mask |= EPOLLIN; return mask; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d6de00f367efd..68136806d3658 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -903,7 +903,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) queue_io(md, bio); } else { /* done with normal IO or empty flush */ - bio->bi_status = io_error; + if (io_error) + bio->bi_status = io_error; bio_endio(bio); } } diff --git a/drivers/md/md.c b/drivers/md/md.c index 0081ace39a649..bc67ab6844f02 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7891,14 +7891,14 @@ static __poll_t mdstat_poll(struct file *filp, poll_table *wait) __poll_t mask; if (md_unloading) - return POLLIN|POLLRDNORM|POLLERR|POLLPRI; + return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; poll_wait(filp, &md_event_waiters, wait); /* always allow read */ - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; if (seq->poll_event != atomic_read(&md_event_count)) - mask |= POLLERR | POLLPRI; + mask |= EPOLLERR | EPOLLPRI; return mask; } diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index ecc89d9a279b2..492db12b8c4dc 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -51,15 +51,15 @@ static __poll_t cec_poll(struct file *filp, __poll_t res = 0; if (!cec_is_registered(adap)) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; mutex_lock(&adap->lock); if (adap->is_configured && 
adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ) - res |= POLLOUT | POLLWRNORM; + res |= EPOLLOUT | EPOLLWRNORM; if (fh->queued_msgs) - res |= POLLIN | POLLRDNORM; + res |= EPOLLIN | EPOLLRDNORM; if (fh->total_queued_events) - res |= POLLPRI; + res |= EPOLLPRI; poll_wait(filp, &fh->wait, poll); mutex_unlock(&adap->lock); return res; diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index 8ee3eebef4dbd..d4987fd05d05f 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -332,7 +332,7 @@ static __poll_t __fops_poll(struct file *file, struct poll_table_struct *wait) if (vdev->vfl_type == VFL_TYPE_VBI) { if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_SLICED_VBI_OUTPUT) - return res | POLLOUT | POLLWRNORM; + return res | EPOLLOUT | EPOLLWRNORM; if( 0 == fh->vbi_q.streaming ) return res | videobuf_poll_stream(file, &fh->vbi_q, wait); q = &fh->vbi_q; @@ -346,13 +346,13 @@ static __poll_t __fops_poll(struct file *file, struct poll_table_struct *wait) if (!buf) { DEB_D("buf == NULL!\n"); - return res | POLLERR; + return res | EPOLLERR; } poll_wait(file, &buf->done, wait); if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) { DEB_D("poll succeeded!\n"); - return res | POLLIN | POLLRDNORM; + return res | EPOLLIN | EPOLLRDNORM; } DEB_D("nothing to poll for, buf->state:%d\n", buf->state); diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c index 403645fe90795..40891f4f842b9 100644 --- a/drivers/media/common/siano/smsdvb-debugfs.c +++ b/drivers/media/common/siano/smsdvb-debugfs.c @@ -371,7 +371,7 @@ static __poll_t smsdvb_stats_poll(struct file *file, poll_table *wait) rc = smsdvb_stats_wait_read(debug_data); kref_put(&debug_data->refcount, smsdvb_debugfs_data_release); - return rc > 0 ? POLLIN | POLLRDNORM : 0; + return rc > 0 ? EPOLLIN | EPOLLRDNORM : 0; } static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf, diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 9a84c70927145..debe35fc66b41 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -2038,9 +2038,9 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, struct vb2_buffer *vb = NULL; unsigned long flags; - if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM))) + if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM))) return 0; - if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM))) + if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) return 0; /* @@ -2048,18 +2048,18 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, */ if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) { if (!q->is_output && (q->io_modes & VB2_READ) && - (req_events & (POLLIN | POLLRDNORM))) { + (req_events & (EPOLLIN | EPOLLRDNORM))) { if (__vb2_init_fileio(q, 1)) - return POLLERR; + return EPOLLERR; } if (q->is_output && (q->io_modes & VB2_WRITE) && - (req_events & (POLLOUT | POLLWRNORM))) { + (req_events & (EPOLLOUT | EPOLLWRNORM))) { if (__vb2_init_fileio(q, 0)) - return POLLERR; + return EPOLLERR; /* * Write to OUTPUT queue can be done immediately. */ - return POLLOUT | POLLWRNORM; + return EPOLLOUT | EPOLLWRNORM; } } @@ -2068,24 +2068,24 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, * error flag is set. 
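 * Note that poll(2)/epoll(7) deliver (E)POLLERR to the caller even when
 * it was not in the requested event mask, so this error indication
 * always reaches userspace.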
*/ if (!vb2_is_streaming(q) || q->error) - return POLLERR; + return EPOLLERR; /* * If this quirk is set and QBUF hasn't been called yet then - * return POLLERR as well. This only affects capture queues, output + * return EPOLLERR as well. This only affects capture queues, output * queues will always initialize waiting_for_buffers to false. * This quirk is set by V4L2 for backwards compatibility reasons. */ if (q->quirk_poll_must_check_waiting_for_buffers && - q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) - return POLLERR; + q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM))) + return EPOLLERR; /* * For output streams you can call write() as long as there are fewer * buffers queued than there are buffers available. */ if (q->is_output && q->fileio && q->queued_count < q->num_buffers) - return POLLOUT | POLLWRNORM; + return EPOLLOUT | EPOLLWRNORM; if (list_empty(&q->done_list)) { /* @@ -2093,7 +2093,7 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, * return immediately. DQBUF will return -EPIPE. */ if (q->last_buffer_dequeued) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; poll_wait(file, &q->done_wq, wait); } @@ -2110,8 +2110,8 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, if (vb && (vb->state == VB2_BUF_STATE_DONE || vb->state == VB2_BUF_STATE_ERROR)) { return (q->is_output) ? - POLLOUT | POLLWRNORM : - POLLIN | POLLRDNORM; + EPOLLOUT | EPOLLWRNORM : + EPOLLIN | EPOLLRDNORM; } return 0; } diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index d9a487aab99c9..886a2d8d5c6c4 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -658,7 +658,7 @@ int vb2_queue_init(struct vb2_queue *q) == V4L2_BUF_FLAG_TIMESTAMP_COPY; /* * For compatibility with vb1: if QBUF hasn't been called yet, then - * return POLLERR as well. This only affects capture queues, output + * return EPOLLERR as well. This only affects capture queues, output * queues will always initialize waiting_for_buffers to false. 
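 * Without the quirk, a capture poll() issued before any QBUF would
 * simply block forever, since no buffer could ever complete.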
*/ q->quirk_poll_must_check_waiting_for_buffers = true; @@ -683,8 +683,8 @@ __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) struct v4l2_fh *fh = file->private_data; if (v4l2_event_pending(fh)) - res = POLLPRI; - else if (req_events & POLLPRI) + res = EPOLLPRI; + else if (req_events & EPOLLPRI) poll_wait(file, &fh->wait, wait); } @@ -921,7 +921,7 @@ __poll_t vb2_fop_poll(struct file *file, poll_table *wait) WARN_ON(!lock); if (lock && mutex_lock_interruptible(lock)) - return POLLERR; + return EPOLLERR; fileio = q->fileio; diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index bc198f84b9cd0..6d53af00190e3 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -1179,7 +1179,7 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait) __poll_t mask = 0; if ((!dmxdevfilter) || dmxdevfilter->dev->exit) - return POLLERR; + return EPOLLERR; if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait); @@ -1191,10 +1191,10 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait) return 0; if (dmxdevfilter->buffer.error) - mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer)) - mask |= (POLLIN | POLLRDNORM | POLLPRI); + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI); return mask; } @@ -1331,7 +1331,7 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) dprintk("%s\n", __func__); if (dmxdev->exit) - return POLLERR; + return EPOLLERR; if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait); @@ -1343,12 +1343,12 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) #endif if (need_ringbuffer) { if (dmxdev->dvr_buffer.error) - mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer)) - mask |= (POLLIN | POLLRDNORM | POLLPRI); + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI); } else - mask |= (POLLOUT | POLLWRNORM | POLLPRI); + mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI); return mask; } diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index b462ebc0c544d..204d0f6c678da 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -1796,7 +1796,7 @@ static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) dprintk("%s\n", __func__); if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) - mask |= POLLIN; + mask |= EPOLLIN; /* if there is something, return now */ if (mask) @@ -1806,7 +1806,7 @@ static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) poll_wait(file, &ca->wait_queue, wait); if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) - mask |= POLLIN; + mask |= EPOLLIN; return mask; } diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 87fc1bcae5ae6..a7ed16e0841d5 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -2646,7 +2646,7 @@ static __poll_t dvb_frontend_poll(struct file *file, struct poll_table_struct *w poll_wait (file, &fepriv->events.wait_queue, wait); if (fepriv->events.eventw != fepriv->events.eventr) - return (POLLIN | POLLRDNORM | POLLPRI); + return (EPOLLIN | EPOLLRDNORM | EPOLLPRI); return 0; } diff --git 
a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c index b4ddfff742671..8dc5a7495abee 100644 --- a/drivers/media/firewire/firedtv-ci.c +++ b/drivers/media/firewire/firedtv-ci.c @@ -209,7 +209,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg) static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait) { - return POLLIN; + return EPOLLIN; } static const struct file_operations fdtv_ca_fops = { diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c index 00640233a5e32..c3089bd34df25 100644 --- a/drivers/media/i2c/saa6588.c +++ b/drivers/media/i2c/saa6588.c @@ -413,7 +413,7 @@ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) case SAA6588_CMD_POLL: a->result = 0; if (s->data_available_for_read) - a->result |= POLLIN | POLLRDNORM; + a->result |= EPOLLIN | EPOLLRDNORM; poll_wait(a->instance, &s->read_queue, a->event_list); break; diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c index 3049b1f505e58..67ac51eff15c3 100644 --- a/drivers/media/media-devnode.c +++ b/drivers/media/media-devnode.c @@ -105,7 +105,7 @@ static __poll_t media_poll(struct file *filp, struct media_devnode *devnode = media_devnode_data(filp); if (!media_devnode_is_registered(devnode)) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; if (!devnode->fops->poll) return DEFAULT_POLLMASK; return devnode->fops->poll(filp, poll); diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index c988669e22ff9..f697698fe38de 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -2964,39 +2964,39 @@ static __poll_t bttv_poll(struct file *file, poll_table *wait) __poll_t req_events = poll_requested_events(wait); if (v4l2_event_pending(&fh->fh)) - rc = POLLPRI; - else if (req_events & POLLPRI) + rc = EPOLLPRI; + else if (req_events & EPOLLPRI) poll_wait(file, &fh->fh.wait, wait); - if (!(req_events & (POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return rc; if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) { if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI)) - return rc | POLLERR; + return rc | EPOLLERR; return rc | videobuf_poll_stream(file, &fh->vbi, wait); } if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { /* streaming capture */ if (list_empty(&fh->cap.stream)) - return rc | POLLERR; + return rc | EPOLLERR; buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream); } else { /* read() capture */ if (NULL == fh->cap.read_buf) { /* need to capture a new frame */ if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM)) - return rc | POLLERR; + return rc | EPOLLERR; fh->cap.read_buf = videobuf_sg_alloc(fh->cap.msize); if (NULL == fh->cap.read_buf) - return rc | POLLERR; + return rc | EPOLLERR; fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR; field = videobuf_next_field(&fh->cap); if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) { kfree (fh->cap.read_buf); fh->cap.read_buf = NULL; - return rc | POLLERR; + return rc | EPOLLERR; } fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); fh->cap.read_off = 0; @@ -3007,7 +3007,7 @@ static __poll_t bttv_poll(struct file *file, poll_table *wait) poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) - rc = rc | POLLIN|POLLRDNORM; + rc = rc | EPOLLIN|EPOLLRDNORM; return rc; } @@ -3338,8 +3338,8 @@ static __poll_t radio_poll(struct file *file, poll_table *wait) __poll_t res = 0; if 
(v4l2_event_pending(&fh->fh)) - res = POLLPRI; - else if (req_events & POLLPRI) + res = EPOLLPRI; + else if (req_events & EPOLLPRI) poll_wait(file, &fh->fh.wait, wait); radio_enable(btv); cmd.instance = file; diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c index a8dbb922ba4b9..a3f44e30f8219 100644 --- a/drivers/media/pci/cx18/cx18-fileops.c +++ b/drivers/media/pci/cx18/cx18-fileops.c @@ -613,7 +613,7 @@ __poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait) /* Start a capture if there is none */ if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) && - (req_events & (POLLIN | POLLRDNORM))) { + (req_events & (EPOLLIN | EPOLLRDNORM))) { int rc; mutex_lock(&cx->serialize_lock); @@ -622,7 +622,7 @@ __poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait) if (rc) { CX18_DEBUG_INFO("Could not start capture for %s (%d)\n", s->name, rc); - return POLLERR; + return EPOLLERR; } CX18_DEBUG_FILE("Encoder poll started capture\n"); } @@ -632,23 +632,23 @@ __poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait) __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait); if (v4l2_event_pending(&id->fh)) - res |= POLLPRI; - if (eof && videobuf_poll == POLLERR) - return res | POLLHUP; + res |= EPOLLPRI; + if (eof && videobuf_poll == EPOLLERR) + return res | EPOLLHUP; return res | videobuf_poll; } /* add stream's waitq to the poll list */ CX18_DEBUG_HI_FILE("Encoder poll\n"); if (v4l2_event_pending(&id->fh)) - res |= POLLPRI; + res |= EPOLLPRI; else poll_wait(filp, &s->waitq, wait); if (atomic_read(&s->q_full.depth)) - return res | POLLIN | POLLRDNORM; + return res | EPOLLIN | EPOLLRDNORM; if (eof) - return res | POLLHUP; + return res | EPOLLHUP; return res; } diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 42b42824382cf..f9bee36f1cadb 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c @@ -745,9 +745,9 @@ static __poll_t ts_poll(struct file *file, poll_table *wait) poll_wait(file, &input->dma->wq, wait); poll_wait(file, &output->dma->wq, wait); if (ddb_input_avail(input) >= 188) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (ddb_output_free(output) >= 188) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c index 4aa7735072017..6196daae4b3e0 100644 --- a/drivers/media/pci/ivtv/ivtv-fileops.c +++ b/drivers/media/pci/ivtv/ivtv-fileops.c @@ -747,7 +747,7 @@ __poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait) /* Turn off the old-style vsync events */ clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); if (v4l2_event_pending(&id->fh)) - res = POLLPRI; + res = EPOLLPRI; } else { /* This is the old-style API which is here only for backwards compatibility. 
*/ @@ -755,12 +755,12 @@ __poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait) set_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags); if (test_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags) || test_bit(IVTV_F_I_EV_DEC_STOPPED, &itv->i_flags)) - res = POLLPRI; + res = EPOLLPRI; } /* Allow write if buffers are available for writing */ if (s->q_free.buffers) - res |= POLLOUT | POLLWRNORM; + res |= EPOLLOUT | EPOLLWRNORM; return res; } @@ -776,7 +776,7 @@ __poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait) /* Start a capture if there is none */ if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) && s->type != IVTV_ENC_STREAM_TYPE_RAD && - (req_events & (POLLIN | POLLRDNORM))) { + (req_events & (EPOLLIN | EPOLLRDNORM))) { int rc; mutex_lock(&itv->serialize_lock); @@ -785,7 +785,7 @@ __poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait) if (rc) { IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n", s->name, rc); - return POLLERR; + return EPOLLERR; } IVTV_DEBUG_FILE("Encoder poll started capture\n"); } @@ -794,14 +794,14 @@ __poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait) IVTV_DEBUG_HI_FILE("Encoder poll\n"); poll_wait(filp, &s->waitq, wait); if (v4l2_event_pending(&id->fh)) - res |= POLLPRI; + res |= EPOLLPRI; else poll_wait(filp, &id->fh.wait, wait); if (s->q_full.length || s->q_io.length) - return res | POLLIN | POLLRDNORM; + return res | EPOLLIN | EPOLLRDNORM; if (eof) - return res | POLLHUP; + return res | EPOLLHUP; return res; } diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index ae83293723bac..dedcdb5734270 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -1430,7 +1430,7 @@ static __poll_t meye_poll(struct file *file, poll_table *wait) mutex_lock(&meye.lock); poll_wait(file, &meye.proc_list, wait); if (kfifo_len(&meye.doneq)) - res |= POLLIN | POLLRDNORM; + res |= EPOLLIN | EPOLLRDNORM; mutex_unlock(&meye.lock); return res; } diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c index e7b31a5b14fdd..32136ebe4f61c 100644 --- a/drivers/media/pci/saa7164/saa7164-encoder.c +++ b/drivers/media/pci/saa7164/saa7164-encoder.c @@ -925,13 +925,13 @@ static __poll_t fops_poll(struct file *file, poll_table *wait) saa7164_histogram_update(&port->poll_interval, port->last_poll_msecs_diff); - if (!(req_events & (POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return mask; if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0) { if (atomic_inc_return(&port->v4l_reader_count) == 1) { if (saa7164_encoder_initialize(port) < 0) - return mask | POLLERR; + return mask | EPOLLERR; saa7164_encoder_start_streaming(port); msleep(200); } @@ -939,7 +939,7 @@ static __poll_t fops_poll(struct file *file, poll_table *wait) /* Pull the first buffer from the used list */ if (!list_empty(&port->list_buf_used.list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c index 6f97c8f2e00d8..64ab91c24c186 100644 --- a/drivers/media/pci/saa7164/saa7164-vbi.c +++ b/drivers/media/pci/saa7164/saa7164-vbi.c @@ -650,7 +650,7 @@ static __poll_t fops_poll(struct file *file, poll_table *wait) /* Pull the first buffer from the used list */ if (!list_empty(&port->list_buf_used.list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c 
index 4d10e2f979d20..4daba76ec240b 100644 --- a/drivers/media/pci/ttpci/av7110_av.c +++ b/drivers/media/pci/ttpci/av7110_av.c @@ -951,15 +951,15 @@ static __poll_t dvb_video_poll(struct file *file, poll_table *wait) poll_wait(file, &av7110->video_events.wait_queue, wait); if (av7110->video_events.eventw != av7110->video_events.eventr) - mask = POLLPRI; + mask = EPOLLPRI; if ((file->f_flags & O_ACCMODE) != O_RDONLY) { if (av7110->playing) { if (FREE_COND) - mask |= (POLLOUT | POLLWRNORM); + mask |= (EPOLLOUT | EPOLLWRNORM); } else { /* if not playing: may play if asked for */ - mask |= (POLLOUT | POLLWRNORM); + mask |= (EPOLLOUT | EPOLLWRNORM); } } @@ -1001,9 +1001,9 @@ static __poll_t dvb_audio_poll(struct file *file, poll_table *wait) if (av7110->playing) { if (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024) - mask |= (POLLOUT | POLLWRNORM); + mask |= (EPOLLOUT | EPOLLWRNORM); } else /* if not playing: may play if asked for */ - mask = (POLLOUT | POLLWRNORM); + mask = (EPOLLOUT | EPOLLWRNORM); return mask; } diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c index 96ca227cf51b6..d8c2f1b34d74c 100644 --- a/drivers/media/pci/ttpci/av7110_ca.c +++ b/drivers/media/pci/ttpci/av7110_ca.c @@ -237,10 +237,10 @@ static __poll_t dvb_ca_poll (struct file *file, poll_table *wait) poll_wait(file, &wbuf->queue, wait); if (!dvb_ringbuffer_empty(rbuf)) - mask |= (POLLIN | POLLRDNORM); + mask |= (EPOLLIN | EPOLLRDNORM); if (dvb_ringbuffer_free(wbuf) > 1024) - mask |= (POLLOUT | POLLWRNORM); + mask |= (EPOLLOUT | EPOLLWRNORM); return mask; } diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c index c464dae0389c9..8d4e7d930a663 100644 --- a/drivers/media/pci/zoran/zoran_driver.c +++ b/drivers/media/pci/zoran/zoran_driver.c @@ -2513,10 +2513,10 @@ zoran_poll (struct file *file, /* we should check whether buffers are ready to be synced on * (w/o waits - O_NONBLOCK) here - * if ready for read (sync), return POLLIN|POLLRDNORM, - * if ready for write (sync), return POLLOUT|POLLWRNORM, - * if error, return POLLERR, - * if no buffers queued or so, return POLLNVAL + * if ready for read (sync), return EPOLLIN|EPOLLRDNORM, + * if ready for write (sync), return EPOLLOUT|EPOLLWRNORM, + * if error, return EPOLLERR, + * if no buffers queued or so, return EPOLLNVAL */ switch (fh->map_mode) { @@ -2536,7 +2536,7 @@ zoran_poll (struct file *file, if (fh->buffers.active != ZORAN_FREE && /* Buffer ready to DQBUF? 
*/ zr->v4l_buffers.buffer[frame].state == BUZ_STATE_DONE) - res |= POLLIN | POLLRDNORM; + res |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&zr->spinlock, flags); break; @@ -2557,9 +2557,9 @@ zoran_poll (struct file *file, if (fh->buffers.active != ZORAN_FREE && zr->jpg_buffers.buffer[frame].state == BUZ_STATE_DONE) { if (fh->map_mode == ZORAN_MAP_MODE_JPG_REC) - res |= POLLIN | POLLRDNORM; + res |= EPOLLIN | EPOLLRDNORM; else - res |= POLLOUT | POLLWRNORM; + res |= EPOLLOUT | EPOLLWRNORM; } spin_unlock_irqrestore(&zr->spinlock, flags); @@ -2570,7 +2570,7 @@ zoran_poll (struct file *file, KERN_ERR "%s: %s - internal error, unknown map_mode=%d\n", ZR_DEVNAME(zr), __func__, fh->map_mode); - res |= POLLERR; + res |= EPOLLERR; } return res; diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index de285a269390a..200c47c69a758 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -1272,9 +1272,9 @@ static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait) __poll_t res = v4l2_ctrl_poll(file, wait); if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) - return POLLERR; + return EPOLLERR; - if (!(req_events & (POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return res; mutex_lock(&dev->lock); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index f15cf24c1c639..d5b94fc0040e4 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1008,7 +1008,7 @@ static __poll_t s5p_mfc_poll(struct file *file, */ if ((!src_q->streaming || list_empty(&src_q->queued_list)) && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { - rc = POLLERR; + rc = EPOLLERR; goto end; } mutex_unlock(&dev->mfc_mutex); @@ -1017,14 +1017,14 @@ static __poll_t s5p_mfc_poll(struct file *file, poll_wait(file, &dst_q->done_wq, wait); mutex_lock(&dev->mfc_mutex); if (v4l2_event_pending(&ctx->fh)) - rc |= POLLPRI; + rc |= EPOLLPRI; spin_lock_irqsave(&src_q->done_lock, flags); if (!list_empty(&src_q->done_list)) src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer, done_entry); if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE || src_vb->state == VB2_BUF_STATE_ERROR)) - rc |= POLLOUT | POLLWRNORM; + rc |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&src_q->done_lock, flags); spin_lock_irqsave(&dst_q->done_lock, flags); if (!list_empty(&dst_q->done_list)) @@ -1032,7 +1032,7 @@ static __poll_t s5p_mfc_poll(struct file *file, done_entry); if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE || dst_vb->state == VB2_BUF_STATE_ERROR)) - rc |= POLLIN | POLLRDNORM; + rc |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&dst_q->done_lock, flags); end: mutex_unlock(&dev->mfc_mutex); diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index 70fc5f01942de..c86dd2fdab84a 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -809,10 +809,10 @@ static __poll_t soc_camera_poll(struct file *file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - __poll_t res = POLLERR; + __poll_t res = EPOLLERR; if (icd->streamer != file) - return POLLERR; + return EPOLLERR; mutex_lock(&ici->host_lock); res = ici->ops->poll(file, pt); diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c index 
fcb7a9f015b6b..f834f7df8cf9f 100644 --- a/drivers/media/platform/vivid/vivid-radio-rx.c +++ b/drivers/media/platform/vivid/vivid-radio-rx.c @@ -142,7 +142,7 @@ ssize_t vivid_radio_rx_read(struct file *file, char __user *buf, __poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait) { - return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait); + return EPOLLIN | EPOLLRDNORM | v4l2_ctrl_poll(file, wait); } int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band) diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c index af4907a197a33..308b13f85dc08 100644 --- a/drivers/media/platform/vivid/vivid-radio-tx.c +++ b/drivers/media/platform/vivid/vivid-radio-tx.c @@ -105,7 +105,7 @@ ssize_t vivid_radio_tx_write(struct file *file, const char __user *buf, __poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait) { - return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait); + return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait); } int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a) diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index af7c68b344d1a..5b82e63885cdd 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -488,14 +488,14 @@ static __poll_t cadet_poll(struct file *file, struct poll_table_struct *wait) __poll_t res = v4l2_ctrl_poll(file, wait); poll_wait(file, &dev->read_queue, wait); - if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) { + if (dev->rdsstat == 0 && (req_events & (EPOLLIN | EPOLLRDNORM))) { mutex_lock(&dev->lock); if (dev->rdsstat == 0) cadet_start_rds(dev); mutex_unlock(&dev->lock); } if (cadet_has_rds_data(dev)) - res |= POLLIN | POLLRDNORM; + res |= EPOLLIN | EPOLLRDNORM; return res; } diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index bff9789ae9bc1..b52e678c6901c 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -1158,15 +1158,15 @@ static __poll_t si476x_radio_fops_poll(struct file *file, __poll_t req_events = poll_requested_events(pts); __poll_t err = v4l2_ctrl_poll(file, pts); - if (req_events & (POLLIN | POLLRDNORM)) { + if (req_events & (EPOLLIN | EPOLLRDNORM)) { if (atomic_read(&radio->core->is_alive)) poll_wait(file, &radio->core->rds_read_queue, pts); if (!atomic_read(&radio->core->is_alive)) - err = POLLHUP; + err = EPOLLHUP; if (!kfifo_is_empty(&radio->core->rds_fifo)) - err = POLLIN | POLLRDNORM; + err = EPOLLIN | EPOLLRDNORM; } return err; diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index f92b0f9241a99..58e9445916026 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c @@ -1104,10 +1104,10 @@ static __poll_t wl1273_fm_fops_poll(struct file *file, poll_wait(file, &radio->read_queue, pts); if (radio->rd_index != radio->wr_index) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } else if (core->mode == WL1273_MODE_TX) { - return POLLOUT | POLLWRNORM; + return EPOLLOUT | EPOLLWRNORM; } return 0; diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c index 5b477b7d6a66d..e0054e0f410df 100644 --- a/drivers/media/radio/si470x/radio-si470x-common.c +++ b/drivers/media/radio/si470x/radio-si470x-common.c @@ -514,7 +514,7 @@ static __poll_t si470x_fops_poll(struct file *file, __poll_t req_events = 
poll_requested_events(pts); __poll_t retval = v4l2_ctrl_poll(file, pts); - if (req_events & (POLLIN | POLLRDNORM)) { + if (req_events & (EPOLLIN | EPOLLRDNORM)) { /* switch on rds reception */ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) si470x_rds_on(radio); @@ -522,7 +522,7 @@ static __poll_t si470x_fops_poll(struct file *file, poll_wait(file, &radio->read_queue, pts); if (radio->rd_index != radio->wr_index) - retval |= POLLIN | POLLRDNORM; + retval |= EPOLLIN | EPOLLRDNORM; } return retval; diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index fd603c1b96bbf..dccdf6558e6ab 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c @@ -112,7 +112,7 @@ static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *p ret = fmc_is_rds_data_available(fmdev, file, pts); mutex_unlock(&fmdev->mutex); if (ret < 0) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index b3544988586e8..cc863044c880a 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -109,7 +109,7 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev) if (LIRC_IS_TIMEOUT(sample) && !fh->send_timeout_reports) continue; if (kfifo_put(&fh->rawir, sample)) - wake_up_poll(&fh->wait_poll, POLLIN | POLLRDNORM); + wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM); } spin_unlock_irqrestore(&dev->lirc_fh_lock, flags); } @@ -130,7 +130,7 @@ void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc) spin_lock_irqsave(&dev->lirc_fh_lock, flags); list_for_each_entry(fh, &dev->lirc_fh, list) { if (kfifo_put(&fh->scancodes, *lsc)) - wake_up_poll(&fh->wait_poll, POLLIN | POLLRDNORM); + wake_up_poll(&fh->wait_poll, EPOLLIN | EPOLLRDNORM); } spin_unlock_irqrestore(&dev->lirc_fh_lock, flags); } @@ -603,15 +603,15 @@ static __poll_t ir_lirc_poll(struct file *file, struct poll_table_struct *wait) poll_wait(file, &fh->wait_poll, wait); if (!rcdev->registered) { - events = POLLHUP | POLLERR; + events = EPOLLHUP | EPOLLERR; } else if (rcdev->driver_type != RC_DRIVER_IR_RAW_TX) { if (fh->rec_mode == LIRC_MODE_SCANCODE && !kfifo_is_empty(&fh->scancodes)) - events = POLLIN | POLLRDNORM; + events = EPOLLIN | EPOLLRDNORM; if (fh->rec_mode == LIRC_MODE_MODE2 && !kfifo_is_empty(&fh->rawir)) - events = POLLIN | POLLRDNORM; + events = EPOLLIN | EPOLLRDNORM; } return events; @@ -779,7 +779,7 @@ void ir_lirc_unregister(struct rc_dev *dev) spin_lock_irqsave(&dev->lirc_fh_lock, flags); list_for_each_entry(fh, &dev->lirc_fh, list) - wake_up_poll(&fh->wait_poll, POLLHUP | POLLERR); + wake_up_poll(&fh->wait_poll, EPOLLHUP | EPOLLERR); spin_unlock_irqrestore(&dev->lirc_fh_lock, flags); cdev_device_del(&dev->lirc_cdev, &dev->lirc_dev); diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c index e7524920c6187..3dfbb545c0e38 100644 --- a/drivers/media/usb/cpia2/cpia2_core.c +++ b/drivers/media/usb/cpia2/cpia2_core.c @@ -2375,7 +2375,7 @@ __poll_t cpia2_poll(struct camera_data *cam, struct file *filp, { __poll_t status = v4l2_ctrl_poll(filp, wait); - if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) && + if ((poll_requested_events(wait) & (EPOLLIN | EPOLLRDNORM)) && !cam->streaming) { /* Start streaming */ cpia2_usb_stream_start(cam, @@ -2385,7 +2385,7 @@ __poll_t cpia2_poll(struct camera_data *cam, struct file *filp, poll_wait(filp, &cam->wq_stream, wait); if 
(cam->curbuff->status == FRAME_READY) - status |= POLLIN | POLLRDNORM; + status |= EPOLLIN | EPOLLRDNORM; return status; } diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index 103e3299b77f1..b80e6857e2eba 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1821,11 +1821,11 @@ static __poll_t mpeg_poll(struct file *file, __poll_t res = 0; if (v4l2_event_pending(&fh->fh)) - res |= POLLPRI; + res |= EPOLLPRI; else poll_wait(file, &fh->fh.wait, wait); - if (!(req_events & (POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return res; mutex_lock(&dev->lock); diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index 271f35208c494..5b321b8ada3ac 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -2018,19 +2018,19 @@ static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait) rc = check_dev(dev); if (rc < 0) - return POLLERR; + return EPOLLERR; rc = res_get(fh); if (unlikely(rc < 0)) - return POLLERR; + return EPOLLERR; if (v4l2_event_pending(&fh->fh)) - res |= POLLPRI; + res |= EPOLLPRI; else poll_wait(filp, &fh->fh.wait, wait); - if (!(req_events & (POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return res; if ((V4L2_BUF_TYPE_VIDEO_CAPTURE == fh->type) || @@ -2040,7 +2040,7 @@ static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait) mutex_unlock(&dev->lock); return res; } - return res | POLLERR; + return res | EPOLLERR; } /* diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 87e18d0c57664..d29773b8f696d 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -1877,14 +1877,14 @@ static __poll_t dev_poll(struct file *file, poll_table *wait) gspca_dbg(gspca_dev, D_FRAM, "poll\n"); - if (req_events & POLLPRI) + if (req_events & EPOLLPRI) ret |= v4l2_ctrl_poll(file, wait); - if (req_events & (POLLIN | POLLRDNORM)) { + if (req_events & (EPOLLIN | EPOLLRDNORM)) { /* if reqbufs is not done, the user would use read() */ if (gspca_dev->memory == GSPCA_MEMORY_NO) { if (read_alloc(gspca_dev, file) != 0) { - ret |= POLLERR; + ret |= EPOLLERR; goto out; } } @@ -1893,17 +1893,17 @@ static __poll_t dev_poll(struct file *file, poll_table *wait) /* check if an image has been received */ if (mutex_lock_interruptible(&gspca_dev->queue_lock) != 0) { - ret |= POLLERR; + ret |= EPOLLERR; goto out; } if (gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i)) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; mutex_unlock(&gspca_dev->queue_lock); } out: if (!gspca_dev->present) - ret |= POLLHUP; + ret |= EPOLLHUP; return ret; } diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 660d4a65401f5..77c3d331ff314 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -528,7 +528,7 @@ static __poll_t hdpvr_poll(struct file *filp, poll_table *wait) struct hdpvr_device *dev = video_drvdata(filp); __poll_t mask = v4l2_ctrl_poll(filp, wait); - if (!(req_events & (POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLIN | EPOLLRDNORM))) return mask; mutex_lock(&dev->io_mutex); @@ -553,7 +553,7 @@ static __poll_t hdpvr_poll(struct file *filp, poll_table *wait) buf = hdpvr_get_next_buffer(dev); } if (buf && buf->status == BUFSTAT_READY) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } 
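Note: every poll hunk in this series follows one pattern. Handlers return a __poll_t mask built from the EPOLL* bit constants instead of the legacy POLL* ones; __poll_t lets sparse type-check the masks, and the EPOLL* values are the same on every architecture, whereas a few POLL* bits (POLLWRNORM, POLLWRBAND and related) have divergent encodings on some architectures (e.g. mips, sparc). The sketch below shows the resulting shape of a typical converted handler. It is a minimal illustration only, not part of this patch; my_dev and my_poll are hypothetical names invented for the example.

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {
	wait_queue_head_t wait;	/* woken by the driver when state changes */
	bool have_data;		/* data ready for read() */
	bool can_write;		/* room available for write() */
};

static __poll_t my_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;
	__poll_t mask = 0;

	/* Register the wait queue; poll handlers must never block here. */
	poll_wait(file, &dev->wait, wait);

	if (dev->have_data)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (dev->can_write)
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

A handler that can observe device removal typically also reports EPOLLHUP | EPOLLERR for that case, as the lirc_dev hunk above and the v4l2-dev hunk below do.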
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index ad6290e1b6999..9fdc57c1658fc 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -1181,19 +1181,19 @@ static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait) int ret; if (fh->fw_mode_flag) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } if (!fh->rhp) { ret = pvr2_v4l2_iosetup(fh); - if (ret) return POLLERR; + if (ret) return EPOLLERR; } poll_wait(file,&fh->wait_data,wait); if (pvr2_ioread_avail(fh->rhp) >= 0) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } return mask; diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 17ad978c01726..22389b56ec246 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -729,10 +729,10 @@ static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait) poll_wait(fp, &dev->wait_frame, wait); if (!is_present(dev)) - return POLLERR; + return EPOLLERR; if (!list_empty(&dev->sio_full)) - return res | POLLIN | POLLRDNORM; + return res | EPOLLIN | EPOLLRDNORM; return res; } diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index 96266fa4738c5..8314d3fa9241b 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -1424,25 +1424,25 @@ __tm6000_poll(struct file *file, struct poll_table_struct *wait) __poll_t res = 0; if (v4l2_event_pending(&fh->fh)) - res = POLLPRI; - else if (req_events & POLLPRI) + res = EPOLLPRI; + else if (req_events & EPOLLPRI) poll_wait(file, &fh->fh.wait, wait); if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) - return res | POLLERR; + return res | EPOLLERR; if (!!is_res_streaming(fh->dev, fh)) - return res | POLLERR; + return res | EPOLLERR; if (!is_res_read(fh->dev, fh)) { /* streaming capture */ if (list_empty(&fh->vb_vidq.stream)) - return res | POLLERR; + return res | EPOLLERR; buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream); poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) - return res | POLLIN | POLLRDNORM; - } else if (req_events & (POLLIN | POLLRDNORM)) { + return res | EPOLLIN | EPOLLRDNORM; + } else if (req_events & (EPOLLIN | EPOLLRDNORM)) { /* read() capture */ return res | videobuf_poll_stream(file, &fh->vb_vidq, wait); } diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index b076571494345..ce08b50b82900 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -3462,7 +3462,7 @@ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) struct v4l2_fh *fh = file->private_data; if (v4l2_event_pending(fh)) - return POLLPRI; + return EPOLLPRI; poll_wait(file, &fh->wait, wait); return 0; } diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index cd8127d3f863b..0301fe426a435 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -334,7 +334,7 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf, static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll) { struct video_device *vdev = video_devdata(filp); - __poll_t res = POLLERR | POLLHUP; + __poll_t res = EPOLLERR | EPOLLHUP; if (!vdev->fops->poll) return DEFAULT_POLLMASK; diff --git 
a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 186156f8952ab..c4f963d96a79d 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -514,10 +514,10 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_fh *fh = file->private_data; if (v4l2_event_pending(fh)) - rc = POLLPRI; - else if (req_events & POLLPRI) + rc = EPOLLPRI; + else if (req_events & EPOLLPRI) poll_wait(file, &fh->wait, wait); - if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM))) + if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))) return rc; } @@ -531,7 +531,7 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, */ if ((!src_q->streaming || list_empty(&src_q->queued_list)) && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { - rc |= POLLERR; + rc |= EPOLLERR; goto end; } @@ -548,7 +548,7 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, */ if (dst_q->last_buffer_dequeued) { spin_unlock_irqrestore(&dst_q->done_lock, flags); - return rc | POLLIN | POLLRDNORM; + return rc | EPOLLIN | EPOLLRDNORM; } poll_wait(file, &dst_q->done_wq, wait); @@ -561,7 +561,7 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, done_entry); if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE || src_vb->state == VB2_BUF_STATE_ERROR)) - rc |= POLLOUT | POLLWRNORM; + rc |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&src_q->done_lock, flags); spin_lock_irqsave(&dst_q->done_lock, flags); @@ -570,7 +570,7 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, done_entry); if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE || dst_vb->state == VB2_BUF_STATE_ERROR)) - rc |= POLLIN | POLLRDNORM; + rc |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&dst_q->done_lock, flags); end: diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c index 28966fa8c6100..c5639817db349 100644 --- a/drivers/media/v4l2-core/v4l2-subdev.c +++ b/drivers/media/v4l2-core/v4l2-subdev.c @@ -476,12 +476,12 @@ static __poll_t subdev_poll(struct file *file, poll_table *wait) struct v4l2_fh *fh = file->private_data; if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) - return POLLERR; + return EPOLLERR; poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) - return POLLPRI; + return EPOLLPRI; return 0; } diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 9a89d3ae170fd..2b3981842b4b7 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ b/drivers/media/v4l2-core/videobuf-core.c @@ -1131,11 +1131,11 @@ __poll_t videobuf_poll_stream(struct file *file, if (!list_empty(&q->stream)) buf = list_entry(q->stream.next, struct videobuf_buffer, stream); - } else if (req_events & (POLLIN | POLLRDNORM)) { + } else if (req_events & (EPOLLIN | EPOLLRDNORM)) { if (!q->reading) __videobuf_read_start(q); if (!q->reading) { - rc = POLLERR; + rc = EPOLLERR; } else if (NULL == q->read_buf) { q->read_buf = list_entry(q->stream.next, struct videobuf_buffer, @@ -1146,7 +1146,7 @@ __poll_t videobuf_poll_stream(struct file *file, buf = q->read_buf; } if (!buf) - rc = POLLERR; + rc = EPOLLERR; if (0 == rc) { poll_wait(file, &buf->done, wait); @@ -1157,10 +1157,10 @@ __poll_t videobuf_poll_stream(struct file *file, case V4L2_BUF_TYPE_VBI_OUTPUT: case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT: case V4L2_BUF_TYPE_SDR_OUTPUT: - rc = POLLOUT | POLLWRNORM; + rc = EPOLLOUT | EPOLLWRNORM; break; 
default: - rc = POLLIN | POLLRDNORM; + rc = EPOLLIN | EPOLLRDNORM; break; } } diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index fcb3a92ae85f8..8ba41073dd89f 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -1267,7 +1267,7 @@ static irqreturn_t ab8500_debug_handler(int irq, void *data) if (irq_abb < num_irqs) irq_count[irq_abb]++; /* - * This makes it possible to use poll for events (POLLPRI | POLLERR) + * This makes it possible to use poll for events (EPOLLPRI | EPOLLERR) * from userspace on sysfs file named */ sprintf(buf, "%d", irq); diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index 0162516f5e57b..bd6ddbdb5cd16 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -378,11 +378,11 @@ __poll_t afu_poll(struct file *file, struct poll_table_struct *poll) spin_lock_irqsave(&ctx->lock, flags); if (ctx_event_pending(ctx)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; else if (ctx->status == CLOSED) /* Only error on closed when there are no futher events pending */ - mask |= POLLERR; + mask |= EPOLLERR; spin_unlock_irqrestore(&ctx->lock, flags); pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask); diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 35693c0a78e28..e9c9ef52c76a8 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -519,9 +519,9 @@ static __poll_t ilo_poll(struct file *fp, poll_table *wait) poll_wait(fp, &data->ccb_waitq, wait); if (is_channel_reset(driver_ccb)) - return POLLERR; + return EPOLLERR; else if (ilo_pkt_recv(data->ilo_hw, driver_ccb)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index e49888eab87d5..e9bb1cfa6a7a2 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c @@ -658,7 +658,7 @@ static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait) poll_wait(file, &lis3->misc_wait, wait); if (atomic_read(&lis3->count)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 505b710291e69..758dc73602d5e 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -551,31 +551,31 @@ static __poll_t mei_poll(struct file *file, poll_table *wait) bool notify_en; if (WARN_ON(!cl || !cl->dev)) - return POLLERR; + return EPOLLERR; dev = cl->dev; mutex_lock(&dev->device_lock); - notify_en = cl->notify_en && (req_events & POLLPRI); + notify_en = cl->notify_en && (req_events & EPOLLPRI); if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) { - mask = POLLERR; + mask = EPOLLERR; goto out; } if (notify_en) { poll_wait(file, &cl->ev_wait, wait); if (cl->notify_ev) - mask |= POLLPRI; + mask |= EPOLLPRI; } - if (req_events & (POLLIN | POLLRDNORM)) { + if (req_events & (EPOLLIN | EPOLLRDNORM)) { poll_wait(file, &cl->rx_wait, wait); if (!list_empty(&cl->rd_completed)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; else mei_cl_read_start(cl, mei_cl_mtu(cl), file); } diff --git a/drivers/misc/mic/cosm/cosm_scif_server.c b/drivers/misc/mic/cosm/cosm_scif_server.c index 85f7d09cc65fd..05a63286741c8 100644 --- a/drivers/misc/mic/cosm/cosm_scif_server.c +++ b/drivers/misc/mic/cosm/cosm_scif_server.c @@ -55,7 +55,7 @@ * message being sent to host SCIF. 
SCIF_DISCNCT message processing on the * host SCIF sets the host COSM SCIF endpoint state to DISCONNECTED and wakes * up the host COSM thread blocked in scif_poll(..) resulting in - * scif_poll(..) returning POLLHUP. + * scif_poll(..) returning EPOLLHUP. * 5. On the card, scif_peer_release_dev is next called which results in an * SCIF_EXIT message being sent to the host and after receiving the * SCIF_EXIT_ACK from the host the peer device teardown on the card is @@ -79,7 +79,7 @@ * processing. This results in the COSM endpoint on the card being closed and * the SCIF host peer device on the card getting unregistered similar to * steps 3, 4 and 5 for the card shutdown case above. scif_poll(..) on the - * host returns POLLHUP as a result. + * host returns EPOLLHUP as a result. * 4. On the host, card peer device unregister and SCIF HW remove(..) also * subsequently complete. * @@ -87,11 +87,11 @@ * ---------- * If a reset is issued after the card has crashed, there is no SCIF_DISCNT * message from the card which would result in scif_poll(..) returning - * POLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE + * EPOLLHUP. In this case when the host SCIF driver sends a SCIF_REMOVE_NODE * message to itself resulting in the card SCIF peer device being unregistered, * this results in a scif_peer_release_dev -> scif_cleanup_scifdev-> * scif_invalidate_ep call sequence which sets the endpoint state to - * DISCONNECTED and results in scif_poll(..) returning POLLHUP. + * DISCONNECTED and results in scif_poll(..) returning EPOLLHUP. */ #define COSM_SCIF_BACKLOG 16 @@ -190,7 +190,7 @@ static void cosm_send_time(struct cosm_device *cdev) /* * Close this cosm_device's endpoint after its peer endpoint on the card has - * been closed. In all cases except MIC card crash POLLHUP on the host is + * been closed. In all cases except MIC card crash EPOLLHUP on the host is * triggered by the client's endpoint being closed. */ static void cosm_scif_close(struct cosm_device *cdev) @@ -252,7 +252,7 @@ void cosm_scif_work(struct work_struct *work) while (1) { pollepd.epd = cdev->epd; - pollepd.events = POLLIN; + pollepd.events = EPOLLIN; /* Drop the mutex before blocking in scif_poll(..) 
*/ mutex_unlock(&cdev->cosm_mutex); @@ -266,11 +266,11 @@ void cosm_scif_work(struct work_struct *work) } /* There is a message from the card */ - if (pollepd.revents & POLLIN) + if (pollepd.revents & EPOLLIN) cosm_scif_recv(cdev); /* The peer endpoint is closed or this endpoint disconnected */ - if (pollepd.revents & POLLHUP) { + if (pollepd.revents & EPOLLHUP) { cosm_scif_close(cdev); break; } diff --git a/drivers/misc/mic/cosm_client/cosm_scif_client.c b/drivers/misc/mic/cosm_client/cosm_scif_client.c index aa530fcceaa99..beafc0da40278 100644 --- a/drivers/misc/mic/cosm_client/cosm_scif_client.c +++ b/drivers/misc/mic/cosm_client/cosm_scif_client.c @@ -160,7 +160,7 @@ static int cosm_scif_client(void *unused) while (!kthread_should_stop()) { pollepd.epd = client_epd; - pollepd.events = POLLIN; + pollepd.events = EPOLLIN; rc = scif_poll(&pollepd, 1, COSM_HEARTBEAT_SEND_MSEC); if (rc < 0) { @@ -171,7 +171,7 @@ static int cosm_scif_client(void *unused) continue; } - if (pollepd.revents & POLLIN) + if (pollepd.revents & EPOLLIN) cosm_client_recv(); msg.id = COSM_MSG_HEARTBEAT; diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c index 8a3e48ec37dd6..7b2dddcdd46d5 100644 --- a/drivers/misc/mic/scif/scif_api.c +++ b/drivers/misc/mic/scif/scif_api.c @@ -1328,7 +1328,7 @@ __scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep) if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED || ep->conn_err) - mask |= POLLOUT; + mask |= EPOLLOUT; goto exit; } } @@ -1338,34 +1338,34 @@ __scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep) _scif_poll_wait(f, &ep->conwq, wait, ep); if (ep->state == SCIFEP_LISTENING) { if (ep->conreqcnt) - mask |= POLLIN; + mask |= EPOLLIN; goto exit; } } /* Endpoint is connected or disconnected */ if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) { - if (poll_requested_events(wait) & POLLIN) + if (poll_requested_events(wait) & EPOLLIN) _scif_poll_wait(f, &ep->recvwq, wait, ep); - if (poll_requested_events(wait) & POLLOUT) + if (poll_requested_events(wait) & EPOLLOUT) _scif_poll_wait(f, &ep->sendwq, wait, ep); if (ep->state == SCIFEP_CONNECTED || ep->state == SCIFEP_DISCONNECTED) { /* Data can be read without blocking */ if (scif_rb_count(&ep->qp_info.qp->inbound_q, 1)) - mask |= POLLIN; + mask |= EPOLLIN; /* Data can be written without blocking */ if (scif_rb_space(&ep->qp_info.qp->outbound_q)) - mask |= POLLOUT; - /* Return POLLHUP if endpoint is disconnected */ + mask |= EPOLLOUT; + /* Return EPOLLHUP if endpoint is disconnected */ if (ep->state == SCIFEP_DISCONNECTED) - mask |= POLLHUP; + mask |= EPOLLHUP; goto exit; } } - /* Return POLLERR if the endpoint is in none of the above states */ - mask |= POLLERR; + /* Return EPOLLERR if the endpoint is in none of the above states */ + mask |= EPOLLERR; exit: spin_unlock(&ep->lock); return mask; @@ -1398,10 +1398,10 @@ scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs) pt = &table.pt; while (1) { for (i = 0; i < nfds; i++) { - pt->_key = ufds[i].events | POLLERR | POLLHUP; + pt->_key = ufds[i].events | EPOLLERR | EPOLLHUP; mask = __scif_pollfd(ufds[i].epd->anon, pt, ufds[i].epd); - mask &= ufds[i].events | POLLERR | POLLHUP; + mask &= ufds[i].events | EPOLLERR | EPOLLHUP; if (mask) { count++; pt->_qproc = NULL; diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c index 01d1f2ba7bb82..cbc8ebcff5cfe 100644 --- a/drivers/misc/mic/vop/vop_vringh.c +++ 
b/drivers/misc/mic/vop/vop_vringh.c @@ -1010,7 +1010,7 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg) } /* - * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and + * We return EPOLLIN | EPOLLOUT from poll when new buffers are enqueued, and * not when previously enqueued buffers may be available. This means that * in the card->host (TX) path, when userspace is unblocked by poll it * must drain all available descriptors or it can stall. @@ -1022,15 +1022,15 @@ static __poll_t vop_poll(struct file *f, poll_table *wait) mutex_lock(&vdev->vdev_mutex); if (vop_vdev_inited(vdev)) { - mask = POLLERR; + mask = EPOLLERR; goto done; } poll_wait(f, &vdev->waitq, wait); if (vop_vdev_inited(vdev)) { - mask = POLLERR; + mask = EPOLLERR; } else if (vdev->poll_wake) { vdev->poll_wake = 0; - mask = POLLIN | POLLOUT; + mask = EPOLLIN | EPOLLOUT; } done: mutex_unlock(&vdev->vdev_mutex); diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index c90c1a578d2f1..2dd2db9bc1c90 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -215,9 +215,9 @@ static unsigned int afu_poll(struct file *file, struct poll_table_struct *wait) mutex_unlock(&ctx->status_mutex); if (afu_events_pending(ctx)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; else if (closed) - mask = POLLERR; + mask = EPOLLERR; return mask; } @@ -277,7 +277,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count, struct ocxl_context *ctx = file->private_data; struct ocxl_kernel_event_header header; ssize_t rc; - size_t used = 0; + ssize_t used = 0; DEFINE_WAIT(event_wait); memset(&header, 0, sizeof(header)); diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 8fa68cf308e02..b084245f6238e 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -265,9 +265,9 @@ static __poll_t phantom_poll(struct file *file, poll_table *wait) poll_wait(file, &dev->wait, wait); if (!(dev->status & PHB_RUNNING)) - mask = POLLERR; + mask = EPOLLERR; else if (atomic_read(&dev->counter)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c index 6640e76515339..83e0c95d20a47 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -182,7 +182,7 @@ static __poll_t vmci_host_poll(struct file *filp, poll_table *wait) if (context->pending_datagrams > 0 || vmci_handle_arr_get_size( context->pending_doorbell_array) > 0) { - mask = POLLIN; + mask = EPOLLIN; } spin_unlock(&context->lock); } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 0eae619419d9d..620c2d90a646f 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -874,7 +874,6 @@ config MMC_CQHCI config MMC_TOSHIBA_PCI tristate "Toshiba Type A SD/MMC Card Interface Driver" depends on PCI - help config MMC_BCM2835 tristate "Broadcom BCM2835 SDHOST MMC Controller support" diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 229dc18f0581b..768972af8b853 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host) char pio_limit_string[20]; int ret; - mmc->f_max = host->max_clk; + if (!mmc->f_max || mmc->f_max > host->max_clk) + mmc->f_max = host->max_clk; mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; mmc->max_busy_timeout = ~0 / (mmc->f_max / 
1000); diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 22438ebfe4e62..4f972b879fe6f 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct meson_host *host = mmc_priv(mmc); - int ret; - - /* - * If this is the initial tuning, try to get a sane Rx starting - * phase before doing the actual tuning. - */ - if (!mmc->doing_retune) { - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); - - if (ret) - return ret; - } - - ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); - if (ret) - return ret; return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); } @@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (!IS_ERR(mmc->supply.vmmc)) mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); - /* Reset phases */ + /* Reset rx phase */ clk_set_phase(host->rx_clk, 0); - clk_set_phase(host->tx_clk, 270); break; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index e6b8c59f2c0da..736ac887303c8 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -328,7 +328,7 @@ config MTD_NAND_MARVELL tristate "NAND controller support on Marvell boards" depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ COMPILE_TEST - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA help This enables the NAND flash controller driver for Marvell boards, including: diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 80d31a58e558c..f367144f3c6f3 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) if (mtd->oobsize > 64) mtd->oobsize = 64; - /* - * mtd->ecclayout is not specified here because we're using the - * default large page ECC layout defined in NAND core. 
- */ + /* Use default large page ECC layout defined in NAND core */ + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); if (chip->ecc.strength == 32) { nfc->ecc_mode = ECC_60_BYTE; chip->ecc.bytes = 60; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 7d1e4e2aaad0c..ce1eed7a6d63b 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -213,7 +213,7 @@ struct rx_tx_queue_stats { struct q_desc_mem { dma_addr_t dma; u64 size; - u16 q_len; + u32 q_len; dma_addr_t phys_base; void *base; void *unalign_base; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 1ca2a39ed0f85..56bc626ef0068 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5166,7 +5166,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->regs = regs; err = t4_wait_dev_ready(regs); if (err < 0) - goto out_unmap_bar0; + goto out_free_adapter; /* We control everything through one PF */ whoami = readl(regs + PL_WHOAMI_A); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index afaf29b201dc6..27447260215d1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -354,6 +354,8 @@ static void release_stats_buffers(struct ibmvnic_adapter *adapter) { kfree(adapter->tx_stats_buffers); kfree(adapter->rx_stats_buffers); + adapter->tx_stats_buffers = NULL; + adapter->rx_stats_buffers = NULL; } static int init_stats_buffers(struct ibmvnic_adapter *adapter) @@ -599,6 +601,8 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter) kfree(adapter->vpd->buff); kfree(adapter->vpd); + + adapter->vpd = NULL; } static void release_tx_pools(struct ibmvnic_adapter *adapter) @@ -909,6 +913,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { dev_err(dev, "Could not map VPD buffer\n"); kfree(adapter->vpd->buff); + adapter->vpd->buff = NULL; return -ENOMEM; } @@ -1414,10 +1419,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) hdrs += 2; } /* determine if l2/3/4 headers are sent to firmware */ - if ((*hdrs >> 7) & 1 && - (skb->protocol == htons(ETH_P_IP) || - skb->protocol == htons(ETH_P_IPV6) || - skb->protocol == htons(ETH_P_ARP))) { + if ((*hdrs >> 7) & 1) { build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); tx_crq.v1.n_crq_elem = num_entries; tx_buff->indir_arr[0] = tx_crq; @@ -1639,6 +1641,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, return rc; } else if (adapter->req_rx_queues != old_num_rx_queues || adapter->req_tx_queues != old_num_tx_queues) { + adapter->map_id = 1; release_rx_pools(adapter); release_tx_pools(adapter); init_rx_pools(netdev); @@ -1831,7 +1834,8 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget) u16 offset; u8 flags = 0; - if (unlikely(adapter->resetting)) { + if (unlikely(adapter->resetting && + adapter->reset_reason != VNIC_RESET_NON_FATAL)) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); napi_complete_done(napi, frames_processed); return frames_processed; @@ -2908,8 +2912,12 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, cpu_to_be64(u64_crq[1])); if (rc) { - if (rc == H_CLOSED) + if (rc == H_CLOSED) { dev_warn(dev, "CRQ Queue closed\n"); + if (adapter->resetting) + ibmvnic_reset(adapter, VNIC_RESET_FATAL); + } + 
dev_warn(dev, "Send error (rc=%d)\n", rc); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f95ce9b5e4fbe..e31adbc75f9cc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1785,7 +1785,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; u16 sections = 0; u8 netdev_tc = 0; - u16 numtc = 0; + u16 numtc = 1; u16 qcount; u8 offset; u16 qmap; @@ -1795,9 +1795,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; + /* Number of queues per enabled TC */ + num_tc_qps = vsi->alloc_queue_pairs; if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { /* Find numtc from enabled TC bitmap */ - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (enabled_tc & BIT(i)) /* TC is enabled */ numtc++; } @@ -1805,18 +1807,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); numtc = 1; } - } else { - /* At least TC0 is enabled in non-DCB, non-MQPRIO case */ - numtc = 1; + num_tc_qps = num_tc_qps / numtc; + num_tc_qps = min_t(int, num_tc_qps, + i40e_pf_get_max_q_per_tc(pf)); } vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; - /* Number of queues per enabled TC */ - qcount = vsi->alloc_queue_pairs; - - num_tc_qps = qcount / numtc; - num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); /* Do not allow use more TC queue pairs than MSI-X vectors exist */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -1831,9 +1828,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, switch (vsi->type) { case I40E_VSI_MAIN: - qcount = min_t(int, pf->alloc_rss_size, - num_tc_qps); - break; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED)) || + vsi->tc_config.enabled_tc != 1) { + qcount = min_t(int, pf->alloc_rss_size, + num_tc_qps); + break; + } case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 322027792fe81..34e98aa6b9562 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -35,6 +35,7 @@ #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nffw.h" +#include "../nfpcore/nfp_nsp.h" #include "../nfp_app.h" #include "../nfp_main.h" #include "../nfp_net.h" @@ -87,9 +88,20 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) static int nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { + struct nfp_pf *pf = app->pf; struct nfp_bpf_vnic *bv; int err; + if (!pf->eth_tbl) { + nfp_err(pf->cpp, "No ETH table\n"); + return -EINVAL; + } + if (pf->max_data_vnics != pf->eth_tbl->count) { + nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", + pf->max_data_vnics, pf->eth_tbl->count); + return -EINVAL; + } + bv = kzalloc(sizeof(*bv), GFP_KERNEL); if (!bv) return -ENOMEM; @@ -170,6 +182,7 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, return err; bv->tc_prog = cls_bpf->prog; + nn->port->tc_offload_cnt = !!bv->tc_prog; return 0; } @@ -207,13 +220,6 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, } } -static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) -{ - struct 
nfp_bpf_vnic *bv = nn->app_priv; - - return !!bv->tc_prog; -} - static int nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu) { @@ -417,7 +423,6 @@ const struct nfp_app_type app_bpf = { .ctrl_msg_rx = nfp_bpf_ctrl_msg_rx, .setup_tc = nfp_bpf_setup_tc, - .tc_busy = nfp_bpf_tc_busy, .bpf = nfp_ndo_bpf, .xdp_offload = nfp_bpf_xdp_offload, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 08c4c6dc5f7f2..eb5c13dea8f59 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -349,6 +349,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow, bool egress) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; + struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; @@ -390,6 +391,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, INIT_HLIST_NODE(&flow_pay->link); flow_pay->tc_flower_cookie = flow->cookie; hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie); + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. */ kfree(key_layer); @@ -421,6 +423,7 @@ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flow) { + struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_fl_payload *nfp_flow; int err; @@ -442,6 +445,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, err_free_flow: hash_del_rcu(&nfp_flow->link); + port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 437964afa8eef..20546ae679090 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -92,7 +92,6 @@ extern const struct nfp_app_type app_flower; * @stop: stop application logic * @ctrl_msg_rx: control message handler * @setup_tc: setup TC ndo - * @tc_busy: TC HW offload busy (rules loaded) * @bpf: BPF ndo offload-related calls * @xdp_offload: offload an XDP program * @eswitch_mode_get: get SR-IOV eswitch mode @@ -135,7 +134,6 @@ struct nfp_app_type { int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); - bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*bpf)(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *xdp); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, @@ -301,13 +299,6 @@ static inline bool nfp_app_has_tc(struct nfp_app *app) return app && app->type->setup_tc; } -static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn) -{ - if (!app || !app->type->tc_busy) - return false; - return app->type->tc_busy(app, nn); -} - static inline int nfp_app_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 3f6952b66a497..1e597600c6938 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -107,7 +107,7 @@ u16 immed_get_value(u64 instr) if (!unreg_is_imm(reg)) reg = FIELD_GET(OP_IMMED_B_SRC, instr); - return 
(reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr); + return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8; } void immed_set_value(u64 *instr, u16 immed) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index cc570bb6563c6..ab301d56430bc 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -649,3 +649,4 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); MODULE_AUTHOR("Netronome Systems "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver."); +MODULE_VERSION(UTS_RELEASE); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index c0fd351c86b10..a05be0ab27134 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3210,10 +3210,9 @@ static int nfp_net_set_features(struct net_device *netdev, new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER; } - if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) { - nn_err(nn, "Cannot disable HW TC offload while in use\n"); - return -EBUSY; - } + err = nfp_port_set_features(netdev, features); + if (err) + return err; nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n", netdev->features, features, changed); @@ -3734,7 +3733,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) netdev->features = netdev->hw_features; - if (nfp_app_has_tc(nn->app)) + if (nfp_app_has_tc(nn->app) && nn->port) netdev->hw_features |= NETIF_F_HW_TC; /* Advertise but disable TSO by default. */ @@ -3751,6 +3750,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = nn->max_mtu; + netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; + netif_carrier_off(netdev); nfp_net_set_ethtool_ops(netdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index eeecef2caac6f..4499a73330784 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -59,9 +59,12 @@ #define NFP_NET_RX_OFFSET 32 /** - * Maximum header size supported for LSO frames + * LSO parameters + * %NFP_NET_LSO_MAX_HDR_SZ: Maximum header size supported for LSO frames + * %NFP_NET_LSO_MAX_SEGS: Maximum number of segments LSO frame can produce */ #define NFP_NET_LSO_MAX_HDR_SZ 255 +#define NFP_NET_LSO_MAX_SEGS 64 /** * Prepend field types diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index f67da6bde9da3..619570524d2a4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -265,6 +265,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, .ndo_get_vf_config = nfp_app_get_vf_config, .ndo_set_vf_link_state = nfp_app_set_vf_link_state, + .ndo_set_features = nfp_port_set_features, }; static void nfp_repr_clean(struct nfp_repr *repr) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 34a6e035fe9a2..7bd8be5c833b0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -32,6 +32,7 @@ */ #include +#include #include #include "nfpcore/nfp_cpp.h" @@ -100,6 +101,23 @@ int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, return 
nfp_app_setup_tc(port->app, netdev, type, type_data); } +int nfp_port_set_features(struct net_device *netdev, netdev_features_t features) +{ + struct nfp_port *port; + + port = nfp_port_from_netdev(netdev); + if (!port) + return 0; + + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + port->tc_offload_cnt) { + netdev_err(netdev, "Cannot disable HW TC offload while offloads active\n"); + return -EBUSY; + } + + return 0; +} + struct nfp_port * nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 21bd4aa326468..fa7e669a969c6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -72,6 +72,8 @@ enum nfp_port_flags { * @netdev: backpointer to associated netdev * @type: what port type does the entity represent * @flags: port flags + * @tc_offload_cnt: number of active TC offloads, how offloads are counted + * is not defined, use as a boolean * @app: backpointer to the app structure * @dl_port: devlink port structure * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme @@ -87,6 +89,7 @@ struct nfp_port { enum nfp_port_type type; unsigned long flags; + unsigned long tc_offload_cnt; struct nfp_app *app; @@ -121,6 +124,9 @@ static inline bool nfp_port_is_vnic(const struct nfp_port *port) return port->type == NFP_PORT_PF_PORT || port->type == NFP_PORT_VF_PORT; } +int +nfp_port_set_features(struct net_device *netdev, netdev_features_t features); + struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); struct nfp_port * nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 540d21786a43b..ef10baf141862 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -74,8 +74,6 @@ static void dwmac1000_core_init(struct mac_device_info *hw, /* Mask GMAC interrupts */ value = GMAC_INT_DEFAULT_MASK; - if (hw->pmt) - value &= ~GMAC_INT_DISABLE_PMT; if (hw->pcs) value &= ~GMAC_INT_DISABLE_PCS; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 789dad8a07b5c..7761a26ec9c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -98,7 +98,7 @@ #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ GMAC_INT_PCS_ANE) -#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN) +#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN) enum dwmac4_irq_status { time_stamp_irq = 0x00001000, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index ed222b20fcf19..63795ecafc8dc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -61,10 +61,9 @@ static void dwmac4_core_init(struct mac_device_info *hw, writel(value, ioaddr + GMAC_CONFIG); - /* Mask GMAC interrupts */ - value = GMAC_INT_DEFAULT_MASK; - if (hw->pmt) - value |= GMAC_INT_PMT_EN; + /* Enable GMAC interrupts */ + value = GMAC_INT_DEFAULT_ENABLE; + if (hw->pcs) value |= GMAC_PCS_IRQ_DEFAULT; @@ -572,10 +571,12 @@ static int dwmac4_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { void __iomem *ioaddr = 
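nfp_port_set_features() above detects the HW TC bit being cleared with an integer comparison: both sides are either zero or the same single-bit mask, so "old > new" holds exactly when the bit is currently set and the new feature set drops it. A sketch of the trick, with a stand-in for NETIF_F_HW_TC:

    #include <stdbool.h>
    #include <stdint.h>

    #define HW_TC_BIT (1ULL << 16) /* stand-in for NETIF_F_HW_TC */

    static bool clearing_hw_tc(uint64_t old_feats, uint64_t new_feats)
    {
            /* masked values are 0 or HW_TC_BIT; ">" means set -> clear */
            return (old_feats & HW_TC_BIT) > (new_feats & HW_TC_BIT);
    }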
hw->pcsr; - u32 intr_status; + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + u32 intr_enable = readl(ioaddr + GMAC_INT_EN); int ret = 0; - intr_status = readl(ioaddr + GMAC_INT_STATUS); + /* Discard disabled bits */ + intr_status &= intr_enable; /* Not used events (e.g. MMC interrupts) are not handled. */ if ((intr_status & mmc_tx_irq)) diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index b2caf5132bd2b..7b982e02ea3a4 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Sun network device configuration # diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 113bd57e2ea04..9020b084b9538 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h index 882ce168a799b..13f3860496a86 100644 --- a/drivers/net/ethernet/sun/cassini.h +++ b/drivers/net/ethernet/sun/cassini.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. * diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 5ea037672e6f2..a5dd627fe2f92 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* ldmvsw.c: Sun4v LDOM Virtual Switch Driver. * * Copyright (C) 2016-2017 Oracle. All rights reserved. diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 06001bacbe0fe..8dd545fed30d2 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* niu.c: Neptune ethernet driver. * * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net) diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 0b1f41f6bceba..f047b27971564 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. * * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index a7afcee3c5ae6..7a16d40a72d13 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ * sungem.c: Sun GEM ethernet driver. * diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 0431f1e5f5112..06da2f59fcbff 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, * auto carrier detecting ethernet driver. Also known as the * "Happy Meal Ethernet" found on SunSwift SBUS cards. 
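The dwmac4 interrupt hunk above reads both the raw status and the enable mask and keeps only enabled sources, so status bits latched for sources that were never unmasked cannot trigger handling. The idiom in isolation, assuming two 32-bit registers:

    #include <stdint.h>

    static uint32_t pending_irqs(uint32_t status_reg, uint32_t enable_reg)
    {
            /* discard status bits whose interrupt is not enabled */
            return status_reg & enable_reg;
    }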
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index a6bcdcdd947e3..7fe0d5e339221 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver. * Once again I am out to prove that every ethernet * controller out there can be most efficiently programmed diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 27fb226388852..63d3d6b215f30 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 8aa3ce46bb81e..d8f4c3f281505 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 3c85a0885f9bb..1b1b78fdc1384 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1636,6 +1636,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, q_idx = q_idx % cpsw->tx_ch_num; txch = cpsw->txv[q_idx].ch; + txq = netdev_get_tx_queue(ndev, q_idx); ret = cpsw_tx_packet_submit(priv, skb, txch); if (unlikely(ret != 0)) { cpsw_err(priv, tx_err, "desc submit failed\n"); @@ -1646,15 +1647,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, * tell the kernel to stop sending us tx frames. 
*/ if (unlikely(!cpdma_check_free_tx_desc(txch))) { - txq = netdev_get_tx_queue(ndev, q_idx); netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); } return NETDEV_TX_OK; fail: ndev->stats.tx_dropped++; - txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb)); netif_tx_stop_queue(txq); + + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (cpdma_check_free_tx_desc(txch)) + netif_tx_wake_queue(txq); + return NETDEV_TX_BUSY; } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index e412dfdda7ddd..377af43b81b3d 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2648,11 +2648,11 @@ static __poll_t ca8210_test_int_poll( poll_wait(filp, &priv->test.readq, ptable); if (!kfifo_is_empty(&priv->test.up_fifo)) - return_flags |= (POLLIN | POLLRDNORM); + return_flags |= (EPOLLIN | EPOLLRDNORM); if (wait_event_interruptible( priv->test.readq, !kfifo_is_empty(&priv->test.up_fifo))) { - return POLLERR; + return EPOLLERR; } return return_flags; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index f3313a1295312..e3e29c2b028b5 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -822,7 +822,7 @@ void phy_start(struct phy_device *phydev) phy_resume(phydev); /* make sure interrupts are re-enabled for the PHY */ - if (phydev->irq != PHY_POLL) { + if (phy_interrupt_is_valid(phydev)) { err = phy_enable_interrupts(phydev); if (err < 0) break; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index ef6b2126b23a2..255a5def56e94 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -539,11 +539,11 @@ static __poll_t ppp_poll(struct file *file, poll_table *wait) if (!pf) return 0; poll_wait(file, &pf->rwait, wait); - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; if (skb_peek(&pf->rq)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (pf->dead) - mask |= POLLHUP; + mask |= EPOLLHUP; else if (pf->kind == INTERFACE) { /* see comment in ppp_read */ struct ppp *ppp = PF_TO_PPP(pf); @@ -551,7 +551,7 @@ static __poll_t ppp_poll(struct file *file, poll_table *wait) ppp_recv_lock(ppp); if (ppp->n_channels == 0 && (ppp->flags & SC_LOOP_TRAFFIC) == 0) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; ppp_recv_unlock(ppp); } diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 0a5ed004781cb..9b6cb780affec 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -377,7 +377,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) } wake_up: - wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND); + wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND); return RX_HANDLER_CONSUMED; drop: @@ -487,7 +487,7 @@ static void tap_sock_write_space(struct sock *sk) wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) - wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND); + wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); } static void tap_sock_destruct(struct sock *sk) @@ -572,7 +572,7 @@ static int tap_release(struct inode *inode, struct file *file) static __poll_t tap_poll(struct file *file, poll_table *wait) { struct tap_queue *q = file->private_data; - __poll_t mask = POLLERR; + __poll_t mask = EPOLLERR; if (!q) goto out; @@ -581,12 +581,12 @@ static __poll_t 
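The cpsw hunks above close a classic lost-wakeup race: stop the queue when descriptors run out, force the stop to be visible with a full barrier, then re-check and wake if a completion freed descriptors in the window. A userspace model with C11 atomics and hypothetical names, seq-cst standing in for smp_mb__after_atomic():

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool queue_stopped;
    static atomic_int  free_descs;

    static void tx_submit_tail(void)
    {
            if (atomic_load(&free_descs) == 0) {
                    atomic_store(&queue_stopped, true);
                    /* make the stop visible before re-reading the
                     * descriptor count */
                    atomic_thread_fence(memory_order_seq_cst);
                    if (atomic_load(&free_descs) > 0)
                            atomic_store(&queue_stopped, false); /* wake */
            }
    }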
tap_poll(struct file *file, poll_table *wait) poll_wait(file, &q->wq.wait, wait); if (!ptr_ring_empty(&q->ring)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (sock_writeable(&q->sk) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && sock_writeable(&q->sk))) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; out: return mask; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0dc66e4fbb2c6..81e6cc951e7fc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -181,6 +181,7 @@ struct tun_file { struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; + int xdp_pending_pkts; }; struct tun_flow_entry { @@ -1436,7 +1437,7 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) __poll_t mask = 0; if (!tun) - return POLLERR; + return EPOLLERR; sk = tfile->socket.sk; @@ -1445,16 +1446,16 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) poll_wait(file, sk_sleep(sk), wait); if (!ptr_ring_empty(&tfile->tx_ring)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (tun->dev->flags & IFF_UP && (sock_writeable(sk) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && sock_writeable(sk)))) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) - mask = POLLERR; + mask = EPOLLERR; tun_put(tun); return mask; @@ -1665,6 +1666,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, case XDP_REDIRECT: get_page(alloc_frag->page); alloc_frag->offset += buflen; + ++tfile->xdp_pending_pkts; err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); if (err) goto err_redirect; @@ -1986,6 +1988,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK, false); + if (tfile->xdp_pending_pkts) { + tfile->xdp_pending_pkts = 0; + xdp_do_flush_map(); + } + tun_put(tun); return result; } @@ -2303,8 +2310,8 @@ static void tun_sock_write_space(struct sock *sk) wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) - wake_up_interruptible_sync_poll(wqueue, POLLOUT | - POLLWRNORM | POLLWRBAND); + wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | + EPOLLWRNORM | EPOLLWRBAND); tfile = container_of(sk, struct tun_file, sk); kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); @@ -2322,6 +2329,13 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); + + if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || + !(m->msg_flags & MSG_MORE)) { + tfile->xdp_pending_pkts = 0; + xdp_do_flush_map(); + } + tun_put(tun); return ret; } @@ -3153,6 +3167,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); + tfile->xdp_pending_pkts = 0; return 0; } diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index b0fdc10236193..f3ec13b80b20c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -90,6 +90,35 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, }, + { + .id = QCA988X_HW_2_0_VERSION, + .dev_id = QCA988X_2_0_DEVICE_ID_UBNT, + .name = "qca988x hw2.0 ubiquiti", + .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 
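The tun hunks above batch XDP redirect flushes instead of flushing per packet: redirected packets are counted, and the flush runs once a batch completes (NAPI_POLL_WEIGHT) or the sender signals no more data. A sketch of the policy; flush_redirects() stands in for xdp_do_flush_map():

    #define BATCH_LIMIT 64 /* NAPI_POLL_WEIGHT in the driver */

    struct tx_ctx {
            int xdp_pending_pkts;
    };

    static void flush_redirects(void)
    {
            /* expensive: pushes queued redirects out to the devices */
    }

    static void maybe_flush(struct tx_ctx *ctx, int more_coming)
    {
            if (ctx->xdp_pending_pkts >= BATCH_LIMIT || !more_coming) {
                    ctx->xdp_pending_pkts = 0;
                    flush_redirects();
            }
    }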
7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .cal_data_len = 2116, + .fw = { + .dir = QCA988X_HW_2_0_FW_DIR, + .board = QCA988X_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA988X_BOARD_DATA_SZ, + .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, + }, + .hw_ops = &qca988x_ops, + .decap_align_bytes = 4, + .spectral_bin_discard = 0, + .vht160_mcs_rx_highest = 0, + .vht160_mcs_tx_highest = 0, + .n_cipher_suites = 8, + .num_peers = TARGET_TLV_NUM_PEERS, + .ast_skid_limit = 0x10, + .num_wds_entries = 0x20, + .target_64bit = false, + .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + }, { .id = QCA9887_HW_1_0_VERSION, .dev_id = QCA9887_1_0_DEVICE_ID, @@ -1276,10 +1305,7 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, len -= sizeof(*hdr); data = hdr->data; - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - - if (len < ie_len) { + if (len < ALIGN(ie_len, 4)) { ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", ie_id, ie_len, len); ret = -EINVAL; @@ -1318,6 +1344,9 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, goto out; } + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + len -= ie_len; data += ie_len; } @@ -1448,9 +1477,6 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, len -= sizeof(*hdr); data += sizeof(*hdr); - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - if (len < ie_len) { ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", ie_id, len, ie_len); @@ -1556,6 +1582,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name, break; } + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + len -= ie_len; data += ie_len; } diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 4dde126dab171..7173b3743b43b 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -616,7 +617,7 @@ static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, - .len = 0x90000, + .len = 0xa8000, .name = "DRAM", .section_table = { .sections = NULL, diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 6d836a26272fe..554cd7856cb6e 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
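The ath10k hunks above fix the board/firmware IE walk: the remaining length must be validated against the 4-byte padded IE size up front, while ie_len itself stays unpadded for parsing, and the cursor jumps the padding only afterwards. A simplified sketch of that TLV loop, with an illustrative header layout and no endianness handling:

    #include <stddef.h>
    #include <stdint.h>

    #define ALIGN4(x) (((x) + 3u) & ~3u)

    struct ie_hdr {
            uint32_t id;
            uint32_t len;
            uint8_t data[];
    };

    static int walk_ies(const uint8_t *buf, size_t len)
    {
            while (len >= sizeof(struct ie_hdr)) {
                    const struct ie_hdr *hdr = (const void *)buf;
                    size_t ie_len = hdr->len;

                    buf += sizeof(*hdr);
                    len -= sizeof(*hdr);
                    if (len < ALIGN4(ie_len)) /* padded size up front */
                            return -1;

                    /* ...parse ie_len bytes at buf here... */

                    buf += ALIGN4(ie_len); /* skip the padding last */
                    len -= ALIGN4(ie_len);
            }
            return 0;
    }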
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -81,6 +82,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar) void ath10k_debug_print_board_info(struct ath10k *ar) { char boardinfo[100]; + const struct firmware *board; + u32 crc; if (ar->id.bmi_ids_valid) scnprintf(boardinfo, sizeof(boardinfo), "%d:%d", @@ -88,11 +91,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar) else scnprintf(boardinfo, sizeof(boardinfo), "N/A"); + board = ar->normal_mode_fw.board; + if (!IS_ERR_OR_NULL(board)) + crc = crc32_le(0, board->data, board->size); + else + crc = 0; + ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x", ar->bd_api, boardinfo, - crc32_le(0, ar->normal_mode_fw.board->data, - ar->normal_mode_fw.board->size)); + crc); } void ath10k_debug_print_boot_info(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 6203bc65799be..413b1b4321f77 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -22,6 +22,7 @@ #define ATH10K_FW_DIR "ath10k" +#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac) #define QCA988X_2_0_DEVICE_ID (0x003c) #define QCA6164_2_1_DEVICE_ID (0x0041) #define QCA6174_2_1_DEVICE_ID (0x003e) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 355db6a0fcf3e..1b266cd0c2ec0 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -58,6 +58,9 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); #define ATH10K_DIAG_TRANSFER_LIMIT 0x5000 static const struct pci_device_id ath10k_pci_id_table[] = { + /* PCI-E QCA988X V2 (Ubiquiti branded) */ + { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) }, + { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ @@ -74,6 +77,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { * hacks. ath10k doesn't have them and these devices crash horribly * because of that. 
*/ + { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, @@ -2193,6 +2197,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); switch (ar_pci->pdev->device) { + case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: case QCA9888_2_0_DEVICE_ID: @@ -3424,6 +3429,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); switch (pci_dev->device) { + case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 3d9447e21025f..695c779ae8cf7 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -72,7 +72,7 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah, s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan, s16 nf) { - s8 noise = ath9k_hw_get_default_nf(ah, chan, 0); + s8 noise = ATH_DEFAULT_NOISE_FLOOR; if (nf) { s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH - diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 56676eaff24c1..cb0eef13af1c8 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -24,6 +24,7 @@ static const struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ + { USB_DEVICE(0x07b8, 0x9271) }, /* Altai WA1011N-GU */ { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 8027bb7c03c22..fcb208d1f2762 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -98,6 +98,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) reorder_work.work); struct mt76_dev *dev = tid->dev; struct sk_buff_head frames; + int nframes; __skb_queue_head_init(&frames); @@ -105,14 +106,44 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) spin_lock(&tid->lock); mt76_rx_aggr_check_release(tid, &frames); + nframes = tid->nframes; spin_unlock(&tid->lock); - ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT); + if (nframes) + ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, + REORDER_TIMEOUT); mt76_rx_complete(dev, &frames, -1); local_bh_enable(); } +static void +mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data; + struct mt76_wcid *wcid = status->wcid; + struct mt76_rx_tid *tid; + u16 seqno; + + if (!ieee80211_is_ctl(bar->frame_control)) + return; + + if (!ieee80211_is_back_req(bar->frame_control)) + return; + + status->tid = le16_to_cpu(bar->control) >> 12; + seqno = le16_to_cpu(bar->start_seq_num) >> 4; + tid = rcu_dereference(wcid->aggr[status->tid]); + if (!tid) + return; + + spin_lock_bh(&tid->lock); + mt76_rx_aggr_release_frames(tid, frames, seqno); + mt76_rx_aggr_release_head(tid, frames); + spin_unlock_bh(&tid->lock); +} + void 
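mt76_rx_aggr_check_ctl() above keys off two BAR fields; per 802.11 the TID sits in the top four bits of the BAR control field and the sequence number in the top twelve bits of the start-sequence field, the low four being fragment bits. The extraction on host-order values:

    #include <stdint.h>

    static void parse_bar(uint16_t control, uint16_t start_seq_num,
                          uint8_t *tid, uint16_t *seqno)
    {
            *tid   = control >> 12;      /* TID_INFO */
            *seqno = start_seq_num >> 4; /* drop fragment bits */
    }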
mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; @@ -126,9 +157,14 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) __skb_queue_tail(frames, skb); sta = wcid_to_sta(wcid); - if (!sta || !status->aggr) + if (!sta) return; + if (!status->aggr) { + mt76_rx_aggr_check_ctl(skb, frames); + return; + } + tid = rcu_dereference(wcid->aggr[status->tid]); if (!tid) return; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 5fcb2deb89a24..85f8d324ebf82 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -276,6 +276,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); ieee80211_hw_set(hw, MFP_CAPABLE); + ieee80211_hw_set(hw, AP_LINK_PS); wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -470,6 +471,53 @@ mt76_check_ccmp_pn(struct sk_buff *skb) return 0; } +static void +mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_sta *sta; + struct mt76_wcid *wcid = status->wcid; + bool ps; + + if (!wcid || !wcid->sta) + return; + + sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv); + + if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags)) + return; + + if (ieee80211_is_pspoll(hdr->frame_control)) { + ieee80211_sta_pspoll(sta); + return; + } + + if (ieee80211_has_morefrags(hdr->frame_control) || + !(ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_data(hdr->frame_control))) + return; + + ps = ieee80211_has_pm(hdr->frame_control); + + if (ps && (ieee80211_is_data_qos(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control))) + ieee80211_sta_uapsd_trigger(sta, status->tid); + + if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps) + return; + + if (ps) { + set_bit(MT_WCID_FLAG_PS, &wcid->flags); + mt76_stop_tx_queues(dev, sta, true); + } else { + clear_bit(MT_WCID_FLAG_PS, &wcid->flags); + } + + ieee80211_sta_ps_transition(sta, ps); + dev->drv->sta_ps(dev, sta, ps); +} + void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, int queue) { @@ -498,8 +546,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) __skb_queue_head_init(&frames); - while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) + while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) { + mt76_check_ps(dev, skb); mt76_rx_aggr_reorder(skb, &frames); + } mt76_rx_complete(dev, &frames, q); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 129015c9d1169..d2ce15093eddd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -121,11 +121,18 @@ struct mt76_queue_ops { void (*kick)(struct mt76_dev *dev, struct mt76_queue *q); }; +enum mt76_wcid_flags { + MT_WCID_FLAG_CHECK_PS, + MT_WCID_FLAG_PS, +}; + struct mt76_wcid { struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; struct work_struct aggr_work; + unsigned long flags; + u8 idx; u8 hw_key_idx; @@ -206,6 +213,9 @@ struct mt76_driver_ops { struct sk_buff *skb); void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q); + + void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta, + bool ps); }; struct mt76_channel_state { diff --git 
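mt76_check_ps() above derives a station's power-save state from the PM bit of incoming frames and reacts only to transitions, stopping that station's TX queues on entry to doze. An edge-detection sketch with illustrative names:

    #include <stdbool.h>

    struct sta_state {
            bool in_ps;
    };

    static void ps_update(struct sta_state *sta, bool pm_bit_set)
    {
            if (sta->in_ps == pm_bit_set)
                    return; /* no transition */

            sta->in_ps = pm_bit_set;
            if (pm_bit_set) {
                    /* doze: hold this station's frames back,
                     * mt76_stop_tx_queues() in the driver */
            }
            /* the wake side is handled via the PS-transition
             * notification to mac80211 */
    }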
a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 17df17afd9bf9..e62131b881020 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -218,6 +218,8 @@ void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); +void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); + void mt76x2_update_channel(struct mt76_dev *mdev); s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 1b00ae4465a27..9dbf94947324e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -630,6 +630,7 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) .tx_complete_skb = mt76x2_tx_complete_skb, .rx_skb = mt76x2_queue_rx_skb, .rx_poll_complete = mt76x2_rx_poll_complete, + .sta_ps = mt76x2_sta_ps, }; struct ieee80211_hw *hw; struct mt76x2_dev *dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index 6c30b5eaa9ca5..7ea3d841918e9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -341,7 +341,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, mt76x2_remove_hdr_pad(skb, pad_len); - if (rxinfo & MT_RXINFO_BA) + if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL)) status->aggr = true; if (WARN_ON_ONCE(len > skb->len)) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index bf26284b9989d..205043b470b20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -282,6 +282,9 @@ mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, for (i = 0; i < ARRAY_SIZE(sta->txq); i++) mt76x2_txq_init(dev, sta->txq[i]); + if (vif->type == NL80211_IFTYPE_AP) + set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); out: @@ -311,23 +314,14 @@ mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return 0; } -static void -mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, struct ieee80211_sta *sta) +void +mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv; - struct mt76x2_dev *dev = hw->priv; + struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); int idx = msta->wcid.idx; - switch (cmd) { - case STA_NOTIFY_SLEEP: - mt76x2_mac_wcid_set_drop(dev, idx, true); - mt76_stop_tx_queues(&dev->mt76, sta, true); - break; - case STA_NOTIFY_AWAKE: - mt76x2_mac_wcid_set_drop(dev, idx, false); - break; - } + mt76x2_mac_wcid_set_drop(dev, idx, ps); } static int @@ -549,6 +543,12 @@ static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, mutex_unlock(&dev->mutex); } +static int +mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) +{ + return 0; +} + const struct ieee80211_ops mt76x2_ops = { .tx = mt76x2_tx, .start = mt76x2_start, @@ -560,7 +560,6 @@ const struct ieee80211_ops mt76x2_ops = { .bss_info_changed = mt76x2_bss_info_changed, .sta_add = mt76x2_sta_add, .sta_remove = mt76x2_sta_remove, - .sta_notify = 
mt76x2_sta_notify, .set_key = mt76x2_set_key, .conf_tx = mt76x2_conf_tx, .sw_scan_start = mt76x2_sw_scan, @@ -573,5 +572,6 @@ const struct ieee80211_ops mt76x2_ops = { .release_buffered_frames = mt76_release_buffered_frames, .set_coverage_class = mt76x2_set_coverage_class, .get_survey = mt76_get_survey, + .set_tim = mt76x2_set_tim, }; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 72c55d1f8903e..ac2572943ed08 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -309,7 +309,7 @@ static __poll_t rt2x00debug_poll_queue_dump(struct file *file, poll_wait(file, &intf->frame_dump_waitqueue, wait); if (!skb_queue_empty(&intf->frame_dump_skbqueue)) - return POLLOUT | POLLWRNORM; + return EPOLLOUT | EPOLLWRNORM; return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index f20e77b4bb65f..317c1b3101dad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1123,7 +1123,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) } if (0 == tmp) { read_addr = REG_DBI_RDATA + addr % 4; - ret = rtl_read_word(rtlpriv, read_addr); + ret = rtl_read_byte(rtlpriv, read_addr); } return ret; } @@ -1165,7 +1165,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) } tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); - _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) | + ASPM_L1_LATENCY << 3); tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index a7aacbc3984ec..46dcb7fef1954 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -99,6 +99,7 @@ #define RTL_USB_MAX_RX_COUNT 100 #define QBSS_LOAD_SIZE 5 #define MAX_WMMELE_LENGTH 64 +#define ASPM_L1_LATENCY 7 #define TOTAL_CAM_ENTRY 32 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 9bd7ddeeb6a5c..8328d395e3329 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -351,6 +351,9 @@ static int xennet_open(struct net_device *dev) unsigned int i = 0; struct netfront_queue *queue = NULL; + if (!np->queues) + return -ENODEV; + for (i = 0; i < num_queues; ++i) { queue = &np->queues[i]; napi_enable(&queue->napi); @@ -1358,18 +1361,8 @@ static int netfront_probe(struct xenbus_device *dev, #ifdef CONFIG_SYSFS info->netdev->sysfs_groups[0] = &xennet_dev_group; #endif - err = register_netdev(info->netdev); - if (err) { - pr_warn("%s: register_netdev err=%d\n", __func__, err); - goto fail; - } return 0; - - fail: - xennet_free_netdev(netdev); - dev_set_drvdata(&dev->dev, NULL); - return err; } static void xennet_end_access(int ref, void *page) @@ -1737,8 +1730,6 @@ static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i; - rtnl_lock(); - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i]; @@ -1747,8 +1738,6 @@ static void xennet_destroy_queues(struct netfront_info *info) netif_napi_del(&queue->napi); } - rtnl_unlock(); - kfree(info->queues); info->queues = NULL; } @@ -1764,8 +1753,6 @@ static int xennet_create_queues(struct netfront_info *info, if (!info->queues) return -ENOMEM; - 
rtnl_lock(); - for (i = 0; i < *num_queues; i++) { struct netfront_queue *queue = &info->queues[i]; @@ -1774,7 +1761,7 @@ static int xennet_create_queues(struct netfront_info *info, ret = xennet_init_queue(queue); if (ret < 0) { - dev_warn(&info->netdev->dev, + dev_warn(&info->xbdev->dev, "only created %d queues\n", i); *num_queues = i; break; @@ -1788,10 +1775,8 @@ static int xennet_create_queues(struct netfront_info *info, netif_set_real_num_tx_queues(info->netdev, *num_queues); - rtnl_unlock(); - if (*num_queues == 0) { - dev_err(&info->netdev->dev, "no queues\n"); + dev_err(&info->xbdev->dev, "no queues\n"); return -EINVAL; } return 0; @@ -1828,6 +1813,7 @@ static int talk_to_netback(struct xenbus_device *dev, goto out; } + rtnl_lock(); if (info->queues) xennet_destroy_queues(info); @@ -1838,6 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev, info->queues = NULL; goto out; } + rtnl_unlock(); /* Create shared ring, alloc event channel -- for each queue */ for (i = 0; i < num_queues; ++i) { @@ -1934,8 +1921,10 @@ static int talk_to_netback(struct xenbus_device *dev, xenbus_transaction_end(xbt, 1); destroy_ring: xennet_disconnect_backend(info); + rtnl_lock(); xennet_destroy_queues(info); out: + rtnl_unlock(); device_unregister(&dev->dev); return err; } @@ -1965,6 +1954,15 @@ static int xennet_connect(struct net_device *dev) netdev_update_features(dev); rtnl_unlock(); + if (dev->reg_state == NETREG_UNINITIALIZED) { + err = register_netdev(dev); + if (err) { + pr_warn("%s: register_netdev err=%d\n", __func__, err); + device_unregister(&np->xbdev->dev); + return err; + } + } + /* * All public and private state should now be sane. Get * ready to start sending and receiving packets and give the driver @@ -2150,10 +2148,14 @@ static int xennet_remove(struct xenbus_device *dev) xennet_disconnect_backend(info); - unregister_netdev(info->netdev); + if (info->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(info->netdev); - if (info->queues) + if (info->queues) { + rtnl_lock(); xennet_destroy_queues(info); + rtnl_unlock(); + } xennet_free_netdev(info->netdev); return 0; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f431c32774f36..0fe7ea35c2217 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) int ret; ret = nvme_reset_ctrl(ctrl); - if (!ret) + if (!ret) { flush_work(&ctrl->reset_work); + if (ctrl->state != NVME_CTRL_LIVE) + ret = -ENETRESET; + } + return ret; } EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); @@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (new_state) { case NVME_CTRL_ADMIN_ONLY: switch (old_state) { - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: changed = true; /* FALLTHRU */ default: @@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (old_state) { case NVME_CTRL_NEW: case NVME_CTRL_RESETTING: - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: changed = true; /* FALLTHRU */ default: @@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, break; } break; - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: switch (old_state) { - case NVME_CTRL_LIVE: + case NVME_CTRL_NEW: case NVME_CTRL_RESETTING: changed = true; /* FALLTHRU */ @@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, case NVME_CTRL_LIVE: case NVME_CTRL_ADMIN_ONLY: case NVME_CTRL_RESETTING: - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: changed = true; /* 
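nvme_reset_ctrl_sync() above tightens the synchronous-reset contract: queuing the reset is no longer enough, the controller must actually be LIVE once the reset work finishes. A sketch with stand-in types and helpers:

    enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_DEAD };

    struct ctrl {
            enum ctrl_state state;
    };

    static int queue_reset(struct ctrl *c)
    {
            (void)c; /* kick the reset work */
            return 0;
    }

    static void wait_for_reset(struct ctrl *c)
    {
            (void)c; /* flush_work() in the driver */
    }

    static int reset_ctrl_sync(struct ctrl *c)
    {
            int ret = queue_reset(c);

            if (!ret) {
                    wait_for_reset(c);
                    if (c->state != CTRL_LIVE)
                            ret = -1; /* -ENETRESET in the driver */
            }
            return ret;
    }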
FALLTHRU */ default: @@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; - range[n].cattr = cpu_to_le32(0); - range[n].nlb = cpu_to_le32(nlb); - range[n].slba = cpu_to_le64(slba); + if (n < segments) { + range[n].cattr = cpu_to_le32(0); + range[n].nlb = cpu_to_le32(nlb); + range[n].slba = cpu_to_le64(slba); + } n++; } @@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) static int nvme_keep_alive(struct nvme_ctrl *ctrl) { - struct nvme_command c; struct request *rq; - memset(&c, 0, sizeof(c)); - c.common.opcode = nvme_admin_keep_alive; - - rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED, + rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, NVME_QID_ANY); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl) return; INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); + memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); + ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); } EXPORT_SYMBOL_GPL(nvme_start_keep_alive); @@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, static void nvme_update_formats(struct nvme_ctrl *ctrl) { - struct nvme_ns *ns; + struct nvme_ns *ns, *next; + LIST_HEAD(rm_list); mutex_lock(&ctrl->namespaces_mutex); list_for_each_entry(ns, &ctrl->namespaces, list) { - if (ns->disk && nvme_revalidate_disk(ns->disk)) - nvme_ns_remove(ns); + if (ns->disk && nvme_revalidate_disk(ns->disk)) { + list_move_tail(&ns->list, &rm_list); + } } mutex_unlock(&ctrl->namespaces_mutex); + + list_for_each_entry_safe(ns, next, &rm_list, list) + nvme_ns_remove(ns); } static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) @@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, [NVME_CTRL_LIVE] = "live", [NVME_CTRL_ADMIN_ONLY] = "only-admin", [NVME_CTRL_RESETTING] = "resetting", - [NVME_CTRL_RECONNECTING]= "reconnecting", + [NVME_CTRL_CONNECTING] = "connecting", [NVME_CTRL_DELETING] = "deleting", [NVME_CTRL_DEAD] = "dead", }; diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 25b19f722f5b2..a3145d90c1d2c 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl, cmd->common.opcode != nvme_fabrics_command || cmd->fabrics.fctype != nvme_fabrics_type_connect) { /* - * Reconnecting state means transport disruption, which can take - * a long time and even might fail permanently, fail fast to - * give upper layers a chance to failover. + * Connecting state means transport disruption or initial + * establishment, which can take a long time and even might + * fail permanently, fail fast to give upper layers a chance + * to failover. * Deleting state means that the ctrl will never accept commands * again, fail it permanently. 
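nvme_update_formats() above switches to a collect-then-remove pattern: namespaces that fail revalidation are moved onto a private list under the mutex and torn down only after it is dropped, since removal may block or need the lock itself. A kernel-context sketch of the pattern; the entry type and needs_removal() policy are hypothetical:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct ns_entry {
            struct list_head list;
    };

    static bool needs_removal(struct ns_entry *ns)
    {
            return true; /* placeholder policy */
    }

    static void prune_entries(struct mutex *lock, struct list_head *all)
    {
            struct ns_entry *ns, *next;
            LIST_HEAD(rm_list);

            mutex_lock(lock);
            list_for_each_entry_safe(ns, next, all, list)
                    if (needs_removal(ns))
                            list_move_tail(&ns->list, &rm_list);
            mutex_unlock(lock);

            /* safe: the shared lock is no longer held */
            list_for_each_entry_safe(ns, next, &rm_list, list) {
                    list_del(&ns->list);
                    kfree(ns);
            }
    }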
*/ - if (ctrl->state == NVME_CTRL_RECONNECTING || + if (ctrl->state == NVME_CTRL_CONNECTING || ctrl->state == NVME_CTRL_DELETING) { nvme_req(rq)->status = NVME_SC_ABORT_REQ; return BLK_STS_IOERR; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index b856d7c919d29..7f51f8414b972 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -55,9 +55,7 @@ struct nvme_fc_queue { enum nvme_fcop_flags { FCOP_FLAGS_TERMIO = (1 << 0), - FCOP_FLAGS_RELEASED = (1 << 1), - FCOP_FLAGS_COMPLETE = (1 << 2), - FCOP_FLAGS_AEN = (1 << 3), + FCOP_FLAGS_AEN = (1 << 1), }; struct nvmefc_ls_req_op { @@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) { switch (ctrl->ctrl.state) { case NVME_CTRL_NEW: - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: /* * As all reconnects were suppressed, schedule a * connect. @@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) } break; - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: /* * The association has already been terminated and the * controller is attempting reconnects. No need to do anything @@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) /* *********************** NVME Ctrl Routines **************************** */ -static void __nvme_fc_final_op_cleanup(struct request *rq); static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); static int @@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, static int __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) { - int state; + unsigned long flags; + int opstate; + + spin_lock_irqsave(&ctrl->lock, flags); + opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); + if (opstate != FCPOP_STATE_ACTIVE) + atomic_set(&op->state, opstate); + else if (ctrl->flags & FCCTRL_TERMIO) + ctrl->iocnt++; + spin_unlock_irqrestore(&ctrl->lock, flags); - state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); - if (state != FCPOP_STATE_ACTIVE) { - atomic_set(&op->state, state); + if (opstate != FCPOP_STATE_ACTIVE) return -ECANCELED; - } ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, &ctrl->rport->remoteport, @@ -1532,60 +1535,26 @@ static void nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) { struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; - unsigned long flags; - int i, ret; - - for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { - if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE) - continue; - - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->flags & FCCTRL_TERMIO) { - ctrl->iocnt++; - aen_op->flags |= FCOP_FLAGS_TERMIO; - } - spin_unlock_irqrestore(&ctrl->lock, flags); - - ret = __nvme_fc_abort_op(ctrl, aen_op); - if (ret) { - /* - * if __nvme_fc_abort_op failed the io wasn't - * active. Thus this call path is running in - * parallel to the io complete. Treat as non-error. 
- */ + int i; - /* back out the flags/counters */ - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->flags & FCCTRL_TERMIO) - ctrl->iocnt--; - aen_op->flags &= ~FCOP_FLAGS_TERMIO; - spin_unlock_irqrestore(&ctrl->lock, flags); - return; - } - } + for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) + __nvme_fc_abort_op(ctrl, aen_op); } -static inline int +static inline void __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, - struct nvme_fc_fcp_op *op) + struct nvme_fc_fcp_op *op, int opstate) { unsigned long flags; - bool complete_rq = false; - spin_lock_irqsave(&ctrl->lock, flags); - if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { + if (opstate == FCPOP_STATE_ABORTED) { + spin_lock_irqsave(&ctrl->lock, flags); if (ctrl->flags & FCCTRL_TERMIO) { if (!--ctrl->iocnt) wake_up(&ctrl->ioabort_wait); } + spin_unlock_irqrestore(&ctrl->lock, flags); } - if (op->flags & FCOP_FLAGS_RELEASED) - complete_rq = true; - else - op->flags |= FCOP_FLAGS_COMPLETE; - spin_unlock_irqrestore(&ctrl->lock, flags); - - return complete_rq; } static void @@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); union nvme_result result; bool terminate_assoc = true; + int opstate; /* * WARNING: @@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) * association to be terminated. */ + opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); + fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, sizeof(op->rsp_iu), DMA_FROM_DEVICE); - if (atomic_read(&op->state) == FCPOP_STATE_ABORTED || - op->flags & FCOP_FLAGS_TERMIO) + if (opstate == FCPOP_STATE_ABORTED) status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); else if (freq->status) status = cpu_to_le16(NVME_SC_INTERNAL << 1); @@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) done: if (op->flags & FCOP_FLAGS_AEN) { nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); - __nvme_fc_fcpop_chk_teardowns(ctrl, op); + __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); atomic_set(&op->state, FCPOP_STATE_IDLE); op->flags = FCOP_FLAGS_AEN; /* clear other flags */ nvme_fc_ctrl_put(ctrl); @@ -1722,13 +1693,11 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) if (status && (blk_queue_dying(rq->q) || ctrl->ctrl.state == NVME_CTRL_NEW || - ctrl->ctrl.state == NVME_CTRL_RECONNECTING)) + ctrl->ctrl.state == NVME_CTRL_CONNECTING)) status |= cpu_to_le16(NVME_SC_DNR << 1); - if (__nvme_fc_fcpop_chk_teardowns(ctrl, op)) - __nvme_fc_final_op_cleanup(rq); - else - nvme_end_request(rq, status, result); + __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); + nvme_end_request(rq, status, result); check_error: if (terminate_assoc) @@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg) } static void -__nvme_fc_final_op_cleanup(struct request *rq) +nvme_fc_complete_rq(struct request *rq) { struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); struct nvme_fc_ctrl *ctrl = op->ctrl; atomic_set(&op->state, FCPOP_STATE_IDLE); - op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED | - FCOP_FLAGS_COMPLETE); nvme_fc_unmap_data(ctrl, rq, op); nvme_complete_rq(rq); nvme_fc_ctrl_put(ctrl); - -} - -static void -nvme_fc_complete_rq(struct request *rq) -{ - struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); - struct nvme_fc_ctrl *ctrl = op->ctrl; - unsigned long flags; - bool completed = false; - - /* - * the core layer, on controller resets after calling - * nvme_shutdown_ctrl(), calls complete_rq without our - * calling blk_mq_complete_request(), thus there may still - * be live 
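The nvme-fc rework above collapses the RELEASED/COMPLETE flag juggling into a single atomic exchange of the op state: whichever of the abort path and the completion path swaps first wins, and the loser backs off. A userspace model of the handshake; the states mirror FCPOP_STATE_*:

    #include <stdatomic.h>

    enum { OP_IDLE, OP_ACTIVE, OP_ABORTED, OP_COMPLETE };

    struct fc_op {
            _Atomic int state;
    };

    static int try_abort(struct fc_op *op)
    {
            int prev = atomic_exchange(&op->state, OP_ABORTED);

            if (prev != OP_ACTIVE) {
                    /* lost the race: restore and bail out,
                     * -ECANCELED in the driver */
                    atomic_store(&op->state, prev);
                    return -1;
            }
            return 0; /* we own the abort */
    }

    static int on_complete(struct fc_op *op)
    {
            /* a previous OP_ABORTED means report an aborted status */
            return atomic_exchange(&op->state, OP_COMPLETE);
    }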
i/o outstanding with the LLDD. Means transport has - * to track complete calls vs fcpio_done calls to know what - * path to take on completes and dones. - */ - spin_lock_irqsave(&ctrl->lock, flags); - if (op->flags & FCOP_FLAGS_COMPLETE) - completed = true; - else - op->flags |= FCOP_FLAGS_RELEASED; - spin_unlock_irqrestore(&ctrl->lock, flags); - - if (completed) - __nvme_fc_final_op_cleanup(rq); } /* @@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) struct nvme_ctrl *nctrl = data; struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); - unsigned long flags; - int status; if (!blk_mq_request_started(req)) return; - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->flags & FCCTRL_TERMIO) { - ctrl->iocnt++; - op->flags |= FCOP_FLAGS_TERMIO; - } - spin_unlock_irqrestore(&ctrl->lock, flags); - - status = __nvme_fc_abort_op(ctrl, op); - if (status) { - /* - * if __nvme_fc_abort_op failed the io wasn't - * active. Thus this call path is running in - * parallel to the io complete. Treat as non-error. - */ - - /* back out the flags/counters */ - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->flags & FCCTRL_TERMIO) - ctrl->iocnt--; - op->flags &= ~FCOP_FLAGS_TERMIO; - spin_unlock_irqrestore(&ctrl->lock, flags); - return; - } + __nvme_fc_abort_op(ctrl, op); } @@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; bool recon = true; - if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) return; if (portptr->port_state == FC_OBJSTATE_ONLINE) @@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) /* will block will waiting for io to terminate */ nvme_fc_delete_association(ctrl); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: error_recovery: Couldn't change state " - "to RECONNECTING\n", ctrl->cnum); + "to CONNECTING\n", ctrl->cnum); return; } @@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, * transport errors (frame drop, LS failure) inherently must kill * the association. The transport is coded so that any command used * to create the association (prior to a LIVE state transition - * while NEW or RECONNECTING) will fail if it completes in error or + * while NEW or CONNECTING) will fail if it completes in error or * times out. 
* * As such: as the connect request was mostly likely due to a diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 8e4550fa08f8b..0521e4707d1cf 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -123,7 +123,7 @@ enum nvme_ctrl_state { NVME_CTRL_LIVE, NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */ NVME_CTRL_RESETTING, - NVME_CTRL_RECONNECTING, + NVME_CTRL_CONNECTING, NVME_CTRL_DELETING, NVME_CTRL_DEAD, }; @@ -183,6 +183,7 @@ struct nvme_ctrl { struct work_struct scan_work; struct work_struct async_event_work; struct delayed_work ka_work; + struct nvme_command ka_cmd; struct work_struct fw_act_work; /* Power saving configuration */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6fe7af00a1f42..73036d2fbbd58 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) /* If there is a reset/reinit ongoing, we shouldn't reset again. */ switch (dev->ctrl.state) { case NVME_CTRL_RESETTING: - case NVME_CTRL_RECONNECTING: + case NVME_CTRL_CONNECTING: return false; default: break; @@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) * cancellation error. All outstanding requests are completed on * shutdown, so we return BLK_EH_HANDLED. */ - if (dev->ctrl.state == NVME_CTRL_RESETTING) { + switch (dev->ctrl.state) { + case NVME_CTRL_CONNECTING: + case NVME_CTRL_RESETTING: dev_warn(dev->ctrl.device, "I/O %d QID %d timeout, disable controller\n", req->tag, nvmeq->qid); nvme_dev_disable(dev, false); nvme_req(req)->flags |= NVME_REQ_CANCELLED; return BLK_EH_HANDLED; + default: + break; } /* @@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, int qid, int depth) { - if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { - unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), - dev->ctrl.page_size); - nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; - nvmeq->sq_cmds_io = dev->cmb + offset; - } else { - nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), - &nvmeq->sq_dma_addr, GFP_KERNEL); - if (!nvmeq->sq_cmds) - return -ENOMEM; - } + /* CMB SQEs will be mapped before creation */ + if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) + return 0; + nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), + &nvmeq->sq_dma_addr, GFP_KERNEL); + if (!nvmeq->sq_cmds) + return -ENOMEM; return 0; } @@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) struct nvme_dev *dev = nvmeq->dev; int result; + if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { + unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth), + dev->ctrl.page_size); + nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; + nvmeq->sq_cmds_io = dev->cmb + offset; + } + nvmeq->cq_vector = qid - 1; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) @@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work) nvme_dev_disable(dev, false); /* - * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the * initializing procedure here. 
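The nvme-pci change above defers the CMB submission-queue mapping from nvme_alloc_sq_cmds() to nvme_create_queue(), but the placement arithmetic is unchanged: each I/O queue's SQ sits at a page-rounded slot inside the controller memory buffer. A standalone sketch of that layout math (the sizes below are made-up placeholders, not the driver's SQ_SIZE() or ctrl.page_size):

```c
#include <stdio.h>

/* Round x up to the next multiple of step. */
static unsigned long roundup_ul(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	const unsigned long page_size = 4096;  /* assumed controller page size */
	const unsigned long sq_bytes = 65536;  /* assumed SQ_SIZE(depth) */

	/* qid 0 is the admin queue; CMB slots are for I/O queues, qid >= 1. */
	for (int qid = 1; qid <= 3; qid++)
		printf("qid %d -> CMB offset %lu\n", qid,
		       (qid - 1) * roundup_ul(sq_bytes, page_size));
	return 0;
}
```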
*/ - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) { + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { dev_warn(dev->ctrl.device, - "failed to mark controller RECONNECTING\n"); + "failed to mark controller CONNECTING\n"); goto out; } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2bc059f7d73c7..3a51ed50eff24 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -887,7 +887,7 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) { /* If we are resetting/deleting then do nothing */ - if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { + if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || ctrl->ctrl.state == NVME_CTRL_LIVE); return; @@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_start_queues(&ctrl->ctrl); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { /* state change failure should never happen */ WARN_ON_ONCE(1); return; @@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) nvme_stop_ctrl(&ctrl->ctrl); nvme_rdma_shutdown_ctrl(ctrl, false); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { /* state change failure should never happen */ WARN_ON_ONCE(1); return; @@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) return; out_fail: - dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); - nvme_remove_namespaces(&ctrl->ctrl); - nvme_rdma_shutdown_ctrl(ctrl, true); - nvme_uninit_ctrl(&ctrl->ctrl); - nvme_put_ctrl(&ctrl->ctrl); + ++ctrl->ctrl.nr_reconnects; + nvme_rdma_reconnect_or_remove(ctrl); } static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { @@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, if (!ctrl->queues) goto out_uninit_ctrl; + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); + WARN_ON_ONCE(!changed); + ret = nvme_rdma_configure_admin_queue(ctrl, true); if (ret) goto out_kfree_queues; diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 0a4372a016f21..28bbdff4a88ba 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req) static u16 nvmet_discard_range(struct nvmet_ns *ns, struct nvme_dsm_range *range, struct bio **bio) { - if (__blkdev_issue_discard(ns->bdev, + int ret; + + ret = __blkdev_issue_discard(ns->bdev, le64_to_cpu(range->slba) << (ns->blksize_shift - 9), le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), - GFP_KERNEL, 0, bio)) + GFP_KERNEL, 0, bio); + if (ret && ret != -EOPNOTSUPP) return NVME_SC_INTERNAL | NVME_SC_DNR; return 0; } diff --git a/drivers/of/property.c b/drivers/of/property.c index 36ed84e26d9c2..f46828e3b082b 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, return 0; } -static void * +static const void * of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, const struct device *dev) { - return (void *)of_device_get_match_data(dev); + return of_device_get_match_data(dev); } const struct fwnode_operations of_fwnode_ops = 
{ diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 2d87bc1adf38b..0c09107094350 100644 --- a/drivers/opp/cpu.c +++ b/drivers/opp/cpu.c @@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, if (max_opps <= 0) return max_opps ? max_opps : -ENODATA; - freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); + freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL); if (!freq_table) return -ENOMEM; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 3903d90fe51cf..41713f16ff972 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -385,6 +385,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_handle *phandle = (acpi_handle *)context; + unsigned long long current_status = 0; acpi_status status; struct acpi_device_info *info; int retval = 0; @@ -396,7 +397,9 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle, return retval; } - if (info->current_status && (info->valid & ACPI_VALID_HID) && + acpi_bus_get_status_handle(handle, &current_status); + + if (current_status && (info->valid & ACPI_VALID_HID) && (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) { pr_debug("found hardware: %s, handle: %p\n", diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index a60c0ab7883d1..47cd0c037433d 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -511,15 +511,15 @@ static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait) poll_wait(filp, &stdev->event_wq, wait); if (lock_mutex_and_test_alive(stdev)) - return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP; + return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP; mutex_unlock(&stdev->mrpc_mutex); if (try_wait_for_completion(&stuser->comp)) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) - ret |= POLLPRI | POLLRDBAND; + ret |= EPOLLPRI | EPOLLRDBAND; return ret; } diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index d8599736a41a2..6dec6ab133007 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev) return ret; } -static struct chromeos_laptop samsung_series_5_550 = { +static const struct chromeos_laptop samsung_series_5_550 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, @@ -432,14 +432,14 @@ static struct chromeos_laptop samsung_series_5_550 = { }, }; -static struct chromeos_laptop samsung_series_5 = { +static const struct chromeos_laptop samsung_series_5 = { .i2c_peripherals = { /* Light Sensor. */ { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, }, }; -static struct chromeos_laptop chromebook_pixel = { +static const struct chromeos_laptop chromebook_pixel = { .i2c_peripherals = { /* Touch Screen. */ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, @@ -450,14 +450,14 @@ static struct chromeos_laptop chromebook_pixel = { }, }; -static struct chromeos_laptop hp_chromebook_14 = { +static const struct chromeos_laptop hp_chromebook_14 = { .i2c_peripherals = { /* Touchpad.
*/ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, }, }; -static struct chromeos_laptop dell_chromebook_11 = { +static const struct chromeos_laptop dell_chromebook_11 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, @@ -466,28 +466,28 @@ static struct chromeos_laptop dell_chromebook_11 = { }, }; -static struct chromeos_laptop toshiba_cb35 = { +static const struct chromeos_laptop toshiba_cb35 = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, }, }; -static struct chromeos_laptop acer_c7_chromebook = { +static const struct chromeos_laptop acer_c7_chromebook = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, }, }; -static struct chromeos_laptop acer_ac700 = { +static const struct chromeos_laptop acer_ac700 = { .i2c_peripherals = { /* Light Sensor. */ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, }, }; -static struct chromeos_laptop acer_c720 = { +static const struct chromeos_laptop acer_c720 = { .i2c_peripherals = { /* Touchscreen. */ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, @@ -500,14 +500,14 @@ static struct chromeos_laptop acer_c720 = { }, }; -static struct chromeos_laptop hp_pavilion_14_chromebook = { +static const struct chromeos_laptop hp_pavilion_14_chromebook = { .i2c_peripherals = { /* Touchpad. */ { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, }, }; -static struct chromeos_laptop cr48 = { +static const struct chromeos_laptop cr48 = { .i2c_peripherals = { /* Light Sensor. */ { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index 5473e602f7e0f..0e88e18362c10 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -200,7 +200,7 @@ static __poll_t cros_ec_console_log_poll(struct file *file, if (CIRC_CNT(debug_info->log_buffer.head, debug_info->log_buffer.tail, LOG_SIZE)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; mutex_unlock(&debug_info->log_mutex); return mask; diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index 1baf720faf690..af89e82eecd23 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -35,6 +35,9 @@ #define DRV_NAME "cros_ec_lpcs" #define ACPI_DRV_NAME "GOOG0004" +/* True if ACPI device is present */ +static bool cros_ec_lpc_acpi_device_found; + static int ec_response_timed_out(void) { unsigned long one_second = jiffies + HZ; @@ -54,7 +57,6 @@ static int ec_response_timed_out(void) static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { - struct ec_host_request *request; struct ec_host_response response; u8 sum; int ret = 0; @@ -65,8 +67,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, /* Write buffer */ cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); - request = (struct ec_host_request *)ec->dout; - /* Here we go */ sum = EC_COMMAND_PROTOCOL_3; cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum); @@ -362,6 +362,13 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"), }, }, + { + /* x86-glimmer, the Lenovo Thinkpad Yoga 11e. 
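Many hunks in this series (switchtec and cros_ec above, and the other poll handlers further down) swap the classic POLL* constants for their EPOLL* counterparts to match the bitwise __poll_t return type; the numeric values are unchanged. For orientation, here is an ordinary userspace program exercising the corresponding epoll event bits; this is plain epoll API usage, not the kernel-internal __poll_t definitions:

```c
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	if (pipe(pfd) < 0)
		return 1;

	int ep = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN | EPOLLRDHUP };
	epoll_ctl(ep, EPOLL_CTL_ADD, pfd[0], &ev);

	write(pfd[1], "x", 1);		/* make the read end readable */

	struct epoll_event out;
	if (epoll_wait(ep, &out, 1, 1000) == 1 && (out.events & EPOLLIN))
		puts("read end reports EPOLLIN");

	close(pfd[0]);
	close(pfd[1]);
	close(ep);
	return 0;
}
```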
*/ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Glimmer"), + }, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table); @@ -396,9 +403,21 @@ static struct platform_driver cros_ec_lpc_driver = { .remove = cros_ec_lpc_remove, }; +static struct platform_device cros_ec_lpc_device = { + .name = DRV_NAME +}; + +static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level, + void *context, void **retval) +{ + *(bool *)context = true; + return AE_CTRL_TERMINATE; +} + static int __init cros_ec_lpc_init(void) { int ret; + acpi_status status; if (!dmi_check_system(cros_ec_lpc_dmi_table)) { pr_err(DRV_NAME ": unsupported system.\n"); @@ -415,11 +434,28 @@ static int __init cros_ec_lpc_init(void) return ret; } - return 0; + status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device, + &cros_ec_lpc_acpi_device_found, NULL); + if (ACPI_FAILURE(status)) + pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME); + + if (!cros_ec_lpc_acpi_device_found) { + /* Register the device, and it'll get hooked up automatically */ + ret = platform_device_register(&cros_ec_lpc_device); + if (ret) { + pr_err(DRV_NAME ": can't register device: %d\n", ret); + platform_driver_unregister(&cros_ec_lpc_driver); + cros_ec_lpc_reg_destroy(); + } + } + + return ret; } static void __exit cros_ec_lpc_exit(void) { + if (!cros_ec_lpc_acpi_device_found) + platform_device_unregister(&cros_ec_lpc_device); platform_driver_unregister(&cros_ec_lpc_driver); cros_ec_lpc_reg_destroy(); } diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 8dfa7fcb12488..e7bbdf947bbcf 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -60,12 +60,14 @@ static int send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) { int ret; + int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg); if (ec_dev->proto_version > 2) - ret = ec_dev->pkt_xfer(ec_dev, msg); + xfer_fxn = ec_dev->pkt_xfer; else - ret = ec_dev->cmd_xfer(ec_dev, msg); + xfer_fxn = ec_dev->cmd_xfer; + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; struct cros_ec_command *status_msg; @@ -88,7 +90,7 @@ static int send_command(struct cros_ec_device *ec_dev, for (i = 0; i < EC_COMMAND_RETRIES; i++) { usleep_range(10000, 11000); - ret = ec_dev->cmd_xfer(ec_dev, status_msg); + ret = (*xfer_fxn)(ec_dev, status_msg); if (ret < 0) break; diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index d6eebe8721875..da0a719d32f75 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c +++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -185,7 +185,7 @@ static ssize_t show_ec_version(struct device *dev, count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: EC error %d\n", msg->result); else { - msg->data[sizeof(msg->data) - 1] = '\0'; + msg->data[EC_HOST_PARAM_SIZE - 1] = '\0'; count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: %s\n", msg->data); } diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 999f1152655ab..3e32a4c14d5fc 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -549,13 +549,13 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait) return -ERESTARTSYS; if (status & PIPE_POLL_IN) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (status & PIPE_POLL_OUT) - 
mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; if (status & PIPE_POLL_HUP) - mask |= POLLHUP; + mask |= EPOLLHUP; if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) - mask |= POLLERR; + mask |= EPOLLERR; return mask; } diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index 0dfa1ca0d05b0..313cf8ad77bf6 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -300,7 +300,7 @@ mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv, { struct mlxreg_core_data *data = item->data; u32 regval; - int i, ret; + int i, ret = 0; for (i = 0; i < item->count; i++, data++) { /* Mask event. */ diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 2a68f59d2228c..c52c6723374b5 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -126,24 +126,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = { DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/ }, }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/ - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/ - }, - }, - { - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/ - }, - }, { .ident = "Dell Computer Corporation", .matches = { @@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state) struct calling_interface_buffer buffer; int ret; - dell_fill_request(&buffer, 0, 0, 0, 0); + dell_fill_request(&buffer, 0x1, 0, 0, 0); ret = dell_send_request(&buffer, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); if (ret) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 5b6f18b188012..535199c9e6bc6 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); /* * ACPI Helpers */ -#define IDEAPAD_EC_TIMEOUT (100) /* in ms */ +#define IDEAPAD_EC_TIMEOUT (200) /* in ms */ static int read_method_int(acpi_handle handle, const char *method, int *val) { diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 27de29961f5e1..454e14f022855 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -77,10 +77,13 @@ #define MLXPLAT_CPLD_AGGR_FAN_MASK_DEF 0x40 #define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \ MLXPLAT_CPLD_AGGR_FAN_MASK_DEF) +#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04 +#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc0 #define MLXPLAT_CPLD_AGGR_MASK_MSN21XX 0x04 #define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0) /* Start channel numbers */ #define MLXPLAT_CPLD_CH1 2 @@ -89,6 +92,15 @@ /* Number of LPC attached MUX platform devices */ #define MLXPLAT_CPLD_LPC_MUX_DEVS 2 +/* Hotplug devices adapter numbers */ +#define MLXPLAT_CPLD_NR_NONE -1 +#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10 +#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4 +#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11 +#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12 +#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13 +#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14 + /* mlxplat_priv - platform private data * @pdev_i2c - i2c controller platform device * @pdev_mux - array 
of mux platform devices @@ -159,6 +171,15 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { }, }; +static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = { + { + I2C_BOARD_INFO("24c32", 0x51), + }, + { + I2C_BOARD_INFO("24c32", 0x50), + }, +}; + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = { { I2C_BOARD_INFO("dps460", 0x59), @@ -190,14 +211,14 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(0), .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0], - .hpdev.nr = 10, + .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, }, { .label = "psu2", .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .mask = BIT(1), .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1], - .hpdev.nr = 10, + .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, }, }; @@ -207,14 +228,14 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_items_data[] = { .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, .mask = BIT(0), .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], - .hpdev.nr = 10, + .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, }, { .label = "pwr2", .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, .mask = BIT(1), .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], - .hpdev.nr = 10, + .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR, }, }; @@ -224,28 +245,28 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = { .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(0), .hpdev.brdinfo = &mlxplat_mlxcpld_fan[0], - .hpdev.nr = 11, + .hpdev.nr = MLXPLAT_CPLD_FAN1_DEFAULT_NR, }, { .label = "fan2", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(1), .hpdev.brdinfo = &mlxplat_mlxcpld_fan[1], - .hpdev.nr = 12, + .hpdev.nr = MLXPLAT_CPLD_FAN2_DEFAULT_NR, }, { .label = "fan3", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(2), .hpdev.brdinfo = &mlxplat_mlxcpld_fan[2], - .hpdev.nr = 13, + .hpdev.nr = MLXPLAT_CPLD_FAN3_DEFAULT_NR, }, { .label = "fan4", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(3), .hpdev.brdinfo = &mlxplat_mlxcpld_fan[3], - .hpdev.nr = 14, + .hpdev.nr = MLXPLAT_CPLD_FAN4_DEFAULT_NR, }, }; @@ -287,14 +308,29 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .mask = MLXPLAT_CPLD_AGGR_MASK_DEF, }; +static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + /* Platform hotplug MSN21xx system family data */ static struct mlxreg_core_item mlxplat_mlxcpld_msn21xx_items[] = { { - .data = mlxplat_mlxcpld_default_pwr_items_data, + .data = mlxplat_mlxcpld_msn21xx_pwr_items_data, .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF, .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, .mask = MLXPLAT_CPLD_PWR_MASK, - .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .count = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_pwr_items_data), .inversed = 0, .health = false, }, @@ -306,6 +342,245 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn21xx_data = { .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_items), .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + +/* Platform hotplug msn274x system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = 
&mlxplat_mlxcpld_psu[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_fan_items_data[] = { + { + .label = "fan1", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan2", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan3", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(2), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan4", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(3), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_msn274x_items[] = { + { + .data = mlxplat_mlxcpld_msn274x_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_msn274x_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_fan_items_data), + .inversed = 1, + .health = false, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn274x_data = { + .items = mlxplat_mlxcpld_msn274x_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + +/* Platform hotplug MSN201x system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_msn201x_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = { + { + .data = mlxplat_mlxcpld_msn201x_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_pwr_items_data), + .inversed = 0, + .health = false, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = { + .items = mlxplat_mlxcpld_msn21xx_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_DEF, + 
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + +/* Platform hotplug next generation system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = { + { + .label = "fan1", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan2", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan3", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(2), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan4", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(3), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan5", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(4), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "fan6", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = BIT(5), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = { + { + .data = mlxplat_mlxcpld_default_ng_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_NG_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data), + .inversed = 1, + .health = false, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = { + .items = mlxplat_mlxcpld_default_ng_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) @@ -437,7 +712,56 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) return 1; }; +static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_msn274x_data; + + return 1; +}; + +static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + 
ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_msn201x_data; + + return 1; +}; + +static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_default_ng_data; + + return 1; +}; + static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { + { + .callback = mlxplat_dmi_msn274x_matched, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), + DMI_MATCH(DMI_PRODUCT_NAME, "MSN274"), + }, + }, { .callback = mlxplat_dmi_default_matched, .matches = { @@ -473,6 +797,34 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"), }, }, + { + .callback = mlxplat_dmi_msn201x_matched, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), + DMI_MATCH(DMI_PRODUCT_NAME, "MSN201"), + }, + }, + { + .callback = mlxplat_dmi_qmb7xx_matched, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), + DMI_MATCH(DMI_PRODUCT_NAME, "QMB7"), + }, + }, + { + .callback = mlxplat_dmi_qmb7xx_matched, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), + DMI_MATCH(DMI_PRODUCT_NAME, "SN37"), + }, + }, + { + .callback = mlxplat_dmi_qmb7xx_matched, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), + DMI_MATCH(DMI_PRODUCT_NAME, "SN34"), + }, + }, { } }; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index a4fabf9d75f33..b205b037fd61e 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -4128,7 +4128,7 @@ static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait) { poll_wait(file, &sonypi_compat.fifo_proc_list, wait); if (kfifo_len(&sonypi_compat.fifo)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index daa68acbc9003..c0c8945603cbb 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev) goto probe_failure; } - buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL); + buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto probe_string_failure; diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 1d42385b1aa55..8febacb8fc54d 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -55,7 +55,7 @@ static __poll_t pps_cdev_poll(struct file *file, poll_table *wait) poll_wait(file, &pps->queue, wait); - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } static int pps_cdev_fasync(int fd, struct file *file, int on) diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index a593b4cf47bf2..767c485af59b2 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -286,7 +286,7 @@ __poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait) poll_wait(fp, &ptp->tsev_wq, wait); - return queue_cnt(&ptp->tsevq) ? POLLIN : 0; + return queue_cnt(&ptp->tsevq) ? 
EPOLLIN : 0; } #define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event)) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 6092b3a5978e9..cfb54e01d758f 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2325,7 +2325,7 @@ static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait) poll_wait(filp, &priv->event_rx_wait, wait); if (kfifo_len(&priv->event_fifo)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 8428eba8cb736..92d0c6a7a8372 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -967,7 +967,7 @@ static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept, poll_wait(filp, &channel->fblockread_event, wait); if (qcom_smd_get_tx_avail(channel) > 20) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index e622fcda30fab..64b6de9763ee2 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -262,12 +262,12 @@ static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait) __poll_t mask = 0; if (!eptdev->ept) - return POLLERR; + return EPOLLERR; poll_wait(filp, &eptdev->readq, wait); if (!skb_queue_empty(&eptdev->queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; mask |= rpmsg_poll(eptdev->ept, filp, wait); diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 5a7b30d0773bc..efa221e8bc22d 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -203,7 +203,7 @@ static __poll_t rtc_dev_poll(struct file *file, poll_table *wait) data = rtc->irq_data; - return (data != 0) ? (POLLIN | POLLRDNORM) : 0; + return (data != 0) ? 
(EPOLLIN | EPOLLRDNORM) : 0; } static long rtc_dev_ioctl(struct file *file, diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 0c075d100252b..fb2c3599d95c2 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -671,7 +671,7 @@ static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable) poll_wait(filp, &dasd_eer_read_wait_queue, ptable); spin_lock_irqsave(&bufferlock, flags); if (eerb->head != eerb->tail) - mask = POLLIN | POLLRDNORM ; + mask = EPOLLIN | EPOLLRDNORM ; else mask = 0; spin_unlock_irqrestore(&bufferlock, flags); diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 614b44e70a281..a2b33a22c82a8 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -19,6 +19,8 @@ endif CFLAGS_sclp_early_core.o += -D__NO_FORTIFY +CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE) + obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ sclp_early.o sclp_early_core.o diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 956f662908a65..7bc616b253f16 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -435,9 +435,9 @@ static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p) poll_wait(filp, &mon_read_wait_queue, p); if (unlikely(atomic_read(&monpriv->iucv_severed))) - return POLLERR; + return EPOLLERR; if (atomic_read(&monpriv->read_ready)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index d06bc5674e5f9..6b1891539c840 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -49,7 +49,7 @@ struct read_info_sccb { u8 _pad_112[116 - 112]; /* 112-115 */ u8 fac116; /* 116 */ u8 fac117; /* 117 */ - u8 _pad_118; /* 118 */ + u8 fac118; /* 118 */ u8 fac119; /* 119 */ u16 hcpua; /* 120-121 */ u8 _pad_122[124 - 122]; /* 122-123 */ @@ -100,6 +100,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) sclp.has_esca = !!(sccb->fac116 & 0x08); sclp.has_pfmfi = !!(sccb->fac117 & 0x40); sclp.has_ibs = !!(sccb->fac117 & 0x20); + sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); sclp.has_kss = !!(sccb->fac98 & 0x01); if (sccb->fac85 & 0x02) diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 5c94a3aec4dd2..f95b452b8bbcc 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -412,7 +412,7 @@ static void chp_release(struct device *dev) /** * chp_update_desc - update channel-path description - * @chp - channel-path + * @chp: channel-path * * Update the channel-path description of the specified channel-path * including channel measurement related information. @@ -438,7 +438,7 @@ int chp_update_desc(struct channel_path *chp) /** * chp_new - register a new channel-path - * @chpid - channel-path ID + * @chpid: channel-path ID * * Create and register data structure representing new channel-path. Return * zero on success, non-zero otherwise. @@ -730,8 +730,8 @@ static void cfg_func(struct work_struct *work) /** * chp_cfg_schedule - schedule chpid configuration request - * @chpid - channel-path ID - * @configure - Non-zero for configure, zero for deconfigure + * @chpid: channel-path ID + * @configure: Non-zero for configure, zero for deconfigure * * Schedule a channel-path configuration/deconfiguration request. 
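The s390/cio hunks here (and the cio.c/cmf.c ones that follow) are kernel-doc repairs: scripts/kernel-doc only recognizes parameters written as "@name: description", so the "@name - description" spellings are converted, stale parameter lines are dropped, and return values move into a "Returns:" section. For reference, a hypothetical function (not part of the patch) annotated in the form these fixes converge on:

```c
/**
 * chp_example_schedule() - hypothetical helper showing the kernel-doc form
 * @chpid:     channel-path ID (note "@name:", not "@name -")
 * @configure: non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration or deconfiguration request.
 *
 * Returns: %0 on success, a negative error value otherwise.
 */
static int chp_example_schedule(int chpid, int configure)
{
	return (chpid >= 0 && configure >= 0) ? 0 : -1; /* placeholder body */
}
```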
*/ @@ -747,7 +747,7 @@ void chp_cfg_schedule(struct chp_id chpid, int configure) /** * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request - * @chpid - channel-path ID + * @chpid: channel-path ID * * Cancel an active channel-path deconfiguration request if it has not yet * been performed. diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 987bf9a8c9f72..6886b3d34cf84 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1059,7 +1059,7 @@ EXPORT_SYMBOL_GPL(cio_tm_start_key); /** * cio_tm_intrg - perform interrogate function - * @sch - subchannel on which to perform the interrogate function + * @sch: subchannel on which to perform the interrogate function * * If the specified subchannel is running in transport-mode, perform the * interrogate function. Return zero on success, non-zero otherwie. diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 5e495c62cfa77..8af4948dae806 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -1118,9 +1118,10 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable) * enable_cmf() - switch on the channel measurement for a specific device * @cdev: The ccw device to be enabled * - * Returns %0 for success or a negative error value. - * Note: If this is called on a device for which channel measurement is already - * enabled a reset of the measurement data is triggered. + * Enable channel measurements for @cdev. If this is called on a device + * for which channel measurement is already enabled a reset of the + * measurement data is triggered. + * Returns: %0 for success or a negative error value. * Context: * non-atomic */ @@ -1160,7 +1161,7 @@ int enable_cmf(struct ccw_device *cdev) * __disable_cmf() - switch off the channel measurement for a specific device * @cdev: The ccw device to be disabled * - * Returns %0 for success or a negative error value. + * Returns: %0 for success or a negative error value. * * Context: * non-atomic, device_lock() held. @@ -1184,7 +1185,7 @@ int __disable_cmf(struct ccw_device *cdev) * disable_cmf() - switch off the channel measurement for a specific device * @cdev: The ccw device to be disabled * - * Returns %0 for success or a negative error value. + * Returns: %0 for success or a negative error value. * * Context: * non-atomic @@ -1205,7 +1206,7 @@ int disable_cmf(struct ccw_device *cdev) * @cdev: the channel to be read * @index: the index of the value to be read * - * Returns the value read or %0 if the value cannot be read. + * Returns: The value read or %0 if the value cannot be read. * * Context: * any @@ -1220,7 +1221,7 @@ u64 cmf_read(struct ccw_device *cdev, int index) * @cdev: the channel to be read * @data: a pointer to a data block that will be filled * - * Returns %0 on success, a negative error value otherwise. + * Returns: %0 on success, a negative error value otherwise. 
* * Context: * any diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c index deaf59f93326f..19e46363348cc 100644 --- a/drivers/s390/cio/itcw.c +++ b/drivers/s390/cio/itcw.c @@ -15,7 +15,7 @@ #include <asm/fcx.h> #include <asm/itcw.h> -/** +/* * struct itcw - incremental tcw helper data type * * This structure serves as a handle for the incremental construction of a diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 95b0efe28afb5..d5b02de02a3af 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -72,6 +72,7 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask, * @mask: which output queues to process * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer * @fc: function code to perform + * @aob: asynchronous operation block * * Returns condition code. * Note: For IQDC unicast queues only the highest priority queue is processed. @@ -1761,9 +1762,6 @@ EXPORT_SYMBOL(qdio_stop_irq); * @response: Response code will be stored at this address * @cb: Callback function will be executed for each element * of the address list - * @priv: Pointer passed from the caller to qdio_pnso_brinfo() - * @type: Type of the address entry passed to the callback - * @entry: Entry containg the address of the specified type + * @priv: Pointer to pass to the callback function. * * Performs "Store-network-bridging-information list" operation and calls diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index d9a2fffd034be..2c7550797ec2f 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -835,7 +835,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) /** * cp_iova_pinned() - check if an iova is pinned for a ccw chain. - * @cmd: ccwchain command on which to perform the operation + * @cp: channel_program on which to perform the operation * @iova: the iova to check * * If the @iova is currently pinned for the ccw chain, return true; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index db42107bf2f5f..959c65cf75d94 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -591,6 +591,11 @@ struct qeth_cmd_buffer { void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); }; +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + /** * definition of a qeth channel, used for read and write */ @@ -846,7 +851,7 @@ struct qeth_trap_id { */ static inline int qeth_get_elements_for_range(addr_t start, addr_t end) { - return PFN_UP(end - 1) - PFN_DOWN(start); + return PFN_UP(end) - PFN_DOWN(start); } static inline int qeth_get_micros(void) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 6abd3bc285e4f..ca72f3311004a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2120,7 +2120,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); @@ -2146,10 +2146,13 @@ int qeth_send_control_data(struct qeth_card *card, int len, while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; qeth_prepare_control_data(card, len, iob); - if (IS_IPA(iob->data)) + if (IS_IPA(iob->data)) { + cmd = __ipa_cmd(iob); event_timeout = QETH_IPA_TIMEOUT; - else + } else { + event_timeout =
QETH_TIMEOUT; + } + timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); @@ -2174,9 +2177,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - if ((cmd->hdr.command == IPA_CMD_SETIP) && - (cmd->hdr.prot_version == QETH_PROT_IPV4)) { + if (cmd && cmd->hdr.command == IPA_CMD_SETIP && + cmd->hdr.prot_version == QETH_PROT_IPV4) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index ba2e0856d22cd..8f5c1d7f751ae 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) vcdev->device_lost = true; rc = NOTIFY_DONE; break; + case CIO_OPER: + rc = NOTIFY_OK; + break; default: rc = NOTIFY_DONE; break; @@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = { {}, }; +#ifdef CONFIG_PM_SLEEP +static int virtio_ccw_freeze(struct ccw_device *cdev) +{ + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + + return virtio_device_freeze(&vcdev->vdev); +} + +static int virtio_ccw_restore(struct ccw_device *cdev) +{ + struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); + int ret; + + ret = virtio_ccw_set_transport_rev(vcdev); + if (ret) + return ret; + + return virtio_device_restore(&vcdev->vdev); +} +#endif + static struct ccw_driver virtio_ccw_driver = { .driver = { .owner = THIS_MODULE, @@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = { .set_online = virtio_ccw_online, .notify = virtio_ccw_cio_notify, .int_class = IRQIO_VIR, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_ccw_freeze, + .thaw = virtio_ccw_restore, + .restore = virtio_ccw_restore, +#endif }; static int __init pure_hex(char **cp, unsigned int *val, int min_digit, diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2791141bd0356..a71ee67df0847 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -7041,7 +7041,7 @@ static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait) poll_wait(file, &megasas_poll_wait, wait); spin_lock_irqsave(&poll_aen_lock, flags); if (megasas_poll_wait_aen) - mask = (POLLIN | POLLRDNORM); + mask = (EPOLLIN | EPOLLRDNORM); else mask = 0; megasas_poll_wait_aen = 0; diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 9cddc3074cd15..523971aeb4c17 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -546,7 +546,7 @@ _ctl_poll(struct file *filep, poll_table *wait) list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { if (ioc->aen_event_read_flag) { spin_unlock(&gioc_lock); - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } } spin_unlock(&gioc_lock); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0c434453aab38..c198b96368dd6 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1152,27 +1152,27 @@ sg_poll(struct file *filp, poll_table * wait) sfp = filp->private_data; if (!sfp) - return POLLERR; + return EPOLLERR; sdp = sfp->parentdp; if (!sdp) - return POLLERR; + return EPOLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, 
entry) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) - res = POLLIN | POLLRDNORM; + res = EPOLLIN | EPOLLRDNORM; ++count; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (atomic_read(&sdp->detaching)) - res |= POLLHUP; + res |= EPOLLHUP; else if (!sfp->cmd_q) { if (0 == count) - res |= POLLOUT | POLLWRNORM; + res |= EPOLLOUT | EPOLLWRNORM; } else if (count < SG_MAX_QUEUE) - res |= POLLOUT | POLLWRNORM; + res |= EPOLLOUT | EPOLLWRNORM; SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_poll: res=0x%x\n", (__force u32) res)); return res; diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index 92863e3818e5c..9475353f49d6c 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -197,10 +197,11 @@ int clk_rate_table_find(struct clk *clk, unsigned long rate) { struct cpufreq_frequency_table *pos; + int idx; - cpufreq_for_each_valid_entry(pos, freq_table) + cpufreq_for_each_valid_entry_idx(pos, freq_table, idx) if (pos->frequency == rate) - return pos - freq_table; + return idx; return -ENOENT; } diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index ee18428a051f2..b3f5cae98ea62 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig @@ -31,7 +31,7 @@ config SSB_BLOCKIO config SSB_PCIHOST_POSSIBLE bool - depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY + depends on SSB && (PCI = y || PCI = SSB) && (PCI_DRIVERS_LEGACY || !MIPS) default y config SSB_PCIHOST diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index ef733847eebed..c13772a0df58a 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2288,7 +2288,7 @@ static __poll_t comedi_poll(struct file *file, poll_table *wait) if (s->busy != file || !comedi_is_subdevice_running(s) || (s->async->cmd.flags & CMDF_WRITE) || comedi_buf_read_n_available(s) > 0) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } s = comedi_file_write_subdevice(file); @@ -2300,7 +2300,7 @@ static __poll_t comedi_poll(struct file *file, poll_table *wait) if (s->busy != file || !comedi_is_subdevice_running(s) || !(s->async->cmd.flags & CMDF_WRITE) || comedi_buf_write_n_available(s) >= bps) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } done: diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c index ab69eeb2c1f18..b3f3b4a201af1 100644 --- a/drivers/staging/comedi/drivers/serial2002.c +++ b/drivers/staging/comedi/drivers/serial2002.c @@ -114,8 +114,8 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout) __poll_t mask; mask = f->f_op->poll(f, &table.pt); - if (mask & (POLLRDNORM | POLLRDBAND | POLLIN | - POLLHUP | POLLERR)) { + if (mask & (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | + EPOLLHUP | EPOLLERR)) { break; } now = ktime_get(); diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c index 5064d5ddf581c..fc2013aade51b 100644 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c @@ -73,6 +73,8 @@ static int __init its_fsl_mc_msi_init(void) for (np = of_find_matching_node(NULL, its_device_id); np; np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np)) + continue; if (!of_property_read_bool(np, "msi-controller")) continue; diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c index 
1993b03a6f2d6..e8bfe5520bc79 100644 --- a/drivers/staging/fwserial/fwserial.c +++ b/drivers/staging/fwserial/fwserial.c @@ -37,7 +37,7 @@ module_param_named(loop, create_loop_dev, bool, 0644); /* * Threshold below which the tty is woken for writing * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because - * even if the writer is woken, n_tty_poll() won't set POLLOUT until + * even if the writer is woken, n_tty_poll() won't set EPOLLOUT until * our fifo is below this level */ #define WAKEUP_CHARS 256 diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c index c51610ce24af8..b82e2befe9355 100644 --- a/drivers/staging/greybus/tools/loopback_test.c +++ b/drivers/staging/greybus/tools/loopback_test.c @@ -663,7 +663,7 @@ static int open_poll_files(struct loopback_test *t) goto err; } read(t->fds[fds_idx].fd, &dummy, 1); - t->fds[fds_idx].events = POLLERR|POLLPRI; + t->fds[fds_idx].events = EPOLLERR|EPOLLPRI; t->fds[fds_idx].revents = 0; fds_idx++; } @@ -756,7 +756,7 @@ static int wait_for_complete(struct loopback_test *t) } for (i = 0; i < t->poll_count; i++) { - if (t->fds[i].revents & POLLPRI) { + if (t->fds[i].revents & EPOLLPRI) { /* Dummy read to clear the event */ read(t->fds[i].fd, &dummy, 1); number_of_events++; diff --git a/drivers/staging/irda/drivers/sh_sir.c b/drivers/staging/irda/drivers/sh_sir.c index fede6864c737b..0d0687cc454ab 100644 --- a/drivers/staging/irda/drivers/sh_sir.c +++ b/drivers/staging/irda/drivers/sh_sir.c @@ -226,7 +226,7 @@ static u32 sh_sir_find_sclk(struct clk *irda_clk) clk_put(pclk); /* IrDA can not set over peripheral_clk */ - cpufreq_for_each_valid_entry(pos, freq_table) { + cpufreq_for_each_valid_entry_idx(pos, freq_table, index) { u32 freq = pos->frequency; /* IrDA should not over peripheral_clk */ @@ -236,7 +236,7 @@ static u32 sh_sir_find_sclk(struct clk *irda_clk) tmp = freq % SCLK_BASE; if (tmp < min) { min = tmp; - index = pos - freq_table; + break; } } diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c index f1d128b2dae9e..2f1e9ab3d6d0f 100644 --- a/drivers/staging/irda/net/af_irda.c +++ b/drivers/staging/irda/net/af_irda.c @@ -1749,16 +1749,16 @@ static __poll_t irda_poll(struct file * file, struct socket *sock, /* Exceptional events? */ if (sk->sk_err) - mask |= POLLERR; + mask |= EPOLLERR; if (sk->sk_shutdown & RCV_SHUTDOWN) { pr_debug("%s(), POLLHUP\n", __func__); - mask |= POLLHUP; + mask |= EPOLLHUP; } /* Readable? 
*/ if (!skb_queue_empty(&sk->sk_receive_queue)) { pr_debug("Socket is readable\n"); - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } /* Connection-based need to check for termination and startup */ @@ -1766,14 +1766,14 @@ static __poll_t irda_poll(struct file * file, struct socket *sock, case SOCK_STREAM: if (sk->sk_state == TCP_CLOSE) { pr_debug("%s(), POLLHUP\n", __func__); - mask |= POLLHUP; + mask |= EPOLLHUP; } if (sk->sk_state == TCP_ESTABLISHED) { if ((self->tx_flow == FLOW_START) && sock_writeable(sk)) { - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; } } break; @@ -1781,12 +1781,12 @@ static __poll_t irda_poll(struct file * file, struct socket *sock, if ((self->tx_flow == FLOW_START) && sock_writeable(sk)) { - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; } break; case SOCK_DGRAM: if (sock_writeable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; break; default: break; diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c index 75bf9e34311d5..c90a158af4b73 100644 --- a/drivers/staging/irda/net/irnet/irnet_ppp.c +++ b/drivers/staging/irda/net/irnet/irnet_ppp.c @@ -429,10 +429,10 @@ irnet_ctrl_poll(irnet_socket * ap, DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap); poll_wait(file, &irnet_events.rwait, wait); - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; /* If there is unread events */ if(ap->event_index != irnet_events.index) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; #ifdef INITIAL_DISCOVERY if(ap->disco_number != -1) { @@ -441,7 +441,7 @@ irnet_ctrl_poll(irnet_socket * ap, irnet_get_discovery_log(ap); /* Recheck */ if(ap->disco_number != -1) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } #endif /* INITIAL_DISCOVERY */ @@ -618,7 +618,7 @@ dev_irnet_poll(struct file * file, DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", file, ap); - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; DABORT(ap == NULL, mask, FS_ERROR, "ap is NULL !!!\n"); /* If we are connected to ppp_generic, let it handle the job */ diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c index 6657ebbe068a1..4f9f9dca5e6a1 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c @@ -1265,7 +1265,7 @@ static __poll_t atomisp_poll(struct file *file, rt_mutex_lock(&isp->mutex); if (pipe->capq.streaming != 1) { rt_mutex_unlock(&isp->mutex); - return POLLERR; + return EPOLLERR; } rt_mutex_unlock(&isp->mutex); diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c index 4ffff6f8b809c..06d1920150da3 100644 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c @@ -2183,7 +2183,7 @@ static __poll_t bcm2048_fops_poll(struct file *file, poll_wait(file, &bdev->read_queue, pts); if (bdev->rds_data_available) - retval = POLLIN | POLLRDNORM; + retval = EPOLLIN | EPOLLRDNORM; return retval; } diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c index c183489c4a1c5..4d7fce8731fe6 100644 --- a/drivers/staging/most/cdev/cdev.c +++ b/drivers/staging/most/cdev/cdev.c @@ -292,10 +292,10 @@ static __poll_t comp_poll(struct file *filp, poll_table *wait) if (c->cfg->direction == MOST_CH_RX) { if 
(!kfifo_is_empty(&c->fifo)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } else { if (!kfifo_is_empty(&c->fifo) || ch_has_mbo(c)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } return mask; } diff --git a/drivers/staging/most/video/video.c b/drivers/staging/most/video/video.c index ef23e8524b1e0..9d7e747519d9a 100644 --- a/drivers/staging/most/video/video.c +++ b/drivers/staging/most/video/video.c @@ -213,7 +213,7 @@ static __poll_t comp_vdev_poll(struct file *filp, poll_table *wait) if (!data_ready(mdev)) poll_wait(filp, &mdev->wait_data, wait); if (data_ready(mdev)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig index 282e293da18f0..7ac42a590e21b 100644 --- a/drivers/staging/rtl8192e/rtl8192e/Kconfig +++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig @@ -6,4 +6,3 @@ config RTL8192E select WEXT_PRIV select CRYPTO select FW_LOADER - ---help--- diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig index 3ee9d0d00fb67..97df6507a4851 100644 --- a/drivers/staging/rtl8192u/Kconfig +++ b/drivers/staging/rtl8192u/Kconfig @@ -5,4 +5,3 @@ config RTL8192U select WIRELESS_EXT select WEXT_PRIV select CRYPTO - ---help--- diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c index 0e74d09e18ea3..0a1a7c259ab0a 100644 --- a/drivers/staging/speakup/speakup_soft.c +++ b/drivers/staging/speakup/speakup_soft.c @@ -325,7 +325,7 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait) spin_lock_irqsave(&speakup_info.spinlock, flags); if (!synth_buffer_empty() || speakup_info.flushing) - ret = POLLIN | POLLRDNORM; + ret = EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&speakup_info.spinlock, flags); return ret; } diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 92eb57e2adaf5..8de16016b6de9 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c @@ -893,6 +893,9 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip, return -ENODEV; rcu_read_lock(); + if (!(n->nud_state & NUD_VALID)) + neigh_event_send(n, NULL); + ret = -ENOMEM; if (n->dev->flags & IFF_LOOPBACK) { if (iptype == 4) diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index f9bc8ec6fb6b5..9518ffd8b8bac 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -421,7 +421,8 @@ static int chap_server_compute_md5( auth_ret = 0; out: kzfree(desc); - crypto_free_shash(tfm); + if (tfm) + crypto_free_shash(tfm); kfree(challenge); kfree(challenge_binhex); return auth_ret; diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index b686e2ce9c0e5..8a5e8d17a9426 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk) if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { write_unlock_bh(&sk->sk_callback_lock); pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn); + if (iscsi_target_sk_data_ready == conn->orig_data_ready) + return; + conn->orig_data_ready(sk); return; } diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index b6a913e38b301..9cd4ffe76c07f 100644 --- 
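The iscsi_target_auth.c hunk above makes the common exit label of chap_server_compute_md5() safe to reach before the hash transform was ever allocated, since crypto_free_shash() in this era does not tolerate a NULL tfm. A self-contained sketch of that shape; demo_md5_digest and its arguments are illustrative, not kernel API:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int demo_md5_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = NULL;
	struct shash_desc *desc = NULL;
	int ret;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		tfm = NULL;	/* keep the shared exit path NULL-safe */
		goto out;
	}

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out;
	}
	desc->tfm = tfm;

	ret = crypto_shash_digest(desc, data, len, out);
out:
	kzfree(desc);		/* NULL-safe, zeroes before freeing */
	if (tfm)		/* the guard this patch adds */
		crypto_free_shash(tfm);
	return ret;
}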
a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -64,7 +64,7 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd) static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host) { - seq_printf(m, "tcm_loop_proc_info()\n"); + seq_puts(m, "tcm_loop_proc_info()\n"); return 0; } @@ -123,8 +123,8 @@ static void tcm_loop_submission_work(struct work_struct *work) } tl_nexus = tl_tpg->tl_nexus; if (!tl_nexus) { - scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" - " does not exist\n"); + scmd_printk(KERN_ERR, sc, + "TCM_Loop I_T Nexus does not exist\n"); set_host_byte(sc, DID_ERROR); goto out_done; } @@ -166,7 +166,6 @@ static void tcm_loop_submission_work(struct work_struct *work) out_done: kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); sc->scsi_done(sc); - return; } /* @@ -177,14 +176,13 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) { struct tcm_loop_cmd *tl_cmd; - pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x" - " scsi_buf_len: %u\n", sc->device->host->host_no, - sc->device->id, sc->device->channel, sc->device->lun, - sc->cmnd[0], scsi_bufflen(sc)); + pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n", + __func__, sc->device->host->host_no, sc->device->id, + sc->device->channel, sc->device->lun, sc->cmnd[0], + scsi_bufflen(sc)); tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); if (!tl_cmd) { - pr_err("Unable to allocate struct tcm_loop_cmd\n"); set_host_byte(sc, DID_ERROR); sc->scsi_done(sc); return 0; @@ -204,10 +202,10 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, u64 lun, int task, enum tcm_tmreq_table tmr) { - struct se_cmd *se_cmd = NULL; + struct se_cmd *se_cmd; struct se_session *se_sess; struct tcm_loop_nexus *tl_nexus; - struct tcm_loop_cmd *tl_cmd = NULL; + struct tcm_loop_cmd *tl_cmd; int ret = TMR_FUNCTION_FAILED, rc; /* @@ -215,16 +213,13 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, */ tl_nexus = tl_tpg->tl_nexus; if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); + pr_err("Unable to perform device reset without active I_T Nexus\n"); return ret; } tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); - if (!tl_cmd) { - pr_err("Unable to allocate memory for tl_cmd\n"); + if (!tl_cmd) return ret; - } init_completion(&tl_cmd->tmr_done); @@ -298,8 +293,7 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc) */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); if (!tl_hba) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); + pr_err("Unable to perform device reset without active I_T Nexus\n"); return FAILED; } /* @@ -417,8 +411,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host ret = device_register(&tl_hba->dev); if (ret) { - pr_err("device_register() failed for" - " tl_hba->dev: %d\n", ret); + pr_err("device_register() failed for tl_hba->dev: %d\n", ret); return -ENODEV; } @@ -447,8 +440,7 @@ static int tcm_loop_alloc_core_bus(void) ret = driver_register(&tcm_loop_driverfs); if (ret) { - pr_err("driver_register() failed for" - "tcm_loop_driverfs\n"); + pr_err("driver_register() failed for tcm_loop_driverfs\n"); goto bus_unreg; } @@ -587,8 +579,8 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; - pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p" - 
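The log-message churn in tcm_loop.c follows two checkpatch rules worth naming: user-visible strings stay on a single line even past 80 columns, so the exact message can be grepped, and hard-coded function names in log lines become "%s", __func__ so they cannot go stale across a rename. Side by side (sc as in the surrounding code):

/* split literal: grepping the tree for "without active" finds nothing */
pr_err("Unable to perform device reset without"
       " active I_T Nexus\n");

/* joined literal plus __func__: greppable and rename-proof */
pr_err("Unable to perform device reset without active I_T Nexus\n");
pr_debug("%s() called for scsi_cmnd: %p\n", __func__, sc);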
" cdb: 0x%02x\n", sc, sc->cmnd[0]); + pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", + __func__, sc, sc->cmnd[0]); sc->result = SAM_STAT_GOOD; set_host_byte(sc, DID_OK); @@ -605,8 +597,8 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; - pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p" - " cdb: 0x%02x\n", sc, sc->cmnd[0]); + pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n", + __func__, sc, sc->cmnd[0]); if (se_cmd->sense_buffer && ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || @@ -691,8 +683,8 @@ static void tcm_loop_port_unlink( sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); if (!sd) { - pr_err("Unable to locate struct scsi_device for %d:%d:" - "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); + pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n", + 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); return; } /* @@ -772,11 +764,9 @@ static int tcm_loop_make_nexus( return -EEXIST; } - tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); - if (!tl_nexus) { - pr_err("Unable to allocate struct tcm_loop_nexus\n"); + tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL); + if (!tl_nexus) return -ENOMEM; - } tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0, TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, @@ -787,9 +777,8 @@ static int tcm_loop_make_nexus( return ret; } - pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), - name); + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n", + tcm_loop_dump_proto_id(tl_hba), name); return 0; } @@ -808,15 +797,14 @@ static int tcm_loop_drop_nexus( return -ENODEV; if (atomic_read(&tpg->tl_tpg_port_count)) { - pr_err("Unable to remove TCM_Loop I_T Nexus with" - " active TPG port count: %d\n", - atomic_read(&tpg->tl_tpg_port_count)); + pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n", + atomic_read(&tpg->tl_tpg_port_count)); return -EPERM; } - pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), - tl_nexus->se_sess->se_node_acl->initiatorname); + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n", + tcm_loop_dump_proto_id(tpg->tl_hba), + tl_nexus->se_sess->se_node_acl->initiatorname); /* * Release the SCSI I_T Nexus to the emulated Target Port */ @@ -868,8 +856,8 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item, * tcm_loop_make_nexus() */ if (strlen(page) >= TL_WWN_ADDR_LEN) { - pr_err("Emulated NAA Sas Address: %s, exceeds" - " max: %d\n", page, TL_WWN_ADDR_LEN); + pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n", + page, TL_WWN_ADDR_LEN); return -EINVAL; } snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page); @@ -877,9 +865,8 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item, ptr = strstr(i_port, "naa."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { - pr_err("Passed SAS Initiator Port %s does not" - " match target port protoid: %s\n", i_port, - tcm_loop_dump_proto_id(tl_hba)); + pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n", + i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; } port_ptr = &i_port[0]; @@ -888,9 +875,8 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item, ptr = strstr(i_port, "fc."); if (ptr) { if (tl_hba->tl_proto_id != 
SCSI_PROTOCOL_FCP) { - pr_err("Passed FCP Initiator Port %s does not" - " match target port protoid: %s\n", i_port, - tcm_loop_dump_proto_id(tl_hba)); + pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n", + i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; } port_ptr = &i_port[3]; /* Skip over "fc." */ @@ -899,16 +885,15 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item, ptr = strstr(i_port, "iqn."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { - pr_err("Passed iSCSI Initiator Port %s does not" - " match target port protoid: %s\n", i_port, - tcm_loop_dump_proto_id(tl_hba)); + pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n", + i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; } port_ptr = &i_port[0]; goto check_newline; } - pr_err("Unable to locate prefix for emulated Initiator Port:" - " %s\n", i_port); + pr_err("Unable to locate prefix for emulated Initiator Port: %s\n", + i_port); return -EINVAL; /* * Clear any trailing newline for the NAA WWN @@ -1010,16 +995,15 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( unsigned long tpgt; if (strstr(name, "tpgt_") != name) { - pr_err("Unable to locate \"tpgt_#\" directory" - " group\n"); + pr_err("Unable to locate \"tpgt_#\" directory group\n"); return ERR_PTR(-EINVAL); } if (kstrtoul(name+5, 10, &tpgt)) return ERR_PTR(-EINVAL); if (tpgt >= TL_TPGS_PER_HBA) { - pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:" - " %u\n", tpgt, TL_TPGS_PER_HBA); + pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n", + tpgt, TL_TPGS_PER_HBA); return ERR_PTR(-EINVAL); } tl_tpg = &tl_hba->tl_hba_tpgs[tpgt]; @@ -1032,10 +1016,9 @@ static struct se_portal_group *tcm_loop_make_naa_tpg( if (ret < 0) return ERR_PTR(-ENOMEM); - pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" - " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba), - config_item_name(&wwn->wwn_group.cg_item), tpgt); - + pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n", + tcm_loop_dump_proto_id(tl_hba), + config_item_name(&wwn->wwn_group.cg_item), tpgt); return &tl_tpg->tl_se_tpg; } @@ -1062,9 +1045,9 @@ static void tcm_loop_drop_naa_tpg( tl_tpg->tl_hba = NULL; tl_tpg->tl_tpgt = 0; - pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" - " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), - config_item_name(&wwn->wwn_group.cg_item), tpgt); + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n", + tcm_loop_dump_proto_id(tl_hba), + config_item_name(&wwn->wwn_group.cg_item), tpgt); } /* End items for tcm_loop_naa_cit */ @@ -1081,11 +1064,10 @@ static struct se_wwn *tcm_loop_make_scsi_hba( char *ptr; int ret, off = 0; - tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); - if (!tl_hba) { - pr_err("Unable to allocate struct tcm_loop_hba\n"); + tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL); + if (!tl_hba) return ERR_PTR(-ENOMEM); - } + /* * Determine the emulated Protocol Identifier and Target Port Name * based on the incoming configfs directory name. 
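The configfs name handling reflowed above leans on two recurring idioms: strstr() compared against the start of the string acts as a has-prefix test, and kstrtoul() parses the numeric suffix strictly (a trailing newline is the only junk it tolerates). As a standalone sketch, with demo_parse_tpgt purely hypothetical:

#include <linux/kernel.h>
#include <linux/string.h>

/* hypothetical helper mirroring tcm_loop_make_naa_tpg()'s name check */
static int demo_parse_tpgt(const char *name, unsigned long *tpgt)
{
	/* prefix test: the match must sit at the very start of name */
	if (strstr(name, "tpgt_") != name)
		return -EINVAL;

	/* strict base-10 parse of everything after "tpgt_" */
	if (kstrtoul(name + 5, 10, tpgt))
		return -EINVAL;

	return 0;
}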
@@ -1103,8 +1085,8 @@ static struct se_wwn *tcm_loop_make_scsi_hba( } ptr = strstr(name, "iqn."); if (!ptr) { - pr_err("Unable to locate prefix for emulated Target " - "Port: %s\n", name); + pr_err("Unable to locate prefix for emulated Target Port: %s\n", + name); ret = -EINVAL; goto out; } @@ -1112,9 +1094,8 @@ static struct se_wwn *tcm_loop_make_scsi_hba( check_len: if (strlen(name) >= TL_WWN_ADDR_LEN) { - pr_err("Emulated NAA %s Address: %s, exceeds" - " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), - TL_WWN_ADDR_LEN); + pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n", + name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN); ret = -EINVAL; goto out; } @@ -1131,10 +1112,8 @@ static struct se_wwn *tcm_loop_make_scsi_hba( sh = tl_hba->sh; tcm_loop_hba_no_cnt++; - pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target" - " %s Address: %s at Linux/SCSI Host ID: %d\n", - tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); - + pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n", + tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); return &tl_hba->tl_hba_wwn; out: kfree(tl_hba); @@ -1147,10 +1126,9 @@ static void tcm_loop_drop_scsi_hba( struct tcm_loop_hba *tl_hba = container_of(wwn, struct tcm_loop_hba, tl_hba_wwn); - pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target" - " %s Address: %s at Linux/SCSI Host ID: %d\n", - tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, - tl_hba->sh->host_no); + pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n", + tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address, + tl_hba->sh->host_no); /* * Call device_unregister() on the original tl_hba->dev. * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will @@ -1223,8 +1201,7 @@ static int __init tcm_loop_fabric_init(void) __alignof__(struct tcm_loop_cmd), 0, NULL); if (!tcm_loop_cmd_cache) { - pr_debug("kmem_cache_create() for" - " tcm_loop_cmd_cache failed\n"); + pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n"); goto out_destroy_workqueue; } diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index e5c3e5f827d0b..fb1003921d85a 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -201,10 +201,9 @@ static struct sbp_session *sbp_session_create( snprintf(guid_str, sizeof(guid_str), "%016llx", guid); sess = kmalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) { - pr_err("failed to allocate session descriptor\n"); + if (!sess) return ERR_PTR(-ENOMEM); - } + spin_lock_init(&sess->lock); INIT_LIST_HEAD(&sess->login_list); INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); @@ -2029,10 +2028,8 @@ static struct se_portal_group *sbp_make_tpg( } tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); - if (!tpg) { - pr_err("Unable to allocate struct sbp_tpg\n"); + if (!tpg) return ERR_PTR(-ENOMEM); - } tpg->tport = tport; tpg->tport_tpgt = tpgt; @@ -2088,10 +2085,8 @@ static struct se_wwn *sbp_make_tport( return ERR_PTR(-EINVAL); tport = kzalloc(sizeof(*tport), GFP_KERNEL); - if (!tport) { - pr_err("Unable to allocate struct sbp_tport\n"); + if (!tport) return ERR_PTR(-ENOMEM); - } tport->guid = guid; sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 72b1cd1bf9d9f..3f4bf126eed06 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1197,6 +1197,7 @@ struct configfs_attribute 
*passthrough_attrib_attrs[] = { EXPORT_SYMBOL(passthrough_attrib_attrs); TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL); +TB_CIT_SETUP_DRV(dev_action, NULL, NULL); /* End functions for struct config_item_type tb_dev_attrib_cit */ @@ -2940,6 +2941,10 @@ static struct config_group *target_core_make_subdev( config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit); + config_group_init_type_name(&dev->dev_action_group, "action", + &tb->tb_dev_action_cit); + configfs_add_default_group(&dev->dev_action_group, &dev->dev_group); + config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", &tb->tb_dev_attrib_cit); configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group); @@ -3200,6 +3205,7 @@ static const struct config_item_type target_core_cit = { void target_setup_backend_cits(struct target_backend *tb) { target_core_setup_dev_cit(tb); + target_core_setup_dev_action_cit(tb); target_core_setup_dev_attrib_cit(tb); target_core_setup_dev_pr_cit(tb); target_core_setup_dev_wwn_cit(tb); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e8dd6da164b28..e27db4d45a9d3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -997,7 +997,7 @@ int target_configure_device(struct se_device *dev) ret = core_setup_alua(dev); if (ret) - goto out_free_index; + goto out_destroy_device; /* * Startup the struct se_device processing thread @@ -1041,6 +1041,8 @@ int target_configure_device(struct se_device *dev) out_free_alua: core_alua_free_lu_gp_mem(dev); +out_destroy_device: + dev->transport->destroy_device(dev); out_free_index: mutex_lock(&device_mutex); idr_remove(&devices_idr, dev->dev_index); diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 508da345b73fd..71a80257a0526 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -273,7 +273,7 @@ static int iscsi_get_pr_transport_id_len( static char *iscsi_parse_pr_out_transport_id( struct se_portal_group *se_tpg, - const char *buf, + char *buf, u32 *out_tid_len, char **port_nexus_ptr) { @@ -356,7 +356,7 @@ static char *iscsi_parse_pr_out_transport_id( } } - return (char *)&buf[4]; + return &buf[4]; } int target_get_pr_transport_id_len(struct se_node_acl *nacl, @@ -405,7 +405,7 @@ int target_get_pr_transport_id(struct se_node_acl *nacl, } const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, - const char *buf, u32 *out_tid_len, char **port_nexus_ptr) + char *buf, u32 *out_tid_len, char **port_nexus_ptr) { u32 offset; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 9384d19a7326c..1d5afc3ae017c 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -17,6 +17,7 @@ struct target_backend { struct config_item_type tb_dev_cit; struct config_item_type tb_dev_attrib_cit; + struct config_item_type tb_dev_action_cit; struct config_item_type tb_dev_pr_cit; struct config_item_type tb_dev_wwn_cit; struct config_item_type tb_dev_alua_tg_pt_gps_cit; @@ -102,7 +103,7 @@ int target_get_pr_transport_id(struct se_node_acl *nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf); const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg, - const char *buf, u32 *out_tid_len, char **port_nexus_ptr); + char *buf, u32 *out_tid_len, char **port_nexus_ptr); /* target_core_hba.c */ struct se_hba *core_alloc_hba(const char *, u32, u32); diff --git 
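The target_core_device.c change above is an unwind-ordering fix: once transport->configure_device() has succeeded, a later failure in core_setup_alua() must destroy the half-configured device, so the error ladder gains an out_destroy_device label ahead of out_free_index. The general shape, with every demo_* name hypothetical:

#include <linux/types.h>

struct demo_dev { int index; bool configured; bool alua; };

static int demo_alloc_index(struct demo_dev *dev) { dev->index = 1; return 0; }
static int demo_transport_configure(struct demo_dev *dev) { dev->configured = true; return 0; }
static int demo_setup_alua(struct demo_dev *dev) { dev->alua = true; return 0; }
static void demo_transport_destroy(struct demo_dev *dev) { dev->configured = false; }
static void demo_free_index(struct demo_dev *dev) { dev->index = 0; }

static int demo_configure(struct demo_dev *dev)
{
	int ret;

	ret = demo_alloc_index(dev);
	if (ret)
		return ret;

	ret = demo_transport_configure(dev);	/* cf. transport->configure_device() */
	if (ret)
		goto out_free_index;

	ret = demo_setup_alua(dev);		/* cf. core_setup_alua() */
	if (ret)
		goto out_destroy_device;	/* the label this fix adds */

	return 0;

out_destroy_device:
	demo_transport_destroy(dev);		/* undo in reverse order */
out_free_index:
	demo_free_index(dev);
	return ret;
}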
a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b024613f92171..01ac306131c1f 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1601,7 +1601,7 @@ core_scsi3_decode_spec_i_port( dest_rtpi = tmp_lun->lun_rtpi; i_str = target_parse_pr_out_transport_id(tmp_tpg, - (const char *)ptr, &tid_len, &iport_ptr); + ptr, &tid_len, &iport_ptr); if (!i_str) continue; @@ -3287,7 +3287,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, goto out; } initiator_str = target_parse_pr_out_transport_id(dest_se_tpg, - (const char *)&buf[24], &tmp_tid_len, &iport_ptr); + &buf[24], &tmp_tid_len, &iport_ptr); if (!initiator_str) { pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport ID\n"); diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 750a04ed0e93a..b054682e974f9 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1216,9 +1216,11 @@ sbc_execute_unmap(struct se_cmd *cmd) goto err; } - ret = ops->execute_unmap(cmd, lba, range); - if (ret) - goto err; + if (range) { + ret = ops->execute_unmap(cmd, lba, range); + if (ret) + goto err; + } ptr += 16; size -= 16; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c03a78ee26cd6..4558f2e1fe1bb 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1774,6 +1774,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_OUT_OF_RESOURCES: cmd->scsi_status = SAM_STAT_TASK_SET_FULL; goto queue_status; + case TCM_LUN_BUSY: + cmd->scsi_status = SAM_STAT_BUSY; + goto queue_status; case TCM_RESERVATION_CONFLICT: /* * No SENSE Data payload for this case, set SCSI Status diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index a415d87f22d24..4ad89ea71a701 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -32,7 +32,7 @@ #include <linux/highmem.h> #include <linux/configfs.h> #include <linux/mutex.h> -#include <linux/kthread.h> +#include <linux/workqueue.h> #include <net/genetlink.h> #include <scsi/scsi_common.h> #include <scsi/scsi_proto.h> @@ -77,15 +77,21 @@ * the total size is 256K * PAGE_SIZE. */ #define DATA_BLOCK_SIZE PAGE_SIZE -#define DATA_BLOCK_BITS (256 * 1024) +#define DATA_BLOCK_SHIFT PAGE_SHIFT +#define DATA_BLOCK_BITS_DEF (256 * 1024) #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) -#define DATA_BLOCK_INIT_BITS 128 + +#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT)) +#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT)) /* The total size of the ring is 8M + 256K * PAGE_SIZE */ #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) -/* Default maximum of the global data blocks(512K * PAGE_SIZE) */ -#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) +/* + * Default number of global data blocks(512K * PAGE_SIZE) + * when the unmap thread will be started. 
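The new conversion macros deserve the arithmetic spelled out; the figures below assume 4 KiB pages, i.e. DATA_BLOCK_SHIFT == 12:

/*
 * TCMU_MBS_TO_BLOCKS(m) == m << (20 - 12) == m * 256   blocks per MB
 * TCMU_BLOCKS_TO_MBS(b) == b >> 8
 *
 * DATA_BLOCK_BITS_DEF        = 256 * 1024 blocks -> 1024 MB per device
 * TCMU_GLOBAL_MAX_BLOCKS_DEF = 512 * 1024 blocks -> 2048 MB across all devices
 */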
+ */ +#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) static u8 tcmu_kern_cmd_reply_supported; @@ -107,6 +113,7 @@ struct tcmu_nl_cmd { struct tcmu_dev { struct list_head node; struct kref kref; + struct se_device se_dev; char *name; @@ -114,6 +121,7 @@ struct tcmu_dev { #define TCMU_DEV_BIT_OPEN 0 #define TCMU_DEV_BIT_BROKEN 1 +#define TCMU_DEV_BIT_BLOCKED 2 unsigned long flags; struct uio_info uio_info; @@ -128,22 +136,27 @@ struct tcmu_dev { /* Must add data_off and mb_addr to get the address */ size_t data_off; size_t data_size; + uint32_t max_blocks; + size_t ring_size; - wait_queue_head_t wait_cmdr; struct mutex cmdr_lock; + struct list_head cmdr_queue; - bool waiting_global; uint32_t dbi_max; uint32_t dbi_thresh; - DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS); + unsigned long *data_bitmap; struct radix_tree_root data_blocks; struct idr commands; - spinlock_t commands_lock; - struct timer_list timeout; + struct timer_list cmd_timer; unsigned int cmd_time_out; + struct timer_list qfull_timer; + int qfull_time_out; + + struct list_head timedout_entry; + spinlock_t nl_cmd_lock; struct tcmu_nl_cmd curr_nl_cmd; /* wake up threads waiting on curr_nl_cmd */ @@ -161,6 +174,7 @@ struct tcmu_dev { struct tcmu_cmd { struct se_cmd *se_cmd; struct tcmu_dev *tcmu_dev; + struct list_head cmdr_queue_entry; uint16_t cmd_id; @@ -175,16 +189,68 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 unsigned long flags; }; - -static struct task_struct *unmap_thread; -static wait_queue_head_t unmap_wait; +/* + * To avoid dead lock the mutex lock order should always be: + * + * mutex_lock(&root_udev_mutex); + * ... + * mutex_lock(&tcmu_dev->cmdr_lock); + * mutex_unlock(&tcmu_dev->cmdr_lock); + * ... + * mutex_unlock(&root_udev_mutex); + */ static DEFINE_MUTEX(root_udev_mutex); static LIST_HEAD(root_udev); -static atomic_t global_db_count = ATOMIC_INIT(0); +static DEFINE_SPINLOCK(timed_out_udevs_lock); +static LIST_HEAD(timed_out_udevs); static struct kmem_cache *tcmu_cmd_cache; +static atomic_t global_db_count = ATOMIC_INIT(0); +static struct delayed_work tcmu_unmap_work; +static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF; + +static int tcmu_set_global_max_data_area(const char *str, + const struct kernel_param *kp) +{ + int ret, max_area_mb; + + ret = kstrtoint(str, 10, &max_area_mb); + if (ret) + return -EINVAL; + + if (max_area_mb <= 0) { + pr_err("global_max_data_area must be larger than 0.\n"); + return -EINVAL; + } + + tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb); + if (atomic_read(&global_db_count) > tcmu_global_max_blocks) + schedule_delayed_work(&tcmu_unmap_work, 0); + else + cancel_delayed_work_sync(&tcmu_unmap_work); + + return 0; +} + +static int tcmu_get_global_max_data_area(char *buffer, + const struct kernel_param *kp) +{ + return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); +} + +static const struct kernel_param_ops tcmu_global_max_data_area_op = { + .set = tcmu_set_global_max_data_area, + .get = tcmu_get_global_max_data_area, +}; + +module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, + S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(global_max_data_area_mb, + "Max MBs allowed to be allocated to all the tcmu device's " + "data areas."); + /* multicast group */ enum tcmu_multicast_groups { TCMU_MCGRP_CONFIG, @@ -345,10 +411,8 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, page = radix_tree_lookup(&udev->data_blocks, dbi); if (!page) { if (atomic_add_return(1, &global_db_count) > - TCMU_GLOBAL_MAX_BLOCKS) { - 
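global_max_data_area_mb is the standard writable module parameter with custom hooks: module_param_cb() attaches a kernel_param_ops whose set hook validates and converts units before anything is stored, and the value stays adjustable at runtime through /sys/module/target_core_user/parameters/global_max_data_area_mb. Stripped to its skeleton (all demo_* names hypothetical):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int demo_limit_mb = 16;

static int demo_limit_set(const char *val, const struct kernel_param *kp)
{
	int mb;

	if (kstrtoint(val, 10, &mb) || mb <= 0)
		return -EINVAL;
	return param_set_int(val, kp);	/* store through the stock helper */
}

static const struct kernel_param_ops demo_limit_ops = {
	.set = demo_limit_set,
	.get = param_get_int,
};

module_param_cb(demo_limit_mb, &demo_limit_ops, &demo_limit_mb, 0644);
MODULE_PARM_DESC(demo_limit_mb, "illustrative bounded integer parameter");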
atomic_dec(&global_db_count); - return false; - } + tcmu_global_max_blocks) + schedule_delayed_work(&tcmu_unmap_work, 0); /* try to get new page from the mm */ page = alloc_page(GFP_KERNEL); @@ -379,19 +443,11 @@ static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, { int i; - udev->waiting_global = false; - for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) { if (!tcmu_get_empty_block(udev, tcmu_cmd)) - goto err; + return false; } return true; - -err: - udev->waiting_global = true; - /* Try to wake up the unmap thread */ - wake_up(&unmap_wait); - return false; } static inline struct page * @@ -437,6 +493,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) if (!tcmu_cmd) return NULL; + INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; @@ -455,12 +512,13 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) { unsigned long offset = offset_in_page(vaddr); + void *start = vaddr - offset; size = round_up(size+offset, PAGE_SIZE); - vaddr -= offset; while (size) { - flush_dcache_page(virt_to_page(vaddr)); + flush_dcache_page(virt_to_page(start)); + start += PAGE_SIZE; size -= PAGE_SIZE; } } @@ -490,8 +548,7 @@ static inline size_t head_to_end(size_t head, size_t size) return size - head; } -static inline void new_iov(struct iovec **iov, int *iov_cnt, - struct tcmu_dev *udev) +static inline void new_iov(struct iovec **iov, int *iov_cnt) { struct iovec *iovec; @@ -518,7 +575,7 @@ static inline size_t iov_tail(struct iovec *iov) return (size_t)iov->iov_base + iov->iov_len; } -static int scatter_data_area(struct tcmu_dev *udev, +static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg, unsigned int data_nents, struct iovec **iov, int *iov_cnt, bool copy_data) @@ -544,19 +601,38 @@ static int scatter_data_area(struct tcmu_dev *udev, to = kmap_atomic(page); } - copy_bytes = min_t(size_t, sg_remaining, - block_remaining); + /* + * Convert to virtual offset of the ring data area. + */ to_offset = get_block_offset_user(udev, dbi, block_remaining); + /* + * The following code will gather and map the blocks + * to the same iovec when the blocks are all next to + * each other. + */ + copy_bytes = min_t(size_t, sg_remaining, + block_remaining); if (*iov_cnt != 0 && to_offset == iov_tail(*iov)) { + /* + * Will append to the current iovec, because + * the current block page is next to the + * previous one. + */ (*iov)->iov_len += copy_bytes; } else { - new_iov(iov, iov_cnt, udev); + /* + * Will allocate a new iovec because this is + * the first time here or the current block page + * is not next to the previous one. 
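The rewritten scatter_data_area() coalesces physically adjacent data blocks into a single iovec; a concrete walk-through may help (block numbers and offsets are invented, DATA_BLOCK_SIZE == 4096 assumed):

/*
 * block 7 maps at data-area offset 0x7000; the first copy of 4096 bytes
 *   creates iov[0] = { .iov_base = 0x7000, .iov_len = 4096 }
 * block 8 maps at 0x8000 == iov_tail(iov[0]) (0x7000 + 4096)
 *   -> adjacent, so iov[0].iov_len grows to 8192 and no entry is added
 * block 12 maps at 0xc000 != iov_tail(iov[0])
 *   -> gap, so a fresh iov[1] = { .iov_base = 0xc000, .iov_len = 4096 }
 *
 * Fewer iovec entries keep the command entry in the ring small.
 */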
+ */ + new_iov(iov, iov_cnt); (*iov)->iov_base = (void __user *)to_offset; (*iov)->iov_len = copy_bytes; } + if (copy_data) { offset = DATA_BLOCK_SIZE - block_remaining; memcpy(to + offset, @@ -564,15 +640,15 @@ static int scatter_data_area(struct tcmu_dev *udev, copy_bytes); tcmu_flush_dcache_range(to, copy_bytes); } + sg_remaining -= copy_bytes; block_remaining -= copy_bytes; } kunmap_atomic(from - sg->offset); } + if (to) kunmap_atomic(to); - - return 0; } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, @@ -637,7 +713,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) { - return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh)); + return thresh - bitmap_weight(bitmap, thresh); } /* @@ -677,9 +753,9 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, /* try to check and get the data blocks as needed */ space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); - if (space < data_needed) { - unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh; - unsigned long grow; + if ((space * DATA_BLOCK_SIZE) < data_needed) { + unsigned long blocks_left = + (udev->max_blocks - udev->dbi_thresh) + space; if (blocks_left < blocks_needed) { pr_debug("no data space: only %lu available, but ask for %zu\n", @@ -688,23 +764,9 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, return false; } - /* Try to expand the thresh */ - if (!udev->dbi_thresh) { - /* From idle state */ - uint32_t init_thresh = DATA_BLOCK_INIT_BITS; - - udev->dbi_thresh = max(blocks_needed, init_thresh); - } else { - /* - * Grow the data area by max(blocks needed, - * dbi_thresh / 2), but limited to the max - * DATA_BLOCK_BITS size. - */ - grow = max(blocks_needed, udev->dbi_thresh / 2); - udev->dbi_thresh += grow; - if (udev->dbi_thresh > DATA_BLOCK_BITS) - udev->dbi_thresh = DATA_BLOCK_BITS; - } + udev->dbi_thresh += blocks_needed; + if (udev->dbi_thresh > udev->max_blocks) + udev->dbi_thresh = udev->max_blocks; } return tcmu_get_empty_blocks(udev, cmd); @@ -731,14 +793,14 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, return command_size; } -static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd) +static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, + struct timer_list *timer) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; - unsigned long tmo = udev->cmd_time_out; int cmd_id; if (tcmu_cmd->cmd_id) - return 0; + goto setup_timer; cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); if (cmd_id < 0) { @@ -747,16 +809,58 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd) } tcmu_cmd->cmd_id = cmd_id; + pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, + udev->name, tmo / MSEC_PER_SEC); + +setup_timer: if (!tmo) return 0; tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); - mod_timer(&udev->timeout, tcmu_cmd->deadline); + mod_timer(timer, tcmu_cmd->deadline); return 0; } -static sense_reason_t -tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) +static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + unsigned int tmo; + int ret; + + /* + * For backwards compat if qfull_time_out is not set use + * cmd_time_out and if that's not set use the default time out. 
+ */ + if (!udev->qfull_time_out) + return -ETIMEDOUT; + else if (udev->qfull_time_out > 0) + tmo = udev->qfull_time_out; + else if (udev->cmd_time_out) + tmo = udev->cmd_time_out; + else + tmo = TCMU_TIME_OUT; + + ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); + if (ret) + return ret; + + list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); + pr_debug("adding cmd %u on dev %s to ring space wait queue\n", + tcmu_cmd->cmd_id, udev->name); + return 0; +} + +/** + * queue_cmd_ring - queue cmd to ring or internally + * @tcmu_cmd: cmd to queue + * @scsi_err: TCM error code if failure (-1) returned. + * + * Returns: + * -1 we cannot queue internally or to the ring. + * 0 success + * 1 internally queued to wait for ring memory to free. + */ +static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; struct se_cmd *se_cmd = tcmu_cmd->se_cmd; @@ -770,8 +874,17 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) bool copy_to_data_area; size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); - if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + *scsi_err = TCM_NO_SENSE; + + if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { + *scsi_err = TCM_LUN_BUSY; + return -1; + } + + if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { + *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -1; + } /* * Must be a certain minimum size for response sense info, but @@ -788,7 +901,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); - mutex_lock(&udev->cmdr_lock); + if (!list_empty(&udev->cmdr_queue)) + goto queue; mb = udev->mb_addr; cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ @@ -797,33 +911,18 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " "cmd ring/data area\n", command_size, data_length, udev->cmdr_size, udev->data_size); - mutex_unlock(&udev->cmdr_lock); - return TCM_INVALID_CDB_FIELD; + *scsi_err = TCM_INVALID_CDB_FIELD; + return -1; } - while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { - int ret; - DEFINE_WAIT(__wait); - - prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); - - pr_debug("sleeping for ring space\n"); - mutex_unlock(&udev->cmdr_lock); - if (udev->cmd_time_out) - ret = schedule_timeout( - msecs_to_jiffies(udev->cmd_time_out)); - else - ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); - finish_wait(&udev->wait_cmdr, &__wait); - if (!ret) { - pr_warn("tcmu: command timed out\n"); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } - - mutex_lock(&udev->cmdr_lock); - - /* We dropped cmdr_lock, cmd_head is stale */ - cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ + if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { + /* + * Don't leave commands partially setup because the unmap + * thread might need the blocks to make forward progress. 
+ */ + tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); + tcmu_cmd_reset_dbi_cur(tcmu_cmd); + goto queue; } /* Insert a PAD if end-of-ring space is too small */ @@ -855,41 +954,29 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) iov_cnt = 0; copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE || se_cmd->se_cmd_flags & SCF_BIDI); - ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, - se_cmd->t_data_nents, &iov, &iov_cnt, - copy_to_data_area); - if (ret) { - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); - mutex_unlock(&udev->cmdr_lock); - - pr_err("tcmu: alloc and scatter data failed\n"); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } + scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, + se_cmd->t_data_nents, &iov, &iov_cnt, + copy_to_data_area); entry->req.iov_cnt = iov_cnt; /* Handle BIDI commands */ iov_cnt = 0; if (se_cmd->se_cmd_flags & SCF_BIDI) { iov++; - ret = scatter_data_area(udev, tcmu_cmd, - se_cmd->t_bidi_data_sg, - se_cmd->t_bidi_data_nents, - &iov, &iov_cnt, false); - if (ret) { - tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); - mutex_unlock(&udev->cmdr_lock); - - pr_err("tcmu: alloc and scatter bidi data failed\n"); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } + scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg, + se_cmd->t_bidi_data_nents, &iov, &iov_cnt, + false); } entry->req.iov_bidi_cnt = iov_cnt; - ret = tcmu_setup_cmd_timer(tcmu_cmd); + ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, + &udev->cmd_timer); if (ret) { tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); mutex_unlock(&udev->cmdr_lock); - return TCM_OUT_OF_RESOURCES; + + *scsi_err = TCM_OUT_OF_RESOURCES; + return -1; } entry->hdr.cmd_id = tcmu_cmd->cmd_id; @@ -911,36 +998,40 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); - mutex_unlock(&udev->cmdr_lock); /* TODO: only if FLUSH and FUA? 
*/ uio_event_notify(&udev->uio_info); - if (udev->cmd_time_out) - mod_timer(&udev->timeout, round_jiffies_up(jiffies + - msecs_to_jiffies(udev->cmd_time_out))); + return 0; + +queue: + if (add_to_cmdr_queue(tcmu_cmd)) { + *scsi_err = TCM_OUT_OF_RESOURCES; + return -1; + } - return TCM_NO_SENSE; + return 1; } static sense_reason_t tcmu_queue_cmd(struct se_cmd *se_cmd) { + struct se_device *se_dev = se_cmd->se_dev; + struct tcmu_dev *udev = TCMU_DEV(se_dev); struct tcmu_cmd *tcmu_cmd; - sense_reason_t ret; + sense_reason_t scsi_ret; + int ret; tcmu_cmd = tcmu_alloc_cmd(se_cmd); if (!tcmu_cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - ret = tcmu_queue_cmd_ring(tcmu_cmd); - if (ret != TCM_NO_SENSE) { - pr_err("TCMU: Could not queue command\n"); - + mutex_lock(&udev->cmdr_lock); + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); + mutex_unlock(&udev->cmdr_lock); + if (ret < 0) tcmu_free_cmd(tcmu_cmd); - } - - return ret; + return scsi_ret; } static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) @@ -1011,12 +1102,10 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) } WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); - spin_lock(&udev->commands_lock); cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); - spin_unlock(&udev->commands_lock); - if (!cmd) { - pr_err("cmd_id not found, ring is broken\n"); + pr_err("cmd_id %u not found, ring is broken\n", + entry->hdr.cmd_id); set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); break; } @@ -1030,10 +1119,20 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) handled++; } - if (mb->cmd_tail == mb->cmd_head) - del_timer(&udev->timeout); /* no more pending cmds */ + if (mb->cmd_tail == mb->cmd_head) { + /* no more pending commands */ + del_timer(&udev->cmd_timer); - wake_up(&udev->wait_cmdr); + if (list_empty(&udev->cmdr_queue)) { + /* + * no more pending or waiting commands so try to + * reclaim blocks if needed. + */ + if (atomic_read(&global_db_count) > + tcmu_global_max_blocks) + schedule_delayed_work(&tcmu_unmap_work, 0); + } + } return handled; } @@ -1041,6 +1140,10 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) static int tcmu_check_expired_cmd(int id, void *p, void *data) { struct tcmu_cmd *cmd = p; + struct tcmu_dev *udev = cmd->tcmu_dev; + u8 scsi_status; + struct se_cmd *se_cmd; + bool is_running; if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) return 0; @@ -1048,29 +1151,61 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data) if (!time_after(jiffies, cmd->deadline)) return 0; - set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); - target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); - cmd->se_cmd = NULL; + is_running = list_empty(&cmd->cmdr_queue_entry); + se_cmd = cmd->se_cmd; + + if (is_running) { + /* + * If cmd_time_out is disabled but qfull is set deadline + * will only reflect the qfull timeout. Ignore it. + */ + if (!udev->cmd_time_out) + return 0; + + set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); + /* + * target_complete_cmd will translate this to LUN COMM FAILURE + */ + scsi_status = SAM_STAT_CHECK_CONDITION; + } else { + list_del_init(&cmd->cmdr_queue_entry); + + idr_remove(&udev->commands, id); + tcmu_free_cmd(cmd); + scsi_status = SAM_STAT_TASK_SET_FULL; + } + + pr_debug("Timing out cmd %u on dev %s that is %s.\n", + id, udev->name, is_running ? 
"inflight" : "queued"); + target_complete_cmd(se_cmd, scsi_status); return 0; } -static void tcmu_device_timedout(struct timer_list *t) +static void tcmu_device_timedout(struct tcmu_dev *udev) { - struct tcmu_dev *udev = from_timer(udev, t, timeout); - unsigned long flags; + spin_lock(&timed_out_udevs_lock); + if (list_empty(&udev->timedout_entry)) + list_add_tail(&udev->timedout_entry, &timed_out_udevs); + spin_unlock(&timed_out_udevs_lock); - spin_lock_irqsave(&udev->commands_lock, flags); - idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); - spin_unlock_irqrestore(&udev->commands_lock, flags); + schedule_delayed_work(&tcmu_unmap_work, 0); +} - /* Try to wake up the ummap thread */ - wake_up(&unmap_wait); +static void tcmu_cmd_timedout(struct timer_list *t) +{ + struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); - /* - * We don't need to wakeup threads on wait_cmdr since they have their - * own timeout. - */ + pr_debug("%s cmd timeout has expired\n", udev->name); + tcmu_device_timedout(udev); +} + +static void tcmu_qfull_timedout(struct timer_list *t) +{ + struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); + + pr_debug("%s qfull timeout has expired\n", udev->name); + tcmu_device_timedout(udev); } static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) @@ -1110,14 +1245,17 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->hba = hba; udev->cmd_time_out = TCMU_TIME_OUT; + udev->qfull_time_out = -1; - init_waitqueue_head(&udev->wait_cmdr); + udev->max_blocks = DATA_BLOCK_BITS_DEF; mutex_init(&udev->cmdr_lock); + INIT_LIST_HEAD(&udev->timedout_entry); + INIT_LIST_HEAD(&udev->cmdr_queue); idr_init(&udev->commands); - spin_lock_init(&udev->commands_lock); - timer_setup(&udev->timeout, tcmu_device_timedout, 0); + timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); + timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); init_waitqueue_head(&udev->nl_cmd_wq); spin_lock_init(&udev->nl_cmd_lock); @@ -1127,13 +1265,79 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) return &udev->se_dev; } +static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) +{ + struct tcmu_cmd *tcmu_cmd, *tmp_cmd; + LIST_HEAD(cmds); + bool drained = true; + sense_reason_t scsi_ret; + int ret; + + if (list_empty(&udev->cmdr_queue)) + return true; + + pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); + + list_splice_init(&udev->cmdr_queue, &cmds); + + list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { + list_del_init(&tcmu_cmd->cmdr_queue_entry); + + pr_debug("removing cmd %u on dev %s from queue\n", + tcmu_cmd->cmd_id, udev->name); + + if (fail) { + idr_remove(&udev->commands, tcmu_cmd->cmd_id); + /* + * We were not able to even start the command, so + * fail with busy to allow a retry in case runner + * was only temporarily down. If the device is being + * removed then LIO core will do the right thing and + * fail the retry. + */ + target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); + tcmu_free_cmd(tcmu_cmd); + continue; + } + + ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); + if (ret < 0) { + pr_debug("cmd %u on dev %s failed with %u\n", + tcmu_cmd->cmd_id, udev->name, scsi_ret); + + idr_remove(&udev->commands, tcmu_cmd->cmd_id); + /* + * Ignore scsi_ret for now. target_complete_cmd + * drops it. 
+ */ + target_complete_cmd(tcmu_cmd->se_cmd, + SAM_STAT_CHECK_CONDITION); + tcmu_free_cmd(tcmu_cmd); + } else if (ret > 0) { + pr_debug("ran out of space during cmdr queue run\n"); + /* + * cmd was requeued, so just put all cmds back in + * the queue + */ + list_splice_tail(&cmds, &udev->cmdr_queue); + drained = false; + goto done; + } + } + if (list_empty(&udev->cmdr_queue)) + del_timer(&udev->qfull_timer); +done: + return drained; +} + static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) { - struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); - mutex_lock(&tcmu_dev->cmdr_lock); - tcmu_handle_completions(tcmu_dev); - mutex_unlock(&tcmu_dev->cmdr_lock); + mutex_lock(&udev->cmdr_lock); + tcmu_handle_completions(udev); + run_cmdr_queue(udev, false); + mutex_unlock(&udev->cmdr_lock); return 0; } @@ -1158,7 +1362,6 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma) static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) { struct page *page; - int ret; mutex_lock(&udev->cmdr_lock); page = tcmu_get_block_page(udev, dbi); @@ -1168,42 +1371,12 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) } /* - * Normally it shouldn't be here: - * Only when the userspace has touched the blocks which - * are out of the tcmu_cmd's data iov[], and will return - * one zeroed page. + * Userspace messed up and passed in a address not in the + * data iov passed to it. */ - pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi); - pr_warn("Mostly it will be a bug of userspace, please have a check!\n"); - - if (dbi >= udev->dbi_thresh) { - /* Extern the udev->dbi_thresh to dbi + 1 */ - udev->dbi_thresh = dbi + 1; - udev->dbi_max = dbi; - } - - page = radix_tree_lookup(&udev->data_blocks, dbi); - if (!page) { - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - mutex_unlock(&udev->cmdr_lock); - return NULL; - } - - ret = radix_tree_insert(&udev->data_blocks, dbi, page); - if (ret) { - mutex_unlock(&udev->cmdr_lock); - __free_page(page); - return NULL; - } - - /* - * Since this case is rare in page fault routine, here we - * will allow the global_db_count >= TCMU_GLOBAL_MAX_BLOCKS - * to reduce possible page fault call trace. 
- */ - atomic_inc(&global_db_count); - } + pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n", + dbi, udev->name); + page = NULL; mutex_unlock(&udev->cmdr_lock); return page; @@ -1238,7 +1411,7 @@ static int tcmu_vma_fault(struct vm_fault *vmf) dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; page = tcmu_try_get_block_page(udev, dbi); if (!page) - return VM_FAULT_NOPAGE; + return VM_FAULT_SIGBUS; } get_page(page); @@ -1260,7 +1433,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) vma->vm_private_data = udev; /* Ensure the mmap is exactly the right size */ - if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) + if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT)) return -EINVAL; return 0; @@ -1301,21 +1474,19 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) return -EINVAL; } -static void tcmu_blocks_release(struct tcmu_dev *udev) +static void tcmu_blocks_release(struct radix_tree_root *blocks, + int start, int end) { int i; struct page *page; - /* Try to release all block pages */ - mutex_lock(&udev->cmdr_lock); - for (i = 0; i <= udev->dbi_max; i++) { - page = radix_tree_delete(&udev->data_blocks, i); + for (i = start; i < end; i++) { + page = radix_tree_delete(blocks, i); if (page) { __free_page(page); atomic_dec(&global_db_count); } } - mutex_unlock(&udev->cmdr_lock); } static void tcmu_dev_kref_release(struct kref *kref) @@ -1329,17 +1500,23 @@ static void tcmu_dev_kref_release(struct kref *kref) vfree(udev->mb_addr); udev->mb_addr = NULL; + spin_lock_bh(&timed_out_udevs_lock); + if (!list_empty(&udev->timedout_entry)) + list_del(&udev->timedout_entry); + spin_unlock_bh(&timed_out_udevs_lock); + /* Upper layer should drain all requests before calling this */ - spin_lock_irq(&udev->commands_lock); + mutex_lock(&udev->cmdr_lock); idr_for_each_entry(&udev->commands, cmd, i) { if (tcmu_check_and_free_pending_cmd(cmd) != 0) all_expired = false; } idr_destroy(&udev->commands); - spin_unlock_irq(&udev->commands_lock); WARN_ON(!all_expired); - tcmu_blocks_release(udev); + tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); + kfree(udev->data_bitmap); + mutex_unlock(&udev->cmdr_lock); call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); } @@ -1406,7 +1583,7 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) wake_up_all(&udev->nl_cmd_wq); - return ret;; + return ret; } static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, @@ -1515,6 +1692,13 @@ static int tcmu_configure_device(struct se_device *dev) info = &udev->uio_info; + udev->data_bitmap = kzalloc(BITS_TO_LONGS(udev->max_blocks) * + sizeof(unsigned long), GFP_KERNEL); + if (!udev->data_bitmap) { + ret = -ENOMEM; + goto err_bitmap_alloc; + } + udev->mb_addr = vzalloc(CMDR_SIZE); if (!udev->mb_addr) { ret = -ENOMEM; @@ -1524,9 +1708,8 @@ static int tcmu_configure_device(struct se_device *dev) /* mailbox fits in first part of CMDR space */ udev->cmdr_size = CMDR_SIZE - CMDR_OFF; udev->data_off = CMDR_SIZE; - udev->data_size = DATA_SIZE; + udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE; udev->dbi_thresh = 0; /* Default in Idle state */ - udev->waiting_global = false; /* Initialise the mailbox of the ring buffer */ mb = udev->mb_addr; @@ -1543,7 +1726,7 @@ static int tcmu_configure_device(struct se_device *dev) info->mem[0].name = "tcm-user command & data buffer"; info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; - info->mem[0].size = TCMU_RING_SIZE; + info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE; 
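Two userspace-facing consequences of the resizable data area show up above: tcmu_vma_fault() now returns VM_FAULT_SIGBUS for touches outside the iovecs handed to userspace, and tcmu_mmap() validates against the per-device ring_size rather than the old fixed TCMU_RING_SIZE. Using the defaults and the 8M figure from the ring-size comment earlier (4 KiB pages assumed):

/*
 * CMDR_SIZE = 8 MiB                          mailbox + command ring
 * data_size = max_blocks * DATA_BLOCK_SIZE   default 256K * 4 KiB = 1 GiB
 * ring_size = CMDR_SIZE + data_size
 *
 * vma_pages(vma) must equal ring_size >> PAGE_SHIFT: userspace has to
 * mmap() the whole region, no more and no less.
 */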
info->mem[0].memtype = UIO_MEM_NONE; info->irqcontrol = tcmu_irqcontrol; @@ -1596,6 +1779,9 @@ static int tcmu_configure_device(struct se_device *dev) vfree(udev->mb_addr); udev->mb_addr = NULL; err_vzalloc: + kfree(udev->data_bitmap); + udev->data_bitmap = NULL; +err_bitmap_alloc: kfree(info->name); info->name = NULL; @@ -1619,7 +1805,8 @@ static void tcmu_destroy_device(struct se_device *dev) { struct tcmu_dev *udev = TCMU_DEV(dev); - del_timer_sync(&udev->timeout); + del_timer_sync(&udev->cmd_timer); + del_timer_sync(&udev->qfull_timer); mutex_lock(&root_udev_mutex); list_del(&udev->node); @@ -1633,9 +1820,81 @@ static void tcmu_destroy_device(struct se_device *dev) kref_put(&udev->kref, tcmu_dev_kref_release); } +static void tcmu_unblock_dev(struct tcmu_dev *udev) +{ + mutex_lock(&udev->cmdr_lock); + clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); + mutex_unlock(&udev->cmdr_lock); +} + +static void tcmu_block_dev(struct tcmu_dev *udev) +{ + mutex_lock(&udev->cmdr_lock); + + if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) + goto unlock; + + /* complete IO that has executed successfully */ + tcmu_handle_completions(udev); + /* fail IO waiting to be queued */ + run_cmdr_queue(udev, true); + +unlock: + mutex_unlock(&udev->cmdr_lock); +} + +static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) +{ + struct tcmu_mailbox *mb; + struct tcmu_cmd *cmd; + int i; + + mutex_lock(&udev->cmdr_lock); + + idr_for_each_entry(&udev->commands, cmd, i) { + if (!list_empty(&cmd->cmdr_queue_entry)) + continue; + + pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", + cmd->cmd_id, udev->name, + test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); + + idr_remove(&udev->commands, i); + if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { + if (err_level == 1) { + /* + * Userspace was not able to start the + * command or it is retryable. 
+ */ + target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); + } else { + /* hard failure */ + target_complete_cmd(cmd->se_cmd, + SAM_STAT_CHECK_CONDITION); + } + } + tcmu_cmd_free_data(cmd, cmd->dbi_cnt); + tcmu_free_cmd(cmd); + } + + mb = udev->mb_addr; + tcmu_flush_dcache_range(mb, sizeof(*mb)); + pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, + mb->cmd_tail, mb->cmd_head); + + udev->cmdr_last_cleaned = 0; + mb->cmd_tail = 0; + mb->cmd_head = 0; + tcmu_flush_dcache_range(mb, sizeof(*mb)); + + del_timer(&udev->cmd_timer); + + mutex_unlock(&udev->cmdr_lock); +} + enum { Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, - Opt_nl_reply_supported, Opt_err, + Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err, }; static match_table_t tokens = { @@ -1644,6 +1903,7 @@ static match_table_t tokens = { {Opt_hw_block_size, "hw_block_size=%u"}, {Opt_hw_max_sectors, "hw_max_sectors=%u"}, {Opt_nl_reply_supported, "nl_reply_supported=%d"}, + {Opt_max_data_area_mb, "max_data_area_mb=%u"}, {Opt_err, NULL} }; @@ -1677,7 +1937,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, struct tcmu_dev *udev = TCMU_DEV(dev); char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; - int ret = 0, token; + int ret = 0, token, tmpval; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -1729,6 +1989,39 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, if (ret < 0) pr_err("kstrtoint() failed for nl_reply_supported=\n"); break; + case Opt_max_data_area_mb: + if (dev->export_count) { + pr_err("Unable to set max_data_area_mb while exports exist\n"); + ret = -EINVAL; + break; + } + + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtoint(arg_p, 0, &tmpval); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoint() failed for max_data_area_mb=\n"); + break; + } + + if (tmpval <= 0) { + pr_err("Invalid max_data_area %d\n", tmpval); + ret = -EINVAL; + break; + } + + udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval); + if (udev->max_blocks > tcmu_global_max_blocks) { + pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", + tmpval, + TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); + udev->max_blocks = tcmu_global_max_blocks; + } + break; default: break; } @@ -1748,7 +2041,9 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) bl = sprintf(b + bl, "Config: %s ", udev->dev_config[0] ? udev->dev_config : "NULL"); - bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); + bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); + bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", + TCMU_BLOCKS_TO_MBS(udev->max_blocks)); return bl; } @@ -1800,6 +2095,51 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag } CONFIGFS_ATTR(tcmu_, cmd_time_out); +static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? 
+ udev->qfull_time_out : + udev->qfull_time_out / MSEC_PER_SEC); +} + +static ssize_t tcmu_qfull_time_out_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + s32 val; + int ret; + + ret = kstrtos32(page, 0, &val); + if (ret < 0) + return ret; + + if (val >= 0) { + udev->qfull_time_out = val * MSEC_PER_SEC; + } else { + printk(KERN_ERR "Invalid qfull timeout value %d\n", val); + return -EINVAL; + } + return count; +} +CONFIGFS_ATTR(tcmu_, qfull_time_out); + +static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%u\n", + TCMU_BLOCKS_TO_MBS(udev->max_blocks)); +} +CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); + static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) { struct se_dev_attrib *da = container_of(to_config_group(item), @@ -1943,8 +2283,74 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, } CONFIGFS_ATTR(tcmu_, emulate_write_cache); +static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + + if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); +} + +static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + u8 val; + int ret; + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + + if (val > 1) { + pr_err("Invalid block value %d\n", val); + return -EINVAL; + } + + if (!val) + tcmu_unblock_dev(udev); + else + tcmu_block_dev(udev); + return count; +} +CONFIGFS_ATTR(tcmu_, block_dev); + +static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_device *se_dev = container_of(to_config_group(item), + struct se_device, + dev_action_group); + struct tcmu_dev *udev = TCMU_DEV(se_dev); + u8 val; + int ret; + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + + if (val != 1 && val != 2) { + pr_err("Invalid reset ring value %d\n", val); + return -EINVAL; + } + + tcmu_reset_ring(udev, val); + return count; +} +CONFIGFS_ATTR_WO(tcmu_, reset_ring); + static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_cmd_time_out, + &tcmu_attr_qfull_time_out, + &tcmu_attr_max_data_area_mb, &tcmu_attr_dev_config, &tcmu_attr_dev_size, &tcmu_attr_emulate_write_cache, @@ -1954,6 +2360,12 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = { static struct configfs_attribute **tcmu_attrs; +static struct configfs_attribute *tcmu_action_attrs[] = { + &tcmu_attr_block_dev, + &tcmu_attr_reset_ring, + NULL, +}; + static struct target_backend_ops tcmu_ops = { .name = "user", .owner = THIS_MODULE, @@ -1969,85 +2381,93 @@ static struct target_backend_ops tcmu_ops = { .show_configfs_dev_params = tcmu_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = tcmu_get_blocks, - .tb_dev_attrib_attrs = NULL, + 
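block_dev and reset_ring live in the new per-device action group rather than attrib because they trigger recovery operations instead of tuning options. A typical flow after the userspace handler has crashed, as a sketch only (the configfs path is an example):

/*
 *   cd /sys/kernel/config/target/core/user_0/dev0
 *   echo 1 > action/block_dev    # new I/O now completes with SAM BUSY
 *   echo 1 > action/reset_ring   # complete inflight ring entries
 *                                #   (1 = retryable BUSY, 2 = hard
 *                                #    CHECK CONDITION), zero head/tail
 *   (restart the daemon and let it re-mmap the ring)
 *   echo 0 > action/block_dev    # resume normal dispatch
 */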
@@ -1969,85 +2381,93 @@ .show_configfs_dev_params = tcmu_show_configfs_dev_params, .get_device_type = sbc_get_device_type, .get_blocks = tcmu_get_blocks, - .tb_dev_attrib_attrs = NULL, + .tb_dev_action_attrs = tcmu_action_attrs, }; -static int unmap_thread_fn(void *data) +static void find_free_blocks(void) { struct tcmu_dev *udev; loff_t off; - uint32_t start, end, block; - struct page *page; - int i; + u32 start, end, block, total_freed = 0; - while (!kthread_should_stop()) { - DEFINE_WAIT(__wait); + if (atomic_read(&global_db_count) <= tcmu_global_max_blocks) + return; - prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); - schedule(); - finish_wait(&unmap_wait, &__wait); + mutex_lock(&root_udev_mutex); + list_for_each_entry(udev, &root_udev, node) { + mutex_lock(&udev->cmdr_lock); - if (kthread_should_stop()) - break; + /* Try to complete the finished commands first */ + tcmu_handle_completions(udev); - mutex_lock(&root_udev_mutex); - list_for_each_entry(udev, &root_udev, node) { - mutex_lock(&udev->cmdr_lock); + /* Skip the udevs in idle */ + if (!udev->dbi_thresh) { + mutex_unlock(&udev->cmdr_lock); + continue; + } - /* Try to complete the finished commands first */ - tcmu_handle_completions(udev); + end = udev->dbi_max + 1; + block = find_last_bit(udev->data_bitmap, end); + if (block == udev->dbi_max) { + /* + * The last bit is dbi_max, so it is not possible + * to reclaim any blocks. + */ + mutex_unlock(&udev->cmdr_lock); + continue; + } else if (block == end) { + /* The current udev will go to the idle state */ + udev->dbi_thresh = start = 0; + udev->dbi_max = 0; + } else { + udev->dbi_thresh = start = block + 1; + udev->dbi_max = block; + } - /* Skip the udevs waiting the global pool or in idle */ - if (udev->waiting_global || !udev->dbi_thresh) { - mutex_unlock(&udev->cmdr_lock); - continue; - } + /* Truncate the data area from offset off */ + off = udev->data_off + start * DATA_BLOCK_SIZE; + unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); - end = udev->dbi_max + 1; - block = find_last_bit(udev->data_bitmap, end); - if (block == udev->dbi_max) { - /* - * The last bit is dbi_max, so there is - * no need to shrink any blocks. - */ - mutex_unlock(&udev->cmdr_lock); - continue; - } else if (block == end) { - /* The current udev will goto idle state */ - udev->dbi_thresh = start = 0; - udev->dbi_max = 0; - } else { - udev->dbi_thresh = start = block + 1; - udev->dbi_max = block; - } + /* Release the block pages */ + tcmu_blocks_release(&udev->data_blocks, start, end); + mutex_unlock(&udev->cmdr_lock); - /* Here will truncate the data area from off */ - off = udev->data_off + start * DATA_BLOCK_SIZE; - unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); - - /* Release the block pages */ - for (i = start; i < end; i++) { - page = radix_tree_delete(&udev->data_blocks, i); - if (page) { - __free_page(page); - atomic_dec(&global_db_count); - } - } - mutex_unlock(&udev->cmdr_lock); - } + total_freed += end - start; + pr_debug("Freed %u blocks (total %u) from %s.\n", end - start, + total_freed, udev->name); + } + mutex_unlock(&root_udev_mutex); - /* - * Try to wake up the udevs who are waiting - * for the global data pool. - */ - list_for_each_entry(udev, &root_udev, node) { - if (udev->waiting_global) - wake_up(&udev->wait_cmdr); - } - mutex_unlock(&root_udev_mutex); + if (atomic_read(&global_db_count) > tcmu_global_max_blocks) + schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000)); +}
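With find_free_blocks() rescheduling itself as shown above, the dedicated unmap kthread can go away entirely: cleanup becomes a delayed work item, and teardown shrinks to a single cancel_delayed_work_sync() call, as the module init/exit hunks below show. A self-contained sketch of this self-rearming pattern, with invented names throughout:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static atomic_t demo_backlog = ATOMIC_INIT(3);

static void demo_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_work, demo_work_fn);

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: backlog %d\n", atomic_read(&demo_backlog));

	/* Re-arm only while work remains, just as find_free_blocks()
	 * reschedules while global_db_count exceeds the limit. */
	if (atomic_dec_return(&demo_backlog) > 0)
		schedule_delayed_work(&demo_work, msecs_to_jiffies(5000));
}

static int __init demo_init(void)
{
	schedule_delayed_work(&demo_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	/* No kthread_stop() dance: just make sure the work cannot
	 * run or re-arm after the module text is gone. */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

One subtlety that makes this conversion safe: cancel_delayed_work_sync() is documented to work even when the handler re-queues itself, which is exactly the shutdown race the old kthread_should_stop() loop had to handle by hand.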
+ +static void check_timedout_devices(void) +{ + struct tcmu_dev *udev, *tmp_dev; + LIST_HEAD(devs); + + spin_lock_bh(&timed_out_udevs_lock); + list_splice_init(&timed_out_udevs, &devs); + + list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { + list_del_init(&udev->timedout_entry); + spin_unlock_bh(&timed_out_udevs_lock); + + mutex_lock(&udev->cmdr_lock); + idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); + mutex_unlock(&udev->cmdr_lock); + + spin_lock_bh(&timed_out_udevs_lock); } - return 0; + spin_unlock_bh(&timed_out_udevs_lock); +} + +static void tcmu_unmap_work_fn(struct work_struct *work) +{ + check_timedout_devices(); + find_free_blocks(); } static int __init tcmu_module_init(void) @@ -2056,6 +2476,8 @@ static int __init tcmu_module_init(void) BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); + INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); + tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", sizeof(struct tcmu_cmd), __alignof__(struct tcmu_cmd), @@ -2101,17 +2523,8 @@ static int __init tcmu_module_init(void) if (ret) goto out_attrs; - init_waitqueue_head(&unmap_wait); - unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap"); - if (IS_ERR(unmap_thread)) { - ret = PTR_ERR(unmap_thread); - goto out_unreg_transport; - } - return 0; -out_unreg_transport: - target_backend_unregister(&tcmu_ops); out_attrs: kfree(tcmu_attrs); out_unreg_genl: @@ -2126,7 +2539,7 @@ static int __init tcmu_module_init(void) static void __exit tcmu_module_exit(void) { - kthread_stop(unmap_thread); + cancel_delayed_work_sync(&tcmu_unmap_work); target_backend_unregister(&tcmu_ops); kfree(tcmu_attrs); genl_unregister_family(&tcmu_genl_family); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 3b3af7e0ce1c7..3b3e1f6632d71 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2477,11 +2477,11 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (tty_hung_up_p(file)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; if (gsm->dead) - mask |= POLLHUP; + mask |= EPOLLHUP; return mask; } diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 929434ebee506..dabb391909aad 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -814,14 +814,14 @@ static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, /* set bits for operations that won't block */ if (!list_empty(&n_hdlc->rx_buf_list.list)) - mask |= POLLIN | POLLRDNORM; /* readable */ + mask |= EPOLLIN | EPOLLRDNORM; /* readable */ if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (tty_hung_up_p(filp)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (!tty_is_writelocked(tty) && !list_empty(&n_hdlc->tx_free_buf_list.list)) - mask |= POLLOUT | POLLWRNORM; /* writable */ + mask |= EPOLLOUT | EPOLLWRNORM; /* writable */ } return mask; } /* end of n_hdlc_tty_poll() */ diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c index e81d3db8ad632..dbf1ab36758eb 100644 --- a/drivers/tty/n_r3964.c +++ b/drivers/tty/n_r3964.c @@ -1223,7
+1223,7 @@ static __poll_t r3964_poll(struct tty_struct *tty, struct file *file, struct r3964_client_info *pClient; struct r3964_message *pMsg = NULL; unsigned long flags; - __poll_t result = POLLOUT; + __poll_t result = EPOLLOUT; TRACE_L("POLL"); @@ -1234,7 +1234,7 @@ static __poll_t r3964_poll(struct tty_struct *tty, struct file *file, pMsg = pClient->first_msg; spin_unlock_irqrestore(&pInfo->lock, flags); if (pMsg) - result |= POLLIN | POLLRDNORM; + result |= EPOLLIN | EPOLLRDNORM; } else { result = -EINVAL; } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 478a9b40fd039..5c0e59e8fe46b 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1344,7 +1344,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); return 0; } } @@ -1625,7 +1625,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); } } @@ -2376,22 +2376,22 @@ static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (input_available_p(tty, 1)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; else { tty_buffer_flush_work(tty->port); if (input_available_p(tty, 1)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } if (tty->packet && tty->link->ctrl_status) - mask |= POLLPRI | POLLIN | POLLRDNORM; + mask |= EPOLLPRI | EPOLLIN | EPOLLRDNORM; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (tty_hung_up_p(file)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (tty->ops->write && !tty_is_writelocked(tty) && tty_chars_in_buffer(tty) < WAKEUP_CHARS && tty_write_room(tty) > 0) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 64338442050ef..6c7151edd7155 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -344,7 +344,7 @@ static void pty_start(struct tty_struct *tty) tty->ctrl_status &= ~TIOCPKT_STOP; tty->ctrl_status |= TIOCPKT_START; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN); } } @@ -357,7 +357,7 @@ static void pty_stop(struct tty_struct *tty) tty->ctrl_status &= ~TIOCPKT_START; tty->ctrl_status |= TIOCPKT_STOP; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->link->read_wait, EPOLLIN); } } diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 4c8b80f1c688c..870e84fb6e39e 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c @@ -197,25 +197,20 @@ int __init setup_earlycon(char *buf) } /* - * When CONFIG_ACPI_SPCR_TABLE is defined, "earlycon" without parameters in - * command line does not start DT earlycon immediately, instead it defers - * starting it until DT/ACPI decision is made. 
At that time if ACPI is enabled - * call parse_spcr(), else call early_init_dt_scan_chosen_stdout() + * This defers the initialization of the early console until after ACPI has + * been initialized. */ -bool earlycon_init_is_deferred __initdata; +bool earlycon_acpi_spcr_enable __initdata; /* early_param wrapper for setup_earlycon() */ static int __init param_setup_earlycon(char *buf) { int err; - /* - * Just 'earlycon' is a valid param for devicetree earlycons; - * don't generate a warning from parse_early_params() in that case - */ + /* Just 'earlycon' is a valid param for devicetree and ACPI SPCR. */ if (!buf || !buf[0]) { if (IS_ENABLED(CONFIG_ACPI_SPCR_TABLE)) { - earlycon_init_is_deferred = true; + earlycon_acpi_spcr_enable = true; return 0; } else if (!buf) { return early_init_dt_scan_chosen_stdout(); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 6a89835453d3b..eb9133b472f48 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -445,7 +445,7 @@ static ssize_t hung_up_tty_write(struct file *file, const char __user *buf, /* No kernel lock held - none needed ;) */ static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait) { - return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; + return EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLRDNORM | EPOLLWRNORM; } static long hung_up_tty_ioctl(struct file *file, unsigned int cmd, @@ -533,7 +533,7 @@ void tty_wakeup(struct tty_struct *tty) tty_ldisc_deref(ld); } } - wake_up_interruptible_poll(&tty->write_wait, POLLOUT); + wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); } EXPORT_SYMBOL_GPL(tty_wakeup); @@ -867,7 +867,7 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, static void tty_write_unlock(struct tty_struct *tty) { mutex_unlock(&tty->atomic_write_lock); - wake_up_interruptible_poll(&tty->write_wait, POLLOUT); + wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); } static int tty_write_lock(struct tty_struct *tty, int ndelay) @@ -1667,21 +1667,21 @@ int tty_release(struct inode *inode, struct file *filp) if (tty->count <= 1) { if (waitqueue_active(&tty->read_wait)) { - wake_up_poll(&tty->read_wait, POLLIN); + wake_up_poll(&tty->read_wait, EPOLLIN); do_sleep++; } if (waitqueue_active(&tty->write_wait)) { - wake_up_poll(&tty->write_wait, POLLOUT); + wake_up_poll(&tty->write_wait, EPOLLOUT); do_sleep++; } } if (o_tty && o_tty->count <= 1) { if (waitqueue_active(&o_tty->read_wait)) { - wake_up_poll(&o_tty->read_wait, POLLIN); + wake_up_poll(&o_tty->read_wait, EPOLLIN); do_sleep++; } if (waitqueue_active(&o_tty->write_wait)) { - wake_up_poll(&o_tty->write_wait, POLLOUT); + wake_up_poll(&o_tty->write_wait, EPOLLOUT); do_sleep++; } } diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 4e7946c0484bf..050f4d6508917 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -735,8 +735,8 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit) tty_ldisc_deref(ld); } - wake_up_interruptible_poll(&tty->write_wait, POLLOUT); - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); /* * Shutdown the current line discipline, and reset it to diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 3e64ccd0040f8..e4a66e1fd05fb 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -563,7 +563,7 @@ static __poll_t vcs_poll(struct file *file, poll_table *wait) { struct vcs_poll_data 
*poll = vcs_poll_data_get(file); - __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI; + __poll_t ret = DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; if (poll) { poll_wait(file, &poll->waitq, wait); diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 85bc1aaea4a42..fd4848392e0dd 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -506,7 +506,7 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) poll_wait(filep, &idev->wait, wait); if (listener->event_count != atomic_read(&idev->event)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index f699abab17875..148f3ee702868 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO config USB_EHCI_BIG_ENDIAN_DESC bool +config USB_UHCI_BIG_ENDIAN_MMIO + bool + +config USB_UHCI_BIG_ENDIAN_DESC + bool + menuconfig USB_SUPPORT bool "USB support" depends on HAS_IOMEM diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 9627ea6ec3aea..a0d284ef3f40a 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -603,16 +603,16 @@ static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait) spin_lock_irqsave(&desc->iuspin, flags); if (test_bit(WDM_DISCONNECTING, &desc->flags)) { - mask = POLLHUP | POLLERR; + mask = EPOLLHUP | EPOLLERR; spin_unlock_irqrestore(&desc->iuspin, flags); goto desc_out; } if (test_bit(WDM_READ, &desc->flags)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; if (desc->rerr || desc->werr) - mask |= POLLERR; + mask |= EPOLLERR; if (!test_bit(WDM_IN_USE, &desc->flags)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&desc->iuspin, flags); poll_wait(file, &desc->wait, wait); diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 425247b7f7281..d058d7a31e7c3 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -479,8 +479,8 @@ static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait) poll_wait(file, &usblp->rwait, wait); poll_wait(file, &usblp->wwait, wait); spin_lock_irqsave(&usblp->lock, flags); - ret = ((usblp->bidir && usblp->rcomplete) ? POLLIN | POLLRDNORM : 0) | - ((usblp->no_paper || usblp->wcomplete) ? POLLOUT | POLLWRNORM : 0); + ret = ((usblp->bidir && usblp->rcomplete) ? EPOLLIN | EPOLLRDNORM : 0) | + ((usblp->no_paper || usblp->wcomplete) ? EPOLLOUT | EPOLLWRNORM : 0); spin_unlock_irqrestore(&usblp->lock, flags); return ret; } diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 7ea67a55be103..bdb1de0c0cef6 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1265,13 +1265,13 @@ static __poll_t usbtmc_poll(struct file *file, poll_table *wait) mutex_lock(&data->io_mutex); if (data->zombie) { - mask = POLLHUP | POLLERR; + mask = EPOLLHUP | EPOLLERR; goto no_poll; } poll_wait(file, &data->waitq, wait); - mask = (atomic_read(&data->srq_asserted)) ? POLLIN | POLLRDNORM : 0; + mask = (atomic_read(&data->srq_asserted)) ? 
EPOLLIN | EPOLLRDNORM : 0; no_poll: mutex_unlock(&data->io_mutex); diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index e2cec448779e4..3de3c750b5f6e 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -632,7 +632,7 @@ static __poll_t usb_device_poll(struct file *file, event_count = atomic_read(&device_event.count); if (file->f_version != event_count) { file->f_version = event_count; - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } return 0; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index bf00166cbee01..d526595bc959c 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -2578,11 +2578,11 @@ static __poll_t usbdev_poll(struct file *file, poll_wait(file, &ps->wait, wait); if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; if (!connected(ps)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (list_empty(&ps->list)) - mask |= POLLERR; + mask |= EPOLLERR; return mask; } diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 67564725e3710..8f2cf3baa19c1 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -644,7 +644,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait) { struct ffs_data *ffs = file->private_data; - __poll_t mask = POLLWRNORM; + __poll_t mask = EPOLLWRNORM; int ret; poll_wait(file, &ffs->ev.waitq, wait); @@ -656,19 +656,19 @@ static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait) switch (ffs->state) { case FFS_READ_DESCRIPTORS: case FFS_READ_STRINGS: - mask |= POLLOUT; + mask |= EPOLLOUT; break; case FFS_ACTIVE: switch (ffs->setup_state) { case FFS_NO_SETUP: if (ffs->ev.count) - mask |= POLLIN; + mask |= EPOLLIN; break; case FFS_SETUP_PENDING: case FFS_SETUP_CANCELLED: - mask |= (POLLIN | POLLOUT); + mask |= (EPOLLIN | EPOLLOUT); break; } case FFS_CLOSING: diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index a73efb1c47d0a..54e859dcb25c3 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -422,10 +422,10 @@ static __poll_t f_hidg_poll(struct file *file, poll_table *wait) poll_wait(file, &hidg->write_queue, wait); if (WRITE_COND) - ret |= POLLOUT | POLLWRNORM; + ret |= EPOLLOUT | EPOLLWRNORM; if (READ_COND) - ret |= POLLIN | POLLRDNORM; + ret |= EPOLLIN | EPOLLRDNORM; return ret; } diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 453578c4af696..d359efe06c769 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -698,11 +698,11 @@ printer_poll(struct file *fd, poll_table *wait) spin_lock_irqsave(&dev->lock, flags); if (likely(!list_empty(&dev->tx_reqs))) - status |= POLLOUT | POLLWRNORM; + status |= EPOLLOUT | EPOLLWRNORM; if (likely(dev->current_rx_bytes) || likely(!list_empty(&dev->rx_buffers))) - status |= POLLIN | POLLRDNORM; + status |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&dev->lock, flags); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 5960e76f4c751..37ca0e669bd85 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1225,16 +1225,16 @@ ep0_poll (struct file *fd, poll_table *wait) /* report fd mode change before acting on it */ if (dev->setup_abort) { 
dev->setup_abort = 0; - mask = POLLHUP; + mask = EPOLLHUP; goto out; } if (dev->state == STATE_DEV_SETUP) { if (dev->setup_in || dev->setup_can_stall) - mask = POLLOUT; + mask = EPOLLOUT; } else { if (dev->ev_next != 0) - mask = POLLIN; + mask = EPOLLIN; } out: spin_unlock_irq(&dev->lock); diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 6150bed7cfa80..4fcfb3084b368 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -633,14 +633,6 @@ config USB_UHCI_ASPEED bool default y if ARCH_ASPEED -config USB_UHCI_BIG_ENDIAN_MMIO - bool - default y if SPARC_LEON - -config USB_UHCI_BIG_ENDIAN_DESC - bool - default y if SPARC_LEON - config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" depends on OF_GPIO && QE_GPIO && QUICC_ENGINE diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 1fa00b35f4adb..8d33187ce2af3 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -683,19 +683,19 @@ static __poll_t iowarrior_poll(struct file *file, poll_table * wait) __poll_t mask = 0; if (!dev->present) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); poll_wait(file, &dev->write_wait, wait); if (!dev->present) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; if (read_index(dev) != -1) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (atomic_read(&dev->write_busy) < MAX_WRITES_IN_FLIGHT) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 074398c1e410f..63b9e85dc0e93 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -417,15 +417,15 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait) dev = file->private_data; if (!dev->intf) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); poll_wait(file, &dev->write_wait, wait); if (dev->ring_head != dev->ring_tail) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (!dev->interrupt_out_busy) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 941c45028828d..bf47bd8bc76f7 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -517,17 +517,17 @@ static __poll_t tower_poll (struct file *file, poll_table *wait) dev = file->private_data; if (!dev->udev) - return POLLERR | POLLHUP; + return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); poll_wait(file, &dev->write_wait, wait); tower_check_for_read_packet(dev); if (dev->read_packet_length > 0) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } if (!dev->interrupt_out_busy) { - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } return mask; diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index cc5b296bff3fd..2761fad66b95e 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1203,7 +1203,7 @@ mon_bin_poll(struct file *file, struct poll_table_struct *wait) spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) - mask |= POLLIN | POLLRDNORM; /* readable */ + mask |= EPOLLIN | EPOLLRDNORM; /* readable */ spin_unlock_irqrestore(&rp->b_lock, flags); return mask; } diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c index 8cc4b48ff1273..085700f1be100 100644 --- a/drivers/vfio/virqfd.c +++ b/drivers/vfio/virqfd.c @@ -48,7 
+48,7 @@ static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void struct virqfd *virqfd = container_of(wait, struct virqfd, wait); __poll_t flags = key_to_poll(key); - if (flags & POLLIN) { + if (flags & EPOLLIN) { /* An event has been signaled, call function */ if ((!virqfd->handler || virqfd->handler(virqfd->opaque, virqfd->data)) && @@ -56,7 +56,7 @@ static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void schedule_work(&virqfd->inject); } - if (flags & POLLHUP) { + if (flags & EPOLLHUP) { unsigned long flags; spin_lock_irqsave(&virqfd_lock, flags); @@ -172,14 +172,14 @@ int vfio_virqfd_enable(void *opaque, * Check if there was an event already pending on the eventfd * before we registered and trigger it as if we didn't miss it. */ - if (events & POLLIN) { + if (events & EPOLLIN) { if ((!handler || handler(opaque, data)) && thread) schedule_work(&virqfd->inject); } /* * Do not drop the file until the irqfd is fully initialized, - * otherwise we might race against the POLLHUP. + * otherwise we might race against the EPOLLHUP. */ fdput(irqfd); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index c613d2e3d371c..610cba276d476 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -952,8 +952,8 @@ static int vhost_net_open(struct inode *inode, struct file *f) } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); - vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev); - vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev); + vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); + vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); f->private_data = n; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 2db5af8e8652a..1b3e8d2d5c8b4 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -211,7 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) mask = file->f_op->poll(file, &poll->table); if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); - if (mask & POLLERR) { + if (mask & EPOLLERR) { if (poll->wqh) remove_wait_queue(poll->wqh, &poll->wait); ret = -EINVAL; @@ -440,7 +440,7 @@ void vhost_dev_init(struct vhost_dev *dev, vhost_vq_reset(dev, vq); if (vq->handle_kick) vhost_poll_init(&vq->poll, vq->handle_kick, - POLLIN, dev); + EPOLLIN, dev); } } EXPORT_SYMBOL_GPL(vhost_dev_init); @@ -630,7 +630,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev) vhost_umem_clean(dev->iotlb); dev->iotlb = NULL; vhost_clear_msg(dev); - wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); WARN_ON(!llist_empty(&dev->work_list)); if (dev->worker) { kthread_stop(dev->worker); @@ -1057,7 +1057,7 @@ __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev, poll_wait(file, &dev->wait, wait); if (!list_empty(&dev->read_list)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; return mask; } @@ -2356,7 +2356,7 @@ void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head, list_add_tail(&node->node, head); spin_unlock(&dev->iotlb_lock); - wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM); + wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); } EXPORT_SYMBOL_GPL(vhost_enqueue_msg); diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 6962b4583fd76..11e699f1062b7 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1156,7 +1156,6 @@ config FB_I810_I2C bool 
"Enable DDC Support" depends on FB_I810 && FB_I810_GTF select FB_DDC - help config FB_LE80578 tristate "Intel LE80578 (Vermilion) support" diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 6082f653c68a4..67773e8bbb954 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c @@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info) int timeout = 1000; /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ - if (cpu_data(0).x86_mask == 1) { + if (cpu_data(0).x86_stepping == 1) { pll_table = gx_pll_table_14MHz; pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); } else { diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index 39fe7247ff984..f0cac9e0eb944 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -40,6 +40,7 @@ #include #include #include +#include #include