diff --git a/[refs] b/[refs] index 16b4e95b8c17..f61b9a148b07 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: df86b5765a48d5f557489577652bd6df145b0e1b +refs/heads/master: d6ba6d215a538a58f0f0026f0961b0b9125e8042 diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl index b0300529ab13..196b8b9dba11 100644 --- a/trunk/Documentation/DocBook/drm.tmpl +++ b/trunk/Documentation/DocBook/drm.tmpl @@ -6,36 +6,11 @@ Linux DRM Developer's Guide - - - Jesse - Barnes - Initial version - - Intel Corporation -
- jesse.barnes@intel.com -
-
-
- - Laurent - Pinchart - Driver internals - - Ideas on board SPRL -
- laurent.pinchart@ideasonboard.com -
-
-
-
- 2008-2009 - 2012 - Intel Corporation - Laurent Pinchart + + Intel Corporation (Jesse Barnes <jesse.barnes@intel.com>) + @@ -45,17 +20,6 @@ the kernel source COPYING file. - - - - - 1.0 - 2012-07-13 - LP - Added extensive documentation about driver internals. - - -
@@ -108,361 +72,342 @@ submission & fencing, suspend/resume support, and DMA services. + + The core of every DRM driver is struct drm_driver. Drivers + typically statically initialize a drm_driver structure, + then pass it to drm_init() at load time. + - Driver Initialization - - At the core of every DRM driver is a drm_driver - structure. Drivers typically statically initialize a drm_driver structure, - and then pass it to one of the drm_*_init() functions - to register it with the DRM subsystem. + Driver initialization + + Before calling the DRM initialization routines, the driver must + first create and fill out a struct drm_driver structure. + + + static struct drm_driver driver = { + /* Don't use MTRRs here; the Xserver or userspace app should + * deal with them for Intel hardware. + */ + .driver_features = + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, + .load = i915_driver_load, + .unload = i915_driver_unload, + .firstopen = i915_driver_firstopen, + .lastclose = i915_driver_lastclose, + .preclose = i915_driver_preclose, + .save = i915_save, + .restore = i915_restore, + .device_is_agp = i915_driver_device_is_agp, + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i915_enable_vblank, + .disable_vblank = i915_disable_vblank, + .irq_preinstall = i915_driver_irq_preinstall, + .irq_postinstall = i915_driver_irq_postinstall, + .irq_uninstall = i915_driver_irq_uninstall, + .irq_handler = i915_driver_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .fb_probe = intelfb_probe, + .fb_remove = intelfb_remove, + .fb_resize = intelfb_resize, + .master_create = i915_master_create, + .master_destroy = i915_master_destroy, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = i915_debugfs_init, + .debugfs_cleanup = i915_debugfs_cleanup, +#endif + .gem_init_object = i915_gem_init_object, + .gem_free_object = i915_gem_free_object, + .gem_vm_ops = &i915_gem_vm_ops, + .ioctls = i915_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = i915_compat_ioctl, +#endif + .llseek = noop_llseek, + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + }; + + + In the example above, taken from the i915 DRM driver, the driver + sets several flags indicating what core features it supports; + we go over the individual callbacks in later sections. Since + flags indicate which features your driver supports to the DRM + core, you need to set most of them prior to calling drm_init(). Some, + like DRIVER_MODESET can be set later based on user supplied parameters, + but that's the exception rather than the rule. + + + Driver flags + + DRIVER_USE_AGP + + Driver uses AGP interface + + + + DRIVER_REQUIRE_AGP + + Driver needs AGP interface to function. + + + + DRIVER_USE_MTRR + + + Driver uses MTRR interface for mapping memory. Deprecated. + + + + + DRIVER_PCI_DMA + + Driver is capable of PCI DMA. Deprecated. + + + + DRIVER_SG + + Driver can perform scatter/gather DMA. Deprecated. + + + + DRIVER_HAVE_DMA + Driver supports DMA. Deprecated. 
+ + + DRIVER_HAVE_IRQDRIVER_IRQ_SHARED + + + DRIVER_HAVE_IRQ indicates whether the driver has an IRQ + handler. DRIVER_IRQ_SHARED indicates whether the device & + handler support shared IRQs (note that this is required of + PCI drivers). + + + + + DRIVER_DMA_QUEUE + + + Should be set if the driver queues DMA requests and completes them + asynchronously. Deprecated. + + + + + DRIVER_FB_DMA + + + Driver supports DMA to/from the framebuffer. Deprecated. + + + + + DRIVER_MODESET + + + Driver supports mode setting interfaces. + + + + + + In this specific case, the driver requires AGP and supports + IRQs. DMA, as discussed later, is handled by device-specific ioctls + in this case. It also supports the kernel mode setting APIs, though + unlike in the actual i915 driver source, this example unconditionally + exports KMS capability. - - The drm_driver structure contains static - information that describes the driver and features it supports, and - pointers to methods that the DRM core will call to implement the DRM API. - We will first go through the drm_driver static - information fields, and will then describe individual operations in - details as they get used in later sections. + + + + + + Driver load + + In the previous section, we saw what a typical drm_driver + structure might look like. One of the more important fields in + the structure is the hook for the load function. + + + static struct drm_driver driver = { + ... + .load = i915_driver_load, + ... + }; + + + The load function has many responsibilities: allocating a driver + private structure, specifying supported performance counters, + configuring the device (e.g. mapping registers & command + buffers), initializing the memory manager, and setting up the + initial output configuration. + + + If compatibility is a concern (e.g. with drivers converted over + to the new interfaces from the old ones), care must be taken to + prevent device initialization and control that is incompatible with + currently active userspace drivers. For instance, if user + level mode setting drivers are in use, it would be problematic + to perform output discovery & configuration at load time. + Likewise, if user-level drivers unaware of memory management are + in use, memory management and command buffer setup may need to + be omitted. These requirements are driver-specific, and care + needs to be taken to keep both old and new applications and + libraries working. The i915 driver supports the "modeset" + module parameter to control whether advanced features are + enabled at load time or in legacy fashion. + - Driver Information - - Driver Features - - Drivers inform the DRM core about their requirements and supported - features by setting appropriate flags in the - driver_features field. Since those flags - influence the DRM core behaviour since registration time, most of them - must be set to registering the drm_driver - instance. - - u32 driver_features; - - Driver Feature Flags - - DRIVER_USE_AGP - - Driver uses AGP interface, the DRM core will manage AGP resources. - - - - DRIVER_REQUIRE_AGP - - Driver needs AGP interface to function. AGP initialization failure - will become a fatal error. - - - - DRIVER_USE_MTRR - - Driver uses MTRR interface for mapping memory, the DRM core will - manage MTRR resources. Deprecated. - - - - DRIVER_PCI_DMA - - Driver is capable of PCI DMA, mapping of PCI DMA buffers to - userspace will be enabled. Deprecated. 
- 
- 
- DRIVER_SG
- 
- Driver can perform scatter/gather DMA, allocation and mapping of
- scatter/gather buffers will be enabled.
- 
- 
- DRIVER_HAVE_DMA
- 
- Driver supports DMA, the userspace DMA API will be supported.
- Deprecated.
- 
- 
- DRIVER_HAVE_IRQ DRIVER_IRQ_SHARED
- 
- DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The
- DRM core will automatically register an interrupt handler when the
- flag is set. DRIVER_IRQ_SHARED indicates whether the device &
- handler support shared IRQs (note that this is required of PCI
- drivers).
- 
- 
- DRIVER_IRQ_VBL
- Unused. Deprecated.
- 
- 
- DRIVER_DMA_QUEUE
- 
- Should be set if the driver queues DMA requests and completes them
- asynchronously. Deprecated.
- 
- 
- DRIVER_FB_DMA
- 
- Driver supports DMA to/from the framebuffer, mapping of framebuffer
- DMA buffers to userspace will be supported.
- 
- 
- DRIVER_IRQ_VBL2
- Unused. Deprecated.
- 
- 
- DRIVER_GEM
- 
- Driver uses the GEM memory manager.
- 
- 
- DRIVER_MODESET
- 
- Driver supports mode setting interfaces (KMS).
- 
- 
- DRIVER_PRIME
- 
- Driver implements DRM PRIME buffer sharing.
- 
- 
- 
- Major, Minor and Patchlevel
- int major;
-int minor;
-int patchlevel;
- 
- The DRM core identifies driver versions by a major, minor and patch
- level triplet. The information is printed to the kernel log at
- initialization time and passed to userspace through the
- DRM_IOCTL_VERSION ioctl.
- 
- The major and minor numbers are also used to verify the requested driver
- API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
- between minor versions, applications can call DRM_IOCTL_SET_VERSION to
- select a specific version of the API. If the requested major isn't equal
- to the driver major, or the requested minor is larger than the driver
- minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
- the driver's set_version() method will be called with the requested
- version.
- 
- 
- Name, Description and Date
- char *name;
-char *desc;
-char *date;
- 
- The driver name is printed to the kernel log at initialization time,
- used for IRQ registration and passed to userspace through
- DRM_IOCTL_VERSION.
- 
- The driver description is a purely informative string passed to
- userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
- the kernel.
- 
- The driver date, formatted as YYYYMMDD, is meant to identify the date of
- the latest modification to the driver. However, as most drivers fail to
- update it, its value is mostly useless. The DRM core prints it to the
- kernel log at initialization time and passes it to userspace through the
- DRM_IOCTL_VERSION ioctl.
- 
- 
- Driver Load
- 
- The load method is the driver and device
- initialization entry point. The method is responsible for allocating and
- initializing driver private data, specifying supported performance
- counters, performing resource allocation and mapping (e.g. acquiring
- clocks, mapping registers or allocating command buffers), initializing
- the memory manager (), installing
- the IRQ handler (), setting up
- vertical blanking handling (), mode
- setting () and initial output
- configuration ().
- 
- If compatibility is a concern (e.g. with drivers converted over from
- User Mode Setting to Kernel Mode Setting), care must be taken to prevent
- device initialization and control that is incompatible with currently
- active userspace drivers. For instance, if user level mode setting
- drivers are in use, it would be problematic to perform output discovery
- & configuration at load time. Likewise, if user-level drivers
- unaware of memory management are in use, memory management and command
- buffer setup may need to be omitted. These requirements are
- driver-specific, and care needs to be taken to keep both old and new
- applications and libraries working.
- 
- int (*load) (struct drm_device *, unsigned long flags);
- The method takes two arguments, a pointer to the newly created
- drm_device and flags. The flags are used to
- pass the driver_data field of the device id
- corresponding to the device passed to drm_*_init().
- Only PCI devices currently use this; USB and platform DRM drivers have
- their load method called with flags set to 0.
- 
+ 
+ Driver private & performance counters
+ 
+ The driver private hangs off the main drm_device structure and
+ can be used for tracking various device-specific bits of
+ information, like register offsets, command buffer status,
+ register state for suspend/resume, etc.
At load time, a + driver may simply allocate one and set drm_device.dev_priv + appropriately; it should be freed and drm_device.dev_priv set + to NULL when the driver is unloaded. - - If compatibility is a concern (e.g. with drivers converted over from - User Mode Setting to Kernel Mode Setting), care must be taken to prevent - device initialization and control that is incompatible with currently - active userspace drivers. For instance, if user level mode setting - drivers are in use, it would be problematic to perform output discovery - & configuration at load time. Likewise, if user-level drivers - unaware of memory management are in use, memory management and command - buffer setup may need to be omitted. These requirements are - driver-specific, and care needs to be taken to keep both old and new - applications and libraries working. - - int (*load) (struct drm_device *, unsigned long flags); - The method takes two arguments, a pointer to the newly created - drm_device and flags. The flags are used to - pass the driver_data field of the device id - corresponding to the device passed to drm_*_init(). - Only PCI devices currently use this, USB and platform DRM drivers have - their load method called with flags to 0. + The DRM supports several counters which may be used for rough + performance characterization. Note that the DRM stat counter + system is not often used by applications, and supporting + additional counters is completely optional. + + + These interfaces are deprecated and should not be used. If performance + monitoring is desired, the developer should investigate and + potentially enhance the kernel perf and tracing infrastructure to export + GPU related performance information for consumption by performance + monitoring tools and applications. - - Driver Private & Performance Counters - - The driver private hangs off the main - drm_device structure and can be used for - tracking various device-specific bits of information, like register - offsets, command buffer status, register state for suspend/resume, etc. - At load time, a driver may simply allocate one and set - drm_device.dev_priv - appropriately; it should be freed and - drm_device.dev_priv - set to NULL when the driver is unloaded. - - - DRM supports several counters which were used for rough performance - characterization. This stat counter system is deprecated and should not - be used. If performance monitoring is desired, the developer should - investigate and potentially enhance the kernel perf and tracing - infrastructure to export GPU related performance information for - consumption by performance monitoring tools and applications. - - - - IRQ Registration - - The DRM core tries to facilitate IRQ handler registration and - unregistration by providing drm_irq_install and - drm_irq_uninstall functions. Those functions only - support a single interrupt per device. - - - - Both functions get the device IRQ by calling - drm_dev_to_irq. This inline function will call a - bus-specific operation to retrieve the IRQ number. For platform devices, - platform_get_irq(..., 0) is used to retrieve the - IRQ number. - - - drm_irq_install starts by calling the - irq_preinstall driver operation. The operation - is optional and must make sure that the interrupt will not get fired by - clearing all pending interrupt flags or disabling the interrupt. - - - The IRQ will then be requested by a call to - request_irq. If the DRIVER_IRQ_SHARED driver - feature flag is set, a shared (IRQF_SHARED) IRQ handler will be - requested. 
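
Taken together, these operations follow a common pattern. The following
sketch is purely illustrative: the foo_device structure, the register
offsets and the interrupt bits are invented for the example, and only the
operation prototypes and drm_handle_vblank are real DRM interfaces.

  #define FOO_IRQ_MASK    0x00            /* invented register offsets */
  #define FOO_IRQ_STATUS  0x04
  #define FOO_IRQ_VBLANK  (1 << 0)        /* invented interrupt bits */
  #define FOO_IRQ_ALL     0xffffffff

  struct foo_device {
          void __iomem *mmio;     /* register mapping created at load time */
  };

  static void foo_irq_preinstall(struct drm_device *dev)
  {
          struct foo_device *foo = dev->dev_private;

          /* Mask everything and clear stale status bits so that no
           * interrupt can fire before request_irq() has completed. */
          writel(0, foo->mmio + FOO_IRQ_MASK);
          writel(0xffffffff, foo->mmio + FOO_IRQ_STATUS);
  }

  static int foo_irq_postinstall(struct drm_device *dev)
  {
          struct foo_device *foo = dev->dev_private;

          /* Enable everything except vblank, which the DRM core enables
           * separately through the enable_vblank operation. */
          writel(FOO_IRQ_ALL & ~FOO_IRQ_VBLANK, foo->mmio + FOO_IRQ_MASK);
          return 0;
  }

  static void foo_irq_uninstall(struct drm_device *dev)
  {
          struct foo_device *foo = dev->dev_private;

          /* Must leave the hardware with all interrupts disabled. */
          writel(0, foo->mmio + FOO_IRQ_MASK);
  }

  static irqreturn_t foo_irq_handler(int irq, void *arg)
  {
          struct drm_device *dev = arg;   /* DRM device, second argument */
          struct foo_device *foo = dev->dev_private;
          u32 status = readl(foo->mmio + FOO_IRQ_STATUS);

          if (!status)
                  return IRQ_NONE;        /* not ours, the IRQ may be shared */

          writel(status, foo->mmio + FOO_IRQ_STATUS);     /* acknowledge */

          if (status & FOO_IRQ_VBLANK)
                  drm_handle_vblank(dev, 0);

          return IRQ_HANDLED;
  }

These four functions would be assigned to the irq_preinstall,
irq_postinstall, irq_uninstall and irq_handler fields of the drm_driver
structure; the handler itself is described next.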
- - - The IRQ handler function must be provided as the mandatory irq_handler - driver operation. It will get passed directly to - request_irq and thus has the same prototype as all - IRQ handlers. It will get called with a pointer to the DRM device as the - second argument. - - - Finally the function calls the optional - irq_postinstall driver operation. The operation - usually enables interrupts (excluding the vblank interrupt, which is - enabled separately), but drivers may choose to enable/disable interrupts - at a different time. - - - drm_irq_uninstall is similarly used to uninstall an - IRQ handler. It starts by waking up all processes waiting on a vblank - interrupt to make sure they don't hang, and then calls the optional - irq_uninstall driver operation. The operation - must disable all hardware interrupts. Finally the function frees the IRQ - by calling free_irq. - - - - Memory Manager Initialization - - Every DRM driver requires a memory manager which must be initialized at - load time. DRM currently contains two memory managers, the Translation - Table Manager (TTM) and the Graphics Execution Manager (GEM). - This document describes the use of the GEM memory manager only. See - for details. - - - - Miscellaneous Device Configuration - - Another task that may be necessary for PCI devices during configuration - is mapping the video BIOS. On many devices, the VBIOS describes device - configuration, LCD panel timings (if any), and contains flags indicating - device state. Mapping the BIOS can be done using the pci_map_rom() call, - a convenience function that takes care of mapping the actual ROM, - whether it has been shadowed into memory (typically at address 0xc0000) - or exists on the PCI device in the ROM BAR. Note that after the ROM has - been mapped and any necessary information has been extracted, it should - be unmapped; on many devices, the ROM address decoder is shared with - other BARs, so leaving it mapped could cause undesired behaviour like - hangs or memory corruption. - - - - - + + Configuring the device + + Obviously, device configuration is device-specific. + However, there are several common operations: finding a + device's PCI resources, mapping them, and potentially setting + up an IRQ handler. + + + Finding & mapping resources is fairly straightforward. The + DRM wrapper functions, drm_get_resource_start() and + drm_get_resource_len(), may be used to find BARs on the given + drm_device struct. Once those values have been retrieved, the + driver load function can call drm_addmap() to create a new + mapping for the BAR in question. Note that you probably want a + drm_local_map_t in your driver private structure to track any + mappings you create. + + + + + if compatibility with other operating systems isn't a concern + (DRM drivers can run under various BSD variants and OpenSolaris), + native Linux calls may be used for the above, e.g. pci_resource_* + and iomap*/iounmap. See the Linux device driver book for more + info. + + + Once you have a register map, you may use the DRM_READn() and + DRM_WRITEn() macros to access the registers on your device, or + use driver-specific versions to offset into your MMIO space + relative to a driver-specific base pointer (see I915_READ for + an example). + + + If your device supports interrupt generation, you may want to + set up an interrupt handler when the driver is loaded. This + is done using the drm_irq_install() function. 
If your device
+ supports vertical blank interrupts, it should call
+ drm_vblank_init() to initialize the core vblank handling code before
+ enabling interrupts on your device. This ensures the vblank-related
+ structures are allocated and allows the core to handle vblank events.
+ 
+ Once your interrupt handler is registered (it uses your
+ drm_driver.irq_handler as the actual interrupt handling
+ function), you can safely enable interrupts on your device,
+ assuming any other state your interrupt handler uses is also
+ initialized.
+ 
+ Another task that may be necessary during configuration is
+ mapping the video BIOS. On many devices, the VBIOS describes
+ device configuration, LCD panel timings (if any), and contains
+ flags indicating device state. Mapping the BIOS can be done
+ using the pci_map_rom() call, a convenience function that
+ takes care of mapping the actual ROM, whether it has been
+ shadowed into memory (typically at address 0xc0000) or exists
+ on the PCI device in the ROM BAR. Note that after the ROM
+ has been mapped and any necessary information has been extracted,
+ it should be unmapped; on many devices, the ROM address decoder is
+ shared with other BARs, so leaving it mapped could cause
+ undesired behavior like hangs or memory corruption.
+ 
- 
- Memory management
- 
- Modern Linux systems require large amounts of graphics memory to store
- frame buffers, textures, vertices and other graphics-related data. Given
- the very dynamic nature of much of that data, managing graphics memory
- efficiently is thus crucial for the graphics stack and plays a central
- role in the DRM infrastructure.
- 
- The DRM core includes two memory managers, namely Translation Table Maps
- (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
- manager to be developed and tried to be a one-size-fits-all solution.
- It provides a single userspace API to accommodate the needs of all
- hardware, supporting both Unified Memory Architecture (UMA) devices
- and devices with dedicated video RAM (i.e. most discrete video cards).
- This resulted in a large, complex piece of code that turned out to be
- hard to use for driver development.
- 
- GEM started as an Intel-sponsored project in reaction to TTM's
- complexity. Its design philosophy is completely different: instead of
- providing a solution to every graphics memory-related problem, GEM
- identified common code between drivers and created a support library to
- share it. GEM has simpler initialization and execution requirements than
- TTM, but has no video RAM management capabilities and is thus limited to
- UMA devices.
- 
- The Translation Table Manager (TTM)
- 
- TTM design background and information belongs here.
- 
+ Memory manager initialization
+ 
+ In order to allocate command buffers, cursor memory, scanout
+ buffers, etc., as well as support the latest features provided
+ by packages like Mesa and the X.Org X server, your driver
+ should support a memory manager.
+ 
+ If your driver supports memory management (it should!), you
+ need to set that up at load time as well. How you initialize
+ it depends on which memory manager you're using: TTM or GEM.
 
 TTM initialization
- This section is outdated.
- 
- Drivers wishing to support TTM must fill out a drm_bo_driver
- structure. The structure contains several fields with function
- pointers for initializing the TTM, allocating and freeing memory,
- waiting for command completion and fence synchronization, and memory
- migration.
See the radeon_ttm.c file for an example of usage. + + TTM (for Translation Table Manager) manages video memory and + aperture space for graphics devices. TTM supports both UMA devices + and devices with dedicated video RAM (VRAM), i.e. most discrete + graphics devices. If your device has dedicated RAM, supporting + TTM is desirable. TTM also integrates tightly with your + driver-specific buffer execution function. See the radeon + driver for examples. + + + The core TTM structure is the ttm_bo_driver struct. It contains + several fields with function pointers for initializing the TTM, + allocating and freeing memory, waiting for command completion + and fence synchronization, and memory migration. See the + radeon_ttm.c file for an example of usage. The ttm_global_reference structure is made up of several fields: @@ -500,1081 +445,82 @@ char *date; count for the TTM, which will call your initialization function. - - - The Graphics Execution Manager (GEM) - - The GEM design approach has resulted in a memory manager that doesn't - provide full coverage of all (or even all common) use cases in its - userspace or kernel API. GEM exposes a set of standard memory-related - operations to userspace and a set of helper functions to drivers, and let - drivers implement hardware-specific operations with their own private API. - - - The GEM userspace API is described in the - GEM - the Graphics - Execution Manager article on LWN. While slightly - outdated, the document provides a good overview of the GEM API principles. - Buffer allocation and read and write operations, described as part of the - common GEM API, are currently implemented using driver-specific ioctls. - - - GEM is data-agnostic. It manages abstract buffer objects without knowing - what individual buffers contain. APIs that require knowledge of buffer - contents or purpose, such as buffer allocation or synchronization - primitives, are thus outside of the scope of GEM and must be implemented - using driver-specific ioctls. - - - On a fundamental level, GEM involves several operations: - - Memory allocation and freeing - Command execution - Aperture management at command execution time - - Buffer object allocation is relatively straightforward and largely - provided by Linux's shmem layer, which provides memory to back each - object. - - - Device-specific operations, such as command execution, pinning, buffer - read & write, mapping, and domain ownership transfers are left to - driver-specific ioctls. - - - GEM Initialization - - Drivers that use GEM must set the DRIVER_GEM bit in the struct - drm_driver - driver_features field. The DRM core will - then automatically initialize the GEM core before calling the - load operation. Behind the scene, this will - create a DRM Memory Manager object which provides an address space - pool for object allocation. - - - In a KMS configuration, drivers need to allocate and initialize a - command ring buffer following core GEM initialization if required by - the hardware. UMA devices usually have what is called a "stolen" - memory region, which provides space for the initial framebuffer and - large, contiguous memory regions required by the device. This space is - typically not managed by GEM, and must be initialized separately into - its own DRM MM object. - - - - GEM Objects Creation - - GEM splits creation of GEM objects and allocation of the memory that - backs them in two distinct operations. - - - GEM objects are represented by an instance of struct - drm_gem_object. 
Drivers usually need to extend - GEM objects with private information and thus create a driver-specific - GEM object structure type that embeds an instance of struct - drm_gem_object. - - - To create a GEM object, a driver allocates memory for an instance of its - specific GEM object type and initializes the embedded struct - drm_gem_object with a call to - drm_gem_object_init. The function takes a pointer to - the DRM device, a pointer to the GEM object and the buffer object size - in bytes. - - - GEM uses shmem to allocate anonymous pageable memory. - drm_gem_object_init will create an shmfs file of - the requested size and store it into the struct - drm_gem_object filp - field. The memory is used as either main storage for the object when the - graphics hardware uses system memory directly or as a backing store - otherwise. - - - Drivers are responsible for the actual physical pages allocation by - calling shmem_read_mapping_page_gfp for each page. - Note that they can decide to allocate pages when initializing the GEM - object, or to delay allocation until the memory is needed (for instance - when a page fault occurs as a result of a userspace memory access or - when the driver needs to start a DMA transfer involving the memory). - - - Anonymous pageable memory allocation is not always desired, for instance - when the hardware requires physically contiguous system memory as is - often the case in embedded devices. Drivers can create GEM objects with - no shmfs backing (called private GEM objects) by initializing them with - a call to drm_gem_private_object_init instead of - drm_gem_object_init. Storage for private GEM - objects must be managed by drivers. - - - Drivers that do not need to extend GEM objects with private information - can call the drm_gem_object_alloc function to - allocate and initialize a struct drm_gem_object - instance. The GEM core will call the optional driver - gem_init_object operation after initializing - the GEM object with drm_gem_object_init. - int (*gem_init_object) (struct drm_gem_object *obj); - - - No alloc-and-init function exists for private GEM objects. - - - - GEM Objects Lifetime - - All GEM objects are reference-counted by the GEM core. References can be - acquired and release by calling drm_gem_object_reference - and drm_gem_object_unreference respectively. The - caller must hold the drm_device - struct_mutex lock. As a convenience, GEM - provides the drm_gem_object_reference_unlocked and - drm_gem_object_unreference_unlocked functions that - can be called without holding the lock. - - - When the last reference to a GEM object is released the GEM core calls - the drm_driver - gem_free_object operation. That operation is - mandatory for GEM-enabled drivers and must free the GEM object and all - associated resources. - - - void (*gem_free_object) (struct drm_gem_object *obj); - Drivers are responsible for freeing all GEM object resources, including - the resources created by the GEM core. If an mmap offset has been - created for the object (in which case - drm_gem_object::map_list::map - is not NULL) it must be freed by a call to - drm_gem_free_mmap_offset. The shmfs backing store - must be released by calling drm_gem_object_release - (that function can safely be called if no shmfs backing store has been - created). - - - - GEM Objects Naming - - Communication between userspace and the kernel refers to GEM objects - using local handles, global names or, more recently, file descriptors. 
- All of those are 32-bit integer values; the usual Linux kernel limits - apply to the file descriptors. - - - GEM handles are local to a DRM file. Applications get a handle to a GEM - object through a driver-specific ioctl, and can use that handle to refer - to the GEM object in other standard or driver-specific ioctls. Closing a - DRM file handle frees all its GEM handles and dereferences the - associated GEM objects. - - - To create a handle for a GEM object drivers call - drm_gem_handle_create. The function takes a pointer - to the DRM file and the GEM object and returns a locally unique handle. - When the handle is no longer needed drivers delete it with a call to - drm_gem_handle_delete. Finally the GEM object - associated with a handle can be retrieved by a call to - drm_gem_object_lookup. - - - Handles don't take ownership of GEM objects, they only take a reference - to the object that will be dropped when the handle is destroyed. To - avoid leaking GEM objects, drivers must make sure they drop the - reference(s) they own (such as the initial reference taken at object - creation time) as appropriate, without any special consideration for the - handle. For example, in the particular case of combined GEM object and - handle creation in the implementation of the - dumb_create operation, drivers must drop the - initial reference to the GEM object before returning the handle. - - - GEM names are similar in purpose to handles but are not local to DRM - files. They can be passed between processes to reference a GEM object - globally. Names can't be used directly to refer to objects in the DRM - API, applications must convert handles to names and names to handles - using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls - respectively. The conversion is handled by the DRM core without any - driver-specific support. - - - Similar to global names, GEM file descriptors are also used to share GEM - objects across processes. They offer additional security: as file - descriptors must be explictly sent over UNIX domain sockets to be shared - between applications, they can't be guessed like the globally unique GEM - names. - - - Drivers that support GEM file descriptors, also known as the DRM PRIME - API, must set the DRIVER_PRIME bit in the struct - drm_driver - driver_features field, and implement the - prime_handle_to_fd and - prime_fd_to_handle operations. - - - int (*prime_handle_to_fd)(struct drm_device *dev, - struct drm_file *file_priv, uint32_t handle, - uint32_t flags, int *prime_fd); - int (*prime_fd_to_handle)(struct drm_device *dev, - struct drm_file *file_priv, int prime_fd, - uint32_t *handle); - Those two operations convert a handle to a PRIME file descriptor and - vice versa. Drivers must use the kernel dma-buf buffer sharing framework - to manage the PRIME file descriptors. - - - While non-GEM drivers must implement the operations themselves, GEM - drivers must use the drm_gem_prime_handle_to_fd - and drm_gem_prime_fd_to_handle helper functions. - Those helpers rely on the driver - gem_prime_export and - gem_prime_import operations to create a dma-buf - instance from a GEM object (dma-buf exporter role) and to create a GEM - object from a dma-buf instance (dma-buf importer role). - - - struct dma_buf * (*gem_prime_export)(struct drm_device *dev, - struct drm_gem_object *obj, - int flags); - struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, - struct dma_buf *dma_buf); - These two operations are mandatory for GEM drivers that support DRM - PRIME. 
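
In practice, wiring up PRIME support mostly amounts to filling in the
relevant drm_driver fields. The following is a minimal sketch, assuming
driver-specific foo_gem_prime_export and foo_gem_prime_import
implementations (the foo_* names are invented for the example):

  static struct drm_driver foo_driver = {
          .driver_features        = DRIVER_GEM | DRIVER_PRIME,
          /* handle <-> file descriptor conversion, using the GEM helpers */
          .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
          .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
          /* GEM object <-> dma-buf conversion, implemented by the driver */
          .gem_prime_export       = foo_gem_prime_export,
          .gem_prime_import       = foo_gem_prime_import,
          /* ... remaining operations ... */
  };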
- - - - GEM Objects Mapping - - Because mapping operations are fairly heavyweight GEM favours - read/write-like access to buffers, implemented through driver-specific - ioctls, over mapping buffers to userspace. However, when random access - to the buffer is needed (to perform software rendering for instance), - direct access to the object can be more efficient. - - - The mmap system call can't be used directly to map GEM objects, as they - don't have their own file handle. Two alternative methods currently - co-exist to map GEM objects to userspace. The first method uses a - driver-specific ioctl to perform the mapping operation, calling - do_mmap under the hood. This is often considered - dubious, seems to be discouraged for new GEM-enabled drivers, and will - thus not be described here. - - - The second method uses the mmap system call on the DRM file handle. - void *mmap(void *addr, size_t length, int prot, int flags, int fd, - off_t offset); - DRM identifies the GEM object to be mapped by a fake offset passed - through the mmap offset argument. Prior to being mapped, a GEM object - must thus be associated with a fake offset. To do so, drivers must call - drm_gem_create_mmap_offset on the object. The - function allocates a fake offset range from a pool and stores the - offset divided by PAGE_SIZE in - obj->map_list.hash.key. Care must be taken not to - call drm_gem_create_mmap_offset if a fake offset - has already been allocated for the object. This can be tested by - obj->map_list.map being non-NULL. - - - Once allocated, the fake offset value - (obj->map_list.hash.key << PAGE_SHIFT) - must be passed to the application in a driver-specific way and can then - be used as the mmap offset argument. - - - The GEM core provides a helper method drm_gem_mmap - to handle object mapping. The method can be set directly as the mmap - file operation handler. It will look up the GEM object based on the - offset value and set the VMA operations to the - drm_driver gem_vm_ops - field. Note that drm_gem_mmap doesn't map memory to - userspace, but relies on the driver-provided fault handler to map pages - individually. - - - To use drm_gem_mmap, drivers must fill the struct - drm_driver gem_vm_ops - field with a pointer to VM operations. - - - struct vm_operations_struct *gem_vm_ops - - struct vm_operations_struct { - void (*open)(struct vm_area_struct * area); - void (*close)(struct vm_area_struct * area); - int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); - }; - - - The open and close - operations must update the GEM object reference count. Drivers can use - the drm_gem_vm_open and - drm_gem_vm_close helper functions directly as open - and close handlers. - - - The fault operation handler is responsible for mapping individual pages - to userspace when a page fault occurs. Depending on the memory - allocation scheme, drivers can allocate pages at fault time, or can - decide to allocate memory for the GEM object at the time the object is - created. - - - Drivers that want to map the GEM object upfront instead of handling page - faults can implement their own mmap file operation handler. - - - - Dumb GEM Objects - - The GEM API doesn't standardize GEM objects creation and leaves it to - driver-specific ioctls. While not an issue for full-fledged graphics - stacks that include device-specific userspace components (in libdrm for - instance), this limit makes DRM-based early boot graphics unnecessarily - complex. 
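
Before moving on to the standardized creation API, here is a short,
hypothetical sketch of how the mapping pieces described above fit
together. The foo_gem_fault handler and the foo_gem_get_page helper are
invented for the example; drm_gem_mmap, drm_gem_vm_open and
drm_gem_vm_close are the core functions discussed previously:

  static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
          struct drm_gem_object *obj = vma->vm_private_data;
          struct page *page;

          /* Find or allocate the shmem page backing the faulting offset;
           * foo_gem_get_page() is an assumed driver-specific helper. */
          page = foo_gem_get_page(obj, vmf->pgoff);
          if (IS_ERR(page))
                  return VM_FAULT_SIGBUS;

          /* Map the single page into the faulting VMA. */
          if (vm_insert_page(vma, (unsigned long)vmf->virtual_address, page))
                  return VM_FAULT_OOM;

          return VM_FAULT_NOPAGE;
  }

  static struct vm_operations_struct foo_gem_vm_ops = {
          .fault = foo_gem_fault,
          .open  = drm_gem_vm_open,       /* stock helpers that update the */
          .close = drm_gem_vm_close,      /* GEM object reference count */
  };

  static struct drm_driver foo_driver = {
          /* ... */
          .gem_vm_ops = &foo_gem_vm_ops,
          /* the DRM file's mmap handler is set to drm_gem_mmap, which
           * looks the object up by its fake offset and installs
           * foo_gem_vm_ops on the VMA */
  };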
- - - Dumb GEM objects partly alleviate the problem by providing a standard - API to create dumb buffers suitable for scanout, which can then be used - to create KMS frame buffers. - - - To support dumb GEM objects drivers must implement the - dumb_create, - dumb_destroy and - dumb_map_offset operations. - - - - int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, - struct drm_mode_create_dumb *args); - - The dumb_create operation creates a GEM - object suitable for scanout based on the width, height and depth - from the struct drm_mode_create_dumb - argument. It fills the argument's handle, - pitch and size - fields with a handle for the newly created GEM object and its line - pitch and size in bytes. - - - - int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle); - - The dumb_destroy operation destroys a dumb - GEM object created by dumb_create. - - - - int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle, uint64_t *offset); - - The dumb_map_offset operation associates an - mmap fake offset with the GEM object given by the handle and returns - it. Drivers must use the - drm_gem_create_mmap_offset function to - associate the fake offset as described in - . - - - - - - Memory Coherency - - When mapped to the device or used in a command buffer, backing pages - for an object are flushed to memory and marked write combined so as to - be coherent with the GPU. Likewise, if the CPU accesses an object - after the GPU has finished rendering to the object, then the object - must be made coherent with the CPU's view of memory, usually involving - GPU cache flushing of various kinds. This core CPU<->GPU - coherency management is provided by a device-specific ioctl, which - evaluates an object's current domain and performs any necessary - flushing or synchronization to put the object into the desired - coherency domain (note that the object may be busy, i.e. an active - render target; in that case, setting the domain blocks the client and - waits for rendering to complete before performing any necessary - flushing operations). - - - Command Execution - - Perhaps the most important GEM function for GPU devices is providing a - command execution interface to clients. Client programs construct - command buffers containing references to previously allocated memory - objects, and then submit them to GEM. At that point, GEM takes care to - bind all the objects into the GTT, execute the buffer, and provide - necessary synchronization between clients accessing the same buffers. - This often involves evicting some objects from the GTT and re-binding - others (a fairly expensive operation), and providing relocation - support which hides fixed GTT offsets from clients. Clients must take - care not to submit command buffers that reference more objects than - can fit in the GTT; otherwise, GEM will reject them and no rendering - will occur. Similarly, if several objects in the buffer require fence - registers to be allocated for correct rendering (e.g. 2D blits on - pre-965 chips), care must be taken not to require more fence registers - than are available to the client. Such resource management should be - abstracted from the client in libdrm. - - - - - - - - - Mode Setting - - Drivers must initialize the mode setting core by calling - drm_mode_config_init on the DRM device. The function - initializes the drm_device - mode_config field and never fails. 
Once done, - mode configuration must be setup by initializing the following fields. - - - - int min_width, min_height; -int max_width, max_height; - - Minimum and maximum width and height of the frame buffers in pixel - units. + GEM initialization + + GEM is an alternative to TTM, designed specifically for UMA + devices. It has simpler initialization and execution requirements + than TTM, but has no VRAM management capability. Core GEM + is initialized by calling drm_mm_init() to create + a GTT DRM MM object, which provides an address space pool for + object allocation. In a KMS configuration, the driver + needs to allocate and initialize a command ring buffer following + core GEM initialization. A UMA device usually has what is called a + "stolen" memory region, which provides space for the initial + framebuffer and large, contiguous memory regions required by the + device. This space is not typically managed by GEM, and it must + be initialized separately into its own DRM MM object. + + + Initialization is driver-specific. In the case of Intel + integrated graphics chips like 965GM, GEM initialization can + be done by calling the internal GEM init function, + i915_gem_do_init(). Since the 965GM is a UMA device + (i.e. it doesn't have dedicated VRAM), GEM manages + making regular RAM available for GPU operations. Memory set + aside by the BIOS (called "stolen" memory by the i915 + driver) is managed by the DRM memrange allocator; the + rest of the aperture is managed by GEM. + + /* Basic memrange allocator for stolen space (aka vram) */ + drm_memrange_init(&dev_priv->vram, 0, prealloc_size); + /* Let GEM Manage from end of prealloc space to end of aperture */ + i915_gem_do_init(dev, prealloc_size, agp_size); + + + + + Once the memory manager has been set up, we may allocate the + command buffer. In the i915 case, this is also done with a + GEM function, i915_gem_init_ringbuffer(). - - - struct drm_mode_config_funcs *funcs; - Mode setting functions. - - - - Frame Buffer Creation - struct drm_framebuffer *(*fb_create)(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); - - Frame buffers are abstract memory objects that provide a source of - pixels to scanout to a CRTC. Applications explicitly request the - creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and - receive an opaque handle that can be passed to the KMS CRTC control, - plane configuration and page flip functions. - - - Frame buffers rely on the underneath memory manager for low-level memory - operations. When creating a frame buffer applications pass a memory - handle (or a list of memory handles for multi-planar formats) through - the drm_mode_fb_cmd2 argument. This document - assumes that the driver uses GEM, those handles thus reference GEM - objects. - - - Drivers must first validate the requested frame buffer parameters passed - through the mode_cmd argument. In particular this is where invalid - sizes, pixel formats or pitches can be caught. - - - If the parameters are deemed valid, drivers then create, initialize and - return an instance of struct drm_framebuffer. - If desired the instance can be embedded in a larger driver-specific - structure. The new instance is initialized with a call to - drm_framebuffer_init which takes a pointer to DRM - frame buffer operations (struct - drm_framebuffer_funcs). 
Frame buffer operations are - - - int (*create_handle)(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned int *handle); - - Create a handle to the frame buffer underlying memory object. If - the frame buffer uses a multi-plane format, the handle will - reference the memory object associated with the first plane. - - - Drivers call drm_gem_handle_create to create - the handle. - - - - void (*destroy)(struct drm_framebuffer *framebuffer); - - Destroy the frame buffer object and frees all associated - resources. Drivers must call - drm_framebuffer_cleanup to free resources - allocated by the DRM core for the frame buffer object, and must - make sure to unreference all memory objects associated with the - frame buffer. Handles created by the - create_handle operation are released by - the DRM core. - - - - int (*dirty)(struct drm_framebuffer *framebuffer, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips); - - This optional operation notifies the driver that a region of the - frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB - ioctl call. - - - - - - After initializing the drm_framebuffer - instance drivers must fill its width, - height, pitches, - offsets, depth, - bits_per_pixel and - pixel_format fields from the values passed - through the drm_mode_fb_cmd2 argument. They - should call the drm_helper_mode_fill_fb_struct - helper function to do so. - - - - Output Polling - void (*output_poll_changed)(struct drm_device *dev); - - This operation notifies the driver that the status of one or more - connectors has changed. Drivers that use the fb helper can just call the - drm_fb_helper_hotplug_event function to handle this - operation. - - - - - - - - KMS Initialization and Cleanup - - A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders - and connectors. KMS drivers must thus create and initialize all those - objects at load time after initializing mode setting. - - - CRTCs (struct <structname>drm_crtc</structname>) - - A CRTC is an abstraction representing a part of the chip that contains a - pointer to a scanout buffer. Therefore, the number of CRTCs available - determines how many independent scanout buffers can be active at any - given time. The CRTC structure contains several fields to support this: - a pointer to some video memory (abstracted as a frame buffer object), a - display mode, and an (x, y) offset into the video memory to support - panning or configurations where one piece of video memory spans multiple - CRTCs. - - - CRTC Initialization - - A KMS device must create and register at least one struct - drm_crtc instance. The instance is allocated - and zeroed by the driver, possibly as part of a larger structure, and - registered with a call to drm_crtc_init with a - pointer to CRTC functions. - - - - CRTC Operations - - Set Configuration - int (*set_config)(struct drm_mode_set *set); - - Apply a new CRTC configuration to the device. The configuration - specifies a CRTC, a frame buffer to scan out from, a (x,y) position in - the frame buffer, a display mode and an array of connectors to drive - with the CRTC if possible. - - - If the frame buffer specified in the configuration is NULL, the driver - must detach all encoders connected to the CRTC and all connectors - attached to those encoders and disable them. - - - This operation is called with the mode config lock held. - - - FIXME: How should set_config interact with DPMS? 
If the CRTC is - suspended, should it be resumed? - - - - Page Flipping - int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event); - - Schedule a page flip to the given frame buffer for the CRTC. This - operation is called with the mode config mutex held. - - - Page flipping is a synchronization mechanism that replaces the frame - buffer being scanned out by the CRTC with a new frame buffer during - vertical blanking, avoiding tearing. When an application requests a page - flip the DRM core verifies that the new frame buffer is large enough to - be scanned out by the CRTC in the currently configured mode and then - calls the CRTC page_flip operation with a - pointer to the new frame buffer. - - - The page_flip operation schedules a page flip. - Once any pending rendering targetting the new frame buffer has - completed, the CRTC will be reprogrammed to display that frame buffer - after the next vertical refresh. The operation must return immediately - without waiting for rendering or page flip to complete and must block - any new rendering to the frame buffer until the page flip completes. - - - If a page flip is already pending, the - page_flip operation must return - -EBUSY. - - - To synchronize page flip to vertical blanking the driver will likely - need to enable vertical blanking interrupts. It should call - drm_vblank_get for that purpose, and call - drm_vblank_put after the page flip completes. - - - If the application has requested to be notified when page flip completes - the page_flip operation will be called with a - non-NULL event argument pointing to a - drm_pending_vblank_event instance. Upon page - flip completion the driver must fill the - event::event - sequence, tv_sec - and tv_usec fields with the associated - vertical blanking count and timestamp, add the event to the - drm_file list of events to be signaled, and wake - up any waiting process. This can be performed with - event.sequence = drm_vblank_count_and_time(..., &now); - event->event.tv_sec = now.tv_sec; - event->event.tv_usec = now.tv_usec; - - spin_lock_irqsave(&dev->event_lock, flags); - list_add_tail(&event->base.link, &event->base.file_priv->event_list); - wake_up_interruptible(&event->base.file_priv->event_wait); - spin_unlock_irqrestore(&dev->event_lock, flags); - ]]> - - - FIXME: Could drivers that don't need to wait for rendering to complete - just add the event to dev->vblank_event_list and - let the DRM core handle everything, as for "normal" vertical blanking - events? - - - While waiting for the page flip to complete, the - event->base.link list head can be used freely by - the driver to store the pending event in a driver-specific list. - - - If the file handle is closed before the event is signaled, drivers must - take care to destroy the event in their - preclose operation (and, if needed, call - drm_vblank_put). - - - - Miscellaneous - - - void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); - - Apply a gamma table to the device. The operation is optional. - - - - void (*destroy)(struct drm_crtc *crtc); - - Destroy the CRTC when not needed anymore. See - . - - - - - - - - Planes (struct <structname>drm_plane</structname>) - - A plane represents an image source that can be blended with or overlayed - on top of a CRTC during the scanout process. Planes are associated with - a frame buffer to crop a portion of the image memory (source) and - optionally scale it to a destination size. 
The result is then blended
- with or overlayed on top of a CRTC.
- 
- Plane Initialization
- 
- Planes are optional. To create a plane, a KMS driver allocates and
- zeroes an instance of struct drm_plane
- (possibly as part of a larger structure) and registers it with a call
- to drm_plane_init. The function takes a bitmask
- of the CRTCs that can be associated with the plane, a pointer to the
- plane functions and a list of supported formats.
- 
- Plane Operations
- 
- int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
- 
- Enable and configure the plane to use the given CRTC and frame buffer.
- 
- The source rectangle in frame buffer memory coordinates is given by
- the src_x, src_y,
- src_w and src_h
- parameters (as 16.16 fixed point values). Devices that don't support
- subpixel plane coordinates can ignore the fractional part.
- 
- The destination rectangle in CRTC coordinates is given by the
- crtc_x, crtc_y,
- crtc_w and crtc_h
- parameters (as integer values). Devices scale the source rectangle to
- the destination rectangle. If scaling is not supported, and the source
- rectangle size doesn't match the destination rectangle size, the
- driver must return a -EINVAL error.
- 
- int (*disable_plane)(struct drm_plane *plane);
- 
- Disable the plane. The DRM core calls this method in response to a
- DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0.
- Disabled planes must not be processed by the CRTC.
- 
- void (*destroy)(struct drm_plane *plane);
- 
- Destroy the plane when not needed anymore. See
- .
- 
- 
- Encoders (struct <structname>drm_encoder</structname>)
- 
- An encoder takes pixel data from a CRTC and converts it to a format
- suitable for any attached connectors. On some devices, it may be
- possible to have a CRTC send data to more than one encoder. In that
- case, both encoders would receive data from the same scanout buffer,
- resulting in a "cloned" display configuration across the connectors
- attached to each encoder.
- 
- Encoder Initialization
- 
- As for CRTCs, a KMS driver must create, initialize and register at
- least one struct drm_encoder instance. The
- instance is allocated and zeroed by the driver, possibly as part of a
- larger structure.
- 
- Drivers must initialize the struct drm_encoder
- possible_crtcs and
- possible_clones fields before registering the
- encoder. Both fields are bitmasks of, respectively, the CRTCs that the
- encoder can be connected to and the sibling encoders that are
- candidates for cloning.
- 
- After being initialized, the encoder must be registered with a call to
- drm_encoder_init. The function takes a pointer to
- the encoder functions and an encoder type. Supported types are
- 
- DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
- 
- DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
- 
- DRM_MODE_ENCODER_LVDS for display panels
- 
- DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
- SCART)
- 
- DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
- 
- 
- Encoders must be attached to a CRTC to be used. DRM drivers leave
- encoders unattached at initialization time. Applications (or the fbdev
- compatibility layer when implemented) are responsible for attaching the
- encoders they want to use to a CRTC.
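
As a hypothetical example, a driver registering a TMDS encoder that can
only be driven by its first CRTC might do the following. The foo_* names
are invented; drm_encoder_init and drm_encoder_cleanup are the core
functions described in this section:

  struct foo_encoder {
          struct drm_encoder base;
          /* driver-specific state ... */
  };

  static void foo_encoder_destroy(struct drm_encoder *encoder)
  {
          struct foo_encoder *enc =
                  container_of(encoder, struct foo_encoder, base);

          drm_encoder_cleanup(encoder);
          kfree(enc);
  }

  static const struct drm_encoder_funcs foo_encoder_funcs = {
          .destroy = foo_encoder_destroy,
  };

  static int foo_encoder_create(struct drm_device *dev)
  {
          struct foo_encoder *enc;

          enc = kzalloc(sizeof(*enc), GFP_KERNEL);
          if (!enc)
                  return -ENOMEM;

          enc->base.possible_crtcs = 1 << 0;      /* CRTC 0 only */
          enc->base.possible_clones = 0;          /* no cloning */

          drm_encoder_init(dev, &enc->base, &foo_encoder_funcs,
                           DRM_MODE_ENCODER_TMDS);
          return 0;
  }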
- - - - Encoder Operations - - - void (*destroy)(struct drm_encoder *encoder); - - Called to destroy the encoder when not needed anymore. See - . - - - + - Connectors (struct <structname>drm_connector</structname>) + Output configuration - A connector is the final destination for pixel data on a device, and - usually connects directly to an external display device like a monitor - or laptop panel. A connector can only be attached to one encoder at a - time. The connector is also the structure where information about the - attached display is kept, so it contains fields for display data, EDID - data, DPMS & connection status, and information about modes - supported on the attached displays. + The final initialization task is output configuration. This involves: + + + Finding and initializing the CRTCs, encoders, and connectors + for the device. + + + Creating an initial configuration. + + + Registering a framebuffer console driver. + + - Connector Initialization - - Finally a KMS driver must create, initialize, register and attach at - least one struct drm_connector instance. The - instance is created as other KMS objects and initialized by setting the - following fields. - - - - interlace_allowed - - Whether the connector can handle interlaced modes. - - - - doublescan_allowed - - Whether the connector can handle doublescan. - - - - display_info - - - Display information is filled from EDID information when a display - is detected. For non hot-pluggable displays such as flat panels in - embedded systems, the driver should initialize the - display_info.width_mm - and - display_info.height_mm - fields with the physical size of the display. - - - - polled - - Connector polling mode, a combination of - - - DRM_CONNECTOR_POLL_HPD - - The connector generates hotplug events and doesn't need to be - periodically polled. The CONNECT and DISCONNECT flags must not - be set together with the HPD flag. - - - - DRM_CONNECTOR_POLL_CONNECT - - Periodically poll the connector for connection. - - - - DRM_CONNECTOR_POLL_DISCONNECT - - Periodically poll the connector for disconnection. - - - - Set to 0 for connectors that don't support connection status - discovery. - - - - - The connector is then registered with a call to - drm_connector_init with a pointer to the connector - functions and a connector type, and exposed through sysfs with a call to - drm_sysfs_connector_add. - - - Supported connector types are - - DRM_MODE_CONNECTOR_VGA - DRM_MODE_CONNECTOR_DVII - DRM_MODE_CONNECTOR_DVID - DRM_MODE_CONNECTOR_DVIA - DRM_MODE_CONNECTOR_Composite - DRM_MODE_CONNECTOR_SVIDEO - DRM_MODE_CONNECTOR_LVDS - DRM_MODE_CONNECTOR_Component - DRM_MODE_CONNECTOR_9PinDIN - DRM_MODE_CONNECTOR_DisplayPort - DRM_MODE_CONNECTOR_HDMIA - DRM_MODE_CONNECTOR_HDMIB - DRM_MODE_CONNECTOR_TV - DRM_MODE_CONNECTOR_eDP - DRM_MODE_CONNECTOR_VIRTUAL - - - - Connectors must be attached to an encoder to be used. For devices that - map connectors to encoders 1:1, the connector should be attached at - initialization time with a call to - drm_mode_connector_attach_encoder. The driver must - also set the drm_connector - encoder field to point to the attached - encoder. - - - Finally, drivers must initialize the connectors state change detection - with a call to drm_kms_helper_poll_init. If at - least one connector is pollable but can't generate hotplug interrupts - (indicated by the DRM_CONNECTOR_POLL_CONNECT and - DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will - automatically be queued to periodically poll for changes. 
Connectors - that can generate hotplug interrupts must be marked with the - DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must - call drm_helper_hpd_irq_event. The function will - queue a delayed work to check the state of all connectors, but no - periodic polling will be done. - - - - Connector Operations - - Unless otherwise stated, all operations are mandatory. - - - DPMS - void (*dpms)(struct drm_connector *connector, int mode); - - The DPMS operation sets the power state of a connector. The mode - argument is one of - - DRM_MODE_DPMS_ON - DRM_MODE_DPMS_STANDBY - DRM_MODE_DPMS_SUSPEND - DRM_MODE_DPMS_OFF - - - - In all but DPMS_ON mode the encoder to which the connector is attached - should put the display in low-power mode by driving its signals - appropriately. If more than one connector is attached to the encoder, - care should be taken not to change the power state of other displays as - a side effect. Low-power mode should be propagated to the encoders and - CRTCs when all related connectors are put in low-power mode. - - - - Modes - int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, - uint32_t max_height); - - Fill the mode list with all supported modes for the connector. If the - max_width and max_height - arguments are non-zero, the implementation must ignore all modes wider - than max_width or higher than - max_height. - - - In this operation the connector must also fill its - display_info - width_mm and - height_mm fields with the connected display's - physical size in millimeters. The fields should be set to 0 if the value - isn't known or is not applicable (for instance for projector devices). - - - - Connection Status - - The connection status is updated through polling or hotplug events when - supported (see ). The status - value is reported to userspace through ioctls and must not be used - inside the driver, as it only gets initialized by a call to - drm_mode_getconnector from userspace. - - enum drm_connector_status (*detect)(struct drm_connector *connector, - bool force); - - Check to see if anything is attached to the connector. The - force parameter is set to false whilst polling or - to true when checking the connector due to a user request. - force can be used by the driver to avoid - expensive, destructive operations during automated probing. - - - Return connector_status_connected if something is connected to the - connector, connector_status_disconnected if nothing is connected and - connector_status_unknown if the connection state isn't known. - - - Drivers should only return connector_status_connected if the connection - status has really been probed as connected. Connectors that can't detect - the connection status, or failed connection status probes, should return - connector_status_unknown. - - - - Miscellaneous - - - void (*destroy)(struct drm_connector *connector); - - Destroy the connector when not needed anymore. See - . - - - - - - - - Cleanup - - The DRM core manages its objects' lifetime. When an object is not needed - anymore the core calls its destroy function, which must clean up and - free every resource allocated for the object. Every - drm_*_init call must be matched with a - corresponding drm_*_cleanup call to clean up CRTCs - (drm_crtc_cleanup), planes - (drm_plane_cleanup), encoders - (drm_encoder_cleanup) and connectors - (drm_connector_cleanup). Furthermore, connectors - that have been added to sysfs must be removed by a call to - drm_sysfs_connector_remove before calling - drm_connector_cleanup.
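A driver unload path honouring these rules might look like the following
sketch, assuming a hypothetical struct foo_device that embeds one instance of
each KMS object and has already stopped using them.

    struct foo_device {
            struct drm_device *ddev;
            struct drm_crtc crtc;
            struct drm_plane plane;
            struct drm_encoder encoder;
            struct drm_connector connector;
    };

    static void foo_modeset_cleanup(struct foo_device *foo)
    {
            /* Stop the connector poll work before tearing objects down. */
            drm_kms_helper_poll_fini(foo->ddev);

            /* The sysfs entry must be removed before the connector. */
            drm_sysfs_connector_remove(&foo->connector);
            drm_connector_cleanup(&foo->connector);

            drm_encoder_cleanup(&foo->encoder);
            drm_plane_cleanup(&foo->plane);
            drm_crtc_cleanup(&foo->crtc);
    }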
- - - Connector state change detection must be cleaned up with a call to - drm_kms_helper_poll_fini. - - - - Output discovery and initialization example - Output discovery and initialization + + Several core functions exist to create CRTCs, encoders, and + connectors, namely: drm_crtc_init(), drm_connector_init(), and + drm_encoder_init(), along with several "helper" functions to + perform common tasks. + + + Connectors should be registered with sysfs once they've been + detected and initialized, using the + drm_sysfs_connector_add() function. Likewise, when they're + removed from the system, they should be destroyed with + drm_sysfs_connector_remove(). + + + - - In the example above (taken from the i915 driver), a CRTC, connector and - encoder combination is created. A device-specific i2c bus is also - created for fetching EDID data and performing monitor detection. Once - the process is complete, the new connector is registered with sysfs to - make its properties available to applications. - +} +]]> + + + In the example above (again, taken from the i915 driver), a + CRT connector and encoder combination is created. A device-specific + i2c bus is also created for fetching EDID data and + performing monitor detection. Once the process is complete, + the new connector is registered with sysfs to make its + properties available to applications. + + + Helper functions and core functions + + Since many PC-class graphics devices have similar display output + designs, the DRM provides a set of helper functions to make + output management easier. The core helper routines handle + encoder re-routing and the disabling of unused functions following + mode setting. Using the helpers is optional, but recommended for + devices with PC-style architectures (i.e. a set of display planes + for feeding pixels to encoders which are in turn routed to + connectors). Devices with more complex requirements needing + finer grained management may opt to use the core callbacks + directly. + + + [Insert typical diagram here.] [Insert OMAP style config here.] + + + + Each encoder object needs to provide: + + + A DPMS (basically on/off) function. + + + A mode-fixup function (for converting requested modes into + native hardware timings). + + + Functions (prepare, set, and commit) for use by the core DRM + helper functions. + + + Connector helpers need to provide functions (mode-fetch, validity, + and encoder-matching) for returning an ideal encoder for a given + connector. The core connector functions include a DPMS callback, + save/restore routines (deprecated), detection, mode probing, + property handling, and cleanup functions. + + + + + - + - Mid-layer Helper Functions + VBlank event handling - The CRTC, encoder and connector functions provided by the drivers - implement the DRM API. They're called by the DRM core and ioctl handlers - to handle device state changes and configuration requests. As implementing - those functions often requires logic not specific to drivers, mid-layer - helper functions are available to avoid duplicating boilerplate code. - - - The DRM core contains one mid-layer implementation. The mid-layer provides - implementations of several CRTC, encoder and connector functions (called - from the top of the mid-layer) that pre-process requests and call - lower-level functions provided by the driver (at the bottom of the - mid-layer). For instance, the - drm_crtc_helper_set_config function can be used to - fill the struct drm_crtc_funcs - set_config field.
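For a CRTC the wiring could look like the sketch below; encoders and
connectors follow the same pattern with their respective structures. The
foo_* callbacks stand for driver-provided bottom handlers and are not part of
the DRM API; struct foo_device is the same hypothetical container used in the
earlier cleanup sketch.

    static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
            /* Bottom of the mid-layer: device-specific handlers. */
            .mode_fixup = foo_crtc_mode_fixup,
            .prepare = foo_crtc_prepare,
            .mode_set = foo_crtc_mode_set,
            .commit = foo_crtc_commit,
    };

    static const struct drm_crtc_funcs foo_crtc_funcs = {
            /* Top of the mid-layer: generic implementation from the core. */
            .set_config = drm_crtc_helper_set_config,
            .destroy = foo_crtc_destroy,
    };

    static void foo_crtc_register(struct foo_device *foo,
                                  struct drm_device *dev)
    {
            drm_crtc_init(dev, &foo->crtc, &foo_crtc_funcs);
            /* Install the bottom handlers right after registration. */
            drm_crtc_helper_add(&foo->crtc, &foo_crtc_helper_funcs);
    }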
When called, it will split - the set_config operation in smaller, simpler - operations and call the driver to handle them. + The DRM core exposes two vertical blank related ioctls: + + + DRM_IOCTL_WAIT_VBLANK + + + This takes a struct drm_wait_vblank structure as its argument, + and it is used to block or request a signal when a specified + vblank event occurs. + + + + + DRM_IOCTL_MODESET_CTL + + + This should be called by application level drivers before and + after mode setting, since on many devices the vertical blank + counter is reset at that time. Internally, the DRM snapshots + the last vblank count when the ioctl is called with the + _DRM_PRE_MODESET command, so that the counter won't go backwards + (which is dealt with when _DRM_POST_MODESET is used). + + + + + - To use the mid-layer, drivers call drm_crtc_helper_add, - drm_encoder_helper_add and - drm_connector_helper_add functions to install their - mid-layer bottom operations handlers, and fill the - drm_crtc_funcs, - drm_encoder_funcs and - drm_connector_funcs structures with pointers to - the mid-layer top API functions. Installing the mid-layer bottom operation - handlers is best done right after registering the corresponding KMS object. + To support the functions above, the DRM core provides several + helper functions for tracking vertical blank counters, and + requires drivers to provide several callbacks: + get_vblank_counter(), enable_vblank() and disable_vblank(). The + core uses get_vblank_counter() to keep the counter accurate + across interrupt disable periods. It should return the current + vertical blank event count, which is often tracked in a device + register. The enable and disable vblank callbacks should enable + and disable vertical blank interrupts, respectively. In the + absence of DRM clients waiting on vblank events, the core DRM + code uses the disable_vblank() function to disable + interrupts, which saves power. They are re-enabled again when + a client calls the vblank wait ioctl above. - The mid-layer is not split between CRTC, encoder and connector operations. - To use it, a driver must provide bottom functions for all of the three KMS - entities. + A device that doesn't provide a count register may simply use an + internal atomic counter incremented on every vertical blank + interrupt (and then treat the enable_vblank() and disable_vblank() + callbacks as no-ops). - - Helper Functions - - - int drm_crtc_helper_set_config(struct drm_mode_set *set); - - The drm_crtc_helper_set_config helper function - is a CRTC set_config implementation. It - first tries to locate the best encoder for each connector by calling - the connector best_encoder helper - operation. - - - After locating the appropriate encoders, the helper function will - call the mode_fixup encoder and CRTC helper - operations to adjust the requested mode, or reject it completely in - which case an error will be returned to the application. If the new - configuration after mode adjustment is identical to the current - configuration the helper function will return without performing any - other operation. - - - If the adjusted mode is identical to the current mode but changes to - the frame buffer need to be applied, the - drm_crtc_helper_set_config function will call - the CRTC mode_set_base helper operation. 
If - the adjusted mode differs from the current mode, or if the - mode_set_base helper operation is not - provided, the helper function performs a full mode set sequence by - calling the prepare, - mode_set and - commit CRTC and encoder helper operations, - in that order. - - - - void drm_helper_connector_dpms(struct drm_connector *connector, int mode); - - The drm_helper_connector_dpms helper function - is a connector dpms implementation that - tracks power state of connectors. To use the function, drivers must - provide dpms helper operations for CRTCs - and encoders to apply the DPMS state to the device. - - - The mid-layer doesn't track the power state of CRTCs and encoders. - The dpms helper operations can thus be - called with a mode identical to the currently active mode. - - - - int drm_helper_probe_single_connector_modes(struct drm_connector *connector, - uint32_t maxX, uint32_t maxY); - - The drm_helper_probe_single_connector_modes helper - function is a connector fill_modes - implementation that updates the connection status for the connector - and then retrieves a list of modes by calling the connector - get_modes helper operation. - - - The function filters out modes larger than - max_width and max_height - if specified. It then calls the connector - mode_valid helper operation for each mode in - the probed list to check whether the mode is valid for the connector. - - - - - - CRTC Helper Operations - - - bool (*mode_fixup)(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Let CRTCs adjust the requested mode or reject it completely. This - operation returns true if the mode is accepted (possibly after being - adjusted) or false if it is rejected. - - - The mode_fixup operation should reject the - mode if it can't reasonably use it. The definition of "reasonable" - is currently fuzzy in this context. One possible behaviour would be - to set the adjusted mode to the panel timings when a fixed-mode - panel is used with hardware capable of scaling. Another behaviour - would be to accept any input mode and adjust it to the closest mode - supported by the hardware (FIXME: This needs to be clarified). - - - - int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) - - Move the CRTC on the current frame buffer (stored in - crtc->fb) to position (x,y). Any of the frame - buffer, x position or y position may have been modified. - - - This helper operation is optional. If not provided, the - drm_crtc_helper_set_config function will fall - back to the mode_set helper operation. - - - FIXME: Why are x and y passed as arguments, as they can be accessed - through crtc->x and - crtc->y? - - - - void (*prepare)(struct drm_crtc *crtc); - - Prepare the CRTC for mode setting. This operation is called after - validating the requested mode. Drivers use it to perform - device-specific operations required before setting the new mode. - - - - int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); - - Set a new mode, position and frame buffer. Depending on the device - requirements, the mode can be stored internally by the driver and - applied in the commit operation, or - programmed to the hardware immediately. - - - The mode_set operation returns 0 on success - or a negative error code if an error occurs. - - - - void (*commit)(struct drm_crtc *crtc); - - Commit a mode. 
This operation is called after setting the new mode. - Upon return the device must use the new mode and be fully - operational. - - - - - - Encoder Helper Operations - - - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - FIXME: The mode argument be const, but the i915 driver modifies - mode->clock in intel_dp_mode_fixup. - - - Let encoders adjust the requested mode or reject it completely. This - operation returns true if the mode is accepted (possibly after being - adjusted) or false if it is rejected. See the - mode_fixup CRTC helper - operation for an explanation of the allowed adjustments. - - - - void (*prepare)(struct drm_encoder *encoder); - - Prepare the encoder for mode setting. This operation is called after - validating the requested mode. Drivers use it to perform - device-specific operations required before setting the new mode. - - - - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - - Set a new mode. Depending on the device requirements, the mode can - be stored internally by the driver and applied in the - commit operation, or programmed to the - hardware immediately. - - - - void (*commit)(struct drm_encoder *encoder); - - Commit a mode. This operation is called after setting the new mode. - Upon return the device must use the new mode and be fully - operational. - - - - - - Connector Helper Operations - - - struct drm_encoder *(*best_encoder)(struct drm_connector *connector); - - Return a pointer to the best encoder for the connecter. Device that - map connectors to encoders 1:1 simply return the pointer to the - associated encoder. This operation is mandatory. - - - - int (*get_modes)(struct drm_connector *connector); - - Fill the connector's probed_modes list - by parsing EDID data with drm_add_edid_modes or - calling drm_mode_probed_add directly for every - supported mode and return the number of modes it has detected. This - operation is mandatory. - - - When adding modes manually the driver creates each mode with a call to - drm_mode_create and must fill the following fields. - - - __u32 type; - - Mode type bitmask, a combination of - - - DRM_MODE_TYPE_BUILTIN - not used? - - - DRM_MODE_TYPE_CLOCK_C - not used? - - - DRM_MODE_TYPE_CRTC_C - not used? - - - - DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector - - - not used? - - - - DRM_MODE_TYPE_DEFAULT - not used? - - - DRM_MODE_TYPE_USERDEF - not used? - - - DRM_MODE_TYPE_DRIVER - - - The mode has been created by the driver (as opposed to - to user-created modes). - - - - - Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they - create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred - mode. - - - - __u32 clock; - Pixel clock frequency in kHz unit - - - __u16 hdisplay, hsync_start, hsync_end, htotal; - __u16 vdisplay, vsync_start, vsync_end, vtotal; - Horizontal and vertical timing information - <----------------><-------------><--------------> - - //////////////////////| - ////////////////////// | - ////////////////////// |.................. ................ 
- _______________ - - <----- [hv]display -----> - <------------- [hv]sync_start ------------> - <--------------------- [hv]sync_end ---------------------> - <-------------------------------- [hv]total -----------------------------> -]]> - - - __u16 hskew; - __u16 vscan; - Unknown - - - __u32 flags; - - Mode flags, a combination of - - - DRM_MODE_FLAG_PHSYNC - - Horizontal sync is active high - - - - DRM_MODE_FLAG_NHSYNC - - Horizontal sync is active low - - - - DRM_MODE_FLAG_PVSYNC - - Vertical sync is active high - - - - DRM_MODE_FLAG_NVSYNC - - Vertical sync is active low - - - - DRM_MODE_FLAG_INTERLACE - - Mode is interlaced - - - - DRM_MODE_FLAG_DBLSCAN - - Mode uses doublescan - - - - DRM_MODE_FLAG_CSYNC - - Mode uses composite sync - - - - DRM_MODE_FLAG_PCSYNC - - Composite sync is active high - - - - DRM_MODE_FLAG_NCSYNC - - Composite sync is active low - - - - DRM_MODE_FLAG_HSKEW - - hskew provided (not used?) - - - - DRM_MODE_FLAG_BCAST - - not used? - - - - DRM_MODE_FLAG_PIXMUX - - not used? - - - - DRM_MODE_FLAG_DBLCLK - - not used? - - - - DRM_MODE_FLAG_CLKDIV2 - - ? - - - - - - Note that modes marked with the INTERLACE or DBLSCAN flags will be - filtered out by - drm_helper_probe_single_connector_modes if - the connector's interlace_allowed or - doublescan_allowed field is set to 0. - - - - char name[DRM_DISPLAY_MODE_LEN]; - - Mode name. The driver must call - drm_mode_set_name to fill the mode name from - hdisplay, - vdisplay and interlace flag after - filling the corresponding fields. - - - - - - The vrefresh value is computed by - drm_helper_probe_single_connector_modes. - - - When parsing EDID data, drm_add_edid_modes fill the - connector display_info - width_mm and - height_mm fields. When creating modes - manually the get_modes helper operation must - set the display_info - width_mm and - height_mm fields if they haven't been set - already (for instance at initilization time when a fixed-size panel is - attached to the connector). The mode width_mm - and height_mm fields are only used internally - during EDID parsing and should not be set when creating modes manually. - - - - int (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); - - Verify whether a mode is valid for the connector. Return MODE_OK for - supported modes and one of the enum drm_mode_status values (MODE_*) - for unsupported modes. This operation is mandatory. - - - As the mode rejection reason is currently not used beside for - immediately removing the unsupported mode, an implementation can - return MODE_BAD regardless of the exact reason why the mode is not - valid. - - - Note that the mode_valid helper operation is - only called for modes detected by the device, and - not for modes set by the user through the CRTC - set_config operation. - - - - - - - - Vertical Blanking - - Vertical blanking plays a major role in graphics rendering. To achieve - tear-free display, users must synchronize page flips and/or rendering to - vertical blanking. The DRM API offers ioctls to perform page flips - synchronized to vertical blanking and wait for vertical blanking. - - - The DRM core handles most of the vertical blanking management logic, which - involves filtering out spurious interrupts, keeping race-free blanking - counters, coping with counter wrap-around and resets and keeping use - counts. It relies on the driver to generate vertical blanking interrupts - and optionally provide a hardware vertical blanking counter. Drivers must - implement the following operations. 
- - - - int (*enable_vblank) (struct drm_device *dev, int crtc); -void (*disable_vblank) (struct drm_device *dev, int crtc); - - Enable or disable vertical blanking interrupts for the given CRTC. - - - - u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); - - Retrieve the value of the vertical blanking counter for the given - CRTC. If the hardware maintains a vertical blanking counter its value - should be returned. Otherwise drivers can use the - drm_vblank_count helper function to handle this - operation. - - - - - Drivers must initialize the vertical blanking handling core with a call to - drm_vblank_init in their - load operation. The function will set the struct - drm_device - vblank_disable_allowed field to 0. This will - keep vertical blanking interrupts enabled permanently until the first mode - set operation, where vblank_disable_allowed is - set to 1. The reason behind this is not clear. Drivers can set the field - to 1 after calling drm_vblank_init to make vertical - blanking interrupts dynamically managed from the beginning. - - - Vertical blanking interrupts can be enabled by the DRM core or by drivers - themselves (for instance to handle page flipping operations). The DRM core - maintains a vertical blanking use count to ensure that the interrupts are - not disabled while a user still needs them. To increment the use count, - drivers call drm_vblank_get. Upon return vertical - blanking interrupts are guaranteed to be enabled. - - - To decrement the use count drivers call - drm_vblank_put. Only when the use count drops to zero - will the DRM core disable the vertical blanking interrupts after a delay - by scheduling a timer. The delay is accessible through the vblankoffdelay - module parameter or the drm_vblank_offdelay global - variable and expressed in milliseconds. Its default value is 5000 ms. - - - When a vertical blanking interrupt occurs drivers only need to call the - drm_handle_vblank function to account for the - interrupt. - + + Memory management - Resources allocated by drm_vblank_init must be freed - with a call to drm_vblank_cleanup in the driver - unload operation handler. + The memory manager lies at the heart of many DRM operations; it + is required to support advanced client features like OpenGL + pbuffers. The DRM currently contains two memory managers: TTM + and GEM. - - - - - Open/Close, File Operations and IOCTLs - Open and Close - int (*firstopen) (struct drm_device *); -void (*lastclose) (struct drm_device *); -int (*open) (struct drm_device *, struct drm_file *); -void (*preclose) (struct drm_device *, struct drm_file *); -void (*postclose) (struct drm_device *, struct drm_file *); - Open and close handlers. None of those methods are mandatory. - - - The firstopen method is called by the DRM core - when an application opens a device that has no other opened file handle. - Similarly the lastclose method is called when - the last application holding a file handle opened on the device closes - it. Both methods are mostly used for UMS (User Mode Setting) drivers to - acquire and release device resources which should be done in the - load and unload - methods for KMS drivers. - - - Note that the lastclose method is also called - at module unload time or, for hot-pluggable devices, when the device is - unplugged. The firstopen and - lastclose calls can thus be unbalanced. - - - The open method is called every time the device - is opened by an application. 
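For instance, a driver that needs per-file state could implement the open and
postclose pair along these lines; struct foo_file_priv and the foo_* names
are hypothetical.

    struct foo_file_priv {
            unsigned long flags;            /* per-file driver state */
    };

    static int foo_driver_open(struct drm_device *dev, struct drm_file *file)
    {
            struct foo_file_priv *priv;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            file->driver_priv = priv;
            return 0;
    }

    static void foo_driver_postclose(struct drm_device *dev,
                                     struct drm_file *file)
    {
            kfree(file->driver_priv);
    }

    /* Hooked up in struct drm_driver as .open = foo_driver_open and
     * .postclose = foo_driver_postclose. */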
Drivers can allocate per-file private data - in this method and store them in the struct - drm_file driver_priv - field. Note that the open method is called - before firstopen. - + The Translation Table Manager (TTM) - The close operation is split into preclose and - postclose methods. Drivers must stop and - cleanup all per-file operations in the preclose - method. For instance pending vertical blanking and page flip events must - be cancelled. No per-file operation is allowed on the file handle after - returning from the preclose method. + TTM was developed by Tungsten Graphics, primarily by Thomas + Hellström, and is intended to be a flexible, high performance + graphics memory manager. - Finally the postclose method is called as the - last step of the close operation, right before calling the - lastclose method if no other open file handle - exists for the device. Drivers that have allocated per-file private data - in the open method should free it here. + Drivers wishing to support TTM must fill out a drm_bo_driver + structure. - The lastclose method should restore CRTC and - plane properties to default value, so that a subsequent open of the - device will not inherit state from the previous user. + TTM design background and information belongs here. + - File Operations - const struct file_operations *fops - File operations for the DRM device node. - - Drivers must define the file operations structure that forms the DRM - userspace API entry point, even though most of those operations are - implemented in the DRM core. The open, - release and ioctl - operations are handled by - - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - #ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, - #endif - - - - Drivers that implement private ioctls that requires 32/64bit - compatibility support must provide their own - compat_ioctl handler that processes private - ioctls and calls drm_compat_ioctl for core ioctls. - - - The read and poll - operations provide support for reading DRM events and polling them. They - are implemented by - - .poll = drm_poll, - .read = drm_read, - .fasync = drm_fasync, - .llseek = no_llseek, - - - - The memory mapping implementation varies depending on how the driver - manages memory. Pre-GEM drivers will use drm_mmap, - while GEM-aware drivers will use drm_gem_mmap. See - . - - .mmap = drm_gem_mmap, - - + The Graphics Execution Manager (GEM) - No other file operation is supported by the DRM API. + GEM is an Intel project, authored by Eric Anholt and Keith + Packard. It provides simpler interfaces than TTM, and is well + suited for UMA devices. - - - IOCTLs - struct drm_ioctl_desc *ioctls; -int num_ioctls; - Driver-specific ioctls descriptors table. - Driver-specific ioctls numbers start at DRM_COMMAND_BASE. The ioctls - descriptors table is indexed by the ioctl number offset from the base - value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the - table entries. + GEM-enabled drivers must provide gem_init_object() and + gem_free_object() callbacks to support the core memory + allocation routines. They should also provide several driver-specific + ioctls to support command execution, pinning, buffer + read & write, mapping, and domain ownership transfers. - DRM_IOCTL_DEF_DRV(ioctl, func, flags) - - ioctl is the ioctl name. Drivers must define - the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number - offset from DRM_COMMAND_BASE and the ioctl number respectively. 
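A hypothetical private ioctl could thus be declared and hooked up as in the
following sketch. The FOO_SET_TILING name, its payload structure, the
FOO_TILING_MAX limit and the handler are all invented for the example.

    /* In the driver's private header. */
    #define DRM_FOO_SET_TILING      0x00

    /* In the public header shared with userspace. */
    struct drm_foo_set_tiling {
            __u32 handle;
            __u32 tiling_mode;
    };
    #define DRM_IOCTL_FOO_SET_TILING \
            DRM_IOW(DRM_COMMAND_BASE + DRM_FOO_SET_TILING, \
                    struct drm_foo_set_tiling)

    static int foo_set_tiling_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
    {
            struct drm_foo_set_tiling *args = data;

            if (args->tiling_mode > FOO_TILING_MAX)
                    return -EINVAL;

            /* ... program the hardware ... */
            return 0;
    }

    static struct drm_ioctl_desc foo_ioctls[] = {
            DRM_IOCTL_DEF_DRV(FOO_SET_TILING, foo_set_tiling_ioctl,
                              DRM_AUTH | DRM_UNLOCKED),
    };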
The - first macro is private to the device while the second must be exposed - to userspace in a public header. - - - func is a pointer to the ioctl handler function - compatible with the drm_ioctl_t type. - typedef int drm_ioctl_t(struct drm_device *dev, void *data, - struct drm_file *file_priv); - - - flags is a bitmask combination of the following - values. It restricts how the ioctl is allowed to be called. - - - DRM_AUTH - Only authenticated callers allowed - - - DRM_MASTER - The ioctl can only be called on the master file - handle - - - DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed - - - DRM_CONTROL_ALLOW - The ioctl can only be called on a control - device - - - DRM_UNLOCKED - The ioctl handler will be called without locking - the DRM global mutex - - - + On a fundamental level, GEM involves several operations: + + Memory allocation and freeing + Command execution + Aperture management at command execution time + + Buffer object allocation is relatively + straightforward and largely provided by Linux's shmem layer, which + provides memory to back each object. When mapped into the GTT + or used in a command buffer, the backing pages for an object are + flushed to memory and marked write combined so as to be coherent + with the GPU. Likewise, if the CPU accesses an object after the GPU + has finished rendering to the object, then the object must be made + coherent with the CPU's view + of memory, usually involving GPU cache flushing of various kinds. + This core CPU<->GPU coherency management is provided by a + device-specific ioctl, which evaluates an object's current domain and + performs any necessary flushing or synchronization to put the object + into the desired coherency domain (note that the object may be busy, + i.e. an active render target; in that case, setting the domain + blocks the client and waits for rendering to complete before + performing any necessary flushing operations). + + + Perhaps the most important GEM function is providing a command + execution interface to clients. Client programs construct command + buffers containing references to previously allocated memory objects, + and then submit them to GEM. At that point, GEM takes care to bind + all the objects into the GTT, execute the buffer, and provide + necessary synchronization between clients accessing the same buffers. + This often involves evicting some objects from the GTT and re-binding + others (a fairly expensive operation), and providing relocation + support which hides fixed GTT offsets from clients. Clients must + take care not to submit command buffers that reference more objects + than can fit in the GTT; otherwise, GEM will reject them and no rendering + will occur. Similarly, if several objects in the buffer require + fence registers to be allocated for correct rendering (e.g. 2D blits + on pre-965 chips), care must be taken not to require more fence + registers than are available to the client. Such resource management + should be abstracted from the client in libdrm. + + - Command submission & fencing + Output management - This should cover a few device-specific command submission - implementations. + At the core of the DRM output management code is a set of + structures representing CRTCs, encoders, and connectors. + + + A CRTC is an abstraction representing a part of the chip that + contains a pointer to a scanout buffer. Therefore, the number + of CRTCs available determines how many independent scanout + buffers can be active at any given time. 
The CRTC structure + contains several fields to support this: a pointer to some video + memory, a display mode, and an (x, y) offset into the video + memory to support panning or configurations where one piece of + video memory spans multiple CRTCs. + + + An encoder takes pixel data from a CRTC and converts it to a + format suitable for any attached connectors. On some devices, + it may be possible to have a CRTC send data to more than one + encoder. In that case, both encoders would receive data from + the same scanout buffer, resulting in a "cloned" display + configuration across the connectors attached to each encoder. + + + A connector is the final destination for pixel data on a device, + and usually connects directly to an external display device like + a monitor or laptop panel. A connector can only be attached to + one encoder at a time. The connector is also the structure + where information about the attached display is kept, so it + contains fields for display data, EDID data, DPMS & + connection status, and information about modes supported on the + attached displays. + - + + Framebuffer management + + Clients need to provide a framebuffer object which provides a source + of pixels for a CRTC to deliver to the encoder(s) and ultimately the + connector(s). A framebuffer is fundamentally a driver-specific memory + object, made into an opaque handle by the DRM's addfb() function. + Once a framebuffer has been created this way, it may be passed to the + KMS mode setting routines for use in a completed configuration. + + - Suspend/Resume + Command submission & fencing - The DRM core provides some suspend/resume code, but drivers wanting full - suspend/resume support should provide save() and restore() functions. - These are called at suspend, hibernate, or resume time, and should perform - any state save or restore required by your device across suspend or - hibernate states. + This should cover a few device-specific command submission + implementations. - int (*suspend) (struct drm_device *, pm_message_t state); -int (*resume) (struct drm_device *); + + + + Suspend/resume - Those are legacy suspend and resume methods. New driver should use the - power management interface provided by their bus type (usually through - the struct device_driver dev_pm_ops) and set - these methods to NULL. + The DRM core provides some suspend/resume code, but drivers + wanting full suspend/resume support should provide save() and + restore() functions. These are called at suspend, + hibernate, or resume time, and should perform any state save or + restore required by your device across suspend or hibernate + states. @@ -2385,35 +833,6 @@ int (*resume) (struct drm_device *); - - @@ -2434,42 +853,6 @@ int (*resume) (struct drm_device *); Cover generic ioctls and sysfs layout here. We only need high-level info, since man pages should cover the rest. - - - - - VBlank event handling - - The DRM core exposes two vertical blank related ioctls: - - - DRM_IOCTL_WAIT_VBLANK - - - This takes a struct drm_wait_vblank structure as its argument, - and it is used to block or request a signal when a specified - vblank event occurs. - - - - - DRM_IOCTL_MODESET_CTL - - - This should be called by application level drivers before and - after mode setting, since on many devices the vertical blank - counter is reset at that time. 
Internally, the DRM snapshots - the last vblank count when the ioctl is called with the - _DRM_PRE_MODESET command, so that the counter won't go backwards - (which is dealt with when _DRM_POST_MODESET is used). - - - - - - - - diff --git a/trunk/Documentation/vfio.txt b/trunk/Documentation/vfio.txt index 0cb6685c8029..8eda3635a17d 100644 --- a/trunk/Documentation/vfio.txt +++ b/trunk/Documentation/vfio.txt @@ -133,7 +133,7 @@ character devices for this group: $ lspci -n -s 0000:06:0d.0 06:0d.0 0401: 1102:0002 (rev 08) # echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind -# echo 1102 0002 > /sys/bus/pci/drivers/vfio/new_id +# echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id Now we need to look at what other devices are in the group to free it for use by VFIO: diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index b17587d9412f..9a6c4da3b2ff 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3552,11 +3552,12 @@ K: \b(ABS|SYN)_MT_ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support +M: Lukasz Dorau +M: Maciej Patelczyk M: Dave Jiang -M: Ed Nadolski L: linux-scsi@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git -S: Maintained +T: git git://git.code.sf.net/p/intel-sas/isci +S: Supported F: drivers/scsi/isci/ F: firmware/isci/ @@ -5544,6 +5545,8 @@ F: Documentation/devicetree/bindings/pwm/ F: include/linux/pwm.h F: include/linux/of_pwm.h F: drivers/pwm/ +F: drivers/video/backlight/pwm_bl.c +F: include/linux/pwm_backlight.h PXA2xx/PXA3xx SUPPORT M: Eric Miao diff --git a/trunk/Makefile b/trunk/Makefile index a3c11d589681..bb9fff26f078 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Terrified Chipmunk # *DOCUMENTATION* diff --git a/trunk/arch/arm/mach-mxs/mach-mxs.c b/trunk/arch/arm/mach-mxs/mach-mxs.c index 8dabfe81d07c..ff886e01a0b0 100644 --- a/trunk/arch/arm/mach-mxs/mach-mxs.c +++ b/trunk/arch/arm/mach-mxs/mach-mxs.c @@ -261,7 +261,7 @@ static void __init apx4devkit_init(void) enable_clk_enet_out(); if (IS_BUILTIN(CONFIG_PHYLIB)) - phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK, + phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK, apx4devkit_phy_fixup); mxsfb_pdata.mode_list = apx4devkit_video_modes; diff --git a/trunk/arch/arm/mach-orion5x/common.c b/trunk/arch/arm/mach-orion5x/common.c index 410291c67666..a6cd14ab1e4e 100644 --- a/trunk/arch/arm/mach-orion5x/common.c +++ b/trunk/arch/arm/mach-orion5x/common.c @@ -204,6 +204,13 @@ void __init orion5x_wdt_init(void) void __init orion5x_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + + /* + * Some Orion5x devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. 
+ */ + init_dma_coherent_pool_size(SZ_1M); } int orion5x_tclk; diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index e59c4ab71bcb..13f555d62491 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -346,6 +346,8 @@ static int __init atomic_pool_init(void) (unsigned)pool->size / 1024); return 0; } + + kfree(pages); no_pages: kfree(bitmap); no_bitmap: diff --git a/trunk/arch/c6x/include/asm/Kbuild b/trunk/arch/c6x/include/asm/Kbuild index 3af601e31e66..f08e89183cda 100644 --- a/trunk/arch/c6x/include/asm/Kbuild +++ b/trunk/arch/c6x/include/asm/Kbuild @@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm generic-y += atomic.h generic-y += auxvec.h +generic-y += barrier.h generic-y += bitsperlong.h generic-y += bugs.h generic-y += cputime.h diff --git a/trunk/arch/c6x/include/asm/barrier.h b/trunk/arch/c6x/include/asm/barrier.h deleted file mode 100644 index 538240e85909..000000000000 --- a/trunk/arch/c6x/include/asm/barrier.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Port on Texas Instruments TMS320C6x architecture - * - * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated - * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _ASM_C6X_BARRIER_H -#define _ASM_C6X_BARRIER_H - -#define nop() asm("NOP\n"); - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) - -#endif /* _ASM_C6X_BARRIER_H */ diff --git a/trunk/arch/tile/include/gxio/iorpc_trio.h b/trunk/arch/tile/include/gxio/iorpc_trio.h index 15fb77992083..58105c31228b 100644 --- a/trunk/arch/tile/include/gxio/iorpc_trio.h +++ b/trunk/arch/tile/include/gxio/iorpc_trio.h @@ -25,21 +25,23 @@ #include #include -#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) +#define GXIO_TRIO_OP_DEALLOC_ASID IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) +#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401) -#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402) +#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404) -#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e) -#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f) +#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412) -#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417) -#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418) -#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419) -#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a) +#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414) -#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c) -#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d) -#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) +#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX 
IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) +#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f) +#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420) +#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421) + +#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423) +#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424) +#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425) #define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) diff --git a/trunk/arch/um/include/asm/processor-generic.h b/trunk/arch/um/include/asm/processor-generic.h index 69f1c57a8d0d..33a6a2423bd2 100644 --- a/trunk/arch/um/include/asm/processor-generic.h +++ b/trunk/arch/um/include/asm/processor-generic.h @@ -20,14 +20,6 @@ struct mm_struct; struct thread_struct { struct task_struct *saved_task; - /* - * This flag is set to 1 before calling do_fork (and analyzed in - * copy_thread) to mark that we are begin called from userspace (fork / - * vfork / clone), and reset to 0 after. It is left to 0 when called - * from kernelspace (i.e. kernel_thread() or fork_idle(), - * as of 2.6.11). - */ - int forking; struct pt_regs regs; int singlestep_syscall; void *fault_addr; @@ -58,7 +50,6 @@ struct thread_struct { #define INIT_THREAD \ { \ - .forking = 0, \ .regs = EMPTY_REGS, \ .fault_addr = NULL, \ .prev_sched = NULL, \ diff --git a/trunk/arch/um/include/shared/common-offsets.h b/trunk/arch/um/include/shared/common-offsets.h index 40db8f71deae..2df313b6a586 100644 --- a/trunk/arch/um/include/shared/common-offsets.h +++ b/trunk/arch/um/include/shared/common-offsets.h @@ -7,16 +7,6 @@ DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK); DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT); DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC); -DEFINE_STR(UM_KERN_EMERG, KERN_EMERG); -DEFINE_STR(UM_KERN_ALERT, KERN_ALERT); -DEFINE_STR(UM_KERN_CRIT, KERN_CRIT); -DEFINE_STR(UM_KERN_ERR, KERN_ERR); -DEFINE_STR(UM_KERN_WARNING, KERN_WARNING); -DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE); -DEFINE_STR(UM_KERN_INFO, KERN_INFO); -DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG); -DEFINE_STR(UM_KERN_CONT, KERN_CONT); - DEFINE(UM_ELF_CLASS, ELF_CLASS); DEFINE(UM_ELFCLASS32, ELFCLASS32); DEFINE(UM_ELFCLASS64, ELFCLASS64); diff --git a/trunk/arch/um/include/shared/user.h b/trunk/arch/um/include/shared/user.h index 4fa82c055aab..cef068563336 100644 --- a/trunk/arch/um/include/shared/user.h +++ b/trunk/arch/um/include/shared/user.h @@ -26,6 +26,17 @@ extern void panic(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); +/* Requires preincluding include/linux/kern_levels.h */ +#define UM_KERN_EMERG KERN_EMERG +#define UM_KERN_ALERT KERN_ALERT +#define UM_KERN_CRIT KERN_CRIT +#define UM_KERN_ERR KERN_ERR +#define UM_KERN_WARNING KERN_WARNING +#define UM_KERN_NOTICE KERN_NOTICE +#define UM_KERN_INFO KERN_INFO +#define UM_KERN_DEBUG KERN_DEBUG +#define UM_KERN_CONT KERN_CONT + #ifdef UML_CONFIG_PRINTK extern int printk(const char *fmt, ...) 
__attribute__ ((format (printf, 1, 2))); diff --git a/trunk/arch/um/kernel/exec.c b/trunk/arch/um/kernel/exec.c index 6cade9366364..8c82786da823 100644 --- a/trunk/arch/um/kernel/exec.c +++ b/trunk/arch/um/kernel/exec.c @@ -39,34 +39,21 @@ void flush_thread(void) void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) { + get_safe_registers(regs->regs.gp, regs->regs.fp); PT_REGS_IP(regs) = eip; PT_REGS_SP(regs) = esp; -} -EXPORT_SYMBOL(start_thread); - -static long execve1(const char *file, - const char __user *const __user *argv, - const char __user *const __user *env) -{ - long error; - - error = do_execve(file, argv, env, ¤t->thread.regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; + current->ptrace &= ~PT_DTRACE; #ifdef SUBARCH_EXECVE1 - SUBARCH_EXECVE1(¤t->thread.regs.regs); + SUBARCH_EXECVE1(regs->regs); #endif - task_unlock(current); - } - return error; } +EXPORT_SYMBOL(start_thread); long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) { long err; - err = execve1(file, argv, env); + err = do_execve(file, argv, env, ¤t->thread.regs); if (!err) UML_LONGJMP(current->thread.exec_buf, 1); return err; @@ -81,7 +68,7 @@ long sys_execve(const char __user *file, const char __user *const __user *argv, filename = getname(file); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; - error = execve1(filename, argv, env); + error = do_execve(filename, argv, env, ¤t->thread.regs); putname(filename); out: return error; diff --git a/trunk/arch/um/kernel/process.c b/trunk/arch/um/kernel/process.c index 57fc7028714a..c5f5afa50745 100644 --- a/trunk/arch/um/kernel/process.c +++ b/trunk/arch/um/kernel/process.c @@ -181,11 +181,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, struct pt_regs *regs) { void (*handler)(void); + int kthread = current->flags & PF_KTHREAD; int ret = 0; p->thread = (struct thread_struct) INIT_THREAD; - if (current->thread.forking) { + if (!kthread) { memcpy(&p->thread.regs.regs, ®s->regs, sizeof(p->thread.regs.regs)); PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); @@ -195,8 +196,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, handler = fork_handler; arch_copy_thread(¤t->thread.arch, &p->thread.arch); - } - else { + } else { get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); p->thread.request.u.thread = current->thread.request.u.thread; handler = new_thread_handler; @@ -204,7 +204,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, new_thread(task_stack_page(p), &p->thread.switch_buf, handler); - if (current->thread.forking) { + if (!kthread) { clear_flushed_tls(p); /* diff --git a/trunk/arch/um/kernel/signal.c b/trunk/arch/um/kernel/signal.c index 7362d58efc29..cc9c2350e417 100644 --- a/trunk/arch/um/kernel/signal.c +++ b/trunk/arch/um/kernel/signal.c @@ -22,9 +22,13 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr, struct k_sigaction *ka, siginfo_t *info) { sigset_t *oldset = sigmask_to_save(); + int singlestep = 0; unsigned long sp; int err; + if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) + singlestep = 1; + /* Did we come from a system call? */ if (PT_REGS_SYSCALL_NR(regs) >= 0) { /* If so, check system call restarting.. 
*/ @@ -61,7 +65,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr, if (err) force_sigsegv(signr, current); else - signal_delivered(signr, info, ka, regs, 0); + signal_delivered(signr, info, ka, regs, singlestep); } static int kern_do_signal(struct pt_regs *regs) diff --git a/trunk/arch/um/kernel/syscall.c b/trunk/arch/um/kernel/syscall.c index f958cb876ee3..a4c6d8eee74c 100644 --- a/trunk/arch/um/kernel/syscall.c +++ b/trunk/arch/um/kernel/syscall.c @@ -17,25 +17,25 @@ long sys_fork(void) { - long ret; - - current->thread.forking = 1; - ret = do_fork(SIGCHLD, UPT_SP(¤t->thread.regs.regs), + return do_fork(SIGCHLD, UPT_SP(¤t->thread.regs.regs), ¤t->thread.regs, 0, NULL, NULL); - current->thread.forking = 0; - return ret; } long sys_vfork(void) { - long ret; - - current->thread.forking = 1; - ret = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, UPT_SP(¤t->thread.regs.regs), ¤t->thread.regs, 0, NULL, NULL); - current->thread.forking = 0; - return ret; +} + +long sys_clone(unsigned long clone_flags, unsigned long newsp, + void __user *parent_tid, void __user *child_tid) +{ + if (!newsp) + newsp = UPT_SP(¤t->thread.regs.regs); + + return do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, + child_tid); } long old_mmap(unsigned long addr, unsigned long len, diff --git a/trunk/arch/um/scripts/Makefile.rules b/trunk/arch/um/scripts/Makefile.rules index d50270d26b42..15889df9b466 100644 --- a/trunk/arch/um/scripts/Makefile.rules +++ b/trunk/arch/um/scripts/Makefile.rules @@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS)) USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) $(USER_OBJS:.o=.%): \ - c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include user.h $(CFLAGS_$(basetarget).o) + c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include $(srctree)/include/linux/kern_levels.h -include user.h $(CFLAGS_$(basetarget).o) # These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of # using it directly. 
diff --git a/trunk/arch/x86/um/Kconfig b/trunk/arch/x86/um/Kconfig index 9926e11a772d..aeaff8bef2f1 100644 --- a/trunk/arch/x86/um/Kconfig +++ b/trunk/arch/x86/um/Kconfig @@ -21,6 +21,7 @@ config 64BIT config X86_32 def_bool !64BIT select HAVE_AOUT + select ARCH_WANT_IPC_PARSE_VERSION config X86_64 def_bool 64BIT diff --git a/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h b/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h index 5868526b5eef..46a9df99f3c5 100644 --- a/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h +++ b/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h @@ -7,9 +7,6 @@ #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) -#define STR(x) #x -#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : ) - #define BLANK() asm volatile("\n->" : : ) #define OFFSET(sym, str, mem) \ diff --git a/trunk/arch/x86/um/shared/sysdep/syscalls.h b/trunk/arch/x86/um/shared/sysdep/syscalls.h index bd9a89b67e41..ca255a805ed9 100644 --- a/trunk/arch/x86/um/shared/sysdep/syscalls.h +++ b/trunk/arch/x86/um/shared/sysdep/syscalls.h @@ -1,3 +1,5 @@ +extern long sys_clone(unsigned long clone_flags, unsigned long newsp, + void __user *parent_tid, void __user *child_tid); #ifdef __i386__ #include "syscalls_32.h" #else diff --git a/trunk/arch/x86/um/signal.c b/trunk/arch/x86/um/signal.c index a508cea13503..ba7363ecf896 100644 --- a/trunk/arch/x86/um/signal.c +++ b/trunk/arch/x86/um/signal.c @@ -416,9 +416,6 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, PT_REGS_AX(regs) = (unsigned long) sig; PT_REGS_DX(regs) = (unsigned long) 0; PT_REGS_CX(regs) = (unsigned long) 0; - - if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) - ptrace_notify(SIGTRAP); return 0; } @@ -466,9 +463,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, PT_REGS_AX(regs) = (unsigned long) sig; PT_REGS_DX(regs) = (unsigned long) &frame->info; PT_REGS_CX(regs) = (unsigned long) &frame->uc; - - if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) - ptrace_notify(SIGTRAP); return 0; } diff --git a/trunk/arch/x86/um/sys_call_table_32.c b/trunk/arch/x86/um/sys_call_table_32.c index 68d1dc91b37b..b5408cecac6c 100644 --- a/trunk/arch/x86/um/sys_call_table_32.c +++ b/trunk/arch/x86/um/sys_call_table_32.c @@ -28,7 +28,7 @@ #define ptregs_execve sys_execve #define ptregs_iopl sys_iopl #define ptregs_vm86old sys_vm86old -#define ptregs_clone sys_clone +#define ptregs_clone i386_clone #define ptregs_vm86 sys_vm86 #define ptregs_sigaltstack sys_sigaltstack #define ptregs_vfork sys_vfork diff --git a/trunk/arch/x86/um/syscalls_32.c b/trunk/arch/x86/um/syscalls_32.c index b853e8600b9d..db444c7218fe 100644 --- a/trunk/arch/x86/um/syscalls_32.c +++ b/trunk/arch/x86/um/syscalls_32.c @@ -3,37 +3,24 @@ * Licensed under the GPL */ -#include "linux/sched.h" -#include "linux/shm.h" -#include "linux/ipc.h" -#include "linux/syscalls.h" -#include "asm/mman.h" -#include "asm/uaccess.h" -#include "asm/unistd.h" +#include +#include /* * The prototype on i386 is: * - * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr) + * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls * * and the "newtls" arg. on i386 is read by copy_thread directly from the * register saved on the stack. 
*/ -long sys_clone(unsigned long clone_flags, unsigned long newsp, - int __user *parent_tid, void *newtls, int __user *child_tid) +long i386_clone(unsigned long clone_flags, unsigned long newsp, + int __user *parent_tid, void *newtls, int __user *child_tid) { - long ret; - - if (!newsp) - newsp = UPT_SP(¤t->thread.regs.regs); - - current->thread.forking = 1; - ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, - child_tid); - current->thread.forking = 0; - return ret; + return sys_clone(clone_flags, newsp, parent_tid, child_tid); } + long sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { diff --git a/trunk/arch/x86/um/syscalls_64.c b/trunk/arch/x86/um/syscalls_64.c index f3d82bb6e15a..adb08eb5c22a 100644 --- a/trunk/arch/x86/um/syscalls_64.c +++ b/trunk/arch/x86/um/syscalls_64.c @@ -5,12 +5,9 @@ * Licensed under the GPL */ -#include "linux/linkage.h" -#include "linux/personality.h" -#include "linux/utsname.h" -#include "asm/prctl.h" /* XXX This should get the constants from libc */ -#include "asm/uaccess.h" -#include "os.h" +#include +#include /* XXX This should get the constants from libc */ +#include long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) { @@ -79,20 +76,6 @@ long sys_arch_prctl(int code, unsigned long addr) return arch_prctl(current, code, (unsigned long __user *) addr); } -long sys_clone(unsigned long clone_flags, unsigned long newsp, - void __user *parent_tid, void __user *child_tid) -{ - long ret; - - if (!newsp) - newsp = UPT_SP(¤t->thread.regs.regs); - current->thread.forking = 1; - ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, - child_tid); - current->thread.forking = 0; - return ret; -} - void arch_switch_to(struct task_struct *to) { if ((to->thread.arch.fs == 0) || (to->mm == NULL)) diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index d11ca11d14fc..e2d62d697b5d 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -544,4 +545,7 @@ void __init xen_arch_setup(void) disable_cpufreq(); WARN_ON(set_pm_idle_to_default()); fiddle_vdso(); +#ifdef CONFIG_NUMA + numa_off = 1; +#endif } diff --git a/trunk/drivers/acpi/video.c b/trunk/drivers/acpi/video.c index f94d4c818fc7..1e0a9e17c31d 100644 --- a/trunk/drivers/acpi/video.c +++ b/trunk/drivers/acpi/video.c @@ -1448,7 +1448,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch, * most likely via hotkey. */ acpi_bus_generate_proc_event(device, event, 0); - keycode = KEY_SWITCHVIDEOMODE; + if (!acpi_notifier_call_chain(device, event, 0)) + keycode = KEY_SWITCHVIDEOMODE; break; case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video @@ -1478,9 +1479,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event) break; } - if (acpi_notifier_call_chain(device, event, 0)) - /* Something vetoed the keypress. 
*/ - keycode = 0; + if (event != ACPI_VIDEO_NOTIFY_SWITCH) + acpi_notifier_call_chain(device, event, 0); if (keycode) { input_report_key(input, keycode, 1); diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme.c index 38a2d0631882..ad16c68c8645 100644 --- a/trunk/drivers/block/nvme.c +++ b/trunk/drivers/block/nvme.c @@ -79,6 +79,7 @@ struct nvme_dev { char serial[20]; char model[40]; char firmware_rev[8]; + u32 max_hw_sectors; }; /* @@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, } static int nvme_get_features(struct nvme_dev *dev, unsigned fid, - unsigned dword11, dma_addr_t dma_addr) + unsigned nsid, dma_addr_t dma_addr) { struct nvme_command c; memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_get_features; + c.features.nsid = cpu_to_le32(nsid); c.features.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); - c.features.dword11 = cpu_to_le32(dword11); return nvme_submit_admin_cmd(dev, &c, NULL); } @@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid, return nvme_submit_admin_cmd(dev, &c, result); } +/** + * nvme_cancel_ios - Cancel outstanding I/Os + * @queue: The queue to cancel I/Os on + * @timeout: True to only cancel I/Os which have timed out + */ +static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + unsigned long now = jiffies; + int cmdid; + + for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { + void *ctx; + nvme_completion_fn fn; + static struct nvme_completion cqe = { + .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, + }; + + if (timeout && !time_after(now, info[cmdid].timeout)) + continue; + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); + ctx = cancel_cmdid(nvmeq, cmdid, &fn); + fn(nvmeq->dev, ctx, &cqe); + } +} + +static void nvme_free_queue_mem(struct nvme_queue *nvmeq) +{ + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); +} + static void nvme_free_queue(struct nvme_dev *dev, int qid) { struct nvme_queue *nvmeq = dev->queues[qid]; int vector = dev->entry[nvmeq->cq_vector].vector; + spin_lock_irq(&nvmeq->q_lock); + nvme_cancel_ios(nvmeq, false); + spin_unlock_irq(&nvmeq->q_lock); + irq_set_affinity_hint(vector, NULL); free_irq(vector, nvmeq); @@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) adapter_delete_cq(dev, qid); } - dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), - (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), - nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); + nvme_free_queue_mem(nvmeq); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth, int vector) { struct device *dmadev = &dev->pci_dev->dev; - unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info)); + unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * + sizeof(struct nvme_cmd_info)); struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); if (!nvmeq) return NULL; @@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) { - int result; + int result = 0; u32 aqa; u64 cap; unsigned long timeout; @@ -1005,17 +1043,22 @@ static int __devinit 
nvme_configure_admin_queue(struct nvme_dev *dev) timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; dev->db_stride = NVME_CAP_STRIDE(cap); - while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { + while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { msleep(100); if (fatal_signal_pending(current)) - return -EINTR; + result = -EINTR; if (time_after(jiffies, timeout)) { dev_err(&dev->pci_dev->dev, "Device not ready; aborting initialisation\n"); - return -ENODEV; + result = -ENODEV; } } + if (result) { + nvme_free_queue_mem(nvmeq); + return result; + } + result = queue_request_irq(dev, nvmeq, "nvme admin"); dev->queues[0] = nvmeq; return result; @@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, offset = offset_in_page(addr); count = DIV_ROUND_UP(offset + length, PAGE_SIZE); pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); err = get_user_pages_fast(addr, count, 1, pages); if (err < count) { @@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) return status; } -static int nvme_user_admin_cmd(struct nvme_ns *ns, +static int nvme_user_admin_cmd(struct nvme_dev *dev, struct nvme_admin_cmd __user *ucmd) { - struct nvme_dev *dev = ns->dev; struct nvme_admin_cmd cmd; struct nvme_command c; int status, length; - struct nvme_iod *iod; + struct nvme_iod *uninitialized_var(iod); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, case NVME_IOCTL_ID: return ns->ns_id; case NVME_IOCTL_ADMIN_CMD: - return nvme_user_admin_cmd(ns, (void __user *)arg); + return nvme_user_admin_cmd(ns->dev, (void __user *)arg); case NVME_IOCTL_SUBMIT_IO: return nvme_submit_io(ns, (void __user *)arg); default: @@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = { .compat_ioctl = nvme_ioctl, }; -static void nvme_timeout_ios(struct nvme_queue *nvmeq) -{ - int depth = nvmeq->q_depth - 1; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - unsigned long now = jiffies; - int cmdid; - - for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { - void *ctx; - nvme_completion_fn fn; - static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, }; - - if (!time_after(now, info[cmdid].timeout)) - continue; - dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid); - ctx = cancel_cmdid(nvmeq, cmdid, &fn); - fn(nvmeq->dev, ctx, &cqe); - } -} - static void nvme_resubmit_bios(struct nvme_queue *nvmeq) { while (bio_list_peek(&nvmeq->sq_cong)) { @@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data) spin_lock_irq(&nvmeq->q_lock); if (nvme_process_cq(nvmeq)) printk("process_cq did something\n"); - nvme_timeout_ios(nvmeq); + nvme_cancel_ios(nvmeq, true); nvme_resubmit_bios(nvmeq); spin_unlock_irq(&nvmeq->q_lock); } @@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, ns->disk = disk; lbaf = id->flbas & 0xf; ns->lba_shift = id->lbaf[lbaf].ds; + blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); + if (dev->max_hw_sectors) + blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); disk->major = nvme_major; disk->minors = NVME_MINORS; @@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count) static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) { - int result, cpu, i, nr_io_queues, db_bar_size; + int result, cpu, i, nr_io_queues, db_bar_size, q_depth; nr_io_queues = 
num_online_cpus(); result = set_queue_count(dev, nr_io_queues); @@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) cpu = cpumask_next(cpu, cpu_online_mask); } + q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, + NVME_Q_DEPTH); for (i = 0; i < nr_io_queues; i++) { - dev->queues[i + 1] = nvme_create_queue(dev, i + 1, - NVME_Q_DEPTH, i); + dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); if (IS_ERR(dev->queues[i + 1])) return PTR_ERR(dev->queues[i + 1]); dev->queue_count++; @@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev) memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); + if (ctrl->mdts) { + int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; + dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); + } id_ns = mem; for (i = 1; i <= nn; i++) { @@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev) list_del(&dev->node); spin_unlock(&dev_list_lock); - /* TODO: wait all I/O finished or cancel them */ - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { list_del(&ns->list); del_gendisk(ns->disk); @@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev) dma_pool_destroy(dev->prp_small_pool); } -/* XXX: Use an ida or something to let remove / add work correctly */ -static void nvme_set_instance(struct nvme_dev *dev) +static DEFINE_IDA(nvme_instance_ida); + +static int nvme_set_instance(struct nvme_dev *dev) { - static int instance; - dev->instance = instance++; + int instance, error; + + do { + if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) + return -ENODEV; + + spin_lock(&dev_list_lock); + error = ida_get_new(&nvme_instance_ida, &instance); + spin_unlock(&dev_list_lock); + } while (error == -EAGAIN); + + if (error) + return -ENODEV; + + dev->instance = instance; + return 0; } static void nvme_release_instance(struct nvme_dev *dev) { + spin_lock(&dev_list_lock); + ida_remove(&nvme_instance_ida, dev->instance); + spin_unlock(&dev_list_lock); } static int __devinit nvme_probe(struct pci_dev *pdev, @@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - nvme_set_instance(dev); + result = nvme_set_instance(dev); + if (result) + goto disable; + dev->entry[0].vector = pdev->irq; result = nvme_setup_prp_pools(dev); @@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = { static int __init nvme_init(void) { - int result = -EBUSY; + int result; nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); if (IS_ERR(nvme_thread)) return PTR_ERR(nvme_thread); - nvme_major = register_blkdev(nvme_major, "nvme"); - if (nvme_major <= 0) + result = register_blkdev(nvme_major, "nvme"); + if (result < 0) goto kill_kthread; + else if (result > 0) + nvme_major = result; result = pci_register_driver(&nvme_driver); if (result) diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index 9917943a3572..54a55f03115d 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; - rbd_get_dev(rbd_dev); - - set_device_ro(bdev, rbd_dev->read_only); - if ((mode & FMODE_WRITE) && rbd_dev->read_only) return -EROFS; + rbd_get_dev(rbd_dev); + set_device_ro(bdev, 
rbd_dev->read_only); + return 0; } diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c index e01f5eaaec82..58e32f7c3229 100644 --- a/trunk/drivers/char/agp/intel-gtt.c +++ b/trunk/drivers/char/agp/intel-gtt.c @@ -84,33 +84,40 @@ static struct _intel_private { #define IS_IRONLAKE intel_private.driver->is_ironlake #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable -static int intel_gtt_map_memory(struct page **pages, - unsigned int num_entries, - struct sg_table *st) +int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, + struct scatterlist **sg_list, int *num_sg) { + struct sg_table st; struct scatterlist *sg; int i; + if (*sg_list) + return 0; /* already mapped (for e.g. resume */ + DBG("try mapping %lu pages\n", (unsigned long)num_entries); - if (sg_alloc_table(st, num_entries, GFP_KERNEL)) + if (sg_alloc_table(&st, num_entries, GFP_KERNEL)) goto err; - for_each_sg(st->sgl, sg, num_entries, i) + *sg_list = sg = st.sgl; + + for (i = 0 ; i < num_entries; i++, sg = sg_next(sg)) sg_set_page(sg, pages[i], PAGE_SIZE, 0); - if (!pci_map_sg(intel_private.pcidev, - st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL)) + *num_sg = pci_map_sg(intel_private.pcidev, *sg_list, + num_entries, PCI_DMA_BIDIRECTIONAL); + if (unlikely(!*num_sg)) goto err; return 0; err: - sg_free_table(st); + sg_free_table(&st); return -ENOMEM; } +EXPORT_SYMBOL(intel_gtt_map_memory); -static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) +void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) { struct sg_table st; DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); @@ -123,6 +130,7 @@ static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) sg_free_table(&st); } +EXPORT_SYMBOL(intel_gtt_unmap_memory); static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { @@ -666,14 +674,9 @@ static int intel_gtt_init(void) gtt_map_size = intel_private.base.gtt_total_entries * 4; - intel_private.gtt = NULL; - if (INTEL_GTT_GEN < 6) - intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, - gtt_map_size); - if (intel_private.gtt == NULL) - intel_private.gtt = ioremap(intel_private.gtt_bus_addr, - gtt_map_size); - if (intel_private.gtt == NULL) { + intel_private.gtt = ioremap(intel_private.gtt_bus_addr, + gtt_map_size); + if (!intel_private.gtt) { intel_private.driver->cleanup(); iounmap(intel_private.registers); return -ENOMEM; @@ -876,7 +879,8 @@ static bool i830_check_flags(unsigned int flags) return false; } -void intel_gtt_insert_sg_entries(struct sg_table *st, +void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, + unsigned int sg_len, unsigned int pg_start, unsigned int flags) { @@ -888,11 +892,12 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, /* sg may merge pages, but we have to separate * per-page addr for GTT */ - for_each_sg(st->sgl, sg, st->nents, i) { + for_each_sg(sg_list, sg, sg_len, i) { len = sg_dma_len(sg) >> PAGE_SHIFT; for (m = 0; m < len; m++) { dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); - intel_private.driver->write_entry(addr, j, flags); + intel_private.driver->write_entry(addr, + j, flags); j++; } } @@ -900,10 +905,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); -static void intel_gtt_insert_pages(unsigned int first_entry, - unsigned int num_entries, - struct page **pages, - unsigned int flags) +void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, + struct 
page **pages, unsigned int flags) { int i, j; @@ -914,6 +917,7 @@ static void intel_gtt_insert_pages(unsigned int first_entry, } readl(intel_private.gtt+j-1); } +EXPORT_SYMBOL(intel_gtt_insert_pages); static int intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) @@ -949,15 +953,13 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, global_cache_flush(); if (intel_private.base.needs_dmar) { - struct sg_table st; - - ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); + ret = intel_gtt_map_memory(mem->pages, mem->page_count, + &mem->sg_list, &mem->num_sg); if (ret != 0) return ret; - intel_gtt_insert_sg_entries(&st, pg_start, type); - mem->sg_list = st.sgl; - mem->num_sg = st.nents; + intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, + pg_start, type); } else intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, type); diff --git a/trunk/drivers/edac/i3200_edac.c b/trunk/drivers/edac/i3200_edac.c index 47180a08edad..b6653a6fc5d5 100644 --- a/trunk/drivers/edac/i3200_edac.c +++ b/trunk/drivers/edac/i3200_edac.c @@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) for (j = 0; j < nr_channels; j++) { struct dimm_info *dimm = csrow->channels[j]->dimm; - dimm->nr_pages = nr_pages / nr_channels; + dimm->nr_pages = nr_pages; dimm->grain = nr_pages << PAGE_SHIFT; dimm->mtype = MEM_DDR2; dimm->dtype = DEV_UNKNOWN; diff --git a/trunk/drivers/edac/i5000_edac.c b/trunk/drivers/edac/i5000_edac.c index 39c63757c2a1..6a49dd00b81b 100644 --- a/trunk/drivers/edac/i5000_edac.c +++ b/trunk/drivers/edac/i5000_edac.c @@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, /* add the number of COLUMN bits */ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); + /* Dual-rank memories have twice the size */ + if (dinfo->dual_rank) + addrBits++; + addrBits += 6; /* add 64 bits per DIMM */ addrBits -= 20; /* divide by 2^^20 */ addrBits -= 3; /* 8 bits per bytes */ diff --git a/trunk/drivers/edac/sb_edac.c b/trunk/drivers/edac/sb_edac.c index f3b1f9fafa4b..5715b7c2c517 100644 --- a/trunk/drivers/edac/sb_edac.c +++ b/trunk/drivers/edac/sb_edac.c @@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci) { struct sbridge_pvt *pvt = mci->pvt_info; struct dimm_info *dimm; - int i, j, banks, ranks, rows, cols, size, npages; + unsigned i, j, banks, ranks, rows, cols, npages; + u64 size; u32 reg; enum edac_type mode; enum mem_type mtype; @@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) cols = numcol(mtr); /* DDR3 has 8 I/O banks */ - size = (rows * cols * banks * ranks) >> (20 - 3); + size = ((u64)rows * cols * banks * ranks) >> (20 - 3); npages = MiB_TO_PAGES(size); - edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", + edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", pvt->sbridge_dev->mc, i, j, size, npages, banks, ranks, rows, cols); diff --git a/trunk/drivers/gpio/gpio-lpc32xx.c b/trunk/drivers/gpio/gpio-lpc32xx.c index 8a420f13905e..ed94b4ea72e9 100644 --- a/trunk/drivers/gpio/gpio-lpc32xx.c +++ b/trunk/drivers/gpio/gpio-lpc32xx.c @@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin, { struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + __set_gpio_level_p012(group, pin, value); __set_gpio_dir_p012(group, pin, 0); return 0; @@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, 
unsigned pin, { struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + __set_gpio_level_p3(group, pin, value); __set_gpio_dir_p3(group, pin, 0); return 0; @@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin, static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin, int value) { + struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + + __set_gpo_level_p3(group, pin, value); return 0; } diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index 18321b68b880..90e28081712d 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -22,7 +22,7 @@ menuconfig DRM config DRM_USB tristate depends on DRM - depends on USB_SUPPORT && USB_ARCH_HAS_HCD + depends on USB_ARCH_HAS_HCD select USB config DRM_KMS_HELPER @@ -54,21 +54,6 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. -config DRM_GEM_CMA_HELPER - bool - depends on DRM - help - Choose this if you need the GEM CMA helper functions - -config DRM_KMS_CMA_HELPER - bool - select DRM_GEM_CMA_HELPER - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select FB_SYS_IMAGEBLIT - help - Choose this if you need the KMS CMA helper functions - config DRM_TDFX tristate "3dfx Banshee/Voodoo3+" depends on DRM && PCI @@ -208,5 +193,3 @@ source "drivers/gpu/drm/ast/Kconfig" source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/cirrus/Kconfig" - -source "drivers/gpu/drm/shmobile/Kconfig" diff --git a/trunk/drivers/gpu/drm/Makefile b/trunk/drivers/gpu/drm/Makefile index 2ff5cefe9ead..f65f65ed0ddf 100644 --- a/trunk/drivers/gpu/drm/Makefile +++ b/trunk/drivers/gpu/drm/Makefile @@ -15,13 +15,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_trace_points.o drm_global.o drm_prime.o drm-$(CONFIG_COMPAT) += drm_ioc32.o -drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o drm-usb-y := drm_usb.o drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o -drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o @@ -47,5 +45,4 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ -obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ obj-y += i2c/ diff --git a/trunk/drivers/gpu/drm/ast/ast_drv.h b/trunk/drivers/gpu/drm/ast/ast_drv.h index 20a437f88780..d4af9edcbb97 100644 --- a/trunk/drivers/gpu/drm/ast/ast_drv.h +++ b/trunk/drivers/gpu/drm/ast/ast_drv.h @@ -94,6 +94,7 @@ struct ast_private { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; + atomic_t validate_sequence; } ttm; struct drm_gem_object *cursor_cache; diff --git a/trunk/drivers/gpu/drm/ast/ast_mode.c b/trunk/drivers/gpu/drm/ast/ast_mode.c index e87f1ff223f4..a712cafcfa1d 100644 --- a/trunk/drivers/gpu/drm/ast/ast_mode.c +++ b/trunk/drivers/gpu/drm/ast/ast_mode.c @@ -582,6 +582,7 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { .mode_set_base = ast_crtc_mode_set_base, .disable = ast_crtc_disable, .load_lut = ast_crtc_load_lut, + .disable = ast_crtc_disable, .prepare = ast_crtc_prepare, .commit = ast_crtc_commit, @@ -736,7 +737,6 @@ static int ast_get_modes(struct drm_connector *connector) if (edid) { drm_mode_connector_update_edid_property(&ast_connector->base, edid); ret = drm_add_edid_modes(connector, edid); - kfree(edid); return ret; } 
else drm_mode_connector_update_edid_property(&ast_connector->base, NULL); diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h index 5d045647a3fc..64ea597cb6d3 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -143,6 +143,7 @@ struct cirrus_device { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; + atomic_t validate_sequence; } ttm; bool mm_inited; }; diff --git a/trunk/drivers/gpu/drm/drm_cache.c b/trunk/drivers/gpu/drm/drm_cache.c index 4a4274b348b6..08758e061478 100644 --- a/trunk/drivers/gpu/drm/drm_cache.c +++ b/trunk/drivers/gpu/drm/drm_cache.c @@ -37,13 +37,12 @@ drm_clflush_page(struct page *page) { uint8_t *page_virtual; unsigned int i; - const int size = boot_cpu_data.x86_clflush_size; if (unlikely(page == NULL)) return; page_virtual = kmap_atomic(page); - for (i = 0; i < PAGE_SIZE; i += size) + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) clflush(page_virtual + i); kunmap_atomic(page_virtual); } @@ -100,31 +99,6 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) } EXPORT_SYMBOL(drm_clflush_pages); -void -drm_clflush_sg(struct sg_table *st) -{ -#if defined(CONFIG_X86) - if (cpu_has_clflush) { - struct scatterlist *sg; - int i; - - mb(); - for_each_sg(st->sgl, sg, st->nents, i) - drm_clflush_page(sg_page(sg)); - mb(); - - return; - } - - if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) - printk(KERN_ERR "Timed out waiting for cache flush.\n"); -#else - printk(KERN_ERR "Architecture has no drm_cache.c support\n"); - WARN_ON_ONCE(1); -#endif -} -EXPORT_SYMBOL(drm_clflush_sg); - void drm_clflush_virt_range(char *addr, unsigned long length) { diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 9c346a50379f..6fbfc244748f 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -294,8 +294,6 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, { int ret; - kref_init(&fb->refcount); - ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); if (ret) return ret; @@ -309,38 +307,6 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, } EXPORT_SYMBOL(drm_framebuffer_init); -static void drm_framebuffer_free(struct kref *kref) -{ - struct drm_framebuffer *fb = - container_of(kref, struct drm_framebuffer, refcount); - fb->funcs->destroy(fb); -} - -/** - * drm_framebuffer_unreference - unref a framebuffer - * - * LOCKING: - * Caller must hold mode config lock. - */ -void drm_framebuffer_unreference(struct drm_framebuffer *fb) -{ - struct drm_device *dev = fb->dev; - DRM_DEBUG("FB ID: %d\n", fb->base.id); - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); - kref_put(&fb->refcount, drm_framebuffer_free); -} -EXPORT_SYMBOL(drm_framebuffer_unreference); - -/** - * drm_framebuffer_reference - incr the fb refcnt - */ -void drm_framebuffer_reference(struct drm_framebuffer *fb) -{ - DRM_DEBUG("FB ID: %d\n", fb->base.id); - kref_get(&fb->refcount); -} -EXPORT_SYMBOL(drm_framebuffer_reference); - /** * drm_framebuffer_cleanup - remove a framebuffer object * @fb: framebuffer to remove @@ -352,32 +318,6 @@ EXPORT_SYMBOL(drm_framebuffer_reference); * it, setting it to NULL. 
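The drm_crtc.c hunks above drop the kref-based lifetime management (drm_framebuffer_reference/unreference) in favour of direct destroy calls. For orientation, the removed scheme is the standard kref idiom, sketched here on a stand-in type rather than the real struct drm_framebuffer:

#include <linux/kref.h>
#include <linux/slab.h>

struct fb_obj {
	struct kref refcount;	/* kref_init() at creation time */
};

static void fb_obj_free(struct kref *kref)
{
	struct fb_obj *fb = container_of(kref, struct fb_obj, refcount);

	kfree(fb);
}

static inline void fb_obj_get(struct fb_obj *fb)
{
	kref_get(&fb->refcount);
}

static inline void fb_obj_put(struct fb_obj *fb)
{
	/* the last put invokes fb_obj_free() */
	kref_put(&fb->refcount, fb_obj_free);
}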
*/ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) -{ - struct drm_device *dev = fb->dev; - /* - * This could be moved to drm_framebuffer_remove(), but for - * debugging is nice to keep around the list of fb's that are - * no longer associated w/ a drm_file but are not unreferenced - * yet. (i915 and omapdrm have debugfs files which will show - * this.) - */ - drm_mode_object_put(dev, &fb->base); - list_del(&fb->head); - dev->mode_config.num_fb--; -} -EXPORT_SYMBOL(drm_framebuffer_cleanup); - -/** - * drm_framebuffer_remove - remove and unreference a framebuffer object - * @fb: framebuffer to remove - * - * LOCKING: - * Caller must hold mode config lock. - * - * Scans all the CRTCs and planes in @dev's mode_config. If they're - * using @fb, removes it, setting it to NULL. - */ -void drm_framebuffer_remove(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; struct drm_crtc *crtc; @@ -410,11 +350,11 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) } } - list_del(&fb->filp_head); - - drm_framebuffer_unreference(fb); + drm_mode_object_put(dev, &fb->base); + list_del(&fb->head); + dev->mode_config.num_fb--; } -EXPORT_SYMBOL(drm_framebuffer_remove); +EXPORT_SYMBOL(drm_framebuffer_cleanup); /** * drm_crtc_init - Initialise a new CRTC object @@ -437,7 +377,6 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; - crtc->invert_dimensions = false; mutex_lock(&dev->mode_config.mutex); @@ -1092,7 +1031,11 @@ void drm_mode_config_cleanup(struct drm_device *dev) } list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { - drm_framebuffer_remove(fb); + fb->funcs->destroy(fb); + } + + list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { + crtc->funcs->destroy(crtc); } list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, @@ -1100,10 +1043,6 @@ void drm_mode_config_cleanup(struct drm_device *dev) plane->funcs->destroy(plane); } - list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { - crtc->funcs->destroy(crtc); - } - idr_remove_all(&dev->mode_config.crtc_idr); idr_destroy(&dev->mode_config.crtc_idr); } @@ -1913,7 +1852,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (crtc_req->mode_valid) { - int hdisplay, vdisplay; /* If we have a mode we need a framebuffer. */ /* If we pass -1, set the mode with the currently bound fb */ if (crtc_req->fb_id == -1) { @@ -1949,20 +1887,14 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - hdisplay = mode->hdisplay; - vdisplay = mode->vdisplay; - - if (crtc->invert_dimensions) - swap(hdisplay, vdisplay); - - if (hdisplay > fb->width || - vdisplay > fb->height || - crtc_req->x > fb->width - hdisplay || - crtc_req->y > fb->height - vdisplay) { - DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n", - fb->width, fb->height, - hdisplay, vdisplay, crtc_req->x, crtc_req->y, - crtc->invert_dimensions ? 
" (inverted)" : ""); + if (mode->hdisplay > fb->width || + mode->vdisplay > fb->height || + crtc_req->x > fb->width - mode->hdisplay || + crtc_req->y > fb->height - mode->vdisplay) { + DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n", + mode->hdisplay, mode->vdisplay, + crtc_req->x, crtc_req->y, + fb->width, fb->height); ret = -ENOSPC; goto out; } @@ -2237,8 +2169,6 @@ static int format_check(const struct drm_mode_fb_cmd2 *r) case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: @@ -2405,7 +2335,11 @@ int drm_mode_rmfb(struct drm_device *dev, goto out; } - drm_framebuffer_remove(fb); + /* TODO release all crtc connected to the framebuffer */ + /* TODO unhock the destructor from the buffer object */ + + list_del(&fb->filp_head); + fb->funcs->destroy(fb); out: mutex_unlock(&dev->mode_config.mutex); @@ -2555,7 +2489,8 @@ void drm_fb_release(struct drm_file *priv) mutex_lock(&dev->mode_config.mutex); list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { - drm_framebuffer_remove(fb); + list_del(&fb->filp_head); + fb->funcs->destroy(fb); } mutex_unlock(&dev->mode_config.mutex); } @@ -3554,7 +3489,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, struct drm_framebuffer *fb; struct drm_pending_vblank_event *e = NULL; unsigned long flags; - int hdisplay, vdisplay; int ret = -EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || @@ -3584,19 +3518,14 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; fb = obj_to_fb(obj); - hdisplay = crtc->mode.hdisplay; - vdisplay = crtc->mode.vdisplay; - - if (crtc->invert_dimensions) - swap(hdisplay, vdisplay); - - if (hdisplay > fb->width || - vdisplay > fb->height || - crtc->x > fb->width - hdisplay || - crtc->y > fb->height - vdisplay) { - DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n", - fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y, - crtc->invert_dimensions ? " (inverted)" : ""); + if (crtc->mode.hdisplay > fb->width || + crtc->mode.vdisplay > fb->height || + crtc->x > fb->width - crtc->mode.hdisplay || + crtc->y > fb->height - crtc->mode.vdisplay) { + DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n", + fb->width, fb->height, + crtc->mode.hdisplay, crtc->mode.vdisplay, + crtc->x, crtc->y); ret = -ENOSPC; goto out; } @@ -3789,8 +3718,6 @@ int drm_format_num_planes(uint32_t format) case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: return 2; default: return 1; @@ -3824,8 +3751,6 @@ int drm_format_plane_cpp(uint32_t format, int plane) case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: return plane ? 
2 : 1; case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: diff --git a/trunk/drivers/gpu/drm/drm_drv.c b/trunk/drivers/gpu/drm/drm_drv.c index a2c6eb6e0dd8..9238de4009fa 100644 --- a/trunk/drivers/gpu/drm/drm_drv.c +++ b/trunk/drivers/gpu/drm/drm_drv.c @@ -140,10 +140,10 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), @@ -152,19 +152,19 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - 
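The drm_drv.c table edits above tighten the permission mask on several KMS ioctls. Each DRM_IOCTL_DEF() entry pairs an ioctl number with a handler and a set of gating flags: DRM_MASTER requires the caller to be the current DRM master, DRM_CONTROL_ALLOW additionally permits the call on control nodes, and DRM_UNLOCKED runs the handler without the global DRM lock. A sketch with a hypothetical no-op handler:

static int example_noop_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	return 0;	/* hypothetical handler, for illustration only */
}

static struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, example_noop_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
};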
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index 289c9b18c0d3..b7ee230572b7 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -161,7 +161,7 @@ MODULE_PARM_DESC(edid_fixup, * Sanity check the EDID block (base or extension). Return 0 if the block * doesn't check out, or 1 if it's valid. */ -bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) +bool drm_edid_block_valid(u8 *raw_edid, int block) { int i; u8 csum = 0; @@ -184,9 +184,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) for (i = 0; i < EDID_LENGTH; i++) csum += raw_edid[i]; if (csum) { - if (print_bad_edid) { - DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); - } + DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); /* allow CEA to slide through, switches mangle this */ if (raw_edid[0] != 0x02) @@ -212,7 +210,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) return 1; bad: - if (raw_edid && print_bad_edid) { + if (raw_edid) { printk(KERN_ERR "Raw EDID:\n"); print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, raw_edid, EDID_LENGTH, false); @@ -236,7 +234,7 @@ bool drm_edid_is_valid(struct edid *edid) return false; for (i = 0; i <= edid->extensions; i++) - if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true)) + if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i)) return false; return true; @@ -259,8 +257,6 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, int block, int len) { unsigned char start = block * EDID_LENGTH; - unsigned char segment = block >> 1; - unsigned char xfers = segment ? 3 : 2; int ret, retries = 5; /* The core i2c driver will automatically retry the transfer if the @@ -272,11 +268,6 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, do { struct i2c_msg msgs[] = { { - .addr = DDC_SEGMENT_ADDR, - .flags = 0, - .len = 1, - .buf = &segment, - }, { .addr = DDC_ADDR, .flags = 0, .len = 1, @@ -288,21 +279,15 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, .buf = buf, } }; - - /* - * Avoid sending the segment addr to not upset non-compliant ddc - * monitors. - */ - ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); - + ret = i2c_transfer(adapter, msgs, 2); if (ret == -ENXIO) { DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", adapter->name); break; } - } while (ret != xfers && --retries); + } while (ret != 2 && --retries); - return ret == xfers ? 0 : -1; + return ret == 2 ? 
0 : -1; } static bool drm_edid_is_zero(u8 *in_edid, int length) @@ -321,7 +306,6 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { int i, j = 0, valid_extensions = 0; u8 *block, *new; - bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; @@ -330,7 +314,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block, 0, print_bad_edid)) + if (drm_edid_block_valid(block, 0)) break; if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { connector->null_edid_counter++; @@ -355,7 +339,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) block + (valid_extensions + 1) * EDID_LENGTH, j, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) { + if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) { valid_extensions++; break; } @@ -378,11 +362,8 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) return block; carp: - if (print_bad_edid) { - dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", - drm_get_connector_name(connector), j); - } - connector->bad_edid_counter++; + dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", + drm_get_connector_name(connector), j); out: kfree(block); @@ -421,7 +402,10 @@ struct edid *drm_get_edid(struct drm_connector *connector, if (drm_probe_ddc(adapter)) edid = (struct edid *)drm_do_get_edid(connector, adapter); + connector->display_info.raw_edid = (char *)edid; + return edid; + } EXPORT_SYMBOL(drm_get_edid); @@ -1538,40 +1522,6 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) return modes; } -static int -cea_db_payload_len(const u8 *db) -{ - return db[0] & 0x1f; -} - -static int -cea_db_tag(const u8 *db) -{ - return db[0] >> 5; -} - -static int -cea_revision(const u8 *cea) -{ - return cea[1]; -} - -static int -cea_db_offsets(const u8 *cea, int *start, int *end) -{ - /* Data block offset in CEA extension block */ - *start = 4; - *end = cea[2]; - if (*end == 0) - *end = 127; - if (*end < 4 || *end > 127) - return -ERANGE; - return 0; -} - -#define for_each_cea_db(cea, i, start, end) \ - for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) - static int add_cea_modes(struct drm_connector *connector, struct edid *edid) { @@ -1579,17 +1529,10 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) u8 * db, dbl; int modes = 0; - if (cea && cea_revision(cea) >= 3) { - int i, start, end; - - if (cea_db_offsets(cea, &start, &end)) - return 0; - - for_each_cea_db(cea, i, start, end) { - db = &cea[i]; - dbl = cea_db_payload_len(db); - - if (cea_db_tag(db) == VIDEO_BLOCK) + if (cea && cea[1] >= 3) { + for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { + dbl = db[0] & 0x1f; + if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK) modes += do_cea_modes (connector, db+1, dbl); } } @@ -1598,28 +1541,19 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) } static void -parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) +parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db) { - u8 len = cea_db_payload_len(db); + connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */ - if (len >= 6) { - connector->eld[5] |= (db[6] >> 7) 
<< 1; /* Supports_AI */ - connector->dvi_dual = db[6] & 1; - } - if (len >= 7) - connector->max_tmds_clock = db[7] * 5; - if (len >= 8) { - connector->latency_present[0] = db[8] >> 7; - connector->latency_present[1] = (db[8] >> 6) & 1; - } - if (len >= 9) - connector->video_latency[0] = db[9]; - if (len >= 10) - connector->audio_latency[0] = db[10]; - if (len >= 11) - connector->video_latency[1] = db[11]; - if (len >= 12) - connector->audio_latency[1] = db[12]; + connector->dvi_dual = db[6] & 1; + connector->max_tmds_clock = db[7] * 5; + + connector->latency_present[0] = db[8] >> 7; + connector->latency_present[1] = (db[8] >> 6) & 1; + connector->video_latency[0] = db[9]; + connector->audio_latency[0] = db[10]; + connector->video_latency[1] = db[11]; + connector->audio_latency[1] = db[12]; DRM_LOG_KMS("HDMI: DVI dual %d, " "max TMDS clock %d, " @@ -1643,21 +1577,6 @@ monitor_name(struct detailed_timing *t, void *data) *(u8 **)data = t->data.other_data.data.str.str; } -static bool cea_db_is_hdmi_vsdb(const u8 *db) -{ - int hdmi_id; - - if (cea_db_tag(db) != VENDOR_BLOCK) - return false; - - if (cea_db_payload_len(db) < 5) - return false; - - hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); - - return hdmi_id == HDMI_IDENTIFIER; -} - /** * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink @@ -1704,40 +1623,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) eld[18] = edid->prod_code[0]; eld[19] = edid->prod_code[1]; - if (cea_revision(cea) >= 3) { - int i, start, end; - - if (cea_db_offsets(cea, &start, &end)) { - start = 0; - end = 0; - } - - for_each_cea_db(cea, i, start, end) { - db = &cea[i]; - dbl = cea_db_payload_len(db); - - switch (cea_db_tag(db)) { + if (cea[1] >= 3) + for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { + dbl = db[0] & 0x1f; + + switch ((db[0] & 0xe0) >> 5) { case AUDIO_BLOCK: /* Audio Data Block, contains SADs */ sad_count = dbl / 3; - if (dbl >= 1) - memcpy(eld + 20 + mnl, &db[1], dbl); + memcpy(eld + 20 + mnl, &db[1], dbl); break; case SPEAKER_BLOCK: - /* Speaker Allocation Data Block */ - if (dbl >= 1) - eld[7] = db[1]; + /* Speaker Allocation Data Block */ + eld[7] = db[1]; break; case VENDOR_BLOCK: /* HDMI Vendor-Specific Data Block */ - if (cea_db_is_hdmi_vsdb(db)) + if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0) parse_hdmi_vsdb(connector, db); break; default: break; } } - } eld[5] |= sad_count << 4; eld[2] = (20 + mnl + sad_count * 3 + 3) / 4; @@ -1815,26 +1723,38 @@ EXPORT_SYMBOL(drm_select_eld); bool drm_detect_hdmi_monitor(struct edid *edid) { u8 *edid_ext; - int i; + int i, hdmi_id; int start_offset, end_offset; + bool is_hdmi = false; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) - return false; + goto end; - if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) - return false; + /* Data block offset in CEA extension block */ + start_offset = 4; + end_offset = edid_ext[2]; /* * Because HDMI identifier is in Vendor Specific Block, * search it from all data blocks of CEA extension. 
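Both the helper-based and the open-coded versions of these loops walk the CEA-861 data block collection the same way: byte 2 of the extension block gives the end offset, blocks start at offset 4, and each block header packs a 3-bit tag (bits 7:5) and a 5-bit payload length (bits 4:0). A compact sketch of the traversal, including the HDMI VSDB test used by drm_detect_hdmi_monitor():

/* Sketch: scan a 128-byte CEA extension block for the HDMI VSDB. */
static bool cea_has_hdmi_vsdb(const u8 *cea)
{
	int i, start = 4, end = cea[2];

	if (end < 4 || end > 127)
		return false;	/* empty or malformed block collection */

	for (i = start; i < end; i += (cea[i] & 0x1f) + 1) {
		u8 tag = cea[i] >> 5;
		u8 len = cea[i] & 0x1f;

		/* vendor block carrying the HDMI IEEE OUI 0x000c03? */
		if (tag == VENDOR_BLOCK && len >= 5 &&
		    cea[i + 1] == 0x03 && cea[i + 2] == 0x0c &&
		    cea[i + 3] == 0x00)
			return true;
	}

	return false;
}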
*/ - for_each_cea_db(edid_ext, i, start_offset, end_offset) { - if (cea_db_is_hdmi_vsdb(&edid_ext[i])) - return true; + for (i = start_offset; i < end_offset; + /* Increased by data block len */ + i += ((edid_ext[i] & 0x1f) + 1)) { + /* Find vendor specific block */ + if ((edid_ext[i] >> 5) == VENDOR_BLOCK) { + hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) | + edid_ext[i + 3] << 16; + /* Find HDMI identifier */ + if (hdmi_id == HDMI_IDENTIFIER) + is_hdmi = true; + break; + } } - return false; +end: + return is_hdmi; } EXPORT_SYMBOL(drm_detect_hdmi_monitor); @@ -1866,13 +1786,15 @@ bool drm_detect_monitor_audio(struct edid *edid) goto end; } - if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) - goto end; + /* Data block offset in CEA extension block */ + start_offset = 4; + end_offset = edid_ext[2]; - for_each_cea_db(edid_ext, i, start_offset, end_offset) { - if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) { + for (i = start_offset; i < end_offset; + i += ((edid_ext[i] & 0x1f) + 1)) { + if ((edid_ext[i] >> 5) == AUDIO_BLOCK) { has_audio = true; - for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3) + for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) DRM_DEBUG_KMS("CEA audio format %d\n", (edid_ext[i + j] >> 3) & 0xf); goto end; diff --git a/trunk/drivers/gpu/drm/drm_edid_load.c b/trunk/drivers/gpu/drm/drm_edid_load.c index ea9cdab3d431..0303935d10e2 100644 --- a/trunk/drivers/gpu/drm/drm_edid_load.c +++ b/trunk/drivers/gpu/drm/drm_edid_load.c @@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = { }, }; -static u8 *edid_load(struct drm_connector *connector, char *name, - char *connector_name) +static int edid_load(struct drm_connector *connector, char *name, + char *connector_name) { const struct firmware *fw; struct platform_device *pdev; @@ -123,7 +123,6 @@ static u8 *edid_load(struct drm_connector *connector, char *name, int fwsize, expected; int builtin = 0, err = 0; int i, valid_extensions = 0; - bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); pdev = platform_device_register_simple(connector_name, -1, NULL, 0); if (IS_ERR(pdev)) { @@ -174,8 +173,7 @@ static u8 *edid_load(struct drm_connector *connector, char *name, } memcpy(edid, fwdata, fwsize); - if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { - connector->bad_edid_counter++; + if (!drm_edid_block_valid(edid, 0)) { DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", name); kfree(edid); @@ -187,7 +185,7 @@ static u8 *edid_load(struct drm_connector *connector, char *name, if (i != valid_extensions + 1) memcpy(edid + (valid_extensions + 1) * EDID_LENGTH, edid + i * EDID_LENGTH, EDID_LENGTH); - if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid)) + if (drm_edid_block_valid(edid + i * EDID_LENGTH, i)) valid_extensions++; } @@ -207,6 +205,7 @@ static u8 *edid_load(struct drm_connector *connector, char *name, edid = new_edid; } + connector->display_info.raw_edid = edid; DRM_INFO("Got %s EDID base block and %d extension%s from " "\"%s\" for connector \"%s\"\n", builtin ? "built-in" : "external", valid_extensions, valid_extensions == 1 ? 
"" : "s", @@ -216,10 +215,7 @@ static u8 *edid_load(struct drm_connector *connector, char *name, release_firmware(fw); out: - if (err) - return ERR_PTR(err); - - return edid; + return err; } int drm_load_edid_firmware(struct drm_connector *connector) @@ -227,7 +223,6 @@ int drm_load_edid_firmware(struct drm_connector *connector) char *connector_name = drm_get_connector_name(connector); char *edidname = edid_firmware, *last, *colon; int ret; - struct edid *edid; if (*edidname == '\0') return 0; @@ -245,13 +240,13 @@ int drm_load_edid_firmware(struct drm_connector *connector) if (*last == '\n') *last = '\0'; - edid = (struct edid *) edid_load(connector, edidname, connector_name); - if (IS_ERR_OR_NULL(edid)) + ret = edid_load(connector, edidname, connector_name); + if (ret) return 0; - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); + drm_mode_connector_update_edid_property(connector, + (struct edid *) connector->display_info.raw_edid); - return ret; + return drm_add_edid_modes(connector, (struct edid *) + connector->display_info.raw_edid); } diff --git a/trunk/drivers/gpu/drm/drm_edid_modes.h b/trunk/drivers/gpu/drm/drm_edid_modes.h index 57459b316adc..ff98a7eb38dd 100644 --- a/trunk/drivers/gpu/drm/drm_edid_modes.h +++ b/trunk/drivers/gpu/drm/drm_edid_modes.h @@ -89,7 +89,7 @@ static const struct drm_display_mode drm_dmt_modes[] = { 976, 1088, 0, 480, 486, 494, 517, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@43Hz, interlace */ - { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 1208, 1264, 0, 768, 768, 772, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -395,7 +395,7 @@ static const struct drm_display_mode edid_est_modes[] = { { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ - { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, 1208, 1264, 0, 768, 768, 776, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, @@ -506,17 +506,17 @@ static const struct drm_display_mode edid_cea_modes[] = { 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 5 - 1920x1080i@60Hz */ - { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 6 - 1440x480i@60Hz */ - { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 7 - 1440x480i@60Hz */ - { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -531,12 +531,12 @@ static const struct drm_display_mode edid_cea_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK) }, /* 10 - 
2880x480i@60Hz */ - { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 11 - 2880x480i@60Hz */ - { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -573,17 +573,17 @@ static const struct drm_display_mode edid_cea_modes[] = { 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 20 - 1920x1080i@50Hz */ - { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 21 - 1440x576i@50Hz */ - { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 22 - 1440x576i@50Hz */ - { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -598,12 +598,12 @@ static const struct drm_display_mode edid_cea_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK) }, /* 25 - 2880x576i@50Hz */ - { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 26 - 2880x576i@50Hz */ - { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -656,12 +656,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 39 - 1920x1080i@50Hz */ - { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 40 - 1920x1080i@100Hz */ - { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -688,7 +688,7 @@ static const struct drm_display_mode edid_cea_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK) }, /* 46 - 1920x1080i@120Hz */ - { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -705,12 +705,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_NVSYNC) }, /* 50 - 1440x480i@120Hz */ - { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 51 - 1440x480i@120Hz */ - { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -723,12 +723,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 54 - 1440x576i@200Hz */ - { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 55 - 1440x576i@200Hz */ - { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, + { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -741,12 +741,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 58 - 1440x480i@240 */ - { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 59 - 1440x480i@240 */ - { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, + { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, diff --git a/trunk/drivers/gpu/drm/drm_fb_cma_helper.c b/trunk/drivers/gpu/drm/drm_fb_cma_helper.c deleted file mode 100644 index 09e11a5d921a..000000000000 --- a/trunk/drivers/gpu/drm/drm_fb_cma_helper.c +++ /dev/null @@ -1,406 +0,0 @@ -/* - * drm kms/fb cma (contiguous memory allocator) helper functions - * - * Copyright (C) 2012 Analog Device Inc. - * Author: Lars-Peter Clausen - * - * Based on udl_fbdev.c - * Copyright (C) 2012 Red Hat - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
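In the mode-table hunks above only the name string changes ("1440x480i" versus "1440x480", and so on); the interlaced timing itself is carried by DRM_MODE_FLAG_INTERLACE, not by the name, so the suffix is purely cosmetic. One entry spelled out, with the values copied from CEA mode 5 earlier in the table:

/* Sketch: CEA-861 mode 5, 1920x1080i@60Hz. */
static const struct drm_display_mode cea_mode_5 = {
	DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
		 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
		 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
		 DRM_MODE_FLAG_INTERLACE)
};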
- */ - -#include -#include -#include -#include -#include -#include -#include - -struct drm_fb_cma { - struct drm_framebuffer fb; - struct drm_gem_cma_object *obj[4]; -}; - -struct drm_fbdev_cma { - struct drm_fb_helper fb_helper; - struct drm_fb_cma *fb; -}; - -static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper) -{ - return container_of(helper, struct drm_fbdev_cma, fb_helper); -} - -static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb) -{ - return container_of(fb, struct drm_fb_cma, fb); -} - -static void drm_fb_cma_destroy(struct drm_framebuffer *fb) -{ - struct drm_fb_cma *fb_cma = to_fb_cma(fb); - int i; - - for (i = 0; i < 4; i++) { - if (fb_cma->obj[i]) - drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base); - } - - drm_framebuffer_cleanup(fb); - kfree(fb_cma); -} - -static int drm_fb_cma_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned int *handle) -{ - struct drm_fb_cma *fb_cma = to_fb_cma(fb); - - return drm_gem_handle_create(file_priv, - &fb_cma->obj[0]->base, handle); -} - -static struct drm_framebuffer_funcs drm_fb_cma_funcs = { - .destroy = drm_fb_cma_destroy, - .create_handle = drm_fb_cma_create_handle, -}; - -static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, - struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, - unsigned int num_planes) -{ - struct drm_fb_cma *fb_cma; - int ret; - int i; - - fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL); - if (!fb_cma) - return ERR_PTR(-ENOMEM); - - ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs); - if (ret) { - dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret); - kfree(fb_cma); - return ERR_PTR(ret); - } - - drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd); - - for (i = 0; i < num_planes; i++) - fb_cma->obj[i] = obj[i]; - - return fb_cma; -} - -/** - * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function - * - * If your hardware has special alignment or pitch requirements these should be - * checked before calling this function. - */ -struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, - struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct drm_fb_cma *fb_cma; - struct drm_gem_cma_object *objs[4]; - struct drm_gem_object *obj; - unsigned int hsub; - unsigned int vsub; - int ret; - int i; - - hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); - vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); - - for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { - unsigned int width = mode_cmd->width / (i ? hsub : 1); - unsigned int height = mode_cmd->height / (i ? 
vsub : 1); - unsigned int min_size; - - obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]); - if (!obj) { - dev_err(dev->dev, "Failed to lookup GEM object\n"); - ret = -ENXIO; - goto err_gem_object_unreference; - } - - min_size = (height - 1) * mode_cmd->pitches[i] - + width * drm_format_plane_cpp(mode_cmd->pixel_format, i) - + mode_cmd->offsets[i]; - - if (obj->size < min_size) { - drm_gem_object_unreference_unlocked(obj); - ret = -EINVAL; - goto err_gem_object_unreference; - } - objs[i] = to_drm_gem_cma_obj(obj); - } - - fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i); - if (IS_ERR(fb_cma)) { - ret = PTR_ERR(fb_cma); - goto err_gem_object_unreference; - } - - return &fb_cma->fb; - -err_gem_object_unreference: - for (i--; i >= 0; i--) - drm_gem_object_unreference_unlocked(&objs[i]->base); - return ERR_PTR(ret); -} -EXPORT_SYMBOL_GPL(drm_fb_cma_create); - -/** - * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer - * @fb: The framebuffer - * @plane: Which plane - * - * Return the CMA GEM object for given framebuffer. - * - * This function will usually be called from the CRTC callback functions. - */ -struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, - unsigned int plane) -{ - struct drm_fb_cma *fb_cma = to_fb_cma(fb); - - if (plane >= 4) - return NULL; - - return fb_cma->obj[plane]; -} -EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); - -static struct fb_ops drm_fbdev_cma_ops = { - .owner = THIS_MODULE, - .fb_fillrect = sys_fillrect, - .fb_copyarea = sys_copyarea, - .fb_imageblit = sys_imageblit, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, - .fb_blank = drm_fb_helper_blank, - .fb_pan_display = drm_fb_helper_pan_display, - .fb_setcmap = drm_fb_helper_setcmap, -}; - -static int drm_fbdev_cma_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper); - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - struct drm_device *dev = helper->dev; - struct drm_gem_cma_object *obj; - struct drm_framebuffer *fb; - unsigned int bytes_per_pixel; - unsigned long offset; - struct fb_info *fbi; - size_t size; - int ret; - - DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n", - sizes->surface_width, sizes->surface_height, - sizes->surface_bpp); - - bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - size = mode_cmd.pitches[0] * mode_cmd.height; - obj = drm_gem_cma_create(dev, size); - if (!obj) - return -ENOMEM; - - fbi = framebuffer_alloc(0, dev->dev); - if (!fbi) { - dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); - ret = -ENOMEM; - goto err_drm_gem_cma_free_object; - } - - fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1); - if (IS_ERR(fbdev_cma->fb)) { - dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); - ret = PTR_ERR(fbdev_cma->fb); - goto err_framebuffer_release; - } - - fb = &fbdev_cma->fb->fb; - helper->fb = fb; - helper->fbdev = fbi; - - fbi->par = helper; - fbi->flags = FBINFO_FLAG_DEFAULT; - fbi->fbops = &drm_fbdev_cma_ops; - - ret = fb_alloc_cmap(&fbi->cmap, 256, 0); - if (ret) { - dev_err(dev->dev, "Failed to allocate color map.\n"); - goto err_drm_fb_cma_destroy; - } - - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); - 
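The allocation in drm_fbdev_cma_create() above follows directly from the requested surface geometry: bytes per pixel are rounded up from bpp, the pitch is width times that, and the buffer is pitch times height. The arithmetic in isolation (a sketch; no pitch alignment is assumed, matching the helper shown here):

/* Sketch: size of a linear, single-plane framebuffer. */
static size_t fb_surface_size(unsigned int width, unsigned int height,
			      unsigned int bpp)
{
	unsigned int bytes_per_pixel = DIV_ROUND_UP(bpp, 8);
	size_t pitch = (size_t)width * bytes_per_pixel;

	return pitch * height;
}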
-	offset = fbi->var.xoffset * bytes_per_pixel;
-	offset += fbi->var.yoffset * fb->pitches[0];
-
-	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
-	fbi->screen_base = obj->vaddr + offset;
-	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
-	fbi->screen_size = size;
-	fbi->fix.smem_len = size;
-
-	return 0;
-
-err_drm_fb_cma_destroy:
-	drm_fb_cma_destroy(fb);
-err_framebuffer_release:
-	framebuffer_release(fbi);
-err_drm_gem_cma_free_object:
-	drm_gem_cma_free_object(&obj->base);
-	return ret;
-}
-
-static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
-	struct drm_fb_helper_surface_size *sizes)
-{
-	int ret = 0;
-
-	if (!helper->fb) {
-		ret = drm_fbdev_cma_create(helper, sizes);
-		if (ret < 0)
-			return ret;
-		ret = 1;
-	}
-
-	return ret;
-}
-
-static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
-	.fb_probe = drm_fbdev_cma_probe,
-};
-
-/**
- * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device
- * @num_crtc: Number of CRTCs
- * @max_conn_count: Maximum number of connectors
- *
- * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
- */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-	unsigned int preferred_bpp, unsigned int num_crtc,
-	unsigned int max_conn_count)
-{
-	struct drm_fbdev_cma *fbdev_cma;
-	struct drm_fb_helper *helper;
-	int ret;
-
-	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
-	if (!fbdev_cma) {
-		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
-	helper = &fbdev_cma->fb_helper;
-
-	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
-	if (ret < 0) {
-		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
-		goto err_free;
-	}
-
-	ret = drm_fb_helper_single_add_all_connectors(helper);
-	if (ret < 0) {
-		dev_err(dev->dev, "Failed to add connectors.\n");
-		goto err_drm_fb_helper_fini;
-	}
-
-	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
-	if (ret < 0) {
-		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
-		goto err_drm_fb_helper_fini;
-	}
-
-	return fbdev_cma;
-
-err_drm_fb_helper_fini:
-	drm_fb_helper_fini(helper);
-err_free:
-	kfree(fbdev_cma);
-
-	return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
-
-/**
- * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
- * @fbdev_cma: The drm_fbdev_cma struct
- */
-void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
-{
-	if (fbdev_cma->fb_helper.fbdev) {
-		struct fb_info *info;
-		int ret;
-
-		info = fbdev_cma->fb_helper.fbdev;
-		ret = unregister_framebuffer(info);
-		if (ret < 0)
-			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
-
-		if (info->cmap.len)
-			fb_dealloc_cmap(&info->cmap);
-
-		framebuffer_release(info);
-	}
-
-	if (fbdev_cma->fb)
-		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
-
-	drm_fb_helper_fini(&fbdev_cma->fb_helper);
-	kfree(fbdev_cma);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
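-
-/*
- * Typical driver wiring for the two entry points above (a minimal sketch;
- * the "priv" structure, the 16 bpp preference and the single CRTC and
- * connector counts are assumptions, not part of this file):
- *
- *	priv->fbdev = drm_fbdev_cma_init(drm, 16, 1, 1);
- *	if (IS_ERR(priv->fbdev))
- *		return PTR_ERR(priv->fbdev);
- *	...
- *	later, on driver teardown:
- *	drm_fbdev_cma_fini(priv->fbdev);
- */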
-
-/**
- * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- *
- * This function is usually called from the DRM driver's lastclose callback.
- */
-void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
-{
-	if (fbdev_cma)
-		drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
-
-/**
- * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- *
- * This function is usually called from the DRM driver's output_poll_changed
- * callback.
- */
-void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
-{
-	if (fbdev_cma)
-		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
-}
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index b5d05f58c15a..f546d1e8af82 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
 
-static bool drm_fb_helper_force_kernel_mode(void)
+bool drm_fb_helper_force_kernel_mode(void)
 {
 	bool ret, error = false;
 	struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 	/* Walk the connectors & encoders on this fb turning them on/off */
 	for (j = 0; j < fb_helper->connector_count; j++) {
 		connector = fb_helper->connector_info[j]->connector;
-		connector->funcs->dpms(connector, dpms_mode);
+		drm_helper_connector_dpms(connector, dpms_mode);
 		drm_connector_property_set_value(connector,
 			dev->mode_config.dpms_property, dpms_mode);
 	}
@@ -1230,6 +1230,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_fb_helper_crtc **crtcs;
 	struct drm_display_mode **modes;
+	struct drm_encoder *encoder;
 	struct drm_mode_set *modeset;
 	bool *enabled;
 	int width, height;
@@ -1240,6 +1241,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;
 
+	/* clean out all the encoder/crtc combos */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->crtc = NULL;
+	}
+
 	crtcs = kcalloc(dev->mode_config.num_connector,
 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
 	modes = kcalloc(dev->mode_config.num_connector,
diff --git a/trunk/drivers/gpu/drm/drm_gem_cma_helper.c b/trunk/drivers/gpu/drm/drm_gem_cma_helper.c
deleted file mode 100644
index 1aa8fee1e865..000000000000
--- a/trunk/drivers/gpu/drm/drm_gem_cma_helper.c
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * drm gem CMA (contiguous memory allocator) helper functions
- *
- * Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * Based on Samsung Exynos code
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/drm_gem_cma_helper.h>
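-
-/*
- * These helpers back each GEM object with a single physically contiguous
- * buffer obtained from dma_alloc_writecombine(), intended for display
- * controllers that scan out of contiguous memory without an IOMMU.
- */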
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
-	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
-
-static void drm_gem_cma_buf_destroy(struct drm_device *drm,
-		struct drm_gem_cma_object *cma_obj)
-{
-	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
-			cma_obj->paddr);
-}
-
-/*
- * drm_gem_cma_create - allocate an object with the given size
- *
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
- */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
-		unsigned int size)
-{
-	struct drm_gem_cma_object *cma_obj;
-	struct drm_gem_object *gem_obj;
-	int ret;
-
-	size = round_up(size, PAGE_SIZE);
-
-	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
-	if (!cma_obj)
-		return ERR_PTR(-ENOMEM);
-
-	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
-			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
-	if (!cma_obj->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
-		ret = -ENOMEM;
-		goto err_dma_alloc;
-	}
-
-	gem_obj = &cma_obj->base;
-
-	ret = drm_gem_object_init(drm, gem_obj, size);
-	if (ret)
-		goto err_obj_init;
-
-	ret = drm_gem_create_mmap_offset(gem_obj);
-	if (ret)
-		goto err_create_mmap_offset;
-
-	return cma_obj;
-
-err_create_mmap_offset:
-	drm_gem_object_release(gem_obj);
-
-err_obj_init:
-	drm_gem_cma_buf_destroy(drm, cma_obj);
-
-err_dma_alloc:
-	kfree(cma_obj);
-
-	return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_create);
-
-/*
- * drm_gem_cma_create_with_handle - allocate an object with the given
- * size and create a gem handle on it
- *
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
- */
-static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
-		struct drm_file *file_priv,
-		struct drm_device *drm, unsigned int size,
-		unsigned int *handle)
-{
-	struct drm_gem_cma_object *cma_obj;
-	struct drm_gem_object *gem_obj;
-	int ret;
-
-	cma_obj = drm_gem_cma_create(drm, size);
-	if (IS_ERR(cma_obj))
-		return cma_obj;
-
-	gem_obj = &cma_obj->base;
-
-	/*
-	 * Allocate an id from the idr table, under which the object is
-	 * registered; the handle carries that id back to userspace.
-	 */
-	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
-	if (ret)
-		goto err_handle_create;
-
-	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_unreference_unlocked(gem_obj);
-
-	return cma_obj;
-
-err_handle_create:
-	drm_gem_cma_free_object(gem_obj);
-
-	return ERR_PTR(ret);
-}
-
-/*
- * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
- * function
- */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
-{
-	struct drm_gem_cma_object *cma_obj;
-
-	if (gem_obj->map_list.map)
-		drm_gem_free_mmap_offset(gem_obj);
-
-	drm_gem_object_release(gem_obj);
-
-	cma_obj = to_drm_gem_cma_obj(gem_obj);
-
-	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
-
-	kfree(cma_obj);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
-
-/*
- * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
- * function
- *
- * This aligns the pitch and size arguments to the minimum required. Wrap
- * this into your own function if you need bigger alignment.
- */ -int drm_gem_cma_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, struct drm_mode_create_dumb *args) -{ - struct drm_gem_cma_object *cma_obj; - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - - if (args->pitch < min_pitch) - args->pitch = min_pitch; - - if (args->size < args->pitch * args->height) - args->size = args->pitch * args->height; - - cma_obj = drm_gem_cma_create_with_handle(file_priv, dev, - args->size, &args->handle); - if (IS_ERR(cma_obj)) - return PTR_ERR(cma_obj); - - return 0; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); - -/* - * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback - * function - */ -int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, uint32_t handle, uint64_t *offset) -{ - struct drm_gem_object *gem_obj; - - mutex_lock(&drm->struct_mutex); - - gem_obj = drm_gem_object_lookup(drm, file_priv, handle); - if (!gem_obj) { - dev_err(drm->dev, "failed to lookup gem object\n"); - mutex_unlock(&drm->struct_mutex); - return -EINVAL; - } - - *offset = get_gem_mmap_offset(gem_obj); - - drm_gem_object_unreference(gem_obj); - - mutex_unlock(&drm->struct_mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset); - -const struct vm_operations_struct drm_gem_cma_vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; -EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops); - -/* - * drm_gem_cma_mmap - (struct file_operation)->mmap callback function - */ -int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_gem_object *gem_obj; - struct drm_gem_cma_object *cma_obj; - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - gem_obj = vma->vm_private_data; - cma_obj = to_drm_gem_cma_obj(gem_obj); - - ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, vma->vm_page_prot); - if (ret) - drm_gem_vm_close(vma); - - return ret; -} -EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); - -/* - * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function - */ -int drm_gem_cma_dumb_destroy(struct drm_file *file_priv, - struct drm_device *drm, unsigned int handle) -{ - return drm_gem_handle_delete(file_priv, handle); -} -EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy); diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index 076c4a86ff84..03f16f352fe2 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -1236,7 +1236,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, return ret; } -static void drm_handle_vblank_events(struct drm_device *dev, int crtc) +void drm_handle_vblank_events(struct drm_device *dev, int crtc) { struct drm_pending_vblank_event *e, *t; struct timeval now; diff --git a/trunk/drivers/gpu/drm/drm_vm.c b/trunk/drivers/gpu/drm/drm_vm.c index 6fed21502313..961ee08927fe 100644 --- a/trunk/drivers/gpu/drm/drm_vm.c +++ b/trunk/drivers/gpu/drm/drm_vm.c @@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) tmp = pgprot_writecombine(tmp); else tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) || defined(__arm__) || defined(__mips__) +#elif defined(__sparc__) || defined(__arm__) tmp = pgprot_noncached(tmp); #endif return tmp; @@ -619,11 +619,20 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) offset = drm_core_get_reg_ofs(dev); vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); +#if !defined(__arm__) 
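+		/* Non-ARM path: map I/O memory with io_remap_pfn_range() */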
if (io_remap_pfn_range(vma, vma->vm_start, (map->offset + offset) >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; +#else + if (remap_pfn_range(vma, vma->vm_start, + (map->offset + offset) >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; +#endif + DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," " offset = 0x%llx\n", map->type, diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c index 9dce3b9c3896..d9568198c300 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -147,7 +147,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); - kfree(edid); + + kfree(connector->display_info.raw_edid); + connector->display_info.raw_edid = edid; } else { struct drm_display_mode *mode = drm_mode_create(connector->dev); struct exynos_drm_panel_info *panel; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index f4ac43356583..d5586cc75163 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -266,8 +266,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, /* release drm framebuffer and real buffer */ if (fb_helper->fb && fb_helper->fb->funcs) { fb = fb_helper->fb; - if (fb) - drm_framebuffer_remove(fb); + if (fb && fb->funcs->destroy) + fb->funcs->destroy(fb); } /* release linux framebuffer */ diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c index e364165f1a2a..537027a74fd5 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -102,6 +102,7 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector, u8 *edid, int len) { struct vidi_context *ctx = get_vidi_context(dev); + struct edid *raw_edid; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -114,6 +115,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector, return -EFAULT; } + raw_edid = kzalloc(len, GFP_KERNEL); + if (!raw_edid) { + DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); + return -ENOMEM; + } + + memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) + * EDID_LENGTH, len)); + + /* attach the edid data to connector. 
*/ + connector->display_info.raw_edid = (char *)raw_edid; + memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) * EDID_LENGTH, len)); diff --git a/trunk/drivers/gpu/drm/gma500/Makefile b/trunk/drivers/gpu/drm/gma500/Makefile index 7a2d40a5c1e1..abfa2a93f0d0 100644 --- a/trunk/drivers/gpu/drm/gma500/Makefile +++ b/trunk/drivers/gpu/drm/gma500/Makefile @@ -3,7 +3,7 @@ # ccflags-y += -I$(srctree)/include/drm -gma500_gfx-y += \ +gma500_gfx-y += gem_glue.o \ accel_2d.o \ backlight.o \ framebuffer.o \ @@ -30,8 +30,7 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \ cdv_intel_crt.o \ cdv_intel_display.o \ cdv_intel_hdmi.o \ - cdv_intel_lvds.o \ - cdv_intel_dp.o + cdv_intel_lvds.o gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \ oaktrail_crtc.o \ diff --git a/trunk/drivers/gpu/drm/gma500/backlight.c b/trunk/drivers/gpu/drm/gma500/backlight.c index 143eba3309c5..20793951fcac 100644 --- a/trunk/drivers/gpu/drm/gma500/backlight.c +++ b/trunk/drivers/gpu/drm/gma500/backlight.c @@ -26,55 +26,10 @@ #include "intel_bios.h" #include "power.h" -static void do_gma_backlight_set(struct drm_device *dev) -{ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct drm_psb_private *dev_priv = dev->dev_private; - backlight_update_status(dev_priv->backlight_device); -#endif -} - -void gma_backlight_enable(struct drm_device *dev) -{ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct drm_psb_private *dev_priv = dev->dev_private; - dev_priv->backlight_enabled = true; - if (dev_priv->backlight_device) { - dev_priv->backlight_device->props.brightness = dev_priv->backlight_level; - do_gma_backlight_set(dev); - } -#endif -} - -void gma_backlight_disable(struct drm_device *dev) -{ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct drm_psb_private *dev_priv = dev->dev_private; - dev_priv->backlight_enabled = false; - if (dev_priv->backlight_device) { - dev_priv->backlight_device->props.brightness = 0; - do_gma_backlight_set(dev); - } -#endif -} - -void gma_backlight_set(struct drm_device *dev, int v) -{ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct drm_psb_private *dev_priv = dev->dev_private; - dev_priv->backlight_level = v; - if (dev_priv->backlight_device && dev_priv->backlight_enabled) { - dev_priv->backlight_device->props.brightness = v; - do_gma_backlight_set(dev); - } -#endif -} - int gma_backlight_init(struct drm_device *dev) { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = dev->dev_private; - dev_priv->backlight_enabled = true; return dev_priv->ops->backlight_init(dev); #else return 0; diff --git a/trunk/drivers/gpu/drm/gma500/cdv_device.c b/trunk/drivers/gpu/drm/gma500/cdv_device.c index bfc2f397019a..b7e7b49d8f62 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_device.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_device.c @@ -58,17 +58,10 @@ static int cdv_output_init(struct drm_device *dev) cdv_intel_lvds_init(dev, &dev_priv->mode_dev); /* These bits indicate HDMI not SDVO on CDV */ - if (REG_READ(SDVOB) & SDVO_DETECTED) { + if (REG_READ(SDVOB) & SDVO_DETECTED) cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); - if (REG_READ(DP_B) & DP_DETECTED) - cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B); - } - - if (REG_READ(SDVOC) & SDVO_DETECTED) { + if (REG_READ(SDVOC) & SDVO_DETECTED) cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC); - if (REG_READ(DP_C) & DP_DETECTED) - cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C); - } return 0; } @@ -170,7 +163,6 @@ static int cdv_backlight_init(struct drm_device *dev) cdv_get_brightness(cdv_backlight_device); backlight_update_status(cdv_backlight_device); 
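 	/* Keep the backlight device in dev_priv for later brightness updates */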
dev_priv->backlight_device = cdv_backlight_device; - dev_priv->backlight_enabled = true; return 0; } @@ -457,7 +449,6 @@ static void cdv_get_core_freq(struct drm_device *dev) case 6: case 7: dev_priv->core_freq = 266; - break; default: dev_priv->core_freq = 0; } @@ -497,65 +488,6 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on) } } -static const char *force_audio_names[] = { - "off", - "auto", - "on", -}; - -void cdv_intel_attach_force_audio_property(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct drm_property *prop; - int i; - - prop = dev_priv->force_audio_property; - if (prop == NULL) { - prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, - "audio", - ARRAY_SIZE(force_audio_names)); - if (prop == NULL) - return; - - for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) - drm_property_add_enum(prop, i, i-1, force_audio_names[i]); - - dev_priv->force_audio_property = prop; - } - drm_connector_attach_property(connector, prop, 0); -} - - -static const char *broadcast_rgb_names[] = { - "Full", - "Limited 16:235", -}; - -void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct drm_property *prop; - int i; - - prop = dev_priv->broadcast_rgb_property; - if (prop == NULL) { - prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, - "Broadcast RGB", - ARRAY_SIZE(broadcast_rgb_names)); - if (prop == NULL) - return; - - for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) - drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); - - dev_priv->broadcast_rgb_property = prop; - } - - drm_connector_attach_property(connector, prop, 0); -} - /* Cedarview */ static const struct psb_offset cdv_regmap[2] = { { diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c index 3cfd0931fbfb..a68509ba22a8 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -57,26 +57,15 @@ struct cdv_intel_clock_t { struct cdv_intel_limit_t { struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1; struct cdv_intel_p2_t p2; - bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *, - int, int, struct cdv_intel_clock_t *); }; -static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, - struct drm_crtc *crtc, int target, int refclk, - struct cdv_intel_clock_t *best_clock); -static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, - int refclk, - struct cdv_intel_clock_t *best_clock); - #define CDV_LIMIT_SINGLE_LVDS_96 0 #define CDV_LIMIT_SINGLE_LVDS_100 1 #define CDV_LIMIT_DAC_HDMI_27 2 #define CDV_LIMIT_DAC_HDMI_96 3 -#define CDV_LIMIT_DP_27 4 -#define CDV_LIMIT_DP_100 5 static const struct cdv_intel_limit_t cdv_intel_limits[] = { - { /* CDV_SINGLE_LVDS_96MHz */ + { /* CDV_SIGNLE_LVDS_96MHz */ .dot = {.min = 20000, .max = 115500}, .vco = {.min = 1800000, .max = 3600000}, .n = {.min = 2, .max = 6}, @@ -87,7 +76,6 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p1 = {.min = 2, .max = 10}, .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, - .find_pll = cdv_intel_find_best_PLL, }, { /* CDV_SINGLE_LVDS_100MHz */ .dot = {.min = 20000, .max = 115500}, @@ -102,7 +90,6 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { * is 80-224Mhz. 
Prefer single channel as much as possible. */ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, - .find_pll = cdv_intel_find_best_PLL, }, { /* CDV_DAC_HDMI_27MHz */ .dot = {.min = 20000, .max = 400000}, @@ -114,7 +101,6 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p = {.min = 5, .max = 90}, .p1 = {.min = 1, .max = 9}, .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, - .find_pll = cdv_intel_find_best_PLL, }, { /* CDV_DAC_HDMI_96MHz */ .dot = {.min = 20000, .max = 400000}, @@ -126,32 +112,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p = {.min = 5, .max = 100}, .p1 = {.min = 1, .max = 10}, .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, - .find_pll = cdv_intel_find_best_PLL, - }, - { /* CDV_DP_27MHz */ - .dot = {.min = 160000, .max = 272000}, - .vco = {.min = 1809000, .max = 3564000}, - .n = {.min = 1, .max = 1}, - .m = {.min = 67, .max = 132}, - .m1 = {.min = 0, .max = 0}, - .m2 = {.min = 65, .max = 130}, - .p = {.min = 5, .max = 90}, - .p1 = {.min = 1, .max = 9}, - .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10}, - .find_pll = cdv_intel_find_dp_pll, }, - { /* CDV_DP_100MHz */ - .dot = {.min = 160000, .max = 272000}, - .vco = {.min = 1800000, .max = 3600000}, - .n = {.min = 2, .max = 6}, - .m = {.min = 60, .max = 164}, - .m1 = {.min = 0, .max = 0}, - .m2 = {.min = 58, .max = 162}, - .p = {.min = 5, .max = 100}, - .p1 = {.min = 1, .max = 10}, - .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10}, - .find_pll = cdv_intel_find_dp_pll, - } }; #define _wait_for(COND, MS, W) ({ \ @@ -171,7 +132,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { #define wait_for(COND, MS) _wait_for(COND, MS, 1) -int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) +static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) { int ret; @@ -198,7 +159,7 @@ int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) return 0; } -int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) +static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) { int ret; static bool dpio_debug = true; @@ -240,7 +201,7 @@ int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) /* Reset the DPIO configuration register. The BIOS does this at every * mode set. 
*/ -void cdv_sb_reset(struct drm_device *dev) +static void cdv_sb_reset(struct drm_device *dev) { REG_WRITE(DPIO_CFG, 0); @@ -255,7 +216,7 @@ void cdv_sb_reset(struct drm_device *dev) */ static int cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, - struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select) + struct cdv_intel_clock_t *clock, bool is_lvds) { struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); int pipe = psb_crtc->pipe; @@ -298,7 +259,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, ref_value &= ~(REF_CLK_MASK); /* use DPLL_A for pipeB on CRT/HDMI */ - if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) { + if (pipe == 1 && !is_lvds) { DRM_DEBUG_KMS("use DPLLA for pipe B\n"); ref_value |= REF_CLK_DPLLA; } else { @@ -375,33 +336,30 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, if (ret) return ret; - if (ddi_select) { - if ((ddi_select & DDI_MASK) == DDI0_SELECT) { - lane_reg = PSB_LANE0; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - - lane_reg = PSB_LANE1; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - } else { - lane_reg = PSB_LANE2; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - - lane_reg = PSB_LANE3; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - } - } + lane_reg = PSB_LANE0; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + + lane_reg = PSB_LANE1; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + + lane_reg = PSB_LANE2; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + + lane_reg = PSB_LANE3; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + return 0; } @@ -438,12 +396,6 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc, limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; else limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; - } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || - psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { - if (refclk == 27000) - limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; - else - limit = &cdv_intel_limits[CDV_LIMIT_DP_100]; } else { if (refclk == 27000) limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27]; @@ -486,12 +438,13 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc, return true; } -static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, - struct drm_crtc *crtc, int target, int refclk, - struct cdv_intel_clock_t *best_clock) +static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target, + int refclk, + struct cdv_intel_clock_t *best_clock) { struct 
drm_device *dev = crtc->dev; struct cdv_intel_clock_t clock; + const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk); int err = target; @@ -545,49 +498,6 @@ static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, return err != target; } -static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, - int refclk, - struct cdv_intel_clock_t *best_clock) -{ - struct cdv_intel_clock_t clock; - if (refclk == 27000) { - if (target < 200000) { - clock.p1 = 2; - clock.p2 = 10; - clock.n = 1; - clock.m1 = 0; - clock.m2 = 118; - } else { - clock.p1 = 1; - clock.p2 = 10; - clock.n = 1; - clock.m1 = 0; - clock.m2 = 98; - } - } else if (refclk == 100000) { - if (target < 200000) { - clock.p1 = 2; - clock.p2 = 10; - clock.n = 5; - clock.m1 = 0; - clock.m2 = 160; - } else { - clock.p1 = 1; - clock.p2 = 10; - clock.n = 5; - clock.m1 = 0; - clock.m2 = 133; - } - } else - return false; - clock.m = clock.m2 + 2; - clock.p = clock.p1 * clock.p2; - clock.vco = (refclk * clock.m) / clock.n; - clock.dot = clock.vco / clock.p; - memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); - return true; -} - static int cdv_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -881,7 +791,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: if (psb_intel_crtc->active) - break; + return; psb_intel_crtc->active = true; @@ -925,15 +835,17 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(map->status, temp); REG_READ(map->status); + cdv_intel_update_watermark(dev, crtc); cdv_intel_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ + psb_intel_crtc->crtc_enable = true; break; case DRM_MODE_DPMS_OFF: if (!psb_intel_crtc->active) - break; + return; psb_intel_crtc->active = false; @@ -980,9 +892,10 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) /* Wait for the clocks to turn off. 
*/ udelay(150); + cdv_intel_update_watermark(dev, crtc); + psb_intel_crtc->crtc_enable = false; break; } - cdv_intel_update_watermark(dev, crtc); /*Set FIFO Watermarks*/ REG_WRITE(DSPARB, 0x3F3E); } @@ -1039,12 +952,9 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, u32 dpll = 0, dspcntr, pipeconf; bool ok; bool is_crt = false, is_lvds = false, is_tv = false; - bool is_hdmi = false, is_dp = false; + bool is_hdmi = false; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; - const struct cdv_intel_limit_t *limit; - u32 ddi_select = 0; - bool is_edp = false; list_for_each_entry(connector, &mode_config->connector_list, head) { struct psb_intel_encoder *psb_intel_encoder = @@ -1054,7 +964,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, || connector->encoder->crtc != crtc) continue; - ddi_select = psb_intel_encoder->ddi_select; switch (psb_intel_encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -1068,15 +977,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, case INTEL_OUTPUT_HDMI: is_hdmi = true; break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_edp = true; - break; - default: - DRM_ERROR("invalid output type.\n"); - return 0; } } @@ -1086,20 +986,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, else /* high-end sku, 27/100 mhz */ refclk = 27000; - if (is_dp || is_edp) { - /* - * Based on the spec the low-end SKU has only CRT/LVDS. So it is - * unnecessary to consider it for DP/eDP. - * On the high-end SKU, it will use the 27/100M reference clk - * for DP/eDP. When using SSC clock, the ref clk is 100MHz.Otherwise - * it will be 27MHz. From the VBIOS code it seems that the pipe A choose - * 27MHz for DP/eDP while the Pipe B chooses the 100MHz. 
- */ - if (pipe == 0) - refclk = 27000; - else - refclk = 100000; - } if (is_lvds && dev_priv->lvds_use_ssc) { refclk = dev_priv->lvds_ssc_freq * 1000; @@ -1107,10 +993,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, } drm_mode_debug_printmodeline(adjusted_mode); - - limit = cdv_intel_limit(crtc, refclk); - ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, + ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); @@ -1125,15 +1009,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, } /* dpll |= PLL_REF_INPUT_DREFCLK; */ - if (is_dp || is_edp) { - cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode); - } else { - REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0); - REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0); - REG_WRITE(PIPE_DP_LINK_M(pipe), 0); - REG_WRITE(PIPE_DP_LINK_N(pipe), 0); - } - dpll |= DPLL_SYNCLOCK_ENABLE; /* if (is_lvds) dpll |= DPLLB_MODE_LVDS; @@ -1144,31 +1019,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, /* setup pipeconf */ pipeconf = REG_READ(map->conf); - pipeconf &= ~(PIPE_BPC_MASK); - if (is_edp) { - switch (dev_priv->edp.bpp) { - case 24: - pipeconf |= PIPE_8BPC; - break; - case 18: - pipeconf |= PIPE_6BPC; - break; - case 30: - pipeconf |= PIPE_10BPC; - break; - default: - pipeconf |= PIPE_8BPC; - break; - } - } else if (is_lvds) { - /* the BPC will be 6 if it is 18-bit LVDS panel */ - if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) - pipeconf |= PIPE_8BPC; - else - pipeconf |= PIPE_6BPC; - } else - pipeconf |= PIPE_8BPC; - /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -1183,7 +1033,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE); REG_READ(map->dpll); - cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select); + cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds); udelay(150); diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c deleted file mode 100644 index c9abc06ef680..000000000000 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ /dev/null @@ -1,1951 +0,0 @@ -/* - * Copyright © 2012 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- *
- * Authors:
- *	Keith Packard
- *
- */
-
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include "drmP.h"
-#include "drm.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-#include "drm_dp_helper.h"
-
-#define _wait_for(COND, MS, W) ({ \
-	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
-	int ret__ = 0;							\
-	while (! (COND)) {						\
-		if (time_after(jiffies, timeout__)) {			\
-			ret__ = -ETIMEDOUT;				\
-			break;						\
-		}							\
-		if (W && !in_dbg_master()) msleep(W);			\
-	}								\
-	ret__;								\
-})
-
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-
-#define DP_LINK_STATUS_SIZE	6
-#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
-
-#define DP_LINK_CONFIGURATION_SIZE	9
-
-#define CDV_FAST_LINK_TRAIN	1
-
-struct cdv_intel_dp {
-	uint32_t output_reg;
-	uint32_t DP;
-	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
-	bool has_audio;
-	int force_audio;
-	uint32_t color_range;
-	uint8_t link_bw;
-	uint8_t lane_count;
-	uint8_t dpcd[4];
-	struct psb_intel_encoder *encoder;
-	struct i2c_adapter adapter;
-	struct i2c_algo_dp_aux_data algo;
-	uint8_t train_set[4];
-	uint8_t link_status[DP_LINK_STATUS_SIZE];
-	int panel_power_up_delay;
-	int panel_power_down_delay;
-	int panel_power_cycle_delay;
-	int backlight_on_delay;
-	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
-	bool panel_on;
-};
-
-struct ddi_regoff {
-	uint32_t	PreEmph1;
-	uint32_t	PreEmph2;
-	uint32_t	VSwing1;
-	uint32_t	VSwing2;
-	uint32_t	VSwing3;
-	uint32_t	VSwing4;
-	uint32_t	VSwing5;
-};
-
-static struct ddi_regoff ddi_DP_train_table[] = {
-	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
-	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
-	.VSwing5 = 0x8158,},
-	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
-	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
-	.VSwing5 = 0x8258,},
-};
-
-static uint32_t dp_vswing_premph_table[] = {
-	0x55338954,	0x4000,
-	0x554d8954,	0x2000,
-	0x55668954,	0,
-	0x559ac0d4,	0x6000,
-};
-/**
- * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
- * @intel_dp: DP struct
- *
- * If a CPU or PCH DP output is attached to an eDP panel, this function
- * will return true, and false otherwise.
- */ -static bool is_edp(struct psb_intel_encoder *encoder) -{ - return encoder->type == INTEL_OUTPUT_EDP; -} - - -static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); -static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); -static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); - -static int -cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - int max_lane_count = 4; - - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { - max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; - switch (max_lane_count) { - case 1: case 2: case 4: - break; - default: - max_lane_count = 4; - } - } - return max_lane_count; -} - -static int -cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; - - switch (max_link_bw) { - case DP_LINK_BW_1_62: - case DP_LINK_BW_2_7: - break; - default: - max_link_bw = DP_LINK_BW_1_62; - break; - } - return max_link_bw; -} - -static int -cdv_intel_dp_link_clock(uint8_t link_bw) -{ - if (link_bw == DP_LINK_BW_2_7) - return 270000; - else - return 162000; -} - -static int -cdv_intel_dp_link_required(int pixel_clock, int bpp) -{ - return (pixel_clock * bpp + 7) / 8; -} - -static int -cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes) -{ - return (max_link_clock * max_lanes * 19) / 20; -} - -static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - u32 pp; - - if (intel_dp->panel_on) { - DRM_DEBUG_KMS("Skip VDD on because of panel on\n"); - return; - } - DRM_DEBUG_KMS("\n"); - - pp = REG_READ(PP_CONTROL); - - pp |= EDP_FORCE_VDD; - REG_WRITE(PP_CONTROL, pp); - REG_READ(PP_CONTROL); - msleep(intel_dp->panel_power_up_delay); -} - -static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - u32 pp; - - DRM_DEBUG_KMS("\n"); - pp = REG_READ(PP_CONTROL); - - pp &= ~EDP_FORCE_VDD; - REG_WRITE(PP_CONTROL, pp); - REG_READ(PP_CONTROL); - -} - -/* Returns true if the panel was already on when called */ -static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE; - - if (intel_dp->panel_on) - return true; - - DRM_DEBUG_KMS("\n"); - pp = REG_READ(PP_CONTROL); - pp &= ~PANEL_UNLOCK_MASK; - - pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON); - REG_WRITE(PP_CONTROL, pp); - REG_READ(PP_CONTROL); - - if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) { - DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS)); - intel_dp->panel_on = false; - } else - intel_dp->panel_on = true; - msleep(intel_dp->panel_power_up_delay); - - return false; -} - -static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - u32 pp, idle_off_mask = PP_ON ; - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - - DRM_DEBUG_KMS("\n"); - - pp = REG_READ(PP_CONTROL); - - if ((pp & POWER_TARGET_ON) == 0) - return; - - intel_dp->panel_on = false; - pp &= ~PANEL_UNLOCK_MASK; - /* ILK workaround: disable reset around power sequence */ - - pp &= ~POWER_TARGET_ON; 
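-	/* Drop the VDD force and backlight enable bits along with the power target */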
- pp &= ~EDP_FORCE_VDD; - pp &= ~EDP_BLC_ENABLE; - REG_WRITE(PP_CONTROL, pp); - REG_READ(PP_CONTROL); - DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS)); - - if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) { - DRM_DEBUG_KMS("Error in turning off Panel\n"); - } - - msleep(intel_dp->panel_power_cycle_delay); - DRM_DEBUG_KMS("Over\n"); -} - -static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - u32 pp; - - DRM_DEBUG_KMS("\n"); - /* - * If we enable the backlight right away following a panel power - * on, we may see slight flicker as the panel syncs with the eDP - * link. So delay a bit to make sure the image is solid before - * allowing it to appear. - */ - msleep(300); - pp = REG_READ(PP_CONTROL); - - pp |= EDP_BLC_ENABLE; - REG_WRITE(PP_CONTROL, pp); - gma_backlight_enable(dev); -} - -static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) -{ - struct drm_device *dev = intel_encoder->base.dev; - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - u32 pp; - - DRM_DEBUG_KMS("\n"); - gma_backlight_disable(dev); - msleep(10); - pp = REG_READ(PP_CONTROL); - - pp &= ~EDP_BLC_ENABLE; - REG_WRITE(PP_CONTROL, pp); - msleep(intel_dp->backlight_off_delay); -} - -static int -cdv_intel_dp_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); - int max_lanes = cdv_intel_dp_max_lane_count(encoder); - struct drm_psb_private *dev_priv = connector->dev->dev_private; - - if (is_edp(encoder) && intel_dp->panel_fixed_mode) { - if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) - return MODE_PANEL; - } - - /* only refuse the mode on non eDP since we have seen some weird eDP panels - which are outside spec tolerances but somehow work by magic */ - if (!is_edp(encoder) && - (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp) - > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))) - return MODE_CLOCK_HIGH; - - if (is_edp(encoder)) { - if (cdv_intel_dp_link_required(mode->clock, 24) - > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)) - return MODE_CLOCK_HIGH; - - } - if (mode->clock < 10000) - return MODE_CLOCK_LOW; - - return MODE_OK; -} - -static uint32_t -pack_aux(uint8_t *src, int src_bytes) -{ - int i; - uint32_t v = 0; - - if (src_bytes > 4) - src_bytes = 4; - for (i = 0; i < src_bytes; i++) - v |= ((uint32_t) src[i]) << ((3-i) * 8); - return v; -} - -static void -unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) -{ - int i; - if (dst_bytes > 4) - dst_bytes = 4; - for (i = 0; i < dst_bytes; i++) - dst[i] = src >> ((3-i) * 8); -} - -static int -cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, - uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - uint32_t output_reg = intel_dp->output_reg; - struct drm_device *dev = encoder->base.dev; - uint32_t ch_ctl = output_reg + 0x10; - uint32_t ch_data = ch_ctl + 4; - int i; - int recv_bytes; - uint32_t status; - uint32_t aux_clock_divider; - int try, precharge; - - /* The clock divider is based off the hrawclk, - * and would like to run at 2MHz. 
So, take the - * hrawclk value and divide by 2 and use that - * On CDV platform it uses 200MHz as hrawclk. - * - */ - aux_clock_divider = 200 / 2; - - precharge = 4; - if (is_edp(encoder)) - precharge = 10; - - if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { - DRM_ERROR("dp_aux_ch not started status 0x%08x\n", - REG_READ(ch_ctl)); - return -EBUSY; - } - - /* Must try at least 3 times according to DP spec */ - for (try = 0; try < 5; try++) { - /* Load the send data into the aux channel data registers */ - for (i = 0; i < send_bytes; i += 4) - REG_WRITE(ch_data + i, - pack_aux(send + i, send_bytes - i)); - - /* Send the command and wait for it to complete */ - REG_WRITE(ch_ctl, - DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_TIME_OUT_400us | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); - for (;;) { - status = REG_READ(ch_ctl); - if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) - break; - udelay(100); - } - - /* Clear done status and any errors */ - REG_WRITE(ch_ctl, - status | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); - if (status & DP_AUX_CH_CTL_DONE) - break; - } - - if ((status & DP_AUX_CH_CTL_DONE) == 0) { - DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); - return -EBUSY; - } - - /* Check for timeout or receive error. - * Timeouts occur when the sink is not connected - */ - if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { - DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); - return -EIO; - } - - /* Timeouts occur when the device isn't connected, so they're - * "normal" -- don't fill the kernel log with these */ - if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { - DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); - return -ETIMEDOUT; - } - - /* Unload any bytes sent back from the other side */ - recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> - DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); - if (recv_bytes > recv_size) - recv_bytes = recv_size; - - for (i = 0; i < recv_bytes; i += 4) - unpack_aux(REG_READ(ch_data + i), - recv + i, recv_bytes - i); - - return recv_bytes; -} - -/* Write data to the aux channel in native mode */ -static int -cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, - uint16_t address, uint8_t *send, int send_bytes) -{ - int ret; - uint8_t msg[20]; - int msg_bytes; - uint8_t ack; - - if (send_bytes > 16) - return -1; - msg[0] = AUX_NATIVE_WRITE << 4; - msg[1] = address >> 8; - msg[2] = address & 0xff; - msg[3] = send_bytes - 1; - memcpy(&msg[4], send, send_bytes); - msg_bytes = send_bytes + 4; - for (;;) { - ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1); - if (ret < 0) - return ret; - if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) - break; - else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) - udelay(100); - else - return -EIO; - } - return send_bytes; -} - -/* Write a single byte to the aux channel in native mode */ -static int -cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, - uint16_t address, uint8_t byte) -{ - return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); -} - -/* read bytes from a native aux channel */ -static int -cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, - uint16_t address, uint8_t *recv, int recv_bytes) -{ - uint8_t msg[4]; - int msg_bytes; - uint8_t reply[20]; - int 
reply_bytes; - uint8_t ack; - int ret; - - msg[0] = AUX_NATIVE_READ << 4; - msg[1] = address >> 8; - msg[2] = address & 0xff; - msg[3] = recv_bytes - 1; - - msg_bytes = 4; - reply_bytes = recv_bytes + 1; - - for (;;) { - ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, - reply, reply_bytes); - if (ret == 0) - return -EPROTO; - if (ret < 0) - return ret; - ack = reply[0]; - if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { - memcpy(recv, reply + 1, ret - 1); - return ret - 1; - } - else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) - udelay(100); - else - return -EIO; - } -} - -static int -cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, - uint8_t write_byte, uint8_t *read_byte) -{ - struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; - struct cdv_intel_dp *intel_dp = container_of(adapter, - struct cdv_intel_dp, - adapter); - struct psb_intel_encoder *encoder = intel_dp->encoder; - uint16_t address = algo_data->address; - uint8_t msg[5]; - uint8_t reply[2]; - unsigned retry; - int msg_bytes; - int reply_bytes; - int ret; - - /* Set up the command byte */ - if (mode & MODE_I2C_READ) - msg[0] = AUX_I2C_READ << 4; - else - msg[0] = AUX_I2C_WRITE << 4; - - if (!(mode & MODE_I2C_STOP)) - msg[0] |= AUX_I2C_MOT << 4; - - msg[1] = address >> 8; - msg[2] = address; - - switch (mode) { - case MODE_I2C_WRITE: - msg[3] = 0; - msg[4] = write_byte; - msg_bytes = 5; - reply_bytes = 1; - break; - case MODE_I2C_READ: - msg[3] = 0; - msg_bytes = 4; - reply_bytes = 2; - break; - default: - msg_bytes = 3; - reply_bytes = 1; - break; - } - - for (retry = 0; retry < 5; retry++) { - ret = cdv_intel_dp_aux_ch(encoder, - msg, msg_bytes, - reply, reply_bytes); - if (ret < 0) { - DRM_DEBUG_KMS("aux_ch failed %d\n", ret); - return ret; - } - - switch (reply[0] & AUX_NATIVE_REPLY_MASK) { - case AUX_NATIVE_REPLY_ACK: - /* I2C-over-AUX Reply field is only valid - * when paired with AUX ACK. 
- */ - break; - case AUX_NATIVE_REPLY_NACK: - DRM_DEBUG_KMS("aux_ch native nack\n"); - return -EREMOTEIO; - case AUX_NATIVE_REPLY_DEFER: - udelay(100); - continue; - default: - DRM_ERROR("aux_ch invalid native reply 0x%02x\n", - reply[0]); - return -EREMOTEIO; - } - - switch (reply[0] & AUX_I2C_REPLY_MASK) { - case AUX_I2C_REPLY_ACK: - if (mode == MODE_I2C_READ) { - *read_byte = reply[1]; - } - return reply_bytes - 1; - case AUX_I2C_REPLY_NACK: - DRM_DEBUG_KMS("aux_i2c nack\n"); - return -EREMOTEIO; - case AUX_I2C_REPLY_DEFER: - DRM_DEBUG_KMS("aux_i2c defer\n"); - udelay(100); - break; - default: - DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); - return -EREMOTEIO; - } - } - - DRM_ERROR("too many retries, giving up\n"); - return -EREMOTEIO; -} - -static int -cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - int ret; - - DRM_DEBUG_KMS("i2c_init %s\n", name); - - intel_dp->algo.running = false; - intel_dp->algo.address = 0; - intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch; - - memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); - intel_dp->adapter.owner = THIS_MODULE; - intel_dp->adapter.class = I2C_CLASS_DDC; - strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); - intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; - intel_dp->adapter.algo_data = &intel_dp->algo; - intel_dp->adapter.dev.parent = &connector->base.kdev; - - if (is_edp(encoder)) - cdv_intel_edp_panel_vdd_on(encoder); - ret = i2c_dp_aux_add_bus(&intel_dp->adapter); - if (is_edp(encoder)) - cdv_intel_edp_panel_vdd_off(encoder); - - return ret; -} - -void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, - struct drm_display_mode *adjusted_mode) -{ - adjusted_mode->hdisplay = fixed_mode->hdisplay; - adjusted_mode->hsync_start = fixed_mode->hsync_start; - adjusted_mode->hsync_end = fixed_mode->hsync_end; - adjusted_mode->htotal = fixed_mode->htotal; - - adjusted_mode->vdisplay = fixed_mode->vdisplay; - adjusted_mode->vsync_start = fixed_mode->vsync_start; - adjusted_mode->vsync_end = fixed_mode->vsync_end; - adjusted_mode->vtotal = fixed_mode->vtotal; - - adjusted_mode->clock = fixed_mode->clock; - - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); -} - -static bool -cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_psb_private *dev_priv = encoder->dev->dev_private; - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - int lane_count, clock; - int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); - int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; - static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - int refclock = mode->clock; - int bpp = 24; - - if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) { - cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); - refclock = intel_dp->panel_fixed_mode->clock; - bpp = dev_priv->edp.bpp; - } - - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = max_clock; clock >= 0; clock--) { - int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count); - - if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) { - intel_dp->link_bw = bws[clock]; - intel_dp->lane_count = lane_count; - adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("Display port link bw %02x lane " - "count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - return true; - } - } - } - if (is_edp(intel_encoder)) { - /* okay we failed just pick the highest */ - intel_dp->lane_count = max_lane_count; - intel_dp->link_bw = bws[max_clock]; - adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("Force picking display port link bw %02x lane " - "count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - - return true; - } - return false; -} - -struct cdv_intel_dp_m_n { - uint32_t tu; - uint32_t gmch_m; - uint32_t gmch_n; - uint32_t link_m; - uint32_t link_n; -}; - -static void -cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den) -{ - /* - while (*num > 0xffffff || *den > 0xffffff) { - *num >>= 1; - *den >>= 1; - }*/ - uint64_t value, m; - m = *num; - value = m * (0x800000); - m = do_div(value, *den); - *num = value; - *den = 0x800000; -} - -static void -cdv_intel_dp_compute_m_n(int bpp, - int nlanes, - int pixel_clock, - int link_clock, - struct cdv_intel_dp_m_n *m_n) -{ - m_n->tu = 64; - m_n->gmch_m = (pixel_clock * bpp + 7) >> 3; - m_n->gmch_n = link_clock * nlanes; - cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); - m_n->link_m = pixel_clock; - m_n->link_n = link_clock; - cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n); -} - -void -cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct drm_device *dev = crtc->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); - int lane_count = 4, bpp = 24; - struct cdv_intel_dp_m_n m_n; - int pipe = intel_crtc->pipe; - - /* - * Find the lane count in the intel_encoder private - */ - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct psb_intel_encoder *intel_encoder; - struct cdv_intel_dp *intel_dp; - - if (encoder->crtc != crtc) - continue; - - intel_encoder = to_psb_intel_encoder(encoder); - intel_dp = intel_encoder->dev_priv; - if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { - lane_count = intel_dp->lane_count; - break; - } else if (is_edp(intel_encoder)) { - lane_count = intel_dp->lane_count; - bpp = dev_priv->edp.bpp; - break; - } - } - - /* - * Compute the GMCH and Link ratios. The '3' here is - * the number of bytes_per_pixel post-LUT, which we always - * set up for 8-bits of R/G/B, or 3 bytes total. 
- */ - cdv_intel_dp_compute_m_n(bpp, lane_count, - mode->clock, adjusted_mode->clock, &m_n); - - { - REG_WRITE(PIPE_GMCH_DATA_M(pipe), - ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | - m_n.gmch_m); - REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); - REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); - REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); - } -} - -static void -cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); - struct drm_crtc *crtc = encoder->crtc; - struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - struct drm_device *dev = encoder->dev; - - intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; - intel_dp->DP |= intel_dp->color_range; - - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - intel_dp->DP |= DP_SYNC_HS_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - intel_dp->DP |= DP_SYNC_VS_HIGH; - - intel_dp->DP |= DP_LINK_TRAIN_OFF; - - switch (intel_dp->lane_count) { - case 1: - intel_dp->DP |= DP_PORT_WIDTH_1; - break; - case 2: - intel_dp->DP |= DP_PORT_WIDTH_2; - break; - case 4: - intel_dp->DP |= DP_PORT_WIDTH_4; - break; - } - if (intel_dp->has_audio) - intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - - memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); - intel_dp->link_configuration[0] = intel_dp->link_bw; - intel_dp->link_configuration[1] = intel_dp->lane_count; - - /* - * Check for DPCD version > 1.1 and enhanced framing support - */ - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { - intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - intel_dp->DP |= DP_ENHANCED_FRAMING; - } - - /* CPT DP's pipe select is decided in TRANS_DP_CTL */ - if (intel_crtc->pipe == 1) - intel_dp->DP |= DP_PIPEB_SELECT; - - REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); - DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP); - if (is_edp(intel_encoder)) { - uint32_t pfit_control; - cdv_intel_edp_panel_on(intel_encoder); - - if (mode->hdisplay != adjusted_mode->hdisplay || - mode->vdisplay != adjusted_mode->vdisplay) - pfit_control = PFIT_ENABLE; - else - pfit_control = 0; - - pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; - - REG_WRITE(PFIT_CONTROL, pfit_control); - } -} - - -/* If the sink supports it, try to set the power state appropriately */ -static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - int ret, i; - - /* Should have a valid DPCD by this point */ - if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) - return; - - if (mode != DRM_MODE_DPMS_ON) { - ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER, - DP_SET_POWER_D3); - if (ret != 1) - DRM_DEBUG_DRIVER("failed to write sink power state\n"); - } else { - /* - * When turning on, we need to retry for 1ms to give the sink - * time to wake up. 
- */ - for (i = 0; i < 3; i++) { - ret = cdv_intel_dp_aux_native_write_1(encoder, - DP_SET_POWER, - DP_SET_POWER_D0); - if (ret == 1) - break; - udelay(1000); - } - } -} - -static void cdv_intel_dp_prepare(struct drm_encoder *encoder) -{ - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); - int edp = is_edp(intel_encoder); - - if (edp) { - cdv_intel_edp_backlight_off(intel_encoder); - cdv_intel_edp_panel_off(intel_encoder); - cdv_intel_edp_panel_vdd_on(intel_encoder); - } - /* Wake up the sink first */ - cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON); - cdv_intel_dp_link_down(intel_encoder); - if (edp) - cdv_intel_edp_panel_vdd_off(intel_encoder); -} - -static void cdv_intel_dp_commit(struct drm_encoder *encoder) -{ - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); - int edp = is_edp(intel_encoder); - - if (edp) - cdv_intel_edp_panel_on(intel_encoder); - cdv_intel_dp_start_link_train(intel_encoder); - cdv_intel_dp_complete_link_train(intel_encoder); - if (edp) - cdv_intel_edp_backlight_on(intel_encoder); -} - -static void -cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) -{ - struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - struct drm_device *dev = encoder->dev; - uint32_t dp_reg = REG_READ(intel_dp->output_reg); - int edp = is_edp(intel_encoder); - - if (mode != DRM_MODE_DPMS_ON) { - if (edp) { - cdv_intel_edp_backlight_off(intel_encoder); - cdv_intel_edp_panel_vdd_on(intel_encoder); - } - cdv_intel_dp_sink_dpms(intel_encoder, mode); - cdv_intel_dp_link_down(intel_encoder); - if (edp) { - cdv_intel_edp_panel_vdd_off(intel_encoder); - cdv_intel_edp_panel_off(intel_encoder); - } - } else { - if (edp) - cdv_intel_edp_panel_on(intel_encoder); - cdv_intel_dp_sink_dpms(intel_encoder, mode); - if (!(dp_reg & DP_PORT_EN)) { - cdv_intel_dp_start_link_train(intel_encoder); - cdv_intel_dp_complete_link_train(intel_encoder); - } - if (edp) - cdv_intel_edp_backlight_on(intel_encoder); - } -} - -/* - * Native read with retry for link status and receiver capability reads for - * cases where the sink may still be asleep. - */ -static bool -cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, - uint8_t *recv, int recv_bytes) -{ - int ret, i; - - /* - * Sinks are *supposed* to come up within 1ms from an off state, - * but we're also supposed to retry 3 times per the spec. - */ - for (i = 0; i < 3; i++) { - ret = cdv_intel_dp_aux_native_read(encoder, address, recv, - recv_bytes); - if (ret == recv_bytes) - return true; - udelay(1000); - } - - return false; -} - -/* - * Fetch AUX CH registers 0x202 - 0x207 which contain - * link status information - */ -static bool -cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - return cdv_intel_dp_aux_native_read_retry(encoder, - DP_LANE0_1_STATUS, - intel_dp->link_status, - DP_LINK_STATUS_SIZE); -} - -static uint8_t -cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], - int r) -{ - return link_status[r - DP_LANE0_1_STATUS]; -} - -static uint8_t -cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); - int s = ((lane & 1) ? 
- DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : - DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); - uint8_t l = cdv_intel_dp_link_status(link_status, i); - - return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; -} - -static uint8_t -cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); - int s = ((lane & 1) ? - DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : - DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); - uint8_t l = cdv_intel_dp_link_status(link_status, i); - - return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; -} - - -#if 0 -static char *voltage_names[] = { - "0.4V", "0.6V", "0.8V", "1.2V" -}; -static char *pre_emph_names[] = { - "0dB", "3.5dB", "6dB", "9.5dB" -}; -static char *link_train_names[] = { - "pattern 1", "pattern 2", "idle", "off" -}; -#endif - -#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 -/* -static uint8_t -cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing) -{ - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_400: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_600: - return DP_TRAIN_PRE_EMPHASIS_6; - case DP_TRAIN_VOLTAGE_SWING_800: - return DP_TRAIN_PRE_EMPHASIS_3_5; - case DP_TRAIN_VOLTAGE_SWING_1200: - default: - return DP_TRAIN_PRE_EMPHASIS_0; - } -} -*/ -static void -cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - uint8_t v = 0; - uint8_t p = 0; - int lane; - - for (lane = 0; lane < intel_dp->lane_count; lane++) { - uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane); - uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); - - if (this_v > v) - v = this_v; - if (this_p > p) - p = this_p; - } - - if (v >= CDV_DP_VOLTAGE_MAX) - v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; - - if (p == DP_TRAIN_PRE_EMPHASIS_MASK) - p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; - - for (lane = 0; lane < 4; lane++) - intel_dp->train_set[lane] = v | p; -} - - -static uint8_t -cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], - int lane) -{ - int i = DP_LANE0_1_STATUS + (lane >> 1); - int s = (lane & 1) * 4; - uint8_t l = cdv_intel_dp_link_status(link_status, i); - - return (l >> s) & 0xf; -} - -/* Check for clock recovery is done on all channels */ -static bool -cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) -{ - int lane; - uint8_t lane_status; - - for (lane = 0; lane < lane_count; lane++) { - lane_status = cdv_intel_get_lane_status(link_status, lane); - if ((lane_status & DP_LANE_CR_DONE) == 0) - return false; - } - return true; -} - -/* Check to see if channel eq is done on all channels */ -#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ - DP_LANE_CHANNEL_EQ_DONE|\ - DP_LANE_SYMBOL_LOCKED) -static bool -cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - uint8_t lane_align; - uint8_t lane_status; - int lane; - - lane_align = cdv_intel_dp_link_status(intel_dp->link_status, - DP_LANE_ALIGN_STATUS_UPDATED); - if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) - return false; - for (lane = 0; lane < intel_dp->lane_count; lane++) { - lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane); - if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) - return false; - } - return true; -} - -static bool -cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, - uint32_t dp_reg_value, - uint8_t 
dp_train_pat)
-{
-
- struct drm_device *dev = encoder->base.dev;
- int ret;
- struct cdv_intel_dp *intel_dp = encoder->dev_priv;
-
- REG_WRITE(intel_dp->output_reg, dp_reg_value);
- REG_READ(intel_dp->output_reg);
-
- ret = cdv_intel_dp_aux_native_write_1(encoder,
- DP_TRAINING_PATTERN_SET,
- dp_train_pat);
-
- if (ret != 1) {
- DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
- dp_train_pat);
- return false;
- }
-
- return true;
-}
-
-
-static bool
-cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
- uint8_t dp_train_pat)
-{
-
- int ret;
- struct cdv_intel_dp *intel_dp = encoder->dev_priv;
-
- ret = cdv_intel_dp_aux_native_write(encoder,
- DP_TRAINING_LANE0_SET,
- intel_dp->train_set,
- intel_dp->lane_count);
-
- if (ret != intel_dp->lane_count) {
- DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt= %d\n",
- intel_dp->train_set[0], intel_dp->lane_count);
- return false;
- }
- return true;
-}
-
-static void
-cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
-{
- struct drm_device *dev = encoder->base.dev;
- struct cdv_intel_dp *intel_dp = encoder->dev_priv;
- struct ddi_regoff *ddi_reg;
- int vswing, premph, index;
-
- if (intel_dp->output_reg == DP_B)
- ddi_reg = &ddi_DP_train_table[0];
- else
- ddi_reg = &ddi_DP_train_table[1];
-
- vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
- premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT;
-
- if (vswing + premph > 3)
- return;
-#ifdef CDV_FAST_LINK_TRAIN
- return;
-#endif
- DRM_DEBUG_KMS("Test2\n");
- //return ;
- cdv_sb_reset(dev);
- /* ;Swing voltage programming
- ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
- cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
-
- /* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
- cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
-
- /* ;gfx_dpio_set_reg(0x8148, 0x55338954)
- * The VSwing_PreEmph table is also considered based on the vswing/premph
- */
- index = (vswing + premph) * 2;
- if (premph == 1 && vswing == 1) {
- cdv_sb_write(dev, ddi_reg->VSwing2, 0x055738954);
- } else
- cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
-
- /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
- if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
- cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
- else
- cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
-
- /* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
- /* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
-
- /* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
- cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
-
- /* ;Pre emphasis programming
- * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
- */
- cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
-
- /* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
- index = 2 * premph + 1;
- cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
- return;
-}
-
-
-/* Enable corresponding port and start training pattern 1 */
-static void
-cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
-{
- struct drm_device *dev = encoder->base.dev;
- struct cdv_intel_dp *intel_dp = encoder->dev_priv;
- int i;
- uint8_t voltage;
- bool clock_recovery = false;
- int tries;
- u32 reg;
- uint32_t DP = intel_dp->DP;
-
- DP |= DP_PORT_EN;
- DP &= ~DP_LINK_TRAIN_MASK;
-
- reg = DP;
- reg |= DP_LINK_TRAIN_PAT_1;
- /* Enable output, wait for it to become active */
- REG_WRITE(intel_dp->output_reg, reg);
- REG_READ(intel_dp->output_reg);
- psb_intel_wait_for_vblank(dev);
-
- DRM_DEBUG_KMS("Link config\n");
- /* Write the link configuration data */
- cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- 2);
-
- memset(intel_dp->train_set, 0, 4);
- voltage = 0;
- tries = 0;
- clock_recovery = false;
-
- DRM_DEBUG_KMS("Start train\n");
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
-
- for (;;) {
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
- DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
- intel_dp->train_set[0],
- intel_dp->link_configuration[0],
- intel_dp->link_configuration[1]);
-
- if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
- DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
- }
- cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
- /* Set training pattern 1 */
-
- cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
-
- udelay(200);
- if (!cdv_intel_dp_get_link_status(encoder))
- break;
-
- DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
- intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
- intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
-
- if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("PT1 train is done\n");
- clock_recovery = true;
- break;
- }
-
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
- break;
- if (i == intel_dp->lane_count)
- break;
-
- /* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
- break;
- } else
- tries = 0;
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
-
- /* Compute new intel_dp->train_set as requested by target */
- cdv_intel_get_adjust_train(encoder);
-
- }
-
- if (!clock_recovery) {
- DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n", intel_dp->train_set[0]);
- }
-
- intel_dp->DP = DP;
-}
-
-static void
-cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
-{
- struct drm_device *dev = encoder->base.dev;
- struct cdv_intel_dp *intel_dp = encoder->dev_priv;
- bool channel_eq = false;
- int tries, cr_tries;
- u32 reg;
- uint32_t DP = intel_dp->DP;
-
- /* channel equalization */
- tries = 0;
- cr_tries = 0;
- channel_eq = false;
-
- DRM_DEBUG_KMS("\n");
- reg = DP | DP_LINK_TRAIN_PAT_2;
-
- for (;;) {
-
- DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
- intel_dp->train_set[0],
- intel_dp->link_configuration[0],
- intel_dp->link_configuration[1]);
- /* channel eq pattern */
-
- if (!cdv_intel_dp_set_link_train(encoder, reg,
- DP_TRAINING_PATTERN_2)) {
- DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
- }
- /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
-
- if (cr_tries > 5) {
- DRM_ERROR("failed to train DP, aborting\n");
- cdv_intel_dp_link_down(encoder);
- break;
- }
-
- cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
-
- cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
-
- udelay(1000);
- if (!cdv_intel_dp_get_link_status(encoder))
- break;
-
- DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
- intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
- intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
-
- /* Make sure clock is still ok */
- if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
-
cdv_intel_dp_start_link_train(encoder); - cr_tries++; - continue; - } - - if (cdv_intel_channel_eq_ok(encoder)) { - DRM_DEBUG_KMS("PT2 train is done\n"); - channel_eq = true; - break; - } - - /* Try 5 times, then try clock recovery if that fails */ - if (tries > 5) { - cdv_intel_dp_link_down(encoder); - cdv_intel_dp_start_link_train(encoder); - tries = 0; - cr_tries++; - continue; - } - - /* Compute new intel_dp->train_set as requested by target */ - cdv_intel_get_adjust_train(encoder); - ++tries; - - } - - reg = DP | DP_LINK_TRAIN_OFF; - - REG_WRITE(intel_dp->output_reg, reg); - REG_READ(intel_dp->output_reg); - cdv_intel_dp_aux_native_write_1(encoder, - DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); -} - -static void -cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - uint32_t DP = intel_dp->DP; - - if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) - return; - - DRM_DEBUG_KMS("\n"); - - - { - DP &= ~DP_LINK_TRAIN_MASK; - REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); - } - REG_READ(intel_dp->output_reg); - - msleep(17); - - REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); - REG_READ(intel_dp->output_reg); -} - -static enum drm_connector_status -cdv_dp_detect(struct psb_intel_encoder *encoder) -{ - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - enum drm_connector_status status; - - status = connector_status_disconnected; - if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) - { - if (intel_dp->dpcd[DP_DPCD_REV] != 0) - status = connector_status_connected; - } - if (status == connector_status_connected) - DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", - intel_dp->dpcd[0], intel_dp->dpcd[1], - intel_dp->dpcd[2], intel_dp->dpcd[3]); - return status; -} - -/** - * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. - * - * \return true if DP port is connected. - * \return false if DP port is disconnected. 
- */ -static enum drm_connector_status -cdv_intel_dp_detect(struct drm_connector *connector, bool force) -{ - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - enum drm_connector_status status; - struct edid *edid = NULL; - int edp = is_edp(encoder); - - intel_dp->has_audio = false; - - if (edp) - cdv_intel_edp_panel_vdd_on(encoder); - status = cdv_dp_detect(encoder); - if (status != connector_status_connected) { - if (edp) - cdv_intel_edp_panel_vdd_off(encoder); - return status; - } - - if (intel_dp->force_audio) { - intel_dp->has_audio = intel_dp->force_audio > 0; - } else { - edid = drm_get_edid(connector, &intel_dp->adapter); - if (edid) { - intel_dp->has_audio = drm_detect_monitor_audio(edid); - kfree(edid); - } - } - if (edp) - cdv_intel_edp_panel_vdd_off(encoder); - - return connector_status_connected; -} - -static int cdv_intel_dp_get_modes(struct drm_connector *connector) -{ - struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); - struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; - struct edid *edid = NULL; - int ret = 0; - int edp = is_edp(intel_encoder); - - - edid = drm_get_edid(connector, &intel_dp->adapter); - if (edid) { - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - if (is_edp(intel_encoder)) { - struct drm_device *dev = connector->dev; - struct drm_psb_private *dev_priv = dev->dev_private; - - cdv_intel_edp_panel_vdd_off(intel_encoder); - if (ret) { - if (edp && !intel_dp->panel_fixed_mode) { - struct drm_display_mode *newmode; - list_for_each_entry(newmode, &connector->probed_modes, - head) { - if (newmode->type & DRM_MODE_TYPE_PREFERRED) { - intel_dp->panel_fixed_mode = - drm_mode_duplicate(dev, newmode); - break; - } - } - } - - return ret; - } - if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) { - intel_dp->panel_fixed_mode = - drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); - if (intel_dp->panel_fixed_mode) { - intel_dp->panel_fixed_mode->type |= - DRM_MODE_TYPE_PREFERRED; - } - } - if (intel_dp->panel_fixed_mode != NULL) { - struct drm_display_mode *mode; - mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); - drm_mode_probed_add(connector, mode); - return 1; - } - } - - return ret; -} - -static bool -cdv_intel_dp_detect_audio(struct drm_connector *connector) -{ - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - struct edid *edid; - bool has_audio = false; - int edp = is_edp(encoder); - - if (edp) - cdv_intel_edp_panel_vdd_on(encoder); - - edid = drm_get_edid(connector, &intel_dp->adapter); - if (edid) { - has_audio = drm_detect_monitor_audio(edid); - kfree(edid); - } - if (edp) - cdv_intel_edp_panel_vdd_off(encoder); - - return has_audio; -} - -static int -cdv_intel_dp_set_property(struct drm_connector *connector, - struct drm_property *property, - uint64_t val) -{ - struct drm_psb_private *dev_priv = connector->dev->dev_private; - struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); - struct cdv_intel_dp *intel_dp = encoder->dev_priv; - int ret; - - ret = drm_connector_property_set_value(connector, property, val); - if (ret) - return ret; - - if (property == dev_priv->force_audio_property) { - int i = val; - bool has_audio; - - if (i == intel_dp->force_audio) - return 0; - - intel_dp->force_audio = i; - - if (i == 0) - has_audio = 
cdv_intel_dp_detect_audio(connector);
- else
- has_audio = i > 0;
-
- if (has_audio == intel_dp->has_audio)
- return 0;
-
- intel_dp->has_audio = has_audio;
- goto done;
- }
-
- if (property == dev_priv->broadcast_rgb_property) {
- if (val == !!intel_dp->color_range)
- return 0;
-
- intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
- goto done;
- }
-
- return -EINVAL;
-
-done:
- if (encoder->base.crtc) {
- struct drm_crtc *crtc = encoder->base.crtc;
- drm_crtc_helper_set_mode(crtc, &crtc->mode,
- crtc->x, crtc->y,
- crtc->fb);
- }
-
- return 0;
-}
-
-static void
-cdv_intel_dp_destroy(struct drm_connector *connector)
-{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
-
- if (is_edp(psb_intel_encoder)) {
- /* cdv_intel_panel_destroy_backlight(connector->dev); */
- if (intel_dp->panel_fixed_mode) {
- kfree(intel_dp->panel_fixed_mode);
- intel_dp->panel_fixed_mode = NULL;
- }
- }
- i2c_del_adapter(&intel_dp->adapter);
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
-static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
- .dpms = cdv_intel_dp_dpms,
- .mode_fixup = cdv_intel_dp_mode_fixup,
- .prepare = cdv_intel_dp_prepare,
- .mode_set = cdv_intel_dp_mode_set,
- .commit = cdv_intel_dp_commit,
-};
-
-static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .detect = cdv_intel_dp_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = cdv_intel_dp_set_property,
- .destroy = cdv_intel_dp_destroy,
-};
-
-static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
- .get_modes = cdv_intel_dp_get_modes,
- .mode_valid = cdv_intel_dp_mode_valid,
- .best_encoder = psb_intel_best_encoder,
-};
-
-static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
- .destroy = cdv_intel_dp_encoder_destroy,
-};
-
-
-static void cdv_intel_dp_add_properties(struct drm_connector *connector)
-{
- cdv_intel_attach_force_audio_property(connector);
- cdv_intel_attach_broadcast_rgb_property(connector);
-}
-
-/* check the VBT to see whether the eDP is on DP-D port */
-static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct child_device_config *p_child;
- int i;
-
- if (!dev_priv->child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->child_dev_num; i++) {
- p_child = dev_priv->child_dev + i;
-
- if (p_child->dvo_port == PORT_IDPC &&
- p_child->device_type == DEVICE_TYPE_eDP)
- return true;
- }
- return false;
-}
-
-/* Cedarview display clock gating
-
- We need this disabled to get correct behaviour while enabling
- DP/eDP.
TODO - investigate if we can turn it back to normality - after enabling */ -static void cdv_disable_intel_clock_gating(struct drm_device *dev) -{ - u32 reg_value; - reg_value = REG_READ(DSPCLK_GATE_D); - - reg_value |= (DPUNIT_PIPEB_GATE_DISABLE | - DPUNIT_PIPEA_GATE_DISABLE | - DPCUNIT_CLOCK_GATE_DISABLE | - DPLSUNIT_CLOCK_GATE_DISABLE | - DPOUNIT_CLOCK_GATE_DISABLE | - DPIOUNIT_CLOCK_GATE_DISABLE); - - REG_WRITE(DSPCLK_GATE_D, reg_value); - - udelay(500); -} - -void -cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) -{ - struct psb_intel_encoder *psb_intel_encoder; - struct psb_intel_connector *psb_intel_connector; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct cdv_intel_dp *intel_dp; - const char *name = NULL; - int type = DRM_MODE_CONNECTOR_DisplayPort; - - psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); - if (!psb_intel_encoder) - return; - psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); - if (!psb_intel_connector) - goto err_connector; - intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); - if (!intel_dp) - goto err_priv; - - if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) - type = DRM_MODE_CONNECTOR_eDP; - - connector = &psb_intel_connector->base; - encoder = &psb_intel_encoder->base; - - drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); - drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - - psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); - - if (type == DRM_MODE_CONNECTOR_DisplayPort) - psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; - else - psb_intel_encoder->type = INTEL_OUTPUT_EDP; - - - psb_intel_encoder->dev_priv=intel_dp; - intel_dp->encoder = psb_intel_encoder; - intel_dp->output_reg = output_reg; - - drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs); - drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs); - - connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->interlace_allowed = false; - connector->doublescan_allowed = false; - - drm_sysfs_connector_add(connector); - - /* Set up the DDC bus. 
*/
- switch (output_reg) {
- case DP_B:
- name = "DPDDC-B";
- psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
- break;
- case DP_C:
- name = "DPDDC-C";
- psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
- break;
- }
-
- cdv_disable_intel_clock_gating(dev);
-
- cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
- /* FIXME:fail check */
- cdv_intel_dp_add_properties(connector);
-
- if (is_edp(psb_intel_encoder)) {
- int ret;
- struct edp_power_seq cur;
- u32 pp_on, pp_off, pp_div;
- u32 pwm_ctrl;
-
- pp_on = REG_READ(PP_CONTROL);
- pp_on &= ~PANEL_UNLOCK_MASK;
- pp_on |= PANEL_UNLOCK_REGS;
-
- REG_WRITE(PP_CONTROL, pp_on);
-
- pwm_ctrl = REG_READ(BLC_PWM_CTL2);
- pwm_ctrl |= PWM_PIPE_B;
- REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
-
- pp_on = REG_READ(PP_ON_DELAYS);
- pp_off = REG_READ(PP_OFF_DELAYS);
- pp_div = REG_READ(PP_DIVISOR);
-
- /* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
-
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
-
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
-
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT);
-
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
-
- intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
- intel_dp->backlight_on_delay = cur.t8 / 10;
- intel_dp->backlight_off_delay = cur.t9 / 10;
- intel_dp->panel_power_down_delay = cur.t10 / 10;
- intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
-
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
-
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
-
- cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
- ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
- intel_dp->dpcd,
- sizeof(intel_dp->dpcd));
- cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
- if (ret == 0) {
- /* if this fails, presume the device is a ghost */
- DRM_INFO("failed to retrieve link info, disabling eDP\n");
- cdv_intel_dp_encoder_destroy(encoder);
- cdv_intel_dp_destroy(connector);
- goto err_priv;
- } else {
- DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
- intel_dp->dpcd[0], intel_dp->dpcd[1],
- intel_dp->dpcd[2], intel_dp->dpcd[3]);
-
- }
- /* The CDV reference driver moves panel backlight setup into the displays that
- have a backlight: this is a good idea and one we should probably adopt, however
- we need to migrate all the drivers before we can do that */
- /*cdv_intel_panel_setup_backlight(dev); */
- }
- return;
-
-err_priv:
- kfree(psb_intel_connector);
-err_connector:
- kfree(psb_intel_encoder);
-}
diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..a86f87b9ddde 100644
--- a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -139,6 +139,8 @@ static enum drm_connector_status cdv_hdmi_detect(
 {
 struct psb_intel_encoder *psb_intel_encoder =
 psb_intel_attached_encoder(connector);
+ struct psb_intel_connector *psb_intel_connector =
+ to_psb_intel_connector(connector);
 struct
mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; @@ -155,6 +157,8 @@ static enum drm_connector_status cdv_hdmi_detect( hdmi_priv->has_hdmi_audio = drm_detect_monitor_audio(edid); } + + psb_intel_connector->base.display_info.raw_edid = NULL; kfree(edid); } return status; @@ -348,11 +352,9 @@ void cdv_hdmi_init(struct drm_device *dev, switch (reg) { case SDVOB: ddc_bus = GPIOE; - psb_intel_encoder->ddi_select = DDI0_SELECT; break; case SDVOC: ddc_bus = GPIOD; - psb_intel_encoder->ddi_select = DDI1_SELECT; break; default: DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c index b362dd39bf5a..c7f9468b74ba 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -506,8 +506,16 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, property, value)) return -1; - else - gma_backlight_set(encoder->dev, value); + else { +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct drm_psb_private *dev_priv = + encoder->dev->dev_private; + struct backlight_device *bd = + dev_priv->backlight_device; + bd->props.brightness = value; + backlight_update_status(bd); +#endif + } } else if (!strcmp(property->name, "DPMS") && encoder) { struct drm_encoder_helper_funcs *helpers = encoder->helper_private; diff --git a/trunk/drivers/gpu/drm/gma500/framebuffer.c b/trunk/drivers/gpu/drm/gma500/framebuffer.c index 884ba73ac6ce..5732b5702e1c 100644 --- a/trunk/drivers/gpu/drm/gma500/framebuffer.c +++ b/trunk/drivers/gpu/drm/gma500/framebuffer.c @@ -764,13 +764,6 @@ static void psb_setup_outputs(struct drm_device *dev) crtc_mask = dev_priv->ops->hdmi_mask; clone_mask = (1 << INTEL_OUTPUT_HDMI); break; - case INTEL_OUTPUT_DISPLAYPORT: - crtc_mask = (1 << 0) | (1 << 1); - clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); - break; - case INTEL_OUTPUT_EDP: - crtc_mask = (1 << 1); - clone_mask = (1 << INTEL_OUTPUT_EDP); } encoder->possible_crtcs = crtc_mask; encoder->possible_clones = diff --git a/trunk/drivers/gpu/drm/gma500/gem.c b/trunk/drivers/gpu/drm/gma500/gem.c index df20546a2a34..fc7d144bc2d3 100644 --- a/trunk/drivers/gpu/drm/gma500/gem.c +++ b/trunk/drivers/gpu/drm/gma500/gem.c @@ -36,12 +36,7 @@ int psb_gem_init_object(struct drm_gem_object *obj) void psb_gem_free_object(struct drm_gem_object *obj) { struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); - - /* Remove the list map if one is present */ - if (obj->map_list.map) - drm_gem_free_mmap_offset(obj); - drm_gem_object_release(obj); - + drm_gem_object_release_wrap(obj); /* This must occur last as it frees up the memory of the GEM object */ psb_gtt_free_range(obj->dev, gtt); } @@ -82,7 +77,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, /* Make it mmapable */ if (!obj->map_list.map) { - ret = drm_gem_create_mmap_offset(obj); + ret = gem_create_mmap_offset(obj); if (ret) goto out; } diff --git a/trunk/drivers/gpu/drm/gma500/gem_glue.c b/trunk/drivers/gpu/drm/gma500/gem_glue.c new file mode 100644 index 000000000000..3c17634f6061 --- /dev/null +++ b/trunk/drivers/gpu/drm/gma500/gem_glue.c @@ -0,0 +1,90 @@ +/************************************************************************** + * Copyright (c) 2011, Intel Corporation. + * All Rights Reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gem_glue.h"
+
+void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+{
+ /* Remove the list map if one is present */
+ if (obj->map_list.map) {
+ struct drm_gem_mm *mm = obj->dev->mm_private;
+ struct drm_map_list *list = &obj->map_list;
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+ }
+ drm_gem_object_release(obj);
+}
+
+/**
+ * gem_create_mmap_offset - invent an mmap offset
+ * @obj: our object
+ *
+ * Standard implementation of offset generation for mmap as is
+ * duplicated in several drivers. This belongs in GEM.
+ */
+int gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret;
+
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (list->map == NULL)
+ return -ENOMEM;
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->size;
+ map->handle = obj;
+
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->size / PAGE_SIZE, 0, 0);
+ if (!list->file_offset_node) {
+ dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+ obj->name);
+ ret = -ENOSPC;
+ goto free_it;
+ }
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto free_it;
+ }
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ dev_err(dev->dev, "failed to add to map hash\n");
+ goto free_mm;
+ }
+ return 0;
+
+free_mm:
+ drm_mm_put_block(list->file_offset_node);
+free_it:
+ kfree(list->map);
+ list->map = NULL;
+ return ret;
+}
diff --git a/trunk/drivers/gpu/drm/gma500/gem_glue.h b/trunk/drivers/gpu/drm/gma500/gem_glue.h
new file mode 100644
index 000000000000..ce5ce30f74db
--- /dev/null
+++ b/trunk/drivers/gpu/drm/gma500/gem_glue.h
@@ -0,0 +1,2 @@
+extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/trunk/drivers/gpu/drm/gma500/intel_bios.c b/trunk/drivers/gpu/drm/gma500/intel_bios.c
index 4fb79cf00ed8..8d7caf0f363e 100644
--- a/trunk/drivers/gpu/drm/gma500/intel_bios.c
+++ b/trunk/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,98 +54,6 @@ static void *find_section(struct bdb_header *bdb, int section_id)
 return NULL;
 }
-static void
-parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
-{
- struct bdb_edp *edp;
- struct edp_power_seq *edp_pps;
- struct edp_link_params *edp_link_params;
- uint8_t panel_type;
-
- edp = find_section(bdb, BDB_EDP);
-
- dev_priv->edp.bpp =
18; - if (!edp) { - if (dev_priv->edp.support) { - DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n", - dev_priv->edp.bpp); - } - return; - } - - panel_type = dev_priv->panel_type; - switch ((edp->color_depth >> (panel_type * 2)) & 3) { - case EDP_18BPP: - dev_priv->edp.bpp = 18; - break; - case EDP_24BPP: - dev_priv->edp.bpp = 24; - break; - case EDP_30BPP: - dev_priv->edp.bpp = 30; - break; - } - - /* Get the eDP sequencing and link info */ - edp_pps = &edp->power_seqs[panel_type]; - edp_link_params = &edp->link_params[panel_type]; - - dev_priv->edp.pps = *edp_pps; - - DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, - dev_priv->edp.pps.t9, dev_priv->edp.pps.t10, - dev_priv->edp.pps.t11_t12); - - dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : - DP_LINK_BW_1_62; - switch (edp_link_params->lanes) { - case 0: - dev_priv->edp.lanes = 1; - break; - case 1: - dev_priv->edp.lanes = 2; - break; - case 3: - default: - dev_priv->edp.lanes = 4; - break; - } - DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n", - dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp); - - switch (edp_link_params->preemphasis) { - case 0: - dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; - break; - case 1: - dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; - break; - case 2: - dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; - break; - case 3: - dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; - break; - } - switch (edp_link_params->vswing) { - case 0: - dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; - break; - case 1: - dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; - break; - case 2: - dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; - break; - case 3: - dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; - break; - } - DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n", - dev_priv->edp.vswing, dev_priv->edp.preemphasis); -} - static u16 get_blocksize(void *p) { @@ -246,8 +154,6 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv, return; dev_priv->lvds_dither = lvds_options->pixel_dither; - dev_priv->panel_type = lvds_options->panel_type; - if (lvds_options->panel_type == 0xff) return; @@ -434,9 +340,6 @@ parse_driver_features(struct drm_psb_private *dev_priv, if (!driver) return; - if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) - dev_priv->edp.support = 1; - /* This bit means to use 96Mhz for DPLL_A or not */ if (driver->primary_lfp_id) dev_priv->dplla_96mhz = true; @@ -534,9 +437,6 @@ int psb_intel_init_bios(struct drm_device *dev) size_t size; int i; - - dev_priv->panel_type = 0xff; - /* XXX Should this validation be moved to intel_opregion.c? 
*/
 if (dev_priv->opregion.vbt) {
 struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -577,7 +477,6 @@ int psb_intel_init_bios(struct drm_device *dev)
 parse_sdvo_device_mapping(dev_priv, bdb);
 parse_device_mapping(dev_priv, bdb);
 parse_backlight_data(dev_priv, bdb);
- parse_edp(dev_priv, bdb);
 if (bios)
 pci_unmap_rom(pdev, bios);
diff --git a/trunk/drivers/gpu/drm/gma500/intel_bios.h b/trunk/drivers/gpu/drm/gma500/intel_bios.h
index c6267c98c9e7..2e95523b84b1 100644
--- a/trunk/drivers/gpu/drm/gma500/intel_bios.h
+++ b/trunk/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,7 +23,6 @@ #define _I830_BIOS_H_
 #include <drm/drmP.h>
-#include <drm/drm_dp_helper.h>
 struct vbt_header {
 u8 signature[20]; /**< Always starts with 'VBT$' */
@@ -94,7 +93,6 @@ struct vbios_data {
 #define BDB_SDVO_LVDS_PNP_IDS 24
 #define BDB_SDVO_LVDS_POWER_SEQ 25
 #define BDB_TV_OPTIONS 26
-#define BDB_EDP 27
 #define BDB_LVDS_OPTIONS 40
 #define BDB_LVDS_LFP_DATA_PTRS 41
 #define BDB_LVDS_LFP_DATA 42
@@ -393,11 +391,6 @@ struct bdb_sdvo_lvds_options {
 u8 panel_misc_bits_4;
 } __attribute__((packed));
-#define BDB_DRIVER_FEATURE_NO_LVDS 0
-#define BDB_DRIVER_FEATURE_INT_LVDS 1
-#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
-
 struct bdb_driver_features {
 u8 boot_dev_algorithm:1;
 u8 block_display_switch:1;
@@ -438,45 +431,6 @@ struct bdb_driver_features {
 u8 custom_vbt_version;
 } __attribute__((packed));
-#define EDP_18BPP 0
-#define EDP_24BPP 1
-#define EDP_30BPP 2
-#define EDP_RATE_1_62 0
-#define EDP_RATE_2_7 1
-#define EDP_LANE_1 0
-#define EDP_LANE_2 1
-#define EDP_LANE_4 3
-#define EDP_PREEMPHASIS_NONE 0
-#define EDP_PREEMPHASIS_3_5dB 1
-#define EDP_PREEMPHASIS_6dB 2
-#define EDP_PREEMPHASIS_9_5dB 3
-#define EDP_VSWING_0_4V 0
-#define EDP_VSWING_0_6V 1
-#define EDP_VSWING_0_8V 2
-#define EDP_VSWING_1_2V 3
-
-struct edp_power_seq {
- u16 t1_t3;
- u16 t8;
- u16 t9;
- u16 t10;
- u16 t11_t12;
-} __attribute__ ((packed));
-
-struct edp_link_params {
- u8 rate:4;
- u8 lanes:4;
- u8 preemphasis:4;
- u8 vswing:4;
-} __attribute__ ((packed));
-
-struct bdb_edp {
- struct edp_power_seq power_seqs[16];
- u32 color_depth;
- u32 sdrrs_msa_timing_delay;
- struct edp_link_params link_params[16];
-} __attribute__ ((packed));
-
 extern int psb_intel_init_bios(struct drm_device *dev);
 extern void psb_intel_destroy_bios(struct drm_device *dev);
diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e1..5675d93b4205 100644
--- a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,8 +299,17 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
 if (drm_connector_property_set_value(connector,
 property,
 value))
 goto set_prop_error;
- else
- gma_backlight_set(encoder->dev, value);
+ else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct backlight_device *psb_bd;
+
+ psb_bd = mdfld_get_backlight_device();
+ if (psb_bd) {
+ psb_bd->props.brightness = value;
+ mdfld_set_brightness(psb_bd);
+ }
+#endif
+ }
 }
 set_prop_done:
 return 0;
diff --git a/trunk/drivers/gpu/drm/gma500/mid_bios.c b/trunk/drivers/gpu/drm/gma500/mid_bios.c
index 850cd3fbb969..b2a790bd9899 100644
--- a/trunk/drivers/gpu/drm/gma500/mid_bios.c
+++ b/trunk/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
 dev_priv->platform_rev_id);
 }
-struct mid_vbt_header {
+struct vbt_header {
 u32 signature;
 u8 revision;
 } __packed;
 /* The same for r0 and r1 */
 struct vbt_r0 {
- struct
mid_vbt_header vbt_header;
+ struct vbt_header vbt_header;
 u8 size;
 u8 checksum;
 } __packed;
 struct vbt_r10 {
- struct mid_vbt_header vbt_header;
+ struct vbt_header vbt_header;
 u8 checksum;
 u16 size;
 u8 panel_count;
@@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
 struct drm_device *dev = dev_priv->dev;
 u32 addr;
 u8 __iomem *vbt_virtual;
- struct mid_vbt_header vbt_header;
+ struct vbt_header vbt_header;
 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
 int ret = -1;
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f35..2eb3dc4e9c9b 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,6 +252,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
 if (edid) {
 drm_mode_connector_update_edid_property(connector, edid);
 ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
 }
 /*
diff --git a/trunk/drivers/gpu/drm/gma500/opregion.c b/trunk/drivers/gpu/drm/gma500/opregion.c
index ad0d6de938f3..c430bd424681 100644
--- a/trunk/drivers/gpu/drm/gma500/opregion.c
+++ b/trunk/drivers/gpu/drm/gma500/opregion.c
@@ -166,7 +166,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
 int max = bd->props.max_brightness;
- gma_backlight_set(dev, bclp * max / 255);
+ bd->props.brightness = bclp * max / 255;
+ backlight_update_status(bd);
 }
 asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/trunk/drivers/gpu/drm/gma500/psb_device.c b/trunk/drivers/gpu/drm/gma500/psb_device.c
index f1432f096e58..5971bc82b765 100644
--- a/trunk/drivers/gpu/drm/gma500/psb_device.c
+++ b/trunk/drivers/gpu/drm/gma500/psb_device.c
@@ -290,7 +290,6 @@ static void psb_get_core_freq(struct drm_device *dev)
 case 6:
 case 7:
 dev_priv->core_freq = 266;
- break;
 default:
 dev_priv->core_freq = 0;
 }
diff --git a/trunk/drivers/gpu/drm/gma500/psb_drv.h b/trunk/drivers/gpu/drm/gma500/psb_drv.h
index 223ff5b1b5c4..1bd115ecefe1 100644
--- a/trunk/drivers/gpu/drm/gma500/psb_drv.h
+++ b/trunk/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
 #include <drm/drmP.h>
 #include "drm_global.h"
+#include "gem_glue.h"
 #include "gma_drm.h"
 #include "psb_reg.h"
 #include "psb_intel_drv.h"
-#include "intel_bios.h"
 #include "gtt.h"
 #include "power.h"
 #include "opregion.h"
@@ -613,8 +613,6 @@ struct drm_psb_private {
 */
 struct backlight_device *backlight_device;
 struct drm_property *backlight_property;
- bool backlight_enabled;
- int backlight_level;
 uint32_t blc_adj1;
 uint32_t blc_adj2;
@@ -642,19 +640,6 @@ struct drm_psb_private {
 int mdfld_panel_id;
 bool dplla_96mhz; /* DPLL data from the VBT */
-
- struct {
- int rate;
- int lanes;
- int preemphasis;
- int vswing;
-
- bool initialized;
- bool support;
- int bpp;
- struct edp_power_seq pps;
- } edp;
- uint8_t panel_type;
 };
@@ -811,9 +796,6 @@ extern int psb_fbdev_init(struct drm_device *dev);
 /* backlight.c */
 int gma_backlight_init(struct drm_device *dev);
 void gma_backlight_exit(struct drm_device *dev);
-void gma_backlight_disable(struct drm_device *dev);
-void gma_backlight_enable(struct drm_device *dev);
-void gma_backlight_set(struct drm_device *dev, int v);
 /* oaktrail_crtc.c */
 extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h b/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h
index 90f2d11e686b..ebe1a28f60e1 100644
---
a/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -29,6 +29,10 @@ * Display related stuff */ +/* store information about an Ixxx DVO */ +/* The i830->i865 use multiple DVOs with multiple i2cs */ +/* the i915, i945 have a single sDVO i2c bus - which is different */ +#define MAX_OUTPUTS 6 /* maximum connectors per crtcs in the mode set */ #define INTELFB_CONN_LIMIT 4 @@ -65,8 +69,6 @@ #define INTEL_OUTPUT_HDMI 6 #define INTEL_OUTPUT_MIPI 7 #define INTEL_OUTPUT_MIPI2 8 -#define INTEL_OUTPUT_DISPLAYPORT 9 -#define INTEL_OUTPUT_EDP 10 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -131,11 +133,6 @@ struct psb_intel_encoder { void (*hot_plug)(struct psb_intel_encoder *); int crtc_mask; int clone_mask; - u32 ddi_select; /* Channel info */ -#define DDI0_SELECT 0x01 -#define DDI1_SELECT 0x02 -#define DP_MASK 0x8000 -#define DDI_MASK 0x03 void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */ /* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's @@ -193,6 +190,7 @@ struct psb_intel_crtc { u32 mode_flags; bool active; + bool crtc_enable; /* Saved Crtc HW states */ struct psb_intel_crtc_state *crtc_state; @@ -287,20 +285,4 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); extern void gma_intel_teardown_gmbus(struct drm_device *dev); -/* DP support */ -extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg); -extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - -extern void psb_intel_attach_force_audio_property(struct drm_connector *connector); -extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector); - -extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val); -extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val); -extern void cdv_sb_reset(struct drm_device *dev); - -extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector); -extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector); - #endif /* __INTEL_DRV_H__ */ diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c index 2a4c3a9e33e3..37adc9edf974 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -630,8 +630,17 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, property, value)) goto set_prop_error; - else - gma_backlight_set(encoder->dev, value); + else { +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct drm_psb_private *devp = + encoder->dev->dev_private; + struct backlight_device *bd = devp->backlight_device; + if (bd) { + bd->props.brightness = value; + backlight_update_status(bd); + } +#endif + } } else if (!strcmp(property->name, "DPMS")) { struct drm_encoder_helper_funcs *hfuncs = encoder->helper_private; diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h b/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h index d914719c4b60..8e8c8efb0a89 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h @@ -173,46 +173,15 @@ #define PP_SEQUENCE_ON (1 << 28) #define PP_SEQUENCE_OFF (2 << 28) #define PP_SEQUENCE_MASK 0x30000000 -#define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) -#define 
PP_SEQUENCE_STATE_MASK 0x0000000f - #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) -#define PANEL_UNLOCK_REGS (0xabcd << 16) -#define PANEL_UNLOCK_MASK (0xffff << 16) -#define EDP_FORCE_VDD (1 << 3) -#define EDP_BLC_ENABLE (1 << 2) -#define PANEL_POWER_RESET (1 << 1) -#define PANEL_POWER_OFF (0 << 0) -#define PANEL_POWER_ON (1 << 0) - -/* Poulsbo/Oaktrail */ + #define LVDSPP_ON 0x61208 #define LVDSPP_OFF 0x6120c #define PP_CYCLE 0x61210 -/* Cedartrail */ #define PP_ON_DELAYS 0x61208 /* Cedartrail */ -#define PANEL_PORT_SELECT_MASK (3 << 30) -#define PANEL_PORT_SELECT_LVDS (0 << 30) -#define PANEL_PORT_SELECT_EDP (1 << 30) -#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) -#define PANEL_POWER_UP_DELAY_SHIFT 16 -#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) -#define PANEL_LIGHT_ON_DELAY_SHIFT 0 - #define PP_OFF_DELAYS 0x6120c /* Cedartrail */ -#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) -#define PANEL_POWER_DOWN_DELAY_SHIFT 16 -#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) -#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 - -#define PP_DIVISOR 0x61210 /* Cedartrail */ -#define PP_REFERENCE_DIVIDER_MASK (0xffffff00) -#define PP_REFERENCE_DIVIDER_SHIFT 8 -#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) -#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 #define PFIT_CONTROL 0x61230 #define PFIT_ENABLE (1 << 31) @@ -1313,10 +1282,6 @@ No status bits are changed. # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */ # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) -# define DPUNIT_PIPEB_GATE_DISABLE (1 << 30) -# define DPUNIT_PIPEA_GATE_DISABLE (1 << 25) -# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) -# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) #define RAMCLK_GATE_D 0x6210 @@ -1382,165 +1347,5 @@ No status bits are changed. #define LANE_PLL_ENABLE (0x3 << 20) #define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21)) -#define DP_B 0x64100 -#define DP_C 0x64200 - -#define DP_PORT_EN (1 << 31) -#define DP_PIPEB_SELECT (1 << 30) -#define DP_PIPE_MASK (1 << 30) - -/* Link training mode - select a suitable mode for each stage */ -#define DP_LINK_TRAIN_PAT_1 (0 << 28) -#define DP_LINK_TRAIN_PAT_2 (1 << 28) -#define DP_LINK_TRAIN_PAT_IDLE (2 << 28) -#define DP_LINK_TRAIN_OFF (3 << 28) -#define DP_LINK_TRAIN_MASK (3 << 28) -#define DP_LINK_TRAIN_SHIFT 28 - -/* Signal voltages. These are mostly controlled by the other end */ -#define DP_VOLTAGE_0_4 (0 << 25) -#define DP_VOLTAGE_0_6 (1 << 25) -#define DP_VOLTAGE_0_8 (2 << 25) -#define DP_VOLTAGE_1_2 (3 << 25) -#define DP_VOLTAGE_MASK (7 << 25) -#define DP_VOLTAGE_SHIFT 25 - -/* Signal pre-emphasis levels, like voltages, the other end tells us what - * they want - */ -#define DP_PRE_EMPHASIS_0 (0 << 22) -#define DP_PRE_EMPHASIS_3_5 (1 << 22) -#define DP_PRE_EMPHASIS_6 (2 << 22) -#define DP_PRE_EMPHASIS_9_5 (3 << 22) -#define DP_PRE_EMPHASIS_MASK (7 << 22) -#define DP_PRE_EMPHASIS_SHIFT 22 - -/* How many wires to use. 
I guess 3 was too hard */ -#define DP_PORT_WIDTH_1 (0 << 19) -#define DP_PORT_WIDTH_2 (1 << 19) -#define DP_PORT_WIDTH_4 (3 << 19) -#define DP_PORT_WIDTH_MASK (7 << 19) - -/* Mystic DPCD version 1.1 special mode */ -#define DP_ENHANCED_FRAMING (1 << 18) - -/** locked once port is enabled */ -#define DP_PORT_REVERSAL (1 << 15) - -/** sends the clock on lane 15 of the PEG for debug */ -#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) - -#define DP_SCRAMBLING_DISABLE (1 << 12) -#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) - -/** limit RGB values to avoid confusing TVs */ -#define DP_COLOR_RANGE_16_235 (1 << 8) - -/** Turn on the audio link */ -#define DP_AUDIO_OUTPUT_ENABLE (1 << 6) - -/** vs and hs sync polarity */ -#define DP_SYNC_VS_HIGH (1 << 4) -#define DP_SYNC_HS_HIGH (1 << 3) - -/** A fantasy */ -#define DP_DETECTED (1 << 2) - -/** The aux channel provides a way to talk to the - * signal sink for DDC etc. Max packet size supported - * is 20 bytes in each direction, hence the 5 fixed - * data registers - */ -#define DPB_AUX_CH_CTL 0x64110 -#define DPB_AUX_CH_DATA1 0x64114 -#define DPB_AUX_CH_DATA2 0x64118 -#define DPB_AUX_CH_DATA3 0x6411c -#define DPB_AUX_CH_DATA4 0x64120 -#define DPB_AUX_CH_DATA5 0x64124 - -#define DPC_AUX_CH_CTL 0x64210 -#define DPC_AUX_CH_DATA1 0x64214 -#define DPC_AUX_CH_DATA2 0x64218 -#define DPC_AUX_CH_DATA3 0x6421c -#define DPC_AUX_CH_DATA4 0x64220 -#define DPC_AUX_CH_DATA5 0x64224 - -#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) -#define DP_AUX_CH_CTL_DONE (1 << 30) -#define DP_AUX_CH_CTL_INTERRUPT (1 << 29) -#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) -#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) -#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) -#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) -#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) -#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 -#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) -#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 -#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) -#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) -#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) -#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) -#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) -#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) -#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 - -/* - * Computing GMCH M and N values for the Display Port link - * - * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes - * - * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) - * - * The GMCH value is used internally - * - * bytes_per_pixel is the number of bytes coming out of the plane, - * which is after the LUTs, so we want the bytes for our color format. - * For our current usage, this is always 3, one byte for R, G and B. - */ - -#define _PIPEA_GMCH_DATA_M 0x70050 -#define _PIPEB_GMCH_DATA_M 0x71050 - -/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ -#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) -#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25 - -#define PIPE_GMCH_DATA_M_MASK (0xffffff) - -#define _PIPEA_GMCH_DATA_N 0x70054 -#define _PIPEB_GMCH_DATA_N 0x71054 -#define PIPE_GMCH_DATA_N_MASK (0xffffff) - -/* - * Computing Link M and N values for the Display Port link - * - * Link M / N = pixel_clock / ls_clk - * - * (the DP spec calls pixel_clock the 'strm_clk') - * - * The Link value is transmitted in the Main Stream - * Attributes and VB-ID. 
- */ - -#define _PIPEA_DP_LINK_M 0x70060 -#define _PIPEB_DP_LINK_M 0x71060 -#define PIPEA_DP_LINK_M_MASK (0xffffff) - -#define _PIPEA_DP_LINK_N 0x70064 -#define _PIPEB_DP_LINK_N 0x71064 -#define PIPEA_DP_LINK_N_MASK (0xffffff) - -#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) -#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) -#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) -#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) - -#define PIPE_BPC_MASK (7 << 5) -#define PIPE_8BPC (0 << 5) -#define PIPE_10BPC (1 << 5) -#define PIPE_6BPC (2 << 5) #endif diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c index d35f93ba3a85..0466c7b985f8 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1292,6 +1292,7 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector) return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); + return NULL; } static enum drm_connector_status @@ -1342,6 +1343,7 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) } } else status = connector_status_disconnected; + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1402,6 +1404,7 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force) ret = connector_status_disconnected; else ret = connector_status_connected; + connector->display_info.raw_edid = NULL; kfree(edid); } else ret = connector_status_connected; @@ -1450,6 +1453,7 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector) drm_add_edid_modes(connector, edid); } + connector->display_info.raw_edid = NULL; kfree(edid); } } diff --git a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c index 599099fe76e3..36d952280c50 100644 --- a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c @@ -427,10 +427,15 @@ static int ch7006_remove(struct i2c_client *client) return 0; } -static int ch7006_resume(struct device *dev) +static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg) { - struct i2c_client *client = to_i2c_client(dev); + ch7006_dbg(client, "\n"); + + return 0; +} +static int ch7006_resume(struct i2c_client *client) +{ ch7006_dbg(client, "\n"); ch7006_write(client, 0x3d, 0x0); @@ -494,18 +499,15 @@ static struct i2c_device_id ch7006_ids[] = { }; MODULE_DEVICE_TABLE(i2c, ch7006_ids); -static const struct dev_pm_ops ch7006_pm_ops = { - .resume = ch7006_resume, -}; - static struct drm_i2c_encoder_driver ch7006_driver = { .i2c_driver = { .probe = ch7006_probe, .remove = ch7006_remove, + .suspend = ch7006_suspend, + .resume = ch7006_resume, .driver = { .name = "ch7006", - .pm = &ch7006_pm_ops, }, .id_table = ch7006_ids, diff --git a/trunk/drivers/gpu/drm/i915/Makefile b/trunk/drivers/gpu/drm/i915/Makefile index 0f2c5493242b..b0bacdba6d7e 100644 --- a/trunk/drivers/gpu/drm/i915/Makefile +++ b/trunk/drivers/gpu/drm/i915/Makefile @@ -40,7 +40,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ dvo_ivch.o \ dvo_tfp410.o \ dvo_sil164.o \ - dvo_ns2501.o \ i915_gem_dmabuf.o i915-$(CONFIG_COMPAT) += i915_ioc32.o diff --git a/trunk/drivers/gpu/drm/i915/dvo.h b/trunk/drivers/gpu/drm/i915/dvo.h index 74b5efccfdb1..58914691a77b 100644 --- a/trunk/drivers/gpu/drm/i915/dvo.h +++ b/trunk/drivers/gpu/drm/i915/dvo.h @@ -58,12 +58,13 @@ struct intel_dvo_dev_ops { void (*create_resources)(struct 
intel_dvo_device *dvo); /* - * Turn on/off output. + * Turn on/off output or set intermediate power levels if available. * - * Because none of our dvo drivers support an intermediate power levels, - * we don't expose this in the interfac. + * Unsupported intermediate modes drop to the lower power setting. + * If the mode is DPMSModeOff, the output must be disabled, + * as the DPLL may be disabled afterwards. */ - void (*dpms)(struct intel_dvo_device *dvo, bool enable); + void (*dpms)(struct intel_dvo_device *dvo, int mode); /* * Callback for testing a video mode for a given output. @@ -114,12 +115,6 @@ struct intel_dvo_dev_ops { */ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); - /* - * Probe the current hw status, returning true if the connected output - * is active. - */ - bool (*get_hw_state)(struct intel_dvo_device *dev); - /** * Query the device for the modes it provides. * @@ -145,6 +140,5 @@ extern struct intel_dvo_dev_ops ch7xxx_ops; extern struct intel_dvo_dev_ops ivch_ops; extern struct intel_dvo_dev_ops tfp410_ops; extern struct intel_dvo_dev_ops ch7017_ops; -extern struct intel_dvo_dev_ops ns2501_ops; #endif /* _INTEL_DVO_H */ diff --git a/trunk/drivers/gpu/drm/i915/dvo_ch7017.c b/trunk/drivers/gpu/drm/i915/dvo_ch7017.c index 86b27d1d90c2..1ca799a1e1fc 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/trunk/drivers/gpu/drm/i915/dvo_ch7017.c @@ -163,7 +163,7 @@ struct ch7017_priv { }; static void ch7017_dump_regs(struct intel_dvo_device *dvo); -static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); +static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) { @@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | (mode->hdisplay & 0x0700) >> 8; - ch7017_dpms(dvo, false); + ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, horizontal_active_pixel_input); ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, @@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, } /* set the CH7017 power state */ -static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) +static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) { uint8_t val; @@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) CH7017_DAC3_POWER_DOWN | CH7017_TV_POWER_DOWN_EN); - if (enable) { + if (mode == DRM_MODE_DPMS_ON) { /* Turn on the LVDS */ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, val & ~CH7017_LVDS_POWER_DOWN_EN); @@ -359,18 +359,6 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) msleep(20); } -static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) -{ - uint8_t val; - - ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); - - if (val & CH7017_LVDS_POWER_DOWN_EN) - return false; - else - return true; -} - static void ch7017_dump_regs(struct intel_dvo_device *dvo) { uint8_t val; @@ -408,7 +396,6 @@ struct intel_dvo_dev_ops ch7017_ops = { .mode_valid = ch7017_mode_valid, .mode_set = ch7017_mode_set, .dpms = ch7017_dpms, - .get_hw_state = ch7017_get_hw_state, .dump_regs = ch7017_dump_regs, .destroy = ch7017_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c b/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c index 38f3a6cb8c7d..4a036600e806 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -289,26 +289,14 @@ static void 
ch7xxx_mode_set(struct intel_dvo_device *dvo, } /* set the CH7xxx power state */ -static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) +static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) { - if (enable) + if (mode == DRM_MODE_DPMS_ON) ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); else ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); } -static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo) -{ - u8 val; - - ch7xxx_readb(dvo, CH7xxx_PM, &val); - - if (val & CH7xxx_PM_FPD) - return false; - else - return true; -} - static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) { int i; @@ -338,7 +326,6 @@ struct intel_dvo_dev_ops ch7xxx_ops = { .mode_valid = ch7xxx_mode_valid, .mode_set = ch7xxx_mode_set, .dpms = ch7xxx_dpms, - .get_hw_state = ch7xxx_get_hw_state, .dump_regs = ch7xxx_dump_regs, .destroy = ch7xxx_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/dvo_ivch.c b/trunk/drivers/gpu/drm/i915/dvo_ivch.c index baaf65bf0bdd..04f2893d5e3c 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_ivch.c +++ b/trunk/drivers/gpu/drm/i915/dvo_ivch.c @@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, } /** Sets the power state of the panel connected to the ivch */ -static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) +static void ivch_dpms(struct intel_dvo_device *dvo, int mode) { int i; uint16_t vr01, vr30, backlight; @@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) if (!ivch_read(dvo, VR01, &vr01)) return; - if (enable) + if (mode == DRM_MODE_DPMS_ON) backlight = 1; else backlight = 0; ivch_write(dvo, VR80, backlight); - if (enable) + if (mode == DRM_MODE_DPMS_ON) vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; else vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); @@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) if (!ivch_read(dvo, VR30, &vr30)) break; - if (((vr30 & VR30_PANEL_ON) != 0) == enable) + if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) break; udelay(1000); } @@ -323,20 +323,6 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) udelay(16 * 1000); } -static bool ivch_get_hw_state(struct intel_dvo_device *dvo) -{ - uint16_t vr01; - - /* Set the new power state of the panel. */ - if (!ivch_read(dvo, VR01, &vr01)) - return false; - - if (vr01 & VR01_LCD_ENABLE) - return true; - else - return false; -} - static void ivch_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -427,7 +413,6 @@ static void ivch_destroy(struct intel_dvo_device *dvo) struct intel_dvo_dev_ops ivch_ops = { .init = ivch_init, .dpms = ivch_dpms, - .get_hw_state = ivch_get_hw_state, .mode_valid = ivch_mode_valid, .mode_set = ivch_mode_set, .detect = ivch_detect, diff --git a/trunk/drivers/gpu/drm/i915/dvo_ns2501.c b/trunk/drivers/gpu/drm/i915/dvo_ns2501.c deleted file mode 100644 index c4a255be6979..000000000000 --- a/trunk/drivers/gpu/drm/i915/dvo_ns2501.c +++ /dev/null @@ -1,588 +0,0 @@ -/* - * - * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter - * - * All Rights Reserved. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include "dvo.h" -#include "i915_reg.h" -#include "i915_drv.h" - -#define NS2501_VID 0x1305 -#define NS2501_DID 0x6726 - -#define NS2501_VID_LO 0x00 -#define NS2501_VID_HI 0x01 -#define NS2501_DID_LO 0x02 -#define NS2501_DID_HI 0x03 -#define NS2501_REV 0x04 -#define NS2501_RSVD 0x05 -#define NS2501_FREQ_LO 0x06 -#define NS2501_FREQ_HI 0x07 - -#define NS2501_REG8 0x08 -#define NS2501_8_VEN (1<<5) -#define NS2501_8_HEN (1<<4) -#define NS2501_8_DSEL (1<<3) -#define NS2501_8_BPAS (1<<2) -#define NS2501_8_RSVD (1<<1) -#define NS2501_8_PD (1<<0) - -#define NS2501_REG9 0x09 -#define NS2501_9_VLOW (1<<7) -#define NS2501_9_MSEL_MASK (0x7<<4) -#define NS2501_9_TSEL (1<<3) -#define NS2501_9_RSEN (1<<2) -#define NS2501_9_RSVD (1<<1) -#define NS2501_9_MDI (1<<0) - -#define NS2501_REGC 0x0c - -struct ns2501_priv { - //I2CDevRec d; - bool quiet; - int reg_8_shadow; - int reg_8_set; - // Shadow registers for i915 - int dvoc; - int pll_a; - int srcdim; - int fw_blc; -}; - -#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) - -/* - * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens - * laptops does not react on the i2c bus unless - * both the PLL is running and the display is configured in its native - * resolution. - * This function forces the DVO on, and stores the registers it touches. - * Afterwards, registers are restored to regular values. - * - * This is pretty much a hack, though it works. - * Without that, ns2501_readb and ns2501_writeb fail - * when switching the resolution. - */ - -static void enable_dvo(struct intel_dvo_device *dvo) -{ - struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); - struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_gmbus *bus = container_of(adapter, - struct intel_gmbus, - adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; - - DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__); - - ns->dvoc = I915_READ(DVO_C); - ns->pll_a = I915_READ(_DPLL_A); - ns->srcdim = I915_READ(DVOC_SRCDIM); - ns->fw_blc = I915_READ(FW_BLC); - - I915_WRITE(DVOC, 0x10004084); - I915_WRITE(_DPLL_A, 0xd0820000); - I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 - I915_WRITE(FW_BLC, 0x1080304); - - I915_WRITE(DVOC, 0x90004084); -} - -/* - * Restore the I915 registers modified by the above - * trigger function. 
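- *
- * [Editor's note, not part of the original file] The callers below
- * pair these two helpers in a retry loop: on a failed i2c access they
- * force the DVO on, remember that the saved registers must be put
- * back, and try again, as ns2501_mode_set() and ns2501_dpms() do
- * (reg and val here stand in for any register write):
- *
- *	do {
- *		ok = ns2501_writeb(dvo, reg, val);
- *		if (!ok) {
- *			if (restore)
- *				restore_dvo(dvo);
- *			enable_dvo(dvo);
- *			restore = true;
- *		}
- *	} while (!ok);
- *	if (restore)
- *		restore_dvo(dvo);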
- */ -static void restore_dvo(struct intel_dvo_device *dvo) -{ - struct i2c_adapter *adapter = dvo->i2c_bus; - struct intel_gmbus *bus = container_of(adapter, - struct intel_gmbus, - adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; - struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); - - I915_WRITE(DVOC, ns->dvoc); - I915_WRITE(_DPLL_A, ns->pll_a); - I915_WRITE(DVOC_SRCDIM, ns->srcdim); - I915_WRITE(FW_BLC, ns->fw_blc); -} - -/* -** Read a register from the ns2501. -** Returns true if successful, false otherwise. -** If it returns false, it might be wise to enable the -** DVO with the above function. -*/ -static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) -{ - struct ns2501_priv *ns = dvo->dev_priv; - struct i2c_adapter *adapter = dvo->i2c_bus; - u8 out_buf[2]; - u8 in_buf[2]; - - struct i2c_msg msgs[] = { - { - .addr = dvo->slave_addr, - .flags = 0, - .len = 1, - .buf = out_buf, - }, - { - .addr = dvo->slave_addr, - .flags = I2C_M_RD, - .len = 1, - .buf = in_buf, - } - }; - - out_buf[0] = addr; - out_buf[1] = 0; - - if (i2c_transfer(adapter, msgs, 2) == 2) { - *ch = in_buf[0]; - return true; - }; - - if (!ns->quiet) { - DRM_DEBUG_KMS - ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, - adapter->name, dvo->slave_addr); - } - - return false; -} - -/* -** Write a register to the ns2501. -** Returns true if successful, false otherwise. -** If it returns false, it might be wise to enable the -** DVO with the above function. -*/ -static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) -{ - struct ns2501_priv *ns = dvo->dev_priv; - struct i2c_adapter *adapter = dvo->i2c_bus; - uint8_t out_buf[2]; - - struct i2c_msg msg = { - .addr = dvo->slave_addr, - .flags = 0, - .len = 2, - .buf = out_buf, - }; - - out_buf[0] = addr; - out_buf[1] = ch; - - if (i2c_transfer(adapter, &msg, 1) == 1) { - return true; - } - - if (!ns->quiet) { - DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", - addr, adapter->name, dvo->slave_addr); - } - - return false; -} - -/* National Semiconductor 2501 driver for chip on i2c bus - * scan for the chip on the bus. - * Hope the VBIOS initialized the PLL correctly so we can - * talk to it. If not, it will not be seen and not detected. - * Bummer! - */ -static bool ns2501_init(struct intel_dvo_device *dvo, - struct i2c_adapter *adapter) -{ - /* this will detect the NS2501 chip on the specified i2c bus */ - struct ns2501_priv *ns; - unsigned char ch; - - ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); - if (ns == NULL) - return false; - - dvo->i2c_bus = adapter; - dvo->dev_priv = ns; - ns->quiet = true; - - if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) - goto out; - - if (ch != (NS2501_VID & 0xff)) { - DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", - ch, adapter->name, dvo->slave_addr); - goto out; - } - - if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) - goto out; - - if (ch != (NS2501_DID & 0xff)) { - DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", - ch, adapter->name, dvo->slave_addr); - goto out; - } - ns->quiet = false; - ns->reg_8_set = 0; - ns->reg_8_shadow = - NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN; - - DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); - return true; - -out: - kfree(ns); - return false; -} - -static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) -{ - /* - * This is a Laptop display, it doesn't have hotplugging. 
- * Even if not, the detection bit of the 2501 is unreliable as - * it only works for some display types. - * It is even more unreliable as the PLL must be active for - * allowing reading from the chiop. - */ - return connector_status_connected; -} - -static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, - struct drm_display_mode *mode) -{ - DRM_DEBUG_KMS - ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", - __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, - mode->vtotal); - - /* - * Currently, these are all the modes I have data from. - * More might exist. Unclear how to find the native resolution - * of the panel in here so we could always accept it - * by disabling the scaler. - */ - if ((mode->hdisplay == 800 && mode->vdisplay == 600) || - (mode->hdisplay == 640 && mode->vdisplay == 480) || - (mode->hdisplay == 1024 && mode->vdisplay == 768)) { - return MODE_OK; - } else { - return MODE_ONE_SIZE; /* Is this a reasonable error? */ - } -} - -static void ns2501_mode_set(struct intel_dvo_device *dvo, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - bool ok; - bool restore = false; - struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); - - DRM_DEBUG_KMS - ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", - __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, - mode->vtotal); - - /* - * Where do I find the native resolution for which scaling is not required??? - * - * First trigger the DVO on as otherwise the chip does not appear on the i2c - * bus. - */ - do { - ok = true; - - if (mode->hdisplay == 800 && mode->vdisplay == 600) { - /* mode 277 */ - ns->reg_8_shadow &= ~NS2501_8_BPAS; - DRM_DEBUG_KMS("%s: switching to 800x600\n", - __FUNCTION__); - - /* - * No, I do not know where this data comes from. - * It is just what the video bios left in the DVO, so - * I'm just copying it here over. - * This also means that I cannot support any other modes - * except the ones supported by the bios. - */ - ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works. - ok &= ns2501_writeb(dvo, 0x1b, 0x19); - ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer - ok &= ns2501_writeb(dvo, 0x1d, 0x02); - - ok &= ns2501_writeb(dvo, 0x34, 0x03); - ok &= ns2501_writeb(dvo, 0x35, 0xff); - - ok &= ns2501_writeb(dvo, 0x80, 0x27); - ok &= ns2501_writeb(dvo, 0x81, 0x03); - ok &= ns2501_writeb(dvo, 0x82, 0x41); - ok &= ns2501_writeb(dvo, 0x83, 0x05); - - ok &= ns2501_writeb(dvo, 0x8d, 0x02); - ok &= ns2501_writeb(dvo, 0x8e, 0x04); - ok &= ns2501_writeb(dvo, 0x8f, 0x00); - - ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */ - ok &= ns2501_writeb(dvo, 0x91, 0x07); - ok &= ns2501_writeb(dvo, 0x94, 0x00); - ok &= ns2501_writeb(dvo, 0x95, 0x00); - - ok &= ns2501_writeb(dvo, 0x96, 0x00); - - ok &= ns2501_writeb(dvo, 0x99, 0x00); - ok &= ns2501_writeb(dvo, 0x9a, 0x88); - - ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */ - ok &= ns2501_writeb(dvo, 0x9d, 0x00); - ok &= ns2501_writeb(dvo, 0x9e, 0x25); - ok &= ns2501_writeb(dvo, 0x9f, 0x03); - - ok &= ns2501_writeb(dvo, 0xa4, 0x80); - - ok &= ns2501_writeb(dvo, 0xb6, 0x00); - - ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */ - ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ - - ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? 
*/ - ok &= ns2501_writeb(dvo, 0xc1, 0xd7); - - ok &= ns2501_writeb(dvo, 0xc2, 0x00); - ok &= ns2501_writeb(dvo, 0xc3, 0xf8); - - ok &= ns2501_writeb(dvo, 0xc4, 0x03); - ok &= ns2501_writeb(dvo, 0xc5, 0x1a); - - ok &= ns2501_writeb(dvo, 0xc6, 0x00); - ok &= ns2501_writeb(dvo, 0xc7, 0x73); - ok &= ns2501_writeb(dvo, 0xc8, 0x02); - - } else if (mode->hdisplay == 640 && mode->vdisplay == 480) { - /* mode 274 */ - DRM_DEBUG_KMS("%s: switching to 640x480\n", - __FUNCTION__); - /* - * No, I do not know where this data comes from. - * It is just what the video bios left in the DVO, so - * I'm just copying it here over. - * This also means that I cannot support any other modes - * except the ones supported by the bios. - */ - ns->reg_8_shadow &= ~NS2501_8_BPAS; - - ok &= ns2501_writeb(dvo, 0x11, 0xa0); - ok &= ns2501_writeb(dvo, 0x1b, 0x11); - ok &= ns2501_writeb(dvo, 0x1c, 0x54); - ok &= ns2501_writeb(dvo, 0x1d, 0x03); - - ok &= ns2501_writeb(dvo, 0x34, 0x03); - ok &= ns2501_writeb(dvo, 0x35, 0xff); - - ok &= ns2501_writeb(dvo, 0x80, 0xff); - ok &= ns2501_writeb(dvo, 0x81, 0x07); - ok &= ns2501_writeb(dvo, 0x82, 0x3d); - ok &= ns2501_writeb(dvo, 0x83, 0x05); - - ok &= ns2501_writeb(dvo, 0x8d, 0x02); - ok &= ns2501_writeb(dvo, 0x8e, 0x10); - ok &= ns2501_writeb(dvo, 0x8f, 0x00); - - ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */ - ok &= ns2501_writeb(dvo, 0x91, 0x07); - ok &= ns2501_writeb(dvo, 0x94, 0x00); - ok &= ns2501_writeb(dvo, 0x95, 0x00); - - ok &= ns2501_writeb(dvo, 0x96, 0x05); - - ok &= ns2501_writeb(dvo, 0x99, 0x00); - ok &= ns2501_writeb(dvo, 0x9a, 0x88); - - ok &= ns2501_writeb(dvo, 0x9c, 0x24); - ok &= ns2501_writeb(dvo, 0x9d, 0x00); - ok &= ns2501_writeb(dvo, 0x9e, 0x25); - ok &= ns2501_writeb(dvo, 0x9f, 0x03); - - ok &= ns2501_writeb(dvo, 0xa4, 0x84); - - ok &= ns2501_writeb(dvo, 0xb6, 0x09); - - ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */ - ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ - - ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */ - ok &= ns2501_writeb(dvo, 0xc1, 0x90); - - ok &= ns2501_writeb(dvo, 0xc2, 0x00); - ok &= ns2501_writeb(dvo, 0xc3, 0x0f); - - ok &= ns2501_writeb(dvo, 0xc4, 0x03); - ok &= ns2501_writeb(dvo, 0xc5, 0x16); - - ok &= ns2501_writeb(dvo, 0xc6, 0x00); - ok &= ns2501_writeb(dvo, 0xc7, 0x02); - ok &= ns2501_writeb(dvo, 0xc8, 0x02); - - } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) { - /* mode 280 */ - DRM_DEBUG_KMS("%s: switching to 1024x768\n", - __FUNCTION__); - /* - * This might or might not work, actually. I'm silently - * assuming here that the native panel resolution is - * 1024x768. If not, then this leaves the scaler disabled - * generating a picture that is likely not the expected. - * - * Problem is that I do not know where to take the panel - * dimensions from. - * - * Enable the bypass, scaling not required. - * - * The scaler registers are irrelevant here.... - * - */ - ns->reg_8_shadow |= NS2501_8_BPAS; - ok &= ns2501_writeb(dvo, 0x37, 0x44); - } else { - /* - * Data not known. Bummer! - * Hopefully, the code should not go here - * as mode_OK delivered no other modes. - */ - ns->reg_8_shadow |= NS2501_8_BPAS; - } - ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow); - - if (!ok) { - if (restore) - restore_dvo(dvo); - enable_dvo(dvo); - restore = true; - } - } while (!ok); - /* - * Restore the old i915 registers before - * forcing the ns2501 on. 
- */ - if (restore) - restore_dvo(dvo); -} - -/* set the NS2501 power state */ -static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) -{ - unsigned char ch; - - if (!ns2501_readb(dvo, NS2501_REG8, &ch)) - return false; - - if (ch & NS2501_8_PD) - return true; - else - return false; -} - -/* set the NS2501 power state */ -static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) -{ - bool ok; - bool restore = false; - struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); - unsigned char ch; - - DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n", - __FUNCTION__, enable); - - ch = ns->reg_8_shadow; - - if (enable) - ch |= NS2501_8_PD; - else - ch &= ~NS2501_8_PD; - - if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) { - ns->reg_8_set = 1; - ns->reg_8_shadow = ch; - - do { - ok = true; - ok &= ns2501_writeb(dvo, NS2501_REG8, ch); - ok &= - ns2501_writeb(dvo, 0x34, - enable ? 0x03 : 0x00); - ok &= - ns2501_writeb(dvo, 0x35, - enable ? 0xff : 0x00); - if (!ok) { - if (restore) - restore_dvo(dvo); - enable_dvo(dvo); - restore = true; - } - } while (!ok); - - if (restore) - restore_dvo(dvo); - } -} - -static void ns2501_dump_regs(struct intel_dvo_device *dvo) -{ - uint8_t val; - - ns2501_readb(dvo, NS2501_FREQ_LO, &val); - DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val); - ns2501_readb(dvo, NS2501_FREQ_HI, &val); - DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val); - ns2501_readb(dvo, NS2501_REG8, &val); - DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val); - ns2501_readb(dvo, NS2501_REG9, &val); - DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val); - ns2501_readb(dvo, NS2501_REGC, &val); - DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val); -} - -static void ns2501_destroy(struct intel_dvo_device *dvo) -{ - struct ns2501_priv *ns = dvo->dev_priv; - - if (ns) { - kfree(ns); - dvo->dev_priv = NULL; - } -} - -struct intel_dvo_dev_ops ns2501_ops = { - .init = ns2501_init, - .detect = ns2501_detect, - .mode_valid = ns2501_mode_valid, - .mode_set = ns2501_mode_set, - .dpms = ns2501_dpms, - .get_hw_state = ns2501_get_hw_state, - .dump_regs = ns2501_dump_regs, - .destroy = ns2501_destroy, -}; diff --git a/trunk/drivers/gpu/drm/i915/dvo_sil164.c b/trunk/drivers/gpu/drm/i915/dvo_sil164.c index 4debd32e3e4c..a0b13a6f619d 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_sil164.c +++ b/trunk/drivers/gpu/drm/i915/dvo_sil164.c @@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo, } /* set the SIL164 power state */ -static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) +static void sil164_dpms(struct intel_dvo_device *dvo, int mode) { int ret; unsigned char ch; @@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) if (ret == false) return; - if (enable) + if (mode == DRM_MODE_DPMS_ON) ch |= SIL164_8_PD; else ch &= ~SIL164_8_PD; @@ -226,21 +226,6 @@ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) return; } -static bool sil164_get_hw_state(struct intel_dvo_device *dvo) -{ - int ret; - unsigned char ch; - - ret = sil164_readb(dvo, SIL164_REG8, &ch); - if (ret == false) - return false; - - if (ch & SIL164_8_PD) - return true; - else - return false; -} - static void sil164_dump_regs(struct intel_dvo_device *dvo) { uint8_t val; @@ -273,7 +258,6 @@ struct intel_dvo_dev_ops sil164_ops = { .mode_valid = sil164_mode_valid, .mode_set = sil164_mode_set, .dpms = sil164_dpms, - .get_hw_state = sil164_get_hw_state, .dump_regs = sil164_dump_regs, .destroy = sil164_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/dvo_tfp410.c 
b/trunk/drivers/gpu/drm/i915/dvo_tfp410.c index e17f1b07e915..aa2cd3ec54aa 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/trunk/drivers/gpu/drm/i915/dvo_tfp410.c @@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo, } /* set the tfp410 power state */ -static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) +static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) { uint8_t ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; - if (enable) + if (mode == DRM_MODE_DPMS_ON) ctl1 |= TFP410_CTL_1_PD; else ctl1 &= ~TFP410_CTL_1_PD; @@ -249,19 +249,6 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) tfp410_writeb(dvo, TFP410_CTL_1, ctl1); } -static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) -{ - uint8_t ctl1; - - if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) - return false; - - if (ctl1 & TFP410_CTL_1_PD) - return true; - else - return false; -} - static void tfp410_dump_regs(struct intel_dvo_device *dvo) { uint8_t val, val2; @@ -312,7 +299,6 @@ struct intel_dvo_dev_ops tfp410_ops = { .mode_valid = tfp410_mode_valid, .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, - .get_hw_state = tfp410_get_hw_state, .dump_regs = tfp410_dump_regs, .destroy = tfp410_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c index 7a4b3f3aad3f..359f6e8b9b00 100644 --- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c @@ -44,6 +44,7 @@ enum { ACTIVE_LIST, + FLUSHING_LIST, INACTIVE_LIST, PINNED_LIST, }; @@ -61,11 +62,28 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "gen: %d\n", info->gen); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); -#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) -#define DEV_INFO_SEP ; - DEV_INFO_FLAGS; -#undef DEV_INFO_FLAG -#undef DEV_INFO_SEP +#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) + B(is_mobile); + B(is_i85x); + B(is_i915g); + B(is_i945gm); + B(is_g33); + B(need_gfx_hws); + B(is_g4x); + B(is_pineview); + B(is_broadwater); + B(is_crestline); + B(has_fbc); + B(has_pipe_cxsr); + B(has_hotplug); + B(cursor_needs_physical); + B(has_overlay); + B(overlay_needs_physical); + B(supports_tv); + B(has_bsd_ring); + B(has_blt_ring); + B(has_llc); +#undef B return 0; } @@ -103,23 +121,20 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", + seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain, - obj->last_read_seqno, - obj->last_write_seqno, + obj->last_rendering_seqno, obj->last_fenced_seqno, cache_level_str(obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); - if (obj->pin_count) - seq_printf(m, " (pinned x %d)", obj->pin_count); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) @@ -162,6 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; + case FLUSHING_LIST: + seq_printf(m, "Flushing:\n"); + head = &dev_priv->mm.flushing_list; + break; default: mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -199,8 +218,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 count, mappable_count, purgeable_count; - size_t size, mappable_size, purgeable_size; + u32 count, mappable_count; + size_t size, mappable_size; struct drm_i915_gem_object *obj; int ret; @@ -213,12 +232,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->mm.object_memory); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.bound_list, gtt_list); + count_objects(&dev_priv->mm.gtt_list, gtt_list); seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; count_objects(&dev_priv->mm.active_list, mm_list); + count_objects(&dev_priv->mm.flushing_list, mm_list); seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); @@ -227,16 +247,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); - size = count = purgeable_size = purgeable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { - size += obj->base.size, ++count; - if (obj->madv == I915_MADV_DONTNEED) - purgeable_size += obj->base.size, ++purgeable_count; - } - seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); - size = count = mappable_size = mappable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { if (obj->fault_mappable) { size += obj->gtt_space->size; ++count; @@ -245,13 +257,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) mappable_size += obj->gtt_space->size; ++mappable_count; } - if (obj->madv == I915_MADV_DONTNEED) { - purgeable_size += obj->base.size; - ++purgeable_count; - } } - seq_printf(m, "%u purgeable objects, %zu bytes\n", - purgeable_count, purgeable_size); seq_printf(m, "%u pinned mappable objects, %zu bytes\n", mappable_count, mappable_size); seq_printf(m, "%u fault mappable objects, %zu bytes\n", @@ -280,7 +286,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) return ret; total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { if (list == PINNED_LIST && obj->pin_count == 0) continue; @@ -353,22 +359,40 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; struct drm_i915_gem_request 
*gem_request; - int ret, count, i; + int ret, count; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; count = 0; - for_each_ring(ring, dev_priv, i) { - if (list_empty(&ring->request_list)) - continue; - - seq_printf(m, "%s requests:\n", ring->name); + if (!list_empty(&dev_priv->ring[RCS].request_list)) { + seq_printf(m, "Render requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->ring[RCS].request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + if (!list_empty(&dev_priv->ring[VCS].request_list)) { + seq_printf(m, "BSD requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->ring[VCS].request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + if (!list_empty(&dev_priv->ring[BCS].request_list)) { + seq_printf(m, "BLT requests:\n"); list_for_each_entry(gem_request, - &ring->request_list, + &dev_priv->ring[BCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -389,7 +413,7 @@ static void i915_ring_seqno_info(struct seq_file *m, { if (ring->get_seqno) { seq_printf(m, "Current sequence (%s): %d\n", - ring->name, ring->get_seqno(ring, false)); + ring->name, ring->get_seqno(ring)); } } @@ -398,15 +422,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - for_each_ring(ring, dev_priv, i) - i915_ring_seqno_info(m, ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); @@ -419,7 +442,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; int ret, i, pipe; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -497,13 +519,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - for_each_ring(ring, dev_priv, i) { + for (i = 0; i < I915_NUM_RINGS; i++) { if (IS_GEN6(dev) || IS_GEN7(dev)) { - seq_printf(m, - "Graphics Interrupt mask (%s): %08x\n", - ring->name, I915_READ_IMR(ring)); + seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", + dev_priv->ring[i].name, + I915_READ_IMR(&dev_priv->ring[i])); } - i915_ring_seqno_info(m, ring); + i915_ring_seqno_info(m, &dev_priv->ring[i]); } mutex_unlock(&dev->struct_mutex); @@ -526,8 +548,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; - seq_printf(m, "Fence %d, pin count = %d, object = ", - i, dev_priv->fence_regs[i].pin_count); + seq_printf(m, "Fenced object[%2d] = ", i); if (obj == NULL) seq_printf(m, "unused"); else @@ -609,12 +630,12 @@ static void print_error_buffers(struct seq_file *m, seq_printf(m, "%s [%d]:\n", name, count); while (count--) { - seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", + seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", err->gtt_offset, err->size, err->read_domains, err->write_domain, 
- err->rseqno, err->wseqno, + err->seqno, pin_flag(err->pinned), tiling_flag(err->tiling), dirty_flag(err->dirty), @@ -646,9 +667,10 @@ static void i915_ring_error_state(struct seq_file *m, seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); - if (ring == RCS && INTEL_INFO(dev)->gen >= 4) + if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { + seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); - + } if (INTEL_INFO(dev)->gen >= 4) seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); @@ -697,17 +719,11 @@ static int i915_error_state(struct seq_file *m, void *unused) for (i = 0; i < dev_priv->num_fence_regs; i++) seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) - seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); - if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "ERROR: 0x%08x\n", error->error); seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (INTEL_INFO(dev)->gen == 7) - seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - for_each_ring(ring, dev_priv, i) i915_ring_error_state(m, dev, error, i); @@ -783,14 +799,10 @@ i915_error_state_write(struct file *filp, struct seq_file *m = filp->private_data; struct i915_error_state_file_priv *error_priv = m->private; struct drm_device *dev = error_priv->dev; - int ret; DRM_DEBUG_DRIVER("Resetting error state\n"); - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - + mutex_lock(&dev->struct_mutex); i915_destroy_error_state(dev); mutex_unlock(&dev->struct_mutex); @@ -914,7 +926,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> - GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER); + GEN6_CAGF_SHIFT) * 50); seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & GEN6_CURICONT_MASK); seq_printf(m, "RP CUR UP: %dus\n", rpcurup & @@ -930,15 +942,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) max_freq = (rp_state_cap & 0xff0000) >> 16; seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", - max_freq * GT_FREQUENCY_MULTIPLIER); + max_freq * 50); max_freq = (rp_state_cap & 0xff00) >> 8; seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", - max_freq * GT_FREQUENCY_MULTIPLIER); + max_freq * 50); max_freq = rp_state_cap & 0xff; seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", - max_freq * GT_FREQUENCY_MULTIPLIER); + max_freq * 50); } else { seq_printf(m, "no P-state info available\n"); } @@ -1280,8 +1292,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); - for (gpu_freq = dev_priv->rps.min_delay; - gpu_freq <= dev_priv->rps.max_delay; + for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; gpu_freq++) { I915_WRITE(GEN6_PCODE_DATA, gpu_freq); I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | @@ -1292,7 +1303,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) continue; } ia_freq = I915_READ(GEN6_PCODE_DATA); - seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); + seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); } mutex_unlock(&dev->struct_mutex); @@ -1461,12 +1472,8 @@ static int 
i915_swizzle_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + mutex_lock(&dev->struct_mutex); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", @@ -1513,7 +1520,9 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) if (INTEL_INFO(dev)->gen == 6) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); - for_each_ring(ring, dev_priv, i) { + for (i = 0; i < I915_NUM_RINGS; i++) { + ring = &dev_priv->ring[i]; + seq_printf(m, "%s\n", ring->name); if (INTEL_INFO(dev)->gen == 7) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); @@ -1665,7 +1674,7 @@ i915_ring_stop_write(struct file *filp, struct drm_device *dev = filp->private_data; struct drm_i915_private *dev_priv = dev->dev_private; char buf[20]; - int val = 0, ret; + int val = 0; if (cnt > 0) { if (cnt > sizeof(buf) - 1) @@ -1680,10 +1689,7 @@ i915_ring_stop_write(struct file *filp, DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - + mutex_lock(&dev->struct_mutex); dev_priv->stop_rings = val; mutex_unlock(&dev->struct_mutex); @@ -1707,18 +1713,10 @@ i915_max_freq_read(struct file *filp, struct drm_device *dev = filp->private_data; drm_i915_private_t *dev_priv = dev->dev_private; char buf[80]; - int len, ret; - - if (!(IS_GEN6(dev) || IS_GEN7(dev))) - return -ENODEV; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + int len; len = snprintf(buf, sizeof(buf), - "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev->struct_mutex); + "max freq: %d\n", dev_priv->max_delay * 50); if (len > sizeof(buf)) len = sizeof(buf); @@ -1735,10 +1733,7 @@ i915_max_freq_write(struct file *filp, struct drm_device *dev = filp->private_data; struct drm_i915_private *dev_priv = dev->dev_private; char buf[20]; - int val = 1, ret; - - if (!(IS_GEN6(dev) || IS_GEN7(dev))) - return -ENODEV; + int val = 1; if (cnt > 0) { if (cnt > sizeof(buf) - 1) @@ -1753,17 +1748,12 @@ i915_max_freq_write(struct file *filp, DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - /* * Turbo will still be enabled, but won't go above the set value. 
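 *
 * [Editor's note, illustration only] The value written is in MHz and
 * is stored in units of 50 MHz, so for example
 *
 *	# echo 900 > /sys/kernel/debug/dri/0/i915_max_freq
 *
 * stores a delay value of 900 / 50 = 18. The divide-by-50 is taken
 * from the code below; the exact debugfs path is an assumption.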
*/ - dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; + dev_priv->max_delay = val / 50; - gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev->struct_mutex); + gen6_set_rps(dev, val / 50); return cnt; } @@ -1783,18 +1773,10 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, struct drm_device *dev = filp->private_data; drm_i915_private_t *dev_priv = dev->dev_private; char buf[80]; - int len, ret; - - if (!(IS_GEN6(dev) || IS_GEN7(dev))) - return -ENODEV; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + int len; len = snprintf(buf, sizeof(buf), - "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev->struct_mutex); + "min freq: %d\n", dev_priv->min_delay * 50); if (len > sizeof(buf)) len = sizeof(buf); @@ -1809,10 +1791,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, struct drm_device *dev = filp->private_data; struct drm_i915_private *dev_priv = dev->dev_private; char buf[20]; - int val = 1, ret; - - if (!(IS_GEN6(dev) || IS_GEN7(dev))) - return -ENODEV; + int val = 1; if (cnt > 0) { if (cnt > sizeof(buf) - 1) @@ -1827,17 +1806,12 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - /* * Turbo will still be enabled, but won't go below the set value. */ - dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; + dev_priv->min_delay = val / 50; - gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); - mutex_unlock(&dev->struct_mutex); + gen6_set_rps(dev, val / 50); return cnt; } @@ -1860,15 +1834,9 @@ i915_cache_sharing_read(struct file *filp, drm_i915_private_t *dev_priv = dev->dev_private; char buf[80]; u32 snpcr; - int len, ret; - - if (!(IS_GEN6(dev) || IS_GEN7(dev))) - return -ENODEV; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + int len; + mutex_lock(&dev_priv->dev->struct_mutex); snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); mutex_unlock(&dev_priv->dev->struct_mutex); @@ -1894,9 +1862,6 @@ i915_cache_sharing_write(struct file *filp, u32 snpcr; int val = 1; - if (!(IS_GEN6(dev) || IS_GEN7(dev))) - return -ENODEV; - if (cnt > 0) { if (cnt > sizeof(buf) - 1) return -EINVAL; @@ -1960,11 +1925,16 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; struct drm_i915_private *dev_priv = dev->dev_private; + int ret; if (INTEL_INFO(dev)->gen < 6) return 0; + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; gen6_gt_force_wake_get(dev_priv); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -1977,7 +1947,16 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) if (INTEL_INFO(dev)->gen < 6) return 0; + /* + * It's bad that we can potentially hang userspace if struct_mutex gets + * forever stuck. However, if we cannot acquire this lock it means that + * almost certainly the driver has hung, is not unload-able. Therefore + * hanging here is probably a minor inconvenience not to be seen by
+ */ + mutex_lock(&dev->struct_mutex); gen6_gt_force_wake_put(dev_priv); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -2027,6 +2006,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_gtt", i915_gem_gtt_info, 0}, {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, + {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, @@ -2087,7 +2067,6 @@ int i915_debugfs_init(struct drm_minor *minor) &i915_cache_sharing_fops); if (ret) return ret; - ret = i915_debugfs_create(minor->debugfs_root, minor, "i915_ring_stop", &i915_ring_stop_fops); diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index ffbc9156c792..914c0dfabe60 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -235,10 +235,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } } - dev_priv->dri1.cpp = init->cpp; - dev_priv->dri1.back_offset = init->back_offset; - dev_priv->dri1.front_offset = init->front_offset; - dev_priv->dri1.current_page = 0; + dev_priv->cpp = init->cpp; + dev_priv->back_offset = init->back_offset; + dev_priv->front_offset = init->front_offset; + dev_priv->current_page = 0; if (master_priv->sarea_priv) master_priv->sarea_priv->pf_current_page = 0; @@ -575,7 +575,7 @@ static int i915_dispatch_flip(struct drm_device * dev) DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", __func__, - dev_priv->dri1.current_page, + dev_priv->current_page, master_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); @@ -589,12 +589,12 @@ static int i915_dispatch_flip(struct drm_device * dev) OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); - if (dev_priv->dri1.current_page == 0) { - OUT_RING(dev_priv->dri1.back_offset); - dev_priv->dri1.current_page = 1; + if (dev_priv->current_page == 0) { + OUT_RING(dev_priv->back_offset); + dev_priv->current_page = 1; } else { - OUT_RING(dev_priv->dri1.front_offset); - dev_priv->dri1.current_page = 0; + OUT_RING(dev_priv->front_offset); + dev_priv->current_page = 0; } OUT_RING(0); @@ -613,7 +613,7 @@ static int i915_dispatch_flip(struct drm_device * dev) ADVANCE_LP_RING(); } - master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; + master_priv->sarea_priv->pf_current_page = dev_priv->current_page; return 0; } @@ -1009,12 +1009,6 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_WAIT_TIMEOUT: value = 1; break; - case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev); - break; - case I915_PARAM_HAS_PRIME_VMAP_FLUSH: - value = 1; - break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -1431,21 +1425,6 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) kfree(ap); } -static void i915_dump_device_info(struct drm_i915_private *dev_priv) -{ - const struct intel_device_info *info = dev_priv->info; - -#define DEV_INFO_FLAG(name) info->name ? 
#name "," : "" -#define DEV_INFO_SEP , - DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", - info->gen, - dev_priv->dev->pdev->device, - DEV_INFO_FLAGS); -#undef DEV_INFO_FLAG -#undef DEV_INFO_SEP -} - /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1461,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; struct intel_device_info *info; - int ret = 0, mmio_bar, mmio_size; + int ret = 0, mmio_bar; uint32_t aperture_size; info = (struct intel_device_info *) flags; @@ -1470,6 +1449,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; + /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -1485,8 +1465,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->dev = dev; dev_priv->info = info; - i915_dump_device_info(dev_priv); - if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; @@ -1526,19 +1504,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); mmio_bar = IS_GEN2(dev) ? 1 : 0; - /* Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT which we want ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake. - */ - if (info->gen < 5) - mmio_size = 512*1024; - else - mmio_size = 2*1024*1024; - - dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); ret = -EIO; @@ -1570,9 +1536,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * * All tasks on the workqueue are expected to acquire the dev mutex * so there is no point in running more than one instance of the - * workqueue at any time. Use an ordered one. + * workqueue at any time: max_active = 1 and NON_REENTRANT. 
*/ - dev_priv->wq = alloc_ordered_workqueue("i915", 0); + dev_priv->wq = alloc_workqueue("i915", + WQ_UNBOUND | WQ_NON_REENTRANT, + 1); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); ret = -ENOMEM; @@ -1618,7 +1586,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); - spin_lock_init(&dev_priv->rps.lock); + spin_lock_init(&dev_priv->rps_lock); spin_lock_init(&dev_priv->dpio_lock); if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) @@ -1868,8 +1836,6 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), @@ -1892,7 +1858,6 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index a7837e556945..a24ffbe97c01 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -470,9 +470,6 @@ static int i915_drm_freeze(struct drm_device *dev) "GEM idle failed, resume might fail\n"); return error; } - - intel_modeset_disable(dev); - drm_irq_uninstall(dev); } @@ -546,9 +543,13 @@ static int i915_drm_thaw(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); intel_modeset_init_hw(dev); - intel_modeset_setup_hw_state(dev); drm_mode_config_reset(dev); drm_irq_install(dev); + + /* Resume the modeset for every activated CRTC */ + mutex_lock(&dev->mode_config.mutex); + drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); } intel_opregion_init(dev); @@ -1059,7 +1060,7 @@ static bool IS_DISPLAYREG(u32 reg) * This should make it easier to transition modules over to the * new register block scheme, since we can do it incrementally. */ - if (reg >= VLV_DISPLAY_BASE) + if (reg >= 0x180000) return false; if (reg >= RENDER_RING_BASE && @@ -1173,59 +1174,9 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ - if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ - DRM_ERROR("Unclaimed write to %x\n", reg); \ - writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \ - } \ } __i915_write(8, b) __i915_write(16, w) __i915_write(32, l) __i915_write(64, q) #undef __i915_write - -static const struct register_whitelist { - uint64_t offset; - uint32_t size; - uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
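- * [Editor's note] Tested below as
- * (1 << INTEL_INFO(dev)->gen) & gen_bitmask,
- * so gen 6 contributes 0x40 and the 0xF0 entry
- * matches gens 4 through 7.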
*/ -} whitelist[] = { - { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, -}; - -int i915_reg_read_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reg_read *reg = data; - struct register_whitelist const *entry = whitelist; - int i; - - for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == reg->offset && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) - break; - } - - if (i == ARRAY_SIZE(whitelist)) - return -EINVAL; - - switch (entry->size) { - case 8: - reg->val = I915_READ64(reg->offset); - break; - case 4: - reg->val = I915_READ(reg->offset); - break; - case 2: - reg->val = I915_READ16(reg->offset); - break; - case 1: - reg->val = I915_READ8(reg->offset); - break; - default: - WARN_ON(1); - return -EINVAL; - } - - return 0; -} diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index 4f2831aa5fed..627fe35781b4 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -109,7 +109,6 @@ struct intel_pch_pll { #define WATCH_COHERENCY 0 #define WATCH_LISTS 0 -#define WATCH_GTT 0 #define I915_GEM_PHYS_CURSOR_0 1 #define I915_GEM_PHYS_CURSOR_1 2 @@ -196,10 +195,9 @@ struct drm_i915_error_state { u32 cpu_ring_head[I915_NUM_RINGS]; u32 cpu_ring_tail[I915_NUM_RINGS]; u32 error; /* gen6+ */ - u32 err_int; /* gen7 */ u32 instpm[I915_NUM_RINGS]; u32 instps[I915_NUM_RINGS]; - u32 extra_instdone[I915_NUM_INSTDONE_REG]; + u32 instdone1; u32 seqno[I915_NUM_RINGS]; u64 bbaddr; u32 fault_reg[I915_NUM_RINGS]; @@ -223,7 +221,7 @@ struct drm_i915_error_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 rseqno, wseqno; + u32 seqno; u32 gtt_offset; u32 read_domains; u32 write_domain; @@ -241,6 +239,7 @@ struct drm_i915_error_state { }; struct drm_i915_display_funcs { + void (*dpms)(struct drm_crtc *crtc, int mode); bool (*fbc_enabled)(struct drm_device *dev); void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); void (*disable_fbc)(struct drm_device *dev); @@ -249,6 +248,7 @@ struct drm_i915_display_funcs { void (*update_wm)(struct drm_device *dev); void (*update_sprite_wm)(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size); + void (*sanitize_pm)(struct drm_device *dev); void (*update_linetime_wm)(struct drm_device *dev, int pipe, struct drm_display_mode *mode); int (*crtc_mode_set)(struct drm_crtc *crtc, @@ -256,8 +256,6 @@ struct drm_i915_display_funcs { struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); - void (*crtc_enable)(struct drm_crtc *crtc); - void (*crtc_disable)(struct drm_crtc *crtc); void (*off)(struct drm_crtc *crtc); void (*write_eld)(struct drm_connector *connector, struct drm_crtc *crtc); @@ -281,32 +279,6 @@ struct drm_i915_gt_funcs { void (*force_wake_put)(struct drm_i915_private *dev_priv); }; -#define DEV_INFO_FLAGS \ - DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ - DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ - DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP 
\ - DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ - DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ - DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ - DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ - DEV_INFO_FLAG(has_llc) - struct intel_device_info { u8 gen; u8 is_mobile:1; @@ -430,6 +402,12 @@ typedef struct drm_i915_private { struct resource mch_res; + unsigned int cpp; + int back_offset; + int front_offset; + int current_page; + int page_flipping; + atomic_t irq_received; /* protects the irq masks */ @@ -447,6 +425,7 @@ typedef struct drm_i915_private { u32 hotplug_supported_mask; struct work_struct hotplug_work; + unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int num_pipe; int num_pch_pll; @@ -455,7 +434,8 @@ typedef struct drm_i915_private { struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd[I915_NUM_RINGS]; - uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; + uint32_t last_instdone; + uint32_t last_instdone1; unsigned int stop_rings; @@ -686,13 +666,7 @@ typedef struct drm_i915_private { struct drm_mm gtt_space; /** List of all objects in gtt_space. Used to restore gtt * mappings on resume */ - struct list_head bound_list; - /** - * List of objects which are not bound to the GTT (thus - * are idle and not used by the GPU) but still have - * (presumably uncached) pages still attached. - */ - struct list_head unbound_list; + struct list_head gtt_list; /** Usable portion of the GTT for GEM */ unsigned long gtt_start; @@ -721,6 +695,17 @@ typedef struct drm_i915_private { */ struct list_head active_list; + /** + * List of objects which are not in the ringbuffer but which + * still have a write_domain which needs to be flushed before + * unbinding. + * + * last_rendering_seqno is 0 while an object is in this list. + * + * A reference is held on the buffer while on this list. + */ + struct list_head flushing_list; + /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. @@ -790,12 +775,6 @@ typedef struct drm_i915_private { struct { unsigned allow_batchbuffer : 1; u32 __iomem *gfx_hws_cpu_addr; - - unsigned int cpp; - int back_offset; - int front_offset; - int current_page; - int page_flipping; } dri1; /* Kernel Modesetting */ @@ -817,6 +796,9 @@ typedef struct drm_i915_private { bool lvds_downclock_avail; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; + struct work_struct idle_work; + struct timer_list idle_timer; + bool busy; u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; @@ -825,41 +807,26 @@ typedef struct drm_i915_private { bool mchbar_need_disable; - /* gen6+ rps state */ - struct { - struct work_struct work; - u32 pm_iir; - /* lock - irqsave spinlock that protects the work_struct and - * pm_iir. */ - spinlock_t lock; - - /* The below variables and all the rps hw state are protected by - * dev->struct mutex. */ - u8 cur_delay; - u8 min_delay; - u8 max_delay; - } rps; - - /* ilk-only ips/rps state.
Everything in here is protected by the global - * mchdev_lock in intel_pm.c */ - struct { - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; - struct timespec last_time2; - unsigned long gfx_power; - u8 corr; - - int c_m; - int r_t; - } ips; + struct work_struct rps_work; + spinlock_t rps_lock; + u32 pm_iir; + + u8 cur_delay; + u8 min_delay; + u8 max_delay; + u8 fmax; + u8 fstart; + + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + int c_m; + int r_t; + u8 corr; + spinlock_t *mchdev_lock; enum no_fbc_reason no_fbc_reason; @@ -894,48 +861,30 @@ enum hdmi_force_audio { }; enum i915_cache_level { - I915_CACHE_NONE = 0, + I915_CACHE_NONE, I915_CACHE_LLC, - I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ -}; - -struct drm_i915_gem_object_ops { - /* Interface between the GEM object and its backing storage. - * get_pages() is called once prior to the use of the associated set - * of pages before binding them into the GTT, and put_pages() is - * called after we no longer need them. As we expect there to be - * associated cost with migrating pages between the backing storage - * and making them available for the GPU (e.g. clflush), we may hold - * onto the pages after they are no longer referenced by the GPU - * in case they may be used again shortly (for example migrating the - * pages to a different memory domain within the GTT). put_pages() - * will therefore most likely be called when the object itself is - * being released or under memory pressure (where we attempt to - * reap pages for the shrinker). - */ - int (*get_pages)(struct drm_i915_gem_object *); - void (*put_pages)(struct drm_i915_gem_object *); + I915_CACHE_LLC_MLC, /* gen6+ */ }; struct drm_i915_gem_object { struct drm_gem_object base; - const struct drm_i915_gem_object_ops *ops; - /** Current space allocated to this object in the GTT, if any. */ struct drm_mm_node *gtt_space; struct list_head gtt_list; - /** This object's place on the active/inactive lists */ + /** This object's place on the active/flushing/inactive lists */ struct list_head ring_list; struct list_head mm_list; + /** This object's place on GPU write list */ + struct list_head gpu_write_list; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; /** - * This is set if the object is on the active lists (has pending - * rendering and so a non-zero seqno), and is not set if it is on - * inactive (ready to be unbound) list. + * This is set if the object is on the active or flushing lists + * (has pending rendering), and is not set if it's on inactive (ready + * to be unbound). */ unsigned int active:1; @@ -945,6 +894,12 @@ struct drm_i915_gem_object { */ unsigned int dirty:1; + /** + * This is set if the object has been written to since the last + * GPU flush. + */ + unsigned int pending_gpu_write:1; + /** * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT.
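The drm_i915_gem_object_ops comment being removed above spells out a contract: pages are fetched lazily by get_pages(), kept around while anything holds them pinned, and only handed back by put_pages() once the pin count drops to zero (typically under memory pressure). A minimal userspace sketch of that pin-count discipline, with illustrative names rather than the kernel API:

#include <assert.h>
#include <stdlib.h>

/* Userspace model of the get_pages()/put_pages() contract described
 * above: pages are populated lazily, kept while pinned, and only
 * released once no pin remains. */
struct gem_object {
    void *pages;          /* backing pages, NULL when released */
    int pages_pin_count;  /* holders that forbid put_pages()   */
};

static int get_pages(struct gem_object *obj)
{
    if (obj->pages)
        return 0;                 /* already populated          */
    obj->pages = malloc(4096);    /* stand-in for shmem pages   */
    return obj->pages ? 0 : -1;
}

static int put_pages(struct gem_object *obj)
{
    if (!obj->pages)
        return 0;
    if (obj->pages_pin_count)
        return -1;                /* -EBUSY in the kernel       */
    free(obj->pages);
    obj->pages = NULL;
    return 0;
}

int main(void)
{
    struct gem_object obj = { 0, 0 };
    assert(get_pages(&obj) == 0);
    obj.pages_pin_count++;        /* pin while in active use    */
    assert(put_pages(&obj) != 0); /* shrinker must back off     */
    obj.pages_pin_count--;
    assert(put_pages(&obj) == 0); /* now reclaimable            */
    return 0;
}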
@@ -1006,12 +961,17 @@ struct drm_i915_gem_object { unsigned int has_aliasing_ppgtt_mapping:1; unsigned int has_global_gtt_mapping:1; - unsigned int has_dma_mapping:1; - struct sg_table *pages; - int pages_pin_count; + struct page **pages; + + /** + * DMAR support + */ + struct scatterlist *sg_list; + int num_sg; /* prime dma-buf support */ + struct sg_table *sg_table; void *dma_buf_vmapping; int vmapping_count; @@ -1032,8 +992,7 @@ struct drm_i915_gem_object { struct intel_ring_buffer *ring; /** Breadcrumb of last rendering to the buffer. */ - uint32_t last_read_seqno; - uint32_t last_write_seqno; + uint32_t last_rendering_seqno; /** Breadcrumb of last fenced GPU access to the buffer. */ uint32_t last_fenced_seqno; @@ -1176,10 +1135,6 @@ struct drm_i915_file_private { #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) -#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - -#define GT_FREQUENCY_MULTIPLIER 50 - #include "i915_trace.h" /** @@ -1301,10 +1256,6 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, @@ -1323,42 +1274,24 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); -void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops); +int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, - bool map_and_fenceable, - bool nonblocking); + bool map_and_fenceable); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); -int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); -static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) -{ - struct scatterlist *sg = obj->pages->sgl; - while (n >= SG_MAX_SINGLE_ALLOC) { - sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); - n -= SG_MAX_SINGLE_ALLOC - 1; - } - return sg_page(sg+n); -} -static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) -{ - BUG_ON(obj->pages == NULL); - obj->pages_pin_count++; -} -static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) -{ - BUG_ON(obj->pages_pin_count == 0); - obj->pages_pin_count--; -} - +int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, + gfp_t gfpmask); int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_ring_buffer *to); void 
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, @@ -1425,9 +1358,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev); -int i915_add_request(struct intel_ring_buffer *ring, - struct drm_file *file, - struct drm_i915_gem_request *request); +int __must_check i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + struct drm_i915_gem_request *request); int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); @@ -1496,11 +1429,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev, /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, - unsigned cache_level, - bool mappable, - bool nonblock); -int i915_gem_evict_everything(struct drm_device *dev); + unsigned alignment, bool mappable); +int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); /* i915_gem_stolen.c */ int i915_gem_init_stolen(struct drm_device *dev); @@ -1589,7 +1519,6 @@ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); -extern void intel_modeset_setup_hw_state(struct drm_device *dev); extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); @@ -1600,8 +1529,6 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev); -int i915_reg_read_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); /* overlay */ #ifdef CONFIG_DEBUG_FS diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 365a7dc8a4a8..274d25de521e 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -37,12 +37,12 @@ #include #include +static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, - bool map_and_fenceable, - bool nonblocking); + bool map_and_fenceable); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -56,8 +56,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc); -static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); -static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) @@ -143,7 +141,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj->gtt_space && 
!obj->active; + return !obj->active; } int @@ -182,7 +180,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned = 0; mutex_lock(&dev->struct_mutex); - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) if (obj->pin_count) pinned += obj->gtt_space->size; mutex_unlock(&dev->struct_mutex); @@ -343,7 +341,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, page_length); kunmap_atomic(vaddr); - return ret ? -EFAULT : 0; + return ret; } static void @@ -394,7 +392,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, page_length); kunmap(page); - return ret ? - EFAULT : 0; + return ret; } static int @@ -403,6 +401,7 @@ i915_gem_shmem_pread(struct drm_device *dev, struct drm_i915_gem_pread *args, struct drm_file *file) { + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; char __user *user_data; ssize_t remain; loff_t offset; @@ -411,8 +410,7 @@ i915_gem_shmem_pread(struct drm_device *dev, int hit_slowpath = 0; int prefaulted = 0; int needs_clflush = 0; - struct scatterlist *sg; - int i; + int release_page; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -426,30 +424,16 @@ i915_gem_shmem_pread(struct drm_device *dev, * anyway again before the next pread happens. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush = 1; - if (obj->gtt_space) { - ret = i915_gem_object_set_to_gtt_domain(obj, false); - if (ret) - return ret; - } + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + return ret; } - ret = i915_gem_object_get_pages(obj); - if (ret) - return ret; - - i915_gem_object_pin_pages(obj); - offset = args->offset; - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { + while (remain > 0) { struct page *page; - if (i < offset >> PAGE_SHIFT) - continue; - - if (remain <= 0) - break; - /* Operation in this page * * shmem_page_offset = offset within page in shmem file @@ -460,7 +444,18 @@ i915_gem_shmem_pread(struct drm_device *dev, if ((shmem_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - shmem_page_offset; - page = sg_page(sg); + if (obj->pages) { + page = obj->pages[offset >> PAGE_SHIFT]; + release_page = 0; + } else { + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto out; + } + release_page = 1; + } + page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; @@ -471,6 +466,7 @@ i915_gem_shmem_pread(struct drm_device *dev, goto next_page; hit_slowpath = 1; + page_cache_get(page); mutex_unlock(&dev->struct_mutex); if (!prefaulted) { @@ -488,12 +484,16 @@ i915_gem_shmem_pread(struct drm_device *dev, needs_clflush); mutex_lock(&dev->struct_mutex); - + page_cache_release(page); next_page: mark_page_accessed(page); + if (release_page) + page_cache_release(page); - if (ret) + if (ret) { + ret = -EFAULT; goto out; + } remain -= page_length; user_data += page_length; @@ -501,8 +501,6 @@ i915_gem_shmem_pread(struct drm_device *dev, } out: - i915_gem_object_unpin_pages(obj); - if (hit_slowpath) { /* Fixup: Kill any reinstated backing storage pages */ if (obj->madv == __I915_MADV_PURGED) @@ -608,7 +606,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, char __user *user_data; int page_offset, page_length, ret; - ret = i915_gem_object_pin(obj, 0, true, true); + ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; @@ -688,7 +686,7 @@ 
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, page_length); kunmap_atomic(vaddr); - return ret ? -EFAULT : 0; + return ret; } /* Only difference to the fast-path function is that this can handle bit17 @@ -722,7 +720,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, page_do_bit17_swizzling); kunmap(page); - return ret ? -EFAULT : 0; + return ret; } static int @@ -731,6 +729,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_i915_gem_pwrite *args, struct drm_file *file) { + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; loff_t offset; char __user *user_data; @@ -739,8 +738,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int hit_slowpath = 0; int needs_clflush_after = 0; int needs_clflush_before = 0; - int i; - struct scatterlist *sg; + int release_page; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -754,11 +752,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * right away and we therefore have to clflush anyway. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush_after = 1; - if (obj->gtt_space) { - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; - } + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; } /* Same trick applies for invalidate partially written cachelines before * writing. */ @@ -766,25 +762,13 @@ i915_gem_shmem_pwrite(struct drm_device *dev, && obj->cache_level == I915_CACHE_NONE) needs_clflush_before = 1; - ret = i915_gem_object_get_pages(obj); - if (ret) - return ret; - - i915_gem_object_pin_pages(obj); - offset = args->offset; obj->dirty = 1; - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { + while (remain > 0) { struct page *page; int partial_cacheline_write; - if (i < offset >> PAGE_SHIFT) - continue; - - if (remain <= 0) - break; - /* Operation in this page * * shmem_page_offset = offset within page in shmem file @@ -803,7 +787,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ((shmem_page_offset | page_length) & (boot_cpu_data.x86_clflush_size - 1)); - page = sg_page(sg); + if (obj->pages) { + page = obj->pages[offset >> PAGE_SHIFT]; + release_page = 0; + } else { + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto out; + } + release_page = 1; + } + page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; @@ -815,20 +810,26 @@ i915_gem_shmem_pwrite(struct drm_device *dev, goto next_page; hit_slowpath = 1; + page_cache_get(page); mutex_unlock(&dev->struct_mutex); + ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, partial_cacheline_write, needs_clflush_after); mutex_lock(&dev->struct_mutex); - + page_cache_release(page); next_page: set_page_dirty(page); mark_page_accessed(page); + if (release_page) + page_cache_release(page); - if (ret) + if (ret) { + ret = -EFAULT; goto out; + } remain -= page_length; user_data += page_length; @@ -836,8 +837,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev, } out: - i915_gem_object_unpin_pages(obj); - if (hit_slowpath) { /* Fixup: Kill any reinstated backing storage pages */ if (obj->madv == __I915_MADV_PURGED) @@ -921,8 +920,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } - if (obj->cache_level == I915_CACHE_NONE && + if (obj->gtt_space && + obj->cache_level == I915_CACHE_NONE && obj->tiling_mode == I915_TILING_NONE && + 
obj->map_and_fenceable && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user @@ -930,7 +931,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * textures). Fallback to the shmem path in that case. */ } - if (ret == -EFAULT || ret == -ENOSPC) + if (ret == -EFAULT) ret = i915_gem_shmem_pwrite(dev, obj, args, file); out: @@ -940,240 +941,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return ret; } -int -i915_gem_check_wedge(struct drm_i915_private *dev_priv, - bool interruptible) -{ - if (atomic_read(&dev_priv->mm.wedged)) { - struct completion *x = &dev_priv->error_completion; - bool recovery_complete; - unsigned long flags; - - /* Give the error handler a chance to run. */ - spin_lock_irqsave(&x->wait.lock, flags); - recovery_complete = x->done > 0; - spin_unlock_irqrestore(&x->wait.lock, flags); - - /* Non-interruptible callers can't handle -EAGAIN, hence return - * -EIO unconditionally for these. */ - if (!interruptible) - return -EIO; - - /* Recovery complete, but still wedged means reset failure. */ - if (recovery_complete) - return -EIO; - - return -EAGAIN; - } - - return 0; -} - -/* - * Compare seqno against outstanding lazy request. Emit a request if they are - * equal. - */ -static int -i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) -{ - int ret; - - BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - - ret = 0; - if (seqno == ring->outstanding_lazy_request) - ret = i915_add_request(ring, NULL, NULL); - - return ret; -} - -/** - * __wait_seqno - wait until execution of seqno has finished - * @ring: the ring expected to report seqno - * @seqno: duh! - * @interruptible: do an interruptible wait (normally yes) - * @timeout: in - how long to wait (NULL forever); out - how much time remaining - * - * Returns 0 if the seqno was found within the allotted time. Else returns the - * errno with remaining time filled in timeout argument.
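The removed i915_gem_check_wedge() above (re-added further down in this diff) picks its errno from two bits of state: whether the caller can cope with a retry, and whether GPU reset already completed. A small userspace model of just that decision, assuming the usual Linux errno values:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the check_wedge() logic: while the GPU is wedged,
 * interruptible callers get -EAGAIN so they can back off and retry,
 * unless reset already ran and failed, in which case everyone
 * gets -EIO. */
static int check_wedge(bool wedged, bool recovery_complete,
                       bool interruptible)
{
    if (!wedged)
        return 0;
    if (!interruptible)
        return -EIO;      /* caller cannot handle -EAGAIN         */
    if (recovery_complete)
        return -EIO;      /* reset finished but still wedged      */
    return -EAGAIN;       /* reset still in progress: try again   */
}

int main(void)
{
    /* Typically prints "0 -11 -5" on Linux (EAGAIN=11, EIO=5). */
    printf("%d %d %d\n",
           check_wedge(false, false, true),
           check_wedge(true, false, true),
           check_wedge(true, true, true));
    return 0;
}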
- */ -static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, - bool interruptible, struct timespec *timeout) -{ - drm_i915_private_t *dev_priv = ring->dev->dev_private; - struct timespec before, now, wait_time={1,0}; - unsigned long timeout_jiffies; - long end; - bool wait_forever = true; - int ret; - - if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) - return 0; - - trace_i915_gem_request_wait_begin(ring, seqno); - - if (timeout != NULL) { - wait_time = *timeout; - wait_forever = false; - } - - timeout_jiffies = timespec_to_jiffies(&wait_time); - - if (WARN_ON(!ring->irq_get(ring))) - return -ENODEV; - - /* Record current time in case interrupted by signal, or wedged */ - getrawmonotonic(&before); - -#define EXIT_COND \ - (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ - atomic_read(&dev_priv->mm.wedged)) - do { - if (interruptible) - end = wait_event_interruptible_timeout(ring->irq_queue, - EXIT_COND, - timeout_jiffies); - else - end = wait_event_timeout(ring->irq_queue, EXIT_COND, - timeout_jiffies); - - ret = i915_gem_check_wedge(dev_priv, interruptible); - if (ret) - end = ret; - } while (end == 0 && wait_forever); - - getrawmonotonic(&now); - - ring->irq_put(ring); - trace_i915_gem_request_wait_end(ring, seqno); -#undef EXIT_COND - - if (timeout) { - struct timespec sleep_time = timespec_sub(now, before); - *timeout = timespec_sub(*timeout, sleep_time); - } - - switch (end) { - case -EIO: - case -EAGAIN: /* Wedged */ - case -ERESTARTSYS: /* Signal */ - return (int)end; - case 0: /* Timeout */ - if (timeout) - set_normalized_timespec(timeout, 0, 0); - return -ETIME; - default: /* Completed */ - WARN_ON(end < 0); /* We're not aware of other errors */ - return 0; - } -} - -/** - * Waits for a sequence number to be signaled, and cleans up the - * request and object lists appropriately for that event. - */ -int -i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) -{ - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - bool interruptible = dev_priv->mm.interruptible; - int ret; - - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - BUG_ON(seqno == 0); - - ret = i915_gem_check_wedge(dev_priv, interruptible); - if (ret) - return ret; - - ret = i915_gem_check_olr(ring, seqno); - if (ret) - return ret; - - return __wait_seqno(ring, seqno, interruptible, NULL); -} - -/** - * Ensures that all rendering to the object has completed and the object is - * safe to unbind from the GTT or access from the CPU. - */ -static __must_check int -i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, - bool readonly) -{ - struct intel_ring_buffer *ring = obj->ring; - u32 seqno; - int ret; - - seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; - if (seqno == 0) - return 0; - - ret = i915_wait_seqno(ring, seqno); - if (ret) - return ret; - - i915_gem_retire_requests_ring(ring); - - /* Manually manage the write flush as we may have not yet - * retired the buffer. - */ - if (obj->last_write_seqno && - i915_seqno_passed(seqno, obj->last_write_seqno)) { - obj->last_write_seqno = 0; - obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; - } - - return 0; -} - -/* A nonblocking variant of the above wait. This is a highly dangerous routine - * as the object state may change during this call.
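Two details of the removed __wait_seqno() are easy to miss: seqno comparison is wrap-safe via a signed subtraction, and the in/out timeout has the time actually slept subtracted from it before returning. A compilable userspace sketch of both (Linux clock_gettime() stands in for getrawmonotonic(); names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Subtract two timespecs, normalizing a negative nanosecond field. */
static struct timespec ts_sub(struct timespec a, struct timespec b)
{
    struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
    if (r.tv_nsec < 0) {
        r.tv_sec--;
        r.tv_nsec += 1000000000L;
    }
    return r;
}

/* Wrap-safe comparison, as i915_seqno_passed() does it. */
static bool seqno_passed(unsigned int current_seqno, unsigned int target)
{
    return (int)(current_seqno - target) >= 0;
}

int main(void)
{
    struct timespec before, now, timeout = { 1, 0 };
    unsigned int hw_seqno = 41, target = 42;

    clock_gettime(CLOCK_MONOTONIC_RAW, &before);  /* Linux-specific */
    while (!seqno_passed(hw_seqno, target))
        hw_seqno++;               /* stand-in for the irq wait      */
    clock_gettime(CLOCK_MONOTONIC_RAW, &now);

    /* Hand the unused time back, as the wait ioctl does. */
    timeout = ts_sub(timeout, ts_sub(now, before));
    printf("remaining: %ld.%09ld s\n",
           (long)timeout.tv_sec, timeout.tv_nsec);
    return 0;
}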
- */ -static __must_check int -i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, - bool readonly) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = obj->ring; - u32 seqno; - int ret; - - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - BUG_ON(!dev_priv->mm.interruptible); - - seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; - if (seqno == 0) - return 0; - - ret = i915_gem_check_wedge(dev_priv, true); - if (ret) - return ret; - - ret = i915_gem_check_olr(ring, seqno); - if (ret) - return ret; - - mutex_unlock(&dev->struct_mutex); - ret = __wait_seqno(ring, seqno, true, NULL); - mutex_lock(&dev->struct_mutex); - - i915_gem_retire_requests_ring(ring); - - /* Manually manage the write flush as we may have not yet - * retired the buffer. - */ - if (obj->last_write_seqno && - i915_seqno_passed(seqno, obj->last_write_seqno)) { - obj->last_write_seqno = 0; - obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; - } - - return ret; -} - /** * Called when user space prepares to use an object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. @@ -1211,14 +978,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, goto unlock; } - /* Try to flush the object off the GPU without holding the lock. - * We will repeat the flush holding the lock in the normal manner - * to catch cases where we are gazumped. - */ - ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); - if (ret) - goto unref; - if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); @@ -1232,7 +991,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } -unref: drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); @@ -1352,7 +1110,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto unlock; } if (!obj->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, 0, true, false); + ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; @@ -1513,42 +1271,6 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, return i915_gem_get_gtt_size(dev, size, tiling_mode); } -static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - int ret; - - if (obj->base.map_list.map) - return 0; - - ret = drm_gem_create_mmap_offset(&obj->base); - if (ret != -ENOSPC) - return ret; - - /* Badly fragmented mmap space? The only way we can recover - * space is by destroying unwanted objects. We can't randomly release - * mmap_offsets as userspace expects them to be persistent for the - * lifetime of the objects. The closest we can do is to release the - * offsets on purgeable objects by truncating it and marking it purged, - * which prevents userspace from ever using that object again.
- */ - i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); - ret = drm_gem_create_mmap_offset(&obj->base); - if (ret != -ENOSPC) - return ret; - - i915_gem_shrink_all(dev_priv); - return drm_gem_create_mmap_offset(&obj->base); -} - -static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) -{ - if (!obj->base.map_list.map) - return; - - drm_gem_free_mmap_offset(&obj->base); -} - int i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, @@ -1580,9 +1302,11 @@ i915_gem_mmap_gtt(struct drm_file *file, goto out; } - ret = i915_gem_object_create_mmap_offset(obj); - if (ret) - goto out; + if (!obj->base.map_list.map) { + ret = drm_gem_create_mmap_offset(&obj->base); + if (ret) + goto out; + } *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; @@ -1617,52 +1341,63 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); } -/* Immediately discard the backing storage */ -static void -i915_gem_object_truncate(struct drm_i915_gem_object *obj) +int +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, + gfp_t gfpmask) { + int page_count, i; + struct address_space *mapping; struct inode *inode; + struct page *page; - i915_gem_object_free_mmap_offset(obj); - - if (obj->base.filp == NULL) - return; + if (obj->pages || obj->sg_table) + return 0; - /* Our goal here is to return as much of the memory as - * is possible back to the system as we are called from OOM. - * To do this we must instruct the shmfs to drop all of its - * backing pages, *now*. + /* Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. */ + page_count = obj->base.size / PAGE_SIZE; + BUG_ON(obj->pages != NULL); + obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); + if (obj->pages == NULL) + return -ENOMEM; + inode = obj->base.filp->f_path.dentry->d_inode; - shmem_truncate_range(inode, 0, (loff_t)-1); + mapping = inode->i_mapping; + gfpmask |= mapping_gfp_mask(mapping); - obj->madv = __I915_MADV_PURGED; -} + for (i = 0; i < page_count; i++) { + page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); + if (IS_ERR(page)) + goto err_pages; -static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) -{ - return obj->madv == I915_MADV_DONTNEED; + obj->pages[i] = page; + } + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_do_bit_17_swizzle(obj); + + return 0; + +err_pages: + while (i--) + page_cache_release(obj->pages[i]); + + drm_free_large(obj->pages); + obj->pages = NULL; + return PTR_ERR(page); } static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { int page_count = obj->base.size / PAGE_SIZE; - struct scatterlist *sg; - int ret, i; + int i; - BUG_ON(obj->madv == __I915_MADV_PURGED); + if (!obj->pages) + return; - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) { - /* In the event of a disaster, abandon all caches and - * hope for the best. 
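The removed i915_gem_object_create_mmap_offset() above escalates through reclaim stages when drm_gem_create_mmap_offset() reports fragmentation: retry after purging purgeable objects, then retry once more after shrinking everything. A toy model of that escalation (the allocator here is fake and succeeds on the final attempt):

#include <errno.h>
#include <stdio.h>

static int tries_left = 3;   /* fake allocator: succeeds on last try */

static int try_alloc(void)
{
    return --tries_left > 0 ? -ENOSPC : 0;
}

static void purge(void)      { fprintf(stderr, "purge\n"); }
static void shrink_all(void) { fprintf(stderr, "shrink_all\n"); }

/* Model of the escalating retry: try cheaply, reclaim a little,
 * retry, reclaim everything, then make a final attempt. */
static int create_mmap_offset(void)
{
    int ret = try_alloc();
    if (ret != -ENOSPC)
        return ret;
    purge();                 /* drop purgeable objects only   */
    ret = try_alloc();
    if (ret != -ENOSPC)
        return ret;
    shrink_all();            /* evict and drop everything     */
    return try_alloc();
}

int main(void)
{
    printf("ret=%d\n", create_mmap_offset());  /* ret=0 after 2 reclaims */
    return 0;
}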
- */ - WARN_ON(ret != -EIO); - i915_gem_clflush_object(obj); - obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; - } + BUG_ON(obj->madv == __I915_MADV_PURGED); if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj); @@ -1670,192 +1405,19 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) if (obj->madv == I915_MADV_DONTNEED) obj->dirty = 0; - for_each_sg(obj->pages->sgl, sg, page_count, i) { - struct page *page = sg_page(sg); - + for (i = 0; i < page_count; i++) { if (obj->dirty) - set_page_dirty(page); + set_page_dirty(obj->pages[i]); if (obj->madv == I915_MADV_WILLNEED) - mark_page_accessed(page); + mark_page_accessed(obj->pages[i]); - page_cache_release(page); + page_cache_release(obj->pages[i]); } obj->dirty = 0; - sg_free_table(obj->pages); - kfree(obj->pages); -} - -static int -i915_gem_object_put_pages(struct drm_i915_gem_object *obj) -{ - const struct drm_i915_gem_object_ops *ops = obj->ops; - - if (obj->pages == NULL) - return 0; - - BUG_ON(obj->gtt_space); - - if (obj->pages_pin_count) - return -EBUSY; - - ops->put_pages(obj); + drm_free_large(obj->pages); obj->pages = NULL; - - list_del(&obj->gtt_list); - if (i915_gem_object_is_purgeable(obj)) - i915_gem_object_truncate(obj); - - return 0; -} - -static long -i915_gem_purge(struct drm_i915_private *dev_priv, long target) -{ - struct drm_i915_gem_object *obj, *next; - long count = 0; - - list_for_each_entry_safe(obj, next, - &dev_priv->mm.unbound_list, - gtt_list) { - if (i915_gem_object_is_purgeable(obj) && - i915_gem_object_put_pages(obj) == 0) { - count += obj->base.size >> PAGE_SHIFT; - if (count >= target) - return count; - } - } - - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, - mm_list) { - if (i915_gem_object_is_purgeable(obj) && - i915_gem_object_unbind(obj) == 0 && - i915_gem_object_put_pages(obj) == 0) { - count += obj->base.size >> PAGE_SHIFT; - if (count >= target) - return count; - } - } - - return count; -} - -static void -i915_gem_shrink_all(struct drm_i915_private *dev_priv) -{ - struct drm_i915_gem_object *obj, *next; - - i915_gem_evict_everything(dev_priv->dev); - - list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) - i915_gem_object_put_pages(obj); -} - -static int -i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - int page_count, i; - struct address_space *mapping; - struct sg_table *st; - struct scatterlist *sg; - struct page *page; - gfp_t gfp; - - /* Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); - BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; - - page_count = obj->base.size / PAGE_SIZE; - if (sg_alloc_table(st, page_count, GFP_KERNEL)) { - sg_free_table(st); - kfree(st); - return -ENOMEM; - } - - /* Get the list of pages out of our struct file. They'll be pinned - * at this point until we release them. 
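i915_gem_purge(), removed above, walks candidate objects, drops only the purgeable ones, and stops as soon as the freed page count reaches the shrinker's target rather than scanning everything. A userspace sketch of that early-exit accounting:

#include <stdio.h>

struct obj { long pages; int purgeable; int freed; };

/* Model of the purge loop: free purgeable objects until the freed
 * page count reaches the requested target, then stop early. */
static long purge(struct obj *objs, int n, long target)
{
    long count = 0;
    for (int i = 0; i < n; i++) {
        if (!objs[i].purgeable || objs[i].freed)
            continue;
        objs[i].freed = 1;        /* put_pages() succeeded     */
        count += objs[i].pages;
        if (count >= target)
            break;                /* freed enough, stop early  */
    }
    return count;
}

int main(void)
{
    struct obj objs[] = {
        { 16, 1, 0 }, { 64, 0, 0 }, { 32, 1, 0 }, { 128, 1, 0 },
    };
    /* Target of 40 pages: frees the 16- and 32-page objects only,
     * so this prints 48. */
    printf("freed %ld pages\n", purge(objs, 4, 40));
    return 0;
}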
- * - * Fail silently without starting the shrinker - */ - mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; - gfp = mapping_gfp_mask(mapping); - gfp |= __GFP_NORETRY | __GFP_NOWARN; - gfp &= ~(__GFP_IO | __GFP_WAIT); - for_each_sg(st->sgl, sg, page_count, i) { - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (IS_ERR(page)) { - i915_gem_purge(dev_priv, page_count); - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - } - if (IS_ERR(page)) { - /* We've tried hard to allocate the memory by reaping - * our own buffer, now let the real VM do its job and - * go down in flames if truly OOM. - */ - gfp &= ~(__GFP_NORETRY | __GFP_NOWARN); - gfp |= __GFP_IO | __GFP_WAIT; - - i915_gem_shrink_all(dev_priv); - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (IS_ERR(page)) - goto err_pages; - - gfp |= __GFP_NORETRY | __GFP_NOWARN; - gfp &= ~(__GFP_IO | __GFP_WAIT); - } - - sg_set_page(sg, page, PAGE_SIZE, 0); - } - - if (i915_gem_object_needs_bit17_swizzle(obj)) - i915_gem_object_do_bit_17_swizzle(obj); - - obj->pages = st; - return 0; - -err_pages: - for_each_sg(st->sgl, sg, i, page_count) - page_cache_release(sg_page(sg)); - sg_free_table(st); - kfree(st); - return PTR_ERR(page); -} - -/* Ensure that the associated pages are gathered from the backing storage - * and pinned into our object. i915_gem_object_get_pages() may be called - * multiple times before they are released by a single call to - * i915_gem_object_put_pages() - once the pages are no longer referenced - * either as a result of memory pressure (reaping pages under the shrinker) - * or as the object is itself released. - */ -int -i915_gem_object_get_pages(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - const struct drm_i915_gem_object_ops *ops = obj->ops; - int ret; - - if (obj->pages) - return 0; - - BUG_ON(obj->pages_pin_count); - - ret = ops->get_pages(obj); - if (ret) - return ret; - - list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); - return 0; } void @@ -1879,7 +1441,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); list_move_tail(&obj->ring_list, &ring->active_list); - obj->last_read_seqno = seqno; + obj->last_rendering_seqno = seqno; if (obj->fenced_gpu_access) { obj->last_fenced_seqno = seqno; @@ -1896,35 +1458,97 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, } static void -i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) +i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) +{ + list_del_init(&obj->ring_list); + obj->last_rendering_seqno = 0; + obj->last_fenced_seqno = 0; +} + +static void +i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; - BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); + list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); + + i915_gem_object_move_off_active(obj); +} - if (obj->pin_count) /* are we a framebuffer? 
*/ - intel_mark_fb_idle(obj); +static void +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - list_del_init(&obj->ring_list); + BUG_ON(!list_empty(&obj->gpu_write_list)); + BUG_ON(!obj->active); obj->ring = NULL; - obj->last_read_seqno = 0; - obj->last_write_seqno = 0; - obj->base.write_domain = 0; - - obj->last_fenced_seqno = 0; + i915_gem_object_move_off_active(obj); obj->fenced_gpu_access = false; obj->active = 0; + obj->pending_gpu_write = false; drm_gem_object_unreference(&obj->base); WARN_ON(i915_verify_lists(dev)); } +/* Immediately discard the backing storage */ +static void +i915_gem_object_truncate(struct drm_i915_gem_object *obj) +{ + struct inode *inode; + + /* Our goal here is to return as much of the memory as + * is possible back to the system as we are called from OOM. + * To do this we must instruct the shmfs to drop all of its + * backing pages, *now*. + */ + inode = obj->base.filp->f_path.dentry->d_inode; + shmem_truncate_range(inode, 0, (loff_t)-1); + + if (obj->base.map_list.map) + drm_gem_free_mmap_offset(&obj->base); + + obj->madv = __I915_MADV_PURGED; +} + +static inline int +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) +{ + return obj->madv == I915_MADV_DONTNEED; +} + +static void +i915_gem_process_flushing_list(struct intel_ring_buffer *ring, + uint32_t flush_domains) +{ + struct drm_i915_gem_object *obj, *next; + + list_for_each_entry_safe(obj, next, + &ring->gpu_write_list, + gpu_write_list) { + if (obj->base.write_domain & flush_domains) { + uint32_t old_write_domain = obj->base.write_domain; + + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_active(obj, ring, + i915_gem_next_request_seqno(ring)); + + trace_i915_gem_object_change_domain(obj, + obj->base.read_domains, + old_write_domain); + } + } +} + static u32 i915_gem_get_seqno(struct drm_device *dev) { @@ -1965,16 +1589,15 @@ i915_add_request(struct intel_ring_buffer *ring, * is that the flush _must_ happen before the next request, no matter * what. 
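i915_gem_process_flushing_list() and the flushing list restored by this diff form a small state machine: a retired object that still owns a dirty GPU write domain parks on the flushing list with last_rendering_seqno cleared, a ring flush moves it back to active under the flush's seqno, and only a later retire sends it to inactive. A compilable model of those transitions (illustrative, not kernel code):

#include <assert.h>
#include <stdio.h>

enum list_id { ACTIVE, FLUSHING, INACTIVE };

struct gem_object {
    unsigned int write_domain;        /* non-zero: unflushed GPU writes */
    unsigned int last_rendering_seqno;
    enum list_id where;
};

static void retire(struct gem_object *obj, unsigned int done_seqno)
{
    if (obj->last_rendering_seqno > done_seqno)
        return;                       /* still busy                     */
    obj->where = obj->write_domain ? FLUSHING : INACTIVE;
    if (obj->where == FLUSHING)
        obj->last_rendering_seqno = 0; /* as the struct comment says    */
}

static void flush_ring(struct gem_object *obj, unsigned int new_seqno)
{
    if (obj->where != FLUSHING)
        return;
    obj->write_domain = 0;
    obj->last_rendering_seqno = new_seqno;
    obj->where = ACTIVE;              /* the flush is itself GPU work   */
}

int main(void)
{
    struct gem_object obj = { 0x2 /* render domain, illustrative */, 1, ACTIVE };
    retire(&obj, 1);
    assert(obj.where == FLUSHING);
    flush_ring(&obj, 2);
    retire(&obj, 2);
    assert(obj.where == INACTIVE);
    printf("ok\n");
    return 0;
}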
*/ - ret = intel_ring_flush_all_caches(ring); - if (ret) - return ret; + if (ring->gpu_caches_dirty) { + ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; - if (request == NULL) { - request = kmalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; + ring->gpu_caches_dirty = false; } + BUG_ON(request == NULL); seqno = i915_gem_next_request_seqno(ring); /* Record the position of the start of the request so that @@ -1985,10 +1608,8 @@ i915_add_request(struct intel_ring_buffer *ring, request_ring_position = intel_ring_get_tail(ring); ret = ring->add_request(ring, &seqno); - if (ret) { - kfree(request); - return ret; - } + if (ret) + return ret; trace_i915_gem_request_add(ring, seqno); @@ -1998,7 +1619,6 @@ i915_add_request(struct intel_ring_buffer *ring, request->emitted_jiffies = jiffies; was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); - request->file_priv = NULL; if (file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -2018,13 +1638,13 @@ i915_add_request(struct intel_ring_buffer *ring, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } - if (was_empty) { + if (was_empty) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); - intel_mark_busy(dev_priv->dev); - } } + WARN_ON(!list_empty(&ring->gpu_write_list)); + return 0; } @@ -2066,6 +1686,8 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, struct drm_i915_gem_object, ring_list); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_inactive(obj); } } @@ -2101,6 +1723,20 @@ void i915_gem_reset(struct drm_device *dev) for_each_ring(ring, dev_priv, i) i915_gem_reset_ring_lists(dev_priv, ring); + /* Remove anything from the flushing lists. The GPU cache is likely + * to be lost on reset along with the data, so simply move the + * lost bo to the inactive list. + */ + while (!list_empty(&dev_priv->mm.flushing_list)) { + obj = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + mm_list); + + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); + } + /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. */ @@ -2129,7 +1765,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) WARN_ON(i915_verify_lists(ring->dev)); - seqno = ring->get_seqno(ring, true); + seqno = ring->get_seqno(ring); for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) if (seqno >= ring->sync_seqno[i]) @@ -2168,10 +1804,13 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) struct drm_i915_gem_object, ring_list); - if (!i915_seqno_passed(seqno, obj->last_read_seqno)) + if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) break; - i915_gem_object_move_to_inactive(obj); + if (obj->base.write_domain != 0) + i915_gem_object_move_to_flushing(obj); + else + i915_gem_object_move_to_inactive(obj); } if (unlikely(ring->trace_irq_seqno && @@ -2215,23 +1854,219 @@ i915_gem_retire_work_handler(struct work_struct *work) i915_gem_retire_requests(dev); - /* Send a periodic flush down the ring so we don't hold onto GEM - * objects indefinitely. + /* Send a periodic flush down the ring so we don't hold onto GEM + * objects indefinitely. 
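The i915_add_request() path in the hunk above does two pieces of bookkeeping before anything else: flush dirty GPU caches so the request's breadcrumb cannot overtake outstanding writes, and record the ring tail where the request's commands begin so hang analysis can attribute work to a request. A rough userspace model (names illustrative, not the kernel API):

#include <assert.h>
#include <stdio.h>

struct request { unsigned int seqno; unsigned int ring_tail; };

struct ring {
    int gpu_caches_dirty;
    unsigned int next_seqno;
    unsigned int tail;
    struct request reqs[8];
    int nreqs;
};

static int add_request(struct ring *ring, struct request *req)
{
    if (ring->gpu_caches_dirty) {
        /* models i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS) */
        ring->gpu_caches_dirty = 0;
    }
    req->seqno = ring->next_seqno++;
    req->ring_tail = ring->tail;  /* start of this request's commands */
    ring->reqs[ring->nreqs++] = *req;
    return 0;
}

int main(void)
{
    struct ring ring = { .gpu_caches_dirty = 1, .next_seqno = 1 };
    struct request req;

    ring.tail = 0x40;             /* pretend commands were emitted */
    assert(add_request(&ring, &req) == 0);
    assert(!ring.gpu_caches_dirty);
    printf("request %u at tail 0x%x\n", req.seqno, req.ring_tail);
    return 0;
}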
+ */ + idle = true; + for_each_ring(ring, dev_priv, i) { + if (ring->gpu_caches_dirty) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL || + i915_add_request(ring, NULL, request)) + kfree(request); + } + + idle &= list_empty(&ring->request_list); + } + + if (!dev_priv->mm.suspended && !idle) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + + mutex_unlock(&dev->struct_mutex); +} + +int +i915_gem_check_wedge(struct drm_i915_private *dev_priv, + bool interruptible) +{ + if (atomic_read(&dev_priv->mm.wedged)) { + struct completion *x = &dev_priv->error_completion; + bool recovery_complete; + unsigned long flags; + + /* Give the error handler a chance to run. */ + spin_lock_irqsave(&x->wait.lock, flags); + recovery_complete = x->done > 0; + spin_unlock_irqrestore(&x->wait.lock, flags); + + /* Non-interruptible callers can't handle -EAGAIN, hence return + * -EIO unconditionally for these. */ + if (!interruptible) + return -EIO; + + /* Recovery complete, but still wedged means reset failure. */ + if (recovery_complete) + return -EIO; + + return -EAGAIN; + } + + return 0; +} + +/* + * Compare seqno against outstanding lazy request. Emit a request if they are + * equal. + */ +static int +i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) +{ + int ret = 0; + + BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + + if (seqno == ring->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + ret = i915_add_request(ring, NULL, request); + if (ret) { + kfree(request); + return ret; + } + + BUG_ON(seqno != request->seqno); + } + + return ret; +} + +/** + * __wait_seqno - wait until execution of seqno has finished + * @ring: the ring expected to report seqno + * @seqno: duh! + * @interruptible: do an interruptible wait (normally yes) + * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * + * Returns 0 if the seqno was found within the allotted time. Else returns the + * errno with remaining time filled in timeout argument.
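i915_gem_check_olr(), as restored above, exists because rings batch work under one "outstanding lazy request" seqno; a waiter must first materialize that lazy seqno into a real request or it would wait on a breadcrumb that will never be written. A minimal model of the check:

#include <assert.h>

struct ring {
    unsigned int outstanding_lazy_request; /* 0 if none pending */
    unsigned int last_emitted;
};

static int add_request(struct ring *ring)
{
    ring->last_emitted = ring->outstanding_lazy_request;
    ring->outstanding_lazy_request = 0;
    return 0;
}

/* Emit a real request only if the seqno being waited on is the
 * ring's pending lazy one; any other seqno was already emitted. */
static int check_olr(struct ring *ring, unsigned int seqno)
{
    if (seqno == ring->outstanding_lazy_request)
        return add_request(ring);
    return 0;
}

int main(void)
{
    struct ring ring = { .outstanding_lazy_request = 7 };

    assert(check_olr(&ring, 5) == 0);  /* unrelated seqno: no-op    */
    assert(ring.last_emitted == 0);
    assert(check_olr(&ring, 7) == 0);  /* lazy seqno: emit for real */
    assert(ring.last_emitted == 7);
    return 0;
}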
+ */ +static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, + bool interruptible, struct timespec *timeout) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct timespec before, now, wait_time={1,0}; + unsigned long timeout_jiffies; + long end; + bool wait_forever = true; + int ret; + + if (i915_seqno_passed(ring->get_seqno(ring), seqno)) + return 0; + + trace_i915_gem_request_wait_begin(ring, seqno); + + if (timeout != NULL) { + wait_time = *timeout; + wait_forever = false; + } + + timeout_jiffies = timespec_to_jiffies(&wait_time); + + if (WARN_ON(!ring->irq_get(ring))) + return -ENODEV; + + /* Record current time in case interrupted by signal, or wedged */ + getrawmonotonic(&before); + +#define EXIT_COND \ + (i915_seqno_passed(ring->get_seqno(ring), seqno) || \ + atomic_read(&dev_priv->mm.wedged)) + do { + if (interruptible) + end = wait_event_interruptible_timeout(ring->irq_queue, + EXIT_COND, + timeout_jiffies); + else + end = wait_event_timeout(ring->irq_queue, EXIT_COND, + timeout_jiffies); + + ret = i915_gem_check_wedge(dev_priv, interruptible); + if (ret) + end = ret; + } while (end == 0 && wait_forever); + + getrawmonotonic(&now); + + ring->irq_put(ring); + trace_i915_gem_request_wait_end(ring, seqno); +#undef EXIT_COND + + if (timeout) { + struct timespec sleep_time = timespec_sub(now, before); + *timeout = timespec_sub(*timeout, sleep_time); + } + + switch (end) { + case -EIO: + case -EAGAIN: /* Wedged */ + case -ERESTARTSYS: /* Signal */ + return (int)end; + case 0: /* Timeout */ + if (timeout) + set_normalized_timespec(timeout, 0, 0); + return -ETIME; + default: /* Completed */ + WARN_ON(end < 0); /* We're not aware of other errors */ + return 0; + } +} + +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists appropriately for that event. + */ +int +i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + int ret = 0; + + BUG_ON(seqno == 0); + + ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); + if (ret) + return ret; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + return ret; + + ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL); + + return ret; +} + +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + */ +int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) +{ + int ret; + + /* This function only exists to support waiting for existing rendering, + * not for emitting required flushes. */ - idle = true; - for_each_ring(ring, dev_priv, i) { - if (ring->gpu_caches_dirty) - i915_add_request(ring, NULL, NULL); + BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); - idle &= list_empty(&ring->request_list); + /* If there is rendering queued on the buffer being evicted, wait for + * it.
+ */ + if (obj->active) { + ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno); + if (ret) + return ret; + i915_gem_retire_requests_ring(obj->ring); } - if (!dev_priv->mm.suspended && !idle) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); - if (idle) - intel_mark_idle(dev); - - mutex_unlock(&dev->struct_mutex); + return 0; } /** @@ -2245,10 +2080,14 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) int ret; if (obj->active) { - ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; + ret = i915_gem_check_olr(obj->ring, + obj->last_rendering_seqno); + if (ret) + return ret; i915_gem_retire_requests_ring(obj->ring); } @@ -2308,7 +2147,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto out; if (obj->active) { - seqno = obj->last_read_seqno; + seqno = obj->last_rendering_seqno; ring = obj->ring; } @@ -2363,11 +2202,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, return 0; if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) - return i915_gem_object_wait_rendering(obj, false); + return i915_gem_object_wait_rendering(obj); idx = intel_ring_sync_index(from, to); - seqno = obj->last_read_seqno; + seqno = obj->last_rendering_seqno; if (seqno <= from->sync_seqno[idx]) return 0; @@ -2421,8 +2260,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (obj->pin_count) return -EBUSY; - BUG_ON(obj->pages == NULL); - ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -2433,6 +2270,22 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) i915_gem_object_finish_gtt(obj); + /* Move the object to the CPU domain to ensure that + * any possible CPU writes while it's not in the GTT + * are flushed when we go to remap it. + */ + if (ret == 0) + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret == -ERESTARTSYS) + return ret; + if (ret) { + /* In the event of a disaster, abandon all caches and + * hope for the best. + */ + i915_gem_clflush_object(obj); + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + /* release the fence reg _after_ flushing */ ret = i915_gem_object_put_fence(obj); if (ret) @@ -2448,8 +2301,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) } i915_gem_gtt_finish_object(obj); - list_del(&obj->mm_list); - list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + i915_gem_object_put_pages_gtt(obj); + + list_del_init(&obj->gtt_list); + list_del_init(&obj->mm_list); /* Avoid an unnecessary call to unbind on rebind. 
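The i915_gem_object_sync() change visible above keeps the old decision structure: same ring needs nothing, no semaphore support means a blocking CPU wait, and a ring pair that already synced past the object's seqno can skip the semaphore. A sketch of that decision table (the per-ring sync_seqno bookkeeping is simplified to a 2D array, so this is illustrative only):

#include <stdbool.h>
#include <stdio.h>

enum action { NOTHING, CPU_WAIT, GPU_SEMAPHORE };

/* Decide how ring `to` must wait for an object last used on ring
 * `from`, given the object's seqno and what has already been synced
 * between the two rings. */
static enum action sync_to(int from, int to, bool semaphores,
                           unsigned int obj_seqno,
                           unsigned int synced_seqno[][2])
{
    if (from == to)
        return NOTHING;               /* same ring orders itself  */
    if (!semaphores)
        return CPU_WAIT;              /* fall back to blocking    */
    if (obj_seqno <= synced_seqno[from][to])
        return NOTHING;               /* already waited that far  */
    synced_seqno[from][to] = obj_seqno;
    return GPU_SEMAPHORE;             /* emit ring-to-ring wait   */
}

int main(void)
{
    unsigned int synced[2][2] = { { 0 } };

    printf("%d\n", sync_to(0, 1, true, 10, synced)); /* 2: semaphore */
    printf("%d\n", sync_to(0, 1, true, 8, synced));  /* 0: covered   */
    printf("%d\n", sync_to(0, 1, false, 12, synced));/* 1: cpu wait  */
    return 0;
}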
*/ obj->map_and_fenceable = true; @@ -2457,14 +2312,48 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) obj->gtt_space = NULL; obj->gtt_offset = 0; + if (i915_gem_object_is_purgeable(obj)) + i915_gem_object_truncate(obj); + + return ret; +} + +int +i915_gem_flush_ring(struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + int ret; + + if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) + return 0; + + trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); + + ret = ring->flush(ring, invalidate_domains, flush_domains); + if (ret) + return ret; + + if (flush_domains & I915_GEM_GPU_DOMAINS) + i915_gem_process_flushing_list(ring, flush_domains); + return 0; } static int i915_ring_idle(struct intel_ring_buffer *ring) { - if (list_empty(&ring->active_list)) + int ret; + + if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; + if (!list_empty(&ring->gpu_write_list)) { + ret = i915_gem_flush_ring(ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + } + return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); } @@ -2483,6 +2372,10 @@ int i915_gpu_idle(struct drm_device *dev) ret = i915_ring_idle(ring); if (ret) return ret; + + /* Is the device fubar? */ + if (WARN_ON(!list_empty(&ring->gpu_write_list))) + return -EBUSY; } return 0; @@ -2655,8 +2548,21 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, static int i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) { + int ret; + + if (obj->fenced_gpu_access) { + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { + ret = i915_gem_flush_ring(obj->ring, + 0, obj->base.write_domain); + if (ret) + return ret; + } + + obj->fenced_gpu_access = false; + } + if (obj->last_fenced_seqno) { - int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); + ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); if (ret) return ret; @@ -2669,7 +2575,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) mb(); - obj->fenced_gpu_access = false; return 0; } @@ -2789,88 +2694,18 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) return 0; } -static bool i915_gem_valid_gtt_space(struct drm_device *dev, - struct drm_mm_node *gtt_space, - unsigned long cache_level) -{ - struct drm_mm_node *other; - - /* On non-LLC machines we have to be careful when putting differing - * types of snoopable memory together to avoid the prefetcher - * crossing memory domains and dying.
- */ - if (HAS_LLC(dev)) - return true; - - if (gtt_space == NULL) - return true; - - if (list_empty(&gtt_space->node_list)) - return true; - - other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); - if (other->allocated && !other->hole_follows && other->color != cache_level) - return false; - - other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); - if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) - return false; - - return true; -} - -static void i915_gem_verify_gtt(struct drm_device *dev) -{ -#if WATCH_GTT - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; - int err = 0; - - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { - if (obj->gtt_space == NULL) { - printk(KERN_ERR "object found on GTT list with no space reserved\n"); - err++; - continue; - } - - if (obj->cache_level != obj->gtt_space->color) { - printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", - obj->gtt_space->start, - obj->gtt_space->start + obj->gtt_space->size, - obj->cache_level, - obj->gtt_space->color); - err++; - continue; - } - - if (!i915_gem_valid_gtt_space(dev, - obj->gtt_space, - obj->cache_level)) { - printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", - obj->gtt_space->start, - obj->gtt_space->start + obj->gtt_space->size, - obj->cache_level); - err++; - continue; - } - } - - WARN_ON(err); -#endif -} - /** * Finds free space in the GTT aperture and binds the object there. */ static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, - bool map_and_fenceable, - bool nonblocking) + bool map_and_fenceable) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_mm_node *free_space; + gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; int ret; @@ -2910,67 +2745,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, return -E2BIG; } - ret = i915_gem_object_get_pages(obj); - if (ret) - return ret; - search_free: if (map_and_fenceable) free_space = - drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space, - size, alignment, obj->cache_level, - 0, dev_priv->mm.gtt_mappable_end, - false); + drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, + size, alignment, + 0, dev_priv->mm.gtt_mappable_end, + 0); else - free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, - size, alignment, obj->cache_level, - false); + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, + size, alignment, 0); if (free_space != NULL) { if (map_and_fenceable) obj->gtt_space = drm_mm_get_block_range_generic(free_space, - size, alignment, obj->cache_level, + size, alignment, 0, 0, dev_priv->mm.gtt_mappable_end, - false); + 0); else obj->gtt_space = - drm_mm_get_block_generic(free_space, - size, alignment, obj->cache_level, - false); + drm_mm_get_block(free_space, size, alignment); } if (obj->gtt_space == NULL) { + /* If the gtt is empty and we're still having trouble + * fitting our object in, we're out of memory.
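The removed i915_gem_valid_gtt_space() above encodes the non-LLC placement rule: a node may not touch a neighbour of a different cache "color" unless a guard hole sits between them, since the prefetcher can otherwise cross into a differently-cached mapping. A compilable model of the neighbour check:

#include <stdbool.h>
#include <stdio.h>

struct node {
    bool allocated;
    bool hole_follows;   /* free space after this node        */
    int color;           /* cache level of the mapping        */
};

/* Mirror of the two neighbour checks: the previous node must have a
 * hole after it or share our color, and we must have a hole after us
 * or share the next node's color. LLC machines skip all of it. */
static bool valid_placement(const struct node *prev,
                            const struct node *next,
                            const struct node *candidate,
                            bool has_llc)
{
    if (has_llc)
        return true;
    if (prev->allocated && !prev->hole_follows &&
        prev->color != candidate->color)
        return false;    /* touches a differently-cached node */
    if (next->allocated && !candidate->hole_follows &&
        next->color != candidate->color)
        return false;
    return true;
}

int main(void)
{
    struct node prev = { true, false, 0 };
    struct node next = { true, false, 1 };
    struct node cand = { true, true, 1 };  /* hole before next: ok */

    printf("%d\n", valid_placement(&prev, &next, &cand, false)); /* 0 */
    prev.hole_follows = true;              /* hole on both sides   */
    printf("%d\n", valid_placement(&prev, &next, &cand, false)); /* 1 */
    return 0;
}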
+ */ ret = i915_gem_evict_something(dev, size, alignment, - obj->cache_level, - map_and_fenceable, - nonblocking); + map_and_fenceable); if (ret) return ret; goto search_free; } - if (WARN_ON(!i915_gem_valid_gtt_space(dev, - obj->gtt_space, - obj->cache_level))) { + + ret = i915_gem_object_get_pages_gtt(obj, gfpmask); + if (ret) { drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; - return -EINVAL; - } + if (ret == -ENOMEM) { + /* first try to reclaim some memory by clearing the GTT */ + ret = i915_gem_evict_everything(dev, false); + if (ret) { + /* now try to shrink everyone else */ + if (gfpmask) { + gfpmask = 0; + goto search_free; + } + + return -ENOMEM; + } + + goto search_free; + } + + return ret; + } ret = i915_gem_gtt_prepare_object(obj); if (ret) { + i915_gem_object_put_pages_gtt(obj); drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; - return ret; + + if (i915_gem_evict_everything(dev, false)) + return ret; + + goto search_free; } if (!dev_priv->mm.aliasing_ppgtt) i915_gem_gtt_bind_object(obj, obj->cache_level); - list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + /* Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); + obj->gtt_offset = obj->gtt_space->start; fenceable = @@ -2983,7 +2840,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, obj->map_and_fenceable = mappable && fenceable; trace_i915_gem_object_bind(obj, map_and_fenceable); - i915_gem_verify_gtt(dev); return 0; } @@ -3010,7 +2866,18 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) trace_i915_gem_object_clflush(obj); - drm_clflush_sg(obj->pages); + drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); +} + +/** Flushes any GPU write domain for the object if it's dirty. */ +static int +i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) +{ + if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) + return 0; + + /* Queue the GPU write cache flushing we need. */ + return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -3079,10 +2946,16 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) return 0; - ret = i915_gem_object_wait_rendering(obj, !write); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; + if (obj->pending_gpu_write || write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + } + i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -3125,12 +2998,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; - } - if (obj->gtt_space) { ret = i915_gem_object_finish_gpu(obj); if (ret) @@ -3142,7 +3009,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * registers with snooped memory, so relinquish any fences * currently pointing to our region in the aperture. 
*/ - if (INTEL_INFO(dev)->gen < 6) { + if (INTEL_INFO(obj->base.dev)->gen < 6) { ret = i915_gem_object_put_fence(obj); if (ret) return ret; @@ -3153,8 +3020,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); - - obj->gtt_space->color = cache_level; } if (cache_level == I915_CACHE_NONE) { @@ -3181,72 +3046,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, } obj->cache_level = cache_level; - i915_gem_verify_gtt(dev); return 0; } -int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_caching *args = data; - struct drm_i915_gem_object *obj; - int ret; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } - - args->caching = obj->cache_level != I915_CACHE_NONE; - - drm_gem_object_unreference(&obj->base); -unlock: - mutex_unlock(&dev->struct_mutex); - return ret; -} - -int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_caching *args = data; - struct drm_i915_gem_object *obj; - enum i915_cache_level level; - int ret; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - switch (args->caching) { - case I915_CACHING_NONE: - level = I915_CACHE_NONE; - break; - case I915_CACHING_CACHED: - level = I915_CACHE_LLC; - break; - default: - return -EINVAL; - } - - obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } - - ret = i915_gem_object_set_cache_level(obj, level); - - drm_gem_object_unreference(&obj->base); -unlock: - mutex_unlock(&dev->struct_mutex); - return ret; -} - /* * Prepare buffer for display plane (scanout, cursors, etc). * Can be called from an uninterruptible phase (modesetting) and allows @@ -3260,6 +3062,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 old_read_domains, old_write_domain; int ret; + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; + if (pipelined != obj->ring) { ret = i915_gem_object_sync(obj, pipelined); if (ret) @@ -3283,7 +3089,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_object_pin(obj, alignment, true, false); + ret = i915_gem_object_pin(obj, alignment, true); if (ret) return ret; @@ -3295,7 +3101,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, /* It should now be out of any other write domains, and we can update * the domain values for our changes. 
*/ - obj->base.write_domain = 0; + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); obj->base.read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, @@ -3313,7 +3119,13 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) return 0; - ret = i915_gem_object_wait_rendering(obj, false); + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { + ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); + if (ret) + return ret; + } + + ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; @@ -3337,10 +3149,16 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) return 0; - ret = i915_gem_object_wait_rendering(obj, !write); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; + if (write || obj->pending_gpu_write) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + } + i915_gem_object_flush_gtt_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -3420,8 +3238,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, - bool map_and_fenceable, - bool nonblocking) + bool map_and_fenceable) { int ret; @@ -3446,8 +3263,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (obj->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment, - map_and_fenceable, - nonblocking); + map_and_fenceable); if (ret) return ret; } @@ -3505,7 +3321,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj->user_pin_count++; obj->pin_filp = file; if (obj->user_pin_count == 1) { - ret = i915_gem_object_pin(obj, args->alignment, true, false); + ret = i915_gem_object_pin(obj, args->alignment, true); if (ret) goto out; } @@ -3585,10 +3401,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_flush_active(obj); args->busy = obj->active; - if (obj->ring) { - BUILD_BUG_ON(I915_NUM_RINGS > 16); - args->busy |= intel_ring_flag(obj->ring) << 16; - } drm_gem_object_unreference(&obj->base); unlock: @@ -3637,8 +3449,9 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (obj->madv != __I915_MADV_PURGED) obj->madv = args->madv; - /* if the object is no longer attached, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) + /* if the object is no longer bound, discard its backing storage */ + if (i915_gem_object_is_purgeable(obj) && + obj->gtt_space == NULL) i915_gem_object_truncate(obj); args->retained = obj->madv != __I915_MADV_PURGED; @@ -3650,32 +3463,10 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return ret; } -void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops) -{ - INIT_LIST_HEAD(&obj->mm_list); - INIT_LIST_HEAD(&obj->gtt_list); - INIT_LIST_HEAD(&obj->ring_list); - INIT_LIST_HEAD(&obj->exec_list); - - obj->ops = ops; - - obj->fence_reg = I915_FENCE_REG_NONE; - obj->madv = I915_MADV_WILLNEED; - /* Avoid an unnecessary call to unbind on the first bind. 
*/ - obj->map_and_fenceable = true; - - i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); -} - -static const struct drm_i915_gem_object_ops i915_gem_object_ops = { - .get_pages = i915_gem_object_get_pages_gtt, - .put_pages = i915_gem_object_put_pages_gtt, -}; - struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct address_space *mapping; u32 mask; @@ -3699,7 +3490,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; mapping_set_gfp_mask(mapping, mask); - i915_gem_object_init(obj, &i915_gem_object_ops); + i915_gem_info_add_obj(dev_priv, size); obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; @@ -3721,6 +3512,17 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, } else obj->cache_level = I915_CACHE_NONE; + obj->base.driver_private = NULL; + obj->fence_reg = I915_FENCE_REG_NONE; + INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->gtt_list); + INIT_LIST_HEAD(&obj->ring_list); + INIT_LIST_HEAD(&obj->exec_list); + INIT_LIST_HEAD(&obj->gpu_write_list); + obj->madv = I915_MADV_WILLNEED; + /* Avoid an unnecessary call to unbind on the first bind. */ + obj->map_and_fenceable = true; + return obj; } @@ -3739,6 +3541,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); + if (gem_obj->import_attach) + drm_prime_gem_destroy(gem_obj, obj->sg_table); + if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); @@ -3754,14 +3559,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) dev_priv->mm.interruptible = was_interruptible; } - obj->pages_pin_count = 0; - i915_gem_object_put_pages(obj); - i915_gem_object_free_mmap_offset(obj); - - BUG_ON(obj->pages); - - if (obj->base.import_attach) - drm_prime_gem_destroy(&obj->base, NULL); + if (obj->base.map_list.map) + drm_gem_free_mmap_offset(&obj->base); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(dev_priv, obj->base.size); @@ -3792,7 +3591,7 @@ i915_gem_idle(struct drm_device *dev) /* Under UMS, be paranoid and evict. 
*/ if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_gem_evict_everything(dev); + i915_gem_evict_everything(dev, false); i915_gem_reset_fences(dev); @@ -4093,6 +3892,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); mutex_unlock(&dev->struct_mutex); @@ -4140,6 +3940,7 @@ init_ring_lists(struct intel_ring_buffer *ring) { INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); } void @@ -4149,10 +3950,10 @@ i915_gem_load(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; INIT_LIST_HEAD(&dev_priv->mm.active_list); + INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); - INIT_LIST_HEAD(&dev_priv->mm.unbound_list); - INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); + INIT_LIST_HEAD(&dev_priv->mm.gtt_list); for (i = 0; i < I915_NUM_RINGS; i++) init_ring_lists(&dev_priv->ring[i]); for (i = 0; i < I915_MAX_NUM_FENCES; i++) @@ -4396,6 +4197,18 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) spin_unlock(&file_priv->mm.lock); } +static int +i915_gpu_is_active(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int lists_empty; + + lists_empty = list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list); + + return !lists_empty; +} + static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) { @@ -4404,27 +4217,60 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) struct drm_i915_private, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_object *obj, *next; int nr_to_scan = sc->nr_to_scan; int cnt; if (!mutex_trylock(&dev->struct_mutex)) return 0; - if (nr_to_scan) { - nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); - if (nr_to_scan > 0) - i915_gem_shrink_all(dev_priv); + /* "fast-path" to count number of available objects */ + if (nr_to_scan == 0) { + cnt = 0; + list_for_each_entry(obj, + &dev_priv->mm.inactive_list, + mm_list) + cnt++; + mutex_unlock(&dev->struct_mutex); + return cnt / 100 * sysctl_vfs_cache_pressure; + } + +rescan: + /* first scan for clean buffers */ + i915_gem_retire_requests(dev); + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (i915_gem_object_is_purgeable(obj)) { + if (i915_gem_object_unbind(obj) == 0 && + --nr_to_scan == 0) + break; + } } + /* second pass, evict/count anything still on the inactive list */ cnt = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) - if (obj->pages_pin_count == 0) - cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) - if (obj->pin_count == 0 && obj->pages_pin_count == 0) - cnt += obj->base.size >> PAGE_SHIFT; + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (nr_to_scan && + i915_gem_object_unbind(obj) == 0) + nr_to_scan--; + else + cnt++; + } + if (nr_to_scan && i915_gpu_is_active(dev)) { + /* + * We are desperate for pages, so as a last resort, wait + * for the GPU to finish and discard whatever we can. + * This has a dramatic impact to reduce the number of + * OOM-killer events whilst running the GPU aggressively. 
+ */ + if (i915_gpu_idle(dev) == 0) + goto rescan; + } mutex_unlock(&dev->struct_mutex); - return cnt; + return cnt / 100 * sysctl_vfs_cache_pressure; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c index 4aa7ecf77ede..a9d58d72bb4d 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c @@ -97,7 +97,8 @@ static struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); -static int do_switch(struct i915_hw_context *to); +static int do_switch(struct drm_i915_gem_object *from_obj, + struct i915_hw_context *to, u32 seqno); static int get_context_size(struct drm_device *dev) { @@ -112,10 +113,7 @@ static int get_context_size(struct drm_device *dev) break; case 7: reg = I915_READ(GEN7_CXT_SIZE); - if (IS_HASWELL(dev)) - ret = HSW_CXT_TOTAL_SIZE(reg) * 64; - else - ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; + ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; break; default: BUG(); @@ -221,21 +219,20 @@ static int create_default_context(struct drm_i915_private *dev_priv) * default context. */ dev_priv->ring[RCS].default_context = ctx; - ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); - if (ret) - goto err_destroy; - - ret = do_switch(ctx); - if (ret) - goto err_unpin; + ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); + if (ret) { + do_destroy(ctx); + return ret; + } - DRM_DEBUG_DRIVER("Default HW context loaded\n"); - return 0; + ret = do_switch(NULL, ctx, 0); + if (ret) { + i915_gem_object_unpin(ctx->obj); + do_destroy(ctx); + } else { + DRM_DEBUG_DRIVER("Default HW context loaded\n"); + } -err_unpin: - i915_gem_object_unpin(ctx->obj); -err_destroy: - do_destroy(ctx); return ret; } @@ -362,19 +359,18 @@ mi_set_context(struct intel_ring_buffer *ring, return ret; } -static int do_switch(struct i915_hw_context *to) +static int do_switch(struct drm_i915_gem_object *from_obj, + struct i915_hw_context *to, + u32 seqno) { - struct intel_ring_buffer *ring = to->ring; - struct drm_i915_gem_object *from_obj = ring->last_context_obj; + struct intel_ring_buffer *ring = NULL; u32 hw_flags = 0; int ret; + BUG_ON(to == NULL); BUG_ON(from_obj != NULL && from_obj->pin_count == 0); - if (from_obj == to->obj) - return 0; - - ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); + ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); if (ret) return ret; @@ -397,6 +393,7 @@ static int do_switch(struct i915_hw_context *to) else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ hw_flags |= MI_FORCE_RESTORE; + ring = to->ring; ret = mi_set_context(ring, to, hw_flags); if (ret) { i915_gem_object_unpin(to->obj); @@ -410,7 +407,6 @@ static int do_switch(struct i915_hw_context *to) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from_obj != NULL) { - u32 seqno = i915_gem_next_request_seqno(ring); from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; i915_gem_object_move_to_active(from_obj, ring, seqno); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the @@ -421,7 +417,7 @@ static int do_switch(struct i915_hw_context *to) * swapped, but there is no way to do that yet. 
 */
 from_obj->dirty = 1;
- BUG_ON(from_obj->ring != ring);
+ BUG_ON(from_obj->ring != to->ring);
 i915_gem_object_unpin(from_obj);
 drm_gem_object_unreference(&from_obj->base);
@@ -452,7 +448,9 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 int to_id)
 {
 struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_file_private *file_priv = NULL;
 struct i915_hw_context *to;
+ struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 if (dev_priv->hw_contexts_disabled)
 return 0;
@@ -460,18 +458,21 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 if (ring != &dev_priv->ring[RCS])
 return 0;
+ if (file)
+ file_priv = file->driver_priv;
+
 if (to_id == DEFAULT_CONTEXT_ID) {
 to = ring->default_context;
 } else {
- if (file == NULL)
- return -EINVAL;
-
- to = i915_gem_context_get(file->driver_priv, to_id);
+ to = i915_gem_context_get(file_priv, to_id);
 if (to == NULL)
 return -ENOENT;
 }
- return do_switch(to);
+ if (from_obj == to->obj)
+ return 0;
+
+ return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
 }
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index ca3497e1108c..aa308e1337db 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,62 +28,35 @@
 #include <linux/dma-buf.h>
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
 {
 struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
- struct sg_table *st;
- struct scatterlist *src, *dst;
- int ret, i;
+ struct drm_device *dev = obj->base.dev;
+ int npages = obj->base.size / PAGE_SIZE;
+ struct sg_table *sg = NULL;
+ int ret;
+ int nents;
- ret = i915_mutex_lock_interruptible(obj->base.dev);
+ ret = i915_mutex_lock_interruptible(dev);
 if (ret)
 return ERR_PTR(ret);
- ret = i915_gem_object_get_pages(obj);
- if (ret) {
- st = ERR_PTR(ret);
- goto out;
- }
-
- /* Copy sg so that we make an independent mapping */
- st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (st == NULL) {
- st = ERR_PTR(-ENOMEM);
- goto out;
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret)
+ goto out;
 }
- ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
- if (ret) {
- kfree(st);
- st = ERR_PTR(ret);
- goto out;
- }
-
- src = obj->pages->sgl;
- dst = st->sgl;
- for (i = 0; i < obj->pages->nents; i++) {
- sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
- dst = sg_next(dst);
- src = sg_next(src);
- }
-
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- sg_free_table(st);
- kfree(st);
- st = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- i915_gem_object_pin_pages(obj);
-
+ /* link the pages into an SG then map the sg */
+ sg = drm_prime_pages_to_sg(obj->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
 out:
- mutex_unlock(&obj->base.dev->struct_mutex);
- return st;
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
 }
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg,
- enum dma_data_direction dir)
+ struct sg_table *sg, enum dma_data_direction dir)
 {
 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 sg_free_table(sg);
@@ -105,9 +78,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 struct drm_i915_gem_object *obj = dma_buf->priv;
 struct drm_device *dev = obj->base.dev;
- struct scatterlist *sg;
- 
struct page **pages; - int ret, i; + int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -118,34 +89,24 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) goto out_unlock; } - ret = i915_gem_object_get_pages(obj); - if (ret) - goto error; - - ret = -ENOMEM; - - pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *)); - if (pages == NULL) - goto error; - - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) - pages[i] = sg_page(sg); - - obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL); - drm_free_large(pages); + if (!obj->pages) { + ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ERR_PTR(ret); + } + } - if (!obj->dma_buf_vmapping) - goto error; + obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); + if (!obj->dma_buf_vmapping) { + DRM_ERROR("failed to vmap object\n"); + goto out_unlock; + } obj->vmapping_count = 1; - i915_gem_object_pin_pages(obj); out_unlock: mutex_unlock(&dev->struct_mutex); return obj->dma_buf_vmapping; - -error: - mutex_unlock(&dev->struct_mutex); - return ERR_PTR(ret); } static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) @@ -158,11 +119,10 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) if (ret) return; - if (--obj->vmapping_count == 0) { + --obj->vmapping_count; + if (obj->vmapping_count == 0) { vunmap(obj->dma_buf_vmapping); obj->dma_buf_vmapping = NULL; - - i915_gem_object_unpin_pages(obj); } mutex_unlock(&dev->struct_mutex); } @@ -191,22 +151,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * return -EINVAL; } -static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) -{ - struct drm_i915_gem_object *obj = dma_buf->priv; - struct drm_device *dev = obj->base.dev; - int ret; - bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_set_to_cpu_domain(obj, write); - mutex_unlock(&dev->struct_mutex); - return ret; -} - static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, @@ -218,47 +162,25 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .mmap = i915_gem_dmabuf_mmap, .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, - .begin_cpu_access = i915_gem_begin_cpu_access, }; struct dma_buf *i915_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *gem_obj, int flags) + struct drm_gem_object *gem_obj, int flags) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600); -} - -static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) -{ - struct sg_table *sg; - - sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sg)) - return PTR_ERR(sg); - - obj->pages = sg; - obj->has_dma_mapping = true; - return 0; + return dma_buf_export(obj, &i915_dmabuf_ops, + obj->base.size, 0600); } -static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) -{ - dma_buf_unmap_attachment(obj->base.import_attach, - obj->pages, DMA_BIDIRECTIONAL); - obj->has_dma_mapping = false; -} - -static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { - .get_pages = i915_gem_object_get_pages_dmabuf, - .put_pages = 
i915_gem_object_put_pages_dmabuf, -}; - struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) + struct dma_buf *dma_buf) { struct dma_buf_attachment *attach; + struct sg_table *sg; struct drm_i915_gem_object *obj; + int npages; + int size; int ret; /* is this one of own objects? */ @@ -276,24 +198,34 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, if (IS_ERR(attach)) return ERR_CAST(attach); + sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); + goto fail_detach; + } + + size = dma_buf->size; + npages = size / PAGE_SIZE; obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (obj == NULL) { ret = -ENOMEM; - goto fail_detach; + goto fail_unmap; } - ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); + ret = drm_gem_private_object_init(dev, &obj->base, size); if (ret) { kfree(obj); - goto fail_detach; + goto fail_unmap; } - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); + obj->sg_table = sg; obj->base.import_attach = attach; return &obj->base; +fail_unmap: + dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); return ERR_PTR(ret); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_evict.c b/trunk/drivers/gpu/drm/i915/i915_gem_evict.c index a2d8acde8550..eba0308f10e3 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_evict.c @@ -44,8 +44,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) int i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, unsigned cache_level, - bool mappable, bool nonblocking) + unsigned alignment, bool mappable) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; @@ -80,11 +79,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, INIT_LIST_HEAD(&unwind_list); if (mappable) drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, - min_size, alignment, cache_level, + min_size, alignment, 0, 0, dev_priv->mm.gtt_mappable_end); else drm_mm_init_scan(&dev_priv->mm.gtt_space, - min_size, alignment, cache_level); + min_size, alignment, 0); /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { @@ -92,16 +91,29 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, goto found; } - if (nonblocking) - goto none; - /* Now merge in the soon-to-be-expired objects... */ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + /* Does the object require an outstanding flush? */ + if (obj->base.write_domain) + continue; + + if (mark_free(obj, &unwind_list)) + goto found; + } + + /* Finally add anything with a pending flush (in order of retirement) */ + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { + if (mark_free(obj, &unwind_list)) + goto found; + } + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (!obj->base.write_domain) + continue; + if (mark_free(obj, &unwind_list)) goto found; } -none: /* Nothing found, clean up and bail out! 
*/ while (!list_empty(&unwind_list)) { obj = list_first_entry(&unwind_list, @@ -152,7 +164,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, } int -i915_gem_evict_everything(struct drm_device *dev) +i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj, *next; @@ -160,11 +172,12 @@ i915_gem_evict_everything(struct drm_device *dev) int ret; lists_empty = (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.active_list)); if (lists_empty) return -ENOSPC; - trace_i915_gem_evict_everything(dev); + trace_i915_gem_evict_everything(dev, purgeable_only); /* The gpu_idle will flush everything in the write domain to the * active list. Then we must move everything off the active list @@ -176,11 +189,16 @@ i915_gem_evict_everything(struct drm_device *dev) i915_gem_retire_requests(dev); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + /* Having flushed everything, unbind() should never raise an error */ list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, mm_list) - if (obj->pin_count == 0) - WARN_ON(i915_gem_object_unbind(obj)); + &dev_priv->mm.inactive_list, mm_list) { + if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { + if (obj->pin_count == 0) + WARN_ON(i915_gem_object_unbind(obj)); + } + } return 0; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 6a2f3e50c714..ff2819ea0813 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -34,6 +34,180 @@ #include "intel_drv.h" #include +struct change_domains { + uint32_t invalidate_domains; + uint32_t flush_domains; + uint32_t flush_rings; + uint32_t flips; +}; + +/* + * Set the next domain for the specified object. This + * may not actually perform the necessary flushing/invaliding though, + * as that may want to be batched with other set_domain operations + * + * This is (we hope) the only really tricky part of gem. The goal + * is fairly simple -- track which caches hold bits of the object + * and make sure they remain coherent. A few concrete examples may + * help to explain how it works. For shorthand, we use the notation + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the + * a pair of read and write domain masks. + * + * Case 1: the batch buffer + * + * 1. Allocated + * 2. Written by CPU + * 3. Mapped to GTT + * 4. Read by GPU + * 5. Unmapped from GTT + * 6. Freed + * + * Let's take these a step at a time + * + * 1. Allocated + * Pages allocated from the kernel may still have + * cache contents, so we set them to (CPU, CPU) always. + * 2. Written by CPU (using pwrite) + * The pwrite function calls set_domain (CPU, CPU) and + * this function does nothing (as nothing changes) + * 3. Mapped by GTT + * This function asserts that the object is not + * currently in any GPU-based read or write domains + * 4. Read by GPU + * i915_gem_execbuffer calls set_domain (COMMAND, 0). + * As write_domain is zero, this function adds in the + * current read domains (CPU+COMMAND, 0). + * flush_domains is set to CPU. + * invalidate_domains is set to COMMAND + * clflush is run to get data out of the CPU caches + * then i915_dev_set_domain calls i915_gem_flush to + * emit an MI_FLUSH and drm_agp_chipset_flush + * 5. 
Unmapped from GTT + * i915_gem_object_unbind calls set_domain (CPU, CPU) + * flush_domains and invalidate_domains end up both zero + * so no flushing/invalidating happens + * 6. Freed + * yay, done + * + * Case 2: The shared render buffer + * + * 1. Allocated + * 2. Mapped to GTT + * 3. Read/written by GPU + * 4. set_domain to (CPU,CPU) + * 5. Read/written by CPU + * 6. Read/written by GPU + * + * 1. Allocated + * Same as last example, (CPU, CPU) + * 2. Mapped to GTT + * Nothing changes (assertions find that it is not in the GPU) + * 3. Read/written by GPU + * execbuffer calls set_domain (RENDER, RENDER) + * flush_domains gets CPU + * invalidate_domains gets GPU + * clflush (obj) + * MI_FLUSH and drm_agp_chipset_flush + * 4. set_domain (CPU, CPU) + * flush_domains gets GPU + * invalidate_domains gets CPU + * wait_rendering (obj) to make sure all drawing is complete. + * This will include an MI_FLUSH to get the data from GPU + * to memory + * clflush (obj) to invalidate the CPU cache + * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) + * 5. Read/written by CPU + * cache lines are loaded and dirtied + * 6. Read written by GPU + * Same as last GPU access + * + * Case 3: The constant buffer + * + * 1. Allocated + * 2. Written by CPU + * 3. Read by GPU + * 4. Updated (written) by CPU again + * 5. Read by GPU + * + * 1. Allocated + * (CPU, CPU) + * 2. Written by CPU + * (CPU, CPU) + * 3. Read by GPU + * (CPU+RENDER, 0) + * flush_domains = CPU + * invalidate_domains = RENDER + * clflush (obj) + * MI_FLUSH + * drm_agp_chipset_flush + * 4. Updated (written) by CPU again + * (CPU, CPU) + * flush_domains = 0 (no previous write domain) + * invalidate_domains = 0 (no new read domains) + * 5. Read by GPU + * (CPU+RENDER, 0) + * flush_domains = CPU + * invalidate_domains = RENDER + * clflush (obj) + * MI_FLUSH + * drm_agp_chipset_flush + */ +static void +i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring, + struct change_domains *cd) +{ + uint32_t invalidate_domains = 0, flush_domains = 0; + + /* + * If the object isn't moving to a new write domain, + * let the object stay in multiple read domains + */ + if (obj->base.pending_write_domain == 0) + obj->base.pending_read_domains |= obj->base.read_domains; + + /* + * Flush the current write domain if + * the new read domains don't match. Invalidate + * any read domains which differ from the old + * write domain + */ + if (obj->base.write_domain && + (((obj->base.write_domain != obj->base.pending_read_domains || + obj->ring != ring)) || + (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { + flush_domains |= obj->base.write_domain; + invalidate_domains |= + obj->base.pending_read_domains & ~obj->base.write_domain; + } + /* + * Invalidate any read caches which may have + * stale data. That is, any new read domains. + */ + invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) + i915_gem_clflush_object(obj); + + if (obj->base.pending_write_domain) + cd->flips |= atomic_read(&obj->pending_flip); + + /* The actual obj->write_domain will be updated with + * pending_write_domain after we emit the accumulated flush for all + * of our domain changes in execbuffers (which clears objects' + * write_domains). So if we have a current write domain that we + * aren't changing, set pending_write_domain to that. 
+ */ + if (flush_domains == 0 && obj->base.pending_write_domain == 0) + obj->base.pending_write_domain = obj->base.write_domain; + + cd->invalidate_domains |= invalidate_domains; + cd->flush_domains |= flush_domains; + if (flush_domains & I915_GEM_GPU_DOMAINS) + cd->flush_rings |= intel_ring_flag(obj->ring); + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + cd->flush_rings |= intel_ring_flag(ring); +} + struct eb_objects { int and; struct hlist_head buckets[0]; @@ -44,7 +218,6 @@ eb_create(int size) { struct eb_objects *eb; int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; - BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); while (count > size) count >>= 1; eb = kzalloc(count*sizeof(struct hlist_head) + @@ -96,7 +269,6 @@ eb_destroy(struct eb_objects *eb) static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || - !obj->map_and_fenceable || obj->cache_level != I915_CACHE_NONE); } @@ -211,8 +383,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (ret) return ret; - vaddr = kmap_atomic(i915_gem_object_get_page(obj, - reloc->offset >> PAGE_SHIFT)); + vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); *(uint32_t *)(vaddr + page_offset) = reloc->delta; kunmap_atomic(vaddr); } else { @@ -333,8 +504,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, return ret; } -#define __EXEC_OBJECT_HAS_PIN (1<<31) -#define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_HAS_FENCE (1<<31) static int need_reloc_mappable(struct drm_i915_gem_object *obj) @@ -344,10 +514,9 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) } static int -i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) +pin_and_fence_object(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence, need_mappable; @@ -359,17 +528,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); + ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); if (ret) return ret; - entry->flags |= __EXEC_OBJECT_HAS_PIN; - if (has_fenced_gpu_access) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { ret = i915_gem_object_get_fence(obj); if (ret) - return ret; + goto err_unpin; if (i915_gem_object_pin_fence(obj)) entry->flags |= __EXEC_OBJECT_HAS_FENCE; @@ -378,35 +545,12 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, } } - /* Ensure ppgtt mapping exists if needed */ - if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { - i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, - obj, obj->cache_level); - - obj->has_aliasing_ppgtt_mapping = 1; - } - entry->offset = obj->gtt_offset; return 0; -} - -static void -i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) -{ - struct drm_i915_gem_exec_object2 *entry; - - if (!obj->gtt_space) - return; - - entry = obj->exec_entry; - - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) - i915_gem_object_unpin_fence(obj); - if (entry->flags & __EXEC_OBJECT_HAS_PIN) - i915_gem_object_unpin(obj); - - entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); +err_unpin: + 
i915_gem_object_unpin(obj); + return ret; } static int @@ -414,10 +558,11 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct drm_file *file, struct list_head *objects) { + drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_i915_gem_object *obj; - struct list_head ordered_objects; + int ret, retry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; - int retry; + struct list_head ordered_objects; INIT_LIST_HEAD(&ordered_objects); while (!list_empty(objects)) { @@ -442,7 +587,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; - obj->pending_fenced_gpu_access = false; } list_splice(&ordered_objects, objects); @@ -455,12 +599,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, * 2. Bind new objects. * 3. Decrement pin count. * - * This avoid unnecessary unbinding of later objects in order to make + * This avoid unnecessary unbinding of later objects in order to makr * room for the earlier objects *unless* we need to defragment. */ retry = 0; do { - int ret = 0; + ret = 0; /* Unbind any ill-fitting objects or pin. */ list_for_each_entry(obj, objects, exec_list) { @@ -480,7 +624,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); else - ret = i915_gem_execbuffer_reserve_object(obj, ring); + ret = pin_and_fence_object(obj, ring); if (ret) goto err; } @@ -490,22 +634,77 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, if (obj->gtt_space) continue; - ret = i915_gem_execbuffer_reserve_object(obj, ring); - if (ret) - goto err; + ret = pin_and_fence_object(obj, ring); + if (ret) { + int ret_ignore; + + /* This can potentially raise a harmless + * -EINVAL if we failed to bind in the above + * call. It cannot raise -EINTR since we know + * that the bo is freshly bound and so will + * not need to be flushed or waited upon. + */ + ret_ignore = i915_gem_object_unbind(obj); + (void)ret_ignore; + WARN_ON(obj->gtt_space); + break; + } } -err: /* Decrement pin count for bound objects */ - list_for_each_entry(obj, objects, exec_list) - i915_gem_execbuffer_unreserve_object(obj); + /* Decrement pin count for bound objects */ + list_for_each_entry(obj, objects, exec_list) { + struct drm_i915_gem_exec_object2 *entry; + + if (!obj->gtt_space) + continue; + + entry = obj->exec_entry; + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { + i915_gem_object_unpin_fence(obj); + entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; + } + + i915_gem_object_unpin(obj); + + /* ... and ensure ppgtt mapping exist if needed. */ + if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { + i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, + obj, obj->cache_level); - if (ret != -ENOSPC || retry++) + obj->has_aliasing_ppgtt_mapping = 1; + } + } + + if (ret != -ENOSPC || retry > 1) return ret; - ret = i915_gem_evict_everything(ring->dev); + /* First attempt, just clear anything that is purgeable. + * Second attempt, clear the entire GTT. 
+ */ + ret = i915_gem_evict_everything(ring->dev, retry == 0); if (ret) return ret; + + retry++; } while (1); + +err: + list_for_each_entry_continue_reverse(obj, objects, exec_list) { + struct drm_i915_gem_exec_object2 *entry; + + if (!obj->gtt_space) + continue; + + entry = obj->exec_entry; + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { + i915_gem_object_unpin_fence(obj); + entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; + } + + i915_gem_object_unpin(obj); + } + + return ret; } static int @@ -611,6 +810,18 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, return ret; } +static void +i915_gem_execbuffer_flush(struct drm_device *dev, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + if (flush_domains & I915_GEM_DOMAIN_CPU) + intel_gtt_chipset_flush(); + + if (flush_domains & I915_GEM_DOMAIN_GTT) + wmb(); +} + static int i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) { @@ -643,45 +854,48 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) return 0; } + static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, struct list_head *objects) { struct drm_i915_gem_object *obj; - uint32_t flush_domains = 0; - uint32_t flips = 0; + struct change_domains cd; int ret; - list_for_each_entry(obj, objects, exec_list) { - ret = i915_gem_object_sync(obj, ring); - if (ret) - return ret; + memset(&cd, 0, sizeof(cd)); + list_for_each_entry(obj, objects, exec_list) + i915_gem_object_set_to_gpu_domain(obj, ring, &cd); - if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); - - if (obj->base.pending_write_domain) - flips |= atomic_read(&obj->pending_flip); - - flush_domains |= obj->base.write_domain; + if (cd.invalidate_domains | cd.flush_domains) { + i915_gem_execbuffer_flush(ring->dev, + cd.invalidate_domains, + cd.flush_domains); } - if (flips) { - ret = i915_gem_execbuffer_wait_for_flips(ring, flips); + if (cd.flips) { + ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); if (ret) return ret; } - if (flush_domains & I915_GEM_DOMAIN_CPU) - intel_gtt_chipset_flush(); - - if (flush_domains & I915_GEM_DOMAIN_GTT) - wmb(); + list_for_each_entry(obj, objects, exec_list) { + ret = i915_gem_object_sync(obj, ring); + if (ret) + return ret; + } /* Unconditionally invalidate gpu caches and ensure that we do flush * any residual writes from the previous batch. */ - return intel_ring_invalidate_all_caches(ring); + ret = i915_gem_flush_ring(ring, + I915_GEM_GPU_DOMAINS, + ring->gpu_caches_dirty ? 
I915_GEM_GPU_DOMAINS : 0); + if (ret) + return ret; + + ring->gpu_caches_dirty = false; + return 0; } static bool @@ -729,8 +943,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, struct drm_i915_gem_object *obj; list_for_each_entry(obj, objects, exec_list) { - u32 old_read = obj->base.read_domains; - u32 old_write = obj->base.write_domain; + u32 old_read = obj->base.read_domains; + u32 old_write = obj->base.write_domain; + obj->base.read_domains = obj->base.pending_read_domains; obj->base.write_domain = obj->base.pending_write_domain; @@ -739,13 +954,17 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, i915_gem_object_move_to_active(obj, ring, seqno); if (obj->base.write_domain) { obj->dirty = 1; - obj->last_write_seqno = seqno; + obj->pending_gpu_write = true; + list_move_tail(&obj->gpu_write_list, + &ring->gpu_write_list); if (obj->pin_count) /* check for potential scanout */ - intel_mark_fb_busy(obj); + intel_mark_busy(ring->dev, obj); } trace_i915_gem_object_change_domain(obj, old_read, old_write); } + + intel_mark_busy(ring->dev, NULL); } static void @@ -753,11 +972,16 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring) { + struct drm_i915_gem_request *request; + /* Unconditionally force add_request to emit a full flush. */ ring->gpu_caches_dirty = true; /* Add a breadcrumb for the completion of the batch buffer */ - (void)i915_add_request(ring, file, NULL); + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL || i915_add_request(ring, file, request)) { + kfree(request); + } } static int @@ -1103,7 +1327,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec_list, - (void __user *)(uintptr_t)args->buffers_ptr, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1142,7 +1367,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) exec_list[i].offset = exec2_list[i].offset; /* ... and back out to userspace */ - ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, exec_list, sizeof(*exec_list) * args->buffer_count); if (ret) { @@ -1196,7 +1422,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
*/ - ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, exec2_list, sizeof(*exec2_list) * args->buffer_count); if (ret) { diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index 47e427e4bc67..60815b861ec2 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -167,7 +167,8 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) } static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, - const struct sg_table *pages, + struct scatterlist *sg_list, + unsigned sg_len, unsigned first_entry, uint32_t pte_flags) { @@ -179,12 +180,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, struct scatterlist *sg; /* init sg walking */ - sg = pages->sgl; + sg = sg_list; i = 0; segment_len = sg_dma_len(sg) >> PAGE_SHIFT; m = 0; - while (i < pages->nents) { + while (i < sg_len) { pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { @@ -193,11 +194,13 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, pt_vaddr[j] = pte | pte_flags; /* grab the next page */ - if (++m == segment_len) { - if (++i == pages->nents) + m++; + if (m == segment_len) { + sg = sg_next(sg); + i++; + if (i == sg_len) break; - sg = sg_next(sg); segment_len = sg_dma_len(sg) >> PAGE_SHIFT; m = 0; } @@ -210,10 +213,44 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, } } +static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, + unsigned first_entry, unsigned num_entries, + struct page **pages, uint32_t pte_flags) +{ + uint32_t *pt_vaddr, pte; + unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; + unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; + unsigned last_pte, i; + dma_addr_t page_addr; + + while (num_entries) { + last_pte = first_pte + num_entries; + last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES); + + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); + + for (i = first_pte; i < last_pte; i++) { + page_addr = page_to_phys(*pages); + pte = GEN6_PTE_ADDR_ENCODE(page_addr); + pt_vaddr[i] = pte | pte_flags; + + pages++; + } + + kunmap_atomic(pt_vaddr); + + num_entries -= last_pte - first_pte; + first_pte = 0; + act_pd++; + } +} + void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; uint32_t pte_flags = GEN6_PTE_VALID; switch (cache_level) { @@ -224,7 +261,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, pte_flags |= GEN6_PTE_CACHE_LLC; break; case I915_CACHE_NONE: - if (IS_HASWELL(obj->base.dev)) + if (IS_HASWELL(dev)) pte_flags |= HSW_PTE_UNCACHED; else pte_flags |= GEN6_PTE_UNCACHED; @@ -233,10 +270,26 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, BUG(); } - i915_ppgtt_insert_sg_entries(ppgtt, - obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, - pte_flags); + if (obj->sg_table) { + i915_ppgtt_insert_sg_entries(ppgtt, + obj->sg_table->sgl, + obj->sg_table->nents, + obj->gtt_space->start >> PAGE_SHIFT, + pte_flags); + } else if (dev_priv->mm.gtt->needs_dmar) { + BUG_ON(!obj->sg_list); + + i915_ppgtt_insert_sg_entries(ppgtt, + obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, + pte_flags); + } else + i915_ppgtt_insert_pages(ppgtt, + 
obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + pte_flags); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, @@ -298,7 +351,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { i915_gem_clflush_object(obj); i915_gem_gtt_bind_object(obj, obj->cache_level); } @@ -308,26 +361,44 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) { - if (obj->has_dma_mapping) - return 0; - - if (!dma_map_sg(&obj->base.dev->pdev->dev, - obj->pages->sgl, obj->pages->nents, - PCI_DMA_BIDIRECTIONAL)) - return -ENOSPC; + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; - return 0; + /* don't map imported dma buf objects */ + if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table) + return intel_gtt_map_memory(obj->pages, + obj->base.size >> PAGE_SHIFT, + &obj->sg_list, + &obj->num_sg); + else + return 0; } void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); - intel_gtt_insert_sg_entries(obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, - agp_type); + if (obj->sg_table) { + intel_gtt_insert_sg_entries(obj->sg_table->sgl, + obj->sg_table->nents, + obj->gtt_space->start >> PAGE_SHIFT, + agp_type); + } else if (dev_priv->mm.gtt->needs_dmar) { + BUG_ON(!obj->sg_list); + + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, + agp_type); + } else + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + agp_type); + obj->has_global_gtt_mapping = 1; } @@ -347,31 +418,14 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) interruptible = do_idling(dev_priv); - if (!obj->has_dma_mapping) - dma_unmap_sg(&dev->pdev->dev, - obj->pages->sgl, obj->pages->nents, - PCI_DMA_BIDIRECTIONAL); + if (obj->sg_list) { + intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); + obj->sg_list = NULL; + } undo_idling(dev_priv, interruptible); } -static void i915_gtt_color_adjust(struct drm_mm_node *node, - unsigned long color, - unsigned long *start, - unsigned long *end) -{ - if (node->color != color) - *start += 4096; - - if (!list_empty(&node->node_list)) { - node = list_entry(node->node_list.next, - struct drm_mm_node, - node_list); - if (node->allocated && node->color != color) - *end -= 4096; - } -} - void i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, @@ -381,8 +435,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev, /* Substract the guard page ... 
*/ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); - if (!HAS_LLC(dev)) - dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; dev_priv->mm.gtt_start = start; dev_priv->mm.gtt_mappable_end = mappable_end; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c index 8093ecd2ea31..b964df51cec7 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -470,20 +470,18 @@ i915_gem_swizzle_page(struct page *page) void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct scatterlist *sg; int page_count = obj->base.size >> PAGE_SHIFT; int i; if (obj->bit_17 == NULL) return; - for_each_sg(obj->pages->sgl, sg, page_count, i) { - struct page *page = sg_page(sg); - char new_bit_17 = page_to_phys(page) >> 17; + for (i = 0; i < page_count; i++) { + char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) { - i915_gem_swizzle_page(page); - set_page_dirty(page); + i915_gem_swizzle_page(obj->pages[i]); + set_page_dirty(obj->pages[i]); } } } @@ -491,7 +489,6 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct scatterlist *sg; int page_count = obj->base.size >> PAGE_SHIFT; int i; @@ -505,9 +502,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) } } - for_each_sg(obj->pages->sgl, sg, page_count, i) { - struct page *page = sg_page(sg); - if (page_to_phys(page) & (1 << 17)) + for (i = 0; i < page_count; i++) { + if (page_to_phys(obj->pages[i]) & (1 << 17)) __set_bit(i, obj->bit_17); else __clear_bit(i, obj->bit_17); diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index d7f0066538a9..5249640cce13 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -296,21 +296,11 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_helper_hpd_irq_event(dev); } -/* defined intel_pm.c */ -extern spinlock_t mchdev_lock; - -static void ironlake_handle_rps_change(struct drm_device *dev) +static void i915_handle_rps_change(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; - u8 new_delay; - unsigned long flags; - - spin_lock_irqsave(&mchdev_lock, flags); - - I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); - - new_delay = dev_priv->ips.cur_delay; + u8 new_delay = dev_priv->cur_delay; I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); busy_up = I915_READ(RCPREVBSYTUPAVG); @@ -320,21 +310,19 @@ static void ironlake_handle_rps_change(struct drm_device *dev) /* Handle RCS change request from hw */ if (busy_up > max_avg) { - if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) - new_delay = dev_priv->ips.cur_delay - 1; - if (new_delay < dev_priv->ips.max_delay) - new_delay = dev_priv->ips.max_delay; + if (dev_priv->cur_delay != dev_priv->max_delay) + new_delay = dev_priv->cur_delay - 1; + if (new_delay < dev_priv->max_delay) + new_delay = dev_priv->max_delay; } else if (busy_down < min_avg) { - if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) - new_delay = dev_priv->ips.cur_delay + 1; - if (new_delay > dev_priv->ips.min_delay) - new_delay = dev_priv->ips.min_delay; + if (dev_priv->cur_delay != dev_priv->min_delay) + new_delay = dev_priv->cur_delay + 1; + if (new_delay > dev_priv->min_delay) + new_delay = dev_priv->min_delay; } if 
(ironlake_set_drps(dev, new_delay)) - dev_priv->ips.cur_delay = new_delay; - - spin_unlock_irqrestore(&mchdev_lock, flags); + dev_priv->cur_delay = new_delay; return; } @@ -347,7 +335,7 @@ static void notify_ring(struct drm_device *dev, if (ring->obj == NULL) return; - trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); + trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); wake_up_all(&ring->irq_queue); if (i915_enable_hangcheck) { @@ -361,16 +349,16 @@ static void notify_ring(struct drm_device *dev, static void gen6_pm_rps_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - rps.work); + rps_work); u32 pm_iir, pm_imr; u8 new_delay; - spin_lock_irq(&dev_priv->rps.lock); - pm_iir = dev_priv->rps.pm_iir; - dev_priv->rps.pm_iir = 0; + spin_lock_irq(&dev_priv->rps_lock); + pm_iir = dev_priv->pm_iir; + dev_priv->pm_iir = 0; pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->rps_lock); if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) return; @@ -378,17 +366,11 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_lock(&dev_priv->dev->struct_mutex); if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) - new_delay = dev_priv->rps.cur_delay + 1; + new_delay = dev_priv->cur_delay + 1; else - new_delay = dev_priv->rps.cur_delay - 1; + new_delay = dev_priv->cur_delay - 1; - /* sysfs frequency interfaces may have snuck in while servicing the - * interrupt - */ - if (!(new_delay > dev_priv->rps.max_delay || - new_delay < dev_priv->rps.min_delay)) { - gen6_set_rps(dev_priv->dev, new_delay); - } + gen6_set_rps(dev_priv->dev, new_delay); mutex_unlock(&dev_priv->dev->struct_mutex); } @@ -462,7 +444,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long flags; - if (!HAS_L3_GPU_CACHE(dev)) + if (!IS_IVYBRIDGE(dev)) return; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -506,19 +488,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, * IIR bits should never already be set because IMR should * prevent an interrupt from being shown in IIR. The warning * displays a case where we've unsafely cleared - * dev_priv->rps.pm_iir. Although missing an interrupt of the same + * dev_priv->pm_iir. Although missing an interrupt of the same * type is not a problem, it displays a problem in the logic. * - * The mask bit in IMR is cleared by dev_priv->rps.work. + * The mask bit in IMR is cleared by rps_work. 
*/ - spin_lock_irqsave(&dev_priv->rps.lock, flags); - dev_priv->rps.pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); + spin_lock_irqsave(&dev_priv->rps_lock, flags); + dev_priv->pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock_irqrestore(&dev_priv->rps_lock, flags); - queue_work(dev_priv->wq, &dev_priv->rps.work); + queue_work(dev_priv->wq, &dev_priv->rps_work); } static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) @@ -811,8 +793,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) ibx_irq_handler(dev, pch_iir); } - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_handle_rps_change(dev); + if (de_iir & DE_PCU_EVENT) { + I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); + i915_handle_rps_change(dev); + } if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) gen6_queue_rps_work(dev_priv, pm_iir); @@ -859,55 +843,26 @@ static void i915_error_work_func(struct work_struct *work) } } -/* NB: please notice the memset */ -static void i915_get_extra_instdone(struct drm_device *dev, - uint32_t *instdone) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - - switch(INTEL_INFO(dev)->gen) { - case 2: - case 3: - instdone[0] = I915_READ(INSTDONE); - break; - case 4: - case 5: - case 6: - instdone[0] = I915_READ(INSTDONE_I965); - instdone[1] = I915_READ(INSTDONE1); - break; - default: - WARN_ONCE(1, "Unsupported platform\n"); - case 7: - instdone[0] = I915_READ(GEN7_INSTDONE_1); - instdone[1] = I915_READ(GEN7_SC_INSTDONE); - instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); - instdone[3] = I915_READ(GEN7_ROW_INSTDONE); - break; - } -} - #ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src) { struct drm_i915_error_object *dst; - int i, count; + int page, page_count; u32 reloc_offset; if (src == NULL || src->pages == NULL) return NULL; - count = src->base.size / PAGE_SIZE; + page_count = src->base.size / PAGE_SIZE; - dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); + dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC); if (dst == NULL) return NULL; reloc_offset = src->gtt_offset; - for (i = 0; i < count; i++) { + for (page = 0; page < page_count; page++) { unsigned long flags; void *d; @@ -930,33 +885,30 @@ i915_error_object_create(struct drm_i915_private *dev_priv, memcpy_fromio(d, s, PAGE_SIZE); io_mapping_unmap_atomic(s); } else { - struct page *page; void *s; - page = i915_gem_object_get_page(src, i); - - drm_clflush_pages(&page, 1); + drm_clflush_pages(&src->pages[page], 1); - s = kmap_atomic(page); + s = kmap_atomic(src->pages[page]); memcpy(d, s, PAGE_SIZE); kunmap_atomic(s); - drm_clflush_pages(&page, 1); + drm_clflush_pages(&src->pages[page], 1); } local_irq_restore(flags); - dst->pages[i] = d; + dst->pages[page] = d; reloc_offset += PAGE_SIZE; } - dst->page_count = count; + dst->page_count = page_count; dst->gtt_offset = src->gtt_offset; return dst; unwind: - while (i--) - kfree(dst->pages[i]); + while (page--) + kfree(dst->pages[page]); kfree(dst); return NULL; } @@ -997,8 +949,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, { err->size = obj->base.size; err->name = obj->base.name; - err->rseqno = obj->last_read_seqno; - err->wseqno = obj->last_write_seqno; + err->seqno = obj->last_rendering_seqno; err->gtt_offset = obj->gtt_offset; 
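
/*
 * Aside for readers of the surrounding hunks: capture_bo() above and
 * i915_error_first_batchbuffer() below fold the split read/write seqnos
 * back into a single last_rendering_seqno, and every ordering test
 * funnels through i915_seqno_passed().  A minimal sketch of that
 * comparison, assuming the usual wraparound-safe idiom (illustrative,
 * not quoted from the driver):
 */
static inline bool seqno_passed_sketch(u32 seq1, u32 seq2)
{
        /* Signed subtraction keeps the ordering correct across 32-bit
         * wraparound, provided the two seqnos stay within 2^31 of each
         * other -- which outstanding GPU requests always do. */
        return (s32)(seq1 - seq2) >= 0;
}
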
err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; @@ -1088,12 +1039,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, if (!ring->get_seqno) return NULL; - seqno = ring->get_seqno(ring, false); + seqno = ring->get_seqno(ring); list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (obj->ring != ring) continue; - if (i915_seqno_passed(seqno, obj->last_read_seqno)) + if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) continue; if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) @@ -1129,8 +1080,10 @@ static void i915_record_ring_state(struct drm_device *dev, error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); - if (ring->id == RCS) + if (ring->id == RCS) { + error->instdone1 = I915_READ(INSTDONE1); error->bbaddr = I915_READ64(BB_ADDR); + } } else { error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); error->ipeir[ring->id] = I915_READ(IPEIR); @@ -1140,7 +1093,7 @@ static void i915_record_ring_state(struct drm_device *dev, error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); - error->seqno[ring->id] = ring->get_seqno(ring, false); + error->seqno[ring->id] = ring->get_seqno(ring); error->acthd[ring->id] = intel_ring_get_active_head(ring); error->head[ring->id] = I915_READ_HEAD(ring); error->tail[ring->id] = I915_READ_TAIL(ring); @@ -1246,11 +1199,6 @@ static void i915_capture_error_state(struct drm_device *dev) error->done_reg = I915_READ(DONE_REG); } - if (INTEL_INFO(dev)->gen == 7) - error->err_int = I915_READ(GEN7_ERR_INT); - - i915_get_extra_instdone(dev, error->extra_instdone); - i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); @@ -1262,7 +1210,7 @@ static void i915_capture_error_state(struct drm_device *dev) list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) i++; error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) if (obj->pin_count) i++; error->pinned_bo_count = i - error->active_bo_count; @@ -1287,7 +1235,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->pinned_bo_count = capture_pinned_bo(error->pinned_bo, error->pinned_bo_count, - &dev_priv->mm.bound_list); + &dev_priv->mm.gtt_list); do_gettimeofday(&error->time); @@ -1326,26 +1274,24 @@ void i915_destroy_error_state(struct drm_device *dev) static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t instdone[I915_NUM_INSTDONE_REG]; u32 eir = I915_READ(EIR); - int pipe, i; + int pipe; if (!eir) return; pr_err("render error detected, EIR: 0x%08x\n", eir); - i915_get_extra_instdone(dev, instdone); - if (IS_G4X(dev)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); - for (i = 0; i < ARRAY_SIZE(instdone); i++) - pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); + pr_err(" INSTDONE: 0x%08x\n", + I915_READ(INSTDONE_I965)); pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); @@ -1379,13 +1325,12 @@ static void 
i915_report_and_clear_eir(struct drm_device *dev) if (eir & I915_ERROR_INSTRUCTION) { pr_err("instruction error\n"); pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); - for (i = 0; i < ARRAY_SIZE(instdone); i++) - pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); + pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); POSTING_READ(IPEIR); @@ -1394,7 +1339,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); + pr_err(" INSTDONE: 0x%08x\n", + I915_READ(INSTDONE_I965)); pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); + pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); @@ -1642,8 +1590,7 @@ ring_last_seqno(struct intel_ring_buffer *ring) static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) { if (list_empty(&ring->request_list) || - i915_seqno_passed(ring->get_seqno(ring, false), - ring_last_seqno(ring))) { + i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { /* Issue a wake-up to catch stuck h/w. */ if (waitqueue_active(&ring->irq_queue)) { DRM_ERROR("Hangcheck timer elapsed... %s idle\n", @@ -1709,7 +1656,7 @@ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; + uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; struct intel_ring_buffer *ring; bool err = false, idle; int i; @@ -1737,16 +1684,25 @@ void i915_hangcheck_elapsed(unsigned long data) return; } - i915_get_extra_instdone(dev, instdone); + if (INTEL_INFO(dev)->gen < 4) { + instdone = I915_READ(INSTDONE); + instdone1 = 0; + } else { + instdone = I915_READ(INSTDONE_I965); + instdone1 = I915_READ(INSTDONE1); + } + if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && - memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { + dev_priv->last_instdone == instdone && + dev_priv->last_instdone1 == instdone1) { if (i915_hangcheck_hung(dev)) return; } else { dev_priv->hangcheck_count = 0; memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); - memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); + dev_priv->last_instdone = instdone; + dev_priv->last_instdone1 = instdone1; } repeat: @@ -2691,7 +2647,7 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); - INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); + INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); dev->driver->get_vblank_counter = i915_get_vblank_counter; diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 7637824c6a7d..28725ce5b82c 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -450,7 +450,6 @@ #define RING_ACTHD(base) ((base)+0x74) #define RING_NOPID(base) ((base)+0x94) #define RING_IMR(base) ((base)+0xa8) -#define RING_TIMESTAMP(base) ((base)+0x358) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 
0x00200000 @@ -479,11 +478,6 @@ #define IPEIR_I965 0x02064 #define IPEHR_I965 0x02068 #define INSTDONE_I965 0x0206c -#define GEN7_INSTDONE_1 0x0206c -#define GEN7_SC_INSTDONE 0x07100 -#define GEN7_SAMPLER_INSTDONE 0x0e160 -#define GEN7_ROW_INSTDONE 0x0e164 -#define I915_NUM_INSTDONE_REG 4 #define RING_IPEIR(base) ((base)+0x64) #define RING_IPEHR(base) ((base)+0x68) #define RING_INSTDONE(base) ((base)+0x6c) @@ -506,8 +500,6 @@ #define DMA_FADD_I8XX 0x020d0 #define ERROR_GEN6 0x040a0 -#define GEN7_ERR_INT 0x44040 -#define ERR_INT_MMIO_UNCLAIMED (1<<13) /* GM45+ chicken bits -- debug workaround bits that may be required * for various sorts of correct behavior. The top 16 bits of each are @@ -537,8 +529,6 @@ #define GFX_PSMI_GRANULARITY (1<<10) #define GFX_PPGTT_ENABLE (1<<9) -#define VLV_DISPLAY_BASE 0x180000 - #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 @@ -1506,14 +1496,6 @@ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ GEN7_CXT_GT1_SIZE(ctx_reg) + \ GEN7_CXT_VFSTATE_SIZE(ctx_reg)) -#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f) -#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7) -#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff) -#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \ - HSW_CXT_RING_SIZE(ctx_reg) + \ - HSW_CXT_RENDER_SIZE(ctx_reg) + \ - GEN7_CXT_VFSTATE_SIZE(ctx_reg)) - /* * Overlay regs @@ -1567,35 +1549,12 @@ /* VGA port control */ #define ADPA 0x61100 -#define PCH_ADPA 0xe1100 -#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA) - #define ADPA_DAC_ENABLE (1<<31) #define ADPA_DAC_DISABLE 0 #define ADPA_PIPE_SELECT_MASK (1<<30) #define ADPA_PIPE_A_SELECT 0 #define ADPA_PIPE_B_SELECT (1<<30) #define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) -/* CPT uses bits 29:30 for pch transcoder select */ -#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ -#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) -#define ADPA_CRT_HOTPLUG_ENABLE (1<<23) -#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) -#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) -#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) -#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) -#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) -#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) -#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) -#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) -#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) -#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) #define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_SETS_HVPOLARITY 0 #define ADPA_VSYNC_CNTL_DISABLE (1<<11) @@ -1794,10 +1753,6 @@ /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 -/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC - * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte - * of the infoframe structure specified by CEA-861. 
*/ -#define VIDEO_DIP_DATA_SIZE 32 #define VIDEO_DIP_CTL 0x61170 /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) @@ -3934,6 +3889,31 @@ #define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_2 0xfe004 +/* CRT */ +#define PCH_ADPA 0xe1100 +#define ADPA_TRANS_SELECT_MASK (1<<30) +#define ADPA_TRANS_A_SELECT 0 +#define ADPA_TRANS_B_SELECT (1<<30) +#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ +#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) +#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) +#define ADPA_CRT_HOTPLUG_ENABLE (1<<23) +#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) +#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) +#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) +#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) +#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) +#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) +#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) +#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) +#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) +#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) + /* or SDVOB */ #define HDMIB 0xe1140 #define PORT_ENABLE (1 << 31) @@ -4041,8 +4021,6 @@ #define PORT_TRANS_C_SEL_CPT (2<<29) #define PORT_TRANS_SEL_MASK (3<<29) #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) -#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) -#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 @@ -4261,15 +4239,7 @@ #define G4X_HDMIW_HDMIEDID 0x6210C #define IBX_HDMIW_HDMIEDID_A 0xE2050 -#define IBX_HDMIW_HDMIEDID_B 0xE2150 -#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ - IBX_HDMIW_HDMIEDID_A, \ - IBX_HDMIW_HDMIEDID_B) #define IBX_AUD_CNTL_ST_A 0xE20B4 -#define IBX_AUD_CNTL_ST_B 0xE21B4 -#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ - IBX_AUD_CNTL_ST_A, \ - IBX_AUD_CNTL_ST_B) #define IBX_ELD_BUFFER_SIZE (0x1f << 10) #define IBX_ELD_ADDRESS (0x1f << 5) #define IBX_ELD_ACK (1 << 4) @@ -4278,15 +4248,7 @@ #define IBX_CP_READYB (1 << 1) #define CPT_HDMIW_HDMIEDID_A 0xE5050 -#define CPT_HDMIW_HDMIEDID_B 0xE5150 -#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ - CPT_HDMIW_HDMIEDID_A, \ - CPT_HDMIW_HDMIEDID_B) #define CPT_AUD_CNTL_ST_A 0xE50B4 -#define CPT_AUD_CNTL_ST_B 0xE51B4 -#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ - CPT_AUD_CNTL_ST_A, \ - CPT_AUD_CNTL_ST_B) #define CPT_AUD_CNTRL_ST2 0xE50C0 /* These are the 4 32-bit write offset registers for each stream @@ -4296,15 +4258,7 @@ #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) #define IBX_AUD_CONFIG_A 0xe2000 -#define IBX_AUD_CONFIG_B 0xe2100 -#define IBX_AUD_CFG(pipe) _PIPE(pipe, \ - IBX_AUD_CONFIG_A, \ - IBX_AUD_CONFIG_B) #define CPT_AUD_CONFIG_A 0xe5000 -#define CPT_AUD_CONFIG_B 0xe5100 -#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ - CPT_AUD_CONFIG_A, \ - CPT_AUD_CONFIG_B) #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) #define AUD_CONFIG_UPPER_N_SHIFT 20 @@ -4315,233 +4269,195 @@ #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) #define AUD_CONFIG_DISABLE_NCTS (1 << 3) -/* HSW Audio */ -#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ -#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ -#define HSW_AUD_CFG(pipe) _PIPE(pipe, \ - HSW_AUD_CONFIG_A, \ - HSW_AUD_CONFIG_B) - -#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ -#define 
HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ -#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ - HSW_AUD_MISC_CTRL_A, \ - HSW_AUD_MISC_CTRL_B) - -#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ -#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ -#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ - HSW_AUD_DIP_ELD_CTRL_ST_A, \ - HSW_AUD_DIP_ELD_CTRL_ST_B) - -/* Audio Digital Converter */ -#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ -#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */ -#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ - HSW_AUD_DIG_CNVT_1, \ - HSW_AUD_DIG_CNVT_2) -#define DIP_PORT_SEL_MASK 0x3 - -#define HSW_AUD_EDID_DATA_A 0x65050 -#define HSW_AUD_EDID_DATA_B 0x65150 -#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ - HSW_AUD_EDID_DATA_A, \ - HSW_AUD_EDID_DATA_B) - -#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ -#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ -#define AUDIO_INACTIVE_C (1<<11) -#define AUDIO_INACTIVE_B (1<<7) -#define AUDIO_INACTIVE_A (1<<3) -#define AUDIO_OUTPUT_ENABLE_A (1<<2) -#define AUDIO_OUTPUT_ENABLE_B (1<<6) -#define AUDIO_OUTPUT_ENABLE_C (1<<10) -#define AUDIO_ELD_VALID_A (1<<0) -#define AUDIO_ELD_VALID_B (1<<4) -#define AUDIO_ELD_VALID_C (1<<8) -#define AUDIO_CP_READY_A (1<<1) -#define AUDIO_CP_READY_B (1<<5) -#define AUDIO_CP_READY_C (1<<9) - /* HSW Power Wells */ -#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ -#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ -#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ -#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ -#define HSW_PWR_WELL_ENABLE (1<<31) -#define HSW_PWR_WELL_STATE (1<<30) -#define HSW_PWR_WELL_CTL5 0x45410 +#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ +#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ +#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ +#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ +#define HSW_PWR_WELL_ENABLE (1<<31) +#define HSW_PWR_WELL_STATE (1<<30) +#define HSW_PWR_WELL_CTL5 0x45410 #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) -#define HSW_PWR_WELL_FORCE_ON (1<<19) -#define HSW_PWR_WELL_CTL6 0x45414 +#define HSW_PWR_WELL_FORCE_ON (1<<19) +#define HSW_PWR_WELL_CTL6 0x45414 /* Per-pipe DDI Function Control */ -#define PIPE_DDI_FUNC_CTL_A 0x60400 -#define PIPE_DDI_FUNC_CTL_B 0x61400 -#define PIPE_DDI_FUNC_CTL_C 0x62400 +#define PIPE_DDI_FUNC_CTL_A 0x60400 +#define PIPE_DDI_FUNC_CTL_B 0x61400 +#define PIPE_DDI_FUNC_CTL_C 0x62400 #define PIPE_DDI_FUNC_CTL_EDP 0x6F400 -#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ - PIPE_DDI_FUNC_CTL_B) +#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \ + PIPE_DDI_FUNC_CTL_A, \ + PIPE_DDI_FUNC_CTL_B) #define PIPE_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define PIPE_DDI_PORT_MASK (7<<28) -#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) -#define PIPE_DDI_MODE_SELECT_MASK (7<<24) -#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) -#define PIPE_DDI_MODE_SELECT_DVI (1<<24) +#define PIPE_DDI_PORT_MASK (7<<28) +#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) +#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) +#define PIPE_DDI_MODE_SELECT_DVI (1<<24) #define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) #define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) -#define PIPE_DDI_MODE_SELECT_FDI (4<<24) -#define PIPE_DDI_BPC_MASK (7<<20) -#define PIPE_DDI_BPC_8 (0<<20) -#define PIPE_DDI_BPC_10 (1<<20) -#define PIPE_DDI_BPC_6 (2<<20) 
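
/*
 * Aside: nearly every A/B register pair in this header is wrapped by the
 * _PIPE()/_PORT() helpers used throughout these hunks.  A sketch of the
 * conventional definition (assuming the usual fixed-stride layout rather
 * than quoting the header verbatim) -- the per-instance offset is derived
 * from the distance between the A and B variants:
 */
#define _PIPE_SKETCH(pipe, a, b)        ((a) + (pipe) * ((b) - (a)))
#define _PORT_SKETCH(port, a, b)        ((a) + (port) * ((b) - (a)))
/*
 * e.g. DDI_FUNC_CTL(1) -> 0x60400 + 1 * (0x61400 - 0x60400) = 0x61400
 * (PIPE_DDI_FUNC_CTL_B), and pipe 2 lands on 0x62400 (PIPE_DDI_FUNC_CTL_C),
 * so the stride formula also covers instances beyond B.
 */
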
-#define PIPE_DDI_BPC_12 (3<<20) -#define PIPE_DDI_PVSYNC (1<<17) -#define PIPE_DDI_PHSYNC (1<<16) -#define PIPE_DDI_BFI_ENABLE (1<<4) -#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) -#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) -#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) +#define PIPE_DDI_MODE_SELECT_FDI (4<<24) +#define PIPE_DDI_BPC_8 (0<<20) +#define PIPE_DDI_BPC_10 (1<<20) +#define PIPE_DDI_BPC_6 (2<<20) +#define PIPE_DDI_BPC_12 (3<<20) +#define PIPE_DDI_BFI_ENABLE (1<<4) +#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) +#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) +#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) /* DisplayPort Transport Control */ #define DP_TP_CTL_A 0x64040 #define DP_TP_CTL_B 0x64140 -#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) -#define DP_TP_CTL_ENABLE (1<<31) -#define DP_TP_CTL_MODE_SST (0<<27) -#define DP_TP_CTL_MODE_MST (1<<27) +#define DP_TP_CTL(port) _PORT(port, \ + DP_TP_CTL_A, \ + DP_TP_CTL_B) +#define DP_TP_CTL_ENABLE (1<<31) +#define DP_TP_CTL_MODE_SST (0<<27) +#define DP_TP_CTL_MODE_MST (1<<27) #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) -#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) +#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) -#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) +#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) /* DisplayPort Transport Status */ #define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_B 0x64144 -#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) +#define DP_TP_STATUS(port) _PORT(port, \ + DP_TP_STATUS_A, \ + DP_TP_STATUS_B) #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) /* DDI Buffer Control */ #define DDI_BUF_CTL_A 0x64000 #define DDI_BUF_CTL_B 0x64100 -#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) -#define DDI_BUF_CTL_ENABLE (1<<31) +#define DDI_BUF_CTL(port) _PORT(port, \ + DDI_BUF_CTL_A, \ + DDI_BUF_CTL_B) +#define DDI_BUF_CTL_ENABLE (1<<31) #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ -#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ +#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ -#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ +#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ #define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ -#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ +#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ -#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ -#define DDI_BUF_EMP_MASK (0xf<<24) -#define DDI_BUF_IS_IDLE (1<<7) -#define DDI_PORT_WIDTH_X1 (0<<1) -#define DDI_PORT_WIDTH_X2 (1<<1) -#define DDI_PORT_WIDTH_X4 (3<<1) +#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ +#define DDI_BUF_EMP_MASK (0xf<<24) +#define DDI_BUF_IS_IDLE (1<<7) +#define DDI_PORT_WIDTH_X1 (0<<1) +#define DDI_PORT_WIDTH_X2 (1<<1) +#define DDI_PORT_WIDTH_X4 (3<<1) #define DDI_INIT_DISPLAY_DETECTED (1<<0) /* DDI Buffer Translations */ #define DDI_BUF_TRANS_A 0x64E00 #define DDI_BUF_TRANS_B 0x64E60 -#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) +#define DDI_BUF_TRANS(port) _PORT(port, \ + DDI_BUF_TRANS_A, \ + DDI_BUF_TRANS_B) /* Sideband Interface (SBI) is programmed indirectly, via * SBI_ADDR, which contains the register offset; and SBI_DATA, * which contains the payload */ -#define SBI_ADDR 0xC6000 -#define SBI_DATA 0xC6004 +#define SBI_ADDR 0xC6000 +#define SBI_DATA 0xC6004 
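
/*
 * Aside on the sideband interface described in the comment nearby: a
 * CRRD read is an address write, an opcode kick, and a busy-wait on
 * SBI_CTL_STAT.  The sketch below is illustrative only -- the unbounded
 * busy-poll and the placement of the offset in the upper half of
 * SBI_ADDR are assumptions about this hardware, not quoted driver code.
 */
static u32 sbi_read_sketch(struct drm_i915_private *dev_priv, u16 offset)
{
        I915_WRITE(SBI_ADDR, (u32)offset << 16);
        I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);

        /* Hardware clears SBI_BUSY when the transaction completes;
         * a real driver would bound this wait with a timeout. */
        while (I915_READ(SBI_CTL_STAT) & SBI_BUSY)
                cpu_relax();

        if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL)
                return 0;       /* error propagation elided for brevity */

        return I915_READ(SBI_DATA);
}
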
#define SBI_CTL_STAT 0xC6008 #define SBI_CTL_OP_CRRD (0x6<<8) #define SBI_CTL_OP_CRWR (0x7<<8) #define SBI_RESPONSE_FAIL (0x1<<1) -#define SBI_RESPONSE_SUCCESS (0x0<<1) -#define SBI_BUSY (0x1<<0) -#define SBI_READY (0x0<<0) +#define SBI_RESPONSE_SUCCESS (0x0<<1) +#define SBI_BUSY (0x1<<0) +#define SBI_READY (0x0<<0) /* SBI offsets */ -#define SBI_SSCDIVINTPHASE6 0x0600 +#define SBI_SSCDIVINTPHASE6 0x0600 #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) #define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) -#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) +#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) -#define SBI_SSCCTL 0x020c +#define SBI_SSCCTL 0x020c #define SBI_SSCCTL6 0x060C -#define SBI_SSCCTL_DISABLE (1<<0) +#define SBI_SSCCTL_DISABLE (1<<0) #define SBI_SSCAUXDIV6 0x0610 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) -#define SBI_DBUFF0 0x2a00 +#define SBI_DBUFF0 0x2a00 /* LPT PIXCLK_GATE */ -#define PIXCLK_GATE 0xC6020 -#define PIXCLK_GATE_UNGATE (1<<0) -#define PIXCLK_GATE_GATE (0<<0) +#define PIXCLK_GATE 0xC6020 +#define PIXCLK_GATE_UNGATE 1<<0 +#define PIXCLK_GATE_GATE 0<<0 /* SPLL */ -#define SPLL_CTL 0x46020 +#define SPLL_CTL 0x46020 #define SPLL_PLL_ENABLE (1<<31) #define SPLL_PLL_SCC (1<<28) #define SPLL_PLL_NON_SCC (2<<28) -#define SPLL_PLL_FREQ_810MHz (0<<26) -#define SPLL_PLL_FREQ_1350MHz (1<<26) +#define SPLL_PLL_FREQ_810MHz (0<<26) +#define SPLL_PLL_FREQ_1350MHz (1<<26) /* WRPLL */ -#define WRPLL_CTL1 0x46040 -#define WRPLL_CTL2 0x46060 -#define WRPLL_PLL_ENABLE (1<<31) -#define WRPLL_PLL_SELECT_SSC (0x01<<28) -#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) +#define WRPLL_CTL1 0x46040 +#define WRPLL_CTL2 0x46060 +#define WRPLL_PLL_ENABLE (1<<31) +#define WRPLL_PLL_SELECT_SSC (0x01<<28) +#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) /* WRPLL divider programming */ -#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) -#define WRPLL_DIVIDER_POST(x) ((x)<<8) -#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) +#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) +#define WRPLL_DIVIDER_POST(x) ((x)<<8) +#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) /* Port clock selection */ #define PORT_CLK_SEL_A 0x46100 #define PORT_CLK_SEL_B 0x46104 -#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) +#define PORT_CLK_SEL(port) _PORT(port, \ + PORT_CLK_SEL_A, \ + PORT_CLK_SEL_B) #define PORT_CLK_SEL_LCPLL_2700 (0<<29) #define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29) -#define PORT_CLK_SEL_SPLL (3<<29) +#define PORT_CLK_SEL_SPLL (3<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) /* Pipe clock selection */ #define PIPE_CLK_SEL_A 0x46140 #define PIPE_CLK_SEL_B 0x46144 -#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) +#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \ + PIPE_CLK_SEL_A, \ + PIPE_CLK_SEL_B) /* For each pipe, we need to select the corresponding port clock */ -#define PIPE_CLK_SEL_DISABLED (0x0<<29) -#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) +#define PIPE_CLK_SEL_DISABLED (0x0<<29) +#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) /* LCPLL Control */ -#define LCPLL_CTL 0x130040 +#define LCPLL_CTL 0x130040 #define LCPLL_PLL_DISABLE (1<<31) #define LCPLL_PLL_LOCK (1<<30) -#define LCPLL_CD_CLOCK_DISABLE (1<<25) +#define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) /* Pipe WM_LINETIME - watermark line time */ #define PIPE_WM_LINETIME_A 
0x45270 #define PIPE_WM_LINETIME_B 0x45274 -#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \ - PIPE_WM_LINETIME_B) -#define PIPE_WM_LINETIME_MASK (0x1ff) -#define PIPE_WM_LINETIME_TIME(x) ((x)) +#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ + PIPE_WM_LINETIME_A, \ + PIPE_WM_LINETIME_B) +#define PIPE_WM_LINETIME_MASK (0x1ff) +#define PIPE_WM_LINETIME_TIME(x) ((x)) #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) -#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) +#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) /* SFUSE_STRAP */ -#define SFUSE_STRAP 0xc2014 +#define SFUSE_STRAP 0xc2014 #define SFUSE_STRAP_DDIB_DETECTED (1<<2) #define SFUSE_STRAP_DDIC_DETECTED (1<<1) #define SFUSE_STRAP_DDID_DETECTED (1<<0) diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c b/trunk/drivers/gpu/drm/i915/i915_sysfs.c index 903eebd2117a..7631807a2788 100644 --- a/trunk/drivers/gpu/drm/i915/i915_sysfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c @@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) } static ssize_t -show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) +show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); } static ssize_t -show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) +show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); } static ssize_t -show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) +show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); } static ssize_t -show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) +show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); } @@ -93,7 +93,6 @@ static struct attribute_group rc6_attr_group = { .name = power_group_name, .attrs = rc6_attrs }; -#endif static int l3_access_valid(struct drm_device *dev, loff_t offset) { @@ -203,214 +202,37 @@ static struct bin_attribute dpf_attrs = { .mmap = NULL }; -static ssize_t gt_cur_freq_mhz_show(struct device *kdev, - struct device_attribute *attr, char *buf) -{ - struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev->struct_mutex); - - return snprintf(buf, PAGE_SIZE, "%d", ret); -} - -static ssize_t 
gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) -{ - struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev->struct_mutex); - - return snprintf(buf, PAGE_SIZE, "%d", ret); -} - -static ssize_t gt_max_freq_mhz_store(struct device *kdev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 val, rp_state_cap, hw_max, hw_min; - ssize_t ret; - - ret = kstrtou32(buf, 0, &val); - if (ret) - return ret; - - val /= GT_FREQUENCY_MULTIPLIER; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - hw_max = (rp_state_cap & 0xff); - hw_min = ((rp_state_cap & 0xff0000) >> 16); - - if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - if (dev_priv->rps.cur_delay > val) - gen6_set_rps(dev_priv->dev, val); - - dev_priv->rps.max_delay = val; - - mutex_unlock(&dev->struct_mutex); - - return count; -} - -static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) -{ - struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; - mutex_unlock(&dev->struct_mutex); - - return snprintf(buf, PAGE_SIZE, "%d", ret); -} - -static ssize_t gt_min_freq_mhz_store(struct device *kdev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 val, rp_state_cap, hw_max, hw_min; - ssize_t ret; - - ret = kstrtou32(buf, 0, &val); - if (ret) - return ret; - - val /= GT_FREQUENCY_MULTIPLIER; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - hw_max = (rp_state_cap & 0xff); - hw_min = ((rp_state_cap & 0xff0000) >> 16); - - if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - - if (dev_priv->rps.cur_delay < val) - gen6_set_rps(dev_priv->dev, val); - - dev_priv->rps.min_delay = val; - - mutex_unlock(&dev->struct_mutex); - - return count; - -} - -static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); -static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); -static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); - - -static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); -static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); -static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); -static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); - -/* For now we have a static number of RP states */ -static 
ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) -{ - struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 val, rp_state_cap; - ssize_t ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - mutex_unlock(&dev->struct_mutex); - - if (attr == &dev_attr_gt_RP0_freq_mhz) { - val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER; - } else if (attr == &dev_attr_gt_RP1_freq_mhz) { - val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER; - } else if (attr == &dev_attr_gt_RPn_freq_mhz) { - val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER; - } else { - BUG(); - } - return snprintf(buf, PAGE_SIZE, "%d", val); -} - -static const struct attribute *gen6_attrs[] = { - &dev_attr_gt_cur_freq_mhz.attr, - &dev_attr_gt_max_freq_mhz.attr, - &dev_attr_gt_min_freq_mhz.attr, - &dev_attr_gt_RP0_freq_mhz.attr, - &dev_attr_gt_RP1_freq_mhz.attr, - &dev_attr_gt_RPn_freq_mhz.attr, - NULL, -}; - void i915_setup_sysfs(struct drm_device *dev) { int ret; -#ifdef CONFIG_PM if (INTEL_INFO(dev)->gen >= 6) { ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group); if (ret) DRM_ERROR("RC6 residency sysfs setup failed\n"); } -#endif - if (HAS_L3_GPU_CACHE(dev)) { + + if (IS_IVYBRIDGE(dev)) { ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); if (ret) DRM_ERROR("l3 parity sysfs setup failed\n"); } - - if (INTEL_INFO(dev)->gen >= 6) { - ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); - if (ret) - DRM_ERROR("gen6 sysfs setup failed\n"); - } } void i915_teardown_sysfs(struct drm_device *dev) { - sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); -#ifdef CONFIG_PM sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); -#endif } +#else +void i915_setup_sysfs(struct drm_device *dev) +{ + return; +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + return; +} +#endif /* CONFIG_PM */ diff --git a/trunk/drivers/gpu/drm/i915/i915_trace.h b/trunk/drivers/gpu/drm/i915/i915_trace.h index 8134421b89a6..fe90b3a84a6d 100644 --- a/trunk/drivers/gpu/drm/i915/i915_trace.h +++ b/trunk/drivers/gpu/drm/i915/i915_trace.h @@ -214,18 +214,22 @@ TRACE_EVENT(i915_gem_evict, ); TRACE_EVENT(i915_gem_evict_everything, - TP_PROTO(struct drm_device *dev), - TP_ARGS(dev), + TP_PROTO(struct drm_device *dev, bool purgeable), + TP_ARGS(dev, purgeable), TP_STRUCT__entry( __field(u32, dev) + __field(bool, purgeable) ), TP_fast_assign( __entry->dev = dev->primary->index; + __entry->purgeable = purgeable; ), - TP_printk("dev=%d", __entry->dev) + TP_printk("dev=%d%s", + __entry->dev, + __entry->purgeable ? 
", purgeable only" : "") ); TRACE_EVENT(i915_gem_ring_dispatch, @@ -430,21 +434,6 @@ TRACE_EVENT(i915_reg_rw, (u32)(__entry->val >> 32)) ); -TRACE_EVENT(intel_gpu_freq_change, - TP_PROTO(u32 freq), - TP_ARGS(freq), - - TP_STRUCT__entry( - __field(u32, freq) - ), - - TP_fast_assign( - __entry->freq = freq; - ), - - TP_printk("new_freq=%u", __entry->freq) -); - #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index c42b9809f86d..23bdc8cd1458 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -47,7 +47,6 @@ struct intel_crt { struct intel_encoder base; bool force_hotplug_required; - u32 adpa_reg; }; static struct intel_crt *intel_attached_crt(struct drm_connector *connector) @@ -56,68 +55,42 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector) struct intel_crt, base); } -static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) -{ - return container_of(encoder, struct intel_crt, base); -} - -static bool intel_crt_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) +static void pch_crt_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->base.dev; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); - u32 tmp; - - tmp = I915_READ(crt->adpa_reg); - - if (!(tmp & ADPA_DAC_ENABLE)) - return false; - - if (HAS_PCH_CPT(dev)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - return true; -} - -static void intel_disable_crt(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 temp; - temp = I915_READ(crt->adpa_reg); - temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + temp = I915_READ(PCH_ADPA); temp &= ~ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, temp); -} -static void intel_enable_crt(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); - u32 temp; + switch (mode) { + case DRM_MODE_DPMS_ON: + temp |= ADPA_DAC_ENABLE; + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + /* Just leave port enable cleared */ + break; + } - temp = I915_READ(crt->adpa_reg); - temp |= ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, temp); + I915_WRITE(PCH_ADPA, temp); } -/* Note: The caller is required to filter out dpms modes not supported by the - * platform. 
*/ -static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) +static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->base.dev; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 temp; - temp = I915_READ(crt->adpa_reg); + temp = I915_READ(ADPA); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp &= ~ADPA_DAC_ENABLE; + if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + switch (mode) { case DRM_MODE_DPMS_ON: temp |= ADPA_DAC_ENABLE; @@ -133,51 +106,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) break; } - I915_WRITE(crt->adpa_reg, temp); -} - -static void intel_crt_dpms(struct drm_connector *connector, int mode) -{ - struct drm_device *dev = connector->dev; - struct intel_encoder *encoder = intel_attached_encoder(connector); - struct drm_crtc *crtc; - int old_dpms; - - /* PCH platforms and VLV only support on/off. */ - if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (mode == connector->dpms) - return; - - old_dpms = connector->dpms; - connector->dpms = mode; - - /* Only need to change hw state when actually enabled */ - crtc = encoder->base.crtc; - if (!crtc) { - encoder->connectors_active = false; - return; - } - - /* We need the pipe to run for anything but OFF. */ - if (mode == DRM_MODE_DPMS_OFF) - encoder->connectors_active = false; - else - encoder->connectors_active = true; - - if (mode < old_dpms) { - /* From off to on, enable the pipe first. */ - intel_crtc_update_dpms(crtc); - - intel_crt_set_dpms(encoder, mode); - } else { - intel_crt_set_dpms(encoder, mode); - - intel_crtc_update_dpms(crtc); - } - - intel_modeset_check_state(connector->dev); + I915_WRITE(ADPA, temp); } static int intel_crt_mode_valid(struct drm_connector *connector, @@ -216,15 +145,19 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; - struct intel_crt *crt = - intel_encoder_to_crt(to_intel_encoder(encoder)); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = dev->dev_private; int dpll_md_reg; u32 adpa, dpll_md; + u32 adpa_reg; dpll_md_reg = DPLL_MD(intel_crtc->pipe); + if (HAS_PCH_SPLIT(dev)) + adpa_reg = PCH_ADPA; + else + adpa_reg = ADPA; + /* * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning @@ -252,7 +185,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); - I915_WRITE(crt->adpa_reg, adpa); + I915_WRITE(adpa_reg, adpa); } static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) @@ -612,12 +545,14 @@ intel_crt_detect(struct drm_connector *connector, bool force) return connector->status; /* for pre-945g platforms use load detect */ - if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { + if (intel_get_load_detect_pipe(&crt->base, connector, NULL, + &tmp)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(connector, &tmp); + intel_release_load_detect_pipe(&crt->base, connector, + &tmp); } else status = connector_status_unknown; @@ -668,15 +603,25 @@ static void intel_crt_reset(struct drm_connector *connector) * Routines for 
controlling stuff on the analog port */ -static const struct drm_encoder_helper_funcs crt_encoder_funcs = { +static const struct drm_encoder_helper_funcs pch_encoder_funcs = { .mode_fixup = intel_crt_mode_fixup, + .prepare = intel_encoder_prepare, + .commit = intel_encoder_commit, .mode_set = intel_crt_mode_set, - .disable = intel_encoder_noop, + .dpms = pch_crt_dpms, +}; + +static const struct drm_encoder_helper_funcs gmch_encoder_funcs = { + .mode_fixup = intel_crt_mode_fixup, + .prepare = intel_encoder_prepare, + .commit = intel_encoder_commit, + .mode_set = intel_crt_mode_set, + .dpms = gmch_crt_dpms, }; static const struct drm_connector_funcs intel_crt_connector_funcs = { .reset = intel_crt_reset, - .dpms = intel_crt_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_crt_destroy, @@ -717,6 +662,7 @@ void intel_crt_init(struct drm_device *dev) struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; + const struct drm_encoder_helper_funcs *encoder_helper_funcs; /* Skip machines without VGA that falsely report hotplug events */ if (dmi_check_system(intel_no_crt)) @@ -742,11 +688,13 @@ void intel_crt_init(struct drm_device *dev) intel_connector_attach_encoder(intel_connector, &crt->base); crt->base.type = INTEL_OUTPUT_ANALOG; - crt->base.cloneable = true; + crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | + 1 << INTEL_ANALOG_CLONE_BIT | + 1 << INTEL_SDVO_LVDS_CLONE_BIT); if (IS_HASWELL(dev)) crt->base.crtc_mask = (1 << 0); else - crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + crt->base.crtc_mask = (1 << 0) | (1 << 1); if (IS_GEN2(dev)) connector->interlace_allowed = 0; @@ -755,18 +703,11 @@ void intel_crt_init(struct drm_device *dev) connector->doublescan_allowed = 0; if (HAS_PCH_SPLIT(dev)) - crt->adpa_reg = PCH_ADPA; - else if (IS_VALLEYVIEW(dev)) - crt->adpa_reg = VLV_ADPA; + encoder_helper_funcs = &pch_encoder_funcs; else - crt->adpa_reg = ADPA; - - crt->base.disable = intel_disable_crt; - crt->base.enable = intel_enable_crt; - crt->base.get_hw_state = intel_crt_get_hw_state; - intel_connector->get_hw_state = intel_connector_get_hw_state; + encoder_helper_funcs = &gmch_encoder_funcs; - drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); + drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c index bfe375466a0e..933c74859172 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ddi.c +++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c @@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) case PORT_B: case PORT_C: case PORT_D: - intel_hdmi_init(dev, DDI_BUF_CTL(port), port); + intel_hdmi_init(dev, DDI_BUF_CTL(port)); break; default: DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", @@ -267,8 +267,7 @@ struct wrpll_tmds_clock { u16 r2; /* Reference divider */ }; -/* Table of matching values for WRPLL clocks programming for each frequency. - * The code assumes this table is sorted. 
*/ +/* Table of matching values for WRPLL clocks programming for each frequency */ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {19750, 38, 25, 18}, {20000, 48, 32, 18}, @@ -278,6 +277,7 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {23000, 36, 23, 15}, {23500, 40, 40, 23}, {23750, 26, 16, 14}, + {23750, 26, 16, 14}, {24000, 36, 24, 15}, {25000, 36, 25, 15}, {25175, 26, 40, 33}, @@ -437,6 +437,7 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {108000, 8, 24, 15}, {108108, 8, 173, 108}, {109000, 6, 23, 19}, + {109000, 6, 23, 19}, {110000, 6, 22, 18}, {110013, 6, 22, 18}, {110250, 8, 49, 30}, @@ -613,6 +614,7 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {218250, 4, 42, 26}, {218750, 4, 34, 21}, {219000, 4, 47, 29}, + {219000, 4, 47, 29}, {220000, 4, 44, 27}, {220640, 4, 49, 30}, {220750, 4, 36, 22}, @@ -656,7 +658,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int port = intel_hdmi->ddi_port; int pipe = intel_crtc->pipe; - int p, n2, r2; + int p, n2, r2, valid=0; u32 temp, i; /* On Haswell, we need to enable the clocks and prepare DDI function to @@ -664,23 +666,26 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, */ DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); - for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) - if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) - break; - - if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) - i--; + for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) { + if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) { + p = wrpll_tmds_clock_table[i].p; + n2 = wrpll_tmds_clock_table[i].n2; + r2 = wrpll_tmds_clock_table[i].r2; - p = wrpll_tmds_clock_table[i].p; - n2 = wrpll_tmds_clock_table[i].n2; - r2 = wrpll_tmds_clock_table[i].r2; + DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n", + crtc->mode.clock, + p, n2, r2); - if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) - DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", - wrpll_tmds_clock_table[i].clock, crtc->mode.clock); + valid = 1; + break; + } + } - DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", - crtc->mode.clock, p, n2, r2); + if (!valid) { + DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n", + crtc->mode.clock); + return; + } /* Enable LCPLL if disabled */ temp = I915_READ(LCPLL_CTL); @@ -713,107 +718,46 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, /* Proper support for digital audio needs a new logic and a new set * of registers, so we leave it for future patch bombing. 
*/ - DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", + DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n", pipe_name(intel_crtc->pipe)); - - /* write eld */ - DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); } /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ - temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); - - switch (intel_crtc->bpp) { - case 18: - temp |= PIPE_DDI_BPC_6; - break; - case 24: - temp |= PIPE_DDI_BPC_8; - break; - case 30: - temp |= PIPE_DDI_BPC_10; - break; - case 36: - temp |= PIPE_DDI_BPC_12; - break; - default: - WARN(1, "%d bpp unsupported by pipe DDI function\n", - intel_crtc->bpp); - } - - if (intel_hdmi->has_hdmi_sink) - temp |= PIPE_DDI_MODE_SELECT_HDMI; - else - temp |= PIPE_DDI_MODE_SELECT_DVI; - - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - temp |= PIPE_DDI_PVSYNC; - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - temp |= PIPE_DDI_PHSYNC; + temp = I915_READ(DDI_FUNC_CTL(pipe)); + temp &= ~PIPE_DDI_PORT_MASK; + temp &= ~PIPE_DDI_BPC_12; + temp |= PIPE_DDI_SELECT_PORT(port) | + PIPE_DDI_MODE_SELECT_HDMI | + ((intel_crtc->bpp > 24) ? + PIPE_DDI_BPC_12 : + PIPE_DDI_BPC_8) | + PIPE_DDI_FUNC_ENABLE; I915_WRITE(DDI_FUNC_CTL(pipe), temp); intel_hdmi->set_infoframes(encoder, adjusted_mode); } -bool intel_ddi_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) +void intel_ddi_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - u32 tmp; - int i; - - tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); - - if (!(tmp & DDI_BUF_CTL_ENABLE)) - return false; - - for_each_pipe(i) { - tmp = I915_READ(DDI_FUNC_CTL(i)); - - if ((tmp & PIPE_DDI_PORT_MASK) - == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { - *pipe = i; - return true; - } - } - - DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); - - return true; -} - -void intel_enable_ddi(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int port = intel_hdmi->ddi_port; u32 temp; temp = I915_READ(DDI_BUF_CTL(port)); - temp |= DDI_BUF_CTL_ENABLE; + + if (mode != DRM_MODE_DPMS_ON) { + temp &= ~DDI_BUF_CTL_ENABLE; + } else { + temp |= DDI_BUF_CTL_ENABLE; + } /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, * and swing/emphasis values are ignored so nothing special needs * to be done besides enabling the port. 
*/ - I915_WRITE(DDI_BUF_CTL(port), temp); -} - -void intel_disable_ddi(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - int port = intel_hdmi->ddi_port; - u32 temp; - - temp = I915_READ(DDI_BUF_CTL(port)); - temp &= ~DDI_BUF_CTL_ENABLE; - - I915_WRITE(DDI_BUF_CTL(port), temp); + I915_WRITE(DDI_BUF_CTL(port), + temp); } diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index 6f5aafa1b633..c040aee1341c 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) /* Wait for the Pipe State to go off */ if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100)) - WARN(1, "pipe_off wait timed out\n"); + DRM_DEBUG_KMS("pipe_off wait timed out\n"); } else { u32 last_line, line_mask; int reg = PIPEDSL(pipe); @@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) } while (((I915_READ(reg) & line_mask) != last_line) && time_after(timeout, jiffies)); if (time_after(jiffies, timeout)) - WARN(1, "pipe_off wait timed out\n"); + DRM_DEBUG_KMS("pipe_off wait timed out\n"); } } @@ -1431,8 +1431,6 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, * protect mechanism may be enabled. * * Note! This is for pre-ILK only. - * - * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. */ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -1862,6 +1860,59 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv, intel_wait_for_vblank(dev_priv->dev, pipe); } +static void disable_pch_dp(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg, u32 port_sel) +{ + u32 val = I915_READ(reg); + if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { + DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); + I915_WRITE(reg, val & ~DP_PORT_EN); + } +} + +static void disable_pch_hdmi(struct drm_i915_private *dev_priv, + enum pipe pipe, int reg) +{ + u32 val = I915_READ(reg); + if (hdmi_pipe_enabled(dev_priv, pipe, val)) { + DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", + reg, pipe); + I915_WRITE(reg, val & ~PORT_ENABLE); + } +} + +/* Disable any ports connected to this transcoder */ +static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 reg, val; + + val = I915_READ(PCH_PP_CONTROL); + I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); + + disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); + disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); + disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); + + reg = PCH_ADPA; + val = I915_READ(reg); + if (adpa_pipe_enabled(dev_priv, pipe, val)) + I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); + + reg = PCH_LVDS; + val = I915_READ(reg); + if (lvds_pipe_enabled(dev_priv, pipe, val)) { + DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); + I915_WRITE(reg, val & ~LVDS_PORT_EN); + POSTING_READ(reg); + udelay(100); + } + + disable_pch_hdmi(dev_priv, pipe, HDMIB); + disable_pch_hdmi(dev_priv, pipe, HDMIC); + disable_pch_hdmi(dev_priv, pipe, HDMID); +} + int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -2150,17 +2201,16 @@ intel_finish_fb(struct drm_framebuffer *old_fb) static int 
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *fb) + struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_framebuffer *old_fb; int ret; /* no fb bound */ - if (!fb) { + if (!crtc->fb) { DRM_ERROR("No FB bound\n"); return 0; } @@ -2174,7 +2224,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, - to_intel_framebuffer(fb)->obj, + to_intel_framebuffer(crtc->fb)->obj, NULL); if (ret != 0) { mutex_unlock(&dev->struct_mutex); @@ -2182,22 +2232,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - if (crtc->fb) - intel_finish_fb(crtc->fb); + if (old_fb) + intel_finish_fb(old_fb); - ret = dev_priv->display.update_plane(crtc, fb, x, y); + ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y); if (ret) { - intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); + intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); DRM_ERROR("failed to update base address\n"); return ret; } - old_fb = crtc->fb; - crtc->fb = fb; - crtc->x = x; - crtc->y = y; - if (old_fb) { intel_wait_for_vblank(dev, intel_crtc->pipe); intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); @@ -2664,10 +2709,11 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) +static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) { - struct drm_device *dev = intel_crtc->base.dev; + struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; @@ -2708,35 +2754,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) } } -static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) -{ - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int pipe = intel_crtc->pipe; - u32 reg, temp; - - /* Switch from PCDclk to Rawclk */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_PCDCLK); - - /* Disable CPU FDI TX PLL */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); - - POSTING_READ(reg); - udelay(100); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); - - /* Wait for the clocks to turn off. */ - POSTING_READ(reg); - udelay(100); -} - static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2821,13 +2838,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; + struct intel_encoder *encoder; /* * If there's a non-PCH eDP on this crtc, it must be DP_A, and that * must be driven by its own crtc; no sharing is possible. 
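 *
 * Once the Haswell special case below is handled, the loop effectively
 * reduces to the following check (a sketch using only the encoder types
 * this function handles; returning false means the crtc does not drive
 * the PCH):
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		if (encoder->type == INTEL_OUTPUT_EDP &&
 *		    !intel_encoder_is_pch_edp(&encoder->base))
 *			return false;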
*/ - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + for_each_encoder_on_crtc(dev, crtc, encoder) { /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell * CPU handles all others */ @@ -2835,19 +2852,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc) /* It is still unclear how this will work on PPT, so throw up a warning */ WARN_ON(!HAS_PCH_LPT(dev)); - if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { + if (encoder->type == DRM_MODE_ENCODER_DAC) { DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); return true; } else { DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", - intel_encoder->type); + encoder->type); return false; } } - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_EDP: - if (!intel_encoder_is_pch_edp(&intel_encoder->base)) + if (!intel_encoder_is_pch_edp(&encoder->base)) return false; continue; } @@ -3164,14 +3181,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 temp; bool is_pch_port; - WARN_ON(!crtc->enabled); - if (intel_crtc->active) return; @@ -3186,16 +3200,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) is_pch_port = intel_crtc_driving_pch(crtc); - if (is_pch_port) { - ironlake_fdi_pll_enable(intel_crtc); - } else { - assert_fdi_tx_disabled(dev_priv, pipe); - assert_fdi_rx_disabled(dev_priv, pipe); - } - - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); + if (is_pch_port) + ironlake_fdi_pll_enable(crtc); + else + ironlake_fdi_disable(crtc); /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && @@ -3226,12 +3234,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); intel_crtc_update_cursor(crtc, true); - - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - - if (HAS_PCH_CPT(dev)) - intel_cpt_verify_modeset(dev, intel_crtc->pipe); } static void ironlake_crtc_disable(struct drm_crtc *crtc) @@ -3239,18 +3241,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 reg, temp; - if (!intel_crtc->active) return; - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); - intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); @@ -3266,12 +3263,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) I915_WRITE(PF_CTL(pipe), 0); I915_WRITE(PF_WIN_SZ(pipe), 0); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); - ironlake_fdi_disable(crtc); + /* This is a horrible layering violation; we should be doing this in + * the connector/encoder ->prepare instead, but we don't always have + * enough information there about the config to know whether it will + * actually be necessary or just cause undesired flicker. 
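+	 *
+	 * Each helper this calls into is a guarded read-modify-write;
+	 * disable_pch_dp(), added earlier in this diff, for instance
+	 * boils down to:
+	 *
+	 *	u32 val = I915_READ(reg);
+	 *	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
+	 *		I915_WRITE(reg, val & ~DP_PORT_EN);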
+ */ + intel_disable_pch_ports(dev_priv, pipe); + intel_disable_transcoder(dev_priv, pipe); if (HAS_PCH_CPT(dev)) { @@ -3304,7 +3304,26 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) /* disable PCH DPLL */ intel_disable_pch_pll(intel_crtc); - ironlake_fdi_pll_disable(intel_crtc); + /* Switch from PCDclk to Rawclk */ + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_PCDCLK); + + /* Disable CPU FDI TX PLL */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(100); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); + + /* Wait for the clocks to turn off. */ + POSTING_READ(reg); + udelay(100); intel_crtc->active = false; intel_update_watermarks(dev); @@ -3314,6 +3333,30 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); } +static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + + /* XXX: When our outputs are all unaware of DPMS modes other than off + * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. + */ + switch (mode) { + case DRM_MODE_DPMS_ON: + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); + ironlake_crtc_enable(crtc); + break; + + case DRM_MODE_DPMS_OFF: + DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); + ironlake_crtc_disable(crtc); + break; + } +} + static void ironlake_crtc_off(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -3343,12 +3386,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - WARN_ON(!crtc->enabled); - if (intel_crtc->active) return; @@ -3365,9 +3405,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) /* Give the overlay scaler a chance to enable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, true); intel_crtc_update_cursor(crtc, true); - - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); } static void i9xx_crtc_disable(struct drm_crtc *crtc) @@ -3375,17 +3412,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - if (!intel_crtc->active) return; - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); - /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); @@ -3404,17 +3436,45 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_update_watermarks(dev); } +static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + /* XXX: When our outputs are all unaware of DPMS modes other than off + * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
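+	 *
+	 * Until then, anything that is not OFF is treated as ON, i.e. the
+	 * switch below is equivalent to (sketch):
+	 *
+	 *	if (mode == DRM_MODE_DPMS_OFF)
+	 *		i9xx_crtc_disable(crtc);
+	 *	else
+	 *		i9xx_crtc_enable(crtc);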
+ */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+		i9xx_crtc_enable(crtc);
+		break;
+	case DRM_MODE_DPMS_OFF:
+		i9xx_crtc_disable(crtc);
+		break;
+	}
+}
+
 static void i9xx_crtc_off(struct drm_crtc *crtc)
 {
 }
 
-static void intel_crtc_update_sarea(struct drm_crtc *crtc,
-				    bool enabled)
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
+	bool enabled;
+
+	if (intel_crtc->dpms_mode == mode)
+		return;
+
+	intel_crtc->dpms_mode = mode;
+
+	dev_priv->display.dpms(crtc, mode);
 
 	if (!dev->primary->master)
 		return;
@@ -3423,6 +3483,8 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
 	if (!master_priv->sarea_priv)
 		return;
 
+	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
 	switch (pipe) {
 	case 0:
 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3438,42 +3500,13 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
 	}
 }
 
-/**
- * Sets the power management mode of the pipe and plane.
- */
-void intel_crtc_update_dpms(struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_encoder *intel_encoder;
-	bool enable = false;
-
-	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
-		enable |= intel_encoder->connectors_active;
-
-	if (enable)
-		dev_priv->display.crtc_enable(crtc);
-	else
-		dev_priv->display.crtc_disable(crtc);
-
-	intel_crtc_update_sarea(crtc, enable);
-}
-
-static void intel_crtc_noop(struct drm_crtc *crtc)
-{
-}
-
 static void intel_crtc_disable(struct drm_crtc *crtc)
 {
+	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
-	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* crtc should still be enabled when we disable it. */
-	WARN_ON(!crtc->enabled);
-
-	dev_priv->display.crtc_disable(crtc);
-	intel_crtc_update_sarea(crtc, false);
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	dev_priv->display.off(crtc);
 
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3483,128 +3516,63 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 	mutex_unlock(&dev->struct_mutex);
-	crtc->fb = NULL;
-	}
-
-	/* Update computed state. */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (!connector->encoder || !connector->encoder->crtc)
-			continue;
-
-		if (connector->encoder->crtc != crtc)
-			continue;
-
-		connector->dpms = DRM_MODE_DPMS_OFF;
-		to_intel_encoder(connector->encoder)->connectors_active = false;
 	}
 }
 
-void intel_modeset_disable(struct drm_device *dev)
+/* Prepare for a mode set.
+ *
+ * Note we could be a lot smarter here. We need to figure out which outputs
+ * will be enabled, which disabled (in short, how the config will change)
+ * and perform the minimum necessary steps to accomplish that, e.g. updating
+ * watermarks, FBC configuration, making sure PLLs are programmed correctly,
+ * panel fitting is in the proper state, etc.
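+ *
+ * A smarter implementation might diff the old and new configurations
+ * first, along the lines of this purely hypothetical sketch (neither
+ * intel_config_diff() nor the CFG_* flags exist in this patch; only
+ * intel_update_watermarks() is real):
+ *
+ *	changes = intel_config_diff(old_cfg, new_cfg);
+ *	if (changes & CFG_WATERMARKS)
+ *		intel_update_watermarks(dev);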
+ */ +static void i9xx_crtc_prepare(struct drm_crtc *crtc) { - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->enabled) - intel_crtc_disable(crtc); - } + i9xx_crtc_disable(crtc); } -void intel_encoder_noop(struct drm_encoder *encoder) +static void i9xx_crtc_commit(struct drm_crtc *crtc) { + i9xx_crtc_enable(crtc); } -void intel_encoder_destroy(struct drm_encoder *encoder) +static void ironlake_crtc_prepare(struct drm_crtc *crtc) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - - drm_encoder_cleanup(encoder); - kfree(intel_encoder); + ironlake_crtc_disable(crtc); } -/* Simple dpms helper for encodres with just one connector, no cloning and only - * one kind of off state. It clamps all !ON modes to fully OFF and changes the - * state of the entire output pipe. */ -void intel_encoder_dpms(struct intel_encoder *encoder, int mode) +static void ironlake_crtc_commit(struct drm_crtc *crtc) { - if (mode == DRM_MODE_DPMS_ON) { - encoder->connectors_active = true; - - intel_crtc_update_dpms(encoder->base.crtc); - } else { - encoder->connectors_active = false; - - intel_crtc_update_dpms(encoder->base.crtc); - } + ironlake_crtc_enable(crtc); } -/* Cross check the actual hw state with our own modeset state tracking (and it's - * internal consistency). */ -static void intel_connector_check_state(struct intel_connector *connector) +void intel_encoder_prepare(struct drm_encoder *encoder) { - if (connector->get_hw_state(connector)) { - struct intel_encoder *encoder = connector->encoder; - struct drm_crtc *crtc; - bool encoder_enabled; - enum pipe pipe; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.base.id, - drm_get_connector_name(&connector->base)); - - WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, - "wrong connector dpms state\n"); - WARN(connector->base.encoder != &encoder->base, - "active connector not linked to encoder\n"); - WARN(!encoder->connectors_active, - "encoder->connectors_active not set\n"); - - encoder_enabled = encoder->get_hw_state(encoder, &pipe); - WARN(!encoder_enabled, "encoder not enabled\n"); - if (WARN_ON(!encoder->base.crtc)) - return; - - crtc = encoder->base.crtc; - - WARN(!crtc->enabled, "crtc not enabled\n"); - WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); - WARN(pipe != to_intel_crtc(crtc)->pipe, - "encoder active on the wrong pipe\n"); - } + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + /* lvds has its own version of prepare see intel_lvds_prepare */ + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); } -/* Even simpler default implementation, if there's really no special case to - * consider. */ -void intel_connector_dpms(struct drm_connector *connector, int mode) +void intel_encoder_commit(struct drm_encoder *encoder) { - struct intel_encoder *encoder = intel_attached_encoder(connector); - - /* All the simple cases only support two dpms states. 
*/ - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (mode == connector->dpms) - return; - - connector->dpms = mode; + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + struct drm_device *dev = encoder->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - /* Only need to change hw state when actually enabled */ - if (encoder->base.crtc) - intel_encoder_dpms(encoder, mode); - else - WARN_ON(encoder->connectors_active != false); + /* lvds has its own version of commit see intel_lvds_commit */ + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); - intel_modeset_check_state(connector->dev); + if (HAS_PCH_CPT(dev)) + intel_cpt_verify_modeset(dev, intel_crtc->pipe); } -/* Simple connector->get_hw_state implementation for encoders that support only - * one connector and no cloning and hence the encoder state determines the state - * of the connector. */ -bool intel_connector_get_hw_state(struct intel_connector *connector) +void intel_encoder_destroy(struct drm_encoder *encoder) { - enum pipe pipe = 0; - struct intel_encoder *encoder = connector->encoder; + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - return encoder->get_hw_state(encoder, &pipe); + drm_encoder_cleanup(encoder); + kfree(intel_encoder); } static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, @@ -3625,13 +3593,6 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET)) drm_mode_set_crtcinfo(adjusted_mode, 0); - /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes - * with a hsync front porch of 0. - */ - if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && - adjusted_mode->hsync_start == adjusted_mode->hdisplay) - return false; - return true; } @@ -3767,7 +3728,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) * true if they don't match). */ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, - struct drm_framebuffer *fb, unsigned int *pipe_bpp, struct drm_display_mode *mode) { @@ -3837,7 +3797,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, * also stays within the max display bpc discovered above. 
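 *
 * Net effect, as a rough sketch (fb_bpc and display_bpc are shorthand
 * here, not variables in this function; the pipe bpp values used by
 * this driver are simply 3 * bpc):
 *
 *	bpc = min(fb_bpc, display_bpc);
 *	*pipe_bpp = bpc * 3;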
*/ - switch (fb->depth) { + switch (crtc->fb->depth) { case 8: bpc = 8; /* since we go through a colormap */ break; @@ -4256,7 +4216,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *fb) + struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4446,7 +4406,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); - ret = intel_pipe_set_base(crtc, x, y, fb); + ret = intel_pipe_set_base(crtc, x, y, old_fb); intel_update_watermarks(dev); @@ -4600,147 +4560,41 @@ static int ironlake_get_refclk(struct drm_crtc *crtc) return 120000; } -static void ironlake_set_pipeconf(struct drm_crtc *crtc, +static int ironlake_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, - bool dither) -{ - struct drm_i915_private *dev_priv = crtc->dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - uint32_t val; - - val = I915_READ(PIPECONF(pipe)); - - val &= ~PIPE_BPC_MASK; - switch (intel_crtc->bpp) { - case 18: - val |= PIPE_6BPC; - break; - case 24: - val |= PIPE_8BPC; - break; - case 30: - val |= PIPE_10BPC; - break; - case 36: - val |= PIPE_12BPC; - break; - default: - val |= PIPE_8BPC; - break; - } - - val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); - if (dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - - val &= ~PIPECONF_INTERLACE_MASK; - if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; - else - val |= PIPECONF_PROGRESSIVE; - - I915_WRITE(PIPECONF(pipe), val); - POSTING_READ(PIPECONF(pipe)); -} - -static bool ironlake_compute_clocks(struct drm_crtc *crtc, - struct drm_display_mode *adjusted_mode, - intel_clock_t *clock, - bool *has_reduced_clock, - intel_clock_t *reduced_clock) + int x, int y, + struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; - int refclk; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int refclk, num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + struct intel_encoder *encoder, *edp_encoder = NULL; const intel_limit_t *limit; - bool ret, is_sdvo = false, is_tv = false, is_lvds = false; + int ret; + struct fdi_m_n m_n = {0}; + u32 temp; + int target_clock, pixel_multiplier, lane, link_bw, factor; + unsigned int pipe_bpp; + bool dither; + bool is_cpu_edp = false, is_pch_edp = false; - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { - switch (intel_encoder->type) { + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; case INTEL_OUTPUT_SDVO: case INTEL_OUTPUT_HDMI: is_sdvo = true; - if (intel_encoder->needs_tv_clock) - is_tv = true; - break; - case INTEL_OUTPUT_TVOUT: - is_tv = true; - break; - } - } - - refclk = ironlake_get_refclk(crtc); - - /* - * Returns a set of divisors for the desired target clock with the given - * refclk, or FALSE. 
The returned values represent the clock equation: - * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. - */ - limit = intel_limit(crtc, refclk); - ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, - clock); - if (!ret) - return false; - - if (is_lvds && dev_priv->lvds_downclock_avail) { - /* - * Ensure we match the reduced clock's P to the target clock. - * If the clocks don't match, we can't switch the display clock - * by using the FP0/FP1. In such case we will disable the LVDS - * downclock feature. - */ - *has_reduced_clock = limit->find_pll(limit, crtc, - dev_priv->lvds_downclock, - refclk, - clock, - reduced_clock); - } - - if (is_sdvo && is_tv) - i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock); - - return true; -} - -static int ironlake_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *fb) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - int num_connectors = 0; - intel_clock_t clock, reduced_clock; - u32 dpll, fp = 0, fp2 = 0; - bool ok, has_reduced_clock = false, is_sdvo = false; - bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - struct intel_encoder *encoder, *edp_encoder = NULL; - int ret; - struct fdi_m_n m_n = {0}; - u32 temp; - int target_clock, pixel_multiplier, lane, link_bw, factor; - unsigned int pipe_bpp; - bool dither; - bool is_cpu_edp = false, is_pch_edp = false; - - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_SDVO: - case INTEL_OUTPUT_HDMI: - is_sdvo = true; - if (encoder->needs_tv_clock) + if (encoder->needs_tv_clock) is_tv = true; break; case INTEL_OUTPUT_TVOUT: @@ -4765,8 +4619,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, num_connectors++; } - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, - &has_reduced_clock, &reduced_clock); + refclk = ironlake_get_refclk(crtc); + + /* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + */ + limit = intel_limit(crtc, refclk); + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, + &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; @@ -4775,6 +4637,24 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, /* Ensure that the cursor is valid for the new mode before changing... */ intel_crtc_update_cursor(crtc, true); + if (is_lvds && dev_priv->lvds_downclock_avail) { + /* + * Ensure we match the reduced clock's P to the target clock. + * If the clocks don't match, we can't switch the display clock + * by using the FP0/FP1. In such case we will disable the LVDS + * downclock feature. 
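+	 *
+	 * Matching the P divisor is what passing the already-computed
+	 * target clock back into find_pll() appears to accomplish; the
+	 * call made just below is:
+	 *
+	 *	has_reduced_clock = limit->find_pll(limit, crtc,
+	 *					    dev_priv->lvds_downclock,
+	 *					    refclk, &clock,
+	 *					    &reduced_clock);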
+ */ + has_reduced_clock = limit->find_pll(limit, crtc, + dev_priv->lvds_downclock, + refclk, + &clock, + &reduced_clock); + } + + if (is_sdvo && is_tv) + i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); + + /* FDI link */ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); lane = 0; @@ -4802,17 +4682,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, target_clock = adjusted_mode->clock; /* determine panel color depth */ - dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode); - if (is_lvds && dev_priv->lvds_dither) - dither = true; - - if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 && - pipe_bpp != 36) { + temp = I915_READ(PIPECONF(pipe)); + temp &= ~PIPE_BPC_MASK; + dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); + switch (pipe_bpp) { + case 18: + temp |= PIPE_6BPC; + break; + case 24: + temp |= PIPE_8BPC; + break; + case 30: + temp |= PIPE_10BPC; + break; + case 36: + temp |= PIPE_12BPC; + break; + default: WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", - pipe_bpp); + pipe_bpp); + temp |= PIPE_8BPC; pipe_bpp = 24; + break; } + intel_crtc->bpp = pipe_bpp; + I915_WRITE(PIPECONF(pipe), temp); if (!lane) { /* @@ -4896,6 +4791,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, else dpll |= PLL_REF_INPUT_DREFCLK; + /* setup pipeconf */ + pipeconf = I915_READ(PIPECONF(pipe)); + + /* Set up the display plane register */ + dspcntr = DISPPLANE_GAMMA_ENABLE; + DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); drm_mode_debug_printmodeline(mode); @@ -4955,6 +4856,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(PCH_LVDS, temp); } + pipeconf &= ~PIPECONF_DITHER_EN; + pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; + if ((is_lvds && dev_priv->lvds_dither) || dither) { + pipeconf |= PIPECONF_DITHER_EN; + pipeconf |= PIPECONF_DITHER_TYPE_SP; + } if (is_dp && !is_cpu_edp) { intel_dp_set_m_n(crtc, mode, adjusted_mode); } else { @@ -4990,7 +4897,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, } } + pipeconf &= ~PIPECONF_INTERLACE_MASK; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { + pipeconf |= PIPECONF_INTERLACED_ILK; /* the chip adds 2 halflines automatically */ adjusted_mode->crtc_vtotal -= 1; adjusted_mode->crtc_vblank_end -= 1; @@ -4998,6 +4907,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_htotal/2); } else { + pipeconf |= PIPECONF_PROGRESSIVE; I915_WRITE(VSYNCSHIFT(pipe), 0); } @@ -5035,15 +4945,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (is_cpu_edp) ironlake_set_pll_edp(crtc, adjusted_mode->clock); - ironlake_set_pipeconf(crtc, adjusted_mode, dither); + I915_WRITE(PIPECONF(pipe), pipeconf); + POSTING_READ(PIPECONF(pipe)); intel_wait_for_vblank(dev, pipe); - /* Set up the display plane register */ - I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); + I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); - ret = intel_pipe_set_base(crtc, x, y, fb); + ret = intel_pipe_set_base(crtc, x, y, old_fb); intel_update_watermarks(dev); @@ -5056,7 +4966,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *fb) + struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -5067,9 +4977,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, drm_vblank_pre_modeset(dev, pipe); ret = 
dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, - x, y, fb); + x, y, old_fb); drm_vblank_post_modeset(dev, pipe); + if (ret) + intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; + else + intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; + return ret; } @@ -5142,91 +5057,6 @@ static void g4x_write_eld(struct drm_connector *connector, I915_WRITE(G4X_AUD_CNTL_ST, i); } -static void haswell_write_eld(struct drm_connector *connector, - struct drm_crtc *crtc) -{ - struct drm_i915_private *dev_priv = connector->dev->dev_private; - uint8_t *eld = connector->eld; - struct drm_device *dev = crtc->dev; - uint32_t eldv; - uint32_t i; - int len; - int pipe = to_intel_crtc(crtc)->pipe; - int tmp; - - int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); - int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); - int aud_config = HSW_AUD_CFG(pipe); - int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; - - - DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); - - /* Audio output enable */ - DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); - tmp = I915_READ(aud_cntrl_st2); - tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); - I915_WRITE(aud_cntrl_st2, tmp); - - /* Wait for 1 vertical blank */ - intel_wait_for_vblank(dev, pipe); - - /* Set ELD valid state */ - tmp = I915_READ(aud_cntrl_st2); - DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); - tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); - I915_WRITE(aud_cntrl_st2, tmp); - tmp = I915_READ(aud_cntrl_st2); - DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); - - /* Enable HDMI mode */ - tmp = I915_READ(aud_config); - DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); - /* clear N_programing_enable and N_value_index */ - tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); - I915_WRITE(aud_config, tmp); - - DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); - - eldv = AUDIO_ELD_VALID_A << (pipe * 4); - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { - DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); - eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ - I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ - } else - I915_WRITE(aud_config, 0); - - if (intel_eld_uptodate(connector, - aud_cntrl_st2, eldv, - aud_cntl_st, IBX_ELD_ADDRESS, - hdmiw_hdmiedid)) - return; - - i = I915_READ(aud_cntrl_st2); - i &= ~eldv; - I915_WRITE(aud_cntrl_st2, i); - - if (!eld[0]) - return; - - i = I915_READ(aud_cntl_st); - i &= ~IBX_ELD_ADDRESS; - I915_WRITE(aud_cntl_st, i); - i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ - DRM_DEBUG_DRIVER("port num:%d\n", i); - - len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ - DRM_DEBUG_DRIVER("ELD size %d\n", len); - for (i = 0; i < len; i++) - I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); - - i = I915_READ(aud_cntrl_st2); - i |= eldv; - I915_WRITE(aud_cntrl_st2, i); - -} - static void ironlake_write_eld(struct drm_connector *connector, struct drm_crtc *crtc) { @@ -5239,24 +5069,28 @@ static void ironlake_write_eld(struct drm_connector *connector, int aud_config; int aud_cntl_st; int aud_cntrl_st2; - int pipe = to_intel_crtc(crtc)->pipe; if (HAS_PCH_IBX(connector->dev)) { - hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); - aud_config = IBX_AUD_CFG(pipe); - aud_cntl_st = IBX_AUD_CNTL_ST(pipe); + hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; + aud_config = IBX_AUD_CONFIG_A; + aud_cntl_st = IBX_AUD_CNTL_ST_A; aud_cntrl_st2 = IBX_AUD_CNTL_ST2; } else { - hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); - aud_config = CPT_AUD_CFG(pipe); - aud_cntl_st = CPT_AUD_CNTL_ST(pipe); + 
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; + aud_config = CPT_AUD_CONFIG_A; + aud_cntl_st = CPT_AUD_CNTL_ST_A; aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; } - DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); + i = to_intel_crtc(crtc)->pipe; + hdmiw_hdmiedid += i * 0x100; + aud_cntl_st += i * 0x100; + aud_config += i * 0x100; + + DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); i = I915_READ(aud_cntl_st); - i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ + i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ if (!i) { DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); /* operate blindly on all ports */ @@ -5503,6 +5337,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, uint32_t addr; int ret; + DRM_DEBUG_KMS("\n"); + /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG_KMS("cursor off\n"); @@ -5748,18 +5584,17 @@ mode_fits_in_fbdev(struct drm_device *dev, return fb; } -bool intel_get_load_detect_pipe(struct drm_connector *connector, +bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, struct drm_display_mode *mode, struct intel_load_detect_pipe *old) { struct intel_crtc *intel_crtc; - struct intel_encoder *intel_encoder = - intel_attached_encoder(connector); struct drm_crtc *possible_crtc; struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; - struct drm_framebuffer *fb; + struct drm_framebuffer *old_fb; int i = -1; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", @@ -5780,12 +5615,21 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, if (encoder->crtc) { crtc = encoder->crtc; - old->dpms_mode = connector->dpms; + intel_crtc = to_intel_crtc(crtc); + old->dpms_mode = intel_crtc->dpms_mode; old->load_detect_temp = false; /* Make sure the crtc and connector are running */ - if (connector->dpms != DRM_MODE_DPMS_ON) - connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); + if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { + struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_crtc_helper_funcs *crtc_funcs; + + crtc_funcs = crtc->helper_private; + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); + + encoder_funcs = encoder->helper_private; + encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); + } return true; } @@ -5809,17 +5653,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, return false; } - intel_encoder->new_crtc = to_intel_crtc(crtc); - to_intel_connector(connector)->new_encoder = intel_encoder; + encoder->crtc = crtc; + connector->encoder = encoder; intel_crtc = to_intel_crtc(crtc); - old->dpms_mode = connector->dpms; + old->dpms_mode = intel_crtc->dpms_mode; old->load_detect_temp = true; old->release_fb = NULL; if (!mode) mode = &load_detect_mode; + old_fb = crtc->fb; + /* We need a framebuffer large enough to accommodate all accesses * that the plane may generate whilst we perform load detection. * We can not rely on the fbcon either being present (we get called @@ -5827,52 +5673,50 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, * not even exist) or that it is large enough to satisfy the * requested mode. 
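 *
 * The fallback order is thus (sketch; the code below stores the result
 * directly in crtc->fb):
 *
 *	fb = mode_fits_in_fbdev(dev, mode);
 *	if (fb == NULL)
 *		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);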
*/ - fb = mode_fits_in_fbdev(dev, mode); - if (fb == NULL) { + crtc->fb = mode_fits_in_fbdev(dev, mode); + if (crtc->fb == NULL) { DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); - fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); - old->release_fb = fb; + crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); + old->release_fb = crtc->fb; } else DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); - if (IS_ERR(fb)) { + if (IS_ERR(crtc->fb)) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); - goto fail; + crtc->fb = old_fb; + return false; } - if (!intel_set_mode(crtc, mode, 0, 0, fb)) { + if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); - goto fail; + crtc->fb = old_fb; + return false; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); return true; -fail: - connector->encoder = NULL; - encoder->crtc = NULL; - return false; } -void intel_release_load_detect_pipe(struct drm_connector *connector, +void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, struct intel_load_detect_pipe *old) { - struct intel_encoder *intel_encoder = - intel_attached_encoder(connector); struct drm_encoder *encoder = &intel_encoder->base; + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc = encoder->crtc; + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), encoder->base.id, drm_get_encoder_name(encoder)); if (old->load_detect_temp) { - struct drm_crtc *crtc = encoder->crtc; - - to_intel_connector(connector)->new_encoder = NULL; - intel_encoder->new_crtc = NULL; - intel_set_mode(crtc, NULL, 0, 0, NULL); + connector->encoder = NULL; + drm_helper_disable_unused_functions(dev); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); @@ -5881,8 +5725,10 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, } /* Switch crtc and encoder back off if necessary */ - if (old->dpms_mode != DRM_MODE_DPMS_ON) - connector->funcs->dpms(connector, old->dpms_mode); + if (old->dpms_mode != DRM_MODE_DPMS_ON) { + encoder_funcs->dpms(encoder, old->dpms_mode); + crtc_funcs->dpms(crtc, old->dpms_mode); + } } /* Returns the clock of the currently programmed mode of the given pipe. */ @@ -6004,6 +5850,46 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } +#define GPU_IDLE_TIMEOUT 500 /* ms */ + +/* When this timer fires, we've been idle for awhile */ +static void intel_gpu_idle_timer(unsigned long arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!list_empty(&dev_priv->mm.active_list)) { + /* Still processing requests, so just re-arm the timer. 
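+	 *
+	 * intel_mark_busy() is what arms this timer in the first place,
+	 * using the same re-arm that follows:
+	 *
+	 *	mod_timer(&dev_priv->idle_timer, jiffies +
+	 *		  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+	 *
+	 * Once the active list drains, we instead clear dev_priv->busy
+	 * and queue intel_idle_update() below to downclock.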
*/ + mod_timer(&dev_priv->idle_timer, jiffies + + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); + return; + } + + dev_priv->busy = false; + queue_work(dev_priv->wq, &dev_priv->idle_work); +} + +#define CRTC_IDLE_TIMEOUT 1000 /* ms */ + +static void intel_crtc_idle_timer(unsigned long arg) +{ + struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; + struct drm_crtc *crtc = &intel_crtc->base; + drm_i915_private_t *dev_priv = crtc->dev->dev_private; + struct intel_framebuffer *intel_fb; + + intel_fb = to_intel_framebuffer(crtc->fb); + if (intel_fb && intel_fb->obj->active) { + /* The framebuffer is still being accessed by the GPU. */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + return; + } + + intel_crtc->busy = false; + queue_work(dev_priv->wq, &dev_priv->idle_work); +} + static void intel_increase_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -6033,6 +5919,10 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) if (dpll & DISPLAY_RATE_SELECT_FPA1) DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); } + + /* Schedule downclock */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); } static void intel_decrease_pllclock(struct drm_crtc *crtc) @@ -6071,46 +5961,89 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) } -void intel_mark_busy(struct drm_device *dev) -{ - i915_update_gfx_val(dev->dev_private); -} - -void intel_mark_idle(struct drm_device *dev) -{ -} - -void intel_mark_fb_busy(struct drm_i915_gem_object *obj) +/** + * intel_idle_update - adjust clocks for idleness + * @work: work struct + * + * Either the GPU or display (or both) went idle. Check the busy status + * here and adjust the CRTC and GPU clocks as necessary. + */ +static void intel_idle_update(struct work_struct *work) { - struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, + idle_work); + struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; if (!i915_powersave) return; + mutex_lock(&dev->struct_mutex); + + i915_update_gfx_val(dev_priv); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + /* Skip inactive CRTCs */ if (!crtc->fb) continue; - if (to_intel_framebuffer(crtc->fb)->obj == obj) - intel_increase_pllclock(crtc); + intel_crtc = to_intel_crtc(crtc); + if (!intel_crtc->busy) + intel_decrease_pllclock(crtc); } + + + mutex_unlock(&dev->struct_mutex); } -void intel_mark_fb_idle(struct drm_i915_gem_object *obj) +/** + * intel_mark_busy - mark the GPU and possibly the display busy + * @dev: drm device + * @obj: object we're operating on + * + * Callers can use this function to indicate that the GPU is busy processing + * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout + * buffer), we'll also mark the display as busy, so we know to increase its + * clock frequency. 
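+ *
+ * Typical call site: the page-flip path later in this diff marks the
+ * new scanout object busy right after disabling FBC, i.e.
+ *
+ *	intel_disable_fbc(dev);
+ *	intel_mark_busy(dev, obj);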
+ */ +void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_crtc *crtc; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = NULL; + struct intel_framebuffer *intel_fb; + struct intel_crtc *intel_crtc; - if (!i915_powersave) + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return; + + if (!dev_priv->busy) { + intel_sanitize_pm(dev); + dev_priv->busy = true; + } else + mod_timer(&dev_priv->idle_timer, jiffies + + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); + + if (obj == NULL) return; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (!crtc->fb) continue; - if (to_intel_framebuffer(crtc->fb)->obj == obj) - intel_decrease_pllclock(crtc); + intel_crtc = to_intel_crtc(crtc); + intel_fb = to_intel_framebuffer(crtc->fb); + if (intel_fb->obj == obj) { + if (!intel_crtc->busy) { + /* Non-busy -> busy, upclock */ + intel_increase_pllclock(crtc); + intel_crtc->busy = true; + } else { + /* Busy -> busy, put off timer */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + } + } } } @@ -6461,7 +6394,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, default: WARN_ONCE(1, "unknown plane in flip command\n"); ret = -ENODEV; - goto err_unpin; + goto err; } ret = intel_ring_begin(ring, 4); @@ -6569,7 +6502,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_pending; intel_disable_fbc(dev); - intel_mark_fb_busy(obj); + intel_mark_busy(dev, obj); mutex_unlock(&dev->struct_mutex); trace_i915_flip_request(intel_crtc->plane, obj); @@ -6594,807 +6527,81 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return ret; } -static struct drm_crtc_helper_funcs intel_helper_funcs = { - .mode_set_base_atomic = intel_pipe_set_base_atomic, - .load_lut = intel_crtc_load_lut, - .disable = intel_crtc_noop, -}; - -bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) -{ - struct intel_encoder *other_encoder; - struct drm_crtc *crtc = &encoder->new_crtc->base; - - if (WARN_ON(!crtc)) - return false; - - list_for_each_entry(other_encoder, - &crtc->dev->mode_config.encoder_list, - base.head) { - - if (&other_encoder->new_crtc->base != crtc || - encoder == other_encoder) - continue; - else - return true; - } - - return false; -} - -static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, - struct drm_crtc *crtc) +static void intel_sanitize_modesetting(struct drm_device *dev, + int pipe, int plane) { - struct drm_device *dev; - struct drm_crtc *tmp; - int crtc_mask = 1; - - WARN(!crtc, "checking null crtc?\n"); - - dev = crtc->dev; - - list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { - if (tmp == crtc) - break; - crtc_mask <<= 1; - } - - if (encoder->possible_crtcs & crtc_mask) - return true; - return false; -} - -/** - * intel_modeset_update_staged_output_state - * - * Updates the staged output configuration state, e.g. after we've read out the - * current hw state. 
- */ -static void intel_modeset_update_staged_output_state(struct drm_device *dev) -{ - struct intel_encoder *encoder; - struct intel_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - connector->new_encoder = - to_intel_encoder(connector->base.encoder); - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - encoder->new_crtc = - to_intel_crtc(encoder->base.crtc); - } -} - -/** - * intel_modeset_commit_output_state - * - * This function copies the stage display pipe configuration to the real one. - */ -static void intel_modeset_commit_output_state(struct drm_device *dev) -{ - struct intel_encoder *encoder; - struct intel_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - connector->base.encoder = &connector->new_encoder->base; - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - encoder->base.crtc = &encoder->new_crtc->base; - } -} - -static struct drm_display_mode * -intel_modeset_adjusted_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode) -{ - struct drm_device *dev = crtc->dev; - struct drm_display_mode *adjusted_mode; - struct drm_encoder_helper_funcs *encoder_funcs; - struct intel_encoder *encoder; - - adjusted_mode = drm_mode_duplicate(dev, mode); - if (!adjusted_mode) - return ERR_PTR(-ENOMEM); - - /* Pass our mode to the connectors and the CRTC to give them a chance to - * adjust it according to limitations or connector properties, and also - * a chance to reject the mode entirely. - */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - - if (&encoder->new_crtc->base != crtc) - continue; - encoder_funcs = encoder->base.helper_private; - if (!(encoder_funcs->mode_fixup(&encoder->base, mode, - adjusted_mode))) { - DRM_DEBUG_KMS("Encoder fixup failed\n"); - goto fail; - } - } - - if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) { - DRM_DEBUG_KMS("CRTC fixup failed\n"); - goto fail; - } - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); - - return adjusted_mode; -fail: - drm_mode_destroy(dev, adjusted_mode); - return ERR_PTR(-EINVAL); -} - -/* Computes which crtcs are affected and sets the relevant bits in the mask. For - * simplicity we use the crtc's pipe number (because it's easier to obtain). */ -static void -intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, - unsigned *prepare_pipes, unsigned *disable_pipes) -{ - struct intel_crtc *intel_crtc; - struct drm_device *dev = crtc->dev; - struct intel_encoder *encoder; - struct intel_connector *connector; - struct drm_crtc *tmp_crtc; - - *disable_pipes = *modeset_pipes = *prepare_pipes = 0; - - /* Check which crtcs have changed outputs connected to them, these need - * to be part of the prepare_pipes mask. We don't (yet) support global - * modeset across multiple crtcs, so modeset_pipes will only have one - * bit set at most. 
*/ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (connector->base.encoder == &connector->new_encoder->base) - continue; - - if (connector->base.encoder) { - tmp_crtc = connector->base.encoder->crtc; - - *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; - } - - if (connector->new_encoder) - *prepare_pipes |= - 1 << connector->new_encoder->new_crtc->pipe; - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - if (encoder->base.crtc == &encoder->new_crtc->base) - continue; - - if (encoder->base.crtc) { - tmp_crtc = encoder->base.crtc; - - *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe; - } - - if (encoder->new_crtc) - *prepare_pipes |= 1 << encoder->new_crtc->pipe; - } - - /* Check for any pipes that will be fully disabled ... */ - list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, - base.head) { - bool used = false; - - /* Don't try to disable disabled crtcs. */ - if (!intel_crtc->base.enabled) - continue; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - if (encoder->new_crtc == intel_crtc) - used = true; - } + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg, val; + int i; - if (!used) - *disable_pipes |= 1 << intel_crtc->pipe; + /* Clear any frame start delays used for debugging left by the BIOS */ + for_each_pipe(i) { + reg = PIPECONF(i); + I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); } + if (HAS_PCH_SPLIT(dev)) + return; - /* set_mode is also used to update properties on life display pipes. */ - intel_crtc = to_intel_crtc(crtc); - if (crtc->enabled) - *prepare_pipes |= 1 << intel_crtc->pipe; - - /* We only support modeset on one single crtc, hence we need to do that - * only for the passed in crtc iff we change anything else than just - * disable crtcs. + /* Who knows what state these registers were left in by the BIOS or + * grub? * - * This is actually not true, to be fully compatible with the old crtc - * helper we automatically disable _any_ output (i.e. doesn't need to be - * connected to the crtc we're modesetting on) if it's disconnected. - * Which is a rather nutty api (since changed the output configuration - * without userspace's explicit request can lead to confusion), but - * alas. Hence we currently need to modeset on all pipes we prepare. */ - if (*prepare_pipes) - *modeset_pipes = *prepare_pipes; - - /* ... and mask these out. */ - *modeset_pipes &= ~(*disable_pipes); - *prepare_pipes &= ~(*disable_pipes); -} - -static bool intel_crtc_in_use(struct drm_crtc *crtc) -{ - struct drm_encoder *encoder; - struct drm_device *dev = crtc->dev; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) - if (encoder->crtc == crtc) - return true; - - return false; -} - -static void -intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) -{ - struct intel_encoder *intel_encoder; - struct intel_crtc *intel_crtc; - struct drm_connector *connector; - - list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, - base.head) { - if (!intel_encoder->base.crtc) - continue; - - intel_crtc = to_intel_crtc(intel_encoder->base.crtc); - - if (prepare_pipes & (1 << intel_crtc->pipe)) - intel_encoder->connectors_active = false; - } - - intel_modeset_commit_output_state(dev); - - /* Update computed state. 
*/ - list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, - base.head) { - intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); - } - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (!connector->encoder || !connector->encoder->crtc) - continue; - - intel_crtc = to_intel_crtc(connector->encoder->crtc); - - if (prepare_pipes & (1 << intel_crtc->pipe)) { - struct drm_property *dpms_property = - dev->mode_config.dpms_property; - - connector->dpms = DRM_MODE_DPMS_ON; - drm_connector_property_set_value(connector, - dpms_property, - DRM_MODE_DPMS_ON); - - intel_encoder = to_intel_encoder(connector->encoder); - intel_encoder->connectors_active = true; - } - } - -} - -#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ - list_for_each_entry((intel_crtc), \ - &(dev)->mode_config.crtc_list, \ - base.head) \ - if (mask & (1 <<(intel_crtc)->pipe)) \ - -void -intel_modeset_check_state(struct drm_device *dev) -{ - struct intel_crtc *crtc; - struct intel_encoder *encoder; - struct intel_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - /* This also checks the encoder/connector hw state with the - * ->get_hw_state callbacks. */ - intel_connector_check_state(connector); - - WARN(&connector->new_encoder->base != connector->base.encoder, - "connector's staged encoder doesn't match current encoder\n"); - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - bool enabled = false; - bool active = false; - enum pipe pipe, tracked_pipe; - - DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", - encoder->base.base.id, - drm_get_encoder_name(&encoder->base)); - - WARN(&encoder->new_crtc->base != encoder->base.crtc, - "encoder's stage crtc doesn't match current crtc\n"); - WARN(encoder->connectors_active && !encoder->base.crtc, - "encoder's active_connectors set, but no crtc\n"); - - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (connector->base.encoder != &encoder->base) - continue; - enabled = true; - if (connector->base.dpms != DRM_MODE_DPMS_OFF) - active = true; - } - WARN(!!encoder->base.crtc != enabled, - "encoder's enabled state mismatch " - "(expected %i, found %i)\n", - !!encoder->base.crtc, enabled); - WARN(active && !encoder->base.crtc, - "active encoder with no crtc\n"); - - WARN(encoder->connectors_active != active, - "encoder's computed active state doesn't match tracked active state " - "(expected %i, found %i)\n", active, encoder->connectors_active); - - active = encoder->get_hw_state(encoder, &pipe); - WARN(active != encoder->connectors_active, - "encoder's hw state doesn't match sw tracking " - "(expected %i, found %i)\n", - encoder->connectors_active, active); - - if (!encoder->base.crtc) - continue; - - tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; - WARN(active && pipe != tracked_pipe, - "active encoder's pipe doesn't match" - "(expected %i, found %i)\n", - tracked_pipe, pipe); - - } - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, - base.head) { - bool enabled = false; - bool active = false; + * If we leave the registers in a conflicting state (e.g. with the + * display plane reading from the other pipe than the one we intend + * to use) then when we attempt to teardown the active mode, we will + * not disable the pipes and planes in the correct order -- leaving + * a plane reading from a disabled pipe and possibly leading to + * undefined behaviour. 
+ */ - DRM_DEBUG_KMS("[CRTC:%d]\n", - crtc->base.base.id); + reg = DSPCNTR(plane); + val = I915_READ(reg); - WARN(crtc->active && !crtc->base.enabled, - "active crtc, but not enabled in sw tracking\n"); + if ((val & DISPLAY_PLANE_ENABLE) == 0) + return; + if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) + return; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - if (encoder->base.crtc != &crtc->base) - continue; - enabled = true; - if (encoder->connectors_active) - active = true; - } - WARN(active != crtc->active, - "crtc's computed active state doesn't match tracked active state " - "(expected %i, found %i)\n", active, crtc->active); - WARN(enabled != crtc->base.enabled, - "crtc's computed enabled state doesn't match tracked enabled state " - "(expected %i, found %i)\n", enabled, crtc->base.enabled); + /* This display plane is active and attached to the other CPU pipe. */ + pipe = !pipe; - assert_pipe(dev->dev_private, crtc->pipe, crtc->active); - } + /* Disable the plane and wait for it to stop reading from the pipe. */ + intel_disable_plane(dev_priv, plane, pipe); + intel_disable_pipe(dev_priv, pipe); } -bool intel_set_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *fb) +static void intel_crtc_reset(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; - struct drm_encoder_helper_funcs *encoder_funcs; - struct drm_encoder *encoder; - struct intel_crtc *intel_crtc; - unsigned disable_pipes, prepare_pipes, modeset_pipes; - bool ret = true; - - intel_modeset_affected_pipes(crtc, &modeset_pipes, - &prepare_pipes, &disable_pipes); - - DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", - modeset_pipes, prepare_pipes, disable_pipes); - - for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) - intel_crtc_disable(&intel_crtc->base); - - saved_hwmode = crtc->hwmode; - saved_mode = crtc->mode; - - /* Hack: Because we don't (yet) support global modeset on multiple - * crtcs, we don't keep track of the new mode for more than one crtc. - * Hence simply check whether any bit is set in modeset_pipes in all the - * pieces of code that are not yet converted to deal with mutliple crtcs - * changing their mode at the same time. */ - adjusted_mode = NULL; - if (modeset_pipes) { - adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); - if (IS_ERR(adjusted_mode)) { - return false; - } - } - - for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { - if (intel_crtc->base.enabled) - dev_priv->display.crtc_disable(&intel_crtc->base); - } + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - /* crtc->mode is already used by the ->mode_set callbacks, hence we need - * to set it here already despite that we pass it down the callchain. + /* Reset flags back to the 'unknown' status so that they + * will be correctly set on the initial modeset. */ - if (modeset_pipes) - crtc->mode = *mode; - - /* Only after disabling all output pipelines that will be changed can we - * update the the output configuration. */ - intel_modeset_update_state(dev, prepare_pipes); + intel_crtc->dpms_mode = -1; - /* Set up the DPLL and any encoders state that needs to adjust or depend - * on the DPLL. + /* We need to fix up any BIOS configuration that conflicts with + * our expectations. 
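+	 * (Leftover frame start delays and a plane attached to the wrong
+	 * pipe are the two cases intel_sanitize_modesetting() above
+	 * handles.)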
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { - ret = !intel_crtc_mode_set(&intel_crtc->base, - mode, adjusted_mode, - x, y, fb); - if (!ret) - goto done; - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - - if (encoder->crtc != &intel_crtc->base) - continue; - - DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", - encoder->base.id, drm_get_encoder_name(encoder), - mode->base.id, mode->name); - encoder_funcs = encoder->helper_private; - encoder_funcs->mode_set(encoder, mode, adjusted_mode); - } - } - - /* Now enable the clocks, plane, pipe, and connectors that we set up. */ - for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) - dev_priv->display.crtc_enable(&intel_crtc->base); - - if (modeset_pipes) { - /* Store real post-adjustment hardware mode. */ - crtc->hwmode = *adjusted_mode; - - /* Calculate and store various constants which - * are later needed by vblank and swap-completion - * timestamping. They are derived from true hwmode. - */ - drm_calc_timestamping_constants(crtc); - } - - /* FIXME: add subpixel order */ -done: - drm_mode_destroy(dev, adjusted_mode); - if (!ret && crtc->enabled) { - crtc->hwmode = saved_hwmode; - crtc->mode = saved_mode; - } else { - intel_modeset_check_state(dev); - } - - return ret; + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } -#undef for_each_intel_crtc_masked - -static void intel_set_config_free(struct intel_set_config *config) -{ - if (!config) - return; - - kfree(config->save_connector_encoders); - kfree(config->save_encoder_crtcs); - kfree(config); -} - -static int intel_set_config_save_state(struct drm_device *dev, - struct intel_set_config *config) -{ - struct drm_encoder *encoder; - struct drm_connector *connector; - int count; - - config->save_encoder_crtcs = - kcalloc(dev->mode_config.num_encoder, - sizeof(struct drm_crtc *), GFP_KERNEL); - if (!config->save_encoder_crtcs) - return -ENOMEM; - - config->save_connector_encoders = - kcalloc(dev->mode_config.num_connector, - sizeof(struct drm_encoder *), GFP_KERNEL); - if (!config->save_connector_encoders) - return -ENOMEM; - - /* Copy data. Note that driver private data is not affected. - * Should anything bad happen, only the expected state is - * restored, not the driver's personal bookkeeping. - */
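/*
 * [Editor's sketch] The helpers above give the setconfig path a transactional
 * shape: snapshot the connector->encoder and encoder->crtc links, attempt the
 * change, and roll the snapshot back on failure. A condensed restatement of
 * that flow, with try_modeset() as a hypothetical stand-in for the real
 * modeset attempt:
 */
static int example_transactional_set_config(struct drm_device *dev,
					    struct drm_mode_set *set)
{
	struct intel_set_config *config;
	int ret;

	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ret = intel_set_config_save_state(dev, config);	/* snapshot the links */
	if (!ret) {
		ret = try_modeset(dev, set);		/* hypothetical */
		if (ret)
			intel_set_config_restore_state(dev, config); /* roll back */
	}

	intel_set_config_free(config);
	return ret;
}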
- count = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - config->save_encoder_crtcs[count++] = encoder->crtc; - } - - count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - config->save_connector_encoders[count++] = connector->encoder; - } - - return 0; -} - -static void intel_set_config_restore_state(struct drm_device *dev, - struct intel_set_config *config) -{ - struct intel_encoder *encoder; - struct intel_connector *connector; - int count; - - count = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { - encoder->new_crtc = - to_intel_crtc(config->save_encoder_crtcs[count++]); - } - - count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { - connector->new_encoder = - to_intel_encoder(config->save_connector_encoders[count++]); - } -} - -static void -intel_set_config_compute_mode_changes(struct drm_mode_set *set, - struct intel_set_config *config) -{ - - /* We should be able to check here if the fb has the same properties - * and then just flip_or_move it */ - if (set->crtc->fb != set->fb) { - /* If we have no fb then treat it as a full mode set */ - if (set->crtc->fb == NULL) { - DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); - config->mode_changed = true; - } else if (set->fb == NULL) { - config->mode_changed = true; - } else if (set->fb->depth != set->crtc->fb->depth) { - config->mode_changed = true; - } else if (set->fb->bits_per_pixel != - set->crtc->fb->bits_per_pixel) { - config->mode_changed = true; - } else - config->fb_changed = true; - } - - if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) - config->fb_changed = true; - - if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { - DRM_DEBUG_KMS("modes are different, full mode set\n"); - drm_mode_debug_printmodeline(&set->crtc->mode); - drm_mode_debug_printmodeline(set->mode); - config->mode_changed = true; - } -} - -static int -intel_modeset_stage_output_state(struct drm_device *dev, - struct drm_mode_set *set, - struct intel_set_config *config) -{ - struct drm_crtc *new_crtc; - struct intel_connector *connector; - struct intel_encoder *encoder; - int count, ro; - - /* The upper layers ensure that we either disable a crtc or have a list - * of connectors. For paranoia, double-check this. */ - WARN_ON(!set->fb && (set->num_connectors != 0)); - WARN_ON(set->fb && (set->num_connectors == 0)); - - count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - /* Otherwise, traverse the passed-in connector list and get encoders - * for them. */ - for (ro = 0; ro < set->num_connectors; ro++) { - if (set->connectors[ro] == &connector->base) { - connector->new_encoder = connector->encoder; - break; - } - } - - /* If we disable the crtc, disable all its connectors. Also, if - * the connector is on the changing crtc but not on the new - * connector list, disable it. */ - if ((!set->fb || ro == set->num_connectors) && - connector->base.encoder && - connector->base.encoder->crtc == set->crtc) { - connector->new_encoder = NULL; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", - connector->base.base.id, - drm_get_connector_name(&connector->base)); - } - - - if (&connector->new_encoder->base != connector->base.encoder) { - DRM_DEBUG_KMS("encoder changed, full mode switch\n"); - config->mode_changed = true; - } - - /* Disable all disconnected encoders.
*/ - if (connector->base.status == connector_status_disconnected) - connector->new_encoder = NULL; - } - /* connector->new_encoder is now updated for all connectors. */ - - /* Update crtc of enabled connectors. */ - count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (!connector->new_encoder) - continue; - - new_crtc = connector->new_encoder->base.crtc; - - for (ro = 0; ro < set->num_connectors; ro++) { - if (set->connectors[ro] == &connector->base) - new_crtc = set->crtc; - } - - /* Make sure the new CRTC will work with the encoder */ - if (!intel_encoder_crtc_ok(&connector->new_encoder->base, - new_crtc)) { - return -EINVAL; - } - connector->encoder->new_crtc = to_intel_crtc(new_crtc); - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", - connector->base.base.id, - drm_get_connector_name(&connector->base), - new_crtc->base.id); - } - - /* Check for any encoders that need to be disabled. */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { - if (connector->new_encoder == encoder) { - WARN_ON(!connector->new_encoder->new_crtc); - - goto next_encoder; - } - } - encoder->new_crtc = NULL; -next_encoder: - /* Only now check for crtc changes so we don't miss encoders - * that will be disabled. */ - if (&encoder->new_crtc->base != encoder->base.crtc) { - DRM_DEBUG_KMS("crtc changed, full mode switch\n"); - config->mode_changed = true; - } - } - /* Now we've also updated encoder->new_crtc for all encoders. */ - - return 0; -} - -static int intel_crtc_set_config(struct drm_mode_set *set) -{ - struct drm_device *dev; - struct drm_mode_set save_set; - struct intel_set_config *config; - int ret; - - BUG_ON(!set); - BUG_ON(!set->crtc); - BUG_ON(!set->crtc->helper_private); - - if (!set->mode) - set->fb = NULL; - - /* The fb helper likes to play gross jokes with ->mode_set_config. - * Unfortunately the crtc helper doesn't do much at all for this case, - * so we have to cope with this madness until the fb helper is fixed up. */ - if (set->fb && set->num_connectors == 0) - return 0; - - if (set->fb) { - DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", - set->crtc->base.id, set->fb->base.id, - (int)set->num_connectors, set->x, set->y); - } else { - DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); - } - - dev = set->crtc->dev; - - ret = -ENOMEM; - config = kzalloc(sizeof(*config), GFP_KERNEL); - if (!config) - goto out_config; - - ret = intel_set_config_save_state(dev, config); - if (ret) - goto out_config; - - save_set.crtc = set->crtc; - save_set.mode = &set->crtc->mode; - save_set.x = set->crtc->x; - save_set.y = set->crtc->y; - save_set.fb = set->crtc->fb; - - /* Compute whether we need a full modeset, only an fb base update or no - * change at all. In the future we might also check whether only the - * mode changed, e.g. for LVDS where we only change the panel fitter in - * such cases. */
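/*
 * [Editor's sketch] The "full modeset or fast update" decision computed by
 * intel_set_config_compute_mode_changes() (earlier in this hunk) reduces to a
 * short ladder; the boolean parameters below are placeholders for the
 * concrete fb/mode comparisons made by the real code:
 */
static void example_classify_change(struct intel_set_config *config,
				    bool mode_or_format_differs,
				    bool only_scanout_base_differs)
{
	if (mode_or_format_differs)
		config->mode_changed = true;	/* full modeset path */
	else if (only_scanout_base_differs)
		config->fb_changed = true;	/* fast intel_pipe_set_base() path */
}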
- intel_set_config_compute_mode_changes(set, config); - - ret = intel_modeset_stage_output_state(dev, set, config); - if (ret) - goto fail; - - if (config->mode_changed) { - if (set->mode) { - DRM_DEBUG_KMS("attempting to set mode from" - " userspace\n"); - drm_mode_debug_printmodeline(set->mode); - } - - if (!intel_set_mode(set->crtc, set->mode, - set->x, set->y, set->fb)) { - DRM_ERROR("failed to set mode on [CRTC:%d]\n", - set->crtc->base.id); - ret = -EINVAL; - goto fail; - } - } else if (config->fb_changed) { - ret = intel_pipe_set_base(set->crtc, - set->x, set->y, set->fb); - } - - intel_set_config_free(config); - - return 0; - -fail: - intel_set_config_restore_state(dev, config); - - /* Try to restore the config */ - if (config->mode_changed && - !intel_set_mode(save_set.crtc, save_set.mode, - save_set.x, save_set.y, save_set.fb)) - DRM_ERROR("failed to restore config after modeset failure\n"); - -out_config: - intel_set_config_free(config); - return ret; -} +static struct drm_crtc_helper_funcs intel_helper_funcs = { + .dpms = intel_crtc_dpms, + .mode_fixup = intel_crtc_mode_fixup, + .mode_set = intel_crtc_mode_set, + .mode_set_base = intel_pipe_set_base, + .mode_set_base_atomic = intel_pipe_set_base_atomic, + .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_disable, +}; static const struct drm_crtc_funcs intel_crtc_funcs = { + .reset = intel_crtc_reset, .cursor_set = intel_crtc_cursor_set, .cursor_move = intel_crtc_cursor_move, .gamma_set = intel_crtc_gamma_set, - .set_config = intel_crtc_set_config, + .set_config = drm_crtc_helper_set_config, .destroy = intel_crtc_destroy, .page_flip = intel_crtc_page_flip, }; @@ -7448,9 +6655,24 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; + intel_crtc_reset(&intel_crtc->base); + intel_crtc->active = true; /* force the pipe off on setup_init_config */ intel_crtc->bpp = 24; /* default for pre-Ironlake */ + if (HAS_PCH_SPLIT(dev)) { + intel_helper_funcs.prepare = ironlake_crtc_prepare; + intel_helper_funcs.commit = ironlake_crtc_commit; + } else { + intel_helper_funcs.prepare = i9xx_crtc_prepare; + intel_helper_funcs.commit = i9xx_crtc_commit; + } + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); + + intel_crtc->busy = false; + + setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, + (unsigned long)intel_crtc); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, @@ -7477,23 +6699,15 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, return 0; } -static int intel_encoder_clones(struct intel_encoder *encoder) +static int intel_encoder_clones(struct drm_device *dev, int type_mask) { - struct drm_device *dev = encoder->base.dev; - struct intel_encoder *source_encoder; + struct intel_encoder *encoder; int index_mask = 0; int entry = 0; - list_for_each_entry(source_encoder, - &dev->mode_config.encoder_list, base.head) { - - if (encoder == source_encoder) - index_mask |= (1 << entry); - - /* Intel hw has only one MUX where encoders could be cloned.
*/ - if (encoder->cloneable && source_encoder->cloneable) + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + if (type_mask & encoder->clone_mask) index_mask |= (1 << entry); - entry++; } @@ -7534,10 +6748,10 @@ static void intel_setup_outputs(struct drm_device *dev) dpd_is_edp = intel_dpd_is_edp(dev); if (has_edp_a(dev)) - intel_dp_init(dev, DP_A, PORT_A); + intel_dp_init(dev, DP_A); if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_D, PORT_D); + intel_dp_init(dev, PCH_DP_D); } intel_crt_init(dev); @@ -7568,22 +6782,22 @@ static void intel_setup_outputs(struct drm_device *dev) /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, PCH_SDVOB, true); if (!found) - intel_hdmi_init(dev, HDMIB, PORT_B); + intel_hdmi_init(dev, HDMIB); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_B, PORT_B); + intel_dp_init(dev, PCH_DP_B); } if (I915_READ(HDMIC) & PORT_DETECTED) - intel_hdmi_init(dev, HDMIC, PORT_C); + intel_hdmi_init(dev, HDMIC); if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) - intel_hdmi_init(dev, HDMID, PORT_D); + intel_hdmi_init(dev, HDMID); if (I915_READ(PCH_DP_C) & DP_DETECTED) - intel_dp_init(dev, PCH_DP_C, PORT_C); + intel_dp_init(dev, PCH_DP_C); if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_D, PORT_D); + intel_dp_init(dev, PCH_DP_D); } else if (IS_VALLEYVIEW(dev)) { int found; @@ -7591,17 +6805,17 @@ static void intel_setup_outputs(struct drm_device *dev) /* SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, SDVOB, true); if (!found) - intel_hdmi_init(dev, SDVOB, PORT_B); + intel_hdmi_init(dev, SDVOB); if (!found && (I915_READ(DP_B) & DP_DETECTED)) - intel_dp_init(dev, DP_B, PORT_B); + intel_dp_init(dev, DP_B); } if (I915_READ(SDVOC) & PORT_DETECTED) - intel_hdmi_init(dev, SDVOC, PORT_C); + intel_hdmi_init(dev, SDVOC); /* Shares lanes with HDMI on SDVOC */ if (I915_READ(DP_C) & DP_DETECTED) - intel_dp_init(dev, DP_C, PORT_C); + intel_dp_init(dev, DP_C); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; @@ -7610,12 +6824,12 @@ static void intel_setup_outputs(struct drm_device *dev) found = intel_sdvo_init(dev, SDVOB, true); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); - intel_hdmi_init(dev, SDVOB, PORT_B); + intel_hdmi_init(dev, SDVOB); } if (!found && SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_B\n"); - intel_dp_init(dev, DP_B, PORT_B); + intel_dp_init(dev, DP_B); } } @@ -7630,18 +6844,18 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); - intel_hdmi_init(dev, SDVOC, PORT_C); + intel_hdmi_init(dev, SDVOC); } if (SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_C\n"); - intel_dp_init(dev, DP_C, PORT_C); + intel_dp_init(dev, DP_C); } } if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) { DRM_DEBUG_KMS("probing DP_D\n"); - intel_dp_init(dev, DP_D, PORT_D); + intel_dp_init(dev, DP_D); } } else if (IS_GEN2(dev)) intel_dvo_init(dev); @@ -7652,9 +6866,12 @@ static void intel_setup_outputs(struct drm_device *dev) list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = - intel_encoder_clones(encoder); + intel_encoder_clones(dev, encoder->clone_mask); } + /* disable all the possible outputs/crtcs before entering KMS mode */ + 
drm_helper_disable_unused_functions(dev); + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ironlake_init_pch_refclk(dev); } @@ -7756,15 +6973,13 @@ static void intel_init_display(struct drm_device *dev) /* We always want a DPMS function */ if (HAS_PCH_SPLIT(dev)) { + dev_priv->display.dpms = ironlake_crtc_dpms; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; - dev_priv->display.crtc_enable = ironlake_crtc_enable; - dev_priv->display.crtc_disable = ironlake_crtc_disable; dev_priv->display.off = ironlake_crtc_off; dev_priv->display.update_plane = ironlake_update_plane; } else { + dev_priv->display.dpms = i9xx_crtc_dpms; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; - dev_priv->display.crtc_enable = i9xx_crtc_enable; - dev_priv->display.crtc_disable = i9xx_crtc_disable; dev_priv->display.off = i9xx_crtc_off; dev_priv->display.update_plane = i9xx_update_plane; } @@ -7808,7 +7023,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.write_eld = ironlake_write_eld; } else if (IS_HASWELL(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - dev_priv->display.write_eld = haswell_write_eld; + dev_priv->display.write_eld = ironlake_write_eld; } else dev_priv->display.update_wm = NULL; } else if (IS_G4X(dev)) { @@ -7886,16 +7101,21 @@ static struct intel_quirk intel_quirks[] = { /* HP Mini needs pipe A force quirk (LP: #322104) */ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, + /* Thinkpad R31 needs pipe A force quirk */ + { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, + /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ + { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, + /* ThinkPad X40 needs pipe A force quirk */ + /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, /* 855 & before need to leave pipe A & dpll A up */ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, - { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, /* Lenovo U160 cannot use SSC on LVDS */ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, @@ -8011,251 +7231,10 @@ void intel_modeset_init(struct drm_device *dev) /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); -} - -static void -intel_connector_break_all_links(struct intel_connector *connector) -{ - connector->base.dpms = DRM_MODE_DPMS_OFF; - connector->base.encoder = NULL; - connector->encoder->connectors_active = false; - connector->encoder->base.crtc = NULL; -} - -static void intel_enable_pipe_a(struct drm_device *dev) -{ - struct intel_connector *connector; - struct drm_connector *crt = NULL; - struct intel_load_detect_pipe load_detect_temp; - - /* We can't just switch on the pipe A, we need to set things up with a - * proper mode and output configuration. As a gross hack, enable pipe A - * by enabling the load detect pipe once. 
*/ - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { - if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { - crt = &connector->base; - break; - } - } - - if (!crt) - return; - - if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) - intel_release_load_detect_pipe(crt, &load_detect_temp); - - -} - -static void intel_sanitize_crtc(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg, val; - - /* Clear any frame start delays used for debugging left by the BIOS */ - reg = PIPECONF(crtc->pipe); - I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); - - /* We need to sanitize the plane -> pipe mapping first because this will - * disable the crtc (and hence change the state) if it is wrong. */ - if (!HAS_PCH_SPLIT(dev)) { - struct intel_connector *connector; - bool plane; - - reg = DSPCNTR(crtc->plane); - val = I915_READ(reg); - - if ((val & DISPLAY_PLANE_ENABLE) == 0 && - (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) - goto ok; - - DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", - crtc->base.base.id); - - /* Pipe has the wrong plane attached and the plane is active. - * Temporarily change the plane mapping and disable everything - * ... */ - plane = crtc->plane; - crtc->plane = !plane; - dev_priv->display.crtc_disable(&crtc->base); - crtc->plane = plane; - - /* ... and break all links. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (connector->encoder->base.crtc != &crtc->base) - continue; - - intel_connector_break_all_links(connector); - } - - WARN_ON(crtc->active); - crtc->base.enabled = false; - } -ok: - - if (dev_priv->quirks & QUIRK_PIPEA_FORCE && - crtc->pipe == PIPE_A && !crtc->active) { - /* BIOS forgot to enable pipe A, this mostly happens after - * resume. Force-enable the pipe to fix this; the update_dpms - * call below will restore the pipe to the right state, but leave - * the required bits on. */ - intel_enable_pipe_a(dev); - } - - /* Adjust the state of the output pipe according to whether we - * have active connectors/encoders. */ - intel_crtc_update_dpms(&crtc->base); - - if (crtc->active != crtc->base.enabled) { - struct intel_encoder *encoder; - - /* This can happen either due to bugs in the get_hw_state - * functions or because the pipe is force-enabled due to the - * pipe A quirk. */ - DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", - crtc->base.base.id, - crtc->base.enabled ? "enabled" : "disabled", - crtc->active ? "enabled" : "disabled"); - - crtc->base.enabled = crtc->active; - - /* Because we only establish the connector -> encoder -> - * crtc links if something is active, this means the - * crtc is now deactivated. Break the links. connector - * -> encoder links are only established when things are - * actually up, hence no need to break them. */ - WARN_ON(crtc->active); - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) { - WARN_ON(encoder->connectors_active); - encoder->base.crtc = NULL; - } - } -} - -static void intel_sanitize_encoder(struct intel_encoder *encoder) -{ - struct intel_connector *connector; - struct drm_device *dev = encoder->base.dev; - - /* We need to check both for a crtc link (meaning that the - * encoder is active and trying to read from a pipe) and the - * pipe itself being active.
*/ - bool has_active_crtc = encoder->base.crtc && - to_intel_crtc(encoder->base.crtc)->active; - - if (encoder->connectors_active && !has_active_crtc) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", - encoder->base.base.id, - drm_get_encoder_name(&encoder->base)); - - /* Connector is active, but has no active pipe. This is - * fallout from our resume register restoring. Disable - * the encoder manually again. */ - if (encoder->base.crtc) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", - encoder->base.base.id, - drm_get_encoder_name(&encoder->base)); - encoder->disable(encoder); - } - - /* Inconsistent output/port/pipe state happens presumably due to - * a bug in one of the get_hw_state functions. Or someplace else - * in our code, like the register restore mess on resume. Clamp - * things to off as a safer default. */ - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { - if (connector->encoder != encoder) - continue; - - intel_connector_break_all_links(connector); - } - } - /* Enabled encoders without active connectors will be fixed in - * the crtc fixup. */ -} - -/* Scan out the current hw modeset state, sanitizes it and maps it into the drm - * and i915 state tracking structures. */ -void intel_modeset_setup_hw_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe pipe; - u32 tmp; - struct intel_crtc *crtc; - struct intel_encoder *encoder; - struct intel_connector *connector; - - for_each_pipe(pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - - tmp = I915_READ(PIPECONF(pipe)); - if (tmp & PIPECONF_ENABLE) - crtc->active = true; - else - crtc->active = false; - - crtc->base.enabled = crtc->active; - - DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", - crtc->base.base.id, - crtc->active ? "enabled" : "disabled"); - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - pipe = 0; - - if (encoder->get_hw_state(encoder, &pipe)) { - encoder->base.crtc = - dev_priv->pipe_to_crtc_mapping[pipe]; - } else { - encoder->base.crtc = NULL; - } - - encoder->connectors_active = false; - DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", - encoder->base.base.id, - drm_get_encoder_name(&encoder->base), - encoder->base.crtc ? "enabled" : "disabled", - pipe); - } - - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (connector->get_hw_state(connector)) { - connector->base.dpms = DRM_MODE_DPMS_ON; - connector->encoder->connectors_active = true; - connector->base.encoder = &connector->encoder->base; - } else { - connector->base.dpms = DRM_MODE_DPMS_OFF; - connector->base.encoder = NULL; - } - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", - connector->base.base.id, - drm_get_connector_name(&connector->base), - connector->base.encoder ? "enabled" : "disabled"); - } - - /* HW state is read out, now we need to sanitize this mess. 
*/ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, - base.head) { - intel_sanitize_encoder(encoder); - } - for_each_pipe(pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - intel_sanitize_crtc(crtc); - } - - intel_modeset_update_staged_output_state(dev); - - intel_modeset_check_state(dev); + INIT_WORK(&dev_priv->idle_work, intel_idle_update); + setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, + (unsigned long)dev); } void intel_modeset_gem_init(struct drm_device *dev) @@ -8263,8 +7242,6 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_modeset_init_hw(dev); intel_setup_overlay(dev); - - intel_modeset_setup_hw_state(dev); } void intel_modeset_cleanup(struct drm_device *dev) @@ -8303,11 +7280,19 @@ void intel_modeset_cleanup(struct drm_device *dev) * enqueue unpin/hotplug work. */ drm_irq_uninstall(dev); cancel_work_sync(&dev_priv->hotplug_work); - cancel_work_sync(&dev_priv->rps.work); + cancel_work_sync(&dev_priv->rps_work); /* flush any delayed tasks or pending work */ flush_scheduled_work(); + /* Shut off idle work before the crtcs get freed. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + del_timer_sync(&intel_crtc->idle_timer); + } + del_timer_sync(&dev_priv->idle_timer); + cancel_work_sync(&dev_priv->idle_work); + drm_mode_config_cleanup(dev); } @@ -8353,7 +7338,7 @@ struct intel_display_error_state { u32 position; u32 base; u32 size; - } cursor[I915_MAX_PIPES]; + } cursor[2]; struct intel_pipe_error_state { u32 conf; @@ -8365,7 +7350,7 @@ struct intel_display_error_state { u32 vtotal; u32 vblank; u32 vsync; - } pipe[I915_MAX_PIPES]; + } pipe[2]; struct intel_plane_error_state { u32 control; @@ -8375,7 +7360,7 @@ struct intel_display_error_state { u32 addr; u32 surface; u32 tile_offset; - } plane[I915_MAX_PIPES]; + } plane[2]; }; struct intel_display_error_state * @@ -8389,7 +7374,7 @@ intel_display_capture_error_state(struct drm_device *dev) if (error == NULL) return NULL; - for_each_pipe(i) { + for (i = 0; i < 2; i++) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); error->cursor[i].base = I915_READ(CURBASE(i)); @@ -8422,11 +7407,9 @@ intel_display_print_error_state(struct seq_file *m, struct drm_device *dev, struct intel_display_error_state *error) { - drm_i915_private_t *dev_priv = dev->dev_private; int i; - seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); - for_each_pipe(i) { + for (i = 0; i < 2; i++) { seq_printf(m, "Pipe [%d]:\n", i); seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); seq_printf(m, " SRC: %08x\n", error->pipe[i].source); diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index 1474f84fdbd0..ace757af9133 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -36,10 +36,42 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include "drm_dp_helper.h" +#define DP_RECEIVER_CAP_SIZE 0xf #define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) +#define DP_LINK_CONFIGURATION_SIZE 9 + +struct intel_dp { + struct intel_encoder base; + uint32_t output_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + enum hdmi_force_audio force_audio; + uint32_t color_range; + int dpms_mode; + uint8_t link_bw; + uint8_t lane_count; + uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; + struct i2c_adapter adapter; + struct i2c_algo_dp_aux_data algo; + bool is_pch_edp; + 
uint8_t train_set[4]; + int panel_power_up_delay; + int panel_power_down_delay; + int panel_power_cycle_delay; + int backlight_on_delay; + int backlight_off_delay; + struct drm_display_mode *panel_fixed_mode; /* for eDP */ + struct delayed_work panel_vdd_work; + bool want_panel_vdd; + struct edid *edid; /* cached EDID for eDP */ + int edid_mode_count; +}; + /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct @@ -808,6 +840,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, } } +static void ironlake_edp_pll_on(struct drm_encoder *encoder); +static void ironlake_edp_pll_off(struct drm_encoder *encoder); + static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -818,6 +853,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + /* Turn on the eDP PLL if needed */ + if (is_edp(intel_dp)) { + if (!is_pch_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + else + ironlake_edp_pll_off(encoder); + } + /* * There are four kinds of DP registers: * @@ -839,8 +882,10 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, * supposed to be read-only. */ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; /* Handle DP bits in common between all three register formats */ + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; switch (intel_dp->lane_count) { @@ -887,6 +932,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp->DP |= intel_crtc->pipe << 29; /* don't miss the required setting for eDP */ + intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else @@ -908,6 +954,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (is_cpu_edp(intel_dp)) { /* don't miss the required setting for eDP */ + intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else @@ -1178,49 +1225,27 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) msleep(intel_dp->backlight_off_delay); } -static void ironlake_edp_pll_on(struct intel_dp *intel_dp) +static void ironlake_edp_pll_on(struct drm_encoder *encoder) { - struct drm_device *dev = intel_dp->base.base.dev; - struct drm_crtc *crtc = intel_dp->base.base.crtc; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; - assert_pipe_disabled(dev_priv, - to_intel_crtc(crtc)->pipe); - DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); - WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); - WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); - - /* We don't adjust intel_dp->DP while tearing down the link, to - * facilitate link retraining (e.g. after hotplug). Hence clear all - * enable bits here to ensure that we don't enable too much. */
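/*
 * [Editor's sketch] Both variants of the eDP PLL helpers in this hunk use the
 * same MMIO idiom: read-modify-write the control register, then issue a
 * posting read so the write is flushed to the hardware before the settling
 * delay starts. The 200us figure is taken from the surrounding code, not
 * independently verified:
 */
static void example_edp_pll_enable(struct drm_i915_private *dev_priv)
{
	u32 dpa_ctl = I915_READ(DP_A);

	dpa_ctl |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);	/* flush the write before timing the wait */
	udelay(200);		/* let the PLL settle */
}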
- intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); - intel_dp->DP |= DP_PLL_ENABLE; - I915_WRITE(DP_A, intel_dp->DP); + dpa_ctl |= DP_PLL_ENABLE; + I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); udelay(200); } -static void ironlake_edp_pll_off(struct intel_dp *intel_dp) +static void ironlake_edp_pll_off(struct drm_encoder *encoder) { - struct drm_device *dev = intel_dp->base.base.dev; - struct drm_crtc *crtc = intel_dp->base.base.crtc; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; - assert_pipe_disabled(dev_priv, - to_intel_crtc(crtc)->pipe); - dpa_ctl = I915_READ(DP_A); - WARN((dpa_ctl & DP_PLL_ENABLE) == 0, - "dp pll off, should be on\n"); - WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); - - /* We can't rely on the value tracked for the DP register in - * intel_dp->DP because link_down must not change that (otherwise link - * re-training will fail). */ dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); @@ -1257,57 +1282,10 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) } } -static bool intel_dp_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) +static void intel_dp_prepare(struct drm_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp = I915_READ(intel_dp->output_reg); - - if (!(tmp & DP_PORT_EN)) - return false; - - if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { - *pipe = PORT_TO_PIPE_CPT(tmp); - } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { - *pipe = PORT_TO_PIPE(tmp); - } else { - u32 trans_sel; - u32 trans_dp; - int i; - - switch (intel_dp->output_reg) { - case PCH_DP_B: - trans_sel = TRANS_DP_PORT_SEL_B; - break; - case PCH_DP_C: - trans_sel = TRANS_DP_PORT_SEL_C; - break; - case PCH_DP_D: - trans_sel = TRANS_DP_PORT_SEL_D; - break; - default: - return true; - } - - for_each_pipe(i) { - trans_dp = I915_READ(TRANS_DP_CTL(i)); - if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { - *pipe = i; - return true; - } - } - } - - DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); - - return true; -} + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); -static void intel_disable_dp(struct intel_encoder *encoder) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); /* Make sure the panel is off before trying to change the mode. But also * ensure that we have vdd while we switch off the panel. */ @@ -1315,31 +1293,14 @@ static void intel_disable_dp(struct intel_encoder *encoder) ironlake_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); ironlake_edp_panel_off(intel_dp); - - /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled.
*/ - if (!is_cpu_edp(intel_dp)) - intel_dp_link_down(intel_dp); + intel_dp_link_down(intel_dp); } -static void intel_post_disable_dp(struct intel_encoder *encoder) +static void intel_dp_commit(struct drm_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - - if (is_cpu_edp(intel_dp)) { - intel_dp_link_down(intel_dp); - ironlake_edp_pll_off(intel_dp); - } -} - -static void intel_enable_dp(struct intel_encoder *encoder) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); - - if (WARN_ON(dp_reg & DP_PORT_EN)) - return; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_device *dev = encoder->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); @@ -1348,14 +1309,47 @@ static void intel_enable_dp(struct intel_encoder *encoder) ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); + + intel_dp->dpms_mode = DRM_MODE_DPMS_ON; + + if (HAS_PCH_CPT(dev)) + intel_cpt_verify_modeset(dev, intel_crtc->pipe); } -static void intel_pre_enable_dp(struct intel_encoder *encoder) +static void +intel_dp_dpms(struct drm_encoder *encoder, int mode) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (is_cpu_edp(intel_dp)) - ironlake_edp_pll_on(intel_dp); + if (mode != DRM_MODE_DPMS_ON) { + /* Switching the panel off requires vdd. 
*/ + ironlake_edp_panel_vdd_on(intel_dp); + ironlake_edp_backlight_off(intel_dp); + intel_dp_sink_dpms(intel_dp, mode); + ironlake_edp_panel_off(intel_dp); + intel_dp_link_down(intel_dp); + + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_off(encoder); + } else { + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + + ironlake_edp_panel_vdd_on(intel_dp); + intel_dp_sink_dpms(intel_dp, mode); + if (!(dp_reg & DP_PORT_EN)) { + intel_dp_start_link_train(intel_dp); + ironlake_edp_panel_on(intel_dp); + ironlake_edp_panel_vdd_off(intel_dp, true); + intel_dp_complete_link_train(intel_dp); + } else + ironlake_edp_panel_vdd_off(intel_dp, false); + ironlake_edp_backlight_on(intel_dp); + } + intel_dp->dpms_mode = mode; } /* @@ -1674,45 +1668,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { - dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; - - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { - case DP_TRAINING_PATTERN_DISABLE: - dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; - break; - case DP_TRAINING_PATTERN_1: - dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; - break; - case DP_TRAINING_PATTERN_2: - dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; - break; - case DP_TRAINING_PATTERN_3: - DRM_ERROR("DP training pattern 3 not supported\n"); - dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; - break; - } - - } else { - dp_reg_value &= ~DP_LINK_TRAIN_MASK; - - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { - case DP_TRAINING_PATTERN_DISABLE: - dp_reg_value |= DP_LINK_TRAIN_OFF; - break; - case DP_TRAINING_PATTERN_1: - dp_reg_value |= DP_LINK_TRAIN_PAT_1; - break; - case DP_TRAINING_PATTERN_2: - dp_reg_value |= DP_LINK_TRAIN_PAT_2; - break; - case DP_TRAINING_PATTERN_3: - DRM_ERROR("DP training pattern 3 not supported\n"); - dp_reg_value |= DP_LINK_TRAIN_PAT_2; - break; - } - } - I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); @@ -1720,15 +1675,12 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); - if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != - DP_TRAINING_PATTERN_DISABLE) { - ret = intel_dp_aux_native_write(intel_dp, - DP_TRAINING_LANE0_SET, - intel_dp->train_set, - intel_dp->lane_count); - if (ret != intel_dp->lane_count) - return false; - } + ret = intel_dp_aux_native_write(intel_dp, + DP_TRAINING_LANE0_SET, + intel_dp->train_set, + intel_dp->lane_count); + if (ret != intel_dp->lane_count) + return false; return true; } @@ -1738,12 +1690,26 @@ static void intel_dp_start_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); int i; uint8_t voltage; bool clock_recovery = false; int voltage_tries, loop_tries; + u32 reg; uint32_t DP = intel_dp->DP; + /* + * On CPT we have to enable the port in training pattern 1, which + * will happen below in intel_dp_set_link_train. Otherwise, enable + * the port and wait for it to become active. 
+ */ + if (!HAS_PCH_CPT(dev)) { + I915_WRITE(intel_dp->output_reg, intel_dp->DP); + POSTING_READ(intel_dp->output_reg); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } + /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, intel_dp->link_configuration, @@ -1751,6 +1717,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP |= DP_PORT_EN; + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) + DP &= ~DP_LINK_TRAIN_MASK_CPT; + else + DP &= ~DP_LINK_TRAIN_MASK; memset(intel_dp->train_set, 0, 4); voltage = 0xff; voltage_tries = 0; @@ -1774,7 +1744,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (!intel_dp_set_link_train(intel_dp, DP, + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) + reg = DP | DP_LINK_TRAIN_PAT_1_CPT; + else + reg = DP | DP_LINK_TRAIN_PAT_1; + + if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) break; @@ -1829,8 +1804,10 @@ static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; bool channel_eq = false; int tries, cr_tries; + u32 reg; uint32_t DP = intel_dp->DP; /* channel equalization */ @@ -1859,8 +1836,13 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) + reg = DP | DP_LINK_TRAIN_PAT_2_CPT; + else + reg = DP | DP_LINK_TRAIN_PAT_2; + /* channel eq pattern */ - if (!intel_dp_set_link_train(intel_dp, DP, + if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_2 | DP_LINK_SCRAMBLING_DISABLE)) break; @@ -1895,7 +1877,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } - intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) + reg = DP | DP_LINK_TRAIN_OFF_CPT; + else + reg = DP | DP_LINK_TRAIN_OFF; + + I915_WRITE(intel_dp->output_reg, reg); + POSTING_READ(intel_dp->output_reg); + intel_dp_aux_native_write_1(intel_dp, + DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); } static void @@ -1905,11 +1895,18 @@ intel_dp_link_down(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; - if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) + if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) return; DRM_DEBUG_KMS("\n"); + if (is_edp(intel_dp)) { + DP &= ~DP_PLL_ENABLE; + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); + udelay(100); + } + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); @@ -1921,6 +1918,13 @@ intel_dp_link_down(struct intel_dp *intel_dp) msleep(17); + if (is_edp(intel_dp)) { + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) + DP |= DP_LINK_TRAIN_OFF_CPT; + else + DP |= DP_LINK_TRAIN_OFF; + } + if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { struct drm_crtc *crtc = intel_dp->base.base.crtc; @@ -2029,10 +2033,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) u8 sink_irq_vector; u8 link_status[DP_LINK_STATUS_SIZE]; - if (!intel_dp->base.connectors_active) + if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) return; - if 
(WARN_ON(!intel_dp->base.base.crtc)) + if (!intel_dp->base.base.crtc) return; /* Try to read receiver status if the link appears to be up */ @@ -2156,6 +2160,7 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada ret = drm_add_edid_modes(connector, intel_dp->edid); drm_edid_to_eld(connector, intel_dp->edid); + connector->display_info.raw_edid = NULL; return intel_dp->edid_mode_count; } @@ -2201,6 +2206,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) edid = intel_dp_get_edid(connector, &intel_dp->adapter); if (edid) { intel_dp->has_audio = drm_detect_monitor_audio(edid); + connector->display_info.raw_edid = NULL; kfree(edid); } } @@ -2265,6 +2271,8 @@ intel_dp_detect_audio(struct drm_connector *connector) edid = intel_dp_get_edid(connector, &intel_dp->adapter); if (edid) { has_audio = drm_detect_monitor_audio(edid); + + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -2318,8 +2326,9 @@ intel_dp_set_property(struct drm_connector *connector, done: if (intel_dp->base.base.crtc) { struct drm_crtc *crtc = intel_dp->base.base.crtc; - intel_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); } return 0; @@ -2353,13 +2362,15 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { + .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, + .prepare = intel_dp_prepare, .mode_set = intel_dp_mode_set, - .disable = intel_encoder_noop, + .commit = intel_dp_commit, }; static const struct drm_connector_funcs intel_dp_connector_funcs = { - .dpms = intel_connector_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, @@ -2430,7 +2441,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect } void -intel_dp_init(struct drm_device *dev, int output_reg, enum port port) +intel_dp_init(struct drm_device *dev, int output_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; @@ -2445,9 +2456,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) return; intel_dp->output_reg = output_reg; - intel_dp->port = port; - /* Preserve the current hw state. 
*/ - intel_dp->DP = I915_READ(intel_dp->output_reg); + intel_dp->dpms_mode = -1; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { @@ -2474,10 +2483,18 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) connector->polled = DRM_CONNECTOR_POLL_HPD; - intel_encoder->cloneable = false; + if (output_reg == DP_B || output_reg == PCH_DP_B) + intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); + else if (output_reg == DP_C || output_reg == PCH_DP_C) + intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); + else if (output_reg == DP_D || output_reg == PCH_DP_D) + intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, - ironlake_panel_vdd_work); + if (is_edp(intel_dp)) { + intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, + ironlake_panel_vdd_work); + } intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); @@ -2491,33 +2508,29 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); - intel_encoder->enable = intel_enable_dp; - intel_encoder->pre_enable = intel_pre_enable_dp; - intel_encoder->disable = intel_disable_dp; - intel_encoder->post_disable = intel_post_disable_dp; - intel_encoder->get_hw_state = intel_dp_get_hw_state; - intel_connector->get_hw_state = intel_connector_get_hw_state; - /* Set up the DDC bus. */ - switch (port) { - case PORT_A: - name = "DPDDC-A"; - break; - case PORT_B: - dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; - name = "DPDDC-B"; - break; - case PORT_C: - dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; - name = "DPDDC-C"; - break; - case PORT_D: - dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; - name = "DPDDC-D"; - break; - default: - WARN(1, "Invalid port %c\n", port_name(port)); - break; + switch (output_reg) { + case DP_A: + name = "DPDDC-A"; + break; + case DP_B: + case PCH_DP_B: + dev_priv->hotplug_supported_mask |= + DPB_HOTPLUG_INT_STATUS; + name = "DPDDC-B"; + break; + case DP_C: + case PCH_DP_C: + dev_priv->hotplug_supported_mask |= + DPC_HOTPLUG_INT_STATUS; + name = "DPDDC-C"; + break; + case DP_D: + case PCH_DP_D: + dev_priv->hotplug_supported_mask |= + DPD_HOTPLUG_INT_STATUS; + name = "DPDDC-D"; + break; } /* Cache some DPCD data in the eDP case */ diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 351fd7179cd7..cd54cf88a28f 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -31,7 +31,6 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" #include "drm_fb_helper.h" -#include "drm_dp_helper.h" #define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ @@ -41,11 +40,7 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if (W && drm_can_sleep()) { \ - msleep(W); \ - } else { \ - cpu_relax(); \ - } \ + if (W && drm_can_sleep()) msleep(W); \ } \ ret__; \ }) @@ -95,6 +90,25 @@ #define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_EDP 8 +/* Intel Pipe Clone Bit */ +#define INTEL_HDMIB_CLONE_BIT 1 +#define INTEL_HDMIC_CLONE_BIT 2 +#define INTEL_HDMID_CLONE_BIT 3 +#define INTEL_HDMIE_CLONE_BIT 4 +#define INTEL_HDMIF_CLONE_BIT 5 +#define INTEL_SDVO_NON_TV_CLONE_BIT 6 +#define INTEL_SDVO_TV_CLONE_BIT 7 +#define INTEL_SDVO_LVDS_CLONE_BIT 8 +#define INTEL_ANALOG_CLONE_BIT 9 +#define INTEL_TV_CLONE_BIT 10 +#define 
INTEL_DP_B_CLONE_BIT 11 +#define INTEL_DP_C_CLONE_BIT 12 +#define INTEL_DP_D_CLONE_BIT 13 +#define INTEL_LVDS_CLONE_BIT 14 +#define INTEL_DVO_TMDS_CLONE_BIT 15 +#define INTEL_DVO_LVDS_CLONE_BIT 16 +#define INTEL_EDP_CLONE_BIT 17 + #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_TMDS 2 @@ -137,48 +151,16 @@ struct intel_fbdev { struct intel_encoder { struct drm_encoder base; - /* - * The new crtc this encoder will be driven from. Only differs from - * base->crtc while a modeset is in progress. - */ - struct intel_crtc *new_crtc; - int type; bool needs_tv_clock; - /* - * Intel hw has only one MUX where encoders could be clone, hence a - * simple flag is enough to compute the possible_clones mask. - */ - bool cloneable; - bool connectors_active; void (*hot_plug)(struct intel_encoder *); - void (*pre_enable)(struct intel_encoder *); - void (*enable)(struct intel_encoder *); - void (*disable)(struct intel_encoder *); - void (*post_disable)(struct intel_encoder *); - /* Read out the current hw state of this connector, returning true if - * the encoder is active. If the encoder is enabled it also set the pipe - * it is connected to in the pipe parameter. */ - bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); int crtc_mask; + int clone_mask; }; struct intel_connector { struct drm_connector base; - /* - * The fixed encoder this connector is connected to. - */ struct intel_encoder *encoder; - - /* - * The new encoder this connector will be driven. Only differs from - * encoder while a modeset is in progress. - */ - struct intel_encoder *new_encoder; - - /* Reads out the current hw, returning true if the connector is enabled - * and active (i.e. dpms ON state). */ - bool (*get_hw_state)(struct intel_connector *); }; struct intel_crtc { @@ -186,13 +168,11 @@ struct intel_crtc { enum pipe pipe; enum plane plane; u8 lut_r[256], lut_g[256], lut_b[256]; - /* - * Whether the crtc and the connected output pipeline is active. Implies - * that crtc->enabled is set, i.e. the current mode configuration has - * some outputs connected to this crtc. - */ - bool active; + int dpms_mode; + bool active; /* is the crtc on? independent of the dpms mode */ bool primary_disabled; /* is the crtc obscured by a plane? */ + bool busy; /* is scanout buffer being updated frequently? 
*/ + struct timer_list idle_timer; bool lowfreq_avail; struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; @@ -331,37 +311,6 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; -#define DP_RECEIVER_CAP_SIZE 0xf -#define DP_LINK_CONFIGURATION_SIZE 9 - -struct intel_dp { - struct intel_encoder base; - uint32_t output_reg; - uint32_t DP; - uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; - bool has_audio; - enum hdmi_force_audio force_audio; - enum port port; - uint32_t color_range; - uint8_t link_bw; - uint8_t lane_count; - uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; - struct i2c_adapter adapter; - struct i2c_algo_dp_aux_data algo; - bool is_pch_edp; - uint8_t train_set[4]; - int panel_power_up_delay; - int panel_power_down_delay; - int panel_power_cycle_delay; - int backlight_on_delay; - int backlight_off_delay; - struct drm_display_mode *panel_fixed_mode; /* for eDP */ - struct delayed_work panel_vdd_work; - bool want_panel_vdd; - struct edid *edid; /* cached EDID for eDP */ - int edid_mode_count; -}; - static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { @@ -401,21 +350,17 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector); extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); extern void intel_crt_init(struct drm_device *dev); -extern void intel_hdmi_init(struct drm_device *dev, - int sdvox_reg, enum port port); +extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); -extern void intel_mark_busy(struct drm_device *dev); -extern void intel_mark_idle(struct drm_device *dev); -extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); -extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); +extern void intel_mark_busy(struct drm_device *dev, + struct drm_i915_gem_object *obj); extern bool intel_lvds_init(struct drm_device *dev); -extern void intel_dp_init(struct drm_device *dev, int output_reg, - enum port port); +extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); @@ -428,6 +373,8 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane); +void intel_sanitize_pm(struct drm_device *dev); + /* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); @@ -444,27 +391,10 @@ extern void intel_panel_disable_backlight(struct drm_device *dev); extern void intel_panel_destroy_backlight(struct drm_device *dev); extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); -struct intel_set_config { - struct drm_encoder **save_connector_encoders; - struct drm_crtc **save_encoder_crtcs; - - bool fb_changed; - bool mode_changed; -}; - -extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *old_fb); -extern void intel_modeset_disable(struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); -extern void 
intel_crtc_update_dpms(struct drm_crtc *crtc); -extern void intel_encoder_noop(struct drm_encoder *encoder); +extern void intel_encoder_prepare(struct drm_encoder *encoder); +extern void intel_encoder_commit(struct drm_encoder *encoder); extern void intel_encoder_destroy(struct drm_encoder *encoder); -extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); -extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder); -extern void intel_connector_dpms(struct drm_connector *, int mode); -extern bool intel_connector_get_hw_state(struct intel_connector *connector); -extern void intel_modeset_check_state(struct drm_device *dev); - static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) { @@ -487,10 +417,12 @@ struct intel_load_detect_pipe { bool load_detect_temp; int dpms_mode; }; -extern bool intel_get_load_detect_pipe(struct drm_connector *connector, +extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, struct drm_display_mode *mode, struct intel_load_detect_pipe *old); -extern void intel_release_load_detect_pipe(struct drm_connector *connector, +extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, + struct drm_connector *connector, struct intel_load_detect_pipe *old); extern void intelfb_restore(void); @@ -571,10 +503,7 @@ extern void intel_disable_gt_powersave(struct drm_device *dev); extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void ironlake_teardown_rc6(struct drm_device *dev); -extern void intel_enable_ddi(struct intel_encoder *encoder); -extern void intel_disable_ddi(struct intel_encoder *encoder); -extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe); +extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); extern void intel_ddi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); diff --git a/trunk/drivers/gpu/drm/i915/intel_dvo.c b/trunk/drivers/gpu/drm/i915/intel_dvo.c index 4f1fdcc44005..36c542e5036b 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_dvo.c @@ -37,7 +37,6 @@ #define SIL164_ADDR 0x38 #define CH7xxx_ADDR 0x76 #define TFP410_ADDR 0x38 -#define NS2501_ADDR 0x38 static const struct intel_dvo_device intel_dvo_devices[] = { { @@ -75,14 +74,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .slave_addr = 0x75, .gpio = GMBUS_PORT_DPB, .dev_ops = &ch7017_ops, - }, - { - .type = INTEL_DVO_CHIP_TMDS, - .name = "ns2501", - .dvo_reg = DVOC, - .slave_addr = NS2501_ADDR, - .dev_ops = &ns2501_ops, - } + } }; struct intel_dvo { @@ -105,91 +97,22 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) struct intel_dvo, base); } -static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) -{ - struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); - - return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); -} - -static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); - u32 tmp; - - tmp = I915_READ(intel_dvo->dev.dvo_reg); - - if (!(tmp & DVO_ENABLE)) - return false; - - *pipe = PORT_TO_PIPE(tmp); - - return true; -} - -static void intel_disable_dvo(struct intel_encoder *encoder) -{ - struct 
drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); - u32 dvo_reg = intel_dvo->dev.dvo_reg; - u32 temp = I915_READ(dvo_reg); - - intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); - I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); - I915_READ(dvo_reg); -} - -static void intel_enable_dvo(struct intel_encoder *encoder) +static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) { - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); - I915_WRITE(dvo_reg, temp | DVO_ENABLE); - I915_READ(dvo_reg); - intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); -} - -static void intel_dvo_dpms(struct drm_connector *connector, int mode) -{ - struct intel_dvo *intel_dvo = intel_attached_dvo(connector); - struct drm_crtc *crtc; - - /* dvo supports only 2 dpms states. */ - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (mode == connector->dpms) - return; - - connector->dpms = mode; - - /* Only need to change hw state when actually enabled */ - crtc = intel_dvo->base.base.crtc; - if (!crtc) { - intel_dvo->base.connectors_active = false; - return; - } - if (mode == DRM_MODE_DPMS_ON) { - intel_dvo->base.connectors_active = true; - - intel_crtc_update_dpms(crtc); - - intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); + I915_WRITE(dvo_reg, temp | DVO_ENABLE); + I915_READ(dvo_reg); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); } else { - intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); - - intel_dvo->base.connectors_active = false; - - intel_crtc_update_dpms(crtc); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); + I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); + I915_READ(dvo_reg); } - - intel_modeset_check_state(connector->dev); } static int intel_dvo_mode_valid(struct drm_connector *connector, @@ -344,13 +267,15 @@ static void intel_dvo_destroy(struct drm_connector *connector) } static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { + .dpms = intel_dvo_dpms, .mode_fixup = intel_dvo_mode_fixup, + .prepare = intel_encoder_prepare, .mode_set = intel_dvo_mode_set, - .disable = intel_encoder_noop, + .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_dvo_connector_funcs = { - .dpms = intel_dvo_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_dvo_detect, .destroy = intel_dvo_destroy, .fill_modes = drm_helper_probe_single_connector_modes, @@ -439,11 +364,6 @@ void intel_dvo_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_dvo_enc_funcs, encoder_type); - intel_encoder->disable = intel_disable_dvo; - intel_encoder->enable = intel_enable_dvo; - intel_encoder->get_hw_state = intel_dvo_get_hw_state; - intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; - /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_connector->base; @@ -476,14 +396,17 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: - intel_encoder->cloneable = true; + intel_encoder->clone_mask = + (1 << INTEL_DVO_TMDS_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT); drm_connector_init(dev, connector, 
&intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: - intel_encoder->cloneable = false; + intel_encoder->clone_mask = + (1 << INTEL_DVO_LVDS_CLONE_BIT); drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c index 08f2b63d740a..12dc3308ab8c 100644 --- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c +++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c @@ -151,9 +151,6 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VIDEO_DIP_DATA, *data); data++; } - /* Write every possible data byte to force correct ECC calculation. */ - for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(VIDEO_DIP_DATA, 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -189,9 +186,6 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } - /* Write every possible data byte to force correct ECC calculation. */ - for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -230,9 +224,6 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } - /* Write every possible data byte to force correct ECC calculation. */ - for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -268,9 +259,6 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } - /* Write every possible data byte to force correct ECC calculation. */ - for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -304,9 +292,6 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, I915_WRITE(data_reg + i, *data); data++; } - /* Write every possible data byte to force correct ECC calculation. 
*/ - for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(data_reg + i, 0); mmiowb(); val |= hsw_infoframe_enable(frame); @@ -392,7 +377,6 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, port = VIDEO_DIP_PORT_C; break; default: - BUG(); return; } @@ -451,7 +435,6 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, port = VIDEO_DIP_PORT_D; break; default: - BUG(); return; } @@ -618,36 +601,15 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, intel_hdmi->set_infoframes(encoder, adjusted_mode); } -static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - u32 tmp; - - tmp = I915_READ(intel_hdmi->sdvox_reg); - - if (!(tmp & SDVO_ENABLE)) - return false; - - if (HAS_PCH_CPT(dev)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - return true; -} - -static void intel_enable_hdmi(struct intel_encoder *encoder) +static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->base.dev; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; u32 enable_bits = SDVO_ENABLE; - if (intel_hdmi->has_audio) + if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON) enable_bits |= SDVO_AUDIO_ENABLE; temp = I915_READ(intel_hdmi->sdvox_reg); @@ -655,67 +617,30 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) /* HW workaround for IBX, we need to move the port to transcoder A * before disabling it. */ if (HAS_PCH_IBX(dev)) { - struct drm_crtc *crtc = encoder->base.crtc; - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - - /* Restore the transcoder select bit. */ - if (pipe == PIPE_B) - enable_bits |= SDVO_PIPE_B_SELECT; - } - - /* HW workaround, need to toggle enable bit off and on for 12bpc, but - * we do this anyway which shows more stable in testing. - */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); - POSTING_READ(intel_hdmi->sdvox_reg); - } - - temp |= enable_bits; - - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); - - /* HW workaround, need to write this twice for issue that may result - * in first write getting masked. - */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); - } -} - -static void intel_disable_hdmi(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - u32 temp; - u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; - - temp = I915_READ(intel_hdmi->sdvox_reg); - - /* HW workaround for IBX, we need to move the port to transcoder A - * before disabling it. */ - if (HAS_PCH_IBX(dev)) { - struct drm_crtc *crtc = encoder->base.crtc; + struct drm_crtc *crtc = encoder->crtc; int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - if (temp & SDVO_PIPE_B_SELECT) { - temp &= ~SDVO_PIPE_B_SELECT; - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); - - /* Again we need to write this twice. 
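Both the enable and disable paths in this hunk repeat the same PCH-split quirk: a write to the SDVO/HDMI port register may be masked the first time, so the driver writes the value twice with posting reads in between. A minimal sketch of that pattern (the helper name is hypothetical; I915_WRITE/POSTING_READ are the driver's existing MMIO macros, which expect a dev_priv in scope):

/* Sketch only: captures the "write twice" workaround used in this hunk. */
static void sdvox_write_twice(struct drm_i915_private *dev_priv,
                              u32 reg, u32 val)
{
        I915_WRITE(reg, val);
        POSTING_READ(reg);      /* flush the first, possibly masked, write */
        I915_WRITE(reg, val);   /* write again so the value sticks */
        POSTING_READ(reg);
}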
*/ - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); - - /* Transcoder selection bits only update - * effectively on vblank. */ - if (crtc) - intel_wait_for_vblank(dev, pipe); - else - msleep(50); + if (mode != DRM_MODE_DPMS_ON) { + if (temp & SDVO_PIPE_B_SELECT) { + temp &= ~SDVO_PIPE_B_SELECT; + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* Again we need to write this twice. */ + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* Transcoder selection bits only update + * effectively on vblank. */ + if (crtc) + intel_wait_for_vblank(dev, pipe); + else + msleep(50); + } + } else { + /* Restore the transcoder select bit. */ + if (pipe == PIPE_B) + enable_bits |= SDVO_PIPE_B_SELECT; } } @@ -727,7 +652,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) POSTING_READ(intel_hdmi->sdvox_reg); } - temp &= ~enable_bits; + if (mode != DRM_MODE_DPMS_ON) { + temp &= ~enable_bits; + } else { + temp |= enable_bits; + } I915_WRITE(intel_hdmi->sdvox_reg, temp); POSTING_READ(intel_hdmi->sdvox_reg); @@ -808,6 +737,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) drm_detect_hdmi_monitor(edid); intel_hdmi->has_audio = drm_detect_monitor_audio(edid); } + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -848,6 +778,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector) if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); + + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -901,8 +833,9 @@ intel_hdmi_set_property(struct drm_connector *connector, done: if (intel_hdmi->base.base.crtc) { struct drm_crtc *crtc = intel_hdmi->base.base.crtc; - intel_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); } return 0; @@ -916,19 +849,23 @@ static void intel_hdmi_destroy(struct drm_connector *connector) } static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { + .dpms = intel_ddi_dpms, .mode_fixup = intel_hdmi_mode_fixup, + .prepare = intel_encoder_prepare, .mode_set = intel_ddi_mode_set, - .disable = intel_encoder_noop, + .commit = intel_encoder_commit, }; static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { + .dpms = intel_hdmi_dpms, .mode_fixup = intel_hdmi_mode_fixup, + .prepare = intel_encoder_prepare, .mode_set = intel_hdmi_mode_set, - .disable = intel_encoder_noop, + .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_hdmi_connector_funcs = { - .dpms = intel_connector_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_hdmi_set_property, @@ -952,7 +889,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_broadcast_rgb_property(connector); } -void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) +void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_connector *connector; @@ -986,25 +923,48 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) connector->doublescan_allowed = 0; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - intel_encoder->cloneable = false; - - intel_hdmi->ddi_port = port; - switch (port) { - case PORT_B: + /* Set up the DDC bus. 
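The restored branch that follows pairs each candidate output register with a GMBUS pin and a hotplug status bit (plus a clone bit). The same relationship could be written table-driven; a sketch using only the register and bit names that appear below (the struct and array names are invented for illustration, and the clone bits are omitted for brevity):

struct hdmi_port_map {
        u32 sdvox_reg;          /* output register passed to intel_hdmi_init() */
        int ddc_bus;            /* GMBUS pin used for EDID reads */
        u32 hotplug_bit;        /* bit ORed into hotplug_supported_mask */
};

static const struct hdmi_port_map hdmi_port_maps[] = {
        { SDVOB, GMBUS_PORT_DPB, HDMIB_HOTPLUG_INT_STATUS },
        { SDVOC, GMBUS_PORT_DPC, HDMIC_HOTPLUG_INT_STATUS },
        { HDMIB, GMBUS_PORT_DPB, HDMIB_HOTPLUG_INT_STATUS },
        { HDMIC, GMBUS_PORT_DPC, HDMIC_HOTPLUG_INT_STATUS },
        { HDMID, GMBUS_PORT_DPD, HDMID_HOTPLUG_INT_STATUS },
};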
*/ + if (sdvox_reg == SDVOB) { + intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPB; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; - break; - case PORT_C: + } else if (sdvox_reg == SDVOC) { + intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPC; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; - break; - case PORT_D: + } else if (sdvox_reg == HDMIB) { + intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == HDMIC) { + intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == HDMID) { + intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); intel_hdmi->ddc_bus = GMBUS_PORT_DPD; dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; - break; - case PORT_A: - /* Internal port only for eDP. */ - default: + } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) { + DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n"); + intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPB; + intel_hdmi->ddi_port = PORT_B; + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) { + DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n"); + intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPC; + intel_hdmi->ddi_port = PORT_C; + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; + } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) { + DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n"); + intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); + intel_hdmi->ddc_bus = GMBUS_PORT_DPD; + intel_hdmi->ddi_port = PORT_D; + dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; + } else { + /* If we got an unknown sdvox_reg, things are pretty much broken + * in a way that we should let the kernel know about it */ BUG(); } @@ -1027,21 +987,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) intel_hdmi->set_infoframes = cpt_set_infoframes; } - if (IS_HASWELL(dev)) { - intel_encoder->enable = intel_enable_ddi; - intel_encoder->disable = intel_disable_ddi; - intel_encoder->get_hw_state = intel_ddi_get_hw_state; - drm_encoder_helper_add(&intel_encoder->base, - &intel_hdmi_helper_funcs_hsw); - } else { - intel_encoder->enable = intel_enable_hdmi; - intel_encoder->disable = intel_disable_hdmi; - intel_encoder->get_hw_state = intel_hdmi_get_hw_state; - drm_encoder_helper_add(&intel_encoder->base, - &intel_hdmi_helper_funcs); - } - intel_connector->get_hw_state = intel_connector_get_hw_state; - + if (IS_HASWELL(dev)) + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw); + else + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); intel_hdmi_add_properties(intel_hdmi, connector); diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index 40d72bd64e11..e9a6f6aaed85 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -65,40 +65,13 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) struct intel_lvds, base); } -static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) -{ - struct drm_device 
*dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 lvds_reg, tmp; - - if (HAS_PCH_SPLIT(dev)) { - lvds_reg = PCH_LVDS; - } else { - lvds_reg = LVDS; - } - - tmp = I915_READ(lvds_reg); - - if (!(tmp & LVDS_PORT_EN)) - return false; - - if (HAS_PCH_CPT(dev)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - return true; -} - /** * Sets the power state for the panel. */ -static void intel_enable_lvds(struct intel_encoder *encoder) +static void intel_lvds_enable(struct intel_lvds *intel_lvds) { - struct drm_device *dev = encoder->base.dev; - struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct drm_device *dev = intel_lvds->base.base.dev; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -138,10 +111,9 @@ static void intel_enable_lvds(struct intel_encoder *encoder) intel_panel_enable_backlight(dev, intel_crtc->pipe); } -static void intel_disable_lvds(struct intel_encoder *encoder) +static void intel_lvds_disable(struct intel_lvds *intel_lvds) { - struct drm_device *dev = encoder->base.dev; - struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); + struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -170,6 +142,18 @@ static void intel_disable_lvds(struct intel_encoder *encoder) POSTING_READ(lvds_reg); } +static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) +{ + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); + + if (mode == DRM_MODE_DPMS_ON) + intel_lvds_enable(intel_lvds); + else + intel_lvds_disable(intel_lvds); + + /* XXX: We never power down the LVDS pairs. */ +} + static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -250,8 +234,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; + struct intel_encoder *tmp_encoder; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; int pipe; @@ -261,8 +246,14 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return false; } - if (intel_encoder_check_is_cloned(&intel_lvds->base)) - return false; + /* Should never happen!! */ + for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) { + if (&tmp_encoder->base != encoder) { + DRM_ERROR("Can't enable LVDS and another " + "encoder on the same pipe\n"); + return false; + } + } /* * We have timings from the BIOS for the panel, put them in @@ -414,6 +405,23 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return true; } +static void intel_lvds_prepare(struct drm_encoder *encoder) +{ + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); + + intel_lvds_disable(intel_lvds); +} + +static void intel_lvds_commit(struct drm_encoder *encoder) +{ + struct intel_lvds *intel_lvds = to_intel_lvds(encoder); + + /* Always do a full power on as we do not know what state + * we were left in. 
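intel_lvds_prepare() and intel_lvds_commit() here plug the encoder back into the classic CRTC-helper modeset sequencing, in which the helpers bracket mode_set with a forced-off and a forced-on step. A rough sketch of the order in which drm_crtc_helper_set_mode() invokes them (an illustration of the call order, not the helper's literal source):

/* Sketch of the helper-driven call order for this encoder: */
encoder_funcs->prepare(encoder);        /* intel_lvds_prepare: panel off */
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
encoder_funcs->commit(encoder);         /* intel_lvds_commit: full power on */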
+ */ + intel_lvds_enable(intel_lvds); +} + static void intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -527,7 +535,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, dev_priv->modeset_on_lid = 0; mutex_lock(&dev->mode_config.mutex); - intel_modeset_check_state(dev); + drm_helper_resume_force_mode(dev); mutex_unlock(&dev->mode_config.mutex); return NOTIFY_OK; @@ -579,8 +587,8 @@ static int intel_lvds_set_property(struct drm_connector *connector, * If the CRTC is enabled, the display will be changed * according to the new panel fitting mode. */ - intel_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); } } @@ -588,9 +596,11 @@ static int intel_lvds_set_property(struct drm_connector *connector, } static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { + .dpms = intel_lvds_dpms, .mode_fixup = intel_lvds_mode_fixup, + .prepare = intel_lvds_prepare, .mode_set = intel_lvds_mode_set, - .disable = intel_encoder_noop, + .commit = intel_lvds_commit, }; static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { @@ -600,7 +610,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { - .dpms = intel_connector_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, @@ -962,15 +972,10 @@ bool intel_lvds_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); - intel_encoder->enable = intel_enable_lvds; - intel_encoder->disable = intel_disable_lvds; - intel_encoder->get_hw_state = intel_lvds_get_hw_state; - intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_encoder->cloneable = false; + intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); if (HAS_PCH_SPLIT(dev)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); else if (IS_GEN4(dev)) diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c index 4bc1c0fc342a..29b72593fbb2 100644 --- a/trunk/drivers/gpu/drm/i915/intel_modes.c +++ b/trunk/drivers/gpu/drm/i915/intel_modes.c @@ -45,6 +45,7 @@ int intel_connector_update_modes(struct drm_connector *connector, drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + connector->display_info.raw_edid = NULL; kfree(edid); return ret; diff --git a/trunk/drivers/gpu/drm/i915/intel_opregion.c b/trunk/drivers/gpu/drm/i915/intel_opregion.c index e27c17012628..18bd0af855dc 100644 --- a/trunk/drivers/gpu/drm/i915/intel_opregion.c +++ b/trunk/drivers/gpu/drm/i915/intel_opregion.c @@ -427,25 +427,6 @@ static void intel_didl_outputs(struct drm_device *dev) goto end; } -static void intel_setup_cadls(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_opregion *opregion = &dev_priv->opregion; - int i = 0; - u32 disp_id; - - /* Initialize the CADL field by duplicating the DIDL values. - * Technically, this is not always correct as display outputs may exist, - * but not active. 
This initialization is necessary for some Clevo - * laptops that check this field before processing the brightness and - * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if - * there are less than eight devices. */ - do { - disp_id = ioread32(&opregion->acpi->didl[i]); - iowrite32(disp_id, &opregion->acpi->cadl[i]); - } while (++i < 8 && disp_id != 0); -} - void intel_opregion_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -455,10 +436,8 @@ void intel_opregion_init(struct drm_device *dev) return; if (opregion->acpi) { - if (drm_core_check_feature(dev, DRIVER_MODESET)) { + if (drm_core_check_feature(dev, DRIVER_MODESET)) intel_didl_outputs(dev); - intel_setup_cadls(dev); - } /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. diff --git a/trunk/drivers/gpu/drm/i915/intel_overlay.c b/trunk/drivers/gpu/drm/i915/intel_overlay.c index afd0f30ab882..830d0dd610e1 100644 --- a/trunk/drivers/gpu/drm/i915/intel_overlay.c +++ b/trunk/drivers/gpu/drm/i915/intel_overlay.c @@ -235,6 +235,54 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, return 0; } +/* Workaround for i830 bug where pipe a must be enable to change control regs */ +static int +i830_activate_pipe_a(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct drm_crtc_helper_funcs *crtc_funcs; + struct drm_display_mode vesa_640x480 = { + DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, + 752, 800, 0, 480, 489, 492, 525, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) + }, *mode; + + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); + if (crtc->dpms_mode == DRM_MODE_DPMS_ON) + return 0; + + /* most i8xx have pipe a forced on, so don't trust dpms mode */ + if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) + return 0; + + crtc_funcs = crtc->base.helper_private; + if (crtc_funcs->dpms == NULL) + return 0; + + DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); + + mode = drm_mode_duplicate(dev, &vesa_640x480); + + if (!drm_crtc_helper_set_mode(&crtc->base, mode, + crtc->base.x, crtc->base.y, + crtc->base.fb)) + return 0; + + crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); + return 1; +} + +static void +i830_deactivate_pipe_a(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); +} + /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { @@ -242,12 +290,17 @@ static int intel_overlay_on(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; struct drm_i915_gem_request *request; + int pipe_a_quirk = 0; int ret; BUG_ON(overlay->active); overlay->active = 1; - WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); + if (IS_I830(dev)) { + pipe_a_quirk = i830_activate_pipe_a(dev); + if (pipe_a_quirk < 0) + return pipe_a_quirk; + } request = kzalloc(sizeof(*request), GFP_KERNEL); if (request == NULL) { @@ -269,6 +322,9 @@ static int intel_overlay_on(struct intel_overlay *overlay) ret = intel_overlay_do_wait_request(overlay, request, NULL); out: + if (pipe_a_quirk) + i830_deactivate_pipe_a(dev); + return ret; } @@ -1383,7 +1439,7 @@ void intel_setup_overlay(struct 
drm_device *dev) } overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index d69f8f49beb5..ba8a27b1757a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -31,8 +31,6 @@ #include "../../../platform/x86/intel_ips.h" #include -#define FORCEWAKE_ACK_TIMEOUT_MS 2 - /* FBC, or Frame Buffer Compression, is a technique employed to compress the * framebuffer contents in-memory, aiming at reducing the required bandwidth * during in-memory transfers and, therefore, reduce the power packet. @@ -595,7 +593,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) break; } - dev_priv->ips.r_t = dev_priv->mem_freq; + dev_priv->r_t = dev_priv->mem_freq; switch (csipll & 0x3ff) { case 0x00c: @@ -627,11 +625,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) } if (dev_priv->fsb_freq == 3200) { - dev_priv->ips.c_m = 0; + dev_priv->c_m = 0; } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { - dev_priv->ips.c_m = 1; + dev_priv->c_m = 1; } else { - dev_priv->ips.c_m = 2; + dev_priv->c_m = 2; } } @@ -2140,7 +2138,7 @@ intel_alloc_context_page(struct drm_device *dev) return NULL; } - ret = i915_gem_object_pin(ctx, 4096, true, false); + ret = i915_gem_object_pin(ctx, 4096, true); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; @@ -2162,22 +2160,11 @@ intel_alloc_context_page(struct drm_device *dev) return NULL; } -/** - * Lock protecting IPS related data structures - */ -DEFINE_SPINLOCK(mchdev_lock); - -/* Global for IPS driver to get at the current i915 device. Protected by - * mchdev_lock. 
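The comment removed here documents a cross-module contract that the exported IPS hooks further down (i915_read_mch_val(), i915_gpu_raise(), i915_gpu_lower(), i915_gpu_busy(), i915_gpu_turbo_disable()) all rely on: i915_mch_dev may only be dereferenced under mchdev_lock, because i915 can unload while intel-ips has a call in flight. Each hook follows roughly this shape (the function name is hypothetical; the pattern is taken from the hunks below):

bool i915_gpu_example_hook(void)        /* hypothetical name */
{
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {            /* i915 not loaded (any more) */
                ret = false;
                goto out_unlock;
        }
        /* ... operate on i915_mch_dev ... */
out_unlock:
        spin_unlock(&mchdev_lock);
        return ret;
}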
*/ -static struct drm_i915_private *i915_mch_dev; - bool ironlake_set_drps(struct drm_device *dev, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; - assert_spin_locked(&mchdev_lock); - rgvswctl = I915_READ16(MEMSWCTL); if (rgvswctl & MEMCTL_CMD_STS) { DRM_DEBUG("gpu busy, RCS change rejected\n"); @@ -2201,8 +2188,6 @@ static void ironlake_enable_drps(struct drm_device *dev) u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; - spin_lock_irq(&mchdev_lock); - /* Enable temp reporting */ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); @@ -2226,12 +2211,12 @@ static void ironlake_enable_drps(struct drm_device *dev) vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; - dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ - dev_priv->ips.fstart = fstart; + dev_priv->fmax = fmax; /* IPS callback will increase this */ + dev_priv->fstart = fstart; - dev_priv->ips.max_delay = fstart; - dev_priv->ips.min_delay = fmin; - dev_priv->ips.cur_delay = fstart; + dev_priv->max_delay = fstart; + dev_priv->min_delay = fmin; + dev_priv->cur_delay = fstart; DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, fstart); @@ -2248,29 +2233,23 @@ static void ironlake_enable_drps(struct drm_device *dev) rgvmodectl |= MEMMODE_SWMODE_EN; I915_WRITE(MEMMODECTL, rgvmodectl); - if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) + if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) DRM_ERROR("stuck trying to change perf mode\n"); - mdelay(1); + msleep(1); ironlake_set_drps(dev, fstart); - dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + + dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + I915_READ(0x112e0); - dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); - dev_priv->ips.last_count2 = I915_READ(0x112f4); - getrawmonotonic(&dev_priv->ips.last_time2); - - spin_unlock_irq(&mchdev_lock); + dev_priv->last_time1 = jiffies_to_msecs(jiffies); + dev_priv->last_count2 = I915_READ(0x112f4); + getrawmonotonic(&dev_priv->last_time2); } static void ironlake_disable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl; - - spin_lock_irq(&mchdev_lock); - - rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvswctl = I915_READ16(MEMSWCTL); /* Ack interrupts, disable EFC interrupt */ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); @@ -2280,54 +2259,31 @@ static void ironlake_disable_drps(struct drm_device *dev) I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->ips.fstart); - mdelay(1); + ironlake_set_drps(dev, dev_priv->fstart); + msleep(1); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE(MEMSWCTL, rgvswctl); - mdelay(1); + msleep(1); - spin_unlock_irq(&mchdev_lock); } -/* There's a funny hw issue where the hw returns all 0 when reading from - * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value - * ourselves, instead of doing a rmw cycle (which might result in us clearing - * all limits and the gpu stuck at whatever frequency it is at atm). 
- */ -static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val) +void gen6_set_rps(struct drm_device *dev, u8 val) { + struct drm_i915_private *dev_priv = dev->dev_private; u32 limits; limits = 0; + if (val >= dev_priv->max_delay) + val = dev_priv->max_delay; + else + limits |= dev_priv->max_delay << 24; - if (*val >= dev_priv->rps.max_delay) - *val = dev_priv->rps.max_delay; - limits |= dev_priv->rps.max_delay << 24; - - /* Only set the down limit when we've reached the lowest level to avoid - * getting more interrupts, otherwise leave this clear. This prevents a - * race in the hw when coming out of rc6: There's a tiny window where - * the hw runs at the minimal clock before selecting the desired - * frequency, if the down threshold expires in that window we will not - * receive a down interrupt. */ - if (*val <= dev_priv->rps.min_delay) { - *val = dev_priv->rps.min_delay; - limits |= dev_priv->rps.min_delay << 16; - } - - return limits; -} - -void gen6_set_rps(struct drm_device *dev, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 limits = gen6_rps_limits(dev_priv, &val); - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(val > dev_priv->rps.max_delay); - WARN_ON(val < dev_priv->rps.min_delay); + if (val <= dev_priv->min_delay) + val = dev_priv->min_delay; + else + limits |= dev_priv->min_delay << 16; - if (val == dev_priv->rps.cur_delay) + if (val == dev_priv->cur_delay) return; I915_WRITE(GEN6_RPNSWREQ, @@ -2340,11 +2296,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val) */ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); - POSTING_READ(GEN6_RPNSWREQ); - - dev_priv->rps.cur_delay = val; - - trace_intel_gpu_freq_change(val * 50); + dev_priv->cur_delay = val; } static void gen6_disable_rps(struct drm_device *dev) @@ -2360,40 +2312,40 @@ static void gen6_disable_rps(struct drm_device *dev) * register (PMIMR) to mask PM interrupts. The only risk is in leaving * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - spin_lock_irq(&dev_priv->rps.lock); - dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->rps_lock); + dev_priv->pm_iir = 0; + spin_unlock_irq(&dev_priv->rps_lock); I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); } int intel_enable_rc6(const struct drm_device *dev) { - /* Respect the kernel parameter if it is set */ + /* + * Respect the kernel parameter if it is set + */ if (i915_enable_rc6 >= 0) return i915_enable_rc6; - if (INTEL_INFO(dev)->gen == 5) { -#ifdef CONFIG_INTEL_IOMMU - /* Disable rc6 on ilk if VT-d is on. */ - if (intel_iommu_gfx_mapped) - return false; -#endif - DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); - return INTEL_RC6_ENABLE; - } + /* + * Disable RC6 on Ironlake + */ + if (INTEL_INFO(dev)->gen == 5) + return 0; - if (IS_HASWELL(dev)) { - DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); + /* On Haswell, only RC6 is available. So let's enable it by default to + * provide better testing and coverage since the beginning. + */ + if (IS_HASWELL(dev)) return INTEL_RC6_ENABLE; - } - /* snb/ivb have more than one rc6 state. 
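intel_enable_rc6() returns a bitmask rather than a plain boolean because the hardware exposes progressively deeper RC6 states; the hunk below ORs two of them together for the default case. Assuming the flag definitions in the driver header match their use here (only the first two appear in this patch):

/* assumed to mirror the i915 header definitions */
#define INTEL_RC6_ENABLE        (1 << 0)        /* plain RC6 */
#define INTEL_RC6p_ENABLE       (1 << 1)        /* deep RC6 */
#define INTEL_RC6pp_ENABLE      (1 << 2)        /* deepest RC6 */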
*/ + /* + * Disable rc6 on Sandybridge + */ if (INTEL_INFO(dev)->gen == 6) { DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); return INTEL_RC6_ENABLE; } - DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } @@ -2431,9 +2383,9 @@ static void gen6_enable_rps(struct drm_device *dev) gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); /* In units of 100MHz */ - dev_priv->rps.max_delay = rp_state_cap & 0xff; - dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; - dev_priv->rps.cur_delay = 0; + dev_priv->max_delay = rp_state_cap & 0xff; + dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16; + dev_priv->cur_delay = 0; /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -2486,8 +2438,8 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - dev_priv->rps.max_delay << 24 | - dev_priv->rps.min_delay << 16); + dev_priv->max_delay << 24 | + dev_priv->min_delay << 16); I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); @@ -2525,7 +2477,7 @@ static void gen6_enable_rps(struct drm_device *dev) 500)) DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); if (pcu_mbox & (1<<31)) { /* OC supported */ - dev_priv->rps.max_delay = pcu_mbox & 0xff; + dev_priv->max_delay = pcu_mbox & 0xff; DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); } @@ -2533,10 +2485,10 @@ static void gen6_enable_rps(struct drm_device *dev) /* requires MSI enabled */ I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); - spin_lock_irq(&dev_priv->rps.lock); - WARN_ON(dev_priv->rps.pm_iir != 0); + spin_lock_irq(&dev_priv->rps_lock); + WARN_ON(dev_priv->pm_iir != 0); I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->rps_lock); /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); @@ -2568,9 +2520,9 @@ static void gen6_update_ring_freq(struct drm_device *dev) * to use for memory access. We do this by specifying the IA frequency * the PCU should use as a reference to determine the ring frequency. */ - for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; + for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; gpu_freq--) { - int diff = dev_priv->rps.max_delay - gpu_freq; + int diff = dev_priv->max_delay - gpu_freq; /* * For GPU frequencies less than 750MHz, just use the lowest @@ -2734,16 +2686,14 @@ static const struct cparams { { 0, 800, 231, 23784 }, }; -static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { u64 total_count, diff, ret; u32 count1, count2, count3, m = 0, c = 0; unsigned long now = jiffies_to_msecs(jiffies), diff1; int i; - assert_spin_locked(&mchdev_lock); - - diff1 = now - dev_priv->ips.last_time1; + diff1 = now - dev_priv->last_time1; /* Prevent division-by-zero if we are asking too fast. * Also, we don't get interesting results if we are polling @@ -2751,7 +2701,7 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) * in such cases. 
*/ if (diff1 <= 10) - return dev_priv->ips.chipset_power; + return dev_priv->chipset_power; count1 = I915_READ(DMIEC); count2 = I915_READ(DDREC); @@ -2760,16 +2710,16 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) total_count = count1 + count2 + count3; /* FIXME: handle per-counter overflow */ - if (total_count < dev_priv->ips.last_count1) { - diff = ~0UL - dev_priv->ips.last_count1; + if (total_count < dev_priv->last_count1) { + diff = ~0UL - dev_priv->last_count1; diff += total_count; } else { - diff = total_count - dev_priv->ips.last_count1; + diff = total_count - dev_priv->last_count1; } for (i = 0; i < ARRAY_SIZE(cparams); i++) { - if (cparams[i].i == dev_priv->ips.c_m && - cparams[i].t == dev_priv->ips.r_t) { + if (cparams[i].i == dev_priv->c_m && + cparams[i].t == dev_priv->r_t) { m = cparams[i].m; c = cparams[i].c; break; @@ -2780,30 +2730,14 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) ret = ((m * diff) + c); ret = div_u64(ret, 10); - dev_priv->ips.last_count1 = total_count; - dev_priv->ips.last_time1 = now; + dev_priv->last_count1 = total_count; + dev_priv->last_time1 = now; - dev_priv->ips.chipset_power = ret; + dev_priv->chipset_power = ret; return ret; } -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) -{ - unsigned long val; - - if (dev_priv->info->gen != 5) - return 0; - - spin_lock_irq(&mchdev_lock); - - val = __i915_chipset_val(dev_priv); - - spin_unlock_irq(&mchdev_lock); - - return val; -} - unsigned long i915_mch_val(struct drm_i915_private *dev_priv) { unsigned long m, x, b; @@ -2960,17 +2894,18 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) return v_table[pxvid].vd; } -static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) +void i915_update_gfx_val(struct drm_i915_private *dev_priv) { struct timespec now, diff1; u64 diff; unsigned long diffms; u32 count; - assert_spin_locked(&mchdev_lock); + if (dev_priv->info->gen != 5) + return; getrawmonotonic(&now); - diff1 = timespec_sub(now, dev_priv->ips.last_time2); + diff1 = timespec_sub(now, dev_priv->last_time2); /* Don't divide by 0 */ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; @@ -2979,42 +2914,28 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) count = I915_READ(GFXEC); - if (count < dev_priv->ips.last_count2) { - diff = ~0UL - dev_priv->ips.last_count2; + if (count < dev_priv->last_count2) { + diff = ~0UL - dev_priv->last_count2; diff += count; } else { - diff = count - dev_priv->ips.last_count2; + diff = count - dev_priv->last_count2; } - dev_priv->ips.last_count2 = count; - dev_priv->ips.last_time2 = now; + dev_priv->last_count2 = count; + dev_priv->last_time2 = now; /* More magic constants... 
*/ diff = diff * 1181; diff = div_u64(diff, diffms * 10); - dev_priv->ips.gfx_power = diff; -} - -void i915_update_gfx_val(struct drm_i915_private *dev_priv) -{ - if (dev_priv->info->gen != 5) - return; - - spin_lock_irq(&mchdev_lock); - - __i915_update_gfx_val(dev_priv); - - spin_unlock_irq(&mchdev_lock); + dev_priv->gfx_power = diff; } -static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { unsigned long t, corr, state1, corr2, state2; u32 pxvid, ext_v; - assert_spin_locked(&mchdev_lock); - - pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); + pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); pxvid = (pxvid >> 24) & 0x7f; ext_v = pvid_to_extvid(dev_priv, pxvid); @@ -3034,31 +2955,27 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) corr = corr * ((150142 * state1) / 10000 - 78642); corr /= 100000; - corr2 = (corr * dev_priv->ips.corr); + corr2 = (corr * dev_priv->corr); state2 = (corr2 * state1) / 10000; state2 /= 100; /* convert to mW */ - __i915_update_gfx_val(dev_priv); + i915_update_gfx_val(dev_priv); - return dev_priv->ips.gfx_power + state2; + return dev_priv->gfx_power + state2; } -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) -{ - unsigned long val; - - if (dev_priv->info->gen != 5) - return 0; - - spin_lock_irq(&mchdev_lock); - - val = __i915_gfx_val(dev_priv); - - spin_unlock_irq(&mchdev_lock); - - return val; -} +/* Global for IPS driver to get at the current i915 device */ +static struct drm_i915_private *i915_mch_dev; +/* + * Lock protecting IPS related data structures + * - i915_mch_dev + * - dev_priv->max_delay + * - dev_priv->min_delay + * - dev_priv->fmax + * - dev_priv->gpu_busy + */ +static DEFINE_SPINLOCK(mchdev_lock); /** * i915_read_mch_val - return value for IPS use @@ -3071,18 +2988,18 @@ unsigned long i915_read_mch_val(void) struct drm_i915_private *dev_priv; unsigned long chipset_val, graphics_val, ret = 0; - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; - chipset_val = __i915_chipset_val(dev_priv); - graphics_val = __i915_gfx_val(dev_priv); + chipset_val = i915_chipset_val(dev_priv); + graphics_val = i915_gfx_val(dev_priv); ret = chipset_val + graphics_val; out_unlock: - spin_unlock_irq(&mchdev_lock); + spin_unlock(&mchdev_lock); return ret; } @@ -3098,18 +3015,18 @@ bool i915_gpu_raise(void) struct drm_i915_private *dev_priv; bool ret = true; - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; - if (dev_priv->ips.max_delay > dev_priv->ips.fmax) - dev_priv->ips.max_delay--; + if (dev_priv->max_delay > dev_priv->fmax) + dev_priv->max_delay--; out_unlock: - spin_unlock_irq(&mchdev_lock); + spin_unlock(&mchdev_lock); return ret; } @@ -3126,18 +3043,18 @@ bool i915_gpu_lower(void) struct drm_i915_private *dev_priv; bool ret = true; - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; - if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) - dev_priv->ips.max_delay++; + if (dev_priv->max_delay < dev_priv->min_delay) + dev_priv->max_delay++; out_unlock: - spin_unlock_irq(&mchdev_lock); + spin_unlock(&mchdev_lock); return ret; } @@ -3151,20 +3068,17 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); bool i915_gpu_busy(void) { struct drm_i915_private *dev_priv; - struct intel_ring_buffer *ring; bool ret 
= false; - int i; - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; - for_each_ring(ring, dev_priv, i) - ret |= !list_empty(&ring->request_list); + ret = dev_priv->busy; out_unlock: - spin_unlock_irq(&mchdev_lock); + spin_unlock(&mchdev_lock); return ret; } @@ -3181,20 +3095,20 @@ bool i915_gpu_turbo_disable(void) struct drm_i915_private *dev_priv; bool ret = true; - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; - dev_priv->ips.max_delay = dev_priv->ips.fstart; + dev_priv->max_delay = dev_priv->fstart; - if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) + if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) ret = false; out_unlock: - spin_unlock_irq(&mchdev_lock); + spin_unlock(&mchdev_lock); return ret; } @@ -3222,20 +3136,19 @@ ips_ping_for_i915_load(void) void intel_gpu_ips_init(struct drm_i915_private *dev_priv) { - /* We only register the i915 ips part with intel-ips once everything is - * set up, to avoid intel-ips sneaking in and reading bogus values. */ - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); i915_mch_dev = dev_priv; - spin_unlock_irq(&mchdev_lock); + dev_priv->mchdev_lock = &mchdev_lock; + spin_unlock(&mchdev_lock); ips_ping_for_i915_load(); } void intel_gpu_ips_teardown(void) { - spin_lock_irq(&mchdev_lock); + spin_lock(&mchdev_lock); i915_mch_dev = NULL; - spin_unlock_irq(&mchdev_lock); + spin_unlock(&mchdev_lock); } static void intel_init_emon(struct drm_device *dev) { @@ -3305,7 +3218,7 @@ static void intel_init_emon(struct drm_device *dev) lcfuse = I915_READ(LCFUSE02); - dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); + dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } void intel_disable_gt_powersave(struct drm_device *dev) @@ -3818,6 +3731,42 @@ void intel_init_clock_gating(struct drm_device *dev) dev_priv->display.init_pch_clock_gating(dev); } +static void gen6_sanitize_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 limits, delay, old; + + gen6_gt_force_wake_get(dev_priv); + + old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS); + /* Make sure we continue to get interrupts + * until we hit the minimum or maximum frequencies. + */ + limits &= ~(0x3f << 16 | 0x3f << 24); + delay = dev_priv->cur_delay; + if (delay < dev_priv->max_delay) + limits |= (dev_priv->max_delay & 0x3f) << 24; + if (delay > dev_priv->min_delay) + limits |= (dev_priv->min_delay & 0x3f) << 16; + + if (old != limits) { + /* Note that the known failure case is to read back 0. */ + DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS " + "expected %08x, was %08x\n", limits, old); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); + } + + gen6_gt_force_wake_put(dev_priv); +} + +void intel_sanitize_pm(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.sanitize_pm) + dev_priv->display.sanitize_pm(dev); +} + /* Starting with Haswell, we have different power wells for * different parts of the GPU. This attempts to enable them all. 
*/ @@ -3903,6 +3852,7 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = gen6_init_clock_gating; + dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ if (SNB_READ_WM0_LATENCY()) { @@ -3914,6 +3864,7 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else if (IS_HASWELL(dev)) { if (SNB_READ_WM0_LATENCY()) { dev_priv->display.update_wm = sandybridge_update_wm; @@ -3925,6 +3876,7 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = haswell_init_clock_gating; + dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else dev_priv->display.update_wm = NULL; } else if (IS_VALLEYVIEW(dev)) { @@ -4003,16 +3955,14 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) else forcewake_ack = FORCEWAKE_ACK; - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) + DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + POSTING_READ(FORCEWAKE); - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); __gen6_gt_wait_for_thread_c0(dev_priv); } @@ -4026,16 +3976,14 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) else forcewake_ack = FORCEWAKE_MT_ACK; - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) + DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ + POSTING_READ(FORCEWAKE_MT); - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); __gen6_gt_wait_for_thread_c0(dev_priv); } @@ -4068,14 +4016,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ + POSTING_READ(FORCEWAKE); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ + POSTING_READ(FORCEWAKE_MT); gen6_gt_check_fifodbg(dev_priv); } @@ -4114,24 +4062,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) static void vlv_force_wake_get(struct drm_i915_private *dev_priv) { - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, - 
FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + /* Already awake? */ + if ((I915_READ(0x130094) & 0xa1) == 0xa1) + return; - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); + POSTING_READ(FORCEWAKE_VLV); - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) + DRM_ERROR("Force wake wait timed out\n"); __gen6_gt_wait_for_thread_c0(dev_priv); } static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); + /* FIXME: confirm VLV behavior with Punit folks */ + POSTING_READ(FORCEWAKE_VLV); } void intel_gt_init(struct drm_device *dev) diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index 984a0c5fbf5d..e2a73b38abe9 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -262,83 +262,6 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, return 0; } -static int -gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) -{ - int ret; - - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - - return 0; -} - -static int -gen7_render_ring_flush(struct intel_ring_buffer *ring, - u32 invalidate_domains, u32 flush_domains) -{ - u32 flags = 0; - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; - int ret; - - /* - * Ensure that any following seqno writes only happen when the render - * cache is indeed flushed. - * - * Workaround: 4th PIPE_CONTROL command (except the ones with only - * read-cache invalidate bits set) must have the CS_STALL bit set. We - * don't try to be clever and just set it unconditionally. - */ - flags |= PIPE_CONTROL_CS_STALL; - - /* Just flush everything. Experiments have shown that reducing the - * number of bits based on the write domains has little performance - * impact. - */ - if (flush_domains) { - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - } - if (invalidate_domains) { - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - /* - * TLB invalidate requires a post-sync write. - */ - flags |= PIPE_CONTROL_QW_WRITE; - - /* Workaround: we must issue a pipe_control with CS-stall bit - * set before a pipe_control command that has the state cache - * invalidate bit set. 
*/ - gen7_render_ring_cs_stall_wa(ring); - } - - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(ring, flags); - intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - - return 0; -} - static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { @@ -459,12 +382,12 @@ init_pipe_control(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_object_pin(obj, 4096, true); if (ret) goto err_unref; pc->gtt_offset = obj->gtt_offset; - pc->cpu_page = kmap(sg_page(obj->pages->sgl)); + pc->cpu_page = kmap(obj->pages[0]); if (pc->cpu_page == NULL) goto err_unpin; @@ -491,8 +414,7 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) return; obj = pc->obj; - - kunmap(sg_page(obj->pages->sgl)); + kunmap(obj->pages[0]); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); @@ -540,7 +462,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 6) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_GPU_CACHE(dev)) + if (IS_IVYBRIDGE(dev)) I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); return ret; @@ -706,24 +628,26 @@ pc_render_add_request(struct intel_ring_buffer *ring, } static u32 -gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +gen6_ring_get_seqno(struct intel_ring_buffer *ring) { + struct drm_device *dev = ring->dev; + /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. */ - if (!lazy_coherency) + if (IS_GEN6(dev) || IS_GEN7(dev)) intel_ring_get_active_head(ring); return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static u32 -ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +ring_get_seqno(struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +pc_render_get_seqno(struct intel_ring_buffer *ring) { struct pipe_control *pc = ring->private; return pc->cpu_page[0]; @@ -928,7 +852,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { - if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) + if (IS_IVYBRIDGE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | GEN6_RENDER_L3_PARITY_ERROR)); else @@ -951,7 +875,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { - if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) + if (IS_IVYBRIDGE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); else I915_WRITE_IMR(ring, ~0); @@ -1027,7 +951,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring) if (obj == NULL) return; - kunmap(sg_page(obj->pages->sgl)); + kunmap(obj->pages[0]); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; @@ -1048,13 +972,13 @@ static int init_status_page(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } ring->status_page.gfx_addr = 
obj->gtt_offset; - ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); + ring->status_page.page_addr = kmap(obj->pages[0]); if (ring->status_page.page_addr == NULL) { ret = -ENOMEM; goto err_unpin; } @@ -1086,6 +1010,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = 32 * PAGE_SIZE; init_waitqueue_head(&ring->irq_queue); @@ -1105,7 +1030,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->obj = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; @@ -1454,9 +1379,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; - ring->flush = gen7_render_ring_flush; - if (INTEL_INFO(dev)->gen == 6) - ring->flush = gen6_render_ring_flush; + ring->flush = gen6_render_ring_flush; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT; @@ -1558,6 +1481,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = size; ring->effective_size = ring->size; @@ -1650,41 +1574,3 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) return intel_init_ring_buffer(dev, ring); } - -int -intel_ring_flush_all_caches(struct intel_ring_buffer *ring) -{ - int ret; - - if (!ring->gpu_caches_dirty) - return 0; - - ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); - if (ret) - return ret; - - trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); - - ring->gpu_caches_dirty = false; - return 0; -} - -int -intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) -{ - uint32_t flush_domains; - int ret; - - flush_domains = 0; - if (ring->gpu_caches_dirty) - flush_domains = I915_GEM_GPU_DOMAINS; - - ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); - if (ret) - return ret; - - trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); - - ring->gpu_caches_dirty = false; - return 0; -} diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h index 2ea7a311a1f0..1d3c81fdad92 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -72,14 +72,7 @@ struct intel_ring_buffer { u32 flush_domains); int (*add_request)(struct intel_ring_buffer *ring, u32 *seqno); - /* Some chipsets are not quite as coherent as advertised and need - * an expensive kick to force a true read of the up-to-date seqno. - * However, the up-to-date seqno is not always required and the last - * seen value is good enough. Note that the seqno will always be - * monotonic, even if not coherent. - */ - u32 (*get_seqno)(struct intel_ring_buffer *ring, - bool lazy_coherency); + u32 (*get_seqno)(struct intel_ring_buffer *ring); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, u32 offset, u32 length); void (*cleanup)(struct intel_ring_buffer *ring); @@ -107,6 +100,15 @@ struct intel_ring_buffer { */ struct list_head request_list; + /** + * List of objects currently pending a GPU write flush. + * + * All elements on this list will belong to either the + * active_list or flushing_list, last_rendering_seqno can + * be used to differentiate between the two lists. 
+ */ + struct list_head gpu_write_list; + /** * Do we have some not yet emitted requests outstanding? */ @@ -202,8 +204,6 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, void intel_ring_advance(struct intel_ring_buffer *ring); u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); -int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); -int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 39c319827f91..123afd357611 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -97,7 +97,7 @@ struct intel_sdvo { /* * Hotplug activation bits for this device */ - uint16_t hotplug_active; + uint8_t hotplug_active[2]; /** * This is used to select the color range of RGB outputs in HDMI mode. @@ -628,14 +628,6 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, &outputs, sizeof(outputs)); } -static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo, - u16 *outputs) -{ - return intel_sdvo_get_value(intel_sdvo, - SDVO_CMD_GET_ACTIVE_OUTPUTS, - outputs, sizeof(*outputs)); -} - static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, int mode) { @@ -1150,132 +1142,51 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo_write_sdvox(intel_sdvo, sdvox); } -static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) -{ - struct intel_sdvo_connector *intel_sdvo_connector = - to_intel_sdvo_connector(&connector->base); - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); - u16 active_outputs; - - intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); - - if (active_outputs & intel_sdvo_connector->output_flag) - return true; - else - return false; -} - -static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, - enum pipe *pipe) +static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - u32 tmp; - - tmp = I915_READ(intel_sdvo->sdvo_reg); - - if (!(tmp & SDVO_ENABLE)) - return false; - - if (HAS_PCH_CPT(dev)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - return true; -} - -static void intel_disable_sdvo(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - u32 temp; - - intel_sdvo_set_active_outputs(intel_sdvo, 0); - if (0) - intel_sdvo_set_encoder_power_state(intel_sdvo, - DRM_MODE_DPMS_OFF); - - temp = I915_READ(intel_sdvo->sdvo_reg); - if ((temp & SDVO_ENABLE) != 0) { - intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); - } -} - -static void intel_enable_sdvo(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); u32 temp; - bool input1, input2; - int i; - u8 status; - - temp = 
I915_READ(intel_sdvo->sdvo_reg); - if ((temp & SDVO_ENABLE) == 0) - intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); - for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev, intel_crtc->pipe); - - status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); - /* Warn if the device reported failure to sync. - * A lot of SDVO devices fail to notify of sync, but it's - * a given if the status is a success, we succeeded. - */ - if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { - DRM_DEBUG_KMS("First %s output reported failure to " - "sync\n", SDVO_NAME(intel_sdvo)); - } - - if (0) - intel_sdvo_set_encoder_power_state(intel_sdvo, - DRM_MODE_DPMS_ON); - intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); -} - -static void intel_sdvo_dpms(struct drm_connector *connector, int mode) -{ - struct drm_crtc *crtc; - struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); - - /* dvo supports only 2 dpms states. */ - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (mode == connector->dpms) - return; - - connector->dpms = mode; - - /* Only need to change hw state when actually enabled */ - crtc = intel_sdvo->base.base.crtc; - if (!crtc) { - intel_sdvo->base.connectors_active = false; - return; - } if (mode != DRM_MODE_DPMS_ON) { intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); - intel_sdvo->base.connectors_active = false; - - intel_crtc_update_dpms(crtc); + if (mode == DRM_MODE_DPMS_OFF) { + temp = I915_READ(intel_sdvo->sdvo_reg); + if ((temp & SDVO_ENABLE) != 0) { + intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); + } + } } else { - intel_sdvo->base.connectors_active = true; - - intel_crtc_update_dpms(crtc); + bool input1, input2; + int i; + u8 status; + + temp = I915_READ(intel_sdvo->sdvo_reg); + if ((temp & SDVO_ENABLE) == 0) + intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); + for (i = 0; i < 2; i++) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); + /* Warn if the device reported failure to sync. + * A lot of SDVO devices fail to notify of sync, but it's + * a given if the status is a success, we succeeded. + */ + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { + DRM_DEBUG_KMS("First %s output reported failure to " + "sync\n", SDVO_NAME(intel_sdvo)); + } if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); } - - intel_modeset_check_state(connector->dev); + return; } static int intel_sdvo_mode_valid(struct drm_connector *connector, @@ -1340,29 +1251,25 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } -static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) +static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) { struct drm_device *dev = intel_sdvo->base.base.dev; - uint16_t hotplug; + u8 response[2]; /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise * on the line. 
*/ if (IS_I945G(dev) || IS_I945GM(dev)) - return 0; - - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, - &hotplug, sizeof(hotplug))) - return 0; + return false; - return hotplug; + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, + &response, 2) && response[0]; } static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, - &intel_sdvo->hotplug_active, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); } static bool @@ -1438,6 +1345,7 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) } } else status = connector_status_disconnected; + connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1511,6 +1419,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) else ret = connector_status_disconnected; + connector->display_info.raw_edid = NULL; kfree(edid); } else ret = connector_status_connected; @@ -1556,6 +1465,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) drm_add_edid_modes(connector, edid); } + connector->display_info.raw_edid = NULL; kfree(edid); } } @@ -1927,8 +1837,8 @@ intel_sdvo_set_property(struct drm_connector *connector, done: if (intel_sdvo->base.base.crtc) { struct drm_crtc *crtc = intel_sdvo->base.base.crtc; - intel_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, + crtc->y, crtc->fb); } return 0; @@ -1936,13 +1846,15 @@ intel_sdvo_set_property(struct drm_connector *connector, } static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { + .dpms = intel_sdvo_dpms, .mode_fixup = intel_sdvo_mode_fixup, + .prepare = intel_encoder_prepare, .mode_set = intel_sdvo_mode_set, - .disable = intel_encoder_noop, + .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_sdvo_connector_funcs = { - .dpms = intel_sdvo_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, @@ -2114,7 +2026,6 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.base.interlace_allowed = 1; connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; - connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; intel_connector_attach_encoder(&connector->base, &encoder->base); drm_sysfs_connector_add(&connector->base.base); @@ -2153,18 +2064,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; - if (intel_sdvo_get_hotplug_support(intel_sdvo) & - intel_sdvo_connector->output_flag) { + if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { connector->polled = DRM_CONNECTOR_POLL_HPD; - intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; + intel_sdvo->hotplug_active[0] |= 1 << device; /* Some SDVO devices have one-shot hotplug interrupts. * Ensure that they get re-enabled when an interrupt happens. 
*/ intel_encoder->hot_plug = intel_sdvo_enable_hotplug; intel_sdvo_enable_hotplug(intel_encoder); - } else { - connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } + else + connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2172,7 +2082,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } - intel_sdvo->base.cloneable = true; + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (intel_sdvo->is_hdmi) @@ -2203,7 +2114,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; - intel_sdvo->base.cloneable = false; + intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); @@ -2246,7 +2157,8 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } - intel_sdvo->base.cloneable = true; + intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | + (1 << INTEL_ANALOG_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); @@ -2278,10 +2190,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, - * as opposed to native LVDS, where we upscale with the panel-fitter - * (and hence only the native LVDS resolution could be cloned). */ - intel_sdvo->base.cloneable = true; + intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | + (1 << INTEL_SDVO_LVDS_CLONE_BIT)); intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) @@ -2666,10 +2576,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); - intel_encoder->disable = intel_disable_sdvo; - intel_encoder->enable = intel_enable_sdvo; - intel_encoder->get_hw_state = intel_sdvo_get_hw_state; - /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; @@ -2684,7 +2590,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) /* Only enable the hotplug irq if we need it, to work around noisy * hotplug lines. 
*/ - if (intel_sdvo->hotplug_active) + if (intel_sdvo->hotplug_active[0]) dev_priv->hotplug_supported_mask |= hotplug_mask; intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c index d2c5c8f3baf3..befce6c49704 100644 --- a/trunk/drivers/gpu/drm/i915/intel_tv.c +++ b/trunk/drivers/gpu/drm/i915/intel_tv.c @@ -836,37 +836,22 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector) base); } -static bool -intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) -{ - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp = I915_READ(TV_CTL); - - if (!(tmp & TV_ENC_ENABLE)) - return false; - - *pipe = PORT_TO_PIPE(tmp); - - return true; -} - static void -intel_enable_tv(struct intel_encoder *encoder) +intel_tv_dpms(struct drm_encoder *encoder, int mode) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); -} - -static void -intel_disable_tv(struct intel_encoder *encoder) -{ - struct drm_device *dev = encoder->base.dev; + struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); + switch (mode) { + case DRM_MODE_DPMS_ON: + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); + break; + } } static const struct tv_mode * @@ -910,14 +895,17 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct drm_device *dev = encoder->dev; struct intel_tv *intel_tv = enc_to_intel_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + struct intel_encoder *other_encoder; if (!tv_mode) return false; - if (intel_encoder_check_is_cloned(&intel_tv->base)) - return false; + for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder) + if (&other_encoder->base != encoder) + return false; adjusted_mode->clock = tv_mode->clock; return true; @@ -1315,9 +1303,12 @@ intel_tv_detect(struct drm_connector *connector, bool force) if (force) { struct intel_load_detect_pipe tmp; - if (intel_get_load_detect_pipe(connector, &mode, &tmp)) { + if (intel_get_load_detect_pipe(&intel_tv->base, connector, + &mode, &tmp)) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(connector, &tmp); + intel_release_load_detect_pipe(&intel_tv->base, + connector, + &tmp); } else return connector_status_unknown; } else @@ -1483,20 +1474,22 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop } if (changed && crtc) - intel_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, + crtc->y, crtc->fb); out: return ret; } static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { + .dpms = intel_tv_dpms, .mode_fixup = intel_tv_mode_fixup, + .prepare = intel_encoder_prepare, .mode_set = intel_tv_mode_set, - .disable = intel_encoder_noop, + .commit = intel_encoder_commit, }; static const struct drm_connector_funcs intel_tv_connector_funcs = { - .dpms = intel_connector_dpms, + .dpms = drm_helper_connector_dpms, .detect = intel_tv_detect, .destroy = intel_tv_destroy, .set_property = 
intel_tv_set_property, @@ -1626,15 +1619,10 @@ intel_tv_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); - intel_encoder->enable = intel_enable_tv; - intel_encoder->disable = intel_disable_tv; - intel_encoder->get_hw_state = intel_tv_get_hw_state; - intel_connector->get_hw_state = intel_connector_get_hw_state; - intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); - intel_encoder->cloneable = false; + intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h index d22cbbf3a202..6f13b3563234 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -195,6 +195,7 @@ struct mga_device { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; + atomic_t validate_sequence; } ttm; u32 reg_1e24; /* SE model number */ diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index c7420e83c0b9..b69642d5d850 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1399,6 +1399,7 @@ static int mga_vga_get_modes(struct drm_connector *connector) if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); + connector->display_info.raw_edid = NULL; kfree(edid); } return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c index ff23d88880e5..3ca240b4413d 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) return 0; } else if (init->class == 0x906e) { - NV_ERROR(dev, "906e not supported yet\n"); + NV_DEBUG(dev, "906e not supported yet\n"); return -EINVAL; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c index e754aa32edf1..7e289d2ad8e4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -289,7 +289,9 @@ dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay) if (ret) return ret; - NV_DEBUG_KMS(dev, "status %*ph\n", 6, dp->stat); + NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n", + dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3], + dp->stat[4], dp->stat[5]); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c index f704e942372e..f376c39310df 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c @@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev) priv = dev_priv->engine.fb.priv; nv_wr32(dev, 0x100c10, priv->r100c10 >> 8); + nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */ return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c index 47ab388a606e..8e5a2f407ed4 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fence.c @@ -32,6 +32,7 @@ struct nvc0_fence_priv { 
struct nouveau_fence_priv base; struct nouveau_bo *bo; + u32 *suspend; }; struct nvc0_fence_chan { @@ -125,12 +126,36 @@ nvc0_fence_context_new(struct nouveau_channel *chan, int engine) static int nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend) { + struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct nvc0_fence_priv *priv = nv_engine(dev, engine); + int i; + + if (suspend) { + priv->suspend = vmalloc(pfifo->channels * sizeof(u32)); + if (!priv->suspend) + return -ENOMEM; + + for (i = 0; i < pfifo->channels; i++) + priv->suspend[i] = nouveau_bo_rd32(priv->bo, i); + } + return 0; } static int nvc0_fence_init(struct drm_device *dev, int engine) { + struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct nvc0_fence_priv *priv = nv_engine(dev, engine); + int i; + + if (priv->suspend) { + for (i = 0; i < pfifo->channels; i++) + nouveau_bo_wr32(priv->bo, i, priv->suspend[i]); + vfree(priv->suspend); + priv->suspend = NULL; + } + return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c index 7d85553d518c..cd39eb99f5b1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) static void nvc0_fifo_isr(struct drm_device *dev) { - u32 stat = nv_rd32(dev, 0x002100); + u32 mask = nv_rd32(dev, 0x002140); + u32 stat = nv_rd32(dev, 0x002100) & mask; if (stat & 0x00000100) { NV_INFO(dev, "PFIFO: unknown status 0x00000100\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c b/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c index e98d144e6eb9..281bece751b6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c @@ -345,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) static void nve0_fifo_isr(struct drm_device *dev) { - u32 stat = nv_rd32(dev, 0x002100); + u32 mask = nv_rd32(dev, 0x002140); + u32 stat = nv_rd32(dev, 0x002100) & mask; if (stat & 0x00000100) { NV_INFO(dev, "PFIFO: unknown status 0x00000100\n"); diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 96184d02c8d9..2817101fb167 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -83,19 +83,25 @@ static void atombios_scaler_setup(struct drm_crtc *crtc) struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); ENABLE_SCALER_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); - struct radeon_encoder *radeon_encoder = - to_radeon_encoder(radeon_crtc->encoder); + /* fixme - fill in enc_priv for atom dac */ enum radeon_tv_std tv_std = TV_STD_NTSC; bool is_tv = false, is_cv = false; + struct drm_encoder *encoder; if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) return; - if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { - struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; - tv_std = tv_dac->tv_std; - is_tv = true; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + /* find tv std */ + if (encoder->crtc == crtc) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { + struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; + tv_std = tv_dac->tv_std; + is_tv = true; + } + } } memset(&args, 0, sizeof(args)); @@ -527,87 +533,99 @@ union adjust_pixel_clock { }; static 
u32 atombios_adjust_pll(struct drm_crtc *crtc, - struct drm_display_mode *mode) + struct drm_display_mode *mode, + struct radeon_pll *pll, + bool ss_enabled, + struct radeon_atom_ss *ss) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct drm_encoder *encoder = radeon_crtc->encoder; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct drm_encoder *encoder = NULL; + struct radeon_encoder *radeon_encoder = NULL; + struct drm_connector *connector = NULL; u32 adjusted_clock = mode->clock; - int encoder_mode = atombios_get_encoder_mode(encoder); + int encoder_mode = 0; u32 dp_clock = mode->clock; - int bpc = radeon_get_monitor_bpc(connector); - bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); + int bpc = 8; + bool is_duallink = false; /* reset the pll flags */ - radeon_crtc->pll_flags = 0; + pll->flags = 0; if (ASIC_IS_AVIVO(rdev)) { if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ - RADEON_PLL_PREFER_CLOSEST_LOWER); + pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ + RADEON_PLL_PREFER_CLOSEST_LOWER); if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; if (rdev->family < CHIP_RV770) - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; + pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) - radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; } else { - radeon_crtc->pll_flags |= RADEON_PLL_LEGACY; + pll->flags |= RADEON_PLL_LEGACY; if (mode->clock > 200000) /* range limits??? 
*/ - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; + pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; else - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; + pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; } - if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || - (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { - if (connector) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector_atom_dig *dig_connector = - radeon_connector->con_priv; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); + connector = radeon_get_connector_for_encoder(encoder); + bpc = radeon_get_monitor_bpc(connector); + encoder_mode = atombios_get_encoder_mode(encoder); + is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); + if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + dp_clock = dig_connector->dp_clock; + } + } - dp_clock = dig_connector->dp_clock; - } - } + /* use recommended ref_div for ss */ + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (ss_enabled) { + if (ss->refdiv) { + pll->flags |= RADEON_PLL_USE_REF_DIV; + pll->reference_div = ss->refdiv; + if (ASIC_IS_AVIVO(rdev)) + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; + } + } + } - /* use recommended ref_div for ss */ - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (radeon_crtc->ss_enabled) { - if (radeon_crtc->ss.refdiv) { - radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; - radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; - if (ASIC_IS_AVIVO(rdev)) - radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; + if (ASIC_IS_AVIVO(rdev)) { + /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) + adjusted_clock = mode->clock * 2; + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + pll->flags |= RADEON_PLL_IS_LCD; + } else { + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) + pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; + if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) + pll->flags |= RADEON_PLL_USE_REF_DIV; } + break; } } - if (ASIC_IS_AVIVO(rdev)) { - /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ - if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) - adjusted_clock = mode->clock * 2; - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD; - } else { - if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) - radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; - if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) - radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; - } - /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock * accordingly based on the encoder/transmitter to work around * special hw requirements. 
@@ -632,7 +650,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); args.v1.ucTransmitterID = radeon_encoder->encoder_id; args.v1.ucEncodeMode = encoder_mode; - if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) + if (ss_enabled && ss->percentage) args.v1.ucConfig |= ADJUST_DISPLAY_CONFIG_SS_ENABLE; @@ -645,7 +663,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; args.v3.sInput.ucEncodeMode = encoder_mode; args.v3.sInput.ucDispPllConfig = 0; - if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage) + if (ss_enabled && ss->percentage) args.v3.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE; if (ENCODER_MODE_IS_DP(encoder_mode)) { @@ -677,14 +695,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, index, (uint32_t *)&args); adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; if (args.v3.sOutput.ucRefDiv) { - radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; - radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; - radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv; + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; + pll->flags |= RADEON_PLL_USE_REF_DIV; + pll->reference_div = args.v3.sOutput.ucRefDiv; } if (args.v3.sOutput.ucPostDiv) { - radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; - radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV; - radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv; + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; + pll->flags |= RADEON_PLL_USE_POST_DIV; + pll->post_div = args.v3.sOutput.ucPostDiv; } break; default: @@ -819,10 +837,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, args.v3.ucFracFbDiv = frac_fb_div; args.v3.ucPostDiv = post_div; args.v3.ucPpll = pll_id; - if (crtc_id == ATOM_CRTC2) - args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2; - else - args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1; + args.v3.ucMiscInfo = (pll_id << 2); if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; args.v3.ucTransmitterId = encoder_id; @@ -892,29 +907,58 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } -static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = - to_radeon_encoder(radeon_crtc->encoder); - int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder); + struct drm_encoder *encoder = NULL; + struct radeon_encoder *radeon_encoder = NULL; + u32 pll_clock = mode->clock; + u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; + struct radeon_pll *pll; + u32 adjusted_clock; + int encoder_mode = 0; + struct radeon_atom_ss ss; + bool ss_enabled = false; + int bpc = 8; - radeon_crtc->bpc = 8; - radeon_crtc->ss_enabled = false; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); + encoder_mode = atombios_get_encoder_mode(encoder); + break; + } + } + + if (!radeon_encoder) + return; + + switch (radeon_crtc->pll_id) { + case ATOM_PPLL1: + pll = &rdev->clock.p1pll; + break; + case ATOM_PPLL2: + pll = &rdev->clock.p2pll; + break; 
+ case ATOM_DCPLL: + case ATOM_PPLL_INVALID: + default: + pll = &rdev->clock.dcpll; + break; + } if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || - (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) { + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = - radeon_get_connector_for_encoder(radeon_crtc->encoder); + radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; int dp_clock; - radeon_crtc->bpc = radeon_get_monitor_bpc(connector); + bpc = radeon_get_monitor_bpc(connector); switch (encoder_mode) { case ATOM_ENCODER_MODE_DP_MST: @@ -922,54 +966,45 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_ /* DP/eDP */ dp_clock = dig_connector->dp_clock / 10; if (ASIC_IS_DCE4(rdev)) - radeon_crtc->ss_enabled = - radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss, + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_DP, dp_clock); else { if (dp_clock == 16200) { - radeon_crtc->ss_enabled = - radeon_atombios_get_ppll_ss_info(rdev, - &radeon_crtc->ss, + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, ATOM_DP_SS_ID2); - if (!radeon_crtc->ss_enabled) - radeon_crtc->ss_enabled = - radeon_atombios_get_ppll_ss_info(rdev, - &radeon_crtc->ss, + if (!ss_enabled) + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, ATOM_DP_SS_ID1); } else - radeon_crtc->ss_enabled = - radeon_atombios_get_ppll_ss_info(rdev, - &radeon_crtc->ss, + ss_enabled = + radeon_atombios_get_ppll_ss_info(rdev, &ss, ATOM_DP_SS_ID1); } break; case ATOM_ENCODER_MODE_LVDS: if (ASIC_IS_DCE4(rdev)) - radeon_crtc->ss_enabled = - radeon_atombios_get_asic_ss_info(rdev, - &radeon_crtc->ss, - dig->lcd_ss_id, - mode->clock / 10); + ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, + dig->lcd_ss_id, + mode->clock / 10); else - radeon_crtc->ss_enabled = - radeon_atombios_get_ppll_ss_info(rdev, - &radeon_crtc->ss, - dig->lcd_ss_id); + ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, + dig->lcd_ss_id); break; case ATOM_ENCODER_MODE_DVI: if (ASIC_IS_DCE4(rdev)) - radeon_crtc->ss_enabled = - radeon_atombios_get_asic_ss_info(rdev, - &radeon_crtc->ss, + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_TMDS, mode->clock / 10); break; case ATOM_ENCODER_MODE_HDMI: if (ASIC_IS_DCE4(rdev)) - radeon_crtc->ss_enabled = - radeon_atombios_get_asic_ss_info(rdev, - &radeon_crtc->ss, + ss_enabled = + radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_SS_ON_HDMI, mode->clock / 10); break; @@ -979,80 +1014,43 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_ } /* adjust pixel clock as needed */ - radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode); - - return true; -} - -static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = - to_radeon_encoder(radeon_crtc->encoder); - u32 pll_clock = mode->clock; - u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; - struct radeon_pll *pll; - int encoder_mode = 
atombios_get_encoder_mode(radeon_crtc->encoder); - - switch (radeon_crtc->pll_id) { - case ATOM_PPLL1: - pll = &rdev->clock.p1pll; - break; - case ATOM_PPLL2: - pll = &rdev->clock.p2pll; - break; - case ATOM_DCPLL: - case ATOM_PPLL_INVALID: - default: - pll = &rdev->clock.dcpll; - break; - } - - /* update pll params */ - pll->flags = radeon_crtc->pll_flags; - pll->reference_div = radeon_crtc->pll_reference_div; - pll->post_div = radeon_crtc->pll_post_div; + adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) /* TV seems to prefer the legacy algo on some boards */ - radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock, - &fb_div, &frac_fb_div, &ref_div, &post_div); + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); else if (ASIC_IS_AVIVO(rdev)) - radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock, - &fb_div, &frac_fb_div, &ref_div, &post_div); + radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); else - radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock, - &fb_div, &frac_fb_div, &ref_div, &post_div); + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); - atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, - radeon_crtc->crtc_id, &radeon_crtc->ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, - ref_div, fb_div, frac_fb_div, post_div, - radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss); + ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss); - if (radeon_crtc->ss_enabled) { + if (ss_enabled) { /* calculate ss amount and step size */ if (ASIC_IS_DCE4(rdev)) { u32 step_size; - u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000; - radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; - radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & + u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; + ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; + ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; - if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) - step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) / + if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) + step_size = (4 * amount * ref_div * (ss.rate * 2048)) / (125 * 25 * pll->reference_freq / 100); else - step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) / + step_size = (2 * amount * ref_div * (ss.rate * 2048)) / (125 * 25 * pll->reference_freq / 100); - radeon_crtc->ss.step = step_size; + ss.step = step_size; } - atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, - radeon_crtc->crtc_id, &radeon_crtc->ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); } } @@ -1481,251 +1479,85 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) } } -/** - * radeon_get_pll_use_mask - look up a mask of which pplls are in use - * - * @crtc: drm crtc - * - * Returns the mask of which PPLLs (Pixel PLLs) are in use. 
- */ -static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_crtc *test_crtc; - struct radeon_crtc *test_radeon_crtc; - u32 pll_in_use = 0; - - list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { - if (crtc == test_crtc) - continue; - - test_radeon_crtc = to_radeon_crtc(test_crtc); - if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) - pll_in_use |= (1 << test_radeon_crtc->pll_id); - } - return pll_in_use; -} - -/** - * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP - * - * @crtc: drm crtc - * - * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is - * also in DP mode. For DP, a single PPLL can be used for all DP - * crtcs/encoders. - */ -static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_crtc *test_crtc; - struct radeon_crtc *test_radeon_crtc; - - list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { - if (crtc == test_crtc) - continue; - test_radeon_crtc = to_radeon_crtc(test_crtc); - if (test_radeon_crtc->encoder && - ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { - /* for DP use the same PLL for all */ - if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) - return test_radeon_crtc->pll_id; - } - } - return ATOM_PPLL_INVALID; -} - -/** - * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc - * - * @crtc: drm crtc - * @encoder: drm encoder - * - * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can - * be shared (i.e., same clock). - */ -static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_crtc *test_crtc; - struct radeon_crtc *test_radeon_crtc; - u32 adjusted_clock, test_adjusted_clock; - - adjusted_clock = radeon_crtc->adjusted_clock; - - if (adjusted_clock == 0) - return ATOM_PPLL_INVALID; - - list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { - if (crtc == test_crtc) - continue; - test_radeon_crtc = to_radeon_crtc(test_crtc); - if (test_radeon_crtc->encoder && - !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) { - /* check if we are already driving this connector with another crtc */ - if (test_radeon_crtc->connector == radeon_crtc->connector) { - /* if we are, return that pll */ - if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) - return test_radeon_crtc->pll_id; - } - /* for non-DP check the clock */ - test_adjusted_clock = test_radeon_crtc->adjusted_clock; - if ((crtc->mode.clock == test_crtc->mode.clock) && - (adjusted_clock == test_adjusted_clock) && - (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) - return test_radeon_crtc->pll_id; - } - } - return ATOM_PPLL_INVALID; -} - -/** - * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc. - * - * @crtc: drm crtc - * - * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors - * a single PPLL can be used for all DP crtcs/encoders. For non-DP - * monitors a dedicated PPLL must be used. If a particular board has - * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming - * as there is no need to program the PLL itself. If we are not able to - * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to - * avoid messing up an existing monitor. 
- * - * Asic specific PLL information - * - * DCE 6.1 - * - PPLL2 is only available to UNIPHYA (both DP and non-DP) - * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP) - * - * DCE 6.0 - * - PPLL0 is available to all UNIPHY (DP only) - * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC - * - * DCE 5.0 - * - DCPLL is available to all UNIPHY (DP only) - * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC - * - * DCE 3.0/4.0/4.1 - * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC - * - */ static int radeon_atom_pick_pll(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = - to_radeon_encoder(radeon_crtc->encoder); - u32 pll_in_use; - int pll; + struct drm_encoder *test_encoder; + struct drm_crtc *test_crtc; + uint32_t pll_in_use = 0; if (ASIC_IS_DCE61(rdev)) { - struct radeon_encoder_atom_dig *dig = - radeon_encoder->enc_priv; - - if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) && - (dig->linkb == false)) - /* UNIPHY A uses PPLL2 */ - return ATOM_PPLL2; - else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { - /* UNIPHY B/C/D/E/F */ - if (rdev->clock.dp_extclk) - /* skip PPLL programming if using ext clock */ - return ATOM_PPLL_INVALID; - else { - /* use the same PPLL for all DP monitors */ - pll = radeon_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; + list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { + if (test_encoder->crtc && (test_encoder->crtc == crtc)) { + struct radeon_encoder *test_radeon_encoder = + to_radeon_encoder(test_encoder); + struct radeon_encoder_atom_dig *dig = + test_radeon_encoder->enc_priv; + + if ((test_radeon_encoder->encoder_id == + ENCODER_OBJECT_ID_INTERNAL_UNIPHY) && + (dig->linkb == false)) /* UNIPHY A uses PPLL2 */ + return ATOM_PPLL2; } - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = radeon_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; } /* UNIPHY B/C/D/E/F */ - pll_in_use = radeon_get_pll_use_mask(crtc); - if (!(pll_in_use & (1 << ATOM_PPLL0))) + list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_test_crtc; + + if (crtc == test_crtc) + continue; + + radeon_test_crtc = to_radeon_crtc(test_crtc); + if ((radeon_test_crtc->pll_id == ATOM_PPLL0) || + (radeon_test_crtc->pll_id == ATOM_PPLL1)) + pll_in_use |= (1 << radeon_test_crtc->pll_id); + } + if (!(pll_in_use & 4)) return ATOM_PPLL0; - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; + return ATOM_PPLL1; } else if (ASIC_IS_DCE4(rdev)) { - /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, - * depending on the asic: - * DCE4: PPLL or ext clock - * DCE5: PPLL, DCPLL, or ext clock - * DCE6: PPLL, PPLL0, or ext clock - * - * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip - * PPLL/DCPLL programming and only program the DP DTO for the - * crtc virtual pixel clock. 
- */ - if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { - if (rdev->clock.dp_extclk) - /* skip PPLL programming if using ext clock */ - return ATOM_PPLL_INVALID; - else if (ASIC_IS_DCE6(rdev)) - /* use PPLL0 for all DP */ - return ATOM_PPLL0; - else if (ASIC_IS_DCE5(rdev)) - /* use DCPLL for all DP */ - return ATOM_DCPLL; - else { - /* use the same PPLL for all DP monitors */ - pll = radeon_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; + list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { + if (test_encoder->crtc && (test_encoder->crtc == crtc)) { + /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, + * depending on the asic: + * DCE4: PPLL or ext clock + * DCE5: DCPLL or ext clock + * + * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip + * PPLL/DCPLL programming and only program the DP DTO for the + * crtc virtual pixel clock. + */ + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { + if (rdev->clock.dp_extclk) + return ATOM_PPLL_INVALID; + else if (ASIC_IS_DCE6(rdev)) + return ATOM_PPLL0; + else if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; + } } - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = radeon_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; } - /* all other cases */ - pll_in_use = radeon_get_pll_use_mask(crtc); - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } else { - if (ASIC_IS_AVIVO(rdev)) { - /* in DP mode, the DP ref clock can come from either PPLL - * depending on the asic: - * DCE3: PPLL1 or PPLL2 - */ - if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { - /* use the same PPLL for all DP monitors */ - pll = radeon_get_shared_dp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } else { - /* use the same PPLL for all monitors with the same clock */ - pll = radeon_get_shared_nondp_ppll(crtc); - if (pll != ATOM_PPLL_INVALID) - return pll; - } - /* all other cases */ - pll_in_use = radeon_get_pll_use_mask(crtc); - if (!(pll_in_use & (1 << ATOM_PPLL2))) - return ATOM_PPLL2; - if (!(pll_in_use & (1 << ATOM_PPLL1))) - return ATOM_PPLL1; - DRM_ERROR("unable to allocate a PPLL\n"); - return ATOM_PPLL_INVALID; - } else { - /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */ - return radeon_crtc->crtc_id; + + /* otherwise, pick one of the plls */ + list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_test_crtc; + + if (crtc == test_crtc) + continue; + + radeon_test_crtc = to_radeon_crtc(test_crtc); + if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) && + (radeon_test_crtc->pll_id <= ATOM_PPLL2)) + pll_in_use |= (1 << radeon_test_crtc->pll_id); } - } + if (!(pll_in_use & 1)) + return ATOM_PPLL1; + return ATOM_PPLL2; + } else + return radeon_crtc->crtc_id; + } void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) @@ -1756,13 +1588,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = - to_radeon_encoder(radeon_crtc->encoder); + struct drm_encoder *encoder; bool is_tvcv = false; - if (radeon_encoder->active_device & - (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) - is_tvcv = true; + 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + /* find tv std */ + if (encoder->crtc == crtc) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + if (radeon_encoder->active_device & + (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) + is_tvcv = true; + } + } atombios_crtc_set_pll(crtc, adjusted_mode); @@ -1789,34 +1626,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct drm_encoder *encoder; - - /* assign the encoder to the radeon crtc to avoid repeated lookups later */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - radeon_crtc->encoder = encoder; - radeon_crtc->connector = radeon_get_connector_for_encoder(encoder); - break; - } - } - if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) { - radeon_crtc->encoder = NULL; - radeon_crtc->connector = NULL; - return false; - } if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) return false; - if (!atombios_crtc_prepare_pll(crtc, adjusted_mode)) - return false; - /* pick pll */ - radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); - /* if we can't get a PPLL for a non-DP encoder, fail */ - if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) && - !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) - return false; - return true; } @@ -1827,6 +1638,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc) struct radeon_device *rdev = dev->dev_private; radeon_crtc->in_mode_set = true; + /* pick pll */ + radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev)) @@ -1884,10 +1697,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) break; } done: - radeon_crtc->pll_id = ATOM_PPLL_INVALID; - radeon_crtc->adjusted_clock = 0; - radeon_crtc->encoder = NULL; - radeon_crtc->connector = NULL; + radeon_crtc->pll_id = -1; } static const struct drm_crtc_helper_funcs atombios_helper_funcs = { @@ -1936,9 +1746,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev, else radeon_crtc->crtc_offset = 0; } - radeon_crtc->pll_id = ATOM_PPLL_INVALID; - radeon_crtc->adjusted_clock = 0; - radeon_crtc->encoder = NULL; - radeon_crtc->connector = NULL; + radeon_crtc->pll_id = -1; drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index d48224b0d6b4..3623b98ed3fe 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -653,7 +653,9 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, return false; } - DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); + DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n", + link_status[0], link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); return true; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c index 806cbcc94fdd..6e8803a1170c 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c @@ -28,251 +28,9 @@ #include "radeon_drm.h" #include "radeon.h" #include "atom.h" -#include extern int atom_debug; -static u8 
-radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev) -{ - u8 backlight_level; - u32 bios_2_scratch; - - if (rdev->family >= CHIP_R600) - bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); - else - bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); - - backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >> - ATOM_S2_CURRENT_BL_LEVEL_SHIFT); - - return backlight_level; -} - -static void -radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev, - u8 backlight_level) -{ - u32 bios_2_scratch; - - if (rdev->family >= CHIP_R600) - bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); - else - bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); - - bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; - bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & - ATOM_S2_CURRENT_BL_LEVEL_MASK); - - if (rdev->family >= CHIP_R600) - WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); - else - WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch); -} - -u8 -atombios_get_backlight_level(struct radeon_encoder *radeon_encoder) -{ - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - - if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) - return 0; - - return radeon_atom_get_backlight_level_from_reg(rdev); -} - -void -atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) -{ - struct drm_encoder *encoder = &radeon_encoder->base; - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder_atom_dig *dig; - DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; - int index; - - if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) - return; - - if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) && - radeon_encoder->enc_priv) { - dig = radeon_encoder->enc_priv; - dig->backlight_level = level; - radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level); - - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_LVDS: - case ENCODER_OBJECT_ID_INTERNAL_LVTM1: - index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); - if (dig->backlight_level == 0) { - args.ucAction = ATOM_LCD_BLOFF; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } else { - args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->backlight_level == 0) - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); - else { - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); - } - break; - default: - break; - } - } -} - -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - -static u8 radeon_atom_bl_level(struct backlight_device *bd) -{ - u8 level; - - /* Convert brightness to hardware level */ - if (bd->props.brightness < 0) - level = 0; - else if (bd->props.brightness > RADEON_MAX_BL_LEVEL) - level = RADEON_MAX_BL_LEVEL; - else - level = bd->props.brightness; - - return level; -} - -static int 
radeon_atom_backlight_update_status(struct backlight_device *bd) -{ - struct radeon_backlight_privdata *pdata = bl_get_data(bd); - struct radeon_encoder *radeon_encoder = pdata->encoder; - - atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd)); - - return 0; -} - -static int radeon_atom_backlight_get_brightness(struct backlight_device *bd) -{ - struct radeon_backlight_privdata *pdata = bl_get_data(bd); - struct radeon_encoder *radeon_encoder = pdata->encoder; - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - - return radeon_atom_get_backlight_level_from_reg(rdev); -} - -static const struct backlight_ops radeon_atom_backlight_ops = { - .get_brightness = radeon_atom_backlight_get_brightness, - .update_status = radeon_atom_backlight_update_status, -}; - -void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, - struct drm_connector *drm_connector) -{ - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - struct backlight_device *bd; - struct backlight_properties props; - struct radeon_backlight_privdata *pdata; - struct radeon_encoder_atom_dig *dig; - u8 backlight_level; - - if (!radeon_encoder->enc_priv) - return; - - if (!rdev->is_atom_bios) - return; - - if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) - return; - - pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL); - if (!pdata) { - DRM_ERROR("Memory allocation failed\n"); - goto error; - } - - memset(&props, 0, sizeof(props)); - props.max_brightness = RADEON_MAX_BL_LEVEL; - props.type = BACKLIGHT_RAW; - bd = backlight_device_register("radeon_bl", &drm_connector->kdev, - pdata, &radeon_atom_backlight_ops, &props); - if (IS_ERR(bd)) { - DRM_ERROR("Backlight registration failed\n"); - goto error; - } - - pdata->encoder = radeon_encoder; - - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); - - dig = radeon_encoder->enc_priv; - dig->bl_dev = bd; - - bd->props.brightness = radeon_atom_backlight_get_brightness(bd); - bd->props.power = FB_BLANK_UNBLANK; - backlight_update_status(bd); - - DRM_INFO("radeon atom DIG backlight initialized\n"); - - return; - -error: - kfree(pdata); - return; -} - -static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder) -{ - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - struct backlight_device *bd = NULL; - struct radeon_encoder_atom_dig *dig; - - if (!radeon_encoder->enc_priv) - return; - - if (!rdev->is_atom_bios) - return; - - if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) - return; - - dig = radeon_encoder->enc_priv; - bd = dig->bl_dev; - dig->bl_dev = NULL; - - if (bd) { - struct radeon_legacy_backlight_privdata *pdata; - - pdata = bl_get_data(bd); - backlight_device_unregister(bd); - kfree(pdata); - - DRM_INFO("radeon atom LVDS backlight unloaded\n"); - } -} - -#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */ - -void radeon_atom_backlight_init(struct radeon_encoder *encoder) -{ -} - -static void radeon_atom_backlight_exit(struct radeon_encoder *encoder) -{ -} - -#endif - /* evil but including atombios.h is much worse */ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, struct drm_display_mode *mode); @@ -451,32 +209,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) } -static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) -{ - struct drm_connector *connector = 
radeon_get_connector_for_encoder(encoder); - int bpc = 8; - - if (connector) - bpc = radeon_get_monitor_bpc(connector); - - switch (bpc) { - case 0: - return PANEL_BPC_UNDEFINE; - case 6: - return PANEL_6BIT_PER_COLOR; - case 8: - default: - return PANEL_8BIT_PER_COLOR; - case 10: - return PANEL_10BIT_PER_COLOR; - case 12: - return PANEL_12BIT_PER_COLOR; - case 16: - return PANEL_16BIT_PER_COLOR; - } -} - - union dvo_encoder_control { ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; @@ -674,8 +406,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) return ATOM_ENCODER_MODE_DP; /* DVO is always DVO */ - if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) || - (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)) + if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) return ATOM_ENCODER_MODE_DVO; connector = radeon_get_connector_for_encoder(encoder); @@ -804,6 +535,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo int dp_clock = 0; int dp_lane_count = 0; int hpd_id = RADEON_HPD_NONE; + int bpc = 8; if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -813,6 +545,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo dp_clock = dig_connector->dp_clock; dp_lane_count = dig_connector->dp_lane_count; hpd_id = radeon_connector->hpd.hpd; + bpc = radeon_get_monitor_bpc(connector); } /* no dig encoder assigned */ @@ -879,17 +612,37 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo else args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder); - if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode)) + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) args.v3.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.ucLaneNum = 8; else args.v3.ucLaneNum = 4; - if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000)) + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; args.v3.acConfig.ucDigSel = dig->dig_encoder; - args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder); + switch (bpc) { + case 0: + args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; + break; + case 6: + args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; + break; + case 8: + default: + args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case 10: + args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case 12: + args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case 16: + args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + } break; case 4: args.v4.ucAction = action; @@ -899,21 +652,41 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo else args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder); - if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) args.v4.ucLaneNum = dp_lane_count; else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.ucLaneNum = 8; else args.v4.ucLaneNum = 4; - if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) { + if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) { if (dp_clock == 270000) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; else if (dp_clock == 540000) args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; } args.v4.acConfig.ucDigSel = dig->dig_encoder; - args.v4.ucBitPerColor = 
radeon_atom_get_bpc(encoder); + switch (bpc) { + case 0: + args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; + break; + case 6: + args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; + break; + case 8: + default: + args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case 10: + args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case 12: + args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case 16: + args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + } if (hpd_id == RADEON_HPD_NONE) args.v4.ucHPD_ID = 0; else @@ -1026,7 +799,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v1.asMode.ucLaneSet = lane_set; } else { if (is_dp) - args.v1.usPixelClock = cpu_to_le16(dp_clock / 10); + args.v1.usPixelClock = + cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else @@ -1083,7 +857,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v2.asMode.ucLaneSet = lane_set; } else { if (is_dp) - args.v2.usPixelClock = cpu_to_le16(dp_clock / 10); + args.v2.usPixelClock = + cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else @@ -1125,7 +900,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v3.asMode.ucLaneSet = lane_set; } else { if (is_dp) - args.v3.usPixelClock = cpu_to_le16(dp_clock / 10); + args.v3.usPixelClock = + cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else @@ -1184,7 +960,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v4.asMode.ucLaneSet = lane_set; } else { if (is_dp) - args.v4.usPixelClock = cpu_to_le16(dp_clock / 10); + args.v4.usPixelClock = + cpu_to_le16(dp_clock / 10); else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); else @@ -1370,6 +1147,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, int dp_lane_count = 0; int connector_object_id = 0; u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; + int bpc = 8; if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) connector = radeon_get_connector_for_encoder_init(encoder); @@ -1385,6 +1163,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, dp_lane_count = dig_connector->dp_lane_count; connector_object_id = (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + bpc = radeon_get_monitor_bpc(connector); } memset(&args, 0, sizeof(args)); @@ -1442,7 +1221,27 @@ atombios_external_encoder_setup(struct drm_encoder *encoder, args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; break; } - args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder); + switch (bpc) { + case 0: + args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE; + break; + case 6: + args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR; + break; + case 8: + default: + args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; + case 10: + args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR; + break; + case 12: + args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR; + break; + case 16: + 
args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR; + break; + } break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); @@ -2487,8 +2286,6 @@ static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { void radeon_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - radeon_atom_backlight_exit(radeon_encoder); kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); @@ -2498,7 +2295,7 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = { .destroy = radeon_enc_destroy, }; -static struct radeon_encoder_atom_dac * +struct radeon_encoder_atom_dac * radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) { struct drm_device *dev = radeon_encoder->base.dev; @@ -2512,7 +2309,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) return dac; } -static struct radeon_encoder_atom_dig * +struct radeon_encoder_atom_dig * radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) { int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index c4ded396b78d..e93b80a6d4e9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -37,16 +37,6 @@ #define EVERGREEN_PFP_UCODE_SIZE 1120 #define EVERGREEN_PM4_UCODE_SIZE 1376 -static const u32 crtc_offsets[6] = -{ - EVERGREEN_CRTC0_REGISTER_OFFSET, - EVERGREEN_CRTC1_REGISTER_OFFSET, - EVERGREEN_CRTC2_REGISTER_OFFSET, - EVERGREEN_CRTC3_REGISTER_OFFSET, - EVERGREEN_CRTC4_REGISTER_OFFSET, - EVERGREEN_CRTC5_REGISTER_OFFSET -}; - static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); void evergreen_pcie_gen2_enable(struct radeon_device *rdev); @@ -119,19 +109,17 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) */ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) { + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; int i; - if (crtc >= rdev->num_crtc) - return; - - if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) { + if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) { for (i = 0; i < rdev->usec_timeout; i++) { - if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)) + if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)) break; udelay(1); } for (i = 0; i < rdev->usec_timeout; i++) { - if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) + if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK) break; udelay(1); } @@ -325,64 +313,6 @@ void sumo_pm_init_profile(struct radeon_device *rdev) rdev->pm.power_state[idx].num_clock_modes - 1; } -/** - * btc_pm_init_profile - Initialize power profiles callback. - * - * @rdev: radeon_device pointer - * - * Initialize the power states used in profile mode - * (BTC, cayman). - * Used for profile mode only. 
- */ -void btc_pm_init_profile(struct radeon_device *rdev) -{ - int idx; - - /* default */ - rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; - rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; - rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; - /* starting with BTC, there is one state that is used for both - * MH and SH. Difference is that we always use the high clock index for - * mclk. - */ - if (rdev->flags & RADEON_IS_MOBILITY) - idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); - else - idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); - /* low sh */ - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; - /* mid sh */ - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; - /* high sh */ - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; - /* low mh */ - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; - /* mid mh */ - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; - /* high mh */ - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; - rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; -} - /** * evergreen_pm_misc - set additional pm hw parameters callback. 
* @@ -1179,7 +1109,7 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev) } } -static int evergreen_pcie_gart_enable(struct radeon_device *rdev) +int evergreen_pcie_gart_enable(struct radeon_device *rdev) { u32 tmp; int r; @@ -1238,7 +1168,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev) return 0; } -static void evergreen_pcie_gart_disable(struct radeon_device *rdev) +void evergreen_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; @@ -1263,7 +1193,7 @@ static void evergreen_pcie_gart_disable(struct radeon_device *rdev) radeon_gart_table_vram_unpin(rdev); } -static void evergreen_pcie_gart_fini(struct radeon_device *rdev) +void evergreen_pcie_gart_fini(struct radeon_device *rdev) { evergreen_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); @@ -1271,7 +1201,7 @@ static void evergreen_pcie_gart_fini(struct radeon_device *rdev) } -static void evergreen_agp_enable(struct radeon_device *rdev) +void evergreen_agp_enable(struct radeon_device *rdev) { u32 tmp; @@ -1299,103 +1229,116 @@ static void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { - u32 crtc_enabled, tmp, frame_count, blackout; - int i, j; - save->vga_render_control = RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - /* disable VGA render */ + /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); - /* blank the display controllers */ - for (i = 0; i < rdev->num_crtc; i++) { - crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; - if (crtc_enabled) { - save->crtc_enabled[i] = true; - if (ASIC_IS_DCE6(rdev)) { - tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); - if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { - radeon_wait_for_vblank(rdev, i); - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; - WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } - } else { - tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); - if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { - radeon_wait_for_vblank(rdev, i); - tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; - WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); - } - } - /* wait for the next frame */ - frame_count = radeon_get_vblank_counter(rdev, i); - for (j = 0; j < rdev->usec_timeout; j++) { - if (radeon_get_vblank_counter(rdev, i) != frame_count) - break; - udelay(1); - } - } + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); + if (rdev->num_crtc >= 4) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + } + if (rdev->num_crtc >= 6) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + } + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + if (rdev->num_crtc >= 4) { + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + 
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + if (rdev->num_crtc >= 4) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - radeon_mc_wait_for_idle(rdev); - - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - if ((blackout & BLACKOUT_MODE_MASK) != 1) { - /* Block CPU access */ - WREG32(BIF_FB_EN, 0); - /* blackout the MC */ - blackout &= ~BLACKOUT_MODE_MASK; - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); + WREG32(D1VGA_CONTROL, 0); + WREG32(D2VGA_CONTROL, 0); + if (rdev->num_crtc >= 4) { + WREG32(EVERGREEN_D3VGA_CONTROL, 0); + WREG32(EVERGREEN_D4VGA_CONTROL, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(EVERGREEN_D5VGA_CONTROL, 0); + WREG32(EVERGREEN_D6VGA_CONTROL, 0); } } void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) { - u32 tmp, frame_count; - int i, j; + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); - /* update crtc base addresses */ - for (i = 0; i < rdev->num_crtc; i++) { - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + if (rdev->num_crtc >= 4) { + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, (u32)rdev->mc.vram_start); - } - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); - /* unblackout the MC */ - tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); - tmp &= ~BLACKOUT_MODE_MASK; - WREG32(MC_SHARED_BLACKOUT_CNTL, tmp); - /* allow CPU access */ - WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + 
upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + } + if (rdev->num_crtc >= 6) { + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); - for (i = 0; i < rdev->num_crtc; i++) { - if (save->crtc_enabled) { - if (ASIC_IS_DCE6(rdev)) { - tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; - WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - } else { - tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); - tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; - WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); - } - /* wait for the next frame */ - frame_count = radeon_get_vblank_counter(rdev, i); - for (j = 0; j < rdev->usec_timeout; j++) { - if (radeon_get_vblank_counter(rdev, i) != frame_count) - break; - udelay(1); - } - } + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); } - /* Unlock vga access */ + + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); + /* Unlock host access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); WREG32(VGA_RENDER_CONTROL, save->vga_render_control); @@ -1614,7 +1557,7 @@ static int evergreen_cp_start(struct radeon_device *rdev) return 0; } -static int evergreen_cp_resume(struct radeon_device *rdev) +int evergreen_cp_resume(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; u32 tmp; @@ -2390,10 +2333,22 @@ int evergreen_asic_reset(struct radeon_device *rdev) u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc) { - if (crtc >= rdev->num_crtc) + switch (crtc) { + case 0: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET); + case 1: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET); + case 2: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET); + case 3: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET); + case 4: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET); + case 5: + return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET); + default: return 0; - else - return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); + } } void evergreen_disable_interrupt_state(struct radeon_device *rdev) @@ -2586,6 +2541,10 @@ int evergreen_irq_set(struct radeon_device *rdev) DRM_DEBUG("evergreen_irq_set: hdmi 5\n"); afmt6 |= 
AFMT_AZ_FORMAT_WTRIG_MASK; } + if (rdev->irq.gui_idle) { + DRM_DEBUG("gui idle\n"); + grbm_int_cntl |= GUI_IDLE_INT_ENABLE; + } if (rdev->family >= CHIP_CAYMAN) { cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl); @@ -2767,7 +2726,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev) } } -static void evergreen_irq_disable(struct radeon_device *rdev) +void evergreen_irq_disable(struct radeon_device *rdev) { r600_disable_interrupts(rdev); /* Wait and acknowledge irq */ @@ -3120,6 +3079,7 @@ int evergreen_irq_process(struct radeon_device *rdev) break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); + wake_up(&rdev->irq.idle_queue); break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c index 2ceab2b52d69..e44a62a07fe3 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c @@ -846,16 +846,6 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, return -EINVAL; } - if (!mipmap) { - if (llevel) { - dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n", - __func__, __LINE__); - return -EINVAL; - } else { - return 0; /* everything's ok */ - } - } - /* check mipmap size */ for (i = 1; i <= llevel; i++) { unsigned w, h, d; @@ -1005,7 +995,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) * Assume that chunk_ib_index is properly set. Will return -EINVAL * if packet is bigger than remaining ib size. or if packets is unknown. **/ -static int evergreen_cs_packet_parse(struct radeon_cs_parser *p, +int evergreen_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx) { @@ -1090,27 +1080,6 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, return 0; } -/** - * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP - * @p: structure holding the parser context. - * - * Check if the next packet is a relocation packet3. - **/ -static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) -{ - struct radeon_cs_packet p3reloc; - int r; - - r = evergreen_cs_packet_parse(p, &p3reloc, p->idx); - if (r) { - return false; - } - if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { - return false; - } - return true; -} - /** * evergreen_cs_packet_next_vline() - parse userspace VLINE packet * @parser: parser structure holding parsing context. @@ -2361,7 +2330,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, for (i = 0; i < (pkt->count / 8); i++) { struct radeon_bo *texture, *mipmap; u32 toffset, moffset; - u32 size, offset, mip_address, tex_dim; + u32 size, offset; switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) { case SQ_TEX_VTX_VALID_TEXTURE: @@ -2390,28 +2359,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } texture = reloc->robj; toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - /* tex mip base */ - tex_dim = ib[idx+1+(i*8)+0] & 0x7; - mip_address = ib[idx+1+(i*8)+3]; - - if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) && - !mip_address && - !evergreen_cs_packet_next_is_pkt3_nop(p)) { - /* MIP_ADDRESS should point to FMASK for an MSAA texture. - * It should be 0 if FMASK is disabled. 
*/ - moffset = 0; - mipmap = NULL; - } else { - r = evergreen_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("bad SET_RESOURCE (tex)\n"); - return -EINVAL; - } - moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - mipmap = reloc->robj; + r = evergreen_cs_packet_next_reloc(p, &reloc); + if (r) { + DRM_ERROR("bad SET_RESOURCE (tex)\n"); + return -EINVAL; } - + moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + mipmap = reloc->robj; r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8)); if (r) return r; diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_reg.h b/trunk/drivers/gpu/drm/radeon/evergreen_reg.h index 034f4c22e5db..8beac1065025 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/trunk/drivers/gpu/drm/radeon/evergreen_reg.h @@ -218,8 +218,6 @@ #define EVERGREEN_CRTC_CONTROL 0x6e70 # define EVERGREEN_CRTC_MASTER_EN (1 << 0) # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) -#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74 -# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8) #define EVERGREEN_CRTC_STATUS 0x6e8c # define EVERGREEN_CRTC_V_BLANK (1 << 0) #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 diff --git a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h index df542f1a5dfb..79347855d9bf 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreend.h +++ b/trunk/drivers/gpu/drm/radeon/evergreend.h @@ -87,10 +87,6 @@ #define CONFIG_MEMSIZE 0x5428 -#define BIF_FB_EN 0x5490 -#define FB_READ_EN (1 << 0) -#define FB_WRITE_EN (1 << 1) - #define CP_COHER_BASE 0x85F8 #define CP_STALLED_STAT1 0x8674 #define CP_STALLED_STAT2 0x8678 @@ -434,9 +430,6 @@ #define NOOFCHAN_MASK 0x00003000 #define MC_SHARED_CHREMAP 0x2008 -#define MC_SHARED_BLACKOUT_CNTL 0x20ac -#define BLACKOUT_MODE_MASK 0x00000007 - #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 #define NOOFBANK_MASK 0x00000003 diff --git a/trunk/drivers/gpu/drm/radeon/ni.c b/trunk/drivers/gpu/drm/radeon/ni.c index 9a46f7d4e61f..853800e8582f 100644 --- a/trunk/drivers/gpu/drm/radeon/ni.c +++ b/trunk/drivers/gpu/drm/radeon/ni.c @@ -726,7 +726,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev) WREG32(VM_INVALIDATE_REQUEST, 1); } -static int cayman_pcie_gart_enable(struct radeon_device *rdev) +int cayman_pcie_gart_enable(struct radeon_device *rdev) { int i, r; @@ -782,7 +782,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 0); WREG32(VM_CONTEXT1_CNTL, 0); - WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); cayman_pcie_gart_tlb_flush(rdev); @@ -793,7 +793,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) return 0; } -static void cayman_pcie_gart_disable(struct radeon_device *rdev) +void cayman_pcie_gart_disable(struct radeon_device *rdev) { /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); @@ -813,7 +813,7 @@ static void cayman_pcie_gart_disable(struct radeon_device *rdev) radeon_gart_table_vram_unpin(rdev); } -static void cayman_pcie_gart_fini(struct radeon_device *rdev) +void cayman_pcie_gart_fini(struct radeon_device *rdev) { cayman_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); @@ -879,13 +879,12 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) #endif (ib->gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); - radeon_ring_write(ring, 
ib->length_dw | - (ib->vm ? (ib->vm->id << 24) : 0)); + radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24)); /* flush read cache over gart for this vmid */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); + radeon_ring_write(ring, ib->vm_id); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); radeon_ring_write(ring, 0xFFFFFFFF); @@ -1005,7 +1004,7 @@ static void cayman_cp_fini(struct radeon_device *rdev) radeon_scratch_free(rdev, ring->rptr_save_reg); } -static int cayman_cp_resume(struct radeon_device *rdev) +int cayman_cp_resume(struct radeon_device *rdev) { static const int ridx[] = { RADEON_RING_TYPE_GFX_INDEX, @@ -1497,16 +1496,53 @@ void cayman_vm_fini(struct radeon_device *rdev) { } -#define R600_ENTRY_VALID (1 << 0) +int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id) +{ + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn); + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12); + /* flush hdp cache */ + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* bits 0-7 are the VM contexts0-7 */ + WREG32(VM_INVALIDATE_REQUEST, 1 << id); + return 0; +} + +void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) +{ + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0); + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0); + /* flush hdp cache */ + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* bits 0-7 are the VM contexts0-7 */ + WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id); +} + +void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm) +{ + if (vm->id == -1) + return; + + /* flush hdp cache */ + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* bits 0-7 are the VM contexts0-7 */ + WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id); +} + +#define R600_PTE_VALID (1 << 0) #define R600_PTE_SYSTEM (1 << 1) #define R600_PTE_SNOOPED (1 << 2) #define R600_PTE_READABLE (1 << 5) #define R600_PTE_WRITEABLE (1 << 6) -uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags) +uint32_t cayman_vm_page_flags(struct radeon_device *rdev, + struct radeon_vm *vm, + uint32_t flags) { uint32_t r600_flags = 0; - r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0; + + r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; if (flags & RADEON_VM_PAGE_SYSTEM) { @@ -1516,76 +1552,12 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags) return r600_flags; } -/** - * cayman_vm_set_page - update the page tables using the CP - * - * @rdev: radeon_device pointer - * @pe: addr of the page entry - * @addr: dst addr to write into pe - * @count: number of page entries to update - * @incr: increase next addr by incr bytes - * @flags: access flags - * - * Update the page tables using the CP (cayman-si). 
- */ -void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; - uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - int i; - - radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2)); - radeon_ring_write(ring, pe); - radeon_ring_write(ring, upper_32_bits(pe) & 0xff); - for (i = 0; i < count; ++i) { - uint64_t value = 0; - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - addr += incr; - - } else if (flags & RADEON_VM_PAGE_VALID) { - value = addr; - addr += incr; - } - - value |= r600_flags; - radeon_ring_write(ring, value); - radeon_ring_write(ring, upper_32_bits(value)); - } -} - -/** - * cayman_vm_flush - vm flush using the CP - * - * @rdev: radeon_device pointer - * - * Update the page table base and flush the VM TLB - * using the CP (cayman-si). - */ -void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm, + unsigned pfn, uint64_t addr, uint32_t flags) { - struct radeon_ring *ring = &rdev->ring[ridx]; - - if (vm == NULL) - return; - - radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0)); - radeon_ring_write(ring, 0); - - radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0)); - radeon_ring_write(ring, vm->last_pfn); + void __iomem *ptr = (void *)vm->pt; - radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0)); - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - - /* flush hdp cache */ - radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); - radeon_ring_write(ring, 0x1); - - /* bits 0-7 are the VM contexts0-7 */ - radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); - radeon_ring_write(ring, 1 << vm->id); + addr = addr & 0xFFFFFFFFFFFFF000ULL; + addr |= flags; + writeq(addr, ptr + (pfn * 8)); } diff --git a/trunk/drivers/gpu/drm/radeon/nid.h b/trunk/drivers/gpu/drm/radeon/nid.h index 2423d1b5d385..870db340d377 100644 --- a/trunk/drivers/gpu/drm/radeon/nid.h +++ b/trunk/drivers/gpu/drm/radeon/nid.h @@ -585,7 +585,6 @@ #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 #define PACKET3_SET_RESOURCE_INDIRECT 0x74 #define PACKET3_SET_APPEND_CNT 0x75 -#define PACKET3_ME_WRITE 0x7A #endif diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index b41237bf884b..8d7e33a0b243 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -80,12 +80,10 @@ MODULE_FIRMWARE(FIRMWARE_R520); */ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) { + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; int i; - if (crtc >= rdev->num_crtc) - return; - - if (crtc == 0) { + if (radeon_crtc->crtc_id == 0) { if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { for (i = 0; i < rdev->usec_timeout; i++) { if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)) @@ -700,6 +698,9 @@ int r100_irq_set(struct radeon_device *rdev) if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { tmp |= RADEON_SW_INT_ENABLE; } + if (rdev->irq.gui_idle) { + tmp |= RADEON_GUI_IDLE_MASK; + } if (rdev->irq.crtc_vblank_int[0] || atomic_read(&rdev->irq.pflip[0])) { tmp |= RADEON_CRTC_VBLANK_MASK; @@ -736,6 +737,12 @@ static uint32_t r100_irq_ack(struct radeon_device *rdev) RADEON_CRTC_VBLANK_STAT | 
RADEON_CRTC2_VBLANK_STAT | RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; + /* the interrupt works, but the status bit is permanently asserted */ + if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { + if (!rdev->irq.gui_idle_acked) + irq_mask |= RADEON_GUI_IDLE_STAT; + } + if (irqs) { WREG32(RADEON_GEN_INT_STATUS, irqs); } @@ -747,6 +754,9 @@ int r100_irq_process(struct radeon_device *rdev) uint32_t status, msi_rearm; bool queue_hotplug = false; + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; + status = r100_irq_ack(rdev); if (!status) { return IRQ_NONE; @@ -759,6 +769,11 @@ int r100_irq_process(struct radeon_device *rdev) if (status & RADEON_SW_INT_TEST) { radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); } + /* gui idle interrupt */ + if (status & RADEON_GUI_IDLE_STAT) { + rdev->irq.gui_idle_acked = true; + wake_up(&rdev->irq.idle_queue); + } /* Vertical blank interrupts */ if (status & RADEON_CRTC_VBLANK_STAT) { if (rdev->irq.crtc_vblank_int[0]) { @@ -788,6 +803,8 @@ int r100_irq_process(struct radeon_device *rdev) } status = r100_irq_ack(rdev); } + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (rdev->msi_enabled) { @@ -2513,7 +2530,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track /* * Global GPU functions */ -static void r100_errata(struct radeon_device *rdev) +void r100_errata(struct radeon_device *rdev) { rdev->pll_errata = 0; @@ -2528,7 +2545,51 @@ static void r100_errata(struct radeon_device *rdev) } } -static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) +/* Wait for vertical sync on primary CRTC */ +void r100_gpu_wait_for_vsync(struct radeon_device *rdev) +{ + uint32_t crtc_gen_cntl, tmp; + int i; + + crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); + if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) || + !(crtc_gen_cntl & RADEON_CRTC_EN)) { + return; + } + /* Clear the CRTC_VBLANK_SAVE bit */ + WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(RADEON_CRTC_STATUS); + if (tmp & RADEON_CRTC_VBLANK_SAVE) { + return; + } + DRM_UDELAY(1); + } +} + +/* Wait for vertical sync on secondary CRTC */ +void r100_gpu_wait_for_vsync2(struct radeon_device *rdev) +{ + uint32_t crtc2_gen_cntl, tmp; + int i; + + crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); + if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) || + !(crtc2_gen_cntl & RADEON_CRTC2_EN)) + return; + + /* Clear the CRTC_VBLANK_SAVE bit */ + WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR); + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(RADEON_CRTC2_STATUS); + if (tmp & RADEON_CRTC2_VBLANK_SAVE) { + return; + } + DRM_UDELAY(1); + } +} + +int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n) { unsigned i; uint32_t tmp; @@ -2889,7 +2950,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state) WREG32(RADEON_CONFIG_CNTL, temp); } -static void r100_mc_init(struct radeon_device *rdev) +void r100_mc_init(struct radeon_device *rdev) { u64 base; @@ -2961,7 +3022,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) r100_pll_errata_after_data(rdev); } -static void r100_set_safe_registers(struct radeon_device *rdev) +void r100_set_safe_registers(struct radeon_device *rdev) { if (ASIC_IS_RN50(rdev)) { rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; @@ -3756,10 +3817,9 @@ int r100_ib_test(struct 
radeon_device *rdev, struct radeon_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256); + r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256); if (r) { - DRM_ERROR("radeon: failed to get ib (%d).\n", r); - goto free_scratch; + return r; } ib.ptr[0] = PACKET0(scratch, 0); ib.ptr[1] = 0xDEADBEEF; @@ -3772,13 +3832,13 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.length_dw = 8; r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { - DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); - goto free_ib; + radeon_scratch_free(rdev, scratch); + radeon_ib_free(rdev, &ib); + return r; } r = radeon_fence_wait(ib.fence, false); if (r) { - DRM_ERROR("radeon: fence wait failed (%d).\n", r); - goto free_ib; + return r; } for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -3794,10 +3854,8 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) scratch, tmp); r = -EINVAL; } -free_ib: - radeon_ib_free(rdev, &ib); -free_scratch: radeon_scratch_free(rdev, scratch); + radeon_ib_free(rdev, &ib); return r; } @@ -3906,7 +3964,7 @@ static void r100_mc_program(struct radeon_device *rdev) r100_mc_resume(rdev, &save); } -static void r100_clock_startup(struct radeon_device *rdev) +void r100_clock_startup(struct radeon_device *rdev) { u32 tmp; diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index 4949bfc14b58..646a1927dda7 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -296,7 +296,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) radeon_ring_unlock_commit(rdev, ring); } -static void r300_errata(struct radeon_device *rdev) +void r300_errata(struct radeon_device *rdev) { rdev->pll_errata = 0; @@ -322,7 +322,7 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r300_gpu_init(struct radeon_device *rdev) +void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; diff --git a/trunk/drivers/gpu/drm/radeon/r520.c b/trunk/drivers/gpu/drm/radeon/r520.c index 28b4f871aaf4..079d3c52c08a 100644 --- a/trunk/drivers/gpu/drm/radeon/r520.c +++ b/trunk/drivers/gpu/drm/radeon/r520.c @@ -119,7 +119,7 @@ static void r520_vram_get_type(struct radeon_device *rdev) rdev->mc.vram_width *= 2; } -static void r520_mc_init(struct radeon_device *rdev) +void r520_mc_init(struct radeon_device *rdev) { r520_vram_get_type(rdev); @@ -131,7 +131,7 @@ static void r520_mc_init(struct radeon_device *rdev) radeon_update_bandwidth_info(rdev); } -static void r520_mc_program(struct radeon_device *rdev) +void r520_mc_program(struct radeon_device *rdev) { struct rv515_mc_save save; diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 39b743fff791..d79c639ae739 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -98,7 +98,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev); /* r600,rv610,rv630,rv620,rv635,rv670 */ int r600_mc_wait_for_idle(struct radeon_device *rdev); -static void r600_gpu_init(struct radeon_device *rdev); +void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); void r600_irq_disable(struct radeon_device *rdev); static void r600_pcie_gen2_enable(struct radeon_device *rdev); @@ -881,7 +881,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) return radeon_gart_table_vram_alloc(rdev); } -static int 
r600_pcie_gart_enable(struct radeon_device *rdev) +int r600_pcie_gart_enable(struct radeon_device *rdev) { u32 tmp; int r, i; @@ -938,7 +938,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) return 0; } -static void r600_pcie_gart_disable(struct radeon_device *rdev) +void r600_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; int i; @@ -971,14 +971,14 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev) radeon_gart_table_vram_unpin(rdev); } -static void r600_pcie_gart_fini(struct radeon_device *rdev) +void r600_pcie_gart_fini(struct radeon_device *rdev) { radeon_gart_fini(rdev); r600_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); } -static void r600_agp_enable(struct radeon_device *rdev) +void r600_agp_enable(struct radeon_device *rdev) { u32 tmp; int i; @@ -1158,7 +1158,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc } } -static int r600_mc_init(struct radeon_device *rdev) +int r600_mc_init(struct radeon_device *rdev) { u32 tmp; int chansize, numchan; @@ -1258,7 +1258,7 @@ void r600_vram_scratch_fini(struct radeon_device *rdev) * reset, it's up to the caller to determine if the GPU needs one. We * might add an helper function to check that. */ -static int r600_gpu_soft_reset(struct radeon_device *rdev) +int r600_gpu_soft_reset(struct radeon_device *rdev) { struct rv515_mc_save save; u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | @@ -1433,7 +1433,7 @@ int r600_count_pipe_bits(uint32_t val) return ret; } -static void r600_gpu_init(struct radeon_device *rdev) +void r600_gpu_init(struct radeon_device *rdev) { u32 tiling_config; u32 ramcfg; @@ -2347,7 +2347,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg) /* FIXME: implement */ } -static int r600_startup(struct radeon_device *rdev) +int r600_startup(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; int r; @@ -2635,10 +2635,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) return r; } WREG32(scratch, 0xCAFEDEAD); - r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); + r = radeon_ib_get(rdev, ring->idx, &ib, 256); if (r) { DRM_ERROR("radeon: failed to get ib (%d).\n", r); - goto free_scratch; + return r; } ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); @@ -2646,13 +2646,15 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) ib.length_dw = 3; r = radeon_ib_schedule(rdev, &ib, NULL); if (r) { + radeon_scratch_free(rdev, scratch); + radeon_ib_free(rdev, &ib); DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); - goto free_ib; + return r; } r = radeon_fence_wait(ib.fence, false); if (r) { DRM_ERROR("radeon: fence wait failed (%d).\n", r); - goto free_ib; + return r; } for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(scratch); @@ -2667,10 +2669,8 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) scratch, tmp); r = -EINVAL; } -free_ib: - radeon_ib_free(rdev, &ib); -free_scratch: radeon_scratch_free(rdev, scratch); + radeon_ib_free(rdev, &ib); return r; } @@ -3088,6 +3088,10 @@ int r600_irq_set(struct radeon_device *rdev) DRM_DEBUG("r600_irq_set: hdmi 0\n"); hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK; } + if (rdev->irq.gui_idle) { + DRM_DEBUG("gui idle\n"); + grbm_int_cntl |= GUI_IDLE_INT_ENABLE; + } WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DxMODE_INT_MASK, mode_int); @@ -3471,6 +3475,7 @@ int r600_irq_process(struct radeon_device *rdev) 
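Together with GUI_IDLE_INT_ENABLE unmasked in r600_irq_set() above, the
wake_up(&rdev->irq.idle_queue) restored just below lets callers sleep until
the GUI engine reports idle (IH source 233). A sketch of the waiting side,
assuming the radeon_irq_kms_wait_gui_idle() helper declared further down in
radeon.h follows this pattern; locking and headers are omitted, and
radeon_irq_set() and radeon_gui_idle() are the existing asic hooks:

	static int wait_gui_idle_sketch(struct radeon_device *rdev)
	{
		long r;

		rdev->irq.gui_idle = true;	/* ask the IH handler to wake us */
		radeon_irq_set(rdev);		/* reprogram the interrupt masks */

		/* woken by wake_up(&rdev->irq.idle_queue) from the handler */
		r = wait_event_timeout(rdev->irq.idle_queue,
				       radeon_gui_idle(rdev),
				       msecs_to_jiffies(100));

		rdev->irq.gui_idle = false;
		radeon_irq_set(rdev);
		return r ? 0 : -ETIMEDOUT;
	}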
break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); + wake_up(&rdev->irq.idle_queue); break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit.c b/trunk/drivers/gpu/drm/radeon/r600_blit.c index 661fec2a2cc1..3c031a48205d 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_blit.c +++ b/trunk/drivers/gpu/drm/radeon/r600_blit.c @@ -489,37 +489,32 @@ set_default_state(drm_radeon_private_t *dev_priv) ADVANCE_RING(); } -/* 23 bits of float fractional data */ -#define I2F_FRAC_BITS 23 -#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1) - -/* - * Converts unsigned integer into 32-bit IEEE floating point representation. - * Will be exact from 0 to 2^24. Above that, we round towards zero - * as the fractional bits will not fit in a float. (It would be better to - * round towards even as the fpu does, but that is slower.) - */ -__pure uint32_t int2float(uint32_t x) +static uint32_t i2f(uint32_t input) { - uint32_t msb, exponent, fraction; - - /* Zero is special */ - if (!x) return 0; - - /* Get location of the most significant bit */ - msb = __fls(x); - - /* - * Use a rotate instead of a shift because that works both leftwards - * and rightwards due to the mod(32) behaviour. This means we don't - * need to check to see if we are above 2^24 or not. - */ - fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK; - exponent = (127 + msb) << I2F_FRAC_BITS; - - return fraction + exponent; + u32 result, i, exponent, fraction; + + if ((input & 0x3fff) == 0) + result = 0; /* 0 is a special case */ + else { + exponent = 140; /* exponent biased by 127; */ + fraction = (input & 0x3fff) << 10; /* cheat and only + handle numbers below 2^^15 */ + for (i = 0; i < 14; i++) { + if (fraction & 0x800000) + break; + else { + fraction = fraction << 1; /* keep + shifting left until top bit = 1 */ + exponent = exponent - 1; + } + } + result = exponent << 23 | (fraction & 0x7fffff); /* mask + off top bit; assumed 1 */ + } + return result; } + static int r600_nomm_get_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -637,20 +632,20 @@ r600_blit_copy(struct drm_device *dev, vb = r600_nomm_get_vb_ptr(dev); } - vb[0] = int2float(dst_x); + vb[0] = i2f(dst_x); vb[1] = 0; - vb[2] = int2float(src_x); + vb[2] = i2f(src_x); vb[3] = 0; - vb[4] = int2float(dst_x); - vb[5] = int2float(h); - vb[6] = int2float(src_x); - vb[7] = int2float(h); + vb[4] = i2f(dst_x); + vb[5] = i2f(h); + vb[6] = i2f(src_x); + vb[7] = i2f(h); - vb[8] = int2float(dst_x + cur_size); - vb[9] = int2float(h); - vb[10] = int2float(src_x + cur_size); - vb[11] = int2float(h); + vb[8] = i2f(dst_x + cur_size); + vb[9] = i2f(h); + vb[10] = i2f(src_x + cur_size); + vb[11] = i2f(h); /* src */ set_tex_resource(dev_priv, FMT_8, @@ -726,20 +721,20 @@ r600_blit_copy(struct drm_device *dev, vb = r600_nomm_get_vb_ptr(dev); } - vb[0] = int2float(dst_x / 4); + vb[0] = i2f(dst_x / 4); vb[1] = 0; - vb[2] = int2float(src_x / 4); + vb[2] = i2f(src_x / 4); vb[3] = 0; - vb[4] = int2float(dst_x / 4); - vb[5] = int2float(h); - vb[6] = int2float(src_x / 4); - vb[7] = int2float(h); + vb[4] = i2f(dst_x / 4); + vb[5] = i2f(h); + vb[6] = i2f(src_x / 4); + vb[7] = i2f(h); - vb[8] = int2float((dst_x + cur_size) / 4); - vb[9] = int2float(h); - vb[10] = int2float((src_x + cur_size) / 4); - vb[11] = int2float(h); + vb[8] = i2f((dst_x + cur_size) / 4); + vb[9] = i2f(h); + vb[10] = i2f((src_x + cur_size) / 4); + vb[11] = i2f(h); /* src */ set_tex_resource(dev_priv, FMT_8_8_8_8, @@ -809,20 
+804,20 @@ r600_blit_swap(struct drm_device *dev, dx2 = dx + w; dy2 = dy + h; - vb[0] = int2float(dx); - vb[1] = int2float(dy); - vb[2] = int2float(sx); - vb[3] = int2float(sy); + vb[0] = i2f(dx); + vb[1] = i2f(dy); + vb[2] = i2f(sx); + vb[3] = i2f(sy); - vb[4] = int2float(dx); - vb[5] = int2float(dy2); - vb[6] = int2float(sx); - vb[7] = int2float(sy2); + vb[4] = i2f(dx); + vb[5] = i2f(dy2); + vb[6] = i2f(sx); + vb[7] = i2f(sy2); - vb[8] = int2float(dx2); - vb[9] = int2float(dy2); - vb[10] = int2float(sx2); - vb[11] = int2float(sy2); + vb[8] = i2f(dx2); + vb[9] = i2f(dy2); + vb[10] = i2f(sx2); + vb[11] = i2f(sy2); switch(cpp) { case 4: diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c b/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c index 1c7ed3a5d045..2bef8549ddfe 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -455,6 +455,46 @@ set_default_state(struct radeon_device *rdev) radeon_ring_write(ring, sq_stack_resource_mgmt_2); } +#define I2F_MAX_BITS 15 +#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1) +#define I2F_SHIFT (24 - I2F_MAX_BITS) + +/* + * Converts unsigned integer into 32-bit IEEE floating point representation. + * Conversion is not universal and only works for the range from 0 + * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between + * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary, + * I2F_MAX_BITS can be increased, but that will add to the loop iterations + * and slow us down. Conversion is done by shifting the input and counting + * down until the first 1 reaches bit position 23. The resulting counter + * and the shifted input are, respectively, the exponent and the fraction. + * The sign is always zero. + */ +static uint32_t i2f(uint32_t input) +{ + u32 result, i, exponent, fraction; + + WARN_ON_ONCE(input > I2F_MAX_INPUT); + + if ((input & I2F_MAX_INPUT) == 0) + result = 0; + else { + exponent = 126 + I2F_MAX_BITS; + fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT; + + for (i = 0; i < I2F_MAX_BITS; i++) { + if (fraction & 0x800000) + break; + else { + fraction = fraction << 1; + exponent = exponent - 1; + } + } + result = exponent << 23 | (fraction & 0x7fffff); + } + return result; +} + int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; @@ -726,14 +766,14 @@ void r600_kms_blit_copy(struct radeon_device *rdev, vb_cpu_addr[3] = 0; vb_cpu_addr[4] = 0; - vb_cpu_addr[5] = int2float(h); + vb_cpu_addr[5] = i2f(h); vb_cpu_addr[6] = 0; - vb_cpu_addr[7] = int2float(h); + vb_cpu_addr[7] = i2f(h); - vb_cpu_addr[8] = int2float(w); - vb_cpu_addr[9] = int2float(h); - vb_cpu_addr[10] = int2float(w); - vb_cpu_addr[11] = int2float(h); + vb_cpu_addr[8] = i2f(w); + vb_cpu_addr[9] = i2f(h); + vb_cpu_addr[10] = i2f(w); + vb_cpu_addr[11] = i2f(h); rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, w, h, w, src_gpu_addr, size_in_bytes); diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.h b/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.h index 2f3ce7a75976..f437d36dd98c 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.h +++ b/trunk/drivers/gpu/drm/radeon/r600_blit_shaders.h @@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[]; extern const u32 r6xx_ps_size, r6xx_vs_size; extern const u32 r6xx_default_size, r7xx_default_size; -__pure uint32_t int2float(uint32_t x); #endif diff --git a/trunk/drivers/gpu/drm/radeon/r600_cs.c b/trunk/drivers/gpu/drm/radeon/r600_cs.c index 853f05ced1b1..f37676d7f217 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cs.c 
+++ b/trunk/drivers/gpu/drm/radeon/r600_cs.c @@ -847,7 +847,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) * Assume that chunk_ib_index is properly set. Will return -EINVAL * if packet is bigger than remaining ib size. or if packets is unknown. **/ -static int r600_cs_packet_parse(struct radeon_cs_parser *p, +int r600_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx) { @@ -2180,8 +2180,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } break; case PACKET3_STRMOUT_BASE_UPDATE: - /* RS780 and RS880 also need this */ - if (p->family < CHIP_RS780) { + if (p->family < CHIP_RV770) { DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n"); return -EINVAL; } diff --git a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c index 857a7d7862fb..e3558c3ef24a 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c @@ -53,7 +53,7 @@ enum r600_hdmi_iec_status_bits { AUDIO_STATUS_LEVEL = 0x80 }; -static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { +struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { /* 32kHz 44.1kHz 48kHz */ /* Clock N CTS N CTS N CTS */ { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index b04c06444d8b..59a15315ae9f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -123,7 +123,6 @@ extern int radeon_lockup_timeout; #define CAYMAN_RING_TYPE_CP2_INDEX 2 /* hardcode those limit for now */ -#define RADEON_VA_IB_OFFSET (1 << 20) #define RADEON_VA_RESERVED_SIZE (8 << 20) #define RADEON_IB_VM_MAX_SIZE (64 << 10) @@ -254,22 +253,6 @@ static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a, } } -static inline bool radeon_fence_is_earlier(struct radeon_fence *a, - struct radeon_fence *b) -{ - if (!a) { - return false; - } - - if (!b) { - return true; - } - - BUG_ON(a->ring != b->ring); - - return a->seq < b->seq; -} - /* * Tiling registers */ @@ -292,20 +275,18 @@ struct radeon_mman { /* bo virtual address in a specific vm */ struct radeon_bo_va { - /* protected by bo being reserved */ + /* bo list is protected by bo being reserved */ struct list_head bo_list; - uint64_t soffset; - uint64_t eoffset; - uint32_t flags; - bool valid; - unsigned ref_count; - - /* protected by vm mutex */ + /* vm list is protected by vm mutex */ struct list_head vm_list; - /* constant after initialization */ struct radeon_vm *vm; struct radeon_bo *bo; + uint64_t soffset; + uint64_t eoffset; + uint32_t flags; + struct radeon_fence *fence; + bool valid; }; struct radeon_bo { @@ -585,6 +566,9 @@ struct radeon_irq { atomic_t pflip[RADEON_MAX_CRTCS]; wait_queue_head_t vblank_queue; bool hpd[RADEON_MAX_HPD_PINS]; + bool gui_idle; + bool gui_idle_acked; + wait_queue_head_t idle_queue; bool afmt[RADEON_MAX_AFMT_BLOCKS]; union radeon_irq_stat_regs stat_regs; }; @@ -599,6 +583,7 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block); void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block); void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask); void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); +int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev); /* * CP & rings. 
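The gui_idle, gui_idle_acked and idle_queue fields restored to struct radeon_irq above pair with the hunk at the top of this patch that makes the interrupt handler wake_up(&rdev->irq.idle_queue) on the GUI IDLE (src_id 233) event. radeon_irq_kms_wait_gui_idle() is only declared here; a plausible shape for the helper, assuming the IRQ-processing path sets gui_idle_acked before waking the queue (a sketch, not the patch's actual implementation):

int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
{
	long r;

	/* request the GUI IDLE interrupt from the ASIC */
	rdev->irq.gui_idle = true;
	rdev->irq.gui_idle_acked = false;
	radeon_irq_set(rdev);

	/* sleep until the handler acks and wakes idle_queue,
	 * but never hang if the pulse is lost */
	r = wait_event_timeout(rdev->irq.idle_queue,
			       rdev->irq.gui_idle_acked,
			       msecs_to_jiffies(100));

	rdev->irq.gui_idle = false;
	radeon_irq_set(rdev);

	return r ? 0 : -ETIMEDOUT;
}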
@@ -611,7 +596,7 @@ struct radeon_ib { uint32_t *ptr; int ring; struct radeon_fence *fence; - struct radeon_vm *vm; + unsigned vm_id; bool is_const_ib; struct radeon_fence *sync_to[RADEON_NUM_RINGS]; struct radeon_semaphore *semaphore; @@ -647,38 +632,41 @@ struct radeon_ring { /* * VM */ - -/* maximum number of VMIDs */ -#define RADEON_NUM_VM 16 - -/* defines number of bits in page table versus page directory, - * a page is 4KB so we have 12 bits offset, 9 bits in the page - * table and the remaining 19 bits are in the page directory */ -#define RADEON_VM_BLOCK_SIZE 9 - -/* number of entries in page table */ -#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) - struct radeon_vm { struct list_head list; struct list_head va; - unsigned id; + int id; unsigned last_pfn; - u64 pd_gpu_addr; + u64 pt_gpu_addr; + u64 *pt; struct radeon_sa_bo *sa_bo; struct mutex mutex; /* last fence for cs using this vm */ struct radeon_fence *fence; - /* last flush or NULL if we still need to flush */ - struct radeon_fence *last_flush; +}; + +struct radeon_vm_funcs { + int (*init)(struct radeon_device *rdev); + void (*fini)(struct radeon_device *rdev); + /* cs mutex must be lock for schedule_ib */ + int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id); + void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm); + void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm); + uint32_t (*page_flags)(struct radeon_device *rdev, + struct radeon_vm *vm, + uint32_t flags); + void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm, + unsigned pfn, uint64_t addr, uint32_t flags); }; struct radeon_vm_manager { struct mutex lock; struct list_head lru_vm; - struct radeon_fence *active[RADEON_NUM_VM]; + uint32_t use_bitmap; struct radeon_sa_manager sa_manager; uint32_t max_pfn; + /* fields constant after init */ + const struct radeon_vm_funcs *funcs; /* number of VMIDs */ unsigned nvm; /* vram base address for page table entry */ @@ -750,8 +738,7 @@ struct si_rlc { }; int radeon_ib_get(struct radeon_device *rdev, int ring, - struct radeon_ib *ib, struct radeon_vm *vm, - unsigned size); + struct radeon_ib *ib, unsigned size); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, struct radeon_ib *const_ib); @@ -1144,15 +1131,6 @@ struct radeon_asic { void (*tlb_flush)(struct radeon_device *rdev); int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); } gart; - struct { - int (*init)(struct radeon_device *rdev); - void (*fini)(struct radeon_device *rdev); - - u32 pt_ring_index; - void (*set_page)(struct radeon_device *rdev, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); - } vm; /* ring specific callbacks */ struct { void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); @@ -1165,7 +1143,6 @@ struct radeon_asic { int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); - void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); } ring[RADEON_NUM_RINGS]; /* irqs */ struct { @@ -1180,10 +1157,6 @@ struct radeon_asic { u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); /* wait for vblank */ void (*wait_for_vblank)(struct radeon_device *rdev, int crtc); - /* set backlight level */ - void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 
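Note the switch in ownership model in the VM manager above: instead of the per-ID fence array (active[RADEON_NUM_VM]) consulted by radeon_vm_grab_id(), the restored code hands out hardware VMIDs through the funcs->bind()/unbind() pair and tracks them in use_bitmap. An allocator of that shape might look like the following (illustrative only; the real cayman/si bind callbacks also program the page-table registers for the chosen id):

/* Sketch of VMID bookkeeping against vm_manager.use_bitmap.
 * Bit 0 is kept for the system context; ids 1..nvm-1 are for clients. */
static int vmid_alloc_sketch(struct radeon_vm_manager *man)
{
	unsigned id;

	for (id = 1; id < man->nvm; id++) {
		if (!(man->use_bitmap & (1u << id))) {
			man->use_bitmap |= 1u << id;
			return id;
		}
	}
	return -ENOMEM;	/* all ids busy: evict the LRU vm and retry */
}

static void vmid_free_sketch(struct radeon_vm_manager *man, int id)
{
	if (id > 0)
		man->use_bitmap &= ~(1u << id);
}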
level); - /* get backlight level */ - u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder); } display; /* copy functions for bo handling */ struct { @@ -1455,56 +1428,6 @@ struct r600_vram_scratch { u64 gpu_addr; }; -/* - * ACPI - */ -struct radeon_atif_notification_cfg { - bool enabled; - int command_code; -}; - -struct radeon_atif_notifications { - bool display_switch; - bool expansion_mode_change; - bool thermal_state; - bool forced_power_state; - bool system_power_state; - bool display_conf_change; - bool px_gfx_switch; - bool brightness_change; - bool dgpu_display_event; -}; - -struct radeon_atif_functions { - bool system_params; - bool sbios_requests; - bool select_active_disp; - bool lid_state; - bool get_tv_standard; - bool set_tv_standard; - bool get_panel_expansion_mode; - bool set_panel_expansion_mode; - bool temperature_change; - bool graphics_device_types; -}; - -struct radeon_atif { - struct radeon_atif_notifications notifications; - struct radeon_atif_functions functions; - struct radeon_atif_notification_cfg notification_cfg; - struct radeon_encoder *encoder_for_bl; -}; - -struct radeon_atcs_functions { - bool get_ext_state; - bool pcie_perf_req; - bool pcie_dev_rdy; - bool pcie_bus_width; -}; - -struct radeon_atcs { - struct radeon_atcs_functions functions; -}; /* * Core structure, functions and helpers. @@ -1597,9 +1520,6 @@ struct radeon_device { /* virtual memory */ struct radeon_vm_manager vm_manager; struct mutex gpu_clock_mutex; - /* ACPI interface */ - struct radeon_atif atif; - struct radeon_atcs atcs; }; int radeon_device_init(struct radeon_device *rdev, @@ -1763,21 +1683,15 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) -#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) -#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) -#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags))) #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) #define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) #define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) #define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) -#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) #define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) #define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) -#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l)) -#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e)) #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) #define radeon_copy_blit(rdev, s, d, np, f) 
(rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) @@ -1845,30 +1759,22 @@ int radeon_vm_manager_init(struct radeon_device *rdev); void radeon_vm_manager_fini(struct radeon_device *rdev); int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); -int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); -struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, - struct radeon_vm *vm, int ring); -void radeon_vm_fence(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_fence *fence); -uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); +int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm); +void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); int radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, struct ttm_mem_reg *mem); void radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo); -struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, - struct radeon_bo *bo); -struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo); -int radeon_vm_bo_set_addr(struct radeon_device *rdev, - struct radeon_bo_va *bo_va, - uint64_t offset, - uint32_t flags); +int radeon_vm_bo_add(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo, + uint64_t offset, + uint32_t flags); int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va); + struct radeon_vm *vm, + struct radeon_bo *bo); /* audio */ void r600_audio_update_hdmi(struct work_struct *work); @@ -1926,14 +1832,12 @@ extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_displ extern int ni_init_microcode(struct radeon_device *rdev); extern int ni_mc_load_microcode(struct radeon_device *rdev); -/* radeon_acpi.c */ -#if defined(CONFIG_ACPI) -extern int radeon_acpi_init(struct radeon_device *rdev); -extern void radeon_acpi_fini(struct radeon_device *rdev); -#else -static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } -static inline void radeon_acpi_fini(struct radeon_device *rdev) { } -#endif +/* radeon_acpi.c */ +#if defined(CONFIG_ACPI) +extern int radeon_acpi_init(struct radeon_device *rdev); +#else +static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } +#endif #include "radeon_object.h" diff --git a/trunk/drivers/gpu/drm/radeon/radeon_acpi.c b/trunk/drivers/gpu/drm/radeon/radeon_acpi.c index c3976eb341bf..3516a6081dcf 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_acpi.c @@ -1,120 +1,35 @@ -/* - * Copyright 2012 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - #include #include #include -#include #include #include -#include #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "drm_crtc_helper.h" #include "radeon.h" -#include "radeon_acpi.h" -#include "atom.h" #include -#define ACPI_AC_CLASS "ac_adapter" - -extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev); - -struct atif_verify_interface { - u16 size; /* structure size in bytes (includes size field) */ - u16 version; /* version */ - u32 notification_mask; /* supported notifications mask */ - u32 function_bits; /* supported functions bit vector */ -} __packed; - -struct atif_system_params { - u16 size; /* structure size in bytes (includes size field) */ - u32 valid_mask; /* valid flags mask */ - u32 flags; /* flags */ - u8 command_code; /* notify command code */ -} __packed; - -struct atif_sbios_requests { - u16 size; /* structure size in bytes (includes size field) */ - u32 pending; /* pending sbios requests */ - u8 panel_exp_mode; /* panel expansion mode */ - u8 thermal_gfx; /* thermal state: target gfx controller */ - u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */ - u8 forced_power_gfx; /* forced power state: target gfx controller */ - u8 forced_power_state; /* forced power state: state id */ - u8 system_power_src; /* system power source */ - u8 backlight_level; /* panel backlight level (0-255) */ -} __packed; - -#define ATIF_NOTIFY_MASK 0x3 -#define ATIF_NOTIFY_NONE 0 -#define ATIF_NOTIFY_81 1 -#define ATIF_NOTIFY_N 2 - -struct atcs_verify_interface { - u16 size; /* structure size in bytes (includes size field) */ - u16 version; /* version */ - u32 function_bits; /* supported functions bit vector */ -} __packed; - /* Call the ATIF method - */ -/** - * radeon_atif_call - call an ATIF method * - * @handle: acpi handle - * @function: the ATIF function to execute - * @params: ATIF function params - * - * Executes the requested ATIF function (all asics). - * Returns a pointer to the acpi output buffer. 
+ * Note: currently we discard the output */ -static union acpi_object *radeon_atif_call(acpi_handle handle, int function, - struct acpi_buffer *params) +static int radeon_atif_call(acpi_handle handle) { acpi_status status; union acpi_object atif_arg_elements[2]; struct acpi_object_list atif_arg; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; atif_arg.count = 2; atif_arg.pointer = &atif_arg_elements[0]; atif_arg_elements[0].type = ACPI_TYPE_INTEGER; - atif_arg_elements[0].integer.value = function; - - if (params) { - atif_arg_elements[1].type = ACPI_TYPE_BUFFER; - atif_arg_elements[1].buffer.length = params->length; - atif_arg_elements[1].buffer.pointer = params->pointer; - } else { - /* We need a second fake parameter */ - atif_arg_elements[1].type = ACPI_TYPE_INTEGER; - atif_arg_elements[1].integer.value = 0; - } + atif_arg_elements[0].integer.value = 0; + atif_arg_elements[1].type = ACPI_TYPE_INTEGER; + atif_arg_elements[1].integer.value = 0; status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); @@ -123,434 +38,17 @@ static union acpi_object *radeon_atif_call(acpi_handle handle, int function, DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", acpi_format_exception(status)); kfree(buffer.pointer); - return NULL; + return 1; } - return buffer.pointer; -} - -/** - * radeon_atif_parse_notification - parse supported notifications - * - * @n: supported notifications struct - * @mask: supported notifications mask from ATIF - * - * Use the supported notifications mask from ATIF function - * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications - * are supported (all asics). - */ -static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask) -{ - n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED; - n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED; - n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED; - n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED; - n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED; - n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED; - n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED; - n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED; - n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED; -} - -/** - * radeon_atif_parse_functions - parse supported functions - * - * @f: supported functions struct - * @mask: supported functions mask from ATIF - * - * Use the supported functions mask from ATIF function - * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions - * are supported (all asics). 
- */ -static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask) -{ - f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED; - f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED; - f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED; - f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED; - f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED; - f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED; - f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED; - f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED; - f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED; - f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED; -} - -/** - * radeon_atif_verify_interface - verify ATIF - * - * @handle: acpi handle - * @atif: radeon atif struct - * - * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function - * to initialize ATIF and determine what features are supported - * (all asics). - * returns 0 on success, error on failure. - */ -static int radeon_atif_verify_interface(acpi_handle handle, - struct radeon_atif *atif) -{ - union acpi_object *info; - struct atif_verify_interface output; - size_t size; - int err = 0; - - info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); - if (!info) - return -EIO; - - memset(&output, 0, sizeof(output)); - - size = *(u16 *) info->buffer.pointer; - if (size < 12) { - DRM_INFO("ATIF buffer is too small: %lu\n", size); - err = -EINVAL; - goto out; - } - size = min(sizeof(output), size); - - memcpy(&output, info->buffer.pointer, size); - - /* TODO: check version? */ - DRM_DEBUG_DRIVER("ATIF version %u\n", output.version); - - radeon_atif_parse_notification(&atif->notifications, output.notification_mask); - radeon_atif_parse_functions(&atif->functions, output.function_bits); - -out: - kfree(info); - return err; -} - -/** - * radeon_atif_get_notification_params - determine notify configuration - * - * @handle: acpi handle - * @n: atif notification configuration struct - * - * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function - * to determine if a notifier is used and if so which one - * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n) - * where n is specified in the result if a notifier is used. - * Returns 0 on success, error on failure. 
- */ -static int radeon_atif_get_notification_params(acpi_handle handle, - struct radeon_atif_notification_cfg *n) -{ - union acpi_object *info; - struct atif_system_params params; - size_t size; - int err = 0; - - info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); - if (!info) { - err = -EIO; - goto out; - } - - size = *(u16 *) info->buffer.pointer; - if (size < 10) { - err = -EINVAL; - goto out; - } - - memset(¶ms, 0, sizeof(params)); - size = min(sizeof(params), size); - memcpy(¶ms, info->buffer.pointer, size); - - DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n", - params.flags, params.valid_mask); - params.flags = params.flags & params.valid_mask; - - if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) { - n->enabled = false; - n->command_code = 0; - } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) { - n->enabled = true; - n->command_code = 0x81; - } else { - if (size < 11) { - err = -EINVAL; - goto out; - } - n->enabled = true; - n->command_code = params.command_code; - } - -out: - DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n", - (n->enabled ? "enabled" : "disabled"), - n->command_code); - kfree(info); - return err; -} - -/** - * radeon_atif_get_sbios_requests - get requested sbios event - * - * @handle: acpi handle - * @req: atif sbios request struct - * - * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function - * to determine what requests the sbios is making to the driver - * (all asics). - * Returns 0 on success, error on failure. - */ -static int radeon_atif_get_sbios_requests(acpi_handle handle, - struct atif_sbios_requests *req) -{ - union acpi_object *info; - size_t size; - int count = 0; - - info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); - if (!info) - return -EIO; - - size = *(u16 *)info->buffer.pointer; - if (size < 0xd) { - count = -EINVAL; - goto out; - } - memset(req, 0, sizeof(*req)); - - size = min(sizeof(*req), size); - memcpy(req, info->buffer.pointer, size); - DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending); - - count = hweight32(req->pending); - -out: - kfree(info); - return count; -} - -/** - * radeon_atif_handler - handle ATIF notify requests - * - * @rdev: radeon_device pointer - * @event: atif sbios request struct - * - * Checks the acpi event and if it matches an atif event, - * handles it. 
- * Returns NOTIFY code - */ -int radeon_atif_handler(struct radeon_device *rdev, - struct acpi_bus_event *event) -{ - struct radeon_atif *atif = &rdev->atif; - struct atif_sbios_requests req; - acpi_handle handle; - int count; - - DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", - event->device_class, event->type); - - if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) - return NOTIFY_DONE; - - if (!atif->notification_cfg.enabled || - event->type != atif->notification_cfg.command_code) - /* Not our event */ - return NOTIFY_DONE; - - /* Check pending SBIOS requests */ - handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); - count = radeon_atif_get_sbios_requests(handle, &req); - - if (count <= 0) - return NOTIFY_DONE; - - DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count); - - if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) { - struct radeon_encoder *enc = atif->encoder_for_bl; - - if (enc) { - DRM_DEBUG_DRIVER("Changing brightness to %d\n", - req.backlight_level); - - radeon_set_backlight_level(rdev, enc, req.backlight_level); - - if (rdev->is_atom_bios) { - struct radeon_encoder_atom_dig *dig = enc->enc_priv; - backlight_force_update(dig->bl_dev, - BACKLIGHT_UPDATE_HOTKEY); - } else { - struct radeon_encoder_lvds *dig = enc->enc_priv; - backlight_force_update(dig->bl_dev, - BACKLIGHT_UPDATE_HOTKEY); - } - } - } - /* TODO: check other events */ - - /* We've handled the event, stop the notifier chain. The ACPI interface - * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to - * userspace if the event was generated only to signal a SBIOS - * request. - */ - return NOTIFY_BAD; -} - -/* Call the ATCS method - */ -/** - * radeon_atcs_call - call an ATCS method - * - * @handle: acpi handle - * @function: the ATCS function to execute - * @params: ATCS function params - * - * Executes the requested ATCS function (all asics). - * Returns a pointer to the acpi output buffer. - */ -static union acpi_object *radeon_atcs_call(acpi_handle handle, int function, - struct acpi_buffer *params) -{ - acpi_status status; - union acpi_object atcs_arg_elements[2]; - struct acpi_object_list atcs_arg; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - - atcs_arg.count = 2; - atcs_arg.pointer = &atcs_arg_elements[0]; - - atcs_arg_elements[0].type = ACPI_TYPE_INTEGER; - atcs_arg_elements[0].integer.value = function; - - if (params) { - atcs_arg_elements[1].type = ACPI_TYPE_BUFFER; - atcs_arg_elements[1].buffer.length = params->length; - atcs_arg_elements[1].buffer.pointer = params->pointer; - } else { - /* We need a second fake parameter */ - atcs_arg_elements[1].type = ACPI_TYPE_INTEGER; - atcs_arg_elements[1].integer.value = 0; - } - - status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer); - - /* Fail only if calling the method fails and ATIF is supported */ - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n", - acpi_format_exception(status)); - kfree(buffer.pointer); - return NULL; - } - - return buffer.pointer; -} - -/** - * radeon_atcs_parse_functions - parse supported functions - * - * @f: supported functions struct - * @mask: supported functions mask from ATCS - * - * Use the supported functions mask from ATCS function - * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions - * are supported (all asics). 
- */ -static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask) -{ - f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED; - f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED; - f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED; - f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED; -} - -/** - * radeon_atcs_verify_interface - verify ATCS - * - * @handle: acpi handle - * @atcs: radeon atcs struct - * - * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function - * to initialize ATCS and determine what features are supported - * (all asics). - * returns 0 on success, error on failure. - */ -static int radeon_atcs_verify_interface(acpi_handle handle, - struct radeon_atcs *atcs) -{ - union acpi_object *info; - struct atcs_verify_interface output; - size_t size; - int err = 0; - - info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL); - if (!info) - return -EIO; - - memset(&output, 0, sizeof(output)); - - size = *(u16 *) info->buffer.pointer; - if (size < 8) { - DRM_INFO("ATCS buffer is too small: %lu\n", size); - err = -EINVAL; - goto out; - } - size = min(sizeof(output), size); - - memcpy(&output, info->buffer.pointer, size); - - /* TODO: check version? */ - DRM_DEBUG_DRIVER("ATCS version %u\n", output.version); - - radeon_atcs_parse_functions(&atcs->functions, output.function_bits); - -out: - kfree(info); - return err; -} - -/** - * radeon_acpi_event - handle notify events - * - * @nb: notifier block - * @val: val - * @data: acpi event - * - * Calls relevant radeon functions in response to various - * acpi events. - * Returns NOTIFY code - */ -static int radeon_acpi_event(struct notifier_block *nb, - unsigned long val, - void *data) -{ - struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); - struct acpi_bus_event *entry = (struct acpi_bus_event *)data; - - if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { - if (power_supply_is_system_supplied() > 0) - DRM_DEBUG_DRIVER("pm: AC\n"); - else - DRM_DEBUG_DRIVER("pm: DC\n"); - - radeon_pm_acpi_event_handler(rdev); - } - - /* Check for pending SBIOS requests */ - return radeon_atif_handler(rdev, entry); + kfree(buffer.pointer); + return 0; } /* Call all ACPI methods here */ -/** - * radeon_acpi_init - init driver acpi support - * - * @rdev: radeon_device pointer - * - * Verifies the AMD ACPI interfaces and registers with the acpi - * notifier chain (all asics). - * Returns 0 on success, error on failure. 
- */ int radeon_acpi_init(struct radeon_device *rdev) { acpi_handle handle; - struct radeon_atif *atif = &rdev->atif; - struct radeon_atcs *atcs = &rdev->atcs; int ret; /* Get the device handle */ @@ -560,90 +58,11 @@ int radeon_acpi_init(struct radeon_device *rdev) if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) return 0; - /* Call the ATCS method */ - ret = radeon_atcs_verify_interface(handle, atcs); - if (ret) { - DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); - } - /* Call the ATIF method */ - ret = radeon_atif_verify_interface(handle, atif); - if (ret) { - DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); - goto out; - } - - if (atif->notifications.brightness_change) { - struct drm_encoder *tmp; - struct radeon_encoder *target = NULL; - - /* Find the encoder controlling the brightness */ - list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list, - head) { - struct radeon_encoder *enc = to_radeon_encoder(tmp); - - if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) && - enc->enc_priv) { - if (rdev->is_atom_bios) { - struct radeon_encoder_atom_dig *dig = enc->enc_priv; - if (dig->bl_dev) { - target = enc; - break; - } - } else { - struct radeon_encoder_lvds *dig = enc->enc_priv; - if (dig->bl_dev) { - target = enc; - break; - } - } - } - } - - atif->encoder_for_bl = target; - if (!target) { - /* Brightness change notification is enabled, but we - * didn't find a backlight controller, this should - * never happen. - */ - DRM_ERROR("Cannot find a backlight controller\n"); - } - } + ret = radeon_atif_call(handle); + if (ret) + return ret; - if (atif->functions.sbios_requests && !atif->functions.system_params) { - /* XXX check this workraround, if sbios request function is - * present we have to see how it's configured in the system - * params - */ - atif->functions.system_params = true; - } - - if (atif->functions.system_params) { - ret = radeon_atif_get_notification_params(handle, - &atif->notification_cfg); - if (ret) { - DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", - ret); - /* Disable notification */ - atif->notification_cfg.enabled = false; - } - } - -out: - rdev->acpi_nb.notifier_call = radeon_acpi_event; - register_acpi_notifier(&rdev->acpi_nb); - - return ret; + return 0; } -/** - * radeon_acpi_fini - tear down driver acpi support - * - * @rdev: radeon_device pointer - * - * Unregisters with the acpi notifier chain (all asics). - */ -void radeon_acpi_fini(struct radeon_device *rdev) -{ - unregister_acpi_notifier(&rdev->acpi_nb); -} diff --git a/trunk/drivers/gpu/drm/radeon/radeon_acpi.h b/trunk/drivers/gpu/drm/radeon/radeon_acpi.h deleted file mode 100644 index be4af76f213d..000000000000 --- a/trunk/drivers/gpu/drm/radeon/radeon_acpi.h +++ /dev/null @@ -1,445 +0,0 @@ -/* - * Copyright 2012 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef RADEON_ACPI_H -#define RADEON_ACPI_H - -struct radeon_device; -struct acpi_bus_event; - -int radeon_atif_handler(struct radeon_device *rdev, - struct acpi_bus_event *event); - -/* AMD hw uses four ACPI control methods: - * 1. ATIF - * ARG0: (ACPI_INTEGER) function code - * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes - * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes - * ATIF provides an entry point for the gfx driver to interact with the sbios. - * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom - * notification. Which notification is used as indicated by the ATIF Control - * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or - * a custom notification it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS - * to identify pending System BIOS requests and associated parameters. For - * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver - * will perform display device detection and invoke ATIF Control Method - * SELECT_ACTIVE_DISPLAYS. - * - * 2. ATPX - * ARG0: (ACPI_INTEGER) function code - * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes - * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes - * ATPX methods are used on PowerXpress systems to handle mux switching and - * discrete GPU power control. - * - * 3. ATRM - * ARG0: (ACPI_INTEGER) offset of vbios rom data - * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K). - * OUTPUT: (ACPI_BUFFER) output buffer - * ATRM provides an interfacess to access the discrete GPU vbios image on - * PowerXpress systems with multiple GPUs. - * - * 4. ATCS - * ARG0: (ACPI_INTEGER) function code - * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes - * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes - * ATCS provides an interface to AMD chipset specific functionality. 
- * - */ -/* ATIF */ -#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0 -/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * WORD - version - * DWORD - supported notifications mask - * DWORD - supported functions bit vector - */ -/* Notifications mask */ -# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0) -# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1) -# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2) -# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3) -# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4) -# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5) -# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6) -# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7) -# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8) -/* supported functions vector */ -# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0) -# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1) -# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2) -# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3) -# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4) -# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5) -# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6) -# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7) -# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12) -# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14) -#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1 -/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * DWORD - valid flags mask - * DWORD - flags - * - * OR - * - * WORD - structure size in bytes (includes size field) - * DWORD - valid flags mask - * DWORD - flags - * BYTE - notify command code - * - * flags - * bits 1:0: - * 0 - Notify(VGA, 0x81) is not used for notification - * 1 - Notify(VGA, 0x81) is used for notification - * 2 - Notify(VGA, n) is used for notification where - * n (0xd0-0xd9) is specified in notify command code. 
- * bit 2: - * 1 - lid changes not reported though int10 - */ -#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2 -/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * DWORD - pending sbios requests - * BYTE - panel expansion mode - * BYTE - thermal state: target gfx controller - * BYTE - thermal state: state id (0: exit state, non-0: state) - * BYTE - forced power state: target gfx controller - * BYTE - forced power state: state id - * BYTE - system power source - * BYTE - panel backlight level (0-255) - */ -/* pending sbios requests */ -# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0) -# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1) -# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2) -# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3) -# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4) -# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5) -# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6) -# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7) -# define ATIF_DGPU_DISPLAY_EVENT (1 << 8) -/* panel expansion mode */ -# define ATIF_PANEL_EXPANSION_DISABLE 0 -# define ATIF_PANEL_EXPANSION_FULL 1 -# define ATIF_PANEL_EXPANSION_ASPECT 2 -/* target gfx controller */ -# define ATIF_TARGET_GFX_SINGLE 0 -# define ATIF_TARGET_GFX_PX_IGPU 1 -# define ATIF_TARGET_GFX_PX_DGPU 2 -/* system power source */ -# define ATIF_POWER_SOURCE_AC 1 -# define ATIF_POWER_SOURCE_DC 2 -# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3 -# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4 -#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3 -/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - selected displays - * WORD - connected displays - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * WORD - selected displays - */ -# define ATIF_LCD1 (1 << 0) -# define ATIF_CRT1 (1 << 1) -# define ATIF_TV (1 << 2) -# define ATIF_DFP1 (1 << 3) -# define ATIF_CRT2 (1 << 4) -# define ATIF_LCD2 (1 << 5) -# define ATIF_DFP2 (1 << 7) -# define ATIF_CV (1 << 8) -# define ATIF_DFP3 (1 << 9) -# define ATIF_DFP4 (1 << 10) -# define ATIF_DFP5 (1 << 11) -# define ATIF_DFP6 (1 << 12) -#define ATIF_FUNCTION_GET_LID_STATE 0x4 -/* ARG0: ATIF_FUNCTION_GET_LID_STATE - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - lid state (0: open, 1: closed) - * - * GET_LID_STATE only works at boot and resume, for general lid - * status, use the kernel provided status - */ -#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5 -/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - 0 - * BYTE - TV standard - */ -# define ATIF_TV_STD_NTSC 0 -# define ATIF_TV_STD_PAL 1 -# define ATIF_TV_STD_PALM 2 -# define ATIF_TV_STD_PAL60 3 -# define ATIF_TV_STD_NTSCJ 4 -# define ATIF_TV_STD_PALCN 5 -# define ATIF_TV_STD_PALN 6 -# define ATIF_TV_STD_SCART_RGB 9 -#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6 -/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS - * ARG1: - * WORD - structure size in bytes (includes size field) - * BYTE - 0 - * BYTE - TV standard - * OUTPUT: none - */ -#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7 -/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - panel expansion mode - */ -#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8 -/* 
ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS - * ARG1: - * WORD - structure size in bytes (includes size field) - * BYTE - panel expansion mode - * OUTPUT: none - */ -#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD -/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - gfx controller id - * BYTE - current temperature (degress Celsius) - * OUTPUT: none - */ -#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF -/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES - * ARG1: none - * OUTPUT: - * WORD - number of gfx devices - * WORD - device structure size in bytes (excludes device size field) - * DWORD - flags \ - * WORD - bus number } repeated structure - * WORD - device number / - */ -/* flags */ -# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0) -# define ATIF_XGP_PORT (1 << 1) -# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2) -# define ATIF_XGP_PORT_IN_DOCK (1 << 3) - -/* ATPX */ -#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0 -/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * WORD - version - * DWORD - supported functions bit vector - */ -/* supported functions vector */ -# define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0) -# define ATPX_POWER_CONTROL_SUPPORTED (1 << 1) -# define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2) -# define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3) -# define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4) -# define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5) -# define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7) -# define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8) -#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1 -/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * DWORD - valid flags mask - * DWORD - flags - */ -/* flags */ -# define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0) -# define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1) -# define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2) -# define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3) -# define ATPX_TV_SIGNAL_MUXED (1 << 4) -# define ATPX_DFP_SIGNAL_MUXED (1 << 5) -# define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6) -# define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7) -# define ATPX_ACF_NOT_SUPPORTED (1 << 8) -# define ATPX_FIXED_NOT_SUPPORTED (1 << 9) -# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10) -# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11) -#define ATPX_FUNCTION_POWER_CONTROL 0x2 -/* ARG0: ATPX_FUNCTION_POWER_CONTROL - * ARG1: - * WORD - structure size in bytes (includes size field) - * BYTE - dGPU power state (0: power off, 1: power on) - * OUTPUT: none - */ -#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3 -/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - display mux control (0: iGPU, 1: dGPU) - * OUTPUT: none - */ -# define ATPX_INTEGRATED_GPU 0 -# define ATPX_DISCRETE_GPU 1 -#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4 -/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - i2c/aux/hpd mux control (0: iGPU, 1: dGPU) - * OUTPUT: none - */ -#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5 -/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - target gpu (0: iGPU, 1: dGPU) - * 
OUTPUT: none - */ -#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6 -/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - target gpu (0: iGPU, 1: dGPU) - * OUTPUT: none - */ -#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8 -/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING - * ARG1: none - * OUTPUT: - * WORD - number of display connectors - * WORD - connector structure size in bytes (excludes connector size field) - * BYTE - flags \ - * BYTE - ATIF display vector bit position } repeated - * BYTE - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure - * WORD - connector ACPI id / - */ -/* flags */ -# define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0) -# define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1) -# define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2) -#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9 -/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS - * ARG1: none - * OUTPUT: - * WORD - number of HPD/DDC ports - * WORD - port structure size in bytes (excludes port size field) - * BYTE - ATIF display vector bit position \ - * BYTE - hpd id } reapeated structure - * BYTE - ddc id / - * - * available on A+A systems only - */ -/* hpd id */ -# define ATPX_HPD_NONE 0 -# define ATPX_HPD1 1 -# define ATPX_HPD2 2 -# define ATPX_HPD3 3 -# define ATPX_HPD4 4 -# define ATPX_HPD5 5 -# define ATPX_HPD6 6 -/* ddc id */ -# define ATPX_DDC_NONE 0 -# define ATPX_DDC1 1 -# define ATPX_DDC2 2 -# define ATPX_DDC3 3 -# define ATPX_DDC4 4 -# define ATPX_DDC5 5 -# define ATPX_DDC6 6 -# define ATPX_DDC7 7 -# define ATPX_DDC8 8 - -/* ATCS */ -#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0 -/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * WORD - version - * DWORD - supported functions bit vector - */ -/* supported functions vector */ -# define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0) -# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1) -# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2) -# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3) -#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1 -/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE - * ARG1: none - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * DWORD - valid flags mask - * DWORD - flags (0: undocked, 1: docked) - */ -/* flags */ -# define ATCS_DOCKED (1 << 0) -#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2 -/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) - * WORD - valid flags mask - * WORD - flags - * BYTE - request type - * BYTE - performance request - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - return value - */ -/* flags */ -# define ATCS_ADVERTISE_CAPS (1 << 0) -# define ATCS_WAIT_FOR_COMPLETION (1 << 1) -/* request type */ -# define ATCS_PCIE_LINK_SPEED 1 -/* performance request */ -# define ATCS_REMOVE 0 -# define ATCS_FORCE_LOW_POWER 1 -# define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */ -# define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */ -# define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */ -/* return value */ -# define ATCS_REQUEST_REFUSED 1 -# define ATCS_REQUEST_COMPLETE 2 -# define ATCS_REQUEST_IN_PROGRESS 3 -#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3 -/* ARG0: 
ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION - * ARG1: none - * OUTPUT: none - */ -#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4 -/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH - * ARG1: - * WORD - structure size in bytes (includes size field) - * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) - * BYTE - number of active lanes - * OUTPUT: - * WORD - structure size in bytes (includes size field) - * BYTE - number of active lanes - */ - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c index 654520b95ab7..973417c4b014 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c @@ -198,8 +198,6 @@ static struct radeon_asic r100_asic = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, - .set_backlight_level = &radeon_legacy_set_backlight_level, - .get_backlight_level = &radeon_legacy_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -274,8 +272,6 @@ static struct radeon_asic r200_asic = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, - .set_backlight_level = &radeon_legacy_set_backlight_level, - .get_backlight_level = &radeon_legacy_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -350,8 +346,6 @@ static struct radeon_asic r300_asic = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, - .set_backlight_level = &radeon_legacy_set_backlight_level, - .get_backlight_level = &radeon_legacy_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -426,8 +420,6 @@ static struct radeon_asic r300_asic_pcie = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, - .set_backlight_level = &radeon_legacy_set_backlight_level, - .get_backlight_level = &radeon_legacy_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -502,8 +494,6 @@ static struct radeon_asic r420_asic = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -578,8 +568,6 @@ static struct radeon_asic rs400_asic = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, - .set_backlight_level = &radeon_legacy_set_backlight_level, - .get_backlight_level = &radeon_legacy_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -654,8 +642,6 @@ static struct radeon_asic rs600_asic = { .bandwidth_update = &rs600_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -730,8 +716,6 @@ static struct radeon_asic rs690_asic = { .get_vblank_counter = &rs600_get_vblank_counter, .bandwidth_update = &rs690_bandwidth_update, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -806,8 +790,6 @@ static struct 
radeon_asic rv515_asic = { .get_vblank_counter = &rs600_get_vblank_counter, .bandwidth_update = &rv515_bandwidth_update, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -882,8 +864,6 @@ static struct radeon_asic r520_asic = { .bandwidth_update = &rv515_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r100_copy_blit, @@ -957,8 +937,6 @@ static struct radeon_asic r600_asic = { .bandwidth_update = &rv515_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1032,8 +1010,6 @@ static struct radeon_asic rs780_asic = { .bandwidth_update = &rs690_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1107,8 +1083,6 @@ static struct radeon_asic rv770_asic = { .bandwidth_update = &rv515_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1182,8 +1156,6 @@ static struct radeon_asic evergreen_asic = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1257,8 +1229,6 @@ static struct radeon_asic sumo_asic = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1332,8 +1302,6 @@ static struct radeon_asic btc_asic = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1357,7 +1325,7 @@ static struct radeon_asic btc_asic = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, - .init_profile = &btc_pm_init_profile, + .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -1374,6 +1342,16 @@ static struct radeon_asic btc_asic = { }, }; +static const struct radeon_vm_funcs cayman_vm_funcs = { + .init = &cayman_vm_init, + .fini = &cayman_vm_fini, + .bind = &cayman_vm_bind, + .unbind = &cayman_vm_unbind, + .tlb_flush = &cayman_vm_tlb_flush, + .page_flags = &cayman_vm_page_flags, + .set_page = &cayman_vm_set_page, +}; + static struct radeon_asic cayman_asic 
= { .init = &cayman_init, .fini = &cayman_fini, @@ -1388,12 +1366,6 @@ static struct radeon_asic cayman_asic = { .tlb_flush = &cayman_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, }, - .vm = { - .init = &cayman_vm_init, - .fini = &cayman_vm_fini, - .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .set_page = &cayman_vm_set_page, - }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &cayman_ring_ib_execute, @@ -1404,7 +1376,6 @@ static struct radeon_asic cayman_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &evergreen_gpu_is_lockup, - .vm_flush = &cayman_vm_flush, }, [CAYMAN_RING_TYPE_CP1_INDEX] = { .ib_execute = &cayman_ring_ib_execute, @@ -1415,7 +1386,6 @@ static struct radeon_asic cayman_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &evergreen_gpu_is_lockup, - .vm_flush = &cayman_vm_flush, }, [CAYMAN_RING_TYPE_CP2_INDEX] = { .ib_execute = &cayman_ring_ib_execute, @@ -1426,7 +1396,6 @@ static struct radeon_asic cayman_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &evergreen_gpu_is_lockup, - .vm_flush = &cayman_vm_flush, } }, .irq = { @@ -1437,8 +1406,6 @@ static struct radeon_asic cayman_asic = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1462,7 +1429,7 @@ static struct radeon_asic cayman_asic = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, - .init_profile = &btc_pm_init_profile, + .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, @@ -1493,12 +1460,6 @@ static struct radeon_asic trinity_asic = { .tlb_flush = &cayman_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, }, - .vm = { - .init = &cayman_vm_init, - .fini = &cayman_vm_fini, - .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .set_page = &cayman_vm_set_page, - }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &cayman_ring_ib_execute, @@ -1509,7 +1470,6 @@ static struct radeon_asic trinity_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &evergreen_gpu_is_lockup, - .vm_flush = &cayman_vm_flush, }, [CAYMAN_RING_TYPE_CP1_INDEX] = { .ib_execute = &cayman_ring_ib_execute, @@ -1520,7 +1480,6 @@ static struct radeon_asic trinity_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &evergreen_gpu_is_lockup, - .vm_flush = &cayman_vm_flush, }, [CAYMAN_RING_TYPE_CP2_INDEX] = { .ib_execute = &cayman_ring_ib_execute, @@ -1531,7 +1490,6 @@ static struct radeon_asic trinity_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &evergreen_gpu_is_lockup, - .vm_flush = &cayman_vm_flush, } }, .irq = { @@ -1542,8 +1500,6 @@ static struct radeon_asic trinity_asic = { .bandwidth_update = &dce6_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = &r600_copy_blit, @@ -1584,6 +1540,16 @@ static struct radeon_asic trinity_asic = { }, }; +static const struct radeon_vm_funcs si_vm_funcs = { + .init = &si_vm_init, + .fini = &si_vm_fini, + 
.bind = &si_vm_bind, + .unbind = &si_vm_unbind, + .tlb_flush = &si_vm_tlb_flush, + .page_flags = &cayman_vm_page_flags, + .set_page = &cayman_vm_set_page, +}; + static struct radeon_asic si_asic = { .init = &si_init, .fini = &si_fini, @@ -1598,12 +1564,6 @@ static struct radeon_asic si_asic = { .tlb_flush = &si_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, }, - .vm = { - .init = &si_vm_init, - .fini = &si_vm_fini, - .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, - .set_page = &si_vm_set_page, - }, .ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &si_ring_ib_execute, @@ -1614,7 +1574,6 @@ static struct radeon_asic si_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &si_gpu_is_lockup, - .vm_flush = &si_vm_flush, }, [CAYMAN_RING_TYPE_CP1_INDEX] = { .ib_execute = &si_ring_ib_execute, @@ -1625,7 +1584,6 @@ static struct radeon_asic si_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &si_gpu_is_lockup, - .vm_flush = &si_vm_flush, }, [CAYMAN_RING_TYPE_CP2_INDEX] = { .ib_execute = &si_ring_ib_execute, @@ -1636,7 +1594,6 @@ static struct radeon_asic si_asic = { .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, .is_lockup = &si_gpu_is_lockup, - .vm_flush = &si_vm_flush, } }, .irq = { @@ -1647,8 +1604,6 @@ static struct radeon_asic si_asic = { .bandwidth_update = &dce6_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, - .set_backlight_level = &atombios_set_backlight_level, - .get_backlight_level = &atombios_get_backlight_level, }, .copy = { .blit = NULL, @@ -1742,7 +1697,6 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock; rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock; rdev->asic->pm.set_memory_clock = NULL; - rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level; } break; case CHIP_RS400: @@ -1815,11 +1769,13 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->asic = &cayman_asic; /* set num crtcs */ rdev->num_crtc = 6; + rdev->vm_manager.funcs = &cayman_vm_funcs; break; case CHIP_ARUBA: rdev->asic = &trinity_asic; /* set num crtcs */ rdev->num_crtc = 4; + rdev->vm_manager.funcs = &cayman_vm_funcs; break; case CHIP_TAHITI: case CHIP_PITCAIRN: @@ -1827,6 +1783,7 @@ int radeon_asic_init(struct radeon_device *rdev) rdev->asic = &si_asic; /* set num crtcs */ rdev->num_crtc = 6; + rdev->vm_manager.funcs = &si_vm_funcs; break; default: /* FIXME: not supported yet */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index 5e3a0e5c6be1..18c38d14c8cd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -42,12 +42,6 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev); void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock); void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); -void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); -u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder); -void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level); -u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder); - - /* * r100,rv100,rs100,rv200,rs200 */ @@ -395,7 +389,6 @@ void r700_cp_fini(struct radeon_device *rdev); struct evergreen_mc_save { u32 vga_render_control; u32 vga_hdp_control; - bool 
crtc_enabled[RADEON_MAX_CRTCS]; }; void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); @@ -420,7 +413,6 @@ extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev); extern void sumo_pm_init_profile(struct radeon_device *rdev); -extern void btc_pm_init_profile(struct radeon_device *rdev); extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); @@ -443,11 +435,14 @@ int cayman_asic_reset(struct radeon_device *rdev); void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int cayman_vm_init(struct radeon_device *rdev); void cayman_vm_fini(struct radeon_device *rdev); -void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); -uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); -void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); +int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); +void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); +void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); +uint32_t cayman_vm_page_flags(struct radeon_device *rdev, + struct radeon_vm *vm, + uint32_t flags); +void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm, + unsigned pfn, uint64_t addr, uint32_t flags); int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); /* DCE6 - SI */ @@ -470,10 +465,9 @@ int si_irq_set(struct radeon_device *rdev); int si_irq_process(struct radeon_device *rdev); int si_vm_init(struct radeon_device *rdev); void si_vm_fini(struct radeon_device *rdev); -void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); -void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); +int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); +void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); +void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); uint64_t si_get_gpu_clock(struct radeon_device *rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c index 01b90b4f5e22..d67d4f3eb6f4 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1254,10 +1254,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) if (rdev->clock.max_pixel_clock == 0) rdev->clock.max_pixel_clock = 40000; - /* not technically a clock, but... 
*/ - rdev->mode_info.firmware_flags = - le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); - return true; } @@ -2009,8 +2005,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); /* add the i2c bus for thermal/fan chip */ - if ((power_info->info.ucOverdriveThermalController > 0) && - (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) { + if (power_info->info.ucOverdriveThermalController > 0) { DRM_INFO("Possible %s thermal controller at 0x%02x\n", thermal_controller_names[power_info->info.ucOverdriveThermalController], power_info->info.ucOverdriveControllerAddress >> 1); @@ -2214,7 +2209,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { DRM_INFO("Special thermal controller config\n"); - } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { + } else { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, @@ -2229,12 +2224,6 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r strlcpy(info.type, name, sizeof(info.type)); i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } - } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", - controller->ucType, - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); } } } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 582e99449c12..2a2cf0b88a28 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -12,62 +12,30 @@ #include #include -#include "radeon_acpi.h" - -struct radeon_atpx_functions { - bool px_params; - bool power_cntl; - bool disp_mux_cntl; - bool i2c_mux_cntl; - bool switch_start; - bool switch_end; - bool disp_connectors_mapping; - bool disp_detetion_ports; -}; +#define ATPX_VERSION 0 +#define ATPX_GPU_PWR 2 +#define ATPX_MUX_SELECT 3 +#define ATPX_I2C_MUX_SELECT 4 +#define ATPX_SWITCH_START 5 +#define ATPX_SWITCH_END 6 -struct radeon_atpx { - acpi_handle handle; - struct radeon_atpx_functions functions; -}; +#define ATPX_INTEGRATED 0 +#define ATPX_DISCRETE 1 + +#define ATPX_MUX_IGD 0 +#define ATPX_MUX_DISCRETE 1 static struct radeon_atpx_priv { bool atpx_detected; /* handle for device - and atpx */ acpi_handle dhandle; - struct radeon_atpx atpx; + acpi_handle atpx_handle; } radeon_atpx_priv; -struct atpx_verify_interface { - u16 size; /* structure size in bytes (includes size field) */ - u16 version; /* version */ - u32 function_bits; /* supported functions bit vector */ -} __packed; - -struct atpx_power_control { - u16 size; - u8 dgpu_state; -} __packed; - -struct atpx_mux { - u16 size; - u16 mux; -} __packed; - -/** - * radeon_atpx_call - call an ATPX method - * - * @handle: acpi handle - * @function: the ATPX function to execute - * @params: ATPX function params - * - * Executes the requested ATPX function (all asics). - * Returns a pointer to the acpi output buffer. 
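[Note: the removed radeon_atpx_call() documented above and the simpler helpers that replace it below share one ACPI calling convention: an integer sub-function code plus a buffer argument, packaged into an acpi_object_list and handed to acpi_evaluate_object(). A minimal sketch of that shared shape follows; example_atpx_eval() is an illustrative name, not a driver symbol, and error handling/output parsing are elided. The removed code evaluates a method named "ATPX" relative to the device handle, while the new helpers evaluate the cached ATPX handle directly with a NULL pathname.]

static acpi_status example_atpx_eval(acpi_handle handle, int function,
				     void *in, u16 in_len,
				     struct acpi_buffer *out)
{
	union acpi_object args[2];
	struct acpi_object_list list = { .count = 2, .pointer = args };

	args[0].type = ACPI_TYPE_INTEGER;	/* which ATPX sub-function */
	args[0].integer.value = function;
	args[1].type = ACPI_TYPE_BUFFER;	/* sub-function parameters */
	args[1].buffer.length = in_len;
	args[1].buffer.pointer = in;

	out->length = ACPI_ALLOCATE_BUFFER;	/* let ACPICA allocate the result */
	out->pointer = NULL;
	return acpi_evaluate_object(handle, "ATPX", &list, out);
}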
- */ -static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, - struct acpi_buffer *params) +static int radeon_atpx_get_version(acpi_handle handle) { acpi_status status; - union acpi_object atpx_arg_elements[2]; + union acpi_object atpx_arg_elements[2], *obj; struct acpi_object_list atpx_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -75,292 +43,99 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function, atpx_arg.pointer = &atpx_arg_elements[0]; atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; - atpx_arg_elements[0].integer.value = function; - - if (params) { - atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; - atpx_arg_elements[1].buffer.length = params->length; - atpx_arg_elements[1].buffer.pointer = params->pointer; - } else { - /* We need a second fake parameter */ - atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; - atpx_arg_elements[1].integer.value = 0; - } + atpx_arg_elements[0].integer.value = ATPX_VERSION; - status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer); + atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; + atpx_arg_elements[1].integer.value = ATPX_VERSION; - /* Fail only if calling the method fails and ATPX is supported */ - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk("failed to evaluate ATPX got %s\n", - acpi_format_exception(status)); - kfree(buffer.pointer); - return NULL; + status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); + return -ENOSYS; } - - return buffer.pointer; + obj = (union acpi_object *)buffer.pointer; + if (obj && (obj->type == ACPI_TYPE_BUFFER)) + printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2)); + kfree(buffer.pointer); + return 0; } -/** - * radeon_atpx_parse_functions - parse supported functions - * - * @f: supported functions struct - * @mask: supported functions mask from ATPX - * - * Use the supported functions mask from ATPX function - * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions - * are supported (all asics). - */ -static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask) +static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value) { - f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED; - f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED; - f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED; - f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED; - f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED; - f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED; - f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED; - f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED; -} + acpi_status status; + union acpi_object atpx_arg_elements[2]; + struct acpi_object_list atpx_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + uint8_t buf[4] = {0}; -/** - * radeon_atpx_verify_interface - verify ATPX - * - * @handle: acpi handle - * @atpx: radeon atpx struct - * - * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function - * to initialize ATPX and determine what features are supported - * (all asics). - * returns 0 on success, error on failure. 
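[Note: the magic "+ 2" in radeon_atpx_get_version() above follows from the wire layout of the verify-interface output, visible in the struct removed earlier in this file:]

struct atpx_verify_interface {
	u16 size;		/* bytes 0-1: structure size in bytes */
	u16 version;		/* bytes 2-3: interface version */
	u32 function_bits;	/* supported functions bit vector */
} __packed;

[so *((u8 *)(obj->buffer.pointer) + 2) reads the low byte of the version field.]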
- */ -static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) -{ - union acpi_object *info; - struct atpx_verify_interface output; - size_t size; - int err = 0; - - info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL); - if (!info) - return -EIO; - - memset(&output, 0, sizeof(output)); - - size = *(u16 *) info->buffer.pointer; - if (size < 8) { - printk("ATPX buffer is too small: %lu\n", size); - err = -EINVAL; - goto out; - } - size = min(sizeof(output), size); + if (!handle) + return -EINVAL; - memcpy(&output, info->buffer.pointer, size); + atpx_arg.count = 2; + atpx_arg.pointer = &atpx_arg_elements[0]; - /* TODO: check version? */ - printk("ATPX version %u\n", output.version); + atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; + atpx_arg_elements[0].integer.value = cmd_id; - radeon_atpx_parse_functions(&atpx->functions, output.function_bits); + buf[2] = value & 0xff; + buf[3] = (value >> 8) & 0xff; -out: - kfree(info); - return err; -} + atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; + atpx_arg_elements[1].buffer.length = 4; + atpx_arg_elements[1].buffer.pointer = buf; -/** - * radeon_atpx_set_discrete_state - power up/down discrete GPU - * - * @atpx: atpx info struct - * @state: discrete GPU state (0 = power down, 1 = power up) - * - * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to - * power down/up the discrete GPU (all asics). - * Returns 0 on success, error on failure. - */ -static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) -{ - struct acpi_buffer params; - union acpi_object *info; - struct atpx_power_control input; - - if (atpx->functions.power_cntl) { - input.size = 3; - input.dgpu_state = state; - params.length = input.size; - params.pointer = &input; - info = radeon_atpx_call(atpx->handle, - ATPX_FUNCTION_POWER_CONTROL, - &params); - if (!info) - return -EIO; - kfree(info); + status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); + return -ENOSYS; } + kfree(buffer.pointer); + return 0; } -/** - * radeon_atpx_switch_disp_mux - switch display mux - * - * @atpx: atpx info struct - * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) - * - * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to - * switch the display mux between the discrete GPU and integrated GPU - * (all asics). - * Returns 0 on success, error on failure. - */ -static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id) +static int radeon_atpx_set_discrete_state(acpi_handle handle, int state) { - struct acpi_buffer params; - union acpi_object *info; - struct atpx_mux input; - - if (atpx->functions.disp_mux_cntl) { - input.size = 4; - input.mux = mux_id; - params.length = input.size; - params.pointer = &input; - info = radeon_atpx_call(atpx->handle, - ATPX_FUNCTION_DISPLAY_MUX_CONTROL, - &params); - if (!info) - return -EIO; - kfree(info); - } - return 0; + return radeon_atpx_execute(handle, ATPX_GPU_PWR, state); } -/** - * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux - * - * @atpx: atpx info struct - * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) - * - * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to - * switch the i2c/hpd mux between the discrete GPU and integrated GPU - * (all asics). - * Returns 0 on success, error on failure.
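[Note: the open-coded packing in radeon_atpx_execute() above stores the 16-bit argument little-endian at offset 2 of a fixed 4-byte input buffer, leaving bytes 0-1 zero. Assuming asm/unaligned.h is in scope there, an equivalent single-line form would be:]

	put_unaligned_le16(value, &buf[2]);	/* buf[2] = low byte, buf[3] = high byte */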
- */ -static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id) +static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) { - struct acpi_buffer params; - union acpi_object *info; - struct atpx_mux input; - - if (atpx->functions.i2c_mux_cntl) { - input.size = 4; - input.mux = mux_id; - params.length = input.size; - params.pointer = &input; - info = radeon_atpx_call(atpx->handle, - ATPX_FUNCTION_I2C_MUX_CONTROL, - &params); - if (!info) - return -EIO; - kfree(info); - } - return 0; + return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); } -/** - * radeon_atpx_switch_start - notify the sbios of a GPU switch - * - * @atpx: atpx info struct - * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) - * - * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX - * function to notify the sbios that a switch between the discrete GPU and - * integrated GPU has begun (all asics). - * Returns 0 on success, error on failure. - */ -static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id) +static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) { - struct acpi_buffer params; - union acpi_object *info; - struct atpx_mux input; - - if (atpx->functions.switch_start) { - input.size = 4; - input.mux = mux_id; - params.length = input.size; - params.pointer = &input; - info = radeon_atpx_call(atpx->handle, - ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION, - &params); - if (!info) - return -EIO; - kfree(info); - } - return 0; + return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); } -/** - * radeon_atpx_switch_end - notify the sbios of a GPU switch - * - * @atpx: atpx info struct - * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU) - * - * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX - * function to notify the sbios that a switch between the discrete GPU and - * integrated GPU has ended (all asics). - * Returns 0 on success, error on failure. - */ -static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id) +static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) { - struct acpi_buffer params; - union acpi_object *info; - struct atpx_mux input; - - if (atpx->functions.switch_end) { - input.size = 4; - input.mux = mux_id; - params.length = input.size; - params.pointer = &input; - info = radeon_atpx_call(atpx->handle, - ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION, - &params); - if (!info) - return -EIO; - kfree(info); - } - return 0; + return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); +} + +static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) +{ + return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); }
- */ static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) { - u16 gpu_id; + int gpu_id; if (id == VGA_SWITCHEROO_IGD) - gpu_id = ATPX_INTEGRATED_GPU; + gpu_id = ATPX_INTEGRATED; else - gpu_id = ATPX_DISCRETE_GPU; + gpu_id = ATPX_DISCRETE; - radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id); - radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id); - radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id); - radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id); + radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); + radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); return 0; } -/** - * radeon_atpx_switchto - switch to the requested GPU - * - * @id: GPU to switch to - * @state: requested power state (0 = off, 1 = on) - * - * Execute the necessary ATPX function to power down/up the discrete GPU - * (all asics). - * Returns 0 on success, error on failure. - */ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state) { @@ -368,18 +143,10 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, if (id == VGA_SWITCHEROO_IGD) return 0; - radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state); + radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state); return 0; } -/** - * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles - * - * @pdev: pci device - * - * Look up the ATPX and ATRM handles (all asics). - * Returns true if the handles are found, false if not. - */ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) { acpi_handle dhandle, atpx_handle; @@ -394,30 +161,18 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) return false; radeon_atpx_priv.dhandle = dhandle; - radeon_atpx_priv.atpx.handle = atpx_handle; + radeon_atpx_priv.atpx_handle = atpx_handle; return true; } -/** - * radeon_atpx_init - verify the ATPX interface - * - * Verify the ATPX interface (all asics). - * Returns 0 on success, error on failure. - */ static int radeon_atpx_init(void) { /* set up the ATPX handle */ - return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx); + + radeon_atpx_get_version(radeon_atpx_priv.atpx_handle); + return 0; } -/** - * radeon_atpx_get_client_id - get the client id - * - * @pdev: pci device - * - * look up whether we are the integrated or discrete GPU (all asics). - * Returns the client id. - */ static int radeon_atpx_get_client_id(struct pci_dev *pdev) { if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) @@ -433,12 +188,6 @@ static struct vga_switcheroo_handler radeon_atpx_handler = { .get_client_id = radeon_atpx_get_client_id, }; -/** - * radeon_atpx_detect - detect whether we have PX - * - * Check if we have a PX system (all asics). - * Returns true if we have a PX system, false if not. 
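[Note: radeon_atpx_switchto() above is the entry point vga_switcheroo invokes on a user-requested switch. For orientation, a switch to the integrated GPU expands to the sequence below; handle stands for the cached radeon_atpx_priv.atpx_handle, and the i2c-mux role (DDC/HPD routing) is taken from the kernel-doc being removed here:]

	radeon_atpx_switch_start(handle, ATPX_INTEGRATED);	/* tell the sbios a switch begins */
	radeon_atpx_switch_mux(handle, ATPX_INTEGRATED);	/* route the display mux to the IGP */
	radeon_atpx_switch_i2c_mux(handle, ATPX_INTEGRATED);	/* route i2c/hpd so EDID and hotplug follow */
	radeon_atpx_switch_end(handle, ATPX_INTEGRATED);	/* tell the sbios the switch is done */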
- */ static bool radeon_atpx_detect(void) { char acpi_method_name[255] = { 0 }; @@ -454,7 +203,7 @@ static bool radeon_atpx_detect(void) } if (has_atpx && vga_count == 2) { - acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); + acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; @@ -463,11 +212,6 @@ static bool radeon_atpx_detect(void) return false; } -/** - * radeon_register_atpx_handler - register with vga_switcheroo - * - * Register the PX callbacks with vga_switcheroo (all asics). - */ void radeon_register_atpx_handler(void) { bool r; @@ -480,11 +224,6 @@ void radeon_register_atpx_handler(void) vga_switcheroo_register_handler(&radeon_atpx_handler); } -/** - * radeon_unregister_atpx_handler - unregister with vga_switcheroo - * - * Unregister the PX callbacks with vga_switcheroo (all asics). - */ void radeon_unregister_atpx_handler(void) { vga_switcheroo_unregister_handler(); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index 8a73f0758903..f75247d42ffd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -3319,6 +3319,15 @@ static void combios_write_ram_size(struct drm_device *dev) WREG32(RADEON_CONFIG_MEMSIZE, mem_size); } +void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable) +{ + uint16_t dyn_clk_info = + combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); + + if (dyn_clk_info) + combios_parse_pll_table(dev, dyn_clk_info); +} + void radeon_combios_asic_init(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c index 69a142fc3d1d..895e628b60f8 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_connectors.c @@ -40,6 +40,10 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected); +extern void +radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, + struct drm_connector *drm_connector); + void radeon_connector_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -194,7 +198,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c } } -static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) +struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) { struct drm_mode_object *obj; struct drm_encoder *encoder; @@ -215,7 +219,7 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, return NULL; } -static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) +struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct drm_mode_object *obj; @@ -366,7 +370,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn } } -static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, +int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_device *dev = connector->dev; @@ -687,13 +691,13 @@ static int radeon_lvds_set_property(struct 
drm_connector *connector, } -static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { +struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { .get_modes = radeon_lvds_get_modes, .mode_valid = radeon_lvds_mode_valid, .best_encoder = radeon_best_single_encoder, }; -static const struct drm_connector_funcs radeon_lvds_connector_funcs = { +struct drm_connector_funcs radeon_lvds_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -805,13 +809,13 @@ radeon_vga_detect(struct drm_connector *connector, bool force) return ret; } -static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = { +struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = { .get_modes = radeon_vga_get_modes, .mode_valid = radeon_vga_mode_valid, .best_encoder = radeon_best_single_encoder, }; -static const struct drm_connector_funcs radeon_vga_connector_funcs = { +struct drm_connector_funcs radeon_vga_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_vga_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -875,13 +879,13 @@ radeon_tv_detect(struct drm_connector *connector, bool force) return ret; } -static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = { +struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = { .get_modes = radeon_tv_get_modes, .mode_valid = radeon_tv_mode_valid, .best_encoder = radeon_best_single_encoder, }; -static const struct drm_connector_funcs radeon_tv_connector_funcs = { +struct drm_connector_funcs radeon_tv_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_tv_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -1085,7 +1089,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) } /* okay need to be smart in here about which encoder to pick */ -static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) +struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -1175,13 +1179,13 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector, return MODE_OK; } -static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { +struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { .get_modes = radeon_dvi_get_modes, .mode_valid = radeon_dvi_mode_valid, .best_encoder = radeon_dvi_encoder, }; -static const struct drm_connector_funcs radeon_dvi_connector_funcs = { +struct drm_connector_funcs radeon_dvi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dvi_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -1458,13 +1462,13 @@ static int radeon_dp_mode_valid(struct drm_connector *connector, } } -static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { +struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { .get_modes = radeon_dp_get_modes, .mode_valid = radeon_dp_mode_valid, .best_encoder = radeon_dvi_encoder, }; -static const struct drm_connector_funcs radeon_dp_connector_funcs = { +struct drm_connector_funcs radeon_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = radeon_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, @@ -2004,4 +2008,15 @@ radeon_add_legacy_connector(struct drm_device *dev, 
connector->polled = DRM_CONNECTOR_POLL_HPD; connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); + if (connector_type == DRM_MODE_CONNECTOR_LVDS) { + struct drm_encoder *drm_encoder; + + list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) { + struct radeon_encoder *radeon_encoder; + + radeon_encoder = to_radeon_encoder(drm_encoder); + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS) + radeon_legacy_backlight_init(radeon_encoder, connector); + } + } } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cs.c b/trunk/drivers/gpu/drm/radeon/radeon_cs.c index d59eb59cdb81..b4a0db24f4dd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cs.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_cs.c @@ -32,7 +32,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt); -static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) +int radeon_cs_parser_relocs(struct radeon_cs_parser *p) { struct drm_device *ddev = p->rdev->ddev; struct radeon_cs_chunk *chunk; @@ -115,27 +115,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority return 0; } -static void radeon_cs_sync_to(struct radeon_cs_parser *p, - struct radeon_fence *fence) -{ - struct radeon_fence *other; - - if (!fence) - return; - - other = p->ib.sync_to[fence->ring]; - p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other); -} - static void radeon_cs_sync_rings(struct radeon_cs_parser *p) { int i; for (i = 0; i < p->nrelocs; i++) { - if (!p->relocs[i].robj) + struct radeon_fence *a, *b; + + if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj) continue; - radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj); + a = p->relocs[i].robj->tbo.sync_obj; + b = p->ib.sync_to[a->ring]; + p->ib.sync_to[a->ring] = radeon_fence_later(a, b); } } @@ -286,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return 0; } +static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser, + struct radeon_fence *fence) +{ + struct radeon_fpriv *fpriv = parser->filp->driver_priv; + struct radeon_vm *vm = &fpriv->vm; + struct radeon_bo_list *lobj; + + if (parser->chunk_ib_idx == -1) { + return; + } + if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) { + return; + } + + list_for_each_entry(lobj, &parser->validated, tv.head) { + struct radeon_bo_va *bo_va; + struct radeon_bo *rbo = lobj->bo; + + bo_va = radeon_bo_va(rbo, vm); + radeon_fence_unref(&bo_va->fence); + bo_va->fence = radeon_fence_ref(fence); + } +} + /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -299,6 +315,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) unsigned i; if (!error) { + /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */ + radeon_bo_vm_fence_va(parser, parser->ib.fence); ttm_eu_fence_buffer_objects(&parser->validated, parser->ib.fence); } else { @@ -345,7 +363,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, * uncached). 
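[Note: the radeon_bo_vm_fence_va() hunk above takes a fence reference for every validated mapping of a VM submission; its counterpart is the wait loop added to radeon_vm_bo_rmv() in radeon_gart.c later in this diff, which blocks on exactly that fence before the mapping is torn down. Reduced to its two ends, the exchange is (sketch; the BO must still be reserved on the submit side):]

	/* submit side, per bo_va, with fence == parser->ib.fence: */
	radeon_fence_unref(&bo_va->fence);
	bo_va->fence = radeon_fence_ref(fence);

	/* unmap side, before the PTEs are cleared: */
	r = radeon_fence_wait(bo_va->fence, false);
	radeon_fence_unref(&bo_va->fence);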
*/ r = radeon_ib_get(rdev, parser->ring, &parser->ib, - NULL, ib_chunk->length_dw * 4); + ib_chunk->length_dw * 4); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -362,6 +380,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, return r; } radeon_cs_sync_rings(parser); + parser->ib.vm_id = 0; r = radeon_ib_schedule(rdev, &parser->ib, NULL); if (r) { DRM_ERROR("Failed to schedule IB !\n"); @@ -372,15 +391,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev, static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, struct radeon_vm *vm) { - struct radeon_device *rdev = parser->rdev; struct radeon_bo_list *lobj; struct radeon_bo *bo; int r; - r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem); - if (r) { - return r; - } list_for_each_entry(lobj, &parser->validated, tv.head) { bo = lobj->bo; r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); @@ -412,7 +426,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, return -EINVAL; } r = radeon_ib_get(rdev, parser->ring, &parser->const_ib, - vm, ib_chunk->length_dw * 4); + ib_chunk->length_dw * 4); if (r) { DRM_ERROR("Failed to get const ib !\n"); return r; @@ -436,7 +450,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, return -EINVAL; } r = radeon_ib_get(rdev, parser->ring, &parser->ib, - vm, ib_chunk->length_dw * 4); + ib_chunk->length_dw * 4); if (r) { DRM_ERROR("Failed to get ib !\n"); return r; @@ -454,7 +468,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); - r = radeon_vm_alloc_pt(rdev, vm); + r = radeon_vm_bind(rdev, vm); if (r) { goto out; } @@ -463,21 +477,32 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, goto out; } radeon_cs_sync_rings(parser); - radeon_cs_sync_to(parser, vm->fence); - radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring)); + + parser->ib.vm_id = vm->id; + /* ib pool is bind at 0 in virtual address space, + * so gpu_addr is the offset inside the pool bo + */ + parser->ib.gpu_addr = parser->ib.sa_bo->soffset; if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { + parser->const_ib.vm_id = vm->id; + /* ib pool is bind at 0 in virtual address space, + * so gpu_addr is the offset inside the pool bo + */ + parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset; r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); } else { r = radeon_ib_schedule(rdev, &parser->ib, NULL); } +out: if (!r) { - radeon_vm_fence(rdev, vm, parser->ib.fence); + if (vm->fence) { + radeon_fence_unref(&vm->fence); + } + vm->fence = radeon_fence_ref(parser->ib.fence); } - -out: mutex_unlock(&vm->mutex); mutex_unlock(&rdev->vm_manager.lock); return r; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index 64a42647f08a..7a3daebd732d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -842,7 +842,7 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) * Validates certain module parameters and updates * the associated values used by the driver (all asics). 
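[Note: the radeon_cs_ib_vm_chunk() changes above are easier to follow as one flow. A condensed sketch, with error paths simplified and the SI const-IB branch omitted; example_vm_submit() is illustrative, not a driver symbol:]

static int example_vm_submit(struct radeon_device *rdev,
			     struct radeon_cs_parser *parser,
			     struct radeon_vm *vm)
{
	int r;

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);

	r = radeon_vm_bind(rdev, vm);		/* VMID + page table backing */
	if (!r)
		r = radeon_bo_vm_update_pte(parser, vm);	/* PTEs for validated BOs */
	if (!r) {
		radeon_cs_sync_rings(parser);
		parser->ib.vm_id = vm->id;
		/* the IB pool is mapped at VA 0, so the GPU address is just
		 * the suballocation offset inside the pool BO */
		parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}
	if (!r) {
		/* protect the VM (and its page table) until the IB retires;
		 * radeon_fence_unref() tolerates a NULL fence */
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(parser->ib.fence);
	}

	mutex_unlock(&vm->mutex);
	mutex_unlock(&rdev->vm_manager.lock);
	return r;
}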
*/ -static void radeon_check_arguments(struct radeon_device *rdev) +void radeon_check_arguments(struct radeon_device *rdev) { /* vramlimit must be a power of two */ switch (radeon_vram_limit) { @@ -1013,11 +1013,13 @@ int radeon_device_init(struct radeon_device *rdev, init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); + init_waitqueue_head(&rdev->irq.idle_queue); r = radeon_gem_init(rdev); if (r) return r; /* initialize vm here */ mutex_init(&rdev->vm_manager.lock); + rdev->vm_manager.use_bitmap = 1; rdev->vm_manager.max_pfn = 1 << 20; INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); @@ -1282,13 +1284,6 @@ int radeon_resume_kms(struct drm_device *dev) if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); radeon_atom_disp_eng_pll_init(rdev); - /* turn on the BL */ - if (rdev->mode_info.bl_encoder) { - u8 bl_level = radeon_get_backlight_level(rdev, - rdev->mode_info.bl_encoder); - radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, - bl_level); - } } /* reset hpd state */ radeon_hpd_init(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index 27ece75b4789..8c593ea82c41 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -64,11 +64,9 @@ * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query * 2.21.0 - r600-r700: FMASK and CMASK * 2.22.0 - r600 only: RESOLVE_BOX allowed - * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880 - * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 24 +#define KMS_DRIVER_MINOR 22 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c index e66c807df9e6..74670696277d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c @@ -29,14 +29,6 @@ #include "radeon.h" #include "atom.h" -extern void -radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, - struct drm_connector *drm_connector); -extern void -radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, - struct drm_connector *drm_connector); - - static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -161,7 +153,6 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 void radeon_link_encoder_connector(struct drm_device *dev) { - struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; struct drm_encoder *encoder; @@ -172,16 +163,8 @@ radeon_link_encoder_connector(struct drm_device *dev) radeon_connector = to_radeon_connector(connector); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->devices & radeon_connector->devices) { + if (radeon_encoder->devices & radeon_connector->devices) drm_mode_connector_attach_encoder(connector, encoder); - if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - if (rdev->is_atom_bios) - radeon_atom_backlight_init(radeon_encoder, connector); - else - radeon_legacy_backlight_init(radeon_encoder, connector); - rdev->mode_info.bl_encoder = radeon_encoder; - } - } } } } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fb.c 
b/trunk/drivers/gpu/drm/radeon/radeon_fb.c index 6ada72c6d7a1..5906914a78bc 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fb.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fb.c @@ -316,6 +316,22 @@ static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, return new_fb; } +static char *mode_option; +int radeon_parse_options(char *options) +{ + char *this_opt; + + if (!options || !*options) + return 0; + + while ((this_opt = strsep(&options, ",")) != NULL) { + if (!*this_opt) + continue; + mode_option = this_opt; + } + return 0; +} + void radeon_fb_output_poll_changed(struct radeon_device *rdev) { drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c index 1eb3db52a36e..2a59375dbe52 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c @@ -399,7 +399,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) return 0; } -static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) +bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) { unsigned i; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index 753b7ca3c807..bb3b7fe05ccd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -422,18 +422,6 @@ void radeon_gart_fini(struct radeon_device *rdev) * TODO bind a default page at vm initialization for default address */ -/** - * radeon_vm_directory_size - returns the size of the page directory in bytes - * - * @rdev: radeon_device pointer - * - * Calculate the size of the page directory in bytes (cayman+). - */ -static unsigned radeon_vm_directory_size(struct radeon_device *rdev) -{ - return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8; -} - /** * radeon_vm_manager_init - init the vm manager * @@ -447,15 +435,12 @@ int radeon_vm_manager_init(struct radeon_device *rdev) struct radeon_vm *vm; struct radeon_bo_va *bo_va; int r; - unsigned size; if (!rdev->vm_manager.enabled) { + /* mark first vm as always in use, it's the system one */ /* allocate enough for 2 full VM pts */ - size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); - size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8); - size *= 2; r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, - size, + rdev->vm_manager.max_pfn * 8 * 2, RADEON_GEM_DOMAIN_VRAM); if (r) { dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", @@ -463,10 +448,10 @@ int radeon_vm_manager_init(struct radeon_device *rdev) return r; } - r = radeon_asic_vm_init(rdev); + r = rdev->vm_manager.funcs->init(rdev); if (r) return r; - + rdev->vm_manager.enabled = true; r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); @@ -476,36 +461,73 @@ int radeon_vm_manager_init(struct radeon_device *rdev) /* restore page table */ list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { - if (vm->sa_bo == NULL) + if (vm->id == -1) continue; list_for_each_entry(bo_va, &vm->va, vm_list) { + struct ttm_mem_reg *mem = NULL; + if (bo_va->valid) + mem = &bo_va->bo->tbo.mem; + bo_va->valid = false; + r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem); + if (r) { + DRM_ERROR("Failed to update pte for vm %d!\n", vm->id); + } + } + + r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id); + if (r) { + DRM_ERROR("Failed to bind vm %d!\n", vm->id); } } return 0; } +/* global mutex must be lock */ /** - * 
radeon_vm_free_pt - free the page table for a specific vm + * radeon_vm_unbind_locked - unbind a specific vm * * @rdev: radeon_device pointer * @vm: vm to unbind * - * Free the page table of a specific vm (cayman+). - * - * Global and local mutex must be lock! + * Unbind the requested vm (cayman+). + * Wait for use of the VM to finish, then unbind the page table, + * and free the page table memory. */ -static void radeon_vm_free_pt(struct radeon_device *rdev, +static void radeon_vm_unbind_locked(struct radeon_device *rdev, struct radeon_vm *vm) { struct radeon_bo_va *bo_va; - if (!vm->sa_bo) + if (vm->id == -1) { return; + } + /* wait for vm use to end */ + while (vm->fence) { + int r; + r = radeon_fence_wait(vm->fence, false); + if (r) + DRM_ERROR("error while waiting for fence: %d\n", r); + if (r == -EDEADLK) { + mutex_unlock(&rdev->vm_manager.lock); + r = radeon_gpu_reset(rdev); + mutex_lock(&rdev->vm_manager.lock); + if (!r) + continue; + } + break; + } + radeon_fence_unref(&vm->fence); + + /* hw unbind */ + rdev->vm_manager.funcs->unbind(rdev, vm); + rdev->vm_manager.use_bitmap &= ~(1 << vm->id); list_del_init(&vm->list); - radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence); + vm->id = -1; + radeon_sa_bo_free(rdev, &vm->sa_bo, NULL); + vm->pt = NULL; list_for_each_entry(bo_va, &vm->va, vm_list) { bo_va->valid = false; @@ -522,22 +544,16 @@ static void radeon_vm_free_pt(struct radeon_device *rdev, void radeon_vm_manager_fini(struct radeon_device *rdev) { struct radeon_vm *vm, *tmp; - int i; if (!rdev->vm_manager.enabled) return; mutex_lock(&rdev->vm_manager.lock); - /* free all allocated page tables */ + /* unbind all active vm */ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { - mutex_lock(&vm->mutex); - radeon_vm_free_pt(rdev, vm); - mutex_unlock(&vm->mutex); + radeon_vm_unbind_locked(rdev, vm); } - for (i = 0; i < RADEON_NUM_VM; ++i) { - radeon_fence_unref(&rdev->vm_manager.active[i]); - } - radeon_asic_vm_fini(rdev); + rdev->vm_manager.funcs->fini(rdev); mutex_unlock(&rdev->vm_manager.lock); radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); @@ -545,34 +561,46 @@ void radeon_vm_manager_fini(struct radeon_device *rdev) rdev->vm_manager.enabled = false; } +/* global mutex must be locked */ /** - * radeon_vm_alloc_pt - allocates a page table for a VM + * radeon_vm_unbind - locked version of unbind + * + * @rdev: radeon_device pointer + * @vm: vm to unbind + * + * Locked version that wraps radeon_vm_unbind_locked (cayman+). + */ +void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) +{ + mutex_lock(&vm->mutex); + radeon_vm_unbind_locked(rdev, vm); + mutex_unlock(&vm->mutex); +} + +/* global and local mutex must be locked */ +/** + * radeon_vm_bind - bind a page table to a VMID * * @rdev: radeon_device pointer * @vm: vm to bind * - * Allocate a page table for the requested vm (cayman+). - * Also starts to populate the page table. + * Bind the requested vm (cayman+). + * Suballocate memory for the page table, allocate a VMID + * and bind the page table to it, and finally start to populate + * the page table. * Returns 0 for success, error for failure. - * - * Global and local mutex must be locked! 
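[Note: the wait loop added to radeon_vm_unbind_locked() above is an instance of a recurring idiom in this diff (radeon_vm_bo_rmv() below uses it too): radeon_fence_wait() returning -EDEADLK signals a detected GPU lockup, so the caller resets the GPU and, if the reset succeeds, retries the wait. Skeleton of the idiom; the unbind path additionally drops the VM manager mutex around the reset:]

	while (fence) {
		int r = radeon_fence_wait(fence, false);
		if (r == -EDEADLK) {
			/* wait aborted by a detected lockup: reset, retry */
			r = radeon_gpu_reset(rdev);
			if (!r)
				continue;
		}
		break;	/* fence signaled, other error, or reset failed */
	}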
*/ -int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) +int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) { struct radeon_vm *vm_evict; - int r; - u64 *pd_addr; - int tables_size; + unsigned i; + int id = -1, r; if (vm == NULL) { return -EINVAL; } - /* allocate enough to cover the current VM size */ - tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev)); - tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8); - - if (vm->sa_bo != NULL) { + if (vm->id != -1) { /* update lru */ list_del_init(&vm->list); list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); @@ -581,215 +609,98 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm) retry: r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo, - tables_size, RADEON_GPU_PAGE_SIZE, false); - if (r == -ENOMEM) { + RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8), + RADEON_GPU_PAGE_SIZE, false); + if (r) { if (list_empty(&rdev->vm_manager.lru_vm)) { return r; } vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); - mutex_lock(&vm_evict->mutex); - radeon_vm_free_pt(rdev, vm_evict); - mutex_unlock(&vm_evict->mutex); + radeon_vm_unbind(rdev, vm_evict); goto retry; - - } else if (r) { - return r; } + vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo); + vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo); + memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8)); - pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo); - vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo); - memset(pd_addr, 0, tables_size); - - list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); - return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, - &rdev->ring_tmp_bo.bo->tbo.mem); -} - -/** - * radeon_vm_grab_id - allocate the next free VMID - * - * @rdev: radeon_device pointer - * @vm: vm to allocate id for - * @ring: ring we want to submit job to - * - * Allocate an id for the vm (cayman+). - * Returns the fence we need to sync to (if any). - * - * Global and local mutex must be locked! - */ -struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, - struct radeon_vm *vm, int ring) -{ - struct radeon_fence *best[RADEON_NUM_RINGS] = {}; - unsigned choices[2] = {}; - unsigned i; - - /* check if the id is still valid */ - if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id]) - return NULL; - - /* we definately need to flush */ - radeon_fence_unref(&vm->last_flush); - - /* skip over VMID 0, since it is the system VM */ - for (i = 1; i < rdev->vm_manager.nvm; ++i) { - struct radeon_fence *fence = rdev->vm_manager.active[i]; - - if (fence == NULL) { - /* found a free one */ - vm->id = i; - return NULL; - } - - if (radeon_fence_is_earlier(fence, best[fence->ring])) { - best[fence->ring] = fence; - choices[fence->ring == ring ? 0 : 1] = i; +retry_id: + /* search for free vm */ + for (i = 0; i < rdev->vm_manager.nvm; i++) { + if (!(rdev->vm_manager.use_bitmap & (1 << i))) { + id = i; + break; } } - - for (i = 0; i < 2; ++i) { - if (choices[i]) { - vm->id = choices[i]; - return rdev->vm_manager.active[choices[i]]; - } + /* evict vm if necessary */ + if (id == -1) { + vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); + radeon_vm_unbind(rdev, vm_evict); + goto retry_id; } - /* should never happen */ - BUG(); - return NULL; -} - -/** - * radeon_vm_fence - remember fence for vm - * - * @rdev: radeon_device pointer - * @vm: vm we want to fence - * @fence: fence to remember - * - * Fence the vm (cayman+). 
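[Note on the retry_id scan above: use_bitmap is a plain bitmask of allocated VMIDs, with bit 0 pre-set for the system VM (use_bitmap = 1 in the radeon_device.c hunk earlier), so the linear scan is equivalent to a find-first-zero-bit. Assuming nvm fits below BITS_PER_LONG, so that a clear bit always exists:]

	unsigned long used = rdev->vm_manager.use_bitmap;
	int id = ffz(used);	/* lowest clear bit = lowest free VMID */

	if (id >= rdev->vm_manager.nvm)
		id = -1;	/* everything in use: evict the LRU vm */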
- * Set the fence used to protect page table and id. - * - * Global and local mutex must be locked! - */ -void radeon_vm_fence(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_fence *fence) -{ - radeon_fence_unref(&rdev->vm_manager.active[vm->id]); - rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); - - radeon_fence_unref(&vm->fence); - vm->fence = radeon_fence_ref(fence); -} - -/** - * radeon_vm_bo_find - find the bo_va for a specific vm & bo - * - * @vm: requested vm - * @bo: requested buffer object - * - * Find @bo inside the requested vm (cayman+). - * Search inside the @bos vm list for the requested vm - * Returns the found bo_va or NULL if none is found - * - * Object has to be reserved! - */ -struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, - struct radeon_bo *bo) -{ - struct radeon_bo_va *bo_va; - - list_for_each_entry(bo_va, &bo->va, bo_list) { - if (bo_va->vm == vm) { - return bo_va; - } + /* do hw bind */ + r = rdev->vm_manager.funcs->bind(rdev, vm, id); + if (r) { + radeon_sa_bo_free(rdev, &vm->sa_bo, NULL); + return r; } - return NULL; + rdev->vm_manager.use_bitmap |= 1 << id; + vm->id = id; + list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); + return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, + &rdev->ring_tmp_bo.bo->tbo.mem); } +/* object have to be reserved */ /** * radeon_vm_bo_add - add a bo to a specific vm * * @rdev: radeon_device pointer * @vm: requested vm * @bo: radeon buffer object + * @offset: requested offset of the buffer in the VM address space + * @flags: attributes of pages (read/write/valid/etc.) * * Add @bo into the requested vm (cayman+). - * Add @bo to the list of bos associated with the vm - * Returns newly added bo_va or NULL for failure - * - * Object has to be reserved! + * Add @bo to the list of bos associated with the vm and validate + * the offset requested within the vm address space. + * Returns 0 for success, error for failure. */ -struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, - struct radeon_vm *vm, - struct radeon_bo *bo) +int radeon_vm_bo_add(struct radeon_device *rdev, + struct radeon_vm *vm, + struct radeon_bo *bo, + uint64_t offset, + uint32_t flags) { - struct radeon_bo_va *bo_va; + struct radeon_bo_va *bo_va, *tmp; + struct list_head *head; + uint64_t size = radeon_bo_size(bo), last_offset = 0; + unsigned last_pfn; bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); if (bo_va == NULL) { - return NULL; + return -ENOMEM; } bo_va->vm = vm; bo_va->bo = bo; - bo_va->soffset = 0; - bo_va->eoffset = 0; - bo_va->flags = 0; + bo_va->soffset = offset; + bo_va->eoffset = offset + size; + bo_va->flags = flags; bo_va->valid = false; - bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->bo_list); INIT_LIST_HEAD(&bo_va->vm_list); + /* make sure object fit at this offset */ + if (bo_va->soffset >= bo_va->eoffset) { + kfree(bo_va); + return -EINVAL; + } - mutex_lock(&vm->mutex); - list_add(&bo_va->vm_list, &vm->va); - list_add_tail(&bo_va->bo_list, &bo->va); - mutex_unlock(&vm->mutex); - - return bo_va; -} - -/** - * radeon_vm_bo_set_addr - set bos virtual address inside a vm - * - * @rdev: radeon_device pointer - * @bo_va: bo_va to store the address - * @soffset: requested offset of the buffer in the VM address space - * @flags: attributes of pages (read/write/valid/etc.) - * - * Set offset of @bo_va (cayman+). - * Validate and set the offset requested within the vm address space. - * Returns 0 for success, error for failure. - * - * Object has to be reserved! 
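[Note: with radeon_vm_bo_add() above taking the offset and flags directly, creating a mapping is now a single call. The diff's own use for the IB pool, in radeon_vm_init() later in this file, is the canonical example:]

	/* map the IB pool read-only at VA 0 of this client's address space */
	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);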
- */ -int radeon_vm_bo_set_addr(struct radeon_device *rdev, - struct radeon_bo_va *bo_va, - uint64_t soffset, - uint32_t flags) -{ - uint64_t size = radeon_bo_size(bo_va->bo); - uint64_t eoffset, last_offset = 0; - struct radeon_vm *vm = bo_va->vm; - struct radeon_bo_va *tmp; - struct list_head *head; - unsigned last_pfn; - - if (soffset) { - /* make sure object fit at this offset */ - eoffset = soffset + size; - if (soffset >= eoffset) { - return -EINVAL; - } - - last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; - if (last_pfn > rdev->vm_manager.max_pfn) { - dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", - last_pfn, rdev->vm_manager.max_pfn); - return -EINVAL; - } - - } else { - eoffset = last_pfn = 0; + last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE; + if (last_pfn > rdev->vm_manager.max_pfn) { + kfree(bo_va); + dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", + last_pfn, rdev->vm_manager.max_pfn); + return -EINVAL; } mutex_lock(&vm->mutex); @@ -802,7 +713,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, if (last_pfn > vm->last_pfn) { /* grow va space 32M by 32M */ unsigned align = ((32 << 20) >> 12) - 1; - radeon_vm_free_pt(rdev, vm); + radeon_vm_unbind_locked(rdev, vm); vm->last_pfn = (last_pfn + align) & ~align; } mutex_unlock(&rdev->vm_manager.lock); @@ -810,60 +721,68 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, head = &vm->va; last_offset = 0; list_for_each_entry(tmp, &vm->va, vm_list) { - if (bo_va == tmp) { - /* skip over currently modified bo */ - continue; - } - - if (soffset >= last_offset && eoffset <= tmp->soffset) { + if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) { /* bo can be added before this one */ break; } - if (eoffset > tmp->soffset && soffset < tmp->eoffset) { + if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) { /* bo and tmp overlap, invalid offset */ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", - bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, + bo, (unsigned)bo_va->soffset, tmp->bo, (unsigned)tmp->soffset, (unsigned)tmp->eoffset); + kfree(bo_va); mutex_unlock(&vm->mutex); return -EINVAL; } last_offset = tmp->eoffset; head = &tmp->vm_list; } - - bo_va->soffset = soffset; - bo_va->eoffset = eoffset; - bo_va->flags = flags; - bo_va->valid = false; - list_move(&bo_va->vm_list, head); - + list_add(&bo_va->vm_list, head); + list_add_tail(&bo_va->bo_list, &bo->va); mutex_unlock(&vm->mutex); return 0; } /** - * radeon_vm_map_gart - get the physical address of a gart page + * radeon_vm_get_addr - get the physical address of the page * * @rdev: radeon_device pointer - * @addr: the unmapped addr + * @mem: ttm mem + * @pfn: pfn * * Look up the physical address of the page that the pte resolves * to (cayman+). * Returns the physical address of the page. 
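[Note: the list walk above keeps vm->va sorted by start offset, so the conflict test only compares the new range against candidates in order. For half-open ranges [s, e), the general predicate any such check approximates is:]

	static inline bool example_ranges_overlap(uint64_t s0, uint64_t e0,
						  uint64_t s1, uint64_t e1)
	{
		return s0 < e1 && s1 < e0;	/* both intervals half-open [s, e) */
	}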
*/ -uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) +static u64 radeon_vm_get_addr(struct radeon_device *rdev, + struct ttm_mem_reg *mem, + unsigned pfn) { - uint64_t result; - - /* page table offset */ - result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); - - return result; + u64 addr = 0; + + switch (mem->mem_type) { + case TTM_PL_VRAM: + addr = (mem->start << PAGE_SHIFT); + addr += pfn * RADEON_GPU_PAGE_SIZE; + addr += rdev->vm_manager.vram_base_offset; + break; + case TTM_PL_TT: + /* offset inside page table */ + addr = mem->start << PAGE_SHIFT; + addr += pfn * RADEON_GPU_PAGE_SIZE; + addr = addr >> PAGE_SHIFT; + /* page table offset */ + addr = rdev->gart.pages_addr[addr]; + /* in case cpu page size != gpu page size*/ + addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK); + break; + default: + break; + } + return addr; } +/* object have to be reserved & global and local mutex must be locked */ /** * radeon_vm_bo_update_pte - map a bo into the vm page table * @@ -874,160 +793,103 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) * * Fill in the page table entries for @bo (cayman+). * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved & global and local mutex must be locked! */ int radeon_vm_bo_update_pte(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo, struct ttm_mem_reg *mem) { - unsigned ridx = rdev->asic->vm.pt_ring_index; - struct radeon_ring *ring = &rdev->ring[ridx]; - struct radeon_semaphore *sem = NULL; struct radeon_bo_va *bo_va; - unsigned nptes, npdes, ndw; - uint64_t pe, addr; - uint64_t pfn; - int r; + unsigned ngpu_pages, i; + uint64_t addr = 0, pfn; + uint32_t flags; /* nothing to do if vm isn't bound */ - if (vm->sa_bo == NULL) + if (vm->id == -1) return 0; - bo_va = radeon_vm_bo_find(vm, bo); + bo_va = radeon_bo_va(bo, vm); if (bo_va == NULL) { dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); return -EINVAL; } - if (!bo_va->soffset) { - dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", - bo, vm); - return -EINVAL; - } - - if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) + if (bo_va->valid && mem) return 0; + ngpu_pages = radeon_bo_ngpu_pages(bo); bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; if (mem) { - addr = mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_SYSTEM) { bo_va->flags |= RADEON_VM_PAGE_VALID; bo_va->valid = true; } if (mem->mem_type == TTM_PL_TT) { bo_va->flags |= RADEON_VM_PAGE_SYSTEM; - } else { - addr += rdev->vm_manager.vram_base_offset; } - } else { - addr = 0; - bo_va->valid = false; } - - if (vm->fence && radeon_fence_signaled(vm->fence)) { - radeon_fence_unref(&vm->fence); - } - - if (vm->fence && vm->fence->ring != ridx) { - r = radeon_semaphore_create(rdev, &sem); - if (r) { - return r; + pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE; + flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags); + for (i = 0, addr = 0; i < ngpu_pages; i++) { + if (mem && bo_va->valid) { + addr = radeon_vm_get_addr(rdev, mem, i); } + rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags); } - - /* estimate number of dw needed */ - /* reserve space for 32-bit padding */ - ndw = 32; - - nptes = radeon_bo_ngpu_pages(bo); - - pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE); - - /* handle cases where a bo spans several pdes */ - npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) - - (pfn & 
~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE; - - /* reserve space for one header for every 2k dwords */ - ndw += (nptes >> 11) * 3; - /* reserve space for pte addresses */ - ndw += nptes * 2; - - /* reserve space for one header for every 2k dwords */ - ndw += (npdes >> 11) * 3; - /* reserve space for pde addresses */ - ndw += npdes * 2; - - r = radeon_ring_lock(rdev, ring, ndw); - if (r) { - return r; - } - - if (sem && radeon_fence_need_sync(vm->fence, ridx)) { - radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx); - radeon_fence_note_sync(vm->fence, ridx); - } - - /* update page table entries */ - pe = vm->pd_gpu_addr; - pe += radeon_vm_directory_size(rdev); - pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8; - - radeon_asic_vm_set_page(rdev, pe, addr, nptes, - RADEON_GPU_PAGE_SIZE, bo_va->flags); - - /* update page directory entries */ - addr = pe; - - pe = vm->pd_gpu_addr; - pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8; - - radeon_asic_vm_set_page(rdev, pe, addr, npdes, - RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID); - - radeon_fence_unref(&vm->fence); - r = radeon_fence_emit(rdev, &vm->fence, ridx); - if (r) { - radeon_ring_unlock_undo(rdev, ring); - return r; - } - radeon_ring_unlock_commit(rdev, ring); - radeon_semaphore_free(rdev, &sem, vm->fence); - radeon_fence_unref(&vm->last_flush); + rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm); return 0; } +/* object has to be reserved */ /** * radeon_vm_bo_rmv - remove a bo to a specific vm * * @rdev: radeon_device pointer - * @bo_va: requested bo_va + * @vm: requested vm + * @bo: radeon buffer object * - * Remove @bo_va->bo from the requested vm (cayman+). - * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and - * remove the ptes for @bo_va in the page table. + * Remove @bo from the requested vm (cayman+). + * Remove @bo from the list of bos associated with the vm and + * remove the ptes for @bo in the page table. * Returns 0 for success. - * - * Object have to be reserved! */ int radeon_vm_bo_rmv(struct radeon_device *rdev, - struct radeon_bo_va *bo_va) + struct radeon_vm *vm, + struct radeon_bo *bo) { + struct radeon_bo_va *bo_va; int r; + bo_va = radeon_bo_va(bo, vm); + if (bo_va == NULL) + return 0; + + /* wait for va use to end */ + while (bo_va->fence) { + r = radeon_fence_wait(bo_va->fence, false); + if (r) { + DRM_ERROR("error while waiting for fence: %d\n", r); + } + if (r == -EDEADLK) { + r = radeon_gpu_reset(rdev); + if (!r) + continue; + } + break; + } + radeon_fence_unref(&bo_va->fence); + mutex_lock(&rdev->vm_manager.lock); - mutex_lock(&bo_va->vm->mutex); - r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); + mutex_lock(&vm->mutex); + radeon_vm_bo_update_pte(rdev, vm, bo, NULL); mutex_unlock(&rdev->vm_manager.lock); list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); + mutex_unlock(&vm->mutex); list_del(&bo_va->bo_list); kfree(bo_va); - return r; + return 0; } /** @@ -1063,23 +925,27 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev, */ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) { - struct radeon_bo_va *bo_va; int r; - vm->id = 0; + vm->id = -1; vm->fence = NULL; - vm->last_pfn = 0; mutex_init(&vm->mutex); INIT_LIST_HEAD(&vm->list); INIT_LIST_HEAD(&vm->va); - + /* SI requires equal sized PTs for all VMs, so always set + * last_pfn to max_pfn. cayman allows variable sized + * pts so we can grow them as needed. Once we switch + * to two level pts we can unify this again. 
+ */ + if (rdev->family >= CHIP_TAHITI) + vm->last_pfn = rdev->vm_manager.max_pfn; + else + vm->last_pfn = 0; /* map the ib pool buffer at 0 in virtual address space, set * read only */ - bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); + r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0, + RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); return r; } @@ -1099,7 +965,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); - radeon_vm_free_pt(rdev, vm); + radeon_vm_unbind_locked(rdev, vm); mutex_unlock(&rdev->vm_manager.lock); /* remove all bo at this point non are busy any more because unbind @@ -1107,9 +973,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) */ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { - bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo); + bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); list_del_init(&bo_va->bo_list); list_del_init(&bo_va->vm_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); kfree(bo_va); } @@ -1121,11 +988,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_reserve(bo_va->bo, false); if (!r) { list_del_init(&bo_va->bo_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(bo_va->bo); kfree(bo_va); } } - radeon_fence_unref(&vm->fence); - radeon_fence_unref(&vm->last_flush); mutex_unlock(&vm->mutex); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c index 6579befa5101..1b57b0058ad6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c @@ -124,30 +124,6 @@ void radeon_gem_fini(struct radeon_device *rdev) */ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { - struct radeon_bo *rbo = gem_to_radeon_bo(obj); - struct radeon_device *rdev = rbo->rdev; - struct radeon_fpriv *fpriv = file_priv->driver_priv; - struct radeon_vm *vm = &fpriv->vm; - struct radeon_bo_va *bo_va; - int r; - - if (rdev->family < CHIP_CAYMAN) { - return 0; - } - - r = radeon_bo_reserve(rbo, false); - if (r) { - return r; - } - - bo_va = radeon_vm_bo_find(vm, rbo); - if (!bo_va) { - bo_va = radeon_vm_bo_add(rdev, vm, rbo); - } else { - ++bo_va->ref_count; - } - radeon_bo_unreserve(rbo); - return 0; } @@ -158,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_device *rdev = rbo->rdev; struct radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_vm *vm = &fpriv->vm; - struct radeon_bo_va *bo_va; - int r; if (rdev->family < CHIP_CAYMAN) { return; } - r = radeon_bo_reserve(rbo, true); - if (r) { - dev_err(rdev->dev, "leaking bo va because " - "we fail to reserve bo (%d)\n", r); + if (radeon_bo_reserve(rbo, false)) { + dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); return; } - bo_va = radeon_vm_bo_find(vm, rbo); - if (bo_va) { - if (--bo_va->ref_count == 0) { - radeon_vm_bo_rmv(rdev, bo_va); - } - } + radeon_vm_bo_rmv(rdev, vm, rbo); radeon_bo_unreserve(rbo); } @@ -492,24 +459,19 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, drm_gem_object_unreference_unlocked(gobj); return r; } - bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); - if (!bo_va) { - args->operation = RADEON_VA_RESULT_ERROR; - drm_gem_object_unreference_unlocked(gobj); - return -ENOENT; - } - 
switch (args->operation) { case RADEON_VA_MAP: - if (bo_va->soffset) { + bo_va = radeon_bo_va(rbo, &fpriv->vm); + if (bo_va) { args->operation = RADEON_VA_RESULT_VA_EXIST; args->offset = bo_va->soffset; goto out; } - r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); + r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo, + args->offset, args->flags); break; case RADEON_VA_UNMAP: - r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0); + r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo); break; default: break; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ioc32.c b/trunk/drivers/gpu/drm/radeon/radeon_ioc32.c index c4bb2269be10..48b7cea31e08 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ioc32.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ioc32.c @@ -369,7 +369,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, #define compat_radeon_cp_setparam NULL #endif /* X86_64 || IA64 */ -static drm_ioctl_compat_t *radeon_compat_ioctls[] = { +drm_ioctl_compat_t *radeon_compat_ioctls[] = { [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, diff --git a/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c index b06fa5936100..afaa1727abd2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -99,6 +99,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) /* Disable *all* interrupts */ for (i = 0; i < RADEON_NUM_RINGS; i++) atomic_set(&rdev->irq.ring_int[i], 0); + rdev->irq.gui_idle = false; for (i = 0; i < RADEON_MAX_HPD_PINS; i++) rdev->irq.hpd[i] = false; for (i = 0; i < RADEON_MAX_CRTCS; i++) { @@ -146,6 +147,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) /* Disable *all* interrupts */ for (i = 0; i < RADEON_NUM_RINGS; i++) atomic_set(&rdev->irq.ring_int[i], 0); + rdev->irq.gui_idle = false; for (i = 0; i < RADEON_MAX_HPD_PINS; i++) rdev->irq.hpd[i] = false; for (i = 0; i < RADEON_MAX_CRTCS; i++) { @@ -202,16 +204,6 @@ static bool radeon_msi_ok(struct radeon_device *rdev) (rdev->pdev->subsystem_device == 0x01fd)) return true; - /* Gateway RS690 only seems to work with MSIs. */ - if ((rdev->pdev->device == 0x791f) && - (rdev->pdev->subsystem_vendor == 0x107b) && - (rdev->pdev->subsystem_device == 0x0185)) - return true; - - /* try and enable MSIs by default on all RS690s */ - if (rdev->family == CHIP_RS690) - return true; - /* RV515 seems to have MSI issues where it loses * MSI rearms occasionally. This leads to lockups and freezes. * disable it by default. @@ -465,3 +457,34 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask) spin_unlock_irqrestore(&rdev->irq.lock, irqflags); } +/** + * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle + * + * @rdev: radeon device pointer + * + * Enables the GUI idle interrupt and waits for it to fire (r6xx+). + * This is currently used to make sure the 3D engine is idle for power + * management, but should be replaced with proper fence waits. + * GUI idle interrupts don't work very well on pre-r6xx hw, and they also + * do not take into account other aspects of the chip that may be busy. + * DO NOT USE GOING FORWARD. 
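+ * + * Illustrative usage sketch (the return value follows the + * wait_event_timeout() convention, so zero means the GUI idle interrupt + * never fired before the timeout): + * + * r = radeon_irq_kms_wait_gui_idle(rdev); + * if (r == 0) + * DRM_DEBUG("GUI idle wait timed out\n"); + *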
+ */ +int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev) +{ + unsigned long irqflags; + int r; + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + rdev->irq.gui_idle = true; + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + + r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev), + msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); + + spin_lock_irqsave(&rdev->irq.lock, irqflags); + rdev->irq.gui_idle = false; + radeon_irq_set(rdev); + spin_unlock_irqrestore(&rdev->irq.lock, irqflags); + return r; +} diff --git a/trunk/drivers/gpu/drm/radeon/radeon_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_kms.c index cb8e94d1a2b2..414b4acf6947 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_kms.c @@ -51,7 +51,6 @@ int radeon_driver_unload_kms(struct drm_device *dev) if (rdev == NULL) return 0; - radeon_acpi_fini(rdev); radeon_modeset_fini(rdev); radeon_device_fini(rdev); kfree(rdev); @@ -104,6 +103,11 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) goto out; } + /* Call ACPI methods */ + acpi_status = radeon_acpi_init(rdev); + if (acpi_status) + dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n"); + /* Again modeset_init should fail only on fatal error * otherwise it should provide enough functionalities * for shadowfb to run @@ -111,17 +115,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) r = radeon_modeset_init(rdev); if (r) dev_err(&dev->pdev->dev, "Fatal error during modeset init\n"); - - /* Call ACPI methods: require modeset init - * but failure is not fatal - */ - if (!r) { - acpi_status = radeon_acpi_init(rdev); - if (acpi_status) - dev_dbg(&dev->pdev->dev, - "Error during ACPI methods call\n"); - } - out: if (r) radeon_driver_unload_kms(dev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 5677a424b585..94b4a1c12893 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -206,6 +206,11 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); } +void radeon_restore_common_regs(struct drm_device *dev) +{ + /* don't need this yet */ +} + static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -290,7 +295,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, return 1; } -static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) +void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 8ad9c5f16014..670e9910f869 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -271,6 +271,13 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) +#define MAX_RADEON_LEVEL 0xFF + +struct radeon_backlight_privdata { + struct radeon_encoder *encoder; + uint8_t negative; +}; + static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) { struct radeon_backlight_privdata *pdata = bl_get_data(bd); @@ -279,33 +286,21 @@ static uint8_t 
radeon_legacy_lvds_level(struct backlight_device *bd) /* Convert brightness to hardware level */ if (bd->props.brightness < 0) level = 0; - else if (bd->props.brightness > RADEON_MAX_BL_LEVEL) - level = RADEON_MAX_BL_LEVEL; + else if (bd->props.brightness > MAX_RADEON_LEVEL) + level = MAX_RADEON_LEVEL; else level = bd->props.brightness; if (pdata->negative) - level = RADEON_MAX_BL_LEVEL - level; + level = MAX_RADEON_LEVEL - level; return level; } -u8 -radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder) -{ - struct drm_device *dev = radeon_encoder->base.dev; - struct radeon_device *rdev = dev->dev_private; - u8 backlight_level; - - backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> - RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; - - return backlight_level; -} - -void -radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) +static int radeon_legacy_backlight_update_status(struct backlight_device *bd) { + struct radeon_backlight_privdata *pdata = bl_get_data(bd); + struct radeon_encoder *radeon_encoder = pdata->encoder; struct drm_device *dev = radeon_encoder->base.dev; struct radeon_device *rdev = dev->dev_private; int dpms_mode = DRM_MODE_DPMS_ON; @@ -313,31 +308,19 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; - if (lvds->backlight_level > 0) - dpms_mode = lvds->dpms_mode; - else - dpms_mode = DRM_MODE_DPMS_OFF; - lvds->backlight_level = level; + dpms_mode = lvds->dpms_mode; + lvds->backlight_level = radeon_legacy_lvds_level(bd); } else { struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; - if (lvds->backlight_level > 0) - dpms_mode = lvds->dpms_mode; - else - dpms_mode = DRM_MODE_DPMS_OFF; - lvds->backlight_level = level; + dpms_mode = lvds->dpms_mode; + lvds->backlight_level = radeon_legacy_lvds_level(bd); } } - radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); -} - -static int radeon_legacy_backlight_update_status(struct backlight_device *bd) -{ - struct radeon_backlight_privdata *pdata = bl_get_data(bd); - struct radeon_encoder *radeon_encoder = pdata->encoder; - - radeon_legacy_set_backlight_level(radeon_encoder, - radeon_legacy_lvds_level(bd)); + if (bd->props.brightness > 0) + radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); + else + radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF); return 0; } @@ -353,7 +336,7 @@ static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd) backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; - return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level; + return pdata->negative ? 
MAX_RADEON_LEVEL - backlight_level : backlight_level; } static const struct backlight_ops radeon_backlight_ops = { @@ -387,7 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, } memset(&props, 0, sizeof(props)); - props.max_brightness = RADEON_MAX_BL_LEVEL; + props.max_brightness = MAX_RADEON_LEVEL; props.type = BACKLIGHT_RAW; bd = backlight_device_register("radeon_bl", &drm_connector->kdev, pdata, &radeon_backlight_ops, &props); @@ -466,7 +449,7 @@ static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder) } if (bd) { - struct radeon_backlight_privdata *pdata; + struct radeon_backlight_privdata *pdata; pdata = bl_get_data(bd); backlight_device_unregister(bd); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index 527761801590..d56978949f34 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -252,23 +252,8 @@ struct radeon_mode_info { /* pointer to fbdev info structure */ struct radeon_fbdev *rfbdev; - /* firmware flags */ - u16 firmware_flags; - /* pointer to backlight encoder */ - struct radeon_encoder *bl_encoder; }; -#define RADEON_MAX_BL_LEVEL 0xFF - -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - -struct radeon_backlight_privdata { - struct radeon_encoder *encoder; - uint8_t negative; -}; - -#endif - #define MAX_H_CODE_TIMING_LEN 32 #define MAX_V_CODE_TIMING_LEN 32 @@ -284,18 +269,6 @@ struct radeon_tv_regs { uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN]; }; -struct radeon_atom_ss { - uint16_t percentage; - uint8_t type; - uint16_t step; - uint8_t delay; - uint8_t range; - uint8_t refdiv; - /* asic_ss */ - uint16_t rate; - uint16_t amount; -}; - struct radeon_crtc { struct drm_crtc base; int crtc_id; @@ -320,16 +293,6 @@ struct radeon_crtc { /* page flipping */ struct radeon_unpin_work *unpin_work; int deferred_flip_completion; - /* pll sharing */ - struct radeon_atom_ss ss; - bool ss_enabled; - u32 adjusted_clock; - int bpc; - u32 pll_reference_div; - u32 pll_post_div; - u32 pll_flags; - struct drm_encoder *encoder; - struct drm_connector *connector; }; struct radeon_encoder_primary_dac { @@ -383,6 +346,18 @@ struct radeon_encoder_ext_tmds { }; /* spread spectrum */ +struct radeon_atom_ss { + uint16_t percentage; + uint8_t type; + uint16_t step; + uint8_t delay; + uint8_t range; + uint8_t refdiv; + /* asic_ss */ + uint16_t rate; + uint16_t amount; +}; + struct radeon_encoder_atom_dig { bool linkb; /* atom dig */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c index a236795dc69a..9024e7222839 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c @@ -52,7 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo) list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { /* remove from all vm address space */ - radeon_vm_bo_rmv(bo->rdev, bo_va); + radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); } } @@ -627,17 +627,18 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) /** * radeon_bo_reserve - reserve bo * @bo: bo structure - * @no_intr: don't return -ERESTARTSYS on pending signal + * @no_wait: don't sleep while trying to reserve (return -EBUSY) * * Returns: + * -EBUSY: buffer is busy and @no_wait is true * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by * a signal. Release all buffer reservations and return to user-space. 
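+ * + * Typical call pattern (illustrative sketch; radeon_bo_unreserve() is the + * matching release helper declared in radeon_object.h): + * + * r = radeon_bo_reserve(bo, false); + * if (unlikely(r != 0)) + * return r; + * ... access the reserved bo ... + * radeon_bo_unreserve(bo);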
*/ -int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr) +int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) { int r; - r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); + r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) dev_err(bo->rdev->dev, "%p reserve failed\n", bo); @@ -645,3 +646,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr) } return 0; } + +/* object has to be reserved */ +struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm) +{ + struct radeon_bo_va *bo_va; + + list_for_each_entry(bo_va, &rbo->va, bo_list) { + if (bo_va->vm == vm) { + return bo_va; + } + } + return NULL; +} diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.h b/trunk/drivers/gpu/drm/radeon/radeon_object.h index 93cd491fff2e..17fb99f177cf 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.h @@ -52,7 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type) return 0; } -int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr); +int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait); static inline void radeon_bo_unreserve(struct radeon_bo *bo) { @@ -141,6 +141,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); +extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, + struct radeon_vm *vm); /* * sub allocation diff --git a/trunk/drivers/gpu/drm/radeon/radeon_pm.c b/trunk/drivers/gpu/drm/radeon/radeon_pm.c index bc2e7050a9d8..7ae606600107 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_pm.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_pm.c @@ -24,6 +24,9 @@ #include "radeon.h" #include "avivod.h" #include "atom.h" +#ifdef CONFIG_ACPI +#include +#endif #include #include #include @@ -33,7 +36,7 @@ #define RADEON_WAIT_VBLANK_TIMEOUT 200 static const char *radeon_pm_state_type_name[5] = { - "", + "Default", "Powersave", "Battery", "Balanced", @@ -47,6 +50,8 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish static void radeon_pm_update_profile(struct radeon_device *rdev); static void radeon_pm_set_clocks(struct radeon_device *rdev); +#define ACPI_AC_CLASS "ac_adapter" + int radeon_pm_get_type_index(struct radeon_device *rdev, enum radeon_pm_state_type ps_type, int instance) @@ -65,17 +70,33 @@ int radeon_pm_get_type_index(struct radeon_device *rdev, return rdev->pm.default_power_state_index; } -void radeon_pm_acpi_event_handler(struct radeon_device *rdev) +#ifdef CONFIG_ACPI +static int radeon_acpi_event(struct notifier_block *nb, + unsigned long val, + void *data) { - if (rdev->pm.pm_method == PM_METHOD_PROFILE) { - if (rdev->pm.profile == PM_PROFILE_AUTO) { - mutex_lock(&rdev->pm.mutex); - radeon_pm_update_profile(rdev); - radeon_pm_set_clocks(rdev); - mutex_unlock(&rdev->pm.mutex); + struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); + struct acpi_bus_event *entry = (struct acpi_bus_event *)data; + + if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { + if (power_supply_is_system_supplied() > 0) + DRM_DEBUG_DRIVER("pm: AC\n"); + else + DRM_DEBUG_DRIVER("pm: DC\n"); + + if (rdev->pm.pm_method == PM_METHOD_PROFILE) { + if (rdev->pm.profile == PM_PROFILE_AUTO) { + mutex_lock(&rdev->pm.mutex); + radeon_pm_update_profile(rdev); + radeon_pm_set_clocks(rdev); + mutex_unlock(&rdev->pm.mutex); + 
} } } + + return NOTIFY_OK; } +#endif static void radeon_pm_update_profile(struct radeon_device *rdev) { @@ -167,21 +188,8 @@ static void radeon_set_power_state(struct radeon_device *rdev) if (sclk > rdev->pm.default_sclk) sclk = rdev->pm.default_sclk; - /* starting with BTC, there is one state that is used for both - * MH and SH. Difference is that we always use the high clock index for - * mclk. - */ - if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && - (rdev->family >= CHIP_BARTS) && - rdev->pm.active_crtc_count && - ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) || - (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX))) - mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. - clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk; - else - mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. - clock_info[rdev->pm.requested_clock_mode_index].mclk; - + mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. + clock_info[rdev->pm.requested_clock_mode_index].mclk; if (mclk > rdev->pm.default_mclk) mclk = rdev->pm.default_mclk; @@ -245,13 +253,18 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) down_write(&rdev->pm.mclk_lock); mutex_lock(&rdev->ring_lock); - /* wait for the rings to drain */ - for (i = 0; i < RADEON_NUM_RINGS; i++) { - struct radeon_ring *ring = &rdev->ring[i]; - if (ring->ready) - radeon_fence_wait_empty_locked(rdev, i); + /* gui idle int has issues on older chips it seems */ + if (rdev->family >= CHIP_R600) { + if (rdev->irq.installed) { + /* wait for GPU to become idle */ + radeon_irq_kms_wait_gui_idle(rdev); + } + } else { + struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + if (ring->ready) { + radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX); + } } - radeon_unmap_vram_bos(rdev); if (rdev->irq.installed) { @@ -307,15 +320,17 @@ static void radeon_pm_print_states(struct radeon_device *rdev) for (j = 0; j < power_state->num_clock_modes; j++) { clock_info = &(power_state->clock_info[j]); if (rdev->flags & RADEON_IS_IGP) - DRM_DEBUG_DRIVER("\t\t%d e: %d\n", - j, - clock_info->sclk * 10); + DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n", + j, + clock_info->sclk * 10, + clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); else - DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n", - j, - clock_info->sclk * 10, - clock_info->mclk * 10, - clock_info->voltage.voltage); + DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n", + j, + clock_info->sclk * 10, + clock_info->mclk * 10, + clock_info->voltage.voltage, + clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? 
"\tNo display only" : ""); } } } @@ -532,9 +547,7 @@ void radeon_pm_suspend(struct radeon_device *rdev) void radeon_pm_resume(struct radeon_device *rdev) { /* set up the default clocks if the MC ucode is loaded */ - if ((rdev->family >= CHIP_BARTS) && - (rdev->family <= CHIP_CAYMAN) && - rdev->mc_fw) { + if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); @@ -589,9 +602,7 @@ int radeon_pm_init(struct radeon_device *rdev) radeon_pm_print_states(rdev); radeon_pm_init_profile(rdev); /* set up the default clocks if the MC ucode is loaded */ - if ((rdev->family >= CHIP_BARTS) && - (rdev->family <= CHIP_CAYMAN) && - rdev->mc_fw) { + if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { if (rdev->pm.default_vddc) radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, SET_VOLTAGE_TYPE_ASIC_VDDC); @@ -621,6 +632,10 @@ int radeon_pm_init(struct radeon_device *rdev) if (ret) DRM_ERROR("failed to create device file for power method\n"); +#ifdef CONFIG_ACPI + rdev->acpi_nb.notifier_call = radeon_acpi_event; + register_acpi_notifier(&rdev->acpi_nb); +#endif if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -651,6 +666,9 @@ void radeon_pm_fini(struct radeon_device *rdev) device_remove_file(rdev->dev, &dev_attr_power_profile); device_remove_file(rdev->dev, &dev_attr_power_method); +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&rdev->acpi_nb); +#endif } if (rdev->pm.power_state) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c index 028508859a3b..43c431a2686d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c @@ -43,7 +43,7 @@ * produce command buffers which are send to the kernel and * put in IBs for execution by the requested ring. */ -static int radeon_debugfs_sa_init(struct radeon_device *rdev); +int radeon_debugfs_sa_init(struct radeon_device *rdev); /** * radeon_ib_get - request an IB (Indirect Buffer) @@ -58,8 +58,7 @@ static int radeon_debugfs_sa_init(struct radeon_device *rdev); * Returns 0 on success, error on failure. */ int radeon_ib_get(struct radeon_device *rdev, int ring, - struct radeon_ib *ib, struct radeon_vm *vm, - unsigned size) + struct radeon_ib *ib, unsigned size) { int i, r; @@ -77,15 +76,8 @@ int radeon_ib_get(struct radeon_device *rdev, int ring, ib->ring = ring; ib->fence = NULL; ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); - ib->vm = vm; - if (vm) { - /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address - * space and soffset is the offset inside the pool bo - */ - ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET; - } else { - ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); - } + ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); + ib->vm_id = 0; ib->is_const_ib = false; for (i = 0; i < RADEON_NUM_RINGS; ++i) ib->sync_to[i] = NULL; @@ -160,10 +152,6 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, if (!need_sync) { radeon_semaphore_free(rdev, &ib->semaphore, NULL); } - /* if we can't remember our last VM flush then flush now! 
*/ - if (ib->vm && !ib->vm->last_flush) { - radeon_ring_vm_flush(rdev, ib->ring, ib->vm); - } if (const_ib) { radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); @@ -178,10 +166,6 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, if (const_ib) { const_ib->fence = radeon_fence_ref(ib->fence); } - /* we just flushed the VM, remember that */ - if (ib->vm && !ib->vm->last_flush) { - ib->vm->last_flush = radeon_fence_ref(ib->fence); - } radeon_ring_unlock_commit(rdev, ring); return 0; } @@ -291,7 +275,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev) * wptr. The GPU then starts fetching commands and executes * them until the pointers are equal again. */ -static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); +int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); /** * radeon_ring_write - write a value to the ring @@ -819,7 +803,7 @@ static struct drm_info_list radeon_debugfs_sa_list[] = { #endif -static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring) +int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring) { #if defined(CONFIG_DEBUG_FS) unsigned i; @@ -839,7 +823,7 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri return 0; } -static int radeon_debugfs_sa_init(struct radeon_device *rdev) +int radeon_debugfs_sa_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_sa.c b/trunk/drivers/gpu/drm/radeon/radeon_sa.c index 105fde69d045..4e771240fdd0 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_sa.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_sa.c @@ -316,7 +316,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev, { struct radeon_fence *fences[RADEON_NUM_RINGS]; unsigned tries[RADEON_NUM_RINGS]; - int i, r; + int i, r = -ENOMEM; BUG_ON(align > RADEON_GPU_PAGE_SIZE); BUG_ON(size > sa_manager->size); @@ -331,7 +331,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev, INIT_LIST_HEAD(&(*sa_bo)->flist); spin_lock(&sa_manager->wq.lock); - do { + while (1) { for (i = 0; i < RADEON_NUM_RINGS; ++i) { fences[i] = NULL; tries[i] = 0; @@ -349,22 +349,26 @@ int radeon_sa_bo_new(struct radeon_device *rdev, /* see if we can skip over some allocations */ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + if (!block) { + break; + } + spin_unlock(&sa_manager->wq.lock); r = radeon_fence_wait_any(rdev, fences, false); spin_lock(&sa_manager->wq.lock); /* if we have nothing to wait for block */ - if (r == -ENOENT && block) { + if (r == -ENOENT) { r = wait_event_interruptible_locked( sa_manager->wq, radeon_sa_event(sa_manager, size, align) ); - - } else if (r == -ENOENT) { - r = -ENOMEM; } + if (r) { + goto out_err; + } + } - - } while (!r); - +out_err: spin_unlock(&sa_manager->wq.lock); kfree(*sa_bo); *sa_bo = NULL; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_test.c b/trunk/drivers/gpu/drm/radeon/radeon_test.c index 587c09a00ba2..7c16540c10ff 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_test.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_test.c @@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); } -static void radeon_test_ring_sync2(struct radeon_device *rdev, +void radeon_test_ring_sync2(struct radeon_device *rdev, struct radeon_ring *ringA, 
struct radeon_ring *ringB, struct radeon_ring *ringC) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c index 5ebe1b3e5db2..5b71c716d83f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c @@ -549,7 +549,7 @@ static struct ttm_backend_func radeon_backend_func = { .destroy = &radeon_ttm_backend_destroy, }; -static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, +struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct page *dummy_read_page) { diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c index 73051ce3121e..2752f7f78237 100644 --- a/trunk/drivers/gpu/drm/radeon/rs400.c +++ b/trunk/drivers/gpu/drm/radeon/rs400.c @@ -242,7 +242,7 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void rs400_gpu_init(struct radeon_device *rdev) +void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: is this correct ? */ r420_pipes_init(rdev); @@ -252,7 +252,7 @@ static void rs400_gpu_init(struct radeon_device *rdev) } } -static void rs400_mc_init(struct radeon_device *rdev) +void rs400_mc_init(struct radeon_device *rdev) { u64 base; @@ -370,7 +370,7 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) #endif } -static void rs400_mc_program(struct radeon_device *rdev) +void rs400_mc_program(struct radeon_device *rdev) { struct r100_mc_save save; diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index dc8d021a999b..5301b3df8466 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -43,30 +43,22 @@ #include "rs600_reg_safe.h" -static void rs600_gpu_init(struct radeon_device *rdev); +void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); -static const u32 crtc_offsets[2] = -{ - 0, - AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL -}; - void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) { + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; int i; - if (crtc >= rdev->num_crtc) - return; - - if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) { + if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) { for (i = 0; i < rdev->usec_timeout; i++) { - if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)) + if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)) break; udelay(1); } for (i = 0; i < rdev->usec_timeout; i++) { - if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK) + if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK) break; udelay(1); } @@ -432,7 +424,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) tmp = RREG32_MC(R_000100_MC_PT0_CNTL); } -static int rs600_gart_init(struct radeon_device *rdev) +int rs600_gart_init(struct radeon_device *rdev) { int r; @@ -514,7 +506,7 @@ static int rs600_gart_enable(struct radeon_device *rdev) return 0; } -static void rs600_gart_disable(struct radeon_device *rdev) +void rs600_gart_disable(struct radeon_device *rdev) { u32 tmp; @@ -525,7 +517,7 @@ static void rs600_gart_disable(struct radeon_device *rdev) radeon_gart_table_vram_unpin(rdev); } -static void rs600_gart_fini(struct radeon_device *rdev) +void rs600_gart_fini(struct radeon_device *rdev) { radeon_gart_fini(rdev); rs600_gart_disable(rdev); @@ 
-575,6 +567,9 @@ int rs600_irq_set(struct radeon_device *rdev) if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { tmp |= S_000040_SW_INT_EN(1); } + if (rdev->irq.gui_idle) { + tmp |= S_000040_GUI_IDLE(1); + } if (rdev->irq.crtc_vblank_int[0] || atomic_read(&rdev->irq.pflip[0])) { mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); @@ -607,6 +602,12 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev) uint32_t irq_mask = S_000044_SW_INT(1); u32 tmp; + /* the interrupt works, but the status bit is permanently asserted */ + if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) { + if (!rdev->irq.gui_idle_acked) + irq_mask |= S_000044_GUI_IDLE_STAT(1); + } + if (G_000044_DISPLAY_INT_STAT(irqs)) { rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { @@ -666,6 +667,9 @@ int rs600_irq_process(struct radeon_device *rdev) bool queue_hotplug = false; bool queue_hdmi = false; + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; + status = rs600_irq_ack(rdev); if (!status && !rdev->irq.stat_regs.r500.disp_int && @@ -679,6 +683,11 @@ int rs600_irq_process(struct radeon_device *rdev) if (G_000044_SW_INT(status)) { radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); } + /* GUI idle */ + if (G_000040_GUI_IDLE(status)) { + rdev->irq.gui_idle_acked = true; + wake_up(&rdev->irq.idle_queue); + } /* Vertical blank interrupts */ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { if (rdev->irq.crtc_vblank_int[0]) { @@ -712,6 +721,8 @@ int rs600_irq_process(struct radeon_device *rdev) } status = rs600_irq_ack(rdev); } + /* reset gui idle ack. the status bit is broken */ + rdev->irq.gui_idle_acked = false; if (queue_hotplug) schedule_work(&rdev->hotplug_work); if (queue_hdmi) @@ -753,7 +764,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void rs600_gpu_init(struct radeon_device *rdev) +void rs600_gpu_init(struct radeon_device *rdev) { r420_pipes_init(rdev); /* Wait for mc idle */ @@ -761,7 +772,7 @@ static void rs600_gpu_init(struct radeon_device *rdev) dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); } -static void rs600_mc_init(struct radeon_device *rdev) +void rs600_mc_init(struct radeon_device *rdev) { u64 base; @@ -823,7 +834,7 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) WREG32(R_000074_MC_IND_DATA, v); } -static void rs600_debugfs(struct radeon_device *rdev) +void rs600_debugfs(struct radeon_device *rdev) { if (r100_debugfs_rbbm_init(rdev)) DRM_ERROR("Failed to register debugfs file for RBBM !\n"); diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c index 5cd5aceb69fa..3b663fcfe061 100644 --- a/trunk/drivers/gpu/drm/radeon/rs690.c +++ b/trunk/drivers/gpu/drm/radeon/rs690.c @@ -145,7 +145,7 @@ void rs690_pm_info(struct radeon_device *rdev) rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); } -static void rs690_mc_init(struct radeon_device *rdev) +void rs690_mc_init(struct radeon_device *rdev) { u64 base; @@ -224,7 +224,7 @@ struct rs690_watermark { fixed20_12 sclk; }; -static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, +void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, struct radeon_crtc *crtc, struct rs690_watermark *wm) { @@ -581,7 +581,7 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) WREG32(R_000078_MC_INDEX, 0x7F); } -static 
void rs690_mc_program(struct radeon_device *rdev) +void rs690_mc_program(struct radeon_device *rdev) { struct rv515_mc_save save; diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index 2d75d30be5b4..aa8ef491ef3c 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -35,9 +35,9 @@ #include "rv515_reg_safe.h" /* This files gather functions specifics to: rv515 */ -static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); -static int rv515_debugfs_ga_info_init(struct radeon_device *rdev); -static void rv515_gpu_init(struct radeon_device *rdev); +int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); +int rv515_debugfs_ga_info_init(struct radeon_device *rdev); +void rv515_gpu_init(struct radeon_device *rdev); int rv515_mc_wait_for_idle(struct radeon_device *rdev); void rv515_debugfs(struct radeon_device *rdev) @@ -143,7 +143,7 @@ void rv515_vga_render_disable(struct radeon_device *rdev) RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); } -static void rv515_gpu_init(struct radeon_device *rdev) +void rv515_gpu_init(struct radeon_device *rdev) { unsigned pipe_select_current, gb_pipe_select, tmp; @@ -189,7 +189,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev) } } -static void rv515_mc_init(struct radeon_device *rdev) +void rv515_mc_init(struct radeon_device *rdev) { rv515_vram_get_type(rdev); @@ -261,7 +261,7 @@ static struct drm_info_list rv515_ga_info_list[] = { }; #endif -static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev) +int rv515_debugfs_pipes_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1); @@ -270,7 +270,7 @@ static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev) #endif } -static int rv515_debugfs_ga_info_init(struct radeon_device *rdev) +int rv515_debugfs_ga_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1); @@ -310,7 +310,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); } -static void rv515_mc_program(struct radeon_device *rdev) +void rv515_mc_program(struct radeon_device *rdev) { struct rv515_mc_save save; @@ -787,7 +787,7 @@ struct rv515_watermark { fixed20_12 sclk; }; -static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, +void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, struct radeon_crtc *crtc, struct rv515_watermark *wm) { diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 2469afe11b85..ca8ffec10ff6 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -124,7 +124,7 @@ void rv770_pm_misc(struct radeon_device *rdev) /* * GART */ -static int rv770_pcie_gart_enable(struct radeon_device *rdev) +int rv770_pcie_gart_enable(struct radeon_device *rdev) { u32 tmp; int r, i; @@ -175,7 +175,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev) return 0; } -static void rv770_pcie_gart_disable(struct radeon_device *rdev) +void rv770_pcie_gart_disable(struct radeon_device *rdev) { u32 tmp; int i; @@ -201,7 +201,7 @@ static void rv770_pcie_gart_disable(struct radeon_device *rdev) radeon_gart_table_vram_unpin(rdev); } -static void rv770_pcie_gart_fini(struct radeon_device *rdev) +void rv770_pcie_gart_fini(struct radeon_device *rdev) { 
radeon_gart_fini(rdev); rv770_pcie_gart_disable(rdev); @@ -209,7 +209,7 @@ static void rv770_pcie_gart_fini(struct radeon_device *rdev) } -static void rv770_agp_enable(struct radeon_device *rdev) +void rv770_agp_enable(struct radeon_device *rdev) { u32 tmp; int i; @@ -839,7 +839,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) } } -static int rv770_mc_init(struct radeon_device *rdev) +int rv770_mc_init(struct radeon_device *rdev) { u32 tmp; int chansize, numchan; diff --git a/trunk/drivers/gpu/drm/radeon/si.c b/trunk/drivers/gpu/drm/radeon/si.c index c76825ffa37f..0139e227e3c7 100644 --- a/trunk/drivers/gpu/drm/radeon/si.c +++ b/trunk/drivers/gpu/drm/radeon/si.c @@ -1806,14 +1806,13 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) #endif (ib->gpu_addr & 0xFFFFFFFC)); radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); - radeon_ring_write(ring, ib->length_dw | - (ib->vm ? (ib->vm->id << 24) : 0)); + radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24)); if (!ib->is_const_ib) { /* flush read cache over gart for this vmid */ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); + radeon_ring_write(ring, ib->vm_id); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | PACKET3_TC_ACTION_ENA | @@ -2364,7 +2363,7 @@ void si_pcie_gart_tlb_flush(struct radeon_device *rdev) WREG32(VM_INVALIDATE_REQUEST, 1); } -static int si_pcie_gart_enable(struct radeon_device *rdev) +int si_pcie_gart_enable(struct radeon_device *rdev) { int r, i; @@ -2426,7 +2425,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); WREG32(VM_CONTEXT1_CNTL2, 0); - WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | + WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); si_pcie_gart_tlb_flush(rdev); @@ -2437,7 +2436,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) return 0; } -static void si_pcie_gart_disable(struct radeon_device *rdev) +void si_pcie_gart_disable(struct radeon_device *rdev) { /* Disable all tables */ WREG32(VM_CONTEXT0_CNTL, 0); @@ -2456,7 +2455,7 @@ static void si_pcie_gart_disable(struct radeon_device *rdev) radeon_gart_table_vram_unpin(rdev); } -static void si_pcie_gart_fini(struct radeon_device *rdev) +void si_pcie_gart_fini(struct radeon_device *rdev) { si_pcie_gart_disable(rdev); radeon_gart_table_vram_free(rdev); @@ -2789,84 +2788,41 @@ void si_vm_fini(struct radeon_device *rdev) { } -/** - * si_vm_set_page - update the page tables using the CP - * - * @rdev: radeon_device pointer - * @pe: addr of the page entry - * @addr: dst addr to write into pe - * @count: number of page entries to update - * @incr: increase next addr by incr bytes - * @flags: access flags - * - * Update the page tables using the CP (cayman-si). 
- */ -void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id) { - struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; - uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); - int i; - uint64_t value; - - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(1))); - radeon_ring_write(ring, pe); - radeon_ring_write(ring, upper_32_bits(pe)); - for (i = 0; i < count; ++i) { - if (flags & RADEON_VM_PAGE_SYSTEM) { - value = radeon_vm_map_gart(rdev, addr); - value &= 0xFFFFFFFFFFFFF000ULL; - } else if (flags & RADEON_VM_PAGE_VALID) - value = addr; - else - value = 0; - addr += incr; - value |= r600_flags; - radeon_ring_write(ring, value); - radeon_ring_write(ring, upper_32_bits(value)); - } + if (id < 8) + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12); + else + WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2), + vm->pt_gpu_addr >> 12); + /* flush hdp cache */ + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* bits 0-15 are the VM contexts0-15 */ + WREG32(VM_INVALIDATE_REQUEST, 1 << id); + return 0; } -void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) +void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) { - struct radeon_ring *ring = &rdev->ring[ridx]; + if (vm->id < 8) + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0); + else + WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0); + /* flush hdp cache */ + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* bits 0-15 are the VM contexts0-15 */ + WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id); +} - if (vm == NULL) +void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm) +{ + if (vm->id == -1) return; - /* write new base address */ - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(0))); - - if (vm->id < 8) { - radeon_ring_write(ring, - (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); - } else { - radeon_ring_write(ring, - (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); - } - radeon_ring_write(ring, 0); - radeon_ring_write(ring, vm->pd_gpu_addr >> 12); - /* flush hdp cache */ - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(0))); - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 0x1); - + WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); /* bits 0-15 are the VM contexts0-15 */ - radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | - WRITE_DATA_DST_SEL(0))); - radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); - radeon_ring_write(ring, 0); - radeon_ring_write(ring, 1 << vm->id); + WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id); } /* @@ -3243,6 +3199,10 @@ int si_irq_set(struct radeon_device *rdev) DRM_DEBUG("si_irq_set: hpd 6\n"); hpd6 |= DC_HPDx_INT_EN; } + if (rdev->irq.gui_idle) { + DRM_DEBUG("gui idle\n"); + grbm_int_cntl |= GUI_IDLE_INT_ENABLE; + } WREG32(CP_INT_CNTL_RING0, cp_int_cntl); WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); @@ -3698,6 +3658,7 @@ int si_irq_process(struct radeon_device *rdev) break; case 233: /* GUI IDLE */ DRM_DEBUG("IH: GUI idle\n"); + 
wake_up(&rdev->irq.idle_queue); break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); diff --git a/trunk/drivers/gpu/drm/radeon/sid.h b/trunk/drivers/gpu/drm/radeon/sid.h index 7d2a20e56577..ef4815c27b1c 100644 --- a/trunk/drivers/gpu/drm/radeon/sid.h +++ b/trunk/drivers/gpu/drm/radeon/sid.h @@ -812,21 +812,6 @@ #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 #define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 #define PACKET3_WRITE_DATA 0x37 -#define WRITE_DATA_DST_SEL(x) ((x) << 8) - /* 0 - register - * 1 - memory (sync - via GRBM) - * 2 - tc/l2 - * 3 - gds - * 4 - reserved - * 5 - memory (async - direct) - */ -#define WR_ONE_ADDR (1 << 16) -#define WR_CONFIRM (1 << 20) -#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30) - /* 0 - me - * 1 - pfp - * 2 - ce - */ #define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A diff --git a/trunk/drivers/gpu/drm/savage/savage_bci.c b/trunk/drivers/gpu/drm/savage/savage_bci.c index c89aef420971..1efbb9075837 100644 --- a/trunk/drivers/gpu/drm/savage/savage_bci.c +++ b/trunk/drivers/gpu/drm/savage/savage_bci.c @@ -547,8 +547,6 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->chipset = (enum savage_family)chipset; - pci_set_master(dev->pdev); - return 0; } diff --git a/trunk/drivers/gpu/drm/shmobile/Kconfig b/trunk/drivers/gpu/drm/shmobile/Kconfig deleted file mode 100644 index 7e7d52b2a2fc..000000000000 --- a/trunk/drivers/gpu/drm/shmobile/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -config DRM_SHMOBILE - tristate "DRM Support for SH Mobile" - depends on DRM && (SUPERH || ARCH_SHMOBILE) - select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER - select DRM_GEM_CMA_HELPER - help - Choose this option if you have an SH Mobile chipset. - If M is selected the module will be called shmob-drm. - diff --git a/trunk/drivers/gpu/drm/shmobile/Makefile b/trunk/drivers/gpu/drm/shmobile/Makefile deleted file mode 100644 index 4c3eeb355630..000000000000 --- a/trunk/drivers/gpu/drm/shmobile/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -shmob-drm-y := shmob_drm_backlight.o \ - shmob_drm_crtc.o \ - shmob_drm_drv.o \ - shmob_drm_kms.o \ - shmob_drm_plane.o - -obj-$(CONFIG_DRM_SHMOBILE) += shmob-drm.o diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.c deleted file mode 100644 index 463aee18f774..000000000000 --- a/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * shmob_drm_backlight.c -- SH Mobile DRM Backlight - * - * Copyright (C) 2012 Renesas Corporation - * - * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include - -#include "shmob_drm_backlight.h" -#include "shmob_drm_crtc.h" -#include "shmob_drm_drv.h" - -static int shmob_drm_backlight_update(struct backlight_device *bdev) -{ - struct shmob_drm_connector *scon = bl_get_data(bdev); - struct shmob_drm_device *sdev = scon->connector.dev->dev_private; - const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; - int brightness = bdev->props.brightness; - - if (bdev->props.power != FB_BLANK_UNBLANK || - bdev->props.state & BL_CORE_SUSPENDED) - brightness = 0; - - return bdata->set_brightness(brightness); -} - -static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev) -{ - struct shmob_drm_connector *scon = bl_get_data(bdev); - struct shmob_drm_device *sdev = scon->connector.dev->dev_private; - const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; - - return bdata->get_brightness(); -} - -static const struct backlight_ops shmob_drm_backlight_ops = { - .options = BL_CORE_SUSPENDRESUME, - .update_status = shmob_drm_backlight_update, - .get_brightness = shmob_drm_backlight_get_brightness, -}; - -void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode) -{ - if (scon->backlight == NULL) - return; - - scon->backlight->props.power = mode == DRM_MODE_DPMS_ON - ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; - backlight_update_status(scon->backlight); -} - -int shmob_drm_backlight_init(struct shmob_drm_connector *scon) -{ - struct shmob_drm_device *sdev = scon->connector.dev->dev_private; - const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; - struct drm_connector *connector = &scon->connector; - struct drm_device *dev = connector->dev; - struct backlight_device *backlight; - - if (!bdata->max_brightness) - return 0; - - backlight = backlight_device_register(bdata->name, dev->dev, scon, - &shmob_drm_backlight_ops, NULL); - if (IS_ERR(backlight)) { - dev_err(dev->dev, "unable to register backlight device: %ld\n", - PTR_ERR(backlight)); - return PTR_ERR(backlight); - } - - backlight->props.max_brightness = bdata->max_brightness; - backlight->props.brightness = bdata->max_brightness; - backlight->props.power = FB_BLANK_POWERDOWN; - backlight_update_status(backlight); - - scon->backlight = backlight; - return 0; -} - -void shmob_drm_backlight_exit(struct shmob_drm_connector *scon) -{ - backlight_device_unregister(scon->backlight); -} diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.h deleted file mode 100644 index 9477595d2ff3..000000000000 --- a/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * shmob_drm_backlight.h -- SH Mobile DRM Backlight - * - * Copyright (C) 2012 Renesas Corporation - * - * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __SHMOB_DRM_BACKLIGHT_H__ -#define __SHMOB_DRM_BACKLIGHT_H__ - -struct shmob_drm_connector; - -void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode); -int shmob_drm_backlight_init(struct shmob_drm_connector *scon); -void shmob_drm_backlight_exit(struct shmob_drm_connector *scon); - -#endif /* __SHMOB_DRM_BACKLIGHT_H__ */ diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c deleted file mode 100644 index 0e7a9306bd0c..000000000000 --- a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ /dev/null @@ -1,763 +0,0 @@ -/* - * shmob_drm_crtc.c -- SH Mobile DRM CRTCs - * - * Copyright (C) 2012 Renesas Corporation - * - * Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include - -#include -#include -#include -#include -#include - -#include