diff --git a/[refs] b/[refs] index d82adb29372c..da71bed32a81 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 82ffd92b162ece87c863c075d993c65333e8e78b +refs/heads/master: 8ff1f792dd68ad46f3cfe01e01a375b402cf08da diff --git a/trunk/Documentation/DocBook/drm.tmpl b/trunk/Documentation/DocBook/drm.tmpl index 196b8b9dba11..b0300529ab13 100644 --- a/trunk/Documentation/DocBook/drm.tmpl +++ b/trunk/Documentation/DocBook/drm.tmpl @@ -6,11 +6,36 @@ Linux DRM Developer's Guide + + + Jesse + Barnes + Initial version + + Intel Corporation +
+ jesse.barnes@intel.com
+
+ Laurent Pinchart, Driver internals
+ Ideas on board SPRL
+ laurent.pinchart@ideasonboard.com
+
2008-2009
- Intel Corporation (Jesse Barnes <jesse.barnes@intel.com>)
+ 2012
+ Intel Corporation
+ Laurent Pinchart
@@ -20,6 +45,17 @@ the kernel source COPYING file.
+ Revision 1.0, 2012-07-13, LP: Added extensive documentation about driver internals.
@@ -72,342 +108,361 @@ submission & fencing, suspend/resume support, and DMA services. - - The core of every DRM driver is struct drm_driver. Drivers - typically statically initialize a drm_driver structure, - then pass it to drm_init() at load time. - - Driver initialization - - Before calling the DRM initialization routines, the driver must - first create and fill out a struct drm_driver structure. - - - static struct drm_driver driver = { - /* Don't use MTRRs here; the Xserver or userspace app should - * deal with them for Intel hardware. - */ - .driver_features = - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_MODESET, - .load = i915_driver_load, - .unload = i915_driver_unload, - .firstopen = i915_driver_firstopen, - .lastclose = i915_driver_lastclose, - .preclose = i915_driver_preclose, - .save = i915_save, - .restore = i915_restore, - .device_is_agp = i915_driver_device_is_agp, - .get_vblank_counter = i915_get_vblank_counter, - .enable_vblank = i915_enable_vblank, - .disable_vblank = i915_disable_vblank, - .irq_preinstall = i915_driver_irq_preinstall, - .irq_postinstall = i915_driver_irq_postinstall, - .irq_uninstall = i915_driver_irq_uninstall, - .irq_handler = i915_driver_irq_handler, - .reclaim_buffers = drm_core_reclaim_buffers, - .get_map_ofs = drm_core_get_map_ofs, - .get_reg_ofs = drm_core_get_reg_ofs, - .fb_probe = intelfb_probe, - .fb_remove = intelfb_remove, - .fb_resize = intelfb_resize, - .master_create = i915_master_create, - .master_destroy = i915_master_destroy, -#if defined(CONFIG_DEBUG_FS) - .debugfs_init = i915_debugfs_init, - .debugfs_cleanup = i915_debugfs_cleanup, -#endif - .gem_init_object = i915_gem_init_object, - .gem_free_object = i915_gem_free_object, - .gem_vm_ops = &i915_gem_vm_ops, - .ioctls = i915_ioctls, - .fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .ioctl = drm_ioctl, - .mmap = drm_mmap, - .poll = drm_poll, - .fasync = drm_fasync, -#ifdef CONFIG_COMPAT - .compat_ioctl = i915_compat_ioctl, -#endif - .llseek = noop_llseek, - }, - .pci_driver = { - .name = DRIVER_NAME, - .id_table = pciidlist, - .probe = probe, - .remove = __devexit_p(drm_cleanup_pci), - }, - .name = DRIVER_NAME, - .desc = DRIVER_DESC, - .date = DRIVER_DATE, - .major = DRIVER_MAJOR, - .minor = DRIVER_MINOR, - .patchlevel = DRIVER_PATCHLEVEL, - }; - - - In the example above, taken from the i915 DRM driver, the driver - sets several flags indicating what core features it supports; - we go over the individual callbacks in later sections. Since - flags indicate which features your driver supports to the DRM - core, you need to set most of them prior to calling drm_init(). Some, - like DRIVER_MODESET can be set later based on user supplied parameters, - but that's the exception rather than the rule. - - - Driver flags - - DRIVER_USE_AGP - - Driver uses AGP interface - - - - DRIVER_REQUIRE_AGP - - Driver needs AGP interface to function. - - - - DRIVER_USE_MTRR - - - Driver uses MTRR interface for mapping memory. Deprecated. - - - - - DRIVER_PCI_DMA - - Driver is capable of PCI DMA. Deprecated. - - - - DRIVER_SG - - Driver can perform scatter/gather DMA. Deprecated. - - - - DRIVER_HAVE_DMA - Driver supports DMA. Deprecated. - - - DRIVER_HAVE_IRQDRIVER_IRQ_SHARED - - - DRIVER_HAVE_IRQ indicates whether the driver has an IRQ - handler. DRIVER_IRQ_SHARED indicates whether the device & - handler support shared IRQs (note that this is required of - PCI drivers). 
- - - - - DRIVER_DMA_QUEUE - - - Should be set if the driver queues DMA requests and completes them - asynchronously. Deprecated. - - - - - DRIVER_FB_DMA - - - Driver supports DMA to/from the framebuffer. Deprecated. - - - - - DRIVER_MODESET - - - Driver supports mode setting interfaces. - - - - - - In this specific case, the driver requires AGP and supports - IRQs. DMA, as discussed later, is handled by device-specific ioctls - in this case. It also supports the kernel mode setting APIs, though - unlike in the actual i915 driver source, this example unconditionally - exports KMS capability. + Driver Initialization + + At the core of every DRM driver is a drm_driver + structure. Drivers typically statically initialize a drm_driver structure, + and then pass it to one of the drm_*_init() functions + to register it with the DRM subsystem. - - - - - - Driver load - - In the previous section, we saw what a typical drm_driver - structure might look like. One of the more important fields in - the structure is the hook for the load function. - - - static struct drm_driver driver = { - ... - .load = i915_driver_load, - ... - }; - - - The load function has many responsibilities: allocating a driver - private structure, specifying supported performance counters, - configuring the device (e.g. mapping registers & command - buffers), initializing the memory manager, and setting up the - initial output configuration. - - - If compatibility is a concern (e.g. with drivers converted over - to the new interfaces from the old ones), care must be taken to - prevent device initialization and control that is incompatible with - currently active userspace drivers. For instance, if user - level mode setting drivers are in use, it would be problematic - to perform output discovery & configuration at load time. - Likewise, if user-level drivers unaware of memory management are - in use, memory management and command buffer setup may need to - be omitted. These requirements are driver-specific, and care - needs to be taken to keep both old and new applications and - libraries working. The i915 driver supports the "modeset" - module parameter to control whether advanced features are - enabled at load time or in legacy fashion. + + The drm_driver structure contains static + information that describes the driver and features it supports, and + pointers to methods that the DRM core will call to implement the DRM API. + We will first go through the drm_driver static + information fields, and will then describe individual operations in + details as they get used in later sections. - - Driver private & performance counters - - The driver private hangs off the main drm_device structure and - can be used for tracking various device-specific bits of - information, like register offsets, command buffer status, - register state for suspend/resume, etc. At load time, a - driver may simply allocate one and set drm_device.dev_priv - appropriately; it should be freed and drm_device.dev_priv set - to NULL when the driver is unloaded. - + Driver Information + + Driver Features + + Drivers inform the DRM core about their requirements and supported + features by setting appropriate flags in the + driver_features field. Since those flags + influence the DRM core behaviour since registration time, most of them + must be set to registering the drm_driver + instance. + + u32 driver_features; + + Driver Feature Flags + + DRIVER_USE_AGP + + Driver uses AGP interface, the DRM core will manage AGP resources. 
+ + + + DRIVER_REQUIRE_AGP + + Driver needs AGP interface to function. AGP initialization failure + will become a fatal error. + + + + DRIVER_USE_MTRR + + Driver uses MTRR interface for mapping memory, the DRM core will + manage MTRR resources. Deprecated. + + + + DRIVER_PCI_DMA + + Driver is capable of PCI DMA, mapping of PCI DMA buffers to + userspace will be enabled. Deprecated. + + + + DRIVER_SG + + Driver can perform scatter/gather DMA, allocation and mapping of + scatter/gather buffers will be enabled. Deprecated. + + + + DRIVER_HAVE_DMA + + Driver supports DMA, the userspace DMA API will be supported. + Deprecated. + + + + DRIVER_HAVE_IRQDRIVER_IRQ_SHARED + + DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler. The + DRM core will automatically register an interrupt handler when the + flag is set. DRIVER_IRQ_SHARED indicates whether the device & + handler support shared IRQs (note that this is required of PCI + drivers). + + + + DRIVER_IRQ_VBL + Unused. Deprecated. + + + DRIVER_DMA_QUEUE + + Should be set if the driver queues DMA requests and completes them + asynchronously. Deprecated. + + + + DRIVER_FB_DMA + + Driver supports DMA to/from the framebuffer, mapping of frambuffer + DMA buffers to userspace will be supported. Deprecated. + + + + DRIVER_IRQ_VBL2 + Unused. Deprecated. + + + DRIVER_GEM + + Driver use the GEM memory manager. + + + + DRIVER_MODESET + + Driver supports mode setting interfaces (KMS). + + + + DRIVER_PRIME + + Driver implements DRM PRIME buffer sharing. + + + + + + Major, Minor and Patchlevel + int major; +int minor; +int patchlevel; + + The DRM core identifies driver versions by a major, minor and patch + level triplet. The information is printed to the kernel log at + initialization time and passed to userspace through the + DRM_IOCTL_VERSION ioctl. + + + The major and minor numbers are also used to verify the requested driver + API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes + between minor versions, applications can call DRM_IOCTL_SET_VERSION to + select a specific version of the API. If the requested major isn't equal + to the driver major, or the requested minor is larger than the driver + minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise + the driver's set_version() method will be called with the requested + version. + + + + Name, Description and Date + char *name; +char *desc; +char *date; + + The driver name is printed to the kernel log at initialization time, + used for IRQ registration and passed to userspace through + DRM_IOCTL_VERSION. + + + The driver description is a purely informative string passed to + userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by + the kernel. + + + The driver date, formatted as YYYYMMDD, is meant to identify the date of + the latest modification to the driver. However, as most drivers fail to + update it, its value is mostly useless. The DRM core prints it to the + kernel log at initialization time and passes it to userspace through the + DRM_IOCTL_VERSION ioctl. + + + + + Driver Load - The DRM supports several counters which may be used for rough - performance characterization. Note that the DRM stat counter - system is not often used by applications, and supporting - additional counters is completely optional. + The load method is the driver and device + initialization entry point. 
The method is responsible for allocating and + initializing driver private data, specifying supported performance + counters, performing resource allocation and mapping (e.g. acquiring + clocks, mapping registers or allocating command buffers), initializing + the memory manager (), installing + the IRQ handler (), setting up + vertical blanking handling (), mode + setting () and initial output + configuration (). + + If compatibility is a concern (e.g. with drivers converted over from + User Mode Setting to Kernel Mode Setting), care must be taken to prevent + device initialization and control that is incompatible with currently + active userspace drivers. For instance, if user level mode setting + drivers are in use, it would be problematic to perform output discovery + & configuration at load time. Likewise, if user-level drivers + unaware of memory management are in use, memory management and command + buffer setup may need to be omitted. These requirements are + driver-specific, and care needs to be taken to keep both old and new + applications and libraries working. + + int (*load) (struct drm_device *, unsigned long flags); - These interfaces are deprecated and should not be used. If performance - monitoring is desired, the developer should investigate and - potentially enhance the kernel perf and tracing infrastructure to export - GPU related performance information for consumption by performance - monitoring tools and applications. + The method takes two arguments, a pointer to the newly created + drm_device and flags. The flags are used to + pass the driver_data field of the device id + corresponding to the device passed to drm_*_init(). + Only PCI devices currently use this, USB and platform DRM drivers have + their load method called with flags to 0. + + Driver Private & Performance Counters + + The driver private hangs off the main + drm_device structure and can be used for + tracking various device-specific bits of information, like register + offsets, command buffer status, register state for suspend/resume, etc. + At load time, a driver may simply allocate one and set + drm_device.dev_priv + appropriately; it should be freed and + drm_device.dev_priv + set to NULL when the driver is unloaded. + + + DRM supports several counters which were used for rough performance + characterization. This stat counter system is deprecated and should not + be used. If performance monitoring is desired, the developer should + investigate and potentially enhance the kernel perf and tracing + infrastructure to export GPU related performance information for + consumption by performance monitoring tools and applications. + + + + IRQ Registration + + The DRM core tries to facilitate IRQ handler registration and + unregistration by providing drm_irq_install and + drm_irq_uninstall functions. Those functions only + support a single interrupt per device. + + + + Both functions get the device IRQ by calling + drm_dev_to_irq. This inline function will call a + bus-specific operation to retrieve the IRQ number. For platform devices, + platform_get_irq(..., 0) is used to retrieve the + IRQ number. + + + drm_irq_install starts by calling the + irq_preinstall driver operation. The operation + is optional and must make sure that the interrupt will not get fired by + clearing all pending interrupt flags or disabling the interrupt. + + + The IRQ will then be requested by a call to + request_irq. If the DRIVER_IRQ_SHARED driver + feature flag is set, a shared (IRQF_SHARED) IRQ handler will be + requested. 
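Putting the pieces together, the IRQ-related portion of a drm_driver instance reduces to a handful of fields. The following minimal sketch follows the structure-initializer style used elsewhere in this guide; the foo_* handlers are hypothetical names:

    static struct drm_driver foo_driver = {
            .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | ...,
            .irq_preinstall  = foo_irq_preinstall,  /* optional: quiesce the device */
            .irq_handler     = foo_irq_handler,     /* mandatory, see below */
            .irq_postinstall = foo_irq_postinstall, /* optional: enable interrupts */
            .irq_uninstall   = foo_irq_uninstall,   /* optional: disable interrupts */
            ...
    };

With these fields filled in, drm_irq_install and drm_irq_uninstall drive the whole sequence described in this section.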
+ + + The IRQ handler function must be provided as the mandatory irq_handler + driver operation. It will get passed directly to + request_irq and thus has the same prototype as all + IRQ handlers. It will get called with a pointer to the DRM device as the + second argument. + + + Finally the function calls the optional + irq_postinstall driver operation. The operation + usually enables interrupts (excluding the vblank interrupt, which is + enabled separately), but drivers may choose to enable/disable interrupts + at a different time. + + + drm_irq_uninstall is similarly used to uninstall an + IRQ handler. It starts by waking up all processes waiting on a vblank + interrupt to make sure they don't hang, and then calls the optional + irq_uninstall driver operation. The operation + must disable all hardware interrupts. Finally the function frees the IRQ + by calling free_irq. + + + + Memory Manager Initialization + + Every DRM driver requires a memory manager which must be initialized at + load time. DRM currently contains two memory managers, the Translation + Table Manager (TTM) and the Graphics Execution Manager (GEM). + This document describes the use of the GEM memory manager only. See + for details. + + + + Miscellaneous Device Configuration + + Another task that may be necessary for PCI devices during configuration + is mapping the video BIOS. On many devices, the VBIOS describes device + configuration, LCD panel timings (if any), and contains flags indicating + device state. Mapping the BIOS can be done using the pci_map_rom() call, + a convenience function that takes care of mapping the actual ROM, + whether it has been shadowed into memory (typically at address 0xc0000) + or exists on the PCI device in the ROM BAR. Note that after the ROM has + been mapped and any necessary information has been extracted, it should + be unmapped; on many devices, the ROM address decoder is shared with + other BARs, so leaving it mapped could cause undesired behaviour like + hangs or memory corruption. + + + + - - Configuring the device - - Obviously, device configuration is device-specific. - However, there are several common operations: finding a - device's PCI resources, mapping them, and potentially setting - up an IRQ handler. - - - Finding & mapping resources is fairly straightforward. The - DRM wrapper functions, drm_get_resource_start() and - drm_get_resource_len(), may be used to find BARs on the given - drm_device struct. Once those values have been retrieved, the - driver load function can call drm_addmap() to create a new - mapping for the BAR in question. Note that you probably want a - drm_local_map_t in your driver private structure to track any - mappings you create. - - - - - if compatibility with other operating systems isn't a concern - (DRM drivers can run under various BSD variants and OpenSolaris), - native Linux calls may be used for the above, e.g. pci_resource_* - and iomap*/iounmap. See the Linux device driver book for more - info. - - - Once you have a register map, you may use the DRM_READn() and - DRM_WRITEn() macros to access the registers on your device, or - use driver-specific versions to offset into your MMIO space - relative to a driver-specific base pointer (see I915_READ for - an example). - - - If your device supports interrupt generation, you may want to - set up an interrupt handler when the driver is loaded. This - is done using the drm_irq_install() function. 
If your device - supports vertical blank interrupts, it should call - drm_vblank_init() to initialize the core vblank handling code before - enabling interrupts on your device. This ensures the vblank related - structures are allocated and allows the core to handle vblank events. - - - - Once your interrupt handler is registered (it uses your - drm_driver.irq_handler as the actual interrupt handling - function), you can safely enable interrupts on your device, - assuming any other state your interrupt handler uses is also - initialized. - - - Another task that may be necessary during configuration is - mapping the video BIOS. On many devices, the VBIOS describes - device configuration, LCD panel timings (if any), and contains - flags indicating device state. Mapping the BIOS can be done - using the pci_map_rom() call, a convenience function that - takes care of mapping the actual ROM, whether it has been - shadowed into memory (typically at address 0xc0000) or exists - on the PCI device in the ROM BAR. Note that after the ROM - has been mapped and any necessary information has been extracted, - it should be unmapped; on many devices, the ROM address decoder is - shared with other BARs, so leaving it mapped could cause - undesired behavior like hangs or memory corruption. - - - + + + Memory management + + Modern Linux systems require large amounts of graphics memory to store + frame buffers, textures, vertices and other graphics-related data. Given + the very dynamic nature of much of this data, managing graphics memory + efficiently is thus crucial for the graphics stack and plays a central + role in the DRM infrastructure. + + + The DRM core includes two memory managers, namely Translation Table Maps + (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory + manager to be developed and tried to be a one-size-fits-all + solution. It provides a single userspace API to accommodate the needs of + all hardware, supporting both Unified Memory Architecture (UMA) devices + and devices with dedicated video RAM (i.e. most discrete video cards). + This resulted in a large, complex piece of code that turned out to be + hard to use for driver development. + + + GEM started as an Intel-sponsored project in reaction to TTM's + complexity. Its design philosophy is completely different: instead of + providing a solution to every graphics memory-related problem, GEM + identified common code between drivers and created a support library to + share it. GEM has simpler initialization and execution requirements than + TTM, but has no video RAM management capabilities and is thus limited to + UMA devices. + - Memory manager initialization - - In order to allocate command buffers, cursor memory, scanout - buffers, etc., as well as support the latest features provided - by packages like Mesa and the X.Org X server, your driver - should support a memory manager. - - If your driver supports memory management (it should!), you - need to set that up at load time as well. How you initialize - it depends on which memory manager you're using: TTM or GEM. + The Translation Table Manager (TTM) - TTM design background and information belongs here. TTM initialization - - TTM (for Translation Table Manager) manages video memory and - aperture space for graphics devices. TTM supports both UMA devices - and devices with dedicated video RAM (VRAM), i.e. most discrete - graphics devices. If your device has dedicated RAM, supporting - TTM is desirable.
TTM also integrates tightly with your - driver-specific buffer execution function. See the radeon - driver for examples. - - - The core TTM structure is the ttm_bo_driver struct. It contains - several fields with function pointers for initializing the TTM, - allocating and freeing memory, waiting for command completion - and fence synchronization, and memory migration. See the - radeon_ttm.c file for an example of usage. + This section is outdated. + + Drivers wishing to support TTM must fill out a drm_bo_driver + structure. The structure contains several fields with function + pointers for initializing the TTM, allocating and freeing memory, + waiting for command completion and fence synchronization, and memory + migration. See the radeon_ttm.c file for an example of usage. The ttm_global_reference structure is made up of several fields: @@ -445,82 +500,1081 @@ count for the TTM, which will call your initialization function. + + + The Graphics Execution Manager (GEM) + + The GEM design approach has resulted in a memory manager that doesn't + provide full coverage of all (or even all common) use cases in its + userspace or kernel API. GEM exposes a set of standard memory-related + operations to userspace and a set of helper functions to drivers, and let + drivers implement hardware-specific operations with their own private API. + + + The GEM userspace API is described in the + GEM - the Graphics + Execution Manager article on LWN. While slightly + outdated, the document provides a good overview of the GEM API principles. + Buffer allocation and read and write operations, described as part of the + common GEM API, are currently implemented using driver-specific ioctls. + + + GEM is data-agnostic. It manages abstract buffer objects without knowing + what individual buffers contain. APIs that require knowledge of buffer + contents or purpose, such as buffer allocation or synchronization + primitives, are thus outside of the scope of GEM and must be implemented + using driver-specific ioctls. + + + On a fundamental level, GEM involves several operations: + + Memory allocation and freeing + Command execution + Aperture management at command execution time + + Buffer object allocation is relatively straightforward and largely + provided by Linux's shmem layer, which provides memory to back each + object. + + + Device-specific operations, such as command execution, pinning, buffer + read & write, mapping, and domain ownership transfers are left to + driver-specific ioctls. + + + GEM Initialization + + Drivers that use GEM must set the DRIVER_GEM bit in the struct + drm_driver + driver_features field. The DRM core will + then automatically initialize the GEM core before calling the + load operation. Behind the scene, this will + create a DRM Memory Manager object which provides an address space + pool for object allocation. + + + In a KMS configuration, drivers need to allocate and initialize a + command ring buffer following core GEM initialization if required by + the hardware. UMA devices usually have what is called a "stolen" + memory region, which provides space for the initial framebuffer and + large, contiguous memory regions required by the device. This space is + typically not managed by GEM, and must be initialized separately into + its own DRM MM object. + + - GEM initialization - - GEM is an alternative to TTM, designed specifically for UMA - devices. It has simpler initialization and execution requirements - than TTM, but has no VRAM management capability. 
Core GEM - is initialized by calling drm_mm_init() to create - a GTT DRM MM object, which provides an address space pool for - object allocation. In a KMS configuration, the driver - needs to allocate and initialize a command ring buffer following - core GEM initialization. A UMA device usually has what is called a - "stolen" memory region, which provides space for the initial - framebuffer and large, contiguous memory regions required by the - device. This space is not typically managed by GEM, and it must - be initialized separately into its own DRM MM object. - - - Initialization is driver-specific. In the case of Intel - integrated graphics chips like 965GM, GEM initialization can - be done by calling the internal GEM init function, - i915_gem_do_init(). Since the 965GM is a UMA device - (i.e. it doesn't have dedicated VRAM), GEM manages - making regular RAM available for GPU operations. Memory set - aside by the BIOS (called "stolen" memory by the i915 - driver) is managed by the DRM memrange allocator; the - rest of the aperture is managed by GEM. - - /* Basic memrange allocator for stolen space (aka vram) */ - drm_memrange_init(&dev_priv->vram, 0, prealloc_size); - /* Let GEM Manage from end of prealloc space to end of aperture */ - i915_gem_do_init(dev, prealloc_size, agp_size); - - - - - Once the memory manager has been set up, we may allocate the - command buffer. In the i915 case, this is also done with a - GEM function, i915_gem_init_ringbuffer(). - + GEM Objects Creation + + GEM splits creation of GEM objects and allocation of the memory that + backs them in two distinct operations. + + + GEM objects are represented by an instance of struct + drm_gem_object. Drivers usually need to extend + GEM objects with private information and thus create a driver-specific + GEM object structure type that embeds an instance of struct + drm_gem_object. + + + To create a GEM object, a driver allocates memory for an instance of its + specific GEM object type and initializes the embedded struct + drm_gem_object with a call to + drm_gem_object_init. The function takes a pointer to + the DRM device, a pointer to the GEM object and the buffer object size + in bytes. + + + GEM uses shmem to allocate anonymous pageable memory. + drm_gem_object_init will create an shmfs file of + the requested size and store it into the struct + drm_gem_object filp + field. The memory is used as either main storage for the object when the + graphics hardware uses system memory directly or as a backing store + otherwise. + + + Drivers are responsible for the actual physical pages allocation by + calling shmem_read_mapping_page_gfp for each page. + Note that they can decide to allocate pages when initializing the GEM + object, or to delay allocation until the memory is needed (for instance + when a page fault occurs as a result of a userspace memory access or + when the driver needs to start a DMA transfer involving the memory). + + + Anonymous pageable memory allocation is not always desired, for instance + when the hardware requires physically contiguous system memory as is + often the case in embedded devices. Drivers can create GEM objects with + no shmfs backing (called private GEM objects) by initializing them with + a call to drm_gem_private_object_init instead of + drm_gem_object_init. Storage for private GEM + objects must be managed by drivers. 
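As an illustration of the embed-and-initialize pattern described above, a shmem-backed object could be created along the following lines. This is a minimal sketch: struct foo_gem_object and foo_gem_create are hypothetical names, not part of the GEM API.

    struct foo_gem_object {
            struct drm_gem_object base;
            /* driver-private data: page list, GPU address, ... */
    };

    static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
                                                 size_t size)
    {
            struct foo_gem_object *obj;

            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
            if (obj == NULL)
                    return NULL;

            /* Create the shmfs backing store of the requested size. */
            if (drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE))) {
                    kfree(obj);
                    return NULL;
            }

            return obj;
    }

A private GEM object would be created the same way, with drm_gem_private_object_init substituted for drm_gem_object_init.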
+ + + Drivers that do not need to extend GEM objects with private information + can call the drm_gem_object_alloc function to + allocate and initialize a struct drm_gem_object + instance. The GEM core will call the optional driver + gem_init_object operation after initializing + the GEM object with drm_gem_object_init. + int (*gem_init_object) (struct drm_gem_object *obj); + + + No alloc-and-init function exists for private GEM objects. + + + + GEM Objects Lifetime + + All GEM objects are reference-counted by the GEM core. References can be + acquired and release by calling drm_gem_object_reference + and drm_gem_object_unreference respectively. The + caller must hold the drm_device + struct_mutex lock. As a convenience, GEM + provides the drm_gem_object_reference_unlocked and + drm_gem_object_unreference_unlocked functions that + can be called without holding the lock. + + + When the last reference to a GEM object is released the GEM core calls + the drm_driver + gem_free_object operation. That operation is + mandatory for GEM-enabled drivers and must free the GEM object and all + associated resources. + + + void (*gem_free_object) (struct drm_gem_object *obj); + Drivers are responsible for freeing all GEM object resources, including + the resources created by the GEM core. If an mmap offset has been + created for the object (in which case + drm_gem_object::map_list::map + is not NULL) it must be freed by a call to + drm_gem_free_mmap_offset. The shmfs backing store + must be released by calling drm_gem_object_release + (that function can safely be called if no shmfs backing store has been + created). + + + + GEM Objects Naming + + Communication between userspace and the kernel refers to GEM objects + using local handles, global names or, more recently, file descriptors. + All of those are 32-bit integer values; the usual Linux kernel limits + apply to the file descriptors. + + + GEM handles are local to a DRM file. Applications get a handle to a GEM + object through a driver-specific ioctl, and can use that handle to refer + to the GEM object in other standard or driver-specific ioctls. Closing a + DRM file handle frees all its GEM handles and dereferences the + associated GEM objects. + + + To create a handle for a GEM object drivers call + drm_gem_handle_create. The function takes a pointer + to the DRM file and the GEM object and returns a locally unique handle. + When the handle is no longer needed drivers delete it with a call to + drm_gem_handle_delete. Finally the GEM object + associated with a handle can be retrieved by a call to + drm_gem_object_lookup. + + + Handles don't take ownership of GEM objects, they only take a reference + to the object that will be dropped when the handle is destroyed. To + avoid leaking GEM objects, drivers must make sure they drop the + reference(s) they own (such as the initial reference taken at object + creation time) as appropriate, without any special consideration for the + handle. For example, in the particular case of combined GEM object and + handle creation in the implementation of the + dumb_create operation, drivers must drop the + initial reference to the GEM object before returning the handle. + + + GEM names are similar in purpose to handles but are not local to DRM + files. They can be passed between processes to reference a GEM object + globally. 
Names can't be used directly to refer to objects in the DRM + API, applications must convert handles to names and names to handles + using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls + respectively. The conversion is handled by the DRM core without any + driver-specific support. + + + Similar to global names, GEM file descriptors are also used to share GEM + objects across processes. They offer additional security: as file + descriptors must be explictly sent over UNIX domain sockets to be shared + between applications, they can't be guessed like the globally unique GEM + names. + + + Drivers that support GEM file descriptors, also known as the DRM PRIME + API, must set the DRIVER_PRIME bit in the struct + drm_driver + driver_features field, and implement the + prime_handle_to_fd and + prime_fd_to_handle operations. + + + int (*prime_handle_to_fd)(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, + uint32_t flags, int *prime_fd); + int (*prime_fd_to_handle)(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, + uint32_t *handle); + Those two operations convert a handle to a PRIME file descriptor and + vice versa. Drivers must use the kernel dma-buf buffer sharing framework + to manage the PRIME file descriptors. + + + While non-GEM drivers must implement the operations themselves, GEM + drivers must use the drm_gem_prime_handle_to_fd + and drm_gem_prime_fd_to_handle helper functions. + Those helpers rely on the driver + gem_prime_export and + gem_prime_import operations to create a dma-buf + instance from a GEM object (dma-buf exporter role) and to create a GEM + object from a dma-buf instance (dma-buf importer role). + + + struct dma_buf * (*gem_prime_export)(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); + struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, + struct dma_buf *dma_buf); + These two operations are mandatory for GEM drivers that support DRM + PRIME. + + + + GEM Objects Mapping + + Because mapping operations are fairly heavyweight GEM favours + read/write-like access to buffers, implemented through driver-specific + ioctls, over mapping buffers to userspace. However, when random access + to the buffer is needed (to perform software rendering for instance), + direct access to the object can be more efficient. + + + The mmap system call can't be used directly to map GEM objects, as they + don't have their own file handle. Two alternative methods currently + co-exist to map GEM objects to userspace. The first method uses a + driver-specific ioctl to perform the mapping operation, calling + do_mmap under the hood. This is often considered + dubious, seems to be discouraged for new GEM-enabled drivers, and will + thus not be described here. + + + The second method uses the mmap system call on the DRM file handle. + void *mmap(void *addr, size_t length, int prot, int flags, int fd, + off_t offset); + DRM identifies the GEM object to be mapped by a fake offset passed + through the mmap offset argument. Prior to being mapped, a GEM object + must thus be associated with a fake offset. To do so, drivers must call + drm_gem_create_mmap_offset on the object. The + function allocates a fake offset range from a pool and stores the + offset divided by PAGE_SIZE in + obj->map_list.hash.key. Care must be taken not to + call drm_gem_create_mmap_offset if a fake offset + has already been allocated for the object. This can be tested by + obj->map_list.map being non-NULL. 
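In practice drivers often wrap the offset allocation in a small helper along these lines (a sketch; foo_gem_mmap_offset is a hypothetical name):

    static int foo_gem_mmap_offset(struct drm_gem_object *obj, uint64_t *offset)
    {
            int ret = 0;

            /* Allocate the fake offset only once per object. */
            if (obj->map_list.map == NULL)
                    ret = drm_gem_create_mmap_offset(obj);
            if (ret)
                    return ret;

            *offset = (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
            return 0;
    }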
+ + + Once allocated, the fake offset value + (obj->map_list.hash.key << PAGE_SHIFT) + must be passed to the application in a driver-specific way and can then + be used as the mmap offset argument. + + + The GEM core provides a helper method drm_gem_mmap + to handle object mapping. The method can be set directly as the mmap + file operation handler. It will look up the GEM object based on the + offset value and set the VMA operations to the + drm_driver gem_vm_ops + field. Note that drm_gem_mmap doesn't map memory to + userspace, but relies on the driver-provided fault handler to map pages + individually. + + + To use drm_gem_mmap, drivers must fill the struct + drm_driver gem_vm_ops + field with a pointer to VM operations. + + + struct vm_operations_struct *gem_vm_ops + + struct vm_operations_struct { + void (*open)(struct vm_area_struct * area); + void (*close)(struct vm_area_struct * area); + int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); + }; + + + The open and close + operations must update the GEM object reference count. Drivers can use + the drm_gem_vm_open and + drm_gem_vm_close helper functions directly as open + and close handlers. + + + The fault operation handler is responsible for mapping individual pages + to userspace when a page fault occurs. Depending on the memory + allocation scheme, drivers can allocate pages at fault time, or can + decide to allocate memory for the GEM object at the time the object is + created. + + + Drivers that want to map the GEM object upfront instead of handling page + faults can implement their own mmap file operation handler. + + + + Dumb GEM Objects + + The GEM API doesn't standardize GEM objects creation and leaves it to + driver-specific ioctls. While not an issue for full-fledged graphics + stacks that include device-specific userspace components (in libdrm for + instance), this limit makes DRM-based early boot graphics unnecessarily + complex. + + + Dumb GEM objects partly alleviate the problem by providing a standard + API to create dumb buffers suitable for scanout, which can then be used + to create KMS frame buffers. + + + To support dumb GEM objects drivers must implement the + dumb_create, + dumb_destroy and + dumb_map_offset operations. + + + + int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, + struct drm_mode_create_dumb *args); + + The dumb_create operation creates a GEM + object suitable for scanout based on the width, height and depth + from the struct drm_mode_create_dumb + argument. It fills the argument's handle, + pitch and size + fields with a handle for the newly created GEM object and its line + pitch and size in bytes. + + + + int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle); + + The dumb_destroy operation destroys a dumb + GEM object created by dumb_create. + + + + int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, + uint32_t handle, uint64_t *offset); + + The dumb_map_offset operation associates an + mmap fake offset with the GEM object given by the handle and returns + it. Drivers must use the + drm_gem_create_mmap_offset function to + associate the fake offset as described in + . + + + + + + Memory Coherency + + When mapped to the device or used in a command buffer, backing pages + for an object are flushed to memory and marked write combined so as to + be coherent with the GPU. 
Likewise, if the CPU accesses an object + after the GPU has finished rendering to the object, then the object + must be made coherent with the CPU's view of memory, usually involving + GPU cache flushing of various kinds. This core CPU<->GPU + coherency management is provided by a device-specific ioctl, which + evaluates an object's current domain and performs any necessary + flushing or synchronization to put the object into the desired + coherency domain (note that the object may be busy, i.e. an active + render target; in that case, setting the domain blocks the client and + waits for rendering to complete before performing any necessary + flushing operations). + + + + Command Execution + + Perhaps the most important GEM function for GPU devices is providing a + command execution interface to clients. Client programs construct + command buffers containing references to previously allocated memory + objects, and then submit them to GEM. At that point, GEM takes care to + bind all the objects into the GTT, execute the buffer, and provide + necessary synchronization between clients accessing the same buffers. + This often involves evicting some objects from the GTT and re-binding + others (a fairly expensive operation), and providing relocation + support which hides fixed GTT offsets from clients. Clients must take + care not to submit command buffers that reference more objects than + can fit in the GTT; otherwise, GEM will reject them and no rendering + will occur. Similarly, if several objects in the buffer require fence + registers to be allocated for correct rendering (e.g. 2D blits on + pre-965 chips), care must be taken not to require more fence registers + than are available to the client. Such resource management should be + abstracted from the client in libdrm. + + + + + + Mode Setting + + Drivers must initialize the mode setting core by calling + drm_mode_config_init on the DRM device. The function + initializes the drm_device + mode_config field and never fails. Once done, + mode configuration must be setup by initializing the following fields. + + + + int min_width, min_height; +int max_width, max_height; + + Minimum and maximum width and height of the frame buffers in pixel + units. + + + + struct drm_mode_config_funcs *funcs; + Mode setting functions. + + - Output configuration + Frame Buffer Creation + struct drm_framebuffer *(*fb_create)(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_mode_fb_cmd2 *mode_cmd); - The final initialization task is output configuration. This involves: - - - Finding and initializing the CRTCs, encoders, and connectors - for the device. - - - Creating an initial configuration. - - - Registering a framebuffer console driver. - - + Frame buffers are abstract memory objects that provide a source of + pixels to scanout to a CRTC. Applications explicitly request the + creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and + receive an opaque handle that can be passed to the KMS CRTC control, + plane configuration and page flip functions. + + + Frame buffers rely on the underneath memory manager for low-level memory + operations. When creating a frame buffer applications pass a memory + handle (or a list of memory handles for multi-planar formats) through + the drm_mode_fb_cmd2 argument. This document + assumes that the driver uses GEM, those handles thus reference GEM + objects. + + + Drivers must first validate the requested frame buffer parameters passed + through the mode_cmd argument. 
In particular this is where invalid + sizes, pixel formats or pitches can be caught. + + + If the parameters are deemed valid, drivers then create, initialize and + return an instance of struct drm_framebuffer. + If desired the instance can be embedded in a larger driver-specific + structure. The new instance is initialized with a call to + drm_framebuffer_init which takes a pointer to DRM + frame buffer operations (struct + drm_framebuffer_funcs). Frame buffer operations are + + + int (*create_handle)(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int *handle); + + Create a handle to the frame buffer underlying memory object. If + the frame buffer uses a multi-plane format, the handle will + reference the memory object associated with the first plane. + + + Drivers call drm_gem_handle_create to create + the handle. + + + + void (*destroy)(struct drm_framebuffer *framebuffer); + + Destroy the frame buffer object and frees all associated + resources. Drivers must call + drm_framebuffer_cleanup to free resources + allocated by the DRM core for the frame buffer object, and must + make sure to unreference all memory objects associated with the + frame buffer. Handles created by the + create_handle operation are released by + the DRM core. + + + + int (*dirty)(struct drm_framebuffer *framebuffer, + struct drm_file *file_priv, unsigned flags, unsigned color, + struct drm_clip_rect *clips, unsigned num_clips); + + This optional operation notifies the driver that a region of the + frame buffer has changed in response to a DRM_IOCTL_MODE_DIRTYFB + ioctl call. + + + + + + After initializing the drm_framebuffer + instance drivers must fill its width, + height, pitches, + offsets, depth, + bits_per_pixel and + pixel_format fields from the values passed + through the drm_mode_fb_cmd2 argument. They + should call the drm_helper_mode_fill_fb_struct + helper function to do so. + + + + Output Polling + void (*output_poll_changed)(struct drm_device *dev); + + This operation notifies the driver that the status of one or more + connectors has changed. Drivers that use the fb helper can just call the + drm_fb_helper_hotplug_event function to handle this + operation. + + + + + + + + KMS Initialization and Cleanup + + A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders + and connectors. KMS drivers must thus create and initialize all those + objects at load time after initializing mode setting. + + + CRTCs (struct <structname>drm_crtc</structname>) + + A CRTC is an abstraction representing a part of the chip that contains a + pointer to a scanout buffer. Therefore, the number of CRTCs available + determines how many independent scanout buffers can be active at any + given time. The CRTC structure contains several fields to support this: + a pointer to some video memory (abstracted as a frame buffer object), a + display mode, and an (x, y) offset into the video memory to support + panning or configurations where one piece of video memory spans multiple + CRTCs. - Output discovery and initialization - - Several core functions exist to create CRTCs, encoders, and - connectors, namely: drm_crtc_init(), drm_connector_init(), and - drm_encoder_init(), along with several "helper" functions to - perform common tasks. - - - Connectors should be registered with sysfs once they've been - detected and initialized, using the - drm_sysfs_connector_add() function. Likewise, when they're - removed from the system, they should be destroyed with - drm_sysfs_connector_remove(). 
- - -CRTC Initialization + + A KMS device must create and register at least one struct + drm_crtc instance. The instance is allocated + and zeroed by the driver, possibly as part of a larger structure, and + registered with a call to drm_crtc_init with a + pointer to CRTC functions. + + + + CRTC Operations + + Set Configuration + int (*set_config)(struct drm_mode_set *set); + + Apply a new CRTC configuration to the device. The configuration + specifies a CRTC, a frame buffer to scan out from, a (x,y) position in + the frame buffer, a display mode and an array of connectors to drive + with the CRTC if possible. + + + If the frame buffer specified in the configuration is NULL, the driver + must detach all encoders connected to the CRTC and all connectors + attached to those encoders and disable them. + + + This operation is called with the mode config lock held. + + + FIXME: How should set_config interact with DPMS? If the CRTC is + suspended, should it be resumed? + + + + Page Flipping + int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event); + + Schedule a page flip to the given frame buffer for the CRTC. This + operation is called with the mode config mutex held. + + + Page flipping is a synchronization mechanism that replaces the frame + buffer being scanned out by the CRTC with a new frame buffer during + vertical blanking, avoiding tearing. When an application requests a page + flip the DRM core verifies that the new frame buffer is large enough to + be scanned out by the CRTC in the currently configured mode and then + calls the CRTC page_flip operation with a + pointer to the new frame buffer. + + + The page_flip operation schedules a page flip. + Once any pending rendering targetting the new frame buffer has + completed, the CRTC will be reprogrammed to display that frame buffer + after the next vertical refresh. The operation must return immediately + without waiting for rendering or page flip to complete and must block + any new rendering to the frame buffer until the page flip completes. + + + If a page flip is already pending, the + page_flip operation must return + -EBUSY. + + + To synchronize page flip to vertical blanking the driver will likely + need to enable vertical blanking interrupts. It should call + drm_vblank_get for that purpose, and call + drm_vblank_put after the page flip completes. + + + If the application has requested to be notified when page flip completes + the page_flip operation will be called with a + non-NULL event argument pointing to a + drm_pending_vblank_event instance. Upon page + flip completion the driver must fill the + event::event + sequence, tv_sec + and tv_usec fields with the associated + vertical blanking count and timestamp, add the event to the + drm_file list of events to be signaled, and wake + up any waiting process. This can be performed with + event.sequence = drm_vblank_count_and_time(..., &now); + event->event.tv_sec = now.tv_sec; + event->event.tv_usec = now.tv_usec; + + spin_lock_irqsave(&dev->event_lock, flags); + list_add_tail(&event->base.link, &event->base.file_priv->event_list); + wake_up_interruptible(&event->base.file_priv->event_wait); + spin_unlock_irqrestore(&dev->event_lock, flags); + ]]> + + + FIXME: Could drivers that don't need to wait for rendering to complete + just add the event to dev->vblank_event_list and + let the DRM core handle everything, as for "normal" vertical blanking + events? 
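Putting the requirements above together, a page_flip implementation usually has the following rough shape. This is a sketch only: the foo_* names are hypothetical, and the driver-specific locking that protects the pending-flip state is omitted.

    static int foo_crtc_page_flip(struct drm_crtc *crtc,
                                  struct drm_framebuffer *fb,
                                  struct drm_pending_vblank_event *event)
    {
            struct foo_crtc *fcrtc = to_foo_crtc(crtc);
            int ret;

            if (fcrtc->flip_pending)
                    return -EBUSY;  /* only one flip may be pending */

            /* The flip will be completed from the vblank interrupt handler. */
            ret = drm_vblank_get(crtc->dev, fcrtc->pipe);
            if (ret)
                    return ret;

            /* Program the new scanout address; the hardware latches it at the
             * next vertical blanking period. */
            foo_crtc_set_scanout_address(fcrtc, fb);

            crtc->fb = fb;
            fcrtc->flip_pending = true;
            fcrtc->flip_event = event;

            return 0;
    }

The vblank interrupt handler then signals the stored event using the drm_vblank_count_and_time sequence shown above and calls drm_vblank_put.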
+ + + While waiting for the page flip to complete, the + event->base.link list head can be used freely by + the driver to store the pending event in a driver-specific list. + + + If the file handle is closed before the event is signaled, drivers must + take care to destroy the event in their + preclose operation (and, if needed, call + drm_vblank_put). + + + + Miscellaneous + + + void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t start, uint32_t size); + + Apply a gamma table to the device. The operation is optional. + + + + void (*destroy)(struct drm_crtc *crtc); + + Destroy the CRTC when not needed anymore. See + . + + + + + + + + Planes (struct <structname>drm_plane</structname>) + + A plane represents an image source that can be blended with or overlayed + on top of a CRTC during the scanout process. Planes are associated with + a frame buffer to crop a portion of the image memory (source) and + optionally scale it to a destination size. The result is then blended + with or overlayed on top of a CRTC. + + + Plane Initialization + + Planes are optional. To create a plane, a KMS drivers allocates and + zeroes an instances of struct drm_plane + (possibly as part of a larger structure) and registers it with a call + to drm_plane_init. The function takes a bitmask + of the CRTCs that can be associated with the plane, a pointer to the + plane functions and a list of format supported formats. + + + + Plane Operations + + + int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); + + Enable and configure the plane to use the given CRTC and frame buffer. + + + The source rectangle in frame buffer memory coordinates is given by + the src_x, src_y, + src_w and src_h + parameters (as 16.16 fixed point values). Devices that don't support + subpixel plane coordinates can ignore the fractional part. + + + The destination rectangle in CRTC coordinates is given by the + crtc_x, crtc_y, + crtc_w and crtc_h + parameters (as integer values). Devices scale the source rectangle to + the destination rectangle. If scaling is not supported, and the source + rectangle size doesn't match the destination rectangle size, the + driver must return a -EINVAL error. + + + + int (*disable_plane)(struct drm_plane *plane); + + Disable the plane. The DRM core calls this method in response to a + DRM_IOCTL_MODE_SETPLANE ioctl call with the frame buffer ID set to 0. + Disabled planes must not be processed by the CRTC. + + + + void (*destroy)(struct drm_plane *plane); + + Destroy the plane when not needed anymore. See + . + + + + + + + Encoders (struct <structname>drm_encoder</structname>) + + An encoder takes pixel data from a CRTC and converts it to a format + suitable for any attached connectors. On some devices, it may be + possible to have a CRTC send data to more than one encoder. In that + case, both encoders would receive data from the same scanout buffer, + resulting in a "cloned" display configuration across the connectors + attached to each encoder. + + + Encoder Initialization + + As for CRTCs, a KMS driver must create, initialize and register at + least one struct drm_encoder instance. The + instance is allocated and zeroed by the driver, possibly as part of a + larger structure. + + + Drivers must initialize the struct drm_encoder + possible_crtcs and + possible_clones fields before registering the + encoder. 
Both fields are bitmasks of respectively the CRTCs that the + encoder can be connected to, and sibling encoders candidate for cloning. + + + After being initialized, the encoder must be registered with a call to + drm_encoder_init. The function takes a pointer to + the encoder functions and an encoder type. Supported types are + + + DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A + + + DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort + + + DRM_MODE_ENCODER_LVDS for display panels + + + DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component, + SCART) + + + DRM_MODE_ENCODER_VIRTUAL for virtual machine displays + + + + + Encoders must be attached to a CRTC to be used. DRM drivers leave + encoders unattached at initialization time. Applications (or the fbdev + compatibility layer when implemented) are responsible for attaching the + encoders they want to use to a CRTC. + + + + Encoder Operations + + + void (*destroy)(struct drm_encoder *encoder); + + Called to destroy the encoder when not needed anymore. See + . + + + + + + + Connectors (struct <structname>drm_connector</structname>) + + A connector is the final destination for pixel data on a device, and + usually connects directly to an external display device like a monitor + or laptop panel. A connector can only be attached to one encoder at a + time. The connector is also the structure where information about the + attached display is kept, so it contains fields for display data, EDID + data, DPMS & connection status, and information about modes + supported on the attached displays. + + + Connector Initialization + + Finally a KMS driver must create, initialize, register and attach at + least one struct drm_connector instance. The + instance is created as other KMS objects and initialized by setting the + following fields. + + + + interlace_allowed + + Whether the connector can handle interlaced modes. + + + + doublescan_allowed + + Whether the connector can handle doublescan. + + + + display_info + + + Display information is filled from EDID information when a display + is detected. For non hot-pluggable displays such as flat panels in + embedded systems, the driver should initialize the + display_info.width_mm + and + display_info.height_mm + fields with the physical size of the display. + + + + polled + + Connector polling mode, a combination of + + + DRM_CONNECTOR_POLL_HPD + + The connector generates hotplug events and doesn't need to be + periodically polled. The CONNECT and DISCONNECT flags must not + be set together with the HPD flag. + + + + DRM_CONNECTOR_POLL_CONNECT + + Periodically poll the connector for connection. + + + + DRM_CONNECTOR_POLL_DISCONNECT + + Periodically poll the connector for disconnection. + + + + Set to 0 for connectors that don't support connection status + discovery. + + + + + The connector is then registered with a call to + drm_connector_init with a pointer to the connector + functions and a connector type, and exposed through sysfs with a call to + drm_sysfs_connector_add. 
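For a simple hot-pluggable VGA output, that sequence might look as follows (a sketch; foo_connector_funcs is a hypothetical driver structure, and the supported connector types are listed next):

    connector = kzalloc(sizeof(*connector), GFP_KERNEL);
    if (connector == NULL)
            return -ENOMEM;

    connector->interlace_allowed = true;
    connector->doublescan_allowed = false;
    connector->polled = DRM_CONNECTOR_POLL_CONNECT
                      | DRM_CONNECTOR_POLL_DISCONNECT;

    drm_connector_init(dev, connector, &foo_connector_funcs,
                       DRM_MODE_CONNECTOR_VGA);
    drm_sysfs_connector_add(connector);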
+ + + Supported connector types are + + DRM_MODE_CONNECTOR_VGA + DRM_MODE_CONNECTOR_DVII + DRM_MODE_CONNECTOR_DVID + DRM_MODE_CONNECTOR_DVIA + DRM_MODE_CONNECTOR_Composite + DRM_MODE_CONNECTOR_SVIDEO + DRM_MODE_CONNECTOR_LVDS + DRM_MODE_CONNECTOR_Component + DRM_MODE_CONNECTOR_9PinDIN + DRM_MODE_CONNECTOR_DisplayPort + DRM_MODE_CONNECTOR_HDMIA + DRM_MODE_CONNECTOR_HDMIB + DRM_MODE_CONNECTOR_TV + DRM_MODE_CONNECTOR_eDP + DRM_MODE_CONNECTOR_VIRTUAL + + + + Connectors must be attached to an encoder to be used. For devices that + map connectors to encoders 1:1, the connector should be attached at + initialization time with a call to + drm_mode_connector_attach_encoder. The driver must + also set the drm_connector + encoder field to point to the attached + encoder. + + + Finally, drivers must initialize the connectors state change detection + with a call to drm_kms_helper_poll_init. If at + least one connector is pollable but can't generate hotplug interrupts + (indicated by the DRM_CONNECTOR_POLL_CONNECT and + DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will + automatically be queued to periodically poll for changes. Connectors + that can generate hotplug interrupts must be marked with the + DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must + call drm_helper_hpd_irq_event. The function will + queue a delayed work to check the state of all connectors, but no + periodic polling will be done. + + + + Connector Operations + + Unless otherwise state, all operations are mandatory. + + + DPMS + void (*dpms)(struct drm_connector *connector, int mode); + + The DPMS operation sets the power state of a connector. The mode + argument is one of + + DRM_MODE_DPMS_ON + DRM_MODE_DPMS_STANDBY + DRM_MODE_DPMS_SUSPEND + DRM_MODE_DPMS_OFF + + + + In all but DPMS_ON mode the encoder to which the connector is attached + should put the display in low-power mode by driving its signals + appropriately. If more than one connector is attached to the encoder + care should be taken not to change the power state of other displays as + a side effect. Low-power mode should be propagated to the encoders and + CRTCs when all related connectors are put in low-power mode. + + + + Modes + int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, + uint32_t max_height); + + Fill the mode list with all supported modes for the connector. If the + max_width and max_height + arguments are non-zero, the implementation must ignore all modes wider + than max_width or higher than + max_height. + + + The connector must also fill in this operation its + display_info + width_mm and + height_mm fields with the connected display + physical size in millimeters. The fields should be set to 0 if the value + isn't known or is not applicable (for instance for projector devices). + + + + Connection Status + + The connection status is updated through polling or hotplug events when + supported (see ). The status + value is reported to userspace through ioctls and must not be used + inside the driver, as it only gets initialized by a call to + drm_mode_getconnector from userspace. + + enum drm_connector_status (*detect)(struct drm_connector *connector, + bool force); + + Check to see if anything is attached to the connector. The + force parameter is set to false whilst polling or + to true when checking the connector due to user request. + force can be used by the driver to avoid + expensive, destructive operations during automated probing. 
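A detect implementation honouring these guidelines might be sketched as follows; the foo_* helpers are hypothetical, and the possible return values are described next:

    static enum drm_connector_status
    foo_connector_detect(struct drm_connector *connector, bool force)
    {
            struct foo_connector *fcon = to_foo_connector(connector);

            /* Cheap, non-destructive check first. */
            if (foo_hpd_asserted(fcon))
                    return connector_status_connected;

            /* Expensive or destructive probing only on explicit request. */
            if (!force)
                    return connector_status_unknown;

            return foo_load_detect(fcon) ? connector_status_connected
                                         : connector_status_disconnected;
    }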
Return connector_status_connected if something is connected to the connector, connector_status_disconnected if nothing is connected and connector_status_unknown if the connection state isn't known.

Drivers should only return connector_status_connected if the connection status has really been probed as connected. Connectors that can't detect the connection status, or failed connection status probes, should return connector_status_unknown (see the sketch at the end of this section).

Miscellaneous

void (*destroy)(struct drm_connector *connector);

Destroy the connector when not needed anymore. See the Cleanup section below.

Cleanup

The DRM core manages its objects' lifetime. When an object is not needed anymore the core calls its destroy function, which must clean up and free every resource allocated for the object. Every drm_*_init call must be matched with a corresponding drm_*_cleanup call to clean up CRTCs (drm_crtc_cleanup), planes (drm_plane_cleanup), encoders (drm_encoder_cleanup) and connectors (drm_connector_cleanup). Furthermore, connectors that have been added to sysfs must be removed by a call to drm_sysfs_connector_remove before calling drm_connector_cleanup.

Connector state change detection must be cleaned up with a call to drm_kms_helper_poll_fini.

Output discovery and initialization example

- In the example above (again, taken from the i915 driver), a CRT connector and encoder combination is created. A device-specific i2c bus is also created for fetching EDID data and performing monitor detection. Once the process is complete, the new connector is registered with sysfs to make its properties available to applications.

- Helper functions and core functions

- Since many PC-class graphics devices have similar display output designs, the DRM provides a set of helper functions to make output management easier. The core helper routines handle encoder re-routing and the disabling of unused functions following mode setting. Using the helpers is optional, but recommended for devices with PC-style architectures (i.e. a set of display planes for feeding pixels to encoders which are in turn routed to connectors). Devices with more complex requirements needing finer grained management may opt to use the core callbacks directly.

- [Insert typical diagram here.] [Insert OMAP style config here.]

- Each encoder object needs to provide:
-   A DPMS (basically on/off) function.
-   A mode-fixup function (for converting requested modes into native hardware timings).
-   Functions (prepare, set, and commit) for use by the core DRM helper functions.

- Connector helpers need to provide functions (mode-fetch, validity, and encoder-matching) for returning an ideal encoder for a given connector. The core connector functions include a DPMS callback, save/restore routines (deprecated), detection, mode probing, property handling, and cleanup functions.

}]]>

In the example above (taken from the i915 driver), a CRTC, connector and encoder combination is created. A device-specific i2c bus is also created for fetching EDID data and performing monitor detection. Once the process is complete, the new connector is registered with sysfs to make its properties available to applications.
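To complement the i915 example referenced above, here is a minimal sketch of the same steps for a hypothetical device with a single analog output. Everything prefixed mydrm_ (including the two hardware query helpers) is invented for illustration; only the drm_* calls are actual DRM API, used as described in the preceding sections.

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

/* Hypothetical hardware queries: a cheap hot-plug status read and an
 * expensive, possibly destructive load-detection probe. */
static bool mydrm_hpd_status(struct drm_device *dev);
static bool mydrm_load_detect(struct drm_device *dev);

static enum drm_connector_status
mydrm_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;

	/* Cheap, non-destructive query: safe to run while polling. */
	if (mydrm_hpd_status(dev))
		return connector_status_connected;

	/*
	 * force is false during automated polling: skip the expensive,
	 * destructive probe and report the status as unknown, as required
	 * by the rules above.
	 */
	if (!force)
		return connector_status_unknown;

	/* Explicit user request: a full load-detection probe is acceptable. */
	return mydrm_load_detect(dev) ? connector_status_connected
				      : connector_status_disconnected;
}

static void mydrm_connector_destroy(struct drm_connector *connector)
{
	/* Remove the connector from sysfs before cleaning it up. */
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}

static const struct drm_connector_funcs mydrm_connector_funcs = {
	.dpms = drm_helper_connector_dpms,	/* mid-layer helper, see below */
	.detect = mydrm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = mydrm_connector_destroy,
};

static int mydrm_output_init(struct drm_device *dev)
{
	struct drm_connector *connector;

	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return -ENOMEM;

	/* Initialization fields described earlier, set before registration.
	 * This output has no hotplug interrupt, so it is polled. */
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	/* Register the connector and expose it through sysfs. */
	drm_connector_init(dev, connector, &mydrm_connector_funcs,
			   DRM_MODE_CONNECTOR_VGA);
	drm_sysfs_connector_add(connector);

	/* Start the polling machinery once all connectors are registered. */
	drm_kms_helper_poll_init(dev);
	return 0;
}

A real driver would also create an encoder with drm_encoder_init and attach it with drm_mode_connector_attach_encoder, and would install the helper operations with drm_connector_helper_add, as described in the mid-layer section that follows.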
- VBlank event handling

- The DRM core exposes two vertical blank related ioctls:

-   DRM_IOCTL_WAIT_VBLANK
-     This takes a struct drm_wait_vblank structure as its argument, and it is used to block or request a signal when a specified vblank event occurs.

-   DRM_IOCTL_MODESET_CTL
-     This should be called by application level drivers before and after mode setting, since on many devices the vertical blank counter is reset at that time. Internally, the DRM snapshots the last vblank count when the ioctl is called with the _DRM_PRE_MODESET command, so that the counter won't go backwards (which is dealt with when _DRM_POST_MODESET is used).

Mid-layer Helper Functions

The CRTC, encoder and connector functions provided by the drivers implement the DRM API. They're called by the DRM core and ioctl handlers to handle device state changes and configuration requests. As implementing those functions often requires logic not specific to drivers, mid-layer helper functions are available to avoid duplicating boilerplate code.

The DRM core contains one mid-layer implementation. The mid-layer provides implementations of several CRTC, encoder and connector functions (called from the top of the mid-layer) that pre-process requests and call lower-level functions provided by the driver (at the bottom of the mid-layer). For instance, the drm_crtc_helper_set_config function can be used to fill the struct drm_crtc_funcs set_config field. When called, it will split the set_config operation into smaller, simpler operations and call the driver to handle them.

- To support the functions above, the DRM core provides several helper functions for tracking vertical blank counters, and requires drivers to provide several callbacks: get_vblank_counter(), enable_vblank() and disable_vblank(). The core uses get_vblank_counter() to keep the counter accurate across interrupt disable periods. It should return the current vertical blank event count, which is often tracked in a device register. The enable and disable vblank callbacks should enable and disable vertical blank interrupts, respectively. In the absence of DRM clients waiting on vblank events, the core DRM code uses the disable_vblank() function to disable interrupts, which saves power. They are re-enabled again when a client calls the vblank wait ioctl above.

To use the mid-layer, drivers call the drm_crtc_helper_add, drm_encoder_helper_add and drm_connector_helper_add functions to install their mid-layer bottom operations handlers, and fill the drm_crtc_funcs, drm_encoder_funcs and drm_connector_funcs structures with pointers to the mid-layer top API functions. Installing the mid-layer bottom operation handlers is best done right after registering the corresponding KMS object.

- A device that doesn't provide a count register may simply use an internal atomic counter incremented on every vertical blank interrupt (and then treat the enable_vblank() and disable_vblank() callbacks as no-ops).

The mid-layer is not split between CRTC, encoder and connector operations. To use it, a driver must provide bottom functions for all three KMS entities.

Helper Functions

int drm_crtc_helper_set_config(struct drm_mode_set *set);

The drm_crtc_helper_set_config helper function is a CRTC set_config implementation. It first tries to locate the best encoder for each connector by calling the connector best_encoder helper operation.
+ + + After locating the appropriate encoders, the helper function will + call the mode_fixup encoder and CRTC helper + operations to adjust the requested mode, or reject it completely in + which case an error will be returned to the application. If the new + configuration after mode adjustment is identical to the current + configuration the helper function will return without performing any + other operation. + + + If the adjusted mode is identical to the current mode but changes to + the frame buffer need to be applied, the + drm_crtc_helper_set_config function will call + the CRTC mode_set_base helper operation. If + the adjusted mode differs from the current mode, or if the + mode_set_base helper operation is not + provided, the helper function performs a full mode set sequence by + calling the prepare, + mode_set and + commit CRTC and encoder helper operations, + in that order. + + + + void drm_helper_connector_dpms(struct drm_connector *connector, int mode); + + The drm_helper_connector_dpms helper function + is a connector dpms implementation that + tracks power state of connectors. To use the function, drivers must + provide dpms helper operations for CRTCs + and encoders to apply the DPMS state to the device. + + + The mid-layer doesn't track the power state of CRTCs and encoders. + The dpms helper operations can thus be + called with a mode identical to the currently active mode. + + + + int drm_helper_probe_single_connector_modes(struct drm_connector *connector, + uint32_t maxX, uint32_t maxY); + + The drm_helper_probe_single_connector_modes helper + function is a connector fill_modes + implementation that updates the connection status for the connector + and then retrieves a list of modes by calling the connector + get_modes helper operation. + + + The function filters out modes larger than + max_width and max_height + if specified. It then calls the connector + mode_valid helper operation for each mode in + the probed list to check whether the mode is valid for the connector. + + + + + + CRTC Helper Operations + + + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + Let CRTCs adjust the requested mode or reject it completely. This + operation returns true if the mode is accepted (possibly after being + adjusted) or false if it is rejected. + + + The mode_fixup operation should reject the + mode if it can't reasonably use it. The definition of "reasonable" + is currently fuzzy in this context. One possible behaviour would be + to set the adjusted mode to the panel timings when a fixed-mode + panel is used with hardware capable of scaling. Another behaviour + would be to accept any input mode and adjust it to the closest mode + supported by the hardware (FIXME: This needs to be clarified). + + + + int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) + + Move the CRTC on the current frame buffer (stored in + crtc->fb) to position (x,y). Any of the frame + buffer, x position or y position may have been modified. + + + This helper operation is optional. If not provided, the + drm_crtc_helper_set_config function will fall + back to the mode_set helper operation. + + + FIXME: Why are x and y passed as arguments, as they can be accessed + through crtc->x and + crtc->y? + + + + void (*prepare)(struct drm_crtc *crtc); + + Prepare the CRTC for mode setting. This operation is called after + validating the requested mode. 
Drivers use it to perform device-specific operations required before setting the new mode.

int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode, int x, int y,
                struct drm_framebuffer *old_fb);

Set a new mode, position and frame buffer. Depending on the device requirements, the mode can be stored internally by the driver and applied in the commit operation, or programmed to the hardware immediately.

The mode_set operation returns 0 on success or a negative error code if an error occurs.

void (*commit)(struct drm_crtc *crtc);

Commit a mode. This operation is called after setting the new mode. Upon return the device must use the new mode and be fully operational.

Encoder Helper Operations

bool (*mode_fixup)(struct drm_encoder *encoder,
                   const struct drm_display_mode *mode,
                   struct drm_display_mode *adjusted_mode);

FIXME: The mode argument should be const, but the i915 driver modifies mode->clock in intel_dp_mode_fixup.

Let encoders adjust the requested mode or reject it completely. This operation returns true if the mode is accepted (possibly after being adjusted) or false if it is rejected. See the mode_fixup CRTC helper operation for an explanation of the allowed adjustments.

void (*prepare)(struct drm_encoder *encoder);

Prepare the encoder for mode setting. This operation is called after validating the requested mode. Drivers use it to perform device-specific operations required before setting the new mode.

void (*mode_set)(struct drm_encoder *encoder,
                 struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode);

Set a new mode. Depending on the device requirements, the mode can be stored internally by the driver and applied in the commit operation, or programmed to the hardware immediately.

void (*commit)(struct drm_encoder *encoder);

Commit a mode. This operation is called after setting the new mode. Upon return the device must use the new mode and be fully operational.

Connector Helper Operations

struct drm_encoder *(*best_encoder)(struct drm_connector *connector);

Return a pointer to the best encoder for the connector. Devices that map connectors to encoders 1:1 simply return the pointer to the associated encoder. This operation is mandatory.

int (*get_modes)(struct drm_connector *connector);

Fill the connector's probed_modes list by parsing EDID data with drm_add_edid_modes, or by calling drm_mode_probed_add directly for every supported mode, and return the number of modes it has detected. This operation is mandatory (a complete sketch follows at the end of this section).

When adding modes manually the driver creates each mode with a call to drm_mode_create and must fill the following fields.

__u32 type;

Mode type bitmask, a combination of

  DRM_MODE_TYPE_BUILTIN
    not used?

  DRM_MODE_TYPE_CLOCK_C
    not used?

  DRM_MODE_TYPE_CRTC_C
    not used?

  DRM_MODE_TYPE_PREFERRED - The preferred mode for the connector
    not used?

  DRM_MODE_TYPE_DEFAULT
    not used?

  DRM_MODE_TYPE_USERDEF
    not used?

  DRM_MODE_TYPE_DRIVER
    The mode has been created by the driver (as opposed to user-created modes).

Drivers must set the DRM_MODE_TYPE_DRIVER bit for all modes they create, and set the DRM_MODE_TYPE_PREFERRED bit for the preferred mode.
__u32 clock;

Pixel clock frequency in kHz unit

__u16 hdisplay, hsync_start, hsync_end, htotal;
__u16 vdisplay, vsync_start, vsync_end, vtotal;

Horizontal and vertical timing information

    Active                 Front           Sync           Back
    Region                 Porch                          Porch
    <-----------------------><----------------><-------------><-------------->

      //////////////////////|
     ////////////////////// |
    //////////////////////  |..................               ................
                                                _______________

    <----- [hv]display ----->
    <------------- [hv]sync_start ------------>
    <--------------------- [hv]sync_end --------------------->
    <--------------------------- [hv]total ----------------------------------->
]]>

__u16 hskew;
__u16 vscan;

Unknown

__u32 flags;

Mode flags, a combination of

  DRM_MODE_FLAG_PHSYNC
    Horizontal sync is active high

  DRM_MODE_FLAG_NHSYNC
    Horizontal sync is active low

  DRM_MODE_FLAG_PVSYNC
    Vertical sync is active high

  DRM_MODE_FLAG_NVSYNC
    Vertical sync is active low

  DRM_MODE_FLAG_INTERLACE
    Mode is interlaced

  DRM_MODE_FLAG_DBLSCAN
    Mode uses doublescan

  DRM_MODE_FLAG_CSYNC
    Mode uses composite sync

  DRM_MODE_FLAG_PCSYNC
    Composite sync is active high

  DRM_MODE_FLAG_NCSYNC
    Composite sync is active low

  DRM_MODE_FLAG_HSKEW
    hskew provided (not used?)

  DRM_MODE_FLAG_BCAST
    not used?

  DRM_MODE_FLAG_PIXMUX
    not used?

  DRM_MODE_FLAG_DBLCLK
    not used?

  DRM_MODE_FLAG_CLKDIV2
    ?

Note that modes marked with the INTERLACE or DBLSCAN flags will be filtered out by drm_helper_probe_single_connector_modes if the connector's interlace_allowed or doublescan_allowed field is set to 0.

char name[DRM_DISPLAY_MODE_LEN];

Mode name. The driver must call drm_mode_set_name to fill the mode name from the hdisplay, vdisplay and interlace flag after filling the corresponding fields.

The vrefresh value is computed by drm_helper_probe_single_connector_modes.

When parsing EDID data, drm_add_edid_modes fills the connector display_info width_mm and height_mm fields. When creating modes manually the get_modes helper operation must set the display_info width_mm and height_mm fields if they haven't been set already (for instance at initialization time when a fixed-size panel is attached to the connector). The mode width_mm and height_mm fields are only used internally during EDID parsing and should not be set when creating modes manually.

int (*mode_valid)(struct drm_connector *connector,
                  struct drm_display_mode *mode);

Verify whether a mode is valid for the connector. Return MODE_OK for supported modes and one of the enum drm_mode_status values (MODE_*) for unsupported modes. This operation is mandatory.

As the mode rejection reason is currently not used besides immediately removing the unsupported mode, an implementation can return MODE_BAD regardless of the exact reason why the mode is not valid.

Note that the mode_valid helper operation is only called for modes detected by the device, and not for modes set by the user through the CRTC set_config operation.

- Memory management

Vertical Blanking

Vertical blanking plays a major role in graphics rendering. To achieve tear-free display, users must synchronize page flips and/or rendering to vertical blanking. The DRM API offers ioctls to perform page flips synchronized to vertical blanking and wait for vertical blanking.
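Before turning to the vertical blanking details, the following minimal sketch ties together the connector helper operations described above: a get_modes implementation built from the drm_mode_create, drm_mode_set_name and drm_mode_probed_add calls documented in this section, together with a matching mode_valid. It assumes a hypothetical fixed-mode panel; the mydrm_ names, the 1024x768 timings (standard VESA values) and the physical size are illustrative, not taken from any real device.

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static int mydrm_connector_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_create(connector->dev);
	if (!mode)
		return 0;

	/* Timing fields described above; standard VESA 1024x768@60 values
	 * used here as placeholders for the assumed panel. */
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	mode->clock = 65000;	/* kHz */
	mode->hdisplay = 1024;
	mode->hsync_start = 1048;
	mode->hsync_end = 1184;
	mode->htotal = 1344;
	mode->vdisplay = 768;
	mode->vsync_start = 771;
	mode->vsync_end = 777;
	mode->vtotal = 806;
	mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC;

	/* Derive the mode name from hdisplay, vdisplay and the interlace
	 * flag, then add the mode to the probed list. */
	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	/* Physical size in millimeters, as EDID parsing would otherwise
	 * provide; a fixed panel could equally set this at init time. */
	connector->display_info.width_mm = 217;
	connector->display_info.height_mm = 136;

	return 1;	/* number of modes added */
}

static int mydrm_connector_mode_valid(struct drm_connector *connector,
				      struct drm_display_mode *mode)
{
	/* A fixed-mode panel can reject everything it didn't probe; the
	 * exact rejection reason is currently unused, so MODE_BAD is fine. */
	if (mode->hdisplay != 1024 || mode->vdisplay != 768)
		return MODE_BAD;

	return MODE_OK;
}

Both operations would be installed with drm_connector_helper_add as part of a struct drm_connector_helper_funcs, together with a best_encoder implementation.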
+ + + The DRM core handles most of the vertical blanking management logic, which + involves filtering out spurious interrupts, keeping race-free blanking + counters, coping with counter wrap-around and resets and keeping use + counts. It relies on the driver to generate vertical blanking interrupts + and optionally provide a hardware vertical blanking counter. Drivers must + implement the following operations. + + + + int (*enable_vblank) (struct drm_device *dev, int crtc); +void (*disable_vblank) (struct drm_device *dev, int crtc); + + Enable or disable vertical blanking interrupts for the given CRTC. + + + + u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); + + Retrieve the value of the vertical blanking counter for the given + CRTC. If the hardware maintains a vertical blanking counter its value + should be returned. Otherwise drivers can use the + drm_vblank_count helper function to handle this + operation. + + + - The memory manager lies at the heart of many DRM operations; it - is required to support advanced client features like OpenGL - pbuffers. The DRM currently contains two memory managers: TTM - and GEM. + Drivers must initialize the vertical blanking handling core with a call to + drm_vblank_init in their + load operation. The function will set the struct + drm_device + vblank_disable_allowed field to 0. This will + keep vertical blanking interrupts enabled permanently until the first mode + set operation, where vblank_disable_allowed is + set to 1. The reason behind this is not clear. Drivers can set the field + to 1 after calling drm_vblank_init to make vertical + blanking interrupts dynamically managed from the beginning. + + Vertical blanking interrupts can be enabled by the DRM core or by drivers + themselves (for instance to handle page flipping operations). The DRM core + maintains a vertical blanking use count to ensure that the interrupts are + not disabled while a user still needs them. To increment the use count, + drivers call drm_vblank_get. Upon return vertical + blanking interrupts are guaranteed to be enabled. + + + To decrement the use count drivers call + drm_vblank_put. Only when the use count drops to zero + will the DRM core disable the vertical blanking interrupts after a delay + by scheduling a timer. The delay is accessible through the vblankoffdelay + module parameter or the drm_vblank_offdelay global + variable and expressed in milliseconds. Its default value is 5000 ms. + + + When a vertical blanking interrupt occurs drivers only need to call the + drm_handle_vblank function to account for the + interrupt. + + + Resources allocated by drm_vblank_init must be freed + with a call to drm_vblank_cleanup in the driver + unload operation handler. + + + + + + Open/Close, File Operations and IOCTLs - The Translation Table Manager (TTM) + Open and Close + int (*firstopen) (struct drm_device *); +void (*lastclose) (struct drm_device *); +int (*open) (struct drm_device *, struct drm_file *); +void (*preclose) (struct drm_device *, struct drm_file *); +void (*postclose) (struct drm_device *, struct drm_file *); + Open and close handlers. None of those methods are mandatory. + - TTM was developed by Tungsten Graphics, primarily by Thomas - Hellström, and is intended to be a flexible, high performance - graphics memory manager. + The firstopen method is called by the DRM core + when an application opens a device that has no other opened file handle. 
Similarly the lastclose method is called when the last application holding a file handle opened on the device closes it. Both methods are mostly used by UMS (User Mode Setting) drivers to acquire and release device resources; KMS drivers should instead acquire and release resources in the load and unload methods.

Note that the lastclose method is also called at module unload time or, for hot-pluggable devices, when the device is unplugged. The firstopen and lastclose calls can thus be unbalanced.

The open method is called every time the device is opened by an application. Drivers can allocate per-file private data in this method and store it in the struct drm_file driver_priv field. Note that the open method is called before firstopen.

The close operation is split into preclose and postclose methods. Drivers must stop and clean up all per-file operations in the preclose method. For instance pending vertical blanking and page flip events must be cancelled. No per-file operation is allowed on the file handle after returning from the preclose method.

Finally the postclose method is called as the last step of the close operation, right before calling the lastclose method if no other open file handle exists for the device. Drivers that have allocated per-file private data in the open method should free it here.

The lastclose method should restore CRTC and plane properties to default values, so that a subsequent open of the device will not inherit state from the previous user.

File Operations

const struct file_operations *fops

File operations for the DRM device node.

- The memory manager lies at the heart of many DRM operations; it is required to support advanced client features like OpenGL pbuffers. The DRM currently contains two memory managers: TTM and GEM.

Drivers must define the file operations structure that forms the DRM userspace API entry point, even though most of those operations are implemented in the DRM core. The open, release and ioctl operations are handled by

  .owner = THIS_MODULE,
  .open = drm_open,
  .release = drm_release,
  .unlocked_ioctl = drm_ioctl,
 #ifdef CONFIG_COMPAT
  .compat_ioctl = drm_compat_ioctl,
 #endif

- The Translation Table Manager (TTM)

Drivers that implement private ioctls that require 32/64-bit compatibility support must provide their own compat_ioctl handler that processes private ioctls and calls drm_compat_ioctl for core ioctls.

- TTM was developed by Tungsten Graphics, primarily by Thomas Hellström, and is intended to be a flexible, high performance graphics memory manager.

The read and poll operations provide support for reading DRM events and polling them. They are implemented by

  .poll = drm_poll,
  .read = drm_read,
  .fasync = drm_fasync,
  .llseek = no_llseek,

- Drivers wishing to support TTM must fill out a drm_bo_driver structure.

The memory mapping implementation varies depending on how the driver manages memory. Pre-GEM drivers will use drm_mmap, while GEM-aware drivers will use drm_gem_mmap. See the Memory management chapter.

  .mmap = drm_gem_mmap,

- TTM design background and information belongs here.

No other file operation is supported by the DRM API.

- The Graphics Execution Manager (GEM)

IOCTLs

struct drm_ioctl_desc *ioctls;
int num_ioctls;

Driver-specific ioctl descriptors table.

- GEM is an Intel project, authored by Eric Anholt and Keith Packard. It provides simpler interfaces than TTM, and is well suited for UMA devices.

Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl descriptors table is indexed by the ioctl number offset from the base value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the table entries.

- GEM-enabled drivers must provide gem_init_object() and gem_free_object() callbacks to support the core memory allocation routines. They should also provide several driver-specific ioctls to support command execution, pinning, buffer read & write, mapping, and domain ownership transfers.

DRM_IOCTL_DEF_DRV(ioctl, func, flags)

ioctl is the ioctl name. Drivers must define the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number offset from DRM_COMMAND_BASE and the ioctl number respectively. The first macro is private to the device while the second must be exposed to userspace in a public header.

- On a fundamental level, GEM involves several operations:
-   Memory allocation and freeing
-   Command execution
-   Aperture management at command execution time

- Buffer object allocation is relatively straightforward and largely provided by Linux's shmem layer, which provides memory to back each object. When mapped into the GTT or used in a command buffer, the backing pages for an object are flushed to memory and marked write combined so as to be coherent with the GPU.
Likewise, if the CPU accesses an object after the GPU - has finished rendering to the object, then the object must be made - coherent with the CPU's view - of memory, usually involving GPU cache flushing of various kinds. - This core CPU<->GPU coherency management is provided by a - device-specific ioctl, which evaluates an object's current domain and - performs any necessary flushing or synchronization to put the object - into the desired coherency domain (note that the object may be busy, - i.e. an active render target; in that case, setting the domain - blocks the client and waits for rendering to complete before - performing any necessary flushing operations). - - - Perhaps the most important GEM function is providing a command - execution interface to clients. Client programs construct command - buffers containing references to previously allocated memory objects, - and then submit them to GEM. At that point, GEM takes care to bind - all the objects into the GTT, execute the buffer, and provide - necessary synchronization between clients accessing the same buffers. - This often involves evicting some objects from the GTT and re-binding - others (a fairly expensive operation), and providing relocation - support which hides fixed GTT offsets from clients. Clients must - take care not to submit command buffers that reference more objects - than can fit in the GTT; otherwise, GEM will reject them and no rendering - will occur. Similarly, if several objects in the buffer require - fence registers to be allocated for correct rendering (e.g. 2D blits - on pre-965 chips), care must be taken not to require more fence - registers than are available to the client. Such resource management - should be abstracted from the client in libdrm. + The read and poll + operations provide support for reading DRM events and polling them. They + are implemented by + + .poll = drm_poll, + .read = drm_read, + .fasync = drm_fasync, + .llseek = no_llseek, + + + + The memory mapping implementation varies depending on how the driver + manages memory. Pre-GEM drivers will use drm_mmap, + while GEM-aware drivers will use drm_gem_mmap. See + . + + .mmap = drm_gem_mmap, + + + + No other file operation is supported by the DRM API. + + + + IOCTLs + struct drm_ioctl_desc *ioctls; +int num_ioctls; + Driver-specific ioctls descriptors table. + + Driver-specific ioctls numbers start at DRM_COMMAND_BASE. The ioctls + descriptors table is indexed by the ioctl number offset from the base + value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the + table entries. + + + DRM_IOCTL_DEF_DRV(ioctl, func, flags) + + ioctl is the ioctl name. Drivers must define + the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number + offset from DRM_COMMAND_BASE and the ioctl number respectively. The + first macro is private to the device while the second must be exposed + to userspace in a public header. + + + func is a pointer to the ioctl handler function + compatible with the drm_ioctl_t type. + typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); + + + flags is a bitmask combination of the following + values. It restricts how the ioctl is allowed to be called. 
DRM_AUTH - Only authenticated callers allowed
DRM_MASTER - The ioctl can only be called on the master file handle
DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
DRM_CONTROL_ALLOW - The ioctl can only be called on a control device
DRM_UNLOCKED - The ioctl handler will be called without locking the DRM global mutex

- Output management

- At the core of the DRM output management code is a set of structures representing CRTCs, encoders, and connectors.

- A CRTC is an abstraction representing a part of the chip that contains a pointer to a scanout buffer. Therefore, the number of CRTCs available determines how many independent scanout buffers can be active at any given time. The CRTC structure contains several fields to support this: a pointer to some video memory, a display mode, and an (x, y) offset into the video memory to support panning or configurations where one piece of video memory spans multiple CRTCs.

- An encoder takes pixel data from a CRTC and converts it to a format suitable for any attached connectors. On some devices, it may be possible to have a CRTC send data to more than one encoder. In that case, both encoders would receive data from the same scanout buffer, resulting in a "cloned" display configuration across the connectors attached to each encoder.

- A connector is the final destination for pixel data on a device, and usually connects directly to an external display device like a monitor or laptop panel. A connector can only be attached to one encoder at a time. The connector is also the structure where information about the attached display is kept, so it contains fields for display data, EDID data, DPMS & connection status, and information about modes supported on the attached displays.

- Framebuffer management

- Clients need to provide a framebuffer object which provides a source of pixels for a CRTC to deliver to the encoder(s) and ultimately the connector(s). A framebuffer is fundamentally a driver-specific memory object, made into an opaque handle by the DRM's addfb() function. Once a framebuffer has been created this way, it may be passed to the KMS mode setting routines for use in a completed configuration.

@@ -812,15 +2355,24 @@ void intel_crt_init(struct drm_device *dev)

- Suspend/resume

Suspend/Resume

The DRM core provides some suspend/resume code, but drivers wanting full suspend/resume support should provide save() and restore() functions. These are called at suspend, hibernate, or resume time, and should perform any state save or restore required by your device across suspend or hibernate states.

int (*suspend) (struct drm_device *, pm_message_t state);
int (*resume) (struct drm_device *);

- The DRM core provides some suspend/resume code, but drivers wanting full suspend/resume support should provide save() and restore() functions. These are called at suspend, hibernate, or resume time, and should perform any state save or restore required by your device across suspend or hibernate states.

Those are legacy suspend and resume methods. New drivers should use the power management interface provided by their bus type (usually through the struct device_driver dev_pm_ops) and set these methods to NULL.

@@ -833,6 +2385,35 @@ void intel_crt_init(struct drm_device *dev)

@@ -853,6 +2434,42 @@ void intel_crt_init(struct drm_device *dev)

Cover generic ioctls and sysfs layout here.
We only need high-level info, since man pages should cover the rest. + + + + + VBlank event handling + + The DRM core exposes two vertical blank related ioctls: + + + DRM_IOCTL_WAIT_VBLANK + + + This takes a struct drm_wait_vblank structure as its argument, + and it is used to block or request a signal when a specified + vblank event occurs. + + + + + DRM_IOCTL_MODESET_CTL + + + This should be called by application level drivers before and + after mode setting, since on many devices the vertical blank + counter is reset at that time. Internally, the DRM snapshots + the last vblank count when the ioctl is called with the + _DRM_PRE_MODESET command, so that the counter won't go backwards + (which is dealt with when _DRM_POST_MODESET is used). + + + + + + + + diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index fdc0119963e7..b17587d9412f 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3388,7 +3388,7 @@ M: "Wolfram Sang (embedded platforms)" L: linux-i2c@vger.kernel.org W: http://i2c.wiki.kernel.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ -T: git git://git.fluff.org/bjdooks/linux.git +T: git git://git.pengutronix.de/git/wsa/linux.git S: Maintained F: Documentation/i2c/ F: drivers/i2c/ @@ -3666,11 +3666,12 @@ F: Documentation/networking/README.ipw2200 F: drivers/net/wireless/ipw2x00/ INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) -M: Joseph Cihula +M: Richard L Maliszewski +M: Gang Wei M: Shane Wang L: tboot-devel@lists.sourceforge.net W: http://tboot.sourceforge.net -T: Mercurial http://www.bughost.org/repos.hg/tboot.hg +T: hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot S: Supported F: Documentation/intel_txt.txt F: include/linux/tboot.h diff --git a/trunk/Makefile b/trunk/Makefile index 0f66f146d57e..a3c11d589681 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc5 -NAME = Saber-toothed Squirrel +EXTRAVERSION = -rc7 +NAME = Terrified Chipmunk # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/trunk/arch/arm/Kconfig.debug b/trunk/arch/arm/Kconfig.debug index f15f82bf3a50..e968a52e4881 100644 --- a/trunk/arch/arm/Kconfig.debug +++ b/trunk/arch/arm/Kconfig.debug @@ -356,15 +356,15 @@ choice is nothing connected to read from the DCC. config DEBUG_SEMIHOSTING - bool "Kernel low-level debug output via semihosting I" + bool "Kernel low-level debug output via semihosting I/O" help Semihosting enables code running on an ARM target to use the I/O facilities on a host debugger/emulator through a - simple SVC calls. The host debugger or emulator must have + simple SVC call. The host debugger or emulator must have semihosting enabled for the special svc call to be trapped otherwise the kernel will crash. - This is known to work with OpenOCD, as wellas + This is known to work with OpenOCD, as well as ARM's Fast Models, or any other controlling environment that implements semihosting. 
diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index 30eae87ead6d..a051dfbdd7db 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux zinstall uinstall install: vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ -%.dtb: +%.dtb: scripts $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ -dtbs: +dtbs: scripts $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ # We use MRPROPER_FILES and CLEAN_FILES now diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S index b8c64b80bafc..bc67cbff3944 100644 --- a/trunk/arch/arm/boot/compressed/head.S +++ b/trunk/arch/arm/boot/compressed/head.S @@ -653,16 +653,21 @@ __armv7_mmu_cache_on: mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mrc p15, 0, r0, c1, c0, 0 @ read control reg + bic r0, r0, #1 << 28 @ clear SCTLR.TRE orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x003c @ write buffer #ifdef CONFIG_MMU #ifdef CONFIG_CPU_ENDIAN_BE8 orr r0, r0, #1 << 25 @ big-endian page tables #endif + mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg orrne r0, r0, #1 @ MMU enabled movne r1, #0xfffffffd @ domain 0 = client + bic r6, r6, #1 << 31 @ 32-bit translation system + bic r6, r6, #3 << 0 @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer mcrne p15, 0, r1, c3, c0, 0 @ load domain access control + mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif mcr p15, 0, r0, c7, c5, 4 @ ISB mcr p15, 0, r0, c1, c0, 0 @ load control register diff --git a/trunk/arch/arm/boot/dts/at91sam9260.dtsi b/trunk/arch/arm/boot/dts/at91sam9260.dtsi index 66389c1c6f62..7c95f76398de 100644 --- a/trunk/arch/arm/boot/dts/at91sam9260.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9260.dtsi @@ -104,6 +104,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff600 { @@ -113,6 +114,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff800 { @@ -122,6 +124,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@fffff200 { diff --git a/trunk/arch/arm/boot/dts/at91sam9263.dtsi b/trunk/arch/arm/boot/dts/at91sam9263.dtsi index b460d6ce9eb5..195019b7ca0e 100644 --- a/trunk/arch/arm/boot/dts/at91sam9263.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9263.dtsi @@ -95,6 +95,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff400 { @@ -104,6 +105,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff600 { @@ -113,6 +115,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffff800 { @@ -122,6 +125,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioE: gpio@fffffa00 { @@ -131,6 +135,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@ffffee00 { diff --git a/trunk/arch/arm/boot/dts/at91sam9g45.dtsi b/trunk/arch/arm/boot/dts/at91sam9g45.dtsi index bafa8806fc17..63751b1e744b 100644 --- a/trunk/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9g45.dtsi @@ -113,6 +113,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff400 { @@ -122,6 +123,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; 
pioC: gpio@fffff600 { @@ -131,6 +133,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffff800 { @@ -140,6 +143,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioE: gpio@fffffa00 { @@ -149,6 +153,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@ffffee00 { diff --git a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi index bfac0dfc332c..ef9336ae9614 100644 --- a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi @@ -107,6 +107,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff600 { @@ -116,6 +117,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff800 { @@ -125,6 +127,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffffa00 { @@ -134,6 +137,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@fffff200 { diff --git a/trunk/arch/arm/boot/dts/at91sam9x5.dtsi b/trunk/arch/arm/boot/dts/at91sam9x5.dtsi index 4a18c393b136..8a387a8d61b7 100644 --- a/trunk/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9x5.dtsi @@ -115,6 +115,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff600 { @@ -124,6 +125,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff800 { @@ -133,6 +135,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffffa00 { @@ -142,6 +145,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@fffff200 { diff --git a/trunk/arch/arm/include/asm/assembler.h b/trunk/arch/arm/include/asm/assembler.h index 03fb93621d0d..5c8b3bf4d825 100644 --- a/trunk/arch/arm/include/asm/assembler.h +++ b/trunk/arch/arm/include/asm/assembler.h @@ -320,4 +320,12 @@ .size \name , . - \name .endm + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcccs \tmp, \tmp, \limit + bcs \bad +#endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/trunk/arch/arm/include/asm/memory.h b/trunk/arch/arm/include/asm/memory.h index e965f1b560f1..5f6ddcc56452 100644 --- a/trunk/arch/arm/include/asm/memory.h +++ b/trunk/arch/arm/include/asm/memory.h @@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) #endif #endif +#endif /* __ASSEMBLY__ */ #ifndef PHYS_OFFSET #ifdef PLAT_PHYS_OFFSET @@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x) #endif #endif +#ifndef __ASSEMBLY__ + /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. diff --git a/trunk/arch/arm/include/asm/tlb.h b/trunk/arch/arm/include/asm/tlb.h index 314d4664eae7..99a19512ee26 100644 --- a/trunk/arch/arm/include/asm/tlb.h +++ b/trunk/arch/arm/include/asm/tlb.h @@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { pgtable_page_dtor(pte); +#ifdef CONFIG_ARM_LPAE + tlb_add_flush(tlb, addr); +#else /* * With the classic ARM MMU, a pte page has two corresponding pmd * entries, each covering 1MB. 
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, addr &= PMD_MASK; tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); tlb_add_flush(tlb, addr + SZ_1M); +#endif tlb_remove_page(tlb, pte); } diff --git a/trunk/arch/arm/include/asm/uaccess.h b/trunk/arch/arm/include/asm/uaccess.h index 479a6352e0b5..77bd79f2ffdb 100644 --- a/trunk/arch/arm/include/asm/uaccess.h +++ b/trunk/arch/arm/include/asm/uaccess.h @@ -101,28 +101,39 @@ extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -#define __get_user_x(__r2,__p,__e,__s,__i...) \ +#define __GUP_CLOBBER_1 "lr", "cc" +#ifdef CONFIG_CPU_USE_DOMAINS +#define __GUP_CLOBBER_2 "ip", "lr", "cc" +#else +#define __GUP_CLOBBER_2 "lr", "cc" +#endif +#define __GUP_CLOBBER_4 "lr", "cc" + +#define __get_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ + __asmeq("%3", "r1") \ "bl __get_user_" #__s \ : "=&r" (__e), "=r" (__r2) \ - : "0" (__p) \ - : __i, "cc") + : "0" (__p), "r" (__l) \ + : __GUP_CLOBBER_##__s) -#define get_user(x,p) \ +#define __get_user_check(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ register unsigned long __r2 asm("r2"); \ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, 1, "lr"); \ - break; \ + __get_user_x(__r2, __p, __e, __l, 1); \ + break; \ case 2: \ - __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ + __get_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, 4, "lr"); \ + __get_user_x(__r2, __p, __e, __l, 4); \ break; \ default: __e = __get_user_bad(); break; \ } \ @@ -130,42 +141,57 @@ extern int __get_user_4(void *); __e; \ }) +#define get_user(x,p) \ + ({ \ + might_fault(); \ + __get_user_check(x,p); \ + }) + extern int __put_user_1(void *, unsigned int); extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__s) \ +#define __put_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ + __asmeq("%3", "r1") \ "bl __put_user_" #__s \ : "=&r" (__e) \ - : "0" (__p), "r" (__r2) \ + : "0" (__p), "r" (__r2), "r" (__l) \ : "ip", "lr", "cc") -#define put_user(x,p) \ +#define __put_user_check(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __r2 asm("r2") = (x); \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __put_user_x(__r2, __p, __e, 1); \ + __put_user_x(__r2, __p, __e, __l, 1); \ break; \ case 2: \ - __put_user_x(__r2, __p, __e, 2); \ + __put_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __put_user_x(__r2, __p, __e, 4); \ + __put_user_x(__r2, __p, __e, __l, 4); \ break; \ case 8: \ - __put_user_x(__r2, __p, __e, 8); \ + __put_user_x(__r2, __p, __e, __l, 8); \ break; \ default: __e = __put_user_bad(); break; \ } \ __e; \ }) +#define put_user(x,p) \ + ({ \ + might_fault(); \ + __put_user_check(x,p); \ + }) + #else /* CONFIG_MMU */ /* @@ -219,6 +245,7 @@ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ + might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: 
__get_user_asm_byte(__gu_val,__gu_addr,err); break; \ case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ @@ -300,6 +327,7 @@ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ + might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ diff --git a/trunk/arch/arm/include/asm/unistd.h b/trunk/arch/arm/include/asm/unistd.h index 0cab47d4a83f..2fde5fd1acce 100644 --- a/trunk/arch/arm/include/asm/unistd.h +++ b/trunk/arch/arm/include/asm/unistd.h @@ -404,6 +404,7 @@ #define __NR_setns (__NR_SYSCALL_BASE+375) #define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) + /* 378 for kcmp */ /* * The following SWIs are ARM private. @@ -483,6 +484,7 @@ */ #define __IGNORE_fadvise64_64 #define __IGNORE_migrate_pages +#define __IGNORE_kcmp #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ diff --git a/trunk/arch/arm/kernel/calls.S b/trunk/arch/arm/kernel/calls.S index 463ff4a0ec8a..e337879595e5 100644 --- a/trunk/arch/arm/kernel/calls.S +++ b/trunk/arch/arm/kernel/calls.S @@ -387,6 +387,7 @@ /* 375 */ CALL(sys_setns) CALL(sys_process_vm_readv) CALL(sys_process_vm_writev) + CALL(sys_ni_syscall) /* reserved for sys_kcmp */ #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c index ba386bd94107..281bf3301241 100644 --- a/trunk/arch/arm/kernel/hw_breakpoint.c +++ b/trunk/arch/arm/kernel/hw_breakpoint.c @@ -159,6 +159,12 @@ static int debug_arch_supported(void) arch >= ARM_DEBUG_ARCH_V7_1; } +/* Can we determine the watchpoint access type from the fsr? */ +static int debug_exception_updates_fsr(void) +{ + return 0; +} + /* Determine number of WRP registers available. */ static int get_num_wrp_resources(void) { @@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) /* Aligned */ break; case 1: - /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) - break; case 2: /* Allow halfword watchpoints and breakpoints. */ if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) break; + case 3: + /* Allow single byte watchpoint. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; default: ret = -EINVAL; goto out; @@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) info->address &= ~alignment_mask; info->ctrl.len <<= offset; - /* - * Currently we rely on an overflow handler to take - * care of single-stepping the breakpoint when it fires. - * In the case of userspace breakpoints on a core with V7 debug, - * we can use the mismatch feature as a poor-man's hardware - * single-step, but this only works for per-task breakpoints. - */ - if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) || - !core_has_mismatch_brps() || !bp->hw.bp_target)) { - pr_warning("overflow handler required but none found\n"); - ret = -EINVAL; + if (!bp->overflow_handler) { + /* + * Mismatch breakpoints are required for single-stepping + * breakpoints. + */ + if (!core_has_mismatch_brps()) + return -EINVAL; + + /* We don't allow mismatch breakpoints in kernel space. */ + if (arch_check_bp_in_kernelspace(bp)) + return -EPERM; + + /* + * Per-cpu breakpoints are not supported by our stepping + * mechanism. 
+ */ + if (!bp->hw.bp_target) + return -EINVAL; + + /* + * We only support specific access types if the fsr + * reports them. + */ + if (!debug_exception_updates_fsr() && + (info->ctrl.type == ARM_BREAKPOINT_LOAD || + info->ctrl.type == ARM_BREAKPOINT_STORE)) + return -EINVAL; } + out: return ret; } @@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, goto unlock; /* Check that the access type matches. */ - access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : - HW_BREAKPOINT_R; - if (!(access & hw_breakpoint_type(wp))) - goto unlock; + if (debug_exception_updates_fsr()) { + access = (fsr & ARM_FSR_ACCESS_MASK) ? + HW_BREAKPOINT_W : HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) + goto unlock; + } /* We have a winner. */ info->trigger = addr; diff --git a/trunk/arch/arm/kernel/smp_twd.c b/trunk/arch/arm/kernel/smp_twd.c index fef42b21cecb..e1f906989bb8 100644 --- a/trunk/arch/arm/kernel/smp_twd.c +++ b/trunk/arch/arm/kernel/smp_twd.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -96,7 +95,52 @@ static void twd_timer_stop(struct clock_event_device *clk) disable_percpu_irq(clk->irq); } -#ifdef CONFIG_CPU_FREQ +#ifdef CONFIG_COMMON_CLK + +/* + * Updates clockevent frequency when the cpu frequency changes. + * Called on the cpu that is changing frequency with interrupts disabled. + */ +static void twd_update_frequency(void *new_rate) +{ + twd_timer_rate = *((unsigned long *) new_rate); + + clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); +} + +static int twd_rate_change(struct notifier_block *nb, + unsigned long flags, void *data) +{ + struct clk_notifier_data *cnd = data; + + /* + * The twd clock events must be reprogrammed to account for the new + * frequency. The timer is local to a cpu, so cross-call to the + * changing cpu. + */ + if (flags == POST_RATE_CHANGE) + smp_call_function(twd_update_frequency, + (void *)&cnd->new_rate, 1); + + return NOTIFY_OK; +} + +static struct notifier_block twd_clk_nb = { + .notifier_call = twd_rate_change, +}; + +static int twd_clk_init(void) +{ + if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) + return clk_notifier_register(twd_clk, &twd_clk_nb); + + return 0; +} +core_initcall(twd_clk_init); + +#elif defined (CONFIG_CPU_FREQ) + +#include /* * Updates clockevent frequency when the cpu frequency changes. 
diff --git a/trunk/arch/arm/kernel/traps.c b/trunk/arch/arm/kernel/traps.c index f7945218b8c6..b0179b89a04c 100644 --- a/trunk/arch/arm/kernel/traps.c +++ b/trunk/arch/arm/kernel/traps.c @@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) #endif instr = *(u32 *) pc; } else if (thumb_mode(regs)) { - get_user(instr, (u16 __user *)pc); + if (get_user(instr, (u16 __user *)pc)) + goto die_sig; if (is_wide_instruction(instr)) { unsigned int instr2; - get_user(instr2, (u16 __user *)pc+1); + if (get_user(instr2, (u16 __user *)pc+1)) + goto die_sig; instr <<= 16; instr |= instr2; } - } else { - get_user(instr, (u32 __user *)pc); + } else if (get_user(instr, (u32 __user *)pc)) { + goto die_sig; } if (call_undef_hook(regs, instr) == 0) return; +die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", diff --git a/trunk/arch/arm/lib/delay.c b/trunk/arch/arm/lib/delay.c index d6dacc69254e..395d5fbb8fa2 100644 --- a/trunk/arch/arm/lib/delay.c +++ b/trunk/arch/arm/lib/delay.c @@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq) { pr_info("Switching to timer-based delay loop\n"); lpj_fine = freq / HZ; + loops_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; diff --git a/trunk/arch/arm/lib/getuser.S b/trunk/arch/arm/lib/getuser.S index 11093a7c3e32..9b06bb41fca6 100644 --- a/trunk/arch/arm/lib/getuser.S +++ b/trunk/arch/arm/lib/getuser.S @@ -16,8 +16,9 @@ * __get_user_X * * Inputs: r0 contains the address + * r1 contains the address limit, which must be preserved * Outputs: r0 is the error code - * r2, r3 contains the zero-extended value + * r2 contains the zero-extended value * lr corrupted * * No other registers must be altered. (see @@ -27,33 +28,39 @@ * Note also that it is intended that __get_user_bad is not global. */ #include +#include #include #include ENTRY(__get_user_1) + check_uaccess r0, 1, r1, r2, __get_user_bad 1: TUSER(ldrb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__get_user_1) ENTRY(__get_user_2) -#ifdef CONFIG_THUMB2_KERNEL -2: TUSER(ldrb) r2, [r0] -3: TUSER(ldrb) r3, [r0, #1] + check_uaccess r0, 2, r1, r2, __get_user_bad +#ifdef CONFIG_CPU_USE_DOMAINS +rb .req ip +2: ldrbt r2, [r0], #1 +3: ldrbt rb, [r0], #0 #else -2: TUSER(ldrb) r2, [r0], #1 -3: TUSER(ldrb) r3, [r0] +rb .req r0 +2: ldrb r2, [r0] +3: ldrb rb, [r0, #1] #endif #ifndef __ARMEB__ - orr r2, r2, r3, lsl #8 + orr r2, r2, rb, lsl #8 #else - orr r2, r3, r2, lsl #8 + orr r2, rb, r2, lsl #8 #endif mov r0, #0 mov pc, lr ENDPROC(__get_user_2) ENTRY(__get_user_4) + check_uaccess r0, 4, r1, r2, __get_user_bad 4: TUSER(ldr) r2, [r0] mov r0, #0 mov pc, lr diff --git a/trunk/arch/arm/lib/putuser.S b/trunk/arch/arm/lib/putuser.S index 7db25990c589..3d73dcb959b0 100644 --- a/trunk/arch/arm/lib/putuser.S +++ b/trunk/arch/arm/lib/putuser.S @@ -16,6 +16,7 @@ * __put_user_X * * Inputs: r0 contains the address + * r1 contains the address limit, which must be preserved * r2, r3 contains the value * Outputs: r0 is the error code * lr corrupted @@ -27,16 +28,19 @@ * Note also that it is intended that __put_user_bad is not global. 
*/ #include +#include #include #include ENTRY(__put_user_1) + check_uaccess r0, 1, r1, ip, __put_user_bad 1: TUSER(strb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_1) ENTRY(__put_user_2) + check_uaccess r0, 2, r1, ip, __put_user_bad mov ip, r2, lsr #8 #ifdef CONFIG_THUMB2_KERNEL #ifndef __ARMEB__ @@ -60,12 +64,14 @@ ENTRY(__put_user_2) ENDPROC(__put_user_2) ENTRY(__put_user_4) + check_uaccess r0, 4, r1, ip, __put_user_bad 4: TUSER(str) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_4) ENTRY(__put_user_8) + check_uaccess r0, 8, r1, ip, __put_user_bad #ifdef CONFIG_THUMB2_KERNEL 5: TUSER(str) r2, [r0] 6: TUSER(str) r3, [r0, #4] diff --git a/trunk/arch/arm/mach-imx/clk-imx25.c b/trunk/arch/arm/mach-imx/clk-imx25.c index fdd8cc87c9fe..d20d4795f4ea 100644 --- a/trunk/arch/arm/mach-imx/clk-imx25.c +++ b/trunk/arch/arm/mach-imx/clk-imx25.c @@ -222,10 +222,8 @@ int __init mx25_clocks_init(void) clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0"); clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0"); - clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0"); - clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0"); - clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1"); - clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1"); + clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0"); + clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1"); clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0"); clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0"); clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0"); @@ -243,6 +241,6 @@ int __init mx25_clocks_init(void) clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma"); clk_register_clkdev(clk[iim_ipg], "iim", NULL); - mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); + mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1); return 0; } diff --git a/trunk/arch/arm/mach-imx/clk-imx35.c b/trunk/arch/arm/mach-imx/clk-imx35.c index c6422fb10bae..65fb8bcd86cb 100644 --- a/trunk/arch/arm/mach-imx/clk-imx35.c +++ b/trunk/arch/arm/mach-imx/clk-imx35.c @@ -230,10 +230,8 @@ int __init mx35_clocks_init() clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1"); clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); - clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0"); - clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0"); - clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1"); - clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1"); + clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0"); + clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1"); /* i.mx35 has the i.mx21 type uart */ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0"); diff --git a/trunk/arch/arm/mach-imx/mach-armadillo5x0.c b/trunk/arch/arm/mach-imx/mach-armadillo5x0.c index 2c6ab3273f9e..5985ed1b8c98 100644 --- a/trunk/arch/arm/mach-imx/mach-armadillo5x0.c +++ b/trunk/arch/arm/mach-imx/mach-armadillo5x0.c @@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void) imx31_add_mxc_nand(&armadillo5x0_nand_board_info); /* set NAND page size to 2k if not configured via boot mode pins */ - __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR); + __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) | + (1 << 30), mx3_ccm_base + MXC_CCM_RCSR); /* RTC */ /* Get RTC IRQ and register the chip */ diff --git 
a/trunk/arch/arm/mach-omap2/Kconfig b/trunk/arch/arm/mach-omap2/Kconfig index fcd4e85c4ddc..346fd26f3aa6 100644 --- a/trunk/arch/arm/mach-omap2/Kconfig +++ b/trunk/arch/arm/mach-omap2/Kconfig @@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA select OMAP_PACKAGE_CBB select REGULATOR_FIXED_VOLTAGE if REGULATOR -config MACH_OMAP3_TOUCHBOOK +config MACH_TOUCHBOOK bool "OMAP3 Touch Book" depends on ARCH_OMAP3 default y + select OMAP_PACKAGE_CBB config MACH_OMAP_3430SDP bool "OMAP 3430 SDP board" diff --git a/trunk/arch/arm/mach-omap2/Makefile b/trunk/arch/arm/mach-omap2/Makefile index f6a24b3f9c4f..34c2c7f59f0a 100644 --- a/trunk/arch/arm/mach-omap2/Makefile +++ b/trunk/arch/arm/mach-omap2/Makefile @@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o -obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o +obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o diff --git a/trunk/arch/arm/mach-omap2/clock33xx_data.c b/trunk/arch/arm/mach-omap2/clock33xx_data.c index 25bbcc7ca4dc..ae27de8899a6 100644 --- a/trunk/arch/arm/mach-omap2/clock33xx_data.c +++ b/trunk/arch/arm/mach-omap2/clock33xx_data.c @@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = { CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX), CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX), CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX), - CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX), - CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX), - CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX), - CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX), - CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX), - CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX), - CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX), + CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX), + CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX), + CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX), + CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX), + CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX), + CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX), + CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX), CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX), CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX), CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX), diff --git a/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c index a0d68dbecfa3..f99e65cfb862 100644 --- a/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c +++ b/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c @@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm) _clkdm_del_autodeps(clkdm); } +static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm) +{ + bool hwsup = false; + + if (!clkdm->clktrctrl_mask) + return 0; + + hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs, + clkdm->clktrctrl_mask); + + if (hwsup) { + /* Disable HW transitions when we are changing deps */ + _disable_hwsup(clkdm); + _clkdm_add_autodeps(clkdm); + _enable_hwsup(clkdm); + } else { + if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP) + omap3_clkdm_wakeup(clkdm); + } + + return 0; +} + +static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm) +{ + bool hwsup = false; + + if (!clkdm->clktrctrl_mask) + return 0; + + hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs, + clkdm->clktrctrl_mask); + + if 
(hwsup) { + /* Disable HW transitions when we are changing deps */ + _disable_hwsup(clkdm); + _clkdm_del_autodeps(clkdm); + _enable_hwsup(clkdm); + } else { + if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP) + omap3_clkdm_sleep(clkdm); + } + + return 0; +} + struct clkdm_ops omap2_clkdm_operations = { .clkdm_add_wkdep = omap2_clkdm_add_wkdep, .clkdm_del_wkdep = omap2_clkdm_del_wkdep, @@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = { .clkdm_wakeup = omap3_clkdm_wakeup, .clkdm_allow_idle = omap3_clkdm_allow_idle, .clkdm_deny_idle = omap3_clkdm_deny_idle, - .clkdm_clk_enable = omap2_clkdm_clk_enable, - .clkdm_clk_disable = omap2_clkdm_clk_disable, + .clkdm_clk_enable = omap3xxx_clkdm_clk_enable, + .clkdm_clk_disable = omap3xxx_clkdm_clk_disable, }; diff --git a/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h b/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h index 766338fe4d34..975f6bda0e0b 100644 --- a/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h +++ b/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h @@ -67,6 +67,7 @@ #define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0) /* CM_IDLEST_IVA2 */ +#define OMAP3430_ST_IVA2_SHIFT 0 #define OMAP3430_ST_IVA2_MASK (1 << 0) /* CM_IDLEST_PLL_IVA2 */ diff --git a/trunk/arch/arm/mach-omap2/omap-wakeupgen.c b/trunk/arch/arm/mach-omap2/omap-wakeupgen.c index 05fdebfaa195..330d4c6e746b 100644 --- a/trunk/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/trunk/arch/arm/mach-omap2/omap-wakeupgen.c @@ -46,7 +46,7 @@ static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_SPINLOCK(wakeupgen_lock); -static unsigned int irq_target_cpu[NR_IRQS]; +static unsigned int irq_target_cpu[MAX_IRQS]; static unsigned int irq_banks = MAX_NR_REG_BANKS; static unsigned int max_irqs = MAX_IRQS; static unsigned int omap_secure_apis; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c index 6ca8e519968d..37afbd173c2c 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c @@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh) _enable_sysc(oh); } } else { + _omap4_disable_module(oh); _disable_clocks(oh); pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n", oh->name, r); diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index c9e38200216b..ce7e6068768f 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = { /* IVA2 (IVA2) */ static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = { - { .name = "logic", .rst_shift = 0 }, - { .name = "seq0", .rst_shift = 1 }, - { .name = "seq1", .rst_shift = 2 }, + { .name = "logic", .rst_shift = 0, .st_shift = 8 }, + { .name = "seq0", .rst_shift = 1, .st_shift = 9 }, + { .name = "seq1", .rst_shift = 2, .st_shift = 10 }, }; static struct omap_hwmod omap3xxx_iva_hwmod = { @@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = { .rst_lines = omap3xxx_iva_resets, .rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets), .main_clk = "iva2_ck", + .prcm = { + .omap2 = { + .module_offs = OMAP3430_IVA2_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT, + } + }, }; /* timer class */ diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 242aee498ceb..afb60917a948 100644 --- 
a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = { }; /* dsp -> sl2if */ -static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = { +static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_sl2if_hwmod, .clk = "dpll_iva_m5x2_ck", @@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = { }; /* iva -> sl2if */ -static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = { +static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = { .master = &omap44xx_iva_hwmod, .slave = &omap44xx_sl2if_hwmod, .clk = "dpll_iva_m5x2_ck", @@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = { }; /* l3_main_2 -> sl2if */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = { +static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_sl2if_hwmod, .clk = "l3_div_ck", @@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_abe__dmic, &omap44xx_l4_abe__dmic_dma, &omap44xx_dsp__iva, - &omap44xx_dsp__sl2if, + /* &omap44xx_dsp__sl2if, */ &omap44xx_l4_cfg__dsp, &omap44xx_l3_main_2__dss, &omap44xx_l4_per__dss, @@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_per__i2c4, &omap44xx_l3_main_2__ipu, &omap44xx_l3_main_2__iss, - &omap44xx_iva__sl2if, + /* &omap44xx_iva__sl2if, */ &omap44xx_l3_main_2__iva, &omap44xx_l4_wkup__kbd, &omap44xx_l4_cfg__mailbox, @@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_cfg__cm_core, &omap44xx_l4_wkup__prm, &omap44xx_l4_wkup__scrm, - &omap44xx_l3_main_2__sl2if, + /* &omap44xx_l3_main_2__sl2if, */ &omap44xx_l4_abe__slimbus1, &omap44xx_l4_abe__slimbus1_dma, &omap44xx_l4_per__slimbus2, diff --git a/trunk/arch/arm/mach-omap2/timer.c b/trunk/arch/arm/mach-omap2/timer.c index 2ff6d41ec6c6..2ba4f57dda86 100644 --- a/trunk/arch/arm/mach-omap2/timer.c +++ b/trunk/arch/arm/mach-omap2/timer.c @@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void) return 0; } +#ifdef CONFIG_OMAP_32K_TIMER /* Setup free-running counter for clocksource */ static int __init omap2_sync32k_clocksource_init(void) { @@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void) return ret; } +#else +static inline int omap2_sync32k_clocksource_init(void) +{ + return -ENODEV; +} +#endif static void __init omap2_gptimer_clocksource_init(int gptimer_id, const char *fck_source) diff --git a/trunk/arch/arm/mach-shmobile/board-kzm9g.c b/trunk/arch/arm/mach-shmobile/board-kzm9g.c index 53b7ea92c32c..3b8a0171c3cb 100644 --- a/trunk/arch/arm/mach-shmobile/board-kzm9g.c +++ b/trunk/arch/arm/mach-shmobile/board-kzm9g.c @@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = gic_spi(141), + .start = gic_spi(140), .flags = IORESOURCE_IRQ, }, [2] = { - .start = gic_spi(140), + .start = gic_spi(141), .flags = IORESOURCE_IRQ, }, }; diff --git a/trunk/arch/arm/mm/context.c b/trunk/arch/arm/mm/context.c index 119bc52ab93e..4e07eec1270d 100644 --- a/trunk/arch/arm/mm/context.c +++ b/trunk/arch/arm/mm/context.c @@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, pid = task_pid_nr(thread->task) << ASID_BITS; asm volatile( " 
mrc p15, 0, %0, c13, c0, 1\n" - " bfi %1, %0, #0, %2\n" - " mcr p15, 0, %1, c13, c0, 1\n" + " and %0, %0, %2\n" + " orr %0, %0, %1\n" + " mcr p15, 0, %0, c13, c0, 1\n" : "=r" (contextidr), "+r" (pid) - : "I" (ASID_BITS)); + : "I" (~ASID_MASK)); isb(); return NOTIFY_OK; diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index 051204fc4617..e59c4ab71bcb 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -489,7 +489,7 @@ static bool __in_atomic_pool(void *start, size_t size) void *pool_start = pool->vaddr; void *pool_end = pool->vaddr + pool->size; - if (start < pool_start || start > pool_end) + if (start < pool_start || start >= pool_end) return false; if (end <= pool_end) diff --git a/trunk/arch/arm/mm/mm.h b/trunk/arch/arm/mm/mm.h index 6776160618ef..a8ee92da3544 100644 --- a/trunk/arch/arm/mm/mm.h +++ b/trunk/arch/arm/mm/mm.h @@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page /* permanent static mappings from iotable_init() */ #define VM_ARM_STATIC_MAPPING 0x40000000 +/* empty mapping */ +#define VM_ARM_EMPTY_MAPPING 0x20000000 + /* mapping type (attributes) for permanent static mappings */ #define VM_ARM_MTYPE(mt) ((mt) << 20) #define VM_ARM_MTYPE_MASK (0x1f << 20) diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c index 4c2d0451e84a..c2fa21d0103e 100644 --- a/trunk/arch/arm/mm/mmu.c +++ b/trunk/arch/arm/mm/mmu.c @@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr) vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); vm->addr = (void *)addr; vm->size = SECTION_SIZE; - vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; + vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm->caller = pmd_empty_section_gap; vm_area_add_early(vm); } @@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void) /* we're still single threaded hence no lock needed here */ for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) + if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) continue; addr = (unsigned long)vm->addr; if (addr < next) @@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void) * Check whether this memory bank would partially overlap * the vmalloc area. 
*/ - if (__va(bank->start + bank->size) > vmalloc_min || - __va(bank->start + bank->size) < __va(bank->start)) { + if (__va(bank->start + bank->size - 1) >= vmalloc_min || + __va(bank->start + bank->size - 1) <= __va(bank->start)) { unsigned long newsize = vmalloc_min - __va(bank->start); printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " "to -%.8llx (vmalloc region overlap).\n", diff --git a/trunk/arch/arm/plat-mxc/include/mach/mx25.h b/trunk/arch/arm/plat-mxc/include/mach/mx25.h index 627d94f1b010..ec466400a200 100644 --- a/trunk/arch/arm/plat-mxc/include/mach/mx25.h +++ b/trunk/arch/arm/plat-mxc/include/mach/mx25.h @@ -98,6 +98,7 @@ #define MX25_INT_UART1 (NR_IRQS_LEGACY + 45) #define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51) #define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52) +#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54) #define MX25_INT_FEC (NR_IRQS_LEGACY + 57) #define MX25_DMA_REQ_SSI2_RX1 22 diff --git a/trunk/arch/arm/plat-omap/sram.c b/trunk/arch/arm/plat-omap/sram.c index 766181cb5c95..024f3b08db29 100644 --- a/trunk/arch/arm/plat-omap/sram.c +++ b/trunk/arch/arm/plat-omap/sram.c @@ -68,6 +68,7 @@ static unsigned long omap_sram_start; static void __iomem *omap_sram_base; +static unsigned long omap_sram_skip; static unsigned long omap_sram_size; static void __iomem *omap_sram_ceil; @@ -106,6 +107,7 @@ static int is_sram_locked(void) */ static void __init omap_detect_sram(void) { + omap_sram_skip = SRAM_BOOTLOADER_SZ; if (cpu_class_is_omap2()) { if (is_sram_locked()) { if (cpu_is_omap34xx()) { @@ -113,6 +115,7 @@ static void __init omap_detect_sram(void) if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { omap_sram_size = 0x7000; /* 28K */ + omap_sram_skip += SZ_16K; } else { omap_sram_size = 0x8000; /* 32K */ } @@ -175,8 +178,10 @@ static void __init omap_map_sram(void) return; #ifdef CONFIG_OMAP4_ERRATA_I688 + if (cpu_is_omap44xx()) { omap_sram_start += PAGE_SIZE; omap_sram_size -= SZ_16K; + } #endif if (cpu_is_omap34xx()) { /* @@ -203,8 +208,8 @@ static void __init omap_map_sram(void) * Looks like we need to preserve some bootloader code at the * beginning of SRAM for jumping to flash for reboot to work... 
*/ - memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0, - omap_sram_size - SRAM_BOOTLOADER_SZ); + memset_io(omap_sram_base + omap_sram_skip, 0, + omap_sram_size - omap_sram_skip); } /* @@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size) { unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; - available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ); + available = omap_sram_ceil - (omap_sram_base + omap_sram_skip); if (size > available) { pr_err("Not enough space in SRAM\n"); diff --git a/trunk/arch/arm/plat-samsung/clock.c b/trunk/arch/arm/plat-samsung/clock.c index 65c5eca475e7..d1116e2dfbea 100644 --- a/trunk/arch/arm/plat-samsung/clock.c +++ b/trunk/arch/arm/plat-samsung/clock.c @@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate) int clk_set_rate(struct clk *clk, unsigned long rate) { + unsigned long flags; int ret; if (IS_ERR(clk)) @@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) if (clk->ops == NULL || clk->ops->set_rate == NULL) return -EINVAL; - spin_lock(&clocks_lock); + spin_lock_irqsave(&clocks_lock, flags); ret = (clk->ops->set_rate)(clk, rate); - spin_unlock(&clocks_lock); + spin_unlock_irqrestore(&clocks_lock, flags); return ret; } @@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk) int clk_set_parent(struct clk *clk, struct clk *parent) { + unsigned long flags; int ret = 0; if (IS_ERR(clk)) return -EINVAL; - spin_lock(&clocks_lock); + spin_lock_irqsave(&clocks_lock, flags); if (clk->ops && clk->ops->set_parent) ret = (clk->ops->set_parent)(clk, parent); - spin_unlock(&clocks_lock); + spin_unlock_irqrestore(&clocks_lock, flags); return ret; } diff --git a/trunk/arch/m68k/platform/coldfire/clk.c b/trunk/arch/m68k/platform/coldfire/clk.c index 75f9ee967ea7..9cd13b4ce42b 100644 --- a/trunk/arch/m68k/platform/coldfire/clk.c +++ b/trunk/arch/m68k/platform/coldfire/clk.c @@ -146,9 +146,3 @@ struct clk_ops clk_ops1 = { }; #endif /* MCFPM_PPMCR1 */ #endif /* MCFPM_PPMCR0 */ - -struct clk *devm_clk_get(struct device *dev, const char *id) -{ - return NULL; -} -EXPORT_SYMBOL(devm_clk_get); diff --git a/trunk/arch/mips/kernel/smp-cmp.c b/trunk/arch/mips/kernel/smp-cmp.c index e7e03ecf5495..afc379ca3753 100644 --- a/trunk/arch/mips/kernel/smp-cmp.c +++ b/trunk/arch/mips/kernel/smp-cmp.c @@ -102,7 +102,7 @@ static void cmp_init_secondary(void) c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; #endif #ifdef CONFIG_MIPS_MT_SMTC - c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC; + c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; #endif } diff --git a/trunk/arch/mips/mm/gup.c b/trunk/arch/mips/mm/gup.c index 33aadbcf170b..dcfd573871c1 100644 --- a/trunk/arch/mips/mm/gup.c +++ b/trunk/arch/mips/mm/gup.c @@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; + if (PageTail(page)) + get_huge_page_tail(page); (*nr)++; page++; refs++; diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c index 7b13a4caeea4..fea823f18479 100644 --- a/trunk/arch/mips/mti-malta/malta-int.c +++ b/trunk/arch/mips/mti-malta/malta-int.c @@ -273,16 +273,19 @@ asmlinkage void plat_irq_dispatch(void) unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; int irq; + if (unlikely(!pending)) { + spurious_interrupt(); + return; + } + irq = irq_ffs(pending); if (irq == MIPSCPU_INT_I8259A) malta_hw0_irqdispatch(); else if 
(gic_present && ((1 << irq) & ipi_map[smp_processor_id()])) malta_ipi_irqdispatch(); - else if (irq >= 0) - do_IRQ(MIPS_CPU_IRQ_BASE + irq); else - spurious_interrupt(); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); } #ifdef CONFIG_MIPS_MT_SMP diff --git a/trunk/arch/mips/mti-malta/malta-platform.c b/trunk/arch/mips/mti-malta/malta-platform.c index 4c35301720e7..80562b81f0f2 100644 --- a/trunk/arch/mips/mti-malta/malta-platform.c +++ b/trunk/arch/mips/mti-malta/malta-platform.c @@ -138,11 +138,6 @@ static int __init malta_add_devices(void) if (err) return err; - /* - * Set RTC to BCD mode to support current alarm code. - */ - CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL); - return 0; } diff --git a/trunk/arch/s390/include/asm/hugetlb.h b/trunk/arch/s390/include/asm/hugetlb.h index 799ed0f1643d..2d6e6e380564 100644 --- a/trunk/arch/s390/include/asm/hugetlb.h +++ b/trunk/arch/s390/include/asm/hugetlb.h @@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep) return pte; } -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - pte_t pte = huge_ptep_get(ptep); - - mm->context.flush_mm = 1; - pmd_clear((pmd_t *) ptep); - return pte; -} - static inline void __pmd_csp(pmd_t *pmdp) { register unsigned long reg2 asm("2") = pmd_val(*pmdp); @@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm, __pmd_csp(pmdp); } +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t pte = huge_ptep_get(ptep); + + huge_ptep_invalidate(mm, addr, ptep); + return pte; +} + #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ ({ \ int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \ @@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm, ({ \ pte_t __pte = huge_ptep_get(__ptep); \ if (pte_write(__pte)) { \ - (__mm)->context.flush_mm = 1; \ - if (atomic_read(&(__mm)->context.attach_count) > 1 || \ - (__mm) != current->active_mm) \ - huge_ptep_invalidate(__mm, __addr, __ptep); \ + huge_ptep_invalidate(__mm, __addr, __ptep); \ set_huge_pte_at(__mm, __addr, __ptep, \ huge_pte_wrprotect(__pte)); \ } \ diff --git a/trunk/arch/s390/include/asm/tlbflush.h b/trunk/arch/s390/include/asm/tlbflush.h index 9fde315f3a7c..1d8fe2b17ef6 100644 --- a/trunk/arch/s390/include/asm/tlbflush.h +++ b/trunk/arch/s390/include/asm/tlbflush.h @@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) static inline void __tlb_flush_mm_cond(struct mm_struct * mm) { - spin_lock(&mm->page_table_lock); if (mm->context.flush_mm) { __tlb_flush_mm(mm); mm->context.flush_mm = 0; } - spin_unlock(&mm->page_table_lock); } /* diff --git a/trunk/arch/s390/kernel/setup.c b/trunk/arch/s390/kernel/setup.c index f86c81e13c37..40b57693de38 100644 --- a/trunk/arch/s390/kernel/setup.c +++ b/trunk/arch/s390/kernel/setup.c @@ -974,11 +974,13 @@ static void __init setup_hwcaps(void) if (MACHINE_HAS_HPAGE) elf_hwcap |= HWCAP_S390_HPAGE; +#if defined(CONFIG_64BIT) /* * 64-bit register support for 31-bit processes * HWCAP_S390_HIGH_GPRS is bit 9. 
*/ elf_hwcap |= HWCAP_S390_HIGH_GPRS; +#endif get_cpu_id(&cpu_id); switch (cpu_id.machine) { diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c index 60ee2b883797..2d37bb861faf 100644 --- a/trunk/arch/s390/lib/uaccess_pt.c +++ b/trunk/arch/s390/lib/uaccess_pt.c @@ -2,69 +2,82 @@ * User access functions based on page table walks for enhanced * system layout without hardware support. * - * Copyright IBM Corp. 2006 + * Copyright IBM Corp. 2006, 2012 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) */ #include #include #include +#include #include #include #include "uaccess.h" -static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr) + +/* + * Returns kernel address for user virtual address. If the returned address is + * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address + * contains the (negative) exception code. + */ +static __always_inline unsigned long follow_table(struct mm_struct *mm, + unsigned long addr, int write) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; + pte_t *ptep; pgd = pgd_offset(mm, addr); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return (pte_t *) 0x3a; + return -0x3aUL; pud = pud_offset(pgd, addr); if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return (pte_t *) 0x3b; + return -0x3bUL; pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return (pte_t *) 0x10; + if (pmd_none(*pmd)) + return -0x10UL; + if (pmd_huge(*pmd)) { + if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); + } + if (unlikely(pmd_bad(*pmd))) + return -0x10UL; + + ptep = pte_offset_map(pmd, addr); + if (!pte_present(*ptep)) + return -0x11UL; + if (write && !pte_write(*ptep)) + return -0x04UL; - return pte_offset_map(pmd, addr); + return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); } static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { struct mm_struct *mm = current->mm; - unsigned long offset, pfn, done, size; - pte_t *pte; + unsigned long offset, done, size, kaddr; void *from, *to; done = 0; retry: spin_lock(&mm->page_table_lock); do { - pte = follow_table(mm, uaddr); - if ((unsigned long) pte < 0x1000) + kaddr = follow_table(mm, uaddr, write_user); + if (IS_ERR_VALUE(kaddr)) goto fault; - if (!pte_present(*pte)) { - pte = (pte_t *) 0x11; - goto fault; - } else if (write_user && !pte_write(*pte)) { - pte = (pte_t *) 0x04; - goto fault; - } - pfn = pte_pfn(*pte); - offset = uaddr & (PAGE_SIZE - 1); + offset = uaddr & ~PAGE_MASK; size = min(n - done, PAGE_SIZE - offset); if (write_user) { - to = (void *)((pfn << PAGE_SHIFT) + offset); + to = (void *) kaddr; from = kptr + done; } else { - from = (void *)((pfn << PAGE_SHIFT) + offset); + from = (void *) kaddr; to = kptr + done; } memcpy(to, from, size); @@ -75,7 +88,7 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, return n - done; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(uaddr, (unsigned long) pte, write_user)) + if (__handle_fault(uaddr, -kaddr, write_user)) return n - done; goto retry; } @@ -84,27 +97,22 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, * Do DAT for user address by page table walk, return kernel address. * This function needs to be called with current->mm->page_table_lock held. 
*/ -static __always_inline unsigned long __dat_user_addr(unsigned long uaddr) +static __always_inline unsigned long __dat_user_addr(unsigned long uaddr, + int write) { struct mm_struct *mm = current->mm; - unsigned long pfn; - pte_t *pte; + unsigned long kaddr; int rc; retry: - pte = follow_table(mm, uaddr); - if ((unsigned long) pte < 0x1000) - goto fault; - if (!pte_present(*pte)) { - pte = (pte_t *) 0x11; + kaddr = follow_table(mm, uaddr, write); + if (IS_ERR_VALUE(kaddr)) goto fault; - } - pfn = pte_pfn(*pte); - return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); + return kaddr; fault: spin_unlock(&mm->page_table_lock); - rc = __handle_fault(uaddr, (unsigned long) pte, 0); + rc = __handle_fault(uaddr, -kaddr, write); spin_lock(&mm->page_table_lock); if (!rc) goto retry; @@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to) static size_t strnlen_user_pt(size_t count, const char __user *src) { - char *addr; unsigned long uaddr = (unsigned long) src; struct mm_struct *mm = current->mm; - unsigned long offset, pfn, done, len; - pte_t *pte; + unsigned long offset, done, len, kaddr; size_t len_str; if (segment_eq(get_fs(), KERNEL_DS)) @@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src) retry: spin_lock(&mm->page_table_lock); do { - pte = follow_table(mm, uaddr); - if ((unsigned long) pte < 0x1000) - goto fault; - if (!pte_present(*pte)) { - pte = (pte_t *) 0x11; + kaddr = follow_table(mm, uaddr, 0); + if (IS_ERR_VALUE(kaddr)) goto fault; - } - pfn = pte_pfn(*pte); - offset = uaddr & (PAGE_SIZE-1); - addr = (char *)(pfn << PAGE_SHIFT) + offset; + offset = uaddr & ~PAGE_MASK; len = min(count - done, PAGE_SIZE - offset); - len_str = strnlen(addr, len); + len_str = strnlen((char *) kaddr, len); done += len_str; uaddr += len_str; } while ((len_str == len) && (done < count)); @@ -192,7 +192,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src) return done + 1; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(uaddr, (unsigned long) pte, 0)) + if (__handle_fault(uaddr, -kaddr, 0)) return 0; goto retry; } @@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to, const void __user *from) { struct mm_struct *mm = current->mm; - unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to, - uaddr, done, size, error_code; + unsigned long offset_max, uaddr, done, size, error_code; unsigned long uaddr_from = (unsigned long) from; unsigned long uaddr_to = (unsigned long) to; - pte_t *pte_from, *pte_to; + unsigned long kaddr_to, kaddr_from; int write_user; if (segment_eq(get_fs(), KERNEL_DS)) { @@ -242,38 +241,23 @@ static size_t copy_in_user_pt(size_t n, void __user *to, do { write_user = 0; uaddr = uaddr_from; - pte_from = follow_table(mm, uaddr_from); - error_code = (unsigned long) pte_from; - if (error_code < 0x1000) - goto fault; - if (!pte_present(*pte_from)) { - error_code = 0x11; + kaddr_from = follow_table(mm, uaddr_from, 0); + error_code = kaddr_from; + if (IS_ERR_VALUE(error_code)) goto fault; - } write_user = 1; uaddr = uaddr_to; - pte_to = follow_table(mm, uaddr_to); - error_code = (unsigned long) pte_to; - if (error_code < 0x1000) - goto fault; - if (!pte_present(*pte_to)) { - error_code = 0x11; + kaddr_to = follow_table(mm, uaddr_to, 1); + error_code = (unsigned long) kaddr_to; + if (IS_ERR_VALUE(error_code)) goto fault; - } else if (!pte_write(*pte_to)) { - error_code = 0x04; - goto fault; - } - pfn_from = pte_pfn(*pte_from); - pfn_to = pte_pfn(*pte_to); - offset_from = 
uaddr_from & (PAGE_SIZE-1); - offset_to = uaddr_from & (PAGE_SIZE-1); - offset_max = max(offset_from, offset_to); + offset_max = max(uaddr_from & ~PAGE_MASK, + uaddr_to & ~PAGE_MASK); size = min(n - done, PAGE_SIZE - offset_max); - memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to, - (void *)(pfn_from << PAGE_SHIFT) + offset_from, size); + memcpy((void *) kaddr_to, (void *) kaddr_from, size); done += size; uaddr_from += size; uaddr_to += size; @@ -282,7 +266,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, return n - done; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(uaddr, error_code, write_user)) + if (__handle_fault(uaddr, -error_code, write_user)) return n - done; goto retry; } @@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) return __futex_atomic_op_pt(op, uaddr, oparg, old); spin_lock(¤t->mm->page_table_lock); uaddr = (u32 __force __user *) - __dat_user_addr((__force unsigned long) uaddr); + __dat_user_addr((__force unsigned long) uaddr, 1); if (!uaddr) { spin_unlock(¤t->mm->page_table_lock); return -EFAULT; @@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); spin_lock(¤t->mm->page_table_lock); uaddr = (u32 __force __user *) - __dat_user_addr((__force unsigned long) uaddr); + __dat_user_addr((__force unsigned long) uaddr, 1); if (!uaddr) { spin_unlock(¤t->mm->page_table_lock); return -EFAULT; diff --git a/trunk/arch/s390/oprofile/init.c b/trunk/arch/s390/oprofile/init.c index a1e9d69a9c90..584b93674ea4 100644 --- a/trunk/arch/s390/oprofile/init.c +++ b/trunk/arch/s390/oprofile/init.c @@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf, if (*offset) return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val < oprofile_min_interval) oprofile_hw_interval = oprofile_min_interval; @@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0) return -EINVAL; @@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) @@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) @@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) diff --git a/trunk/arch/sh/kernel/cpu/sh5/entry.S b/trunk/arch/sh/kernel/cpu/sh5/entry.S index b7cf6a547f11..7e605b95592a 100644 --- a/trunk/arch/sh/kernel/cpu/sh5/entry.S +++ b/trunk/arch/sh/kernel/cpu/sh5/entry.S @@ -933,7 +933,7 @@ ret_with_reschedule: pta restore_all, tr1 - movi _TIF_SIGPENDING, r8 + movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8 and r8, r7, r8 pta work_notifysig, tr0 bne r8, ZERO, tr0 diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S index f67601cb3f1f..b96489d8b27d 100644 --- a/trunk/arch/sh/kernel/entry-common.S +++ 
b/trunk/arch/sh/kernel/entry-common.S @@ -139,7 +139,7 @@ work_pending: ! r8: current_thread_info ! t: result of "tst #_TIF_NEED_RESCHED, r0" bf/s work_resched - tst #_TIF_SIGPENDING, r0 + tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0 work_notifysig: bt/s __restore_all mov r15, r4 diff --git a/trunk/arch/sparc/kernel/module.c b/trunk/arch/sparc/kernel/module.c index 15e0a1693976..f1ddc0d23679 100644 --- a/trunk/arch/sparc/kernel/module.c +++ b/trunk/arch/sparc/kernel/module.c @@ -48,9 +48,7 @@ void *module_alloc(unsigned long size) return NULL; ret = module_map(size); - if (!ret) - ret = ERR_PTR(-ENOMEM); - else + if (ret) memset(ret, 0, size); return ret; @@ -116,6 +114,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, v = sym->st_value + rel[i].r_addend; switch (ELF_R_TYPE(rel[i].r_info) & 0xff) { + case R_SPARC_DISP32: + v -= (Elf_Addr) location; + *loc32 = v; + break; #ifdef CONFIG_SPARC64 case R_SPARC_64: location[0] = v >> 56; @@ -128,11 +130,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, location[7] = v >> 0; break; - case R_SPARC_DISP32: - v -= (Elf_Addr) location; - *loc32 = v; - break; - case R_SPARC_WDISP19: v -= (Elf_Addr) location; *loc32 = (*loc32 & ~0x7ffff) | diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 8ec3a1aa4abd..50a1d1f9b6d3 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -746,10 +746,10 @@ config SWIOTLB def_bool y if X86_64 ---help--- Support for software bounce buffers used on x86-64 systems - which don't have a hardware IOMMU (e.g. the current generation - of Intel's x86-64 CPUs). Using this PCI devices which can only - access 32-bits of memory can be used on systems with more than - 3 GB of memory. If unsure, say Y. + which don't have a hardware IOMMU. Using this PCI devices + which can only access 32-bits of memory can be used on systems + with more than 3 GB of memory. + If unsure, say Y. 
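An aside on the SWIOTLB option above: bounce buffering means the kernel stages a DMA transfer through memory the device can actually address, copying between that staging area and the driver's real buffer around each transfer. Below is a minimal userspace sketch of the idea, not kernel code; every name in it (dma_window, bounce_map, bounce_unmap) is invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WINDOW_SIZE 64	/* pretend the device can only address this arena */

static char dma_window[WINDOW_SIZE];

/* "Map" a buffer for the device: bounce it into the window if needed. */
static void *bounce_map(void *buf, size_t len)
{
	uintptr_t lo = (uintptr_t)dma_window;
	uintptr_t p = (uintptr_t)buf;

	if (p >= lo && p + len <= lo + WINDOW_SIZE)
		return buf;		/* already device-visible, no copy */
	if (len > WINDOW_SIZE)
		return NULL;		/* will not fit the staging area */
	memcpy(dma_window, buf, len);	/* stage a device-visible copy */
	return dma_window;
}

/* After a device-to-memory transfer, propagate the staged data back. */
static void bounce_unmap(void *buf, void *mapped, size_t len)
{
	if (mapped != buf)
		memcpy(buf, mapped, len);
}

int main(void)
{
	char high_buf[32] = "";
	void *dev_addr = bounce_map(high_buf, sizeof(high_buf));

	strcpy(dev_addr, "device data");	/* the fake device writes here */
	bounce_unmap(high_buf, dev_addr, sizeof(high_buf));
	printf("%s\n", high_buf);		/* prints: device data */
	return 0;
}

The copy on each map and unmap is the cost the help text alludes to: devices limited to 32-bit addressing still work on machines with more than 3 GB of memory, just with extra memcpy traffic.
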
config IOMMU_HELPER def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) diff --git a/trunk/arch/x86/Makefile b/trunk/arch/x86/Makefile index 682e9c210baa..474ca35b1bce 100644 --- a/trunk/arch/x86/Makefile +++ b/trunk/arch/x86/Makefile @@ -142,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,) KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) -archscripts: +archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs ### diff --git a/trunk/arch/x86/include/asm/xen/page.h b/trunk/arch/x86/include/asm/xen/page.h index 93971e841dd5..472b9b783019 100644 --- a/trunk/arch/x86/include/asm/xen/page.h +++ b/trunk/arch/x86/include/asm/xen/page.h @@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern int m2p_add_override(unsigned long mfn, struct page *page, struct gnttab_map_grant_ref *kmap_op); -extern int m2p_remove_override(struct page *page, bool clear_pte); +extern int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h index 6605a81ba339..8b6defe7eefc 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.h +++ b/trunk/arch/x86/kernel/cpu/perf_event.h @@ -586,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[]; extern struct event_constraint intel_snb_pebs_event_constraints[]; +extern struct event_constraint intel_ivb_pebs_event_constraints[]; + struct event_constraint *intel_pebs_constraints(struct perf_event *event); void intel_pmu_pebs_enable(struct perf_event *event); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c index 7bfb5bec8630..eebd5ffe1bba 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -209,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config) return -EOPNOTSUPP; } +static const struct perf_event_attr ibs_notsupp = { + .exclude_user = 1, + .exclude_kernel = 1, + .exclude_hv = 1, + .exclude_idle = 1, + .exclude_host = 1, + .exclude_guest = 1, +}; + static int perf_ibs_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -229,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event) if (event->pmu != &perf_ibs->pmu) return -ENOENT; + if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp)) + return -EINVAL; + if (config & ~perf_ibs->config_mask) return -EINVAL; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 7f2739e03e79..6bca492b8547 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void) break; case 28: /* Atom */ + case 54: /* Cedariew */ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2047,7 +2048,6 @@ __init int intel_pmu_init(void) case 42: /* SandyBridge */ case 45: /* SandyBridge, "Romely-EP" */ x86_add_quirk(intel_sandybridge_quirk); - case 58: /* IvyBridge */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, @@ -2072,6 +2072,29 @@ __init int intel_pmu_init(void) pr_cont("SandyBridge events, "); break; + case 58: /* IvyBridge */ + 
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); + + intel_pmu_lbr_init_snb(); + + x86_pmu.event_constraints = intel_snb_event_constraints; + x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; + x86_pmu.pebs_aliases = intel_pebs_aliases_snb; + x86_pmu.extra_regs = intel_snb_extra_regs; + /* all extra regs are per-cpu when HT is on */ + x86_pmu.er_flags |= ERF_HAS_RSP_1; + x86_pmu.er_flags |= ERF_NO_HT_SHARING; + + /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); + + pr_cont("IvyBridge events, "); + break; + default: switch (x86_pmu.version) { diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c index e38d97bf4259..826054a4f2ee 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_ivb_pebs_event_constraints[] = { + INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ + INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ + INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ + INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ + EVENT_CONSTRAINT_END +}; + struct event_constraint *intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 520b4265fcd2..da02e9cc3754 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void) * to have an operational LBR which can freeze * on PMU interrupt */ - if (boot_cpu_data.x86_mask < 10) { + if (boot_cpu_data.x86_model == 28 + && boot_cpu_data.x86_mask < 10) { pr_cont("LBR disabled due to erratum"); return; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 0a5571080e74..38e4894165b9 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box) } } +static struct uncore_event_desc snb_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), + { /* end: all zeroes */ }, +}; + static struct attribute *snb_uncore_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = { .constraints = snb_uncore_cbox_constraints, .ops = &snb_uncore_msr_ops, .format_group = &snb_uncore_format_group, + .event_descs = snb_uncore_events, }; static struct intel_uncore_type *snb_msr_uncores[] = { diff --git a/trunk/arch/x86/kernel/microcode_core.c 
b/trunk/arch/x86/kernel/microcode_core.c index 4873e62db6a1..9e5bcf1e2376 100644 --- a/trunk/arch/x86/kernel/microcode_core.c +++ b/trunk/arch/x86/kernel/microcode_core.c @@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, if (do_microcode_update(buf, len) == 0) ret = (ssize_t)len; + if (ret > 0) + perf_check_microcode(); + mutex_unlock(µcode_mutex); put_online_cpus(); diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index e0e6990723e9..ab1f6a93b527 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -319,7 +319,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, */ int devmem_is_allowed(unsigned long pagenr) { - if (pagenr <= 256) + if (pagenr < 256) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index 9642d4a38602..1fbe75a95f15 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -1452,6 +1452,10 @@ asmlinkage void __init xen_start_kernel(void) pci_request_acs(); xen_acpi_sleep_register(); + + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = x86_init_uint_noop; } #ifdef CONFIG_PCI /* PCI BIOS service won't work from a PV guest. */ diff --git a/trunk/arch/x86/xen/p2m.c b/trunk/arch/x86/xen/p2m.c index 76ba0e97e530..72213da605f5 100644 --- a/trunk/arch/x86/xen/p2m.c +++ b/trunk/arch/x86/xen/p2m.c @@ -828,9 +828,6 @@ int m2p_add_override(unsigned long mfn, struct page *page, xen_mc_issue(PARAVIRT_LAZY_MMU); } - /* let's use dev_bus_addr to record the old mfn instead */ - kmap_op->dev_bus_addr = page->index; - page->index = (unsigned long) kmap_op; } spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); @@ -857,7 +854,8 @@ int m2p_add_override(unsigned long mfn, struct page *page, return 0; } EXPORT_SYMBOL_GPL(m2p_add_override); -int m2p_remove_override(struct page *page, bool clear_pte) +int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long mfn; @@ -887,10 +885,8 @@ int m2p_remove_override(struct page *page, bool clear_pte) WARN_ON(!PagePrivate(page)); ClearPagePrivate(page); - if (clear_pte) { - struct gnttab_map_grant_ref *map_op = - (struct gnttab_map_grant_ref *) page->index; - set_phys_to_machine(pfn, map_op->dev_bus_addr); + set_phys_to_machine(pfn, page->index); + if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs; struct gnttab_unmap_grant_ref *unmap_op; @@ -902,13 +898,13 @@ int m2p_remove_override(struct page *page, bool clear_pte) * issued. In this case handle is going to -1 because * it hasn't been modified yet. */ - if (map_op->handle == -1) + if (kmap_op->handle == -1) xen_mc_flush(); /* - * Now if map_op->handle is negative it means that the + * Now if kmap_op->handle is negative it means that the * hypercall actually returned an error. 
*/ - if (map_op->handle == GNTST_general_error) { + if (kmap_op->handle == GNTST_general_error) { printk(KERN_WARNING "m2p_remove_override: " "pfn %lx mfn %lx, failed to modify kernel mappings", pfn, mfn); @@ -918,8 +914,8 @@ int m2p_remove_override(struct page *page, bool clear_pte) mcs = xen_mc_entry( sizeof(struct gnttab_unmap_grant_ref)); unmap_op = mcs.args; - unmap_op->host_addr = map_op->host_addr; - unmap_op->handle = map_op->handle; + unmap_op->host_addr = kmap_op->host_addr; + unmap_op->handle = kmap_op->handle; unmap_op->dev_bus_addr = 0; MULTI_grant_table_op(mcs.mc, @@ -930,10 +926,9 @@ int m2p_remove_override(struct page *page, bool clear_pte) set_pte_at(&init_mm, address, ptep, pfn_pte(pfn, PAGE_KERNEL)); __flush_tlb_single(address); - map_op->host_addr = 0; + kmap_op->host_addr = 0; } - } else - set_phys_to_machine(pfn, page->index); + } /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present * somewhere in this domain, even before being added to the diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 4b4dbdfbca89..ee3cb3a5e278 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -2254,9 +2254,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) error_type = "I/O"; break; } - printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", - error_type, req->rq_disk ? req->rq_disk->disk_name : "?", - (unsigned long long)blk_rq_pos(req)); + printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", + error_type, req->rq_disk ? + req->rq_disk->disk_name : "?", + (unsigned long long)blk_rq_pos(req)); + } blk_account_io_completion(req, nr_bytes); diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c index 4476e0e85d16..4a85096f5410 100644 --- a/trunk/block/ioctl.c +++ b/trunk/block/ioctl.c @@ -41,7 +41,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user sizeof(long long) > sizeof(long)) { long pstart = start, plength = length; if (pstart != start || plength != length - || pstart < 0 || plength < 0) + || pstart < 0 || plength < 0 || partno > 65535) return -EINVAL; } diff --git a/trunk/drivers/acpi/bus.c b/trunk/drivers/acpi/bus.c index 9628652e080c..e0596954290b 100644 --- a/trunk/drivers/acpi/bus.c +++ b/trunk/drivers/acpi/bus.c @@ -237,6 +237,16 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state) } else if (result == ACPI_STATE_D3_HOT) { result = ACPI_STATE_D3; } + + /* + * If we were unsure about the device parent's power state up to this + * point, the fact that the device is in D0 implies that the parent has + * to be in D0 too. 
+ */ + if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN + && result == ACPI_STATE_D0) + device->parent->power.state = ACPI_STATE_D0; + *state = result; out: diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c index fc1803414629..40e38a06ba85 100644 --- a/trunk/drivers/acpi/power.c +++ b/trunk/drivers/acpi/power.c @@ -107,6 +107,7 @@ struct acpi_power_resource { /* List of devices relying on this power resource */ struct acpi_power_resource_device *devices; + struct mutex devices_lock; }; static struct list_head acpi_power_resource_list; @@ -225,7 +226,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device) static int __acpi_power_on(struct acpi_power_resource *resource) { - struct acpi_power_resource_device *device_list = resource->devices; acpi_status status = AE_OK; status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); @@ -238,19 +238,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", resource->name)); - while (device_list) { - acpi_power_on_device(device_list->device); - - device_list = device_list->next; - } - return 0; } static int acpi_power_on(acpi_handle handle) { int result = 0; + bool resume_device = false; struct acpi_power_resource *resource = NULL; + struct acpi_power_resource_device *device_list; result = acpi_power_get_context(handle, &resource); if (result) @@ -266,10 +262,25 @@ static int acpi_power_on(acpi_handle handle) result = __acpi_power_on(resource); if (result) resource->ref_count--; + else + resume_device = true; } mutex_unlock(&resource->resource_lock); + if (!resume_device) + return result; + + mutex_lock(&resource->devices_lock); + + device_list = resource->devices; + while (device_list) { + acpi_power_on_device(device_list->device); + device_list = device_list->next; + } + + mutex_unlock(&resource->devices_lock); + return result; } @@ -355,7 +366,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev, if (acpi_power_get_context(res_handle, &resource)) return; - mutex_lock(&resource->resource_lock); + mutex_lock(&resource->devices_lock); prev = NULL; curr = resource->devices; while (curr) { @@ -372,7 +383,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev, prev = curr; curr = curr->next; } - mutex_unlock(&resource->resource_lock); + mutex_unlock(&resource->devices_lock); } /* Unlink dev from all power resources in _PR0 */ @@ -414,10 +425,10 @@ static int __acpi_power_resource_register_device( power_resource_device->device = powered_device; - mutex_lock(&resource->resource_lock); + mutex_lock(&resource->devices_lock); power_resource_device->next = resource->devices; resource->devices = power_resource_device; - mutex_unlock(&resource->resource_lock); + mutex_unlock(&resource->devices_lock); return 0; } @@ -462,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle) return ret; no_power_resource: - printk(KERN_WARNING PREFIX "Invalid Power Resource to register!"); + printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!"); return -ENODEV; } EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); @@ -721,6 +732,7 @@ static int acpi_power_add(struct acpi_device *device) resource->device = device; mutex_init(&resource->resource_lock); + mutex_init(&resource->devices_lock); strcpy(resource->name, device->pnp.bus_id); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), 
ACPI_POWER_CLASS); diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 50d5dea0ff59..7862d17976b7 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -268,6 +268,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, + /* JMicron 362B and 362C have an AHCI function with IDE class code */ + { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr }, + { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr }, /* ATI */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ @@ -393,6 +396,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ { PCI_DEVICE(0x1b4b, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(0x1b4b, 0x9192), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, @@ -400,7 +405,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ /* Asmedia */ - { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c index de0435e63b02..887f68f6d79a 100644 --- a/trunk/drivers/block/aoe/aoecmd.c +++ b/trunk/drivers/block/aoe/aoecmd.c @@ -35,6 +35,7 @@ new_skb(ulong len) skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); + skb_checksum_none_assert(skb); } return skb; } diff --git a/trunk/drivers/block/cciss_scsi.c b/trunk/drivers/block/cciss_scsi.c index 38aa6dda6b81..da3311129a0c 100644 --- a/trunk/drivers/block/cciss_scsi.c +++ b/trunk/drivers/block/cciss_scsi.c @@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "%p has protocol error\n", c); break; diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c index a8fddeb3d638..f946d31d6917 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.c +++ b/trunk/drivers/block/mtip32xx/mtip32xx.c @@ -1148,11 +1148,15 @@ static bool mtip_pause_ncq(struct mtip_port *port, reply = port->rxfis + RX_FIS_D2H_REG; task_file_data = readl(port->mmio+PORT_TFDATA); - if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT)) + if (fis->command == ATA_CMD_SEC_ERASE_UNIT) + clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); + + if ((task_file_data & 1)) return false; if (fis->command == ATA_CMD_SEC_ERASE_PREP) { set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); + set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); port->ic_pause_timer = jiffies; return true; } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) && @@ -1900,7 +1904,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, int rv = 0, xfer_sz = command[3]; if (xfer_sz) { - if (user_buffer) + if (!user_buffer) return -EFAULT; buf = dmam_alloc_coherent(&port->dd->pdev->dev, @@ -2043,7 +2047,7 @@ 
static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout) *timeout = 240000; /* 4 minutes */ break; case ATA_CMD_STANDBYNOW1: - *timeout = 10000; /* 10 seconds */ + *timeout = 120000; /* 2 minutes */ break; case 0xF7: case 0xFA: @@ -2588,9 +2592,6 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, if (!len || size) return 0; - if (size < 0) - return -EINVAL; - size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) @@ -2660,9 +2661,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, if (!len || size) return 0; - if (size < 0) - return -EINVAL; - size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", dd->port->flags); size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", @@ -3214,8 +3212,8 @@ static int mtip_hw_init(struct driver_data *dd) "Unable to check write protect progress\n"); else dev_info(&dd->pdev->dev, - "Write protect progress: %d%% (%d blocks)\n", - attr242.cur, attr242.data); + "Write protect progress: %u%% (%u blocks)\n", + attr242.cur, le32_to_cpu(attr242.data)); return rv; out3: @@ -3619,6 +3617,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) bio_endio(bio, -ENODATA); return; } + if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) { + bio_endio(bio, -ENODATA); + return; + } } if (unlikely(!bio_has_data(bio))) { @@ -4168,7 +4170,13 @@ static void mtip_pci_shutdown(struct pci_dev *pdev) /* Table of device ids supported by this driver. */ static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { - { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) }, { 0 } }; @@ -4199,12 +4207,12 @@ static int __init mtip_init(void) { int error; - printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); + pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); /* Allocate a major block device number to use with this driver. 
*/ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { - printk(KERN_ERR "Unable to register block device (%d)\n", + pr_err("Unable to register block device (%d)\n", error); return -EBUSY; } @@ -4213,7 +4221,7 @@ static int __init mtip_init(void) if (!dfs_parent) { dfs_parent = debugfs_create_dir("rssd", NULL); if (IS_ERR_OR_NULL(dfs_parent)) { - printk(KERN_WARNING "Error creating debugfs parent\n"); + pr_warn("Error creating debugfs parent\n"); dfs_parent = NULL; } } diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h index f51fc23d17bb..18627a1d04c5 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.h +++ b/trunk/drivers/block/mtip32xx/mtip32xx.h @@ -76,7 +76,13 @@ /* Micron Vendor ID & P320x SSD Device ID */ #define PCI_VENDOR_ID_MICRON 0x1344 -#define P320_DEVICE_ID 0x5150 +#define P320H_DEVICE_ID 0x5150 +#define P320M_DEVICE_ID 0x5151 +#define P320S_DEVICE_ID 0x5152 +#define P325M_DEVICE_ID 0x5153 +#define P420H_DEVICE_ID 0x5160 +#define P420M_DEVICE_ID 0x5161 +#define P425M_DEVICE_ID 0x5163 /* Driver name and version strings */ #define MTIP_DRV_NAME "mtip32xx" @@ -131,10 +137,12 @@ enum { MTIP_PF_SVC_THD_STOP_BIT = 8, /* below are bit numbers in 'dd_flag' defined in driver_data */ + MTIP_DDF_SEC_LOCK_BIT = 0, MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ + (1 << MTIP_DDF_SEC_LOCK_BIT) | \ (1 << MTIP_DDF_OVER_TEMP_BIT) | \ (1 << MTIP_DDF_WRITE_PROTECT_BIT)), diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c index d07c9f7fded6..0c03411c59eb 100644 --- a/trunk/drivers/block/nbd.c +++ b/trunk/drivers/block/nbd.c @@ -449,6 +449,14 @@ static void nbd_clear_que(struct nbd_device *nbd) req->errors++; nbd_end_request(req); } + + while (!list_empty(&nbd->waiting_queue)) { + req = list_entry(nbd->waiting_queue.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } } @@ -598,6 +606,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->file = NULL; nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); + BUG_ON(!list_empty(&nbd->waiting_queue)); if (file) fput(file); return 0; diff --git a/trunk/drivers/block/xen-blkback/blkback.c b/trunk/drivers/block/xen-blkback/blkback.c index 73f196ca713f..c6decb901e5e 100644 --- a/trunk/drivers/block/xen-blkback/blkback.c +++ b/trunk/drivers/block/xen-blkback/blkback.c @@ -337,7 +337,7 @@ static void xen_blkbk_unmap(struct pending_req *req) invcount++; } - ret = gnttab_unmap_refs(unmap, pages, invcount, false); + ret = gnttab_unmap_refs(unmap, NULL, pages, invcount); BUG_ON(ret); } diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c index 11f36e502136..fc2de5528dcc 100644 --- a/trunk/drivers/bluetooth/ath3k.c +++ b/trunk/drivers/bluetooth/ath3k.c @@ -86,6 +86,7 @@ static struct usb_device_id ath3k_table[] = { /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C) }, + { USB_DEVICE(0x0489, 0xE036) }, { } /* Terminating entry */ }; @@ -109,6 +110,7 @@ static struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index cef3bac1a543..654e248763ef 100644 --- a/trunk/drivers/bluetooth/btusb.c 
+++ b/trunk/drivers/bluetooth/btusb.c @@ -52,6 +52,9 @@ static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + /* Apple-specific (Broadcom) devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, + /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x0a5c, 0x21e1) }, @@ -94,16 +97,14 @@ static struct usb_device_id btusb_table[] = { /* Broadcom BCM20702A0 */ { USB_DEVICE(0x0489, 0xe042) }, - { USB_DEVICE(0x0a5c, 0x21e3) }, - { USB_DEVICE(0x0a5c, 0x21e6) }, - { USB_DEVICE(0x0a5c, 0x21e8) }, - { USB_DEVICE(0x0a5c, 0x21f3) }, - { USB_DEVICE(0x0a5c, 0x21f4) }, { USB_DEVICE(0x413c, 0x8197) }, /* Foxconn - Hon Hai */ { USB_DEVICE(0x0489, 0xe033) }, + /*Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + { } /* Terminating entry */ }; @@ -141,6 +142,7 @@ static struct usb_device_id blacklist_table[] = { /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c index 58e32f7c3229..e01f5eaaec82 100644 --- a/trunk/drivers/char/agp/intel-gtt.c +++ b/trunk/drivers/char/agp/intel-gtt.c @@ -84,40 +84,33 @@ static struct _intel_private { #define IS_IRONLAKE intel_private.driver->is_ironlake #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable -int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, - struct scatterlist **sg_list, int *num_sg) +static int intel_gtt_map_memory(struct page **pages, + unsigned int num_entries, + struct sg_table *st) { - struct sg_table st; struct scatterlist *sg; int i; - if (*sg_list) - return 0; /* already mapped (for e.g. 
resume */ - DBG("try mapping %lu pages\n", (unsigned long)num_entries); - if (sg_alloc_table(&st, num_entries, GFP_KERNEL)) + if (sg_alloc_table(st, num_entries, GFP_KERNEL)) goto err; - *sg_list = sg = st.sgl; - - for (i = 0 ; i < num_entries; i++, sg = sg_next(sg)) + for_each_sg(st->sgl, sg, num_entries, i) sg_set_page(sg, pages[i], PAGE_SIZE, 0); - *num_sg = pci_map_sg(intel_private.pcidev, *sg_list, - num_entries, PCI_DMA_BIDIRECTIONAL); - if (unlikely(!*num_sg)) + if (!pci_map_sg(intel_private.pcidev, + st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL)) goto err; return 0; err: - sg_free_table(&st); + sg_free_table(st); return -ENOMEM; } -EXPORT_SYMBOL(intel_gtt_map_memory); -void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) +static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) { struct sg_table st; DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); @@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) sg_free_table(&st); } -EXPORT_SYMBOL(intel_gtt_unmap_memory); static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { @@ -674,9 +666,14 @@ static int intel_gtt_init(void) gtt_map_size = intel_private.base.gtt_total_entries * 4; - intel_private.gtt = ioremap(intel_private.gtt_bus_addr, - gtt_map_size); - if (!intel_private.gtt) { + intel_private.gtt = NULL; + if (INTEL_GTT_GEN < 6) + intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, + gtt_map_size); + if (intel_private.gtt == NULL) + intel_private.gtt = ioremap(intel_private.gtt_bus_addr, + gtt_map_size); + if (intel_private.gtt == NULL) { intel_private.driver->cleanup(); iounmap(intel_private.registers); return -ENOMEM; @@ -879,8 +876,7 @@ static bool i830_check_flags(unsigned int flags) return false; } -void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, - unsigned int sg_len, +void intel_gtt_insert_sg_entries(struct sg_table *st, unsigned int pg_start, unsigned int flags) { @@ -892,12 +888,11 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, /* sg may merge pages, but we have to separate * per-page addr for GTT */ - for_each_sg(sg_list, sg, sg_len, i) { + for_each_sg(st->sgl, sg, st->nents, i) { len = sg_dma_len(sg) >> PAGE_SHIFT; for (m = 0; m < len; m++) { dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT); - intel_private.driver->write_entry(addr, - j, flags); + intel_private.driver->write_entry(addr, j, flags); j++; } } @@ -905,8 +900,10 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); -void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, - struct page **pages, unsigned int flags) +static void intel_gtt_insert_pages(unsigned int first_entry, + unsigned int num_entries, + struct page **pages, + unsigned int flags) { int i, j; @@ -917,7 +914,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, } readl(intel_private.gtt+j-1); } -EXPORT_SYMBOL(intel_gtt_insert_pages); static int intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) @@ -953,13 +949,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, global_cache_flush(); if (intel_private.base.needs_dmar) { - ret = intel_gtt_map_memory(mem->pages, mem->page_count, - &mem->sg_list, &mem->num_sg); + struct sg_table st; + + ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); if (ret != 0) return ret; - intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, - pg_start, 
type); + intel_gtt_insert_sg_entries(&st, pg_start, type); + mem->sg_list = st.sgl; + mem->num_sg = st.nents; } else intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, type); diff --git a/trunk/drivers/clk/Makefile b/trunk/drivers/clk/Makefile index 5869ea387054..72ce247a0e8d 100644 --- a/trunk/drivers/clk/Makefile +++ b/trunk/drivers/clk/Makefile @@ -1,4 +1,5 @@ # common clock types +obj-$(CONFIG_HAVE_CLK) += clk-devres.o obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \ clk-mux.o clk-divider.o clk-fixed-factor.o diff --git a/trunk/drivers/clk/clk-devres.c b/trunk/drivers/clk/clk-devres.c new file mode 100644 index 000000000000..8f571548870f --- /dev/null +++ b/trunk/drivers/clk/clk-devres.c @@ -0,0 +1,55 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +static void devm_clk_release(struct device *dev, void *res) +{ + clk_put(*(struct clk **)res); +} + +struct clk *devm_clk_get(struct device *dev, const char *id) +{ + struct clk **ptr, *clk; + + ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + clk = clk_get(dev, id); + if (!IS_ERR(clk)) { + *ptr = clk; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return clk; +} +EXPORT_SYMBOL(devm_clk_get); + +static int devm_clk_match(struct device *dev, void *res, void *data) +{ + struct clk **c = res; + if (!c || !*c) { + WARN_ON(!c || !*c); + return 0; + } + return *c == data; +} + +void devm_clk_put(struct device *dev, struct clk *clk) +{ + int ret; + + ret = devres_release(dev, devm_clk_release, devm_clk_match, clk); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_clk_put); diff --git a/trunk/drivers/clk/clkdev.c b/trunk/drivers/clk/clkdev.c index d423c9bdd71a..442a31363873 100644 --- a/trunk/drivers/clk/clkdev.c +++ b/trunk/drivers/clk/clkdev.c @@ -171,51 +171,6 @@ void clk_put(struct clk *clk) } EXPORT_SYMBOL(clk_put); -static void devm_clk_release(struct device *dev, void *res) -{ - clk_put(*(struct clk **)res); -} - -struct clk *devm_clk_get(struct device *dev, const char *id) -{ - struct clk **ptr, *clk; - - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - clk = clk_get(dev, id); - if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } - - return clk; -} -EXPORT_SYMBOL(devm_clk_get); - -static int devm_clk_match(struct device *dev, void *res, void *data) -{ - struct clk **c = res; - if (!c || !*c) { - WARN_ON(!c || !*c); - return 0; - } - return *c == data; -} - -void devm_clk_put(struct device *dev, struct clk *clk) -{ - int ret; - - ret = devres_destroy(dev, devm_clk_release, devm_clk_match, clk); - - WARN_ON(ret); -} -EXPORT_SYMBOL(devm_clk_put); - void clkdev_add(struct clk_lookup *cl) { mutex_lock(&clocks_mutex); diff --git a/trunk/drivers/cpufreq/powernow-k8.c b/trunk/drivers/cpufreq/powernow-k8.c index c0e816468e30..1a40935c85fd 100644 --- a/trunk/drivers/cpufreq/powernow-k8.c +++ b/trunk/drivers/cpufreq/powernow-k8.c @@ -35,7 +35,6 @@ #include #include #include -#include /* for current / set_cpus_allowed() */ #include #include @@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, return res; } -/* Driver entry point to switch to the target frequency */ -static int 
powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) +struct powernowk8_target_arg { + struct cpufreq_policy *pol; + unsigned targfreq; + unsigned relation; +}; + +static long powernowk8_target_fn(void *arg) { - cpumask_var_t oldmask; + struct powernowk8_target_arg *pta = arg; + struct cpufreq_policy *pol = pta->pol; + unsigned targfreq = pta->targfreq; + unsigned relation = pta->relation; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; unsigned int newstate; - int ret = -EIO; + int ret; if (!data) return -EINVAL; @@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol, checkfid = data->currfid; checkvid = data->currvid; - /* only run on specific CPU from here on. */ - /* This is poor form: use a workqueue or smp_call_function_single */ - if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) - return -ENOMEM; - - cpumask_copy(oldmask, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); - - if (smp_processor_id() != pol->cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out; - } - if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing targ, change pending bit set\n"); - goto err_out; + return -EIO; } pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) - goto err_out; + return -EIO; if (cpu_family != CPU_HW_PSTATE) { pr_debug("targ: curr fid 0x%x, vid 0x%x\n", @@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) - goto err_out; + return -EIO; mutex_lock(&fidvid_mutex); @@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, ret = transition_frequency_fidvid(data, newstate); if (ret) { printk(KERN_ERR PFX "transition frequency failed\n"); - ret = 1; mutex_unlock(&fidvid_mutex); - goto err_out; + return 1; } mutex_unlock(&fidvid_mutex); @@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol, data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); - ret = 0; -err_out: - set_cpus_allowed_ptr(current, oldmask); - free_cpumask_var(oldmask); - return ret; + return 0; +} + +/* Driver entry point to switch to the target frequency */ +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) +{ + struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, + .relation = relation }; + + /* + * Must run on @pol->cpu. cpufreq core is responsible for ensuring + * that we're bound to the current CPU and pol->cpu stays online. 
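+	 * If we are already on pol->cpu the transition runs inline;
+	 * otherwise work_on_cpu() executes powernowk8_target_fn() from
+	 * the per-CPU workqueue of pol->cpu and waits for completion, so
+	 * the fid/vid and pstate MSR accesses happen on the target CPU
+	 * without rewriting this task's allowed cpumask.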
+ */ + if (smp_processor_id() == pol->cpu) + return powernowk8_target_fn(&pta); + else + return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } /* Driver entry point to verify the policy and range of frequencies */ diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c index 3934fcc4e00b..7ab6e26664a7 100644 --- a/trunk/drivers/dma/at_hdmac.c +++ b/trunk/drivers/dma/at_hdmac.c @@ -661,7 +661,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); if (unlikely(!atslave || !sg_len)) { - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); return NULL; } @@ -689,6 +689,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -724,6 +729,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -757,6 +767,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available\n"); +err: atc_desc_put(atchan, first); return NULL; } diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c index e4feba6b03c0..f5843bc80baa 100644 --- a/trunk/drivers/dma/pl330.c +++ b/trunk/drivers/dma/pl330.c @@ -1567,17 +1567,19 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) goto xfer_exit; } - /* Prefer Secure Channel */ - if (!_manager_ns(thrd)) - r->cfg->nonsecure = 0; - else - r->cfg->nonsecure = 1; /* Use last settings, if not provided */ - if (r->cfg) + if (r->cfg) { + /* Prefer Secure Channel */ + if (!_manager_ns(thrd)) + r->cfg->nonsecure = 0; + else + r->cfg->nonsecure = 1; + ccr = _prepare_ccr(r->cfg); - else + } else { ccr = readl(regs + CC(thrd->id)); + } /* If this req doesn't have valid xfer settings */ if (!_is_valid(ccr)) { @@ -2928,6 +2930,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); + if (!pdmac->peripherals) { + ret = -ENOMEM; + dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); + goto probe_err5; + } for (i = 0; i < num_chan; i++) { pch = &pdmac->peripherals[i]; diff --git a/trunk/drivers/edac/edac_mc.c b/trunk/drivers/edac/edac_mc.c index 616d90bcb3a4..d5dc9da7f99f 100644 --- a/trunk/drivers/edac/edac_mc.c +++ b/trunk/drivers/edac/edac_mc.c @@ -199,6 +199,36 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems) return (void *)(((unsigned long)ptr) + align - r); } +static void _edac_mc_free(struct mem_ctl_info *mci) +{ + int i, chn, row; + struct csrow_info *csr; + const unsigned int tot_dimms = mci->tot_dimms; + const unsigned int tot_channels = mci->num_cschannel; + const unsigned int tot_csrows = mci->nr_csrows; + + if (mci->dimms) { + for (i = 0; i < tot_dimms; i++) + kfree(mci->dimms[i]); + kfree(mci->dimms); + } + if (mci->csrows) { + for (row = 0; row < tot_csrows; row++) { + csr = mci->csrows[row]; + if (csr) { + if (csr->channels) { + for (chn = 0; chn < tot_channels; chn++) + kfree(csr->channels[chn]); + 
kfree(csr->channels); + } + kfree(csr); + } + } + kfree(mci->csrows); + } + kfree(mci); +} + /** * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure * @mc_num: Memory controller number @@ -413,24 +443,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, return mci; error: - if (mci->dimms) { - for (i = 0; i < tot_dimms; i++) - kfree(mci->dimms[i]); - kfree(mci->dimms); - } - if (mci->csrows) { - for (chn = 0; chn < tot_channels; chn++) { - csr = mci->csrows[chn]; - if (csr) { - for (chn = 0; chn < tot_channels; chn++) - kfree(csr->channels[chn]); - kfree(csr); - } - kfree(mci->csrows[i]); - } - kfree(mci->csrows); - } - kfree(mci); + _edac_mc_free(mci); return NULL; } @@ -445,6 +458,14 @@ void edac_mc_free(struct mem_ctl_info *mci) { edac_dbg(1, "\n"); + /* If we're not yet registered with sysfs free only what was allocated + * in edac_mc_alloc(). + */ + if (!device_is_registered(&mci->dev)) { + _edac_mc_free(mci); + return; + } + /* the mci instance is freed here, when the sysfs object is dropped */ edac_unregister_sysfs(mci); } diff --git a/trunk/drivers/extcon/extcon-max77693.c b/trunk/drivers/extcon/extcon-max77693.c index 920a609b2c35..38f9e52f358b 100644 --- a/trunk/drivers/extcon/extcon-max77693.c +++ b/trunk/drivers/extcon/extcon-max77693.c @@ -669,13 +669,18 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev) } info->dev = &pdev->dev; info->max77693 = max77693; - info->max77693->regmap_muic = regmap_init_i2c(info->max77693->muic, - &max77693_muic_regmap_config); - if (IS_ERR(info->max77693->regmap_muic)) { - ret = PTR_ERR(info->max77693->regmap_muic); - dev_err(max77693->dev, - "failed to allocate register map: %d\n", ret); - goto err_regmap; + if (info->max77693->regmap_muic) + dev_dbg(&pdev->dev, "allocate register map\n"); + else { + info->max77693->regmap_muic = devm_regmap_init_i2c( + info->max77693->muic, + &max77693_muic_regmap_config); + if (IS_ERR(info->max77693->regmap_muic)) { + ret = PTR_ERR(info->max77693->regmap_muic); + dev_err(max77693->dev, + "failed to allocate register map: %d\n", ret); + goto err_regmap; + } } platform_set_drvdata(pdev, info); mutex_init(&info->mutex); diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index 90e28081712d..18321b68b880 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -22,7 +22,7 @@ menuconfig DRM config DRM_USB tristate depends on DRM - depends on USB_ARCH_HAS_HCD + depends on USB_SUPPORT && USB_ARCH_HAS_HCD select USB config DRM_KMS_HELPER @@ -54,6 +54,21 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. 
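For orientation, the two Kconfig symbols added below are meant to be selected by individual KMS drivers rather than enabled by hand. A driver built on the new CMA helpers would typically wire them up roughly as in this sketch (illustrative only: the foo_* names are hypothetical, error handling is trimmed, and drm_fb_cma_create()/drm_fbdev_cma_init() are the helpers introduced later in this patch):

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_fb_cma_create,	/* CMA-backed framebuffers */
};

static int foo_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_fbdev_cma *fbdev;

	dev->mode_config.funcs = &foo_mode_config_funcs;
	/* ... CRTC, encoder and connector setup ... */

	/* fbdev emulation on top of the CMA framebuffer helpers */
	fbdev = drm_fbdev_cma_init(dev, 32 /* preferred bpp */,
				   dev->mode_config.num_crtc,
				   dev->mode_config.num_connector);
	if (IS_ERR(fbdev))
		return PTR_ERR(fbdev);

	return 0;
}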
+config DRM_GEM_CMA_HELPER + bool + depends on DRM + help + Choose this if you need the GEM CMA helper functions + +config DRM_KMS_CMA_HELPER + bool + select DRM_GEM_CMA_HELPER + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + help + Choose this if you need the KMS CMA helper functions + config DRM_TDFX tristate "3dfx Banshee/Voodoo3+" depends on DRM && PCI @@ -193,3 +208,5 @@ source "drivers/gpu/drm/ast/Kconfig" source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/cirrus/Kconfig" + +source "drivers/gpu/drm/shmobile/Kconfig" diff --git a/trunk/drivers/gpu/drm/Makefile b/trunk/drivers/gpu/drm/Makefile index f65f65ed0ddf..2ff5cefe9ead 100644 --- a/trunk/drivers/gpu/drm/Makefile +++ b/trunk/drivers/gpu/drm/Makefile @@ -15,11 +15,13 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_trace_points.o drm_global.o drm_prime.o drm-$(CONFIG_COMPAT) += drm_ioc32.o +drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o drm-usb-y := drm_usb.o drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o +drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o @@ -45,4 +47,5 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ +obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ obj-y += i2c/ diff --git a/trunk/drivers/gpu/drm/ast/ast_drv.h b/trunk/drivers/gpu/drm/ast/ast_drv.h index d4af9edcbb97..20a437f88780 100644 --- a/trunk/drivers/gpu/drm/ast/ast_drv.h +++ b/trunk/drivers/gpu/drm/ast/ast_drv.h @@ -94,7 +94,6 @@ struct ast_private { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; - atomic_t validate_sequence; } ttm; struct drm_gem_object *cursor_cache; diff --git a/trunk/drivers/gpu/drm/ast/ast_mode.c b/trunk/drivers/gpu/drm/ast/ast_mode.c index a712cafcfa1d..f3b2a7cce744 100644 --- a/trunk/drivers/gpu/drm/ast/ast_mode.c +++ b/trunk/drivers/gpu/drm/ast/ast_mode.c @@ -737,6 +737,7 @@ static int ast_get_modes(struct drm_connector *connector) if (edid) { drm_mode_connector_update_edid_property(&ast_connector->base, edid); ret = drm_add_edid_modes(connector, edid); + kfree(edid); return ret; } else drm_mode_connector_update_edid_property(&ast_connector->base, NULL); diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h index 64ea597cb6d3..5d045647a3fc 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -143,7 +143,6 @@ struct cirrus_device { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; - atomic_t validate_sequence; } ttm; bool mm_inited; }; diff --git a/trunk/drivers/gpu/drm/drm_cache.c b/trunk/drivers/gpu/drm/drm_cache.c index 08758e061478..4a4274b348b6 100644 --- a/trunk/drivers/gpu/drm/drm_cache.c +++ b/trunk/drivers/gpu/drm/drm_cache.c @@ -37,12 +37,13 @@ drm_clflush_page(struct page *page) { uint8_t *page_virtual; unsigned int i; + const int size = boot_cpu_data.x86_clflush_size; if (unlikely(page == NULL)) return; page_virtual = kmap_atomic(page); - for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + for (i = 0; i < PAGE_SIZE; i += size) clflush(page_virtual + i); kunmap_atomic(page_virtual); } @@ -99,6 +100,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) } 
EXPORT_SYMBOL(drm_clflush_pages); +void +drm_clflush_sg(struct sg_table *st) +{ +#if defined(CONFIG_X86) + if (cpu_has_clflush) { + struct scatterlist *sg; + int i; + + mb(); + for_each_sg(st->sgl, sg, st->nents, i) + drm_clflush_page(sg_page(sg)); + mb(); + + return; + } + + if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) + printk(KERN_ERR "Timed out waiting for cache flush.\n"); +#else + printk(KERN_ERR "Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); +#endif +} +EXPORT_SYMBOL(drm_clflush_sg); + void drm_clflush_virt_range(char *addr, unsigned long length) { diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 6fbfc244748f..9c346a50379f 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -294,6 +294,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, { int ret; + kref_init(&fb->refcount); + ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); if (ret) return ret; @@ -307,6 +309,38 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, } EXPORT_SYMBOL(drm_framebuffer_init); +static void drm_framebuffer_free(struct kref *kref) +{ + struct drm_framebuffer *fb = + container_of(kref, struct drm_framebuffer, refcount); + fb->funcs->destroy(fb); +} + +/** + * drm_framebuffer_unreference - unref a framebuffer + * + * LOCKING: + * Caller must hold mode config lock. + */ +void drm_framebuffer_unreference(struct drm_framebuffer *fb) +{ + struct drm_device *dev = fb->dev; + DRM_DEBUG("FB ID: %d\n", fb->base.id); + WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + kref_put(&fb->refcount, drm_framebuffer_free); +} +EXPORT_SYMBOL(drm_framebuffer_unreference); + +/** + * drm_framebuffer_reference - incr the fb refcnt + */ +void drm_framebuffer_reference(struct drm_framebuffer *fb) +{ + DRM_DEBUG("FB ID: %d\n", fb->base.id); + kref_get(&fb->refcount); +} +EXPORT_SYMBOL(drm_framebuffer_reference); + /** * drm_framebuffer_cleanup - remove a framebuffer object * @fb: framebuffer to remove @@ -318,6 +352,32 @@ EXPORT_SYMBOL(drm_framebuffer_init); * it, setting it to NULL. */ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) +{ + struct drm_device *dev = fb->dev; + /* + * This could be moved to drm_framebuffer_remove(), but for + * debugging is nice to keep around the list of fb's that are + * no longer associated w/ a drm_file but are not unreferenced + * yet. (i915 and omapdrm have debugfs files which will show + * this.) + */ + drm_mode_object_put(dev, &fb->base); + list_del(&fb->head); + dev->mode_config.num_fb--; +} +EXPORT_SYMBOL(drm_framebuffer_cleanup); + +/** + * drm_framebuffer_remove - remove and unreference a framebuffer object + * @fb: framebuffer to remove + * + * LOCKING: + * Caller must hold mode config lock. + * + * Scans all the CRTCs and planes in @dev's mode_config. If they're + * using @fb, removes it, setting it to NULL. 
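+ *
+ * Note that this only drops the reference held on behalf of the
+ * drm_file; fb->funcs->destroy() is not called until the last
+ * drm_framebuffer_unreference() drops the refcount to zero.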
+ */ +void drm_framebuffer_remove(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; struct drm_crtc *crtc; @@ -350,11 +410,11 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) } } - drm_mode_object_put(dev, &fb->base); - list_del(&fb->head); - dev->mode_config.num_fb--; + list_del(&fb->filp_head); + + drm_framebuffer_unreference(fb); } -EXPORT_SYMBOL(drm_framebuffer_cleanup); +EXPORT_SYMBOL(drm_framebuffer_remove); /** * drm_crtc_init - Initialise a new CRTC object @@ -377,6 +437,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; + crtc->invert_dimensions = false; mutex_lock(&dev->mode_config.mutex); @@ -1031,11 +1092,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) } list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { - fb->funcs->destroy(fb); - } - - list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { - crtc->funcs->destroy(crtc); + drm_framebuffer_remove(fb); } list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, @@ -1043,6 +1100,10 @@ void drm_mode_config_cleanup(struct drm_device *dev) plane->funcs->destroy(plane); } + list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { + crtc->funcs->destroy(crtc); + } + idr_remove_all(&dev->mode_config.crtc_idr); idr_destroy(&dev->mode_config.crtc_idr); } @@ -1852,6 +1913,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (crtc_req->mode_valid) { + int hdisplay, vdisplay; /* If we have a mode we need a framebuffer. */ /* If we pass -1, set the mode with the currently bound fb */ if (crtc_req->fb_id == -1) { @@ -1887,14 +1949,20 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - if (mode->hdisplay > fb->width || - mode->vdisplay > fb->height || - crtc_req->x > fb->width - mode->hdisplay || - crtc_req->y > fb->height - mode->vdisplay) { - DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n", - mode->hdisplay, mode->vdisplay, - crtc_req->x, crtc_req->y, - fb->width, fb->height); + hdisplay = mode->hdisplay; + vdisplay = mode->vdisplay; + + if (crtc->invert_dimensions) + swap(hdisplay, vdisplay); + + if (hdisplay > fb->width || + vdisplay > fb->height || + crtc_req->x > fb->width - hdisplay || + crtc_req->y > fb->height - vdisplay) { + DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n", + fb->width, fb->height, + hdisplay, vdisplay, crtc_req->x, crtc_req->y, + crtc->invert_dimensions ? 
" (inverted)" : ""); ret = -ENOSPC; goto out; } @@ -2169,6 +2237,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r) case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: @@ -2335,11 +2405,7 @@ int drm_mode_rmfb(struct drm_device *dev, goto out; } - /* TODO release all crtc connected to the framebuffer */ - /* TODO unhock the destructor from the buffer object */ - - list_del(&fb->filp_head); - fb->funcs->destroy(fb); + drm_framebuffer_remove(fb); out: mutex_unlock(&dev->mode_config.mutex); @@ -2489,8 +2555,7 @@ void drm_fb_release(struct drm_file *priv) mutex_lock(&dev->mode_config.mutex); list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { - list_del(&fb->filp_head); - fb->funcs->destroy(fb); + drm_framebuffer_remove(fb); } mutex_unlock(&dev->mode_config.mutex); } @@ -3489,6 +3554,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, struct drm_framebuffer *fb; struct drm_pending_vblank_event *e = NULL; unsigned long flags; + int hdisplay, vdisplay; int ret = -EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || @@ -3518,14 +3584,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; fb = obj_to_fb(obj); - if (crtc->mode.hdisplay > fb->width || - crtc->mode.vdisplay > fb->height || - crtc->x > fb->width - crtc->mode.hdisplay || - crtc->y > fb->height - crtc->mode.vdisplay) { - DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n", - fb->width, fb->height, - crtc->mode.hdisplay, crtc->mode.vdisplay, - crtc->x, crtc->y); + hdisplay = crtc->mode.hdisplay; + vdisplay = crtc->mode.vdisplay; + + if (crtc->invert_dimensions) + swap(hdisplay, vdisplay); + + if (hdisplay > fb->width || + vdisplay > fb->height || + crtc->x > fb->width - hdisplay || + crtc->y > fb->height - vdisplay) { + DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n", + fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y, + crtc->invert_dimensions ? " (inverted)" : ""); ret = -ENOSPC; goto out; } @@ -3718,6 +3789,8 @@ int drm_format_num_planes(uint32_t format) case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: return 2; default: return 1; @@ -3751,6 +3824,8 @@ int drm_format_plane_cpp(uint32_t format, int plane) case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: return plane ? 
2 : 1; case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: diff --git a/trunk/drivers/gpu/drm/drm_drv.c b/trunk/drivers/gpu/drm/drm_drv.c index 9238de4009fa..a2c6eb6e0dd8 100644 --- a/trunk/drivers/gpu/drm/drm_drv.c +++ b/trunk/drivers/gpu/drm/drm_drv.c @@ -140,10 +140,10 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), @@ -152,19 +152,19 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 
DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index b7ee230572b7..289c9b18c0d3 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -161,7 +161,7 @@ MODULE_PARM_DESC(edid_fixup, * Sanity check the EDID block (base or extension). Return 0 if the block * doesn't check out, or 1 if it's valid. */ -bool drm_edid_block_valid(u8 *raw_edid, int block) +bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) { int i; u8 csum = 0; @@ -184,7 +184,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block) for (i = 0; i < EDID_LENGTH; i++) csum += raw_edid[i]; if (csum) { - DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); + if (print_bad_edid) { + DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); + } /* allow CEA to slide through, switches mangle this */ if (raw_edid[0] != 0x02) @@ -210,7 +212,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block) return 1; bad: - if (raw_edid) { + if (raw_edid && print_bad_edid) { printk(KERN_ERR "Raw EDID:\n"); print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, raw_edid, EDID_LENGTH, false); @@ -234,7 +236,7 @@ bool drm_edid_is_valid(struct edid *edid) return false; for (i = 0; i <= edid->extensions; i++) - if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i)) + if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true)) return false; return true; @@ -257,6 +259,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, int block, int len) { unsigned char start = block * EDID_LENGTH; + unsigned char segment = block >> 1; + unsigned char xfers = segment ? 3 : 2; int ret, retries = 5; /* The core i2c driver will automatically retry the transfer if the @@ -268,6 +272,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, do { struct i2c_msg msgs[] = { { + .addr = DDC_SEGMENT_ADDR, + .flags = 0, + .len = 1, + .buf = &segment, + }, { .addr = DDC_ADDR, .flags = 0, .len = 1, @@ -279,15 +288,21 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, .buf = buf, } }; - ret = i2c_transfer(adapter, msgs, 2); + + /* + * Avoid sending the segment addr to not upset non-compliant ddc + * monitors. + */ + ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers); + if (ret == -ENXIO) { DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", adapter->name); break; } - } while (ret != 2 && --retries); + } while (ret != xfers && --retries); - return ret == 2 ? 0 : -1; + return ret == xfers ? 
0 : -1; } static bool drm_edid_is_zero(u8 *in_edid, int length) @@ -306,6 +321,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { int i, j = 0, valid_extensions = 0; u8 *block, *new; + bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) return NULL; @@ -314,7 +330,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block, 0)) + if (drm_edid_block_valid(block, 0, print_bad_edid)) break; if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { connector->null_edid_counter++; @@ -339,7 +355,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) block + (valid_extensions + 1) * EDID_LENGTH, j, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) { + if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) { valid_extensions++; break; } @@ -362,8 +378,11 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) return block; carp: - dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", - drm_get_connector_name(connector), j); + if (print_bad_edid) { + dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", + drm_get_connector_name(connector), j); + } + connector->bad_edid_counter++; out: kfree(block); @@ -402,10 +421,7 @@ struct edid *drm_get_edid(struct drm_connector *connector, if (drm_probe_ddc(adapter)) edid = (struct edid *)drm_do_get_edid(connector, adapter); - connector->display_info.raw_edid = (char *)edid; - return edid; - } EXPORT_SYMBOL(drm_get_edid); @@ -1522,6 +1538,40 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) return modes; } +static int +cea_db_payload_len(const u8 *db) +{ + return db[0] & 0x1f; +} + +static int +cea_db_tag(const u8 *db) +{ + return db[0] >> 5; +} + +static int +cea_revision(const u8 *cea) +{ + return cea[1]; +} + +static int +cea_db_offsets(const u8 *cea, int *start, int *end) +{ + /* Data block offset in CEA extension block */ + *start = 4; + *end = cea[2]; + if (*end == 0) + *end = 127; + if (*end < 4 || *end > 127) + return -ERANGE; + return 0; +} + +#define for_each_cea_db(cea, i, start, end) \ + for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) + static int add_cea_modes(struct drm_connector *connector, struct edid *edid) { @@ -1529,10 +1579,17 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) u8 * db, dbl; int modes = 0; - if (cea && cea[1] >= 3) { - for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { - dbl = db[0] & 0x1f; - if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK) + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return 0; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + dbl = cea_db_payload_len(db); + + if (cea_db_tag(db) == VIDEO_BLOCK) modes += do_cea_modes (connector, db+1, dbl); } } @@ -1541,19 +1598,28 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) } static void -parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db) +parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) { - connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */ - - connector->dvi_dual = db[6] & 1; - connector->max_tmds_clock = db[7] * 5; + u8 len = 
cea_db_payload_len(db); - connector->latency_present[0] = db[8] >> 7; - connector->latency_present[1] = (db[8] >> 6) & 1; - connector->video_latency[0] = db[9]; - connector->audio_latency[0] = db[10]; - connector->video_latency[1] = db[11]; - connector->audio_latency[1] = db[12]; + if (len >= 6) { + connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */ + connector->dvi_dual = db[6] & 1; + } + if (len >= 7) + connector->max_tmds_clock = db[7] * 5; + if (len >= 8) { + connector->latency_present[0] = db[8] >> 7; + connector->latency_present[1] = (db[8] >> 6) & 1; + } + if (len >= 9) + connector->video_latency[0] = db[9]; + if (len >= 10) + connector->audio_latency[0] = db[10]; + if (len >= 11) + connector->video_latency[1] = db[11]; + if (len >= 12) + connector->audio_latency[1] = db[12]; DRM_LOG_KMS("HDMI: DVI dual %d, " "max TMDS clock %d, " @@ -1577,6 +1643,21 @@ monitor_name(struct detailed_timing *t, void *data) *(u8 **)data = t->data.other_data.data.str.str; } +static bool cea_db_is_hdmi_vsdb(const u8 *db) +{ + int hdmi_id; + + if (cea_db_tag(db) != VENDOR_BLOCK) + return false; + + if (cea_db_payload_len(db) < 5) + return false; + + hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); + + return hdmi_id == HDMI_IDENTIFIER; +} + /** * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink @@ -1623,29 +1704,40 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) eld[18] = edid->prod_code[0]; eld[19] = edid->prod_code[1]; - if (cea[1] >= 3) - for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { - dbl = db[0] & 0x1f; - - switch ((db[0] & 0xe0) >> 5) { + if (cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) { + start = 0; + end = 0; + } + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + dbl = cea_db_payload_len(db); + + switch (cea_db_tag(db)) { case AUDIO_BLOCK: /* Audio Data Block, contains SADs */ sad_count = dbl / 3; - memcpy(eld + 20 + mnl, &db[1], dbl); + if (dbl >= 1) + memcpy(eld + 20 + mnl, &db[1], dbl); break; case SPEAKER_BLOCK: - /* Speaker Allocation Data Block */ - eld[7] = db[1]; + /* Speaker Allocation Data Block */ + if (dbl >= 1) + eld[7] = db[1]; break; case VENDOR_BLOCK: /* HDMI Vendor-Specific Data Block */ - if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0) + if (cea_db_is_hdmi_vsdb(db)) parse_hdmi_vsdb(connector, db); break; default: break; } } + } eld[5] |= sad_count << 4; eld[2] = (20 + mnl + sad_count * 3 + 3) / 4; @@ -1723,38 +1815,26 @@ EXPORT_SYMBOL(drm_select_eld); bool drm_detect_hdmi_monitor(struct edid *edid) { u8 *edid_ext; - int i, hdmi_id; + int i; int start_offset, end_offset; - bool is_hdmi = false; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) - goto end; + return false; - /* Data block offset in CEA extension block */ - start_offset = 4; - end_offset = edid_ext[2]; + if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) + return false; /* * Because HDMI identifier is in Vendor Specific Block, * search it from all data blocks of CEA extension. 
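 * for_each_cea_db() advances by each block's declared payload length
 * and stops as soon as a declared length would run past the end
 * offset, so a corrupt length byte can no longer walk beyond the
 * extension block.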
*/ - for (i = start_offset; i < end_offset; - /* Increased by data block len */ - i += ((edid_ext[i] & 0x1f) + 1)) { - /* Find vendor specific block */ - if ((edid_ext[i] >> 5) == VENDOR_BLOCK) { - hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) | - edid_ext[i + 3] << 16; - /* Find HDMI identifier */ - if (hdmi_id == HDMI_IDENTIFIER) - is_hdmi = true; - break; - } + for_each_cea_db(edid_ext, i, start_offset, end_offset) { + if (cea_db_is_hdmi_vsdb(&edid_ext[i])) + return true; } -end: - return is_hdmi; + return false; } EXPORT_SYMBOL(drm_detect_hdmi_monitor); @@ -1786,15 +1866,13 @@ bool drm_detect_monitor_audio(struct edid *edid) goto end; } - /* Data block offset in CEA extension block */ - start_offset = 4; - end_offset = edid_ext[2]; + if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) + goto end; - for (i = start_offset; i < end_offset; - i += ((edid_ext[i] & 0x1f) + 1)) { - if ((edid_ext[i] >> 5) == AUDIO_BLOCK) { + for_each_cea_db(edid_ext, i, start_offset, end_offset) { + if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) { has_audio = true; - for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) + for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3) DRM_DEBUG_KMS("CEA audio format %d\n", (edid_ext[i + j] >> 3) & 0xf); goto end; diff --git a/trunk/drivers/gpu/drm/drm_edid_load.c b/trunk/drivers/gpu/drm/drm_edid_load.c index 0303935d10e2..ea9cdab3d431 100644 --- a/trunk/drivers/gpu/drm/drm_edid_load.c +++ b/trunk/drivers/gpu/drm/drm_edid_load.c @@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = { }, }; -static int edid_load(struct drm_connector *connector, char *name, - char *connector_name) +static u8 *edid_load(struct drm_connector *connector, char *name, + char *connector_name) { const struct firmware *fw; struct platform_device *pdev; @@ -123,6 +123,7 @@ static int edid_load(struct drm_connector *connector, char *name, int fwsize, expected; int builtin = 0, err = 0; int i, valid_extensions = 0; + bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); pdev = platform_device_register_simple(connector_name, -1, NULL, 0); if (IS_ERR(pdev)) { @@ -173,7 +174,8 @@ static int edid_load(struct drm_connector *connector, char *name, } memcpy(edid, fwdata, fwsize); - if (!drm_edid_block_valid(edid, 0)) { + if (!drm_edid_block_valid(edid, 0, print_bad_edid)) { + connector->bad_edid_counter++; DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", name); kfree(edid); @@ -185,7 +187,7 @@ static int edid_load(struct drm_connector *connector, char *name, if (i != valid_extensions + 1) memcpy(edid + (valid_extensions + 1) * EDID_LENGTH, edid + i * EDID_LENGTH, EDID_LENGTH); - if (drm_edid_block_valid(edid + i * EDID_LENGTH, i)) + if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid)) valid_extensions++; } @@ -205,7 +207,6 @@ static int edid_load(struct drm_connector *connector, char *name, edid = new_edid; } - connector->display_info.raw_edid = edid; DRM_INFO("Got %s EDID base block and %d extension%s from " "\"%s\" for connector \"%s\"\n", builtin ? "built-in" : "external", valid_extensions, valid_extensions == 1 ? 
"" : "s", @@ -215,7 +216,10 @@ static int edid_load(struct drm_connector *connector, char *name, release_firmware(fw); out: - return err; + if (err) + return ERR_PTR(err); + + return edid; } int drm_load_edid_firmware(struct drm_connector *connector) @@ -223,6 +227,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) char *connector_name = drm_get_connector_name(connector); char *edidname = edid_firmware, *last, *colon; int ret; + struct edid *edid; if (*edidname == '\0') return 0; @@ -240,13 +245,13 @@ int drm_load_edid_firmware(struct drm_connector *connector) if (*last == '\n') *last = '\0'; - ret = edid_load(connector, edidname, connector_name); - if (ret) + edid = (struct edid *) edid_load(connector, edidname, connector_name); + if (IS_ERR_OR_NULL(edid)) return 0; - drm_mode_connector_update_edid_property(connector, - (struct edid *) connector->display_info.raw_edid); + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); - return drm_add_edid_modes(connector, (struct edid *) - connector->display_info.raw_edid); + return ret; } diff --git a/trunk/drivers/gpu/drm/drm_edid_modes.h b/trunk/drivers/gpu/drm/drm_edid_modes.h index ff98a7eb38dd..57459b316adc 100644 --- a/trunk/drivers/gpu/drm/drm_edid_modes.h +++ b/trunk/drivers/gpu/drm/drm_edid_modes.h @@ -89,7 +89,7 @@ static const struct drm_display_mode drm_dmt_modes[] = { 976, 1088, 0, 480, 486, 494, 517, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@43Hz, interlace */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, + { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 1208, 1264, 0, 768, 768, 772, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -395,7 +395,7 @@ static const struct drm_display_mode edid_est_modes[] = { { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, + { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, 1208, 1264, 0, 768, 768, 776, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, @@ -506,17 +506,17 @@ static const struct drm_display_mode edid_cea_modes[] = { 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 5 - 1920x1080i@60Hz */ - { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 6 - 1440x480i@60Hz */ - { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 7 - 1440x480i@60Hz */ - { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, + { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -531,12 +531,12 @@ static const struct drm_display_mode edid_cea_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK) }, /* 10 - 
2880x480i@60Hz */ - { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 11 - 2880x480i@60Hz */ - { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, + { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -573,17 +573,17 @@ static const struct drm_display_mode edid_cea_modes[] = { 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 20 - 1920x1080i@50Hz */ - { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 21 - 1440x576i@50Hz */ - { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 22 - 1440x576i@50Hz */ - { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, + { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -598,12 +598,12 @@ static const struct drm_display_mode edid_cea_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK) }, /* 25 - 2880x576i@50Hz */ - { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 26 - 2880x576i@50Hz */ - { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, + { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -656,12 +656,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 39 - 1920x1080i@50Hz */ - { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 40 - 1920x1080i@100Hz */ - { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -688,7 +688,7 @@ static const struct drm_display_mode edid_cea_modes[] = { DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK) }, /* 46 - 1920x1080i@120Hz */ - { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, @@ -705,12 +705,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_NVSYNC) }, /* 50 - 1440x480i@120Hz */ - { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, + { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 51 - 1440x480i@120Hz */ - { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, + { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -723,12 +723,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 54 - 1440x576i@200Hz */ - { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, + { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 55 - 1440x576i@200Hz */ - { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, + { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, @@ -741,12 +741,12 @@ static const struct drm_display_mode edid_cea_modes[] = { 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 58 - 1440x480i@240 */ - { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, + { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, /* 59 - 1440x480i@240 */ - { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, + { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, diff --git a/trunk/drivers/gpu/drm/drm_fb_cma_helper.c b/trunk/drivers/gpu/drm/drm_fb_cma_helper.c new file mode 100644 index 000000000000..09e11a5d921a --- /dev/null +++ b/trunk/drivers/gpu/drm/drm_fb_cma_helper.c @@ -0,0 +1,406 @@ +/* + * drm kms/fb cma (contiguous memory allocator) helper functions + * + * Copyright (C) 2012 Analog Device Inc. + * Author: Lars-Peter Clausen + * + * Based on udl_fbdev.c + * Copyright (C) 2012 Red Hat + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
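+ *
+ * The helpers below provide a drm_framebuffer implementation backed
+ * by GEM CMA objects together with an fbdev emulation layer on top
+ * of it, so drivers scanning out of contiguous memory can share one
+ * implementation.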
+ */ + +#include +#include +#include +#include +#include +#include +#include + +struct drm_fb_cma { + struct drm_framebuffer fb; + struct drm_gem_cma_object *obj[4]; +}; + +struct drm_fbdev_cma { + struct drm_fb_helper fb_helper; + struct drm_fb_cma *fb; +}; + +static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper) +{ + return container_of(helper, struct drm_fbdev_cma, fb_helper); +} + +static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb) +{ + return container_of(fb, struct drm_fb_cma, fb); +} + +static void drm_fb_cma_destroy(struct drm_framebuffer *fb) +{ + struct drm_fb_cma *fb_cma = to_fb_cma(fb); + int i; + + for (i = 0; i < 4; i++) { + if (fb_cma->obj[i]) + drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base); + } + + drm_framebuffer_cleanup(fb); + kfree(fb_cma); +} + +static int drm_fb_cma_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int *handle) +{ + struct drm_fb_cma *fb_cma = to_fb_cma(fb); + + return drm_gem_handle_create(file_priv, + &fb_cma->obj[0]->base, handle); +} + +static struct drm_framebuffer_funcs drm_fb_cma_funcs = { + .destroy = drm_fb_cma_destroy, + .create_handle = drm_fb_cma_create_handle, +}; + +static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, + struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj, + unsigned int num_planes) +{ + struct drm_fb_cma *fb_cma; + int ret; + int i; + + fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL); + if (!fb_cma) + return ERR_PTR(-ENOMEM); + + ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs); + if (ret) { + dev_err(dev->dev, "Failed to initalize framebuffer: %d\n", ret); + kfree(fb_cma); + return ERR_PTR(ret); + } + + drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd); + + for (i = 0; i < num_planes; i++) + fb_cma->obj[i] = obj[i]; + + return fb_cma; +} + +/** + * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function + * + * If your hardware has special alignment or pitch requirements these should be + * checked before calling this function. + */ +struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev, + struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_fb_cma *fb_cma; + struct drm_gem_cma_object *objs[4]; + struct drm_gem_object *obj; + unsigned int hsub; + unsigned int vsub; + int ret; + int i; + + hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); + vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); + + for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { + unsigned int width = mode_cmd->width / (i ? hsub : 1); + unsigned int height = mode_cmd->height / (i ? 
vsub : 1);
+		unsigned int min_size;
+
+		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+		if (!obj) {
+			dev_err(dev->dev, "Failed to lookup GEM object\n");
+			ret = -ENXIO;
+			goto err_gem_object_unreference;
+		}
+
+		min_size = (height - 1) * mode_cmd->pitches[i]
+			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+			 + mode_cmd->offsets[i];
+
+		if (obj->size < min_size) {
+			drm_gem_object_unreference_unlocked(obj);
+			ret = -EINVAL;
+			goto err_gem_object_unreference;
+		}
+		objs[i] = to_drm_gem_cma_obj(obj);
+	}
+
+	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+	if (IS_ERR(fb_cma)) {
+		ret = PTR_ERR(fb_cma);
+		goto err_gem_object_unreference;
+	}
+
+	return &fb_cma->fb;
+
+err_gem_object_unreference:
+	for (i--; i >= 0; i--)
+		drm_gem_object_unreference_unlocked(&objs[i]->base);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create);
+
+/**
+ * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Return the CMA GEM object for given framebuffer.
+ *
+ * This function will usually be called from the CRTC callback functions.
+ */
+struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+	unsigned int plane)
+{
+	struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+
+	if (plane >= 4)
+		return NULL;
+
+	return fb_cma->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+
+static struct fb_ops drm_fbdev_cma_ops = {
+	.owner		= THIS_MODULE,
+	.fb_fillrect	= sys_fillrect,
+	.fb_copyarea	= sys_copyarea,
+	.fb_imageblit	= sys_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= drm_fb_helper_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+};
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_device *dev = helper->dev;
+	struct drm_gem_cma_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned int bytes_per_pixel;
+	unsigned long offset;
+	struct fb_info *fbi;
+	size_t size;
+	int ret;
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+			sizes->surface_width, sizes->surface_height,
+			sizes->surface_bpp);
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+		sizes->surface_depth);
+
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	obj = drm_gem_cma_create(dev, size);
+	if (!obj)
+		return -ENOMEM;
+
+	fbi = framebuffer_alloc(0, dev->dev);
+	if (!fbi) {
+		dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+		ret = -ENOMEM;
+		goto err_drm_gem_cma_free_object;
+	}
+
+	fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+	if (IS_ERR(fbdev_cma->fb)) {
+		dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+		ret = PTR_ERR(fbdev_cma->fb);
+		goto err_framebuffer_release;
+	}
+
+	fb = &fbdev_cma->fb->fb;
+	helper->fb = fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &drm_fbdev_cma_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		dev_err(dev->dev, "Failed to allocate color map.\n");
+		goto err_drm_fb_cma_destroy;
+	}
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
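+	/*
+	 * fill_fix seeds the fbdev fixed info (line length, visual type)
+	 * from the framebuffer's pitch and depth; fill_var below seeds the
+	 * variable info (resolution, bits per pixel) from the framebuffer
+	 * and the helper's surface size.
+	 */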
+	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+
+	dev->mode_config.fb_base = (resource_size_t)obj->paddr;
+	fbi->screen_base = obj->vaddr + offset;
+	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
+
+	return 0;
+
+err_drm_fb_cma_destroy:
+	drm_fb_cma_destroy(fb);
+err_framebuffer_release:
+	framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+	drm_gem_cma_free_object(&obj->base);
+	return ret;
+}
+
+static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
+	struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	if (!helper->fb) {
+		ret = drm_fbdev_cma_create(helper, sizes);
+		if (ret < 0)
+			return ret;
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+	.fb_probe = drm_fbdev_cma_probe,
+};
+
+/**
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+	unsigned int preferred_bpp, unsigned int num_crtc,
+	unsigned int max_conn_count)
+{
+	struct drm_fbdev_cma *fbdev_cma;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+	if (!fbdev_cma) {
+		dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
+	helper = &fbdev_cma->fb_helper;
+
+	ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+		goto err_free;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(helper);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to add connectors.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	ret = drm_fb_helper_initial_config(helper, preferred_bpp);
+	if (ret < 0) {
+		dev_err(dev->dev, "Failed to set initial hw configuration.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	return fbdev_cma;
+
+err_drm_fb_helper_fini:
+	drm_fb_helper_fini(helper);
+err_free:
+	kfree(fbdev_cma);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
+
+/**
+ * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
+ * @fbdev_cma: The drm_fbdev_cma struct
+ */
+void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma->fb_helper.fbdev) {
+		struct fb_info *info;
+		int ret;
+
+		info = fbdev_cma->fb_helper.fbdev;
+		ret = unregister_framebuffer(info);
+		if (ret < 0)
+			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	if (fbdev_cma->fb)
+		drm_fb_cma_destroy(&fbdev_cma->fb->fb);
+
+	drm_fb_helper_fini(&fbdev_cma->fb_helper);
+	kfree(fbdev_cma);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
+
+/**
+ * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's lastclose callback.
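+ *
+ * A minimal sketch of such a callback (the driver struct and field
+ * names are hypothetical):
+ *
+ *	static void foo_lastclose(struct drm_device *dev)
+ *	{
+ *		struct foo_private *priv = dev->dev_private;
+ *
+ *		drm_fbdev_cma_restore_mode(priv->fbdev_cma);
+ *	}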
+ */
+void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
+
+/**
+ * drm_fbdev_cma_hotplug_event() - Poll for hotplug events
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ *
+ * This function is usually called from the DRM driver's output_poll_changed
+ * callback.
+ */
+void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
+{
+	if (fbdev_cma)
+		drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
+}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index f546d1e8af82..b5d05f58c15a 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);

-bool drm_fb_helper_force_kernel_mode(void)
+static bool drm_fb_helper_force_kernel_mode(void)
 {
 	bool ret, error = false;
 	struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
 	/* Walk the connectors & encoders on this fb turning them on/off */
 	for (j = 0; j < fb_helper->connector_count; j++) {
 		connector = fb_helper->connector_info[j]->connector;
-		drm_helper_connector_dpms(connector, dpms_mode);
+		connector->funcs->dpms(connector, dpms_mode);
 		drm_connector_property_set_value(connector,
 			dev->mode_config.dpms_property, dpms_mode);
 	}
@@ -1230,7 +1230,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_fb_helper_crtc **crtcs;
 	struct drm_display_mode **modes;
-	struct drm_encoder *encoder;
 	struct drm_mode_set *modeset;
 	bool *enabled;
 	int width, height;
@@ -1241,11 +1240,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 	width = dev->mode_config.max_width;
 	height = dev->mode_config.max_height;

-	/* clean out all the encoder/crtc combos */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		encoder->crtc = NULL;
-	}
-
 	crtcs = kcalloc(dev->mode_config.num_connector,
 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
 	modes = kcalloc(dev->mode_config.num_connector,
diff --git a/trunk/drivers/gpu/drm/drm_gem_cma_helper.c b/trunk/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 000000000000..1aa8fee1e865
--- /dev/null
+++ b/trunk/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,251 @@
+/*
+ * drm gem CMA (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+		struct drm_gem_cma_object *cma_obj)
+{
+	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+			cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+	if (!cma_obj->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto err_obj_init;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret)
+		goto err_create_mmap_offset;
+
+	return cma_obj;
+
+err_create_mmap_offset:
+	drm_gem_object_release(gem_obj);
+
+err_obj_init:
+	drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+	kfree(cma_obj);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
+		struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int size,
+		unsigned int *handle)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
+
+	gem_obj = &cma_obj->base;
+
+	/*
+	 * allocate an id from the idr table, under which the object is
+	 * registered; the resulting handle is what userspace sees.
+	 */
+	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+	if (ret)
+		goto err_handle_create;
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return cma_obj;
+
+err_handle_create:
+	drm_gem_cma_free_object(gem_obj);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_gem_cma_object *cma_obj;
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	drm_gem_object_release(gem_obj);
+
+	cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+	kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this into your own function if you need bigger alignment.
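+ *
+ * A sketch of such a wrapper for hardware that, for example, needs the
+ * pitch aligned to 64 bytes (the alignment value and driver name are
+ * illustrative):
+ *
+ *	static int foo_dumb_create(struct drm_file *file_priv,
+ *			struct drm_device *drm,
+ *			struct drm_mode_create_dumb *args)
+ *	{
+ *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
+ *		return drm_gem_cma_dumb_create(file_priv, drm, args);
+ *	}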
+ */ +int drm_gem_cma_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + struct drm_gem_cma_object *cma_obj; + int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + if (args->pitch < min_pitch) + args->pitch = min_pitch; + + if (args->size < args->pitch * args->height) + args->size = args->pitch * args->height; + + cma_obj = drm_gem_cma_create_with_handle(file_priv, dev, + args->size, &args->handle); + if (IS_ERR(cma_obj)) + return PTR_ERR(cma_obj); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); + +/* + * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback + * function + */ +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *drm, uint32_t handle, uint64_t *offset) +{ + struct drm_gem_object *gem_obj; + + mutex_lock(&drm->struct_mutex); + + gem_obj = drm_gem_object_lookup(drm, file_priv, handle); + if (!gem_obj) { + dev_err(drm->dev, "failed to lookup gem object\n"); + mutex_unlock(&drm->struct_mutex); + return -EINVAL; + } + + *offset = get_gem_mmap_offset(gem_obj); + + drm_gem_object_unreference(gem_obj); + + mutex_unlock(&drm->struct_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset); + +const struct vm_operations_struct drm_gem_cma_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; +EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops); + +/* + * drm_gem_cma_mmap - (struct file_operation)->mmap callback function + */ +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_gem_object *gem_obj; + struct drm_gem_cma_object *cma_obj; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + gem_obj = vma->vm_private_data; + cma_obj = to_drm_gem_cma_obj(gem_obj); + + ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + if (ret) + drm_gem_vm_close(vma); + + return ret; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); + +/* + * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function + */ +int drm_gem_cma_dumb_destroy(struct drm_file *file_priv, + struct drm_device *drm, unsigned int handle) +{ + return drm_gem_handle_delete(file_priv, handle); +} +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy); diff --git a/trunk/drivers/gpu/drm/drm_irq.c b/trunk/drivers/gpu/drm/drm_irq.c index 03f16f352fe2..076c4a86ff84 100644 --- a/trunk/drivers/gpu/drm/drm_irq.c +++ b/trunk/drivers/gpu/drm/drm_irq.c @@ -1236,7 +1236,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, return ret; } -void drm_handle_vblank_events(struct drm_device *dev, int crtc) +static void drm_handle_vblank_events(struct drm_device *dev, int crtc) { struct drm_pending_vblank_event *e, *t; struct timeval now; diff --git a/trunk/drivers/gpu/drm/drm_vm.c b/trunk/drivers/gpu/drm/drm_vm.c index 961ee08927fe..6fed21502313 100644 --- a/trunk/drivers/gpu/drm/drm_vm.c +++ b/trunk/drivers/gpu/drm/drm_vm.c @@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) tmp = pgprot_writecombine(tmp); else tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) || defined(__arm__) +#elif defined(__sparc__) || defined(__arm__) || defined(__mips__) tmp = pgprot_noncached(tmp); #endif return tmp; @@ -619,20 +619,11 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) offset = drm_core_get_reg_ofs(dev); vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); -#if !defined(__arm__) 
if (io_remap_pfn_range(vma, vma->vm_start, (map->offset + offset) >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; -#else - if (remap_pfn_range(vma, vma->vm_start, - (map->offset + offset) >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; -#endif - DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," " offset = 0x%llx\n", map->type, diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c index d9568198c300..9dce3b9c3896 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ -147,9 +147,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); - - kfree(connector->display_info.raw_edid); - connector->display_info.raw_edid = edid; + kfree(edid); } else { struct drm_display_mode *mode = drm_mode_create(connector->dev); struct exynos_drm_panel_info *panel; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index d5586cc75163..f4ac43356583 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -266,8 +266,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, /* release drm framebuffer and real buffer */ if (fb_helper->fb && fb_helper->fb->funcs) { fb = fb_helper->fb; - if (fb && fb->funcs->destroy) - fb->funcs->destroy(fb); + if (fb) + drm_framebuffer_remove(fb); } /* release linux framebuffer */ diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 537027a74fd5..e364165f1a2a 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -102,7 +102,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector, u8 *edid, int len) { struct vidi_context *ctx = get_vidi_context(dev); - struct edid *raw_edid; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -115,18 +114,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector, return -EFAULT; } - raw_edid = kzalloc(len, GFP_KERNEL); - if (!raw_edid) { - DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); - return -ENOMEM; - } - - memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) - * EDID_LENGTH, len)); - - /* attach the edid data to connector. 
*/ - connector->display_info.raw_edid = (char *)raw_edid; - memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) * EDID_LENGTH, len)); diff --git a/trunk/drivers/gpu/drm/gma500/Makefile b/trunk/drivers/gpu/drm/gma500/Makefile index abfa2a93f0d0..7a2d40a5c1e1 100644 --- a/trunk/drivers/gpu/drm/gma500/Makefile +++ b/trunk/drivers/gpu/drm/gma500/Makefile @@ -3,7 +3,7 @@ # ccflags-y += -I$(srctree)/include/drm -gma500_gfx-y += gem_glue.o \ +gma500_gfx-y += \ accel_2d.o \ backlight.o \ framebuffer.o \ @@ -30,7 +30,8 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \ cdv_intel_crt.o \ cdv_intel_display.o \ cdv_intel_hdmi.o \ - cdv_intel_lvds.o + cdv_intel_lvds.o \ + cdv_intel_dp.o gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \ oaktrail_crtc.o \ diff --git a/trunk/drivers/gpu/drm/gma500/backlight.c b/trunk/drivers/gpu/drm/gma500/backlight.c index 20793951fcac..143eba3309c5 100644 --- a/trunk/drivers/gpu/drm/gma500/backlight.c +++ b/trunk/drivers/gpu/drm/gma500/backlight.c @@ -26,10 +26,55 @@ #include "intel_bios.h" #include "power.h" +static void do_gma_backlight_set(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct drm_psb_private *dev_priv = dev->dev_private; + backlight_update_status(dev_priv->backlight_device); +#endif +} + +void gma_backlight_enable(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct drm_psb_private *dev_priv = dev->dev_private; + dev_priv->backlight_enabled = true; + if (dev_priv->backlight_device) { + dev_priv->backlight_device->props.brightness = dev_priv->backlight_level; + do_gma_backlight_set(dev); + } +#endif +} + +void gma_backlight_disable(struct drm_device *dev) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct drm_psb_private *dev_priv = dev->dev_private; + dev_priv->backlight_enabled = false; + if (dev_priv->backlight_device) { + dev_priv->backlight_device->props.brightness = 0; + do_gma_backlight_set(dev); + } +#endif +} + +void gma_backlight_set(struct drm_device *dev, int v) +{ +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + struct drm_psb_private *dev_priv = dev->dev_private; + dev_priv->backlight_level = v; + if (dev_priv->backlight_device && dev_priv->backlight_enabled) { + dev_priv->backlight_device->props.brightness = v; + do_gma_backlight_set(dev); + } +#endif +} + int gma_backlight_init(struct drm_device *dev) { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = dev->dev_private; + dev_priv->backlight_enabled = true; return dev_priv->ops->backlight_init(dev); #else return 0; diff --git a/trunk/drivers/gpu/drm/gma500/cdv_device.c b/trunk/drivers/gpu/drm/gma500/cdv_device.c index b7e7b49d8f62..bfc2f397019a 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_device.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_device.c @@ -58,10 +58,17 @@ static int cdv_output_init(struct drm_device *dev) cdv_intel_lvds_init(dev, &dev_priv->mode_dev); /* These bits indicate HDMI not SDVO on CDV */ - if (REG_READ(SDVOB) & SDVO_DETECTED) + if (REG_READ(SDVOB) & SDVO_DETECTED) { cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); - if (REG_READ(SDVOC) & SDVO_DETECTED) + if (REG_READ(DP_B) & DP_DETECTED) + cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B); + } + + if (REG_READ(SDVOC) & SDVO_DETECTED) { cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC); + if (REG_READ(DP_C) & DP_DETECTED) + cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C); + } return 0; } @@ -163,6 +170,7 @@ static int cdv_backlight_init(struct drm_device *dev) cdv_get_brightness(cdv_backlight_device); backlight_update_status(cdv_backlight_device); 
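	/* Publishing the device through dev_priv is what lets the new
	 * gma_backlight_*() helpers above reach it. */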
dev_priv->backlight_device = cdv_backlight_device; + dev_priv->backlight_enabled = true; return 0; } @@ -449,6 +457,7 @@ static void cdv_get_core_freq(struct drm_device *dev) case 6: case 7: dev_priv->core_freq = 266; + break; default: dev_priv->core_freq = 0; } @@ -488,6 +497,65 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on) } } +static const char *force_audio_names[] = { + "off", + "auto", + "on", +}; + +void cdv_intel_attach_force_audio_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_property *prop; + int i; + + prop = dev_priv->force_audio_property; + if (prop == NULL) { + prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, + "audio", + ARRAY_SIZE(force_audio_names)); + if (prop == NULL) + return; + + for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) + drm_property_add_enum(prop, i, i-1, force_audio_names[i]); + + dev_priv->force_audio_property = prop; + } + drm_connector_attach_property(connector, prop, 0); +} + + +static const char *broadcast_rgb_names[] = { + "Full", + "Limited 16:235", +}; + +void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_property *prop; + int i; + + prop = dev_priv->broadcast_rgb_property; + if (prop == NULL) { + prop = drm_property_create(dev, DRM_MODE_PROP_ENUM, + "Broadcast RGB", + ARRAY_SIZE(broadcast_rgb_names)); + if (prop == NULL) + return; + + for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) + drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); + + dev_priv->broadcast_rgb_property = prop; + } + + drm_connector_attach_property(connector, prop, 0); +} + /* Cedarview */ static const struct psb_offset cdv_regmap[2] = { { diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c index a68509ba22a8..3cfd0931fbfb 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -57,15 +57,26 @@ struct cdv_intel_clock_t { struct cdv_intel_limit_t { struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1; struct cdv_intel_p2_t p2; + bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *, + int, int, struct cdv_intel_clock_t *); }; +static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, + struct drm_crtc *crtc, int target, int refclk, + struct cdv_intel_clock_t *best_clock); +static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, + int refclk, + struct cdv_intel_clock_t *best_clock); + #define CDV_LIMIT_SINGLE_LVDS_96 0 #define CDV_LIMIT_SINGLE_LVDS_100 1 #define CDV_LIMIT_DAC_HDMI_27 2 #define CDV_LIMIT_DAC_HDMI_96 3 +#define CDV_LIMIT_DP_27 4 +#define CDV_LIMIT_DP_100 5 static const struct cdv_intel_limit_t cdv_intel_limits[] = { - { /* CDV_SIGNLE_LVDS_96MHz */ + { /* CDV_SINGLE_LVDS_96MHz */ .dot = {.min = 20000, .max = 115500}, .vco = {.min = 1800000, .max = 3600000}, .n = {.min = 2, .max = 6}, @@ -76,6 +87,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p1 = {.min = 2, .max = 10}, .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, + .find_pll = cdv_intel_find_best_PLL, }, { /* CDV_SINGLE_LVDS_100MHz */ .dot = {.min = 20000, .max = 115500}, @@ -90,6 +102,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { * is 80-224Mhz. 
Prefer single channel as much as possible. */ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, + .find_pll = cdv_intel_find_best_PLL, }, { /* CDV_DAC_HDMI_27MHz */ .dot = {.min = 20000, .max = 400000}, @@ -101,6 +114,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p = {.min = 5, .max = 90}, .p1 = {.min = 1, .max = 9}, .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, + .find_pll = cdv_intel_find_best_PLL, }, { /* CDV_DAC_HDMI_96MHz */ .dot = {.min = 20000, .max = 400000}, @@ -112,7 +126,32 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { .p = {.min = 5, .max = 100}, .p1 = {.min = 1, .max = 10}, .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, + .find_pll = cdv_intel_find_best_PLL, + }, + { /* CDV_DP_27MHz */ + .dot = {.min = 160000, .max = 272000}, + .vco = {.min = 1809000, .max = 3564000}, + .n = {.min = 1, .max = 1}, + .m = {.min = 67, .max = 132}, + .m1 = {.min = 0, .max = 0}, + .m2 = {.min = 65, .max = 130}, + .p = {.min = 5, .max = 90}, + .p1 = {.min = 1, .max = 9}, + .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10}, + .find_pll = cdv_intel_find_dp_pll, }, + { /* CDV_DP_100MHz */ + .dot = {.min = 160000, .max = 272000}, + .vco = {.min = 1800000, .max = 3600000}, + .n = {.min = 2, .max = 6}, + .m = {.min = 60, .max = 164}, + .m1 = {.min = 0, .max = 0}, + .m2 = {.min = 58, .max = 162}, + .p = {.min = 5, .max = 100}, + .p1 = {.min = 1, .max = 10}, + .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10}, + .find_pll = cdv_intel_find_dp_pll, + } }; #define _wait_for(COND, MS, W) ({ \ @@ -132,7 +171,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = { #define wait_for(COND, MS) _wait_for(COND, MS, 1) -static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) +int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) { int ret; @@ -159,7 +198,7 @@ static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) return 0; } -static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) +int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) { int ret; static bool dpio_debug = true; @@ -201,7 +240,7 @@ static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) /* Reset the DPIO configuration register. The BIOS does this at every * mode set. 
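 *
 * A typical use of the cdv_sb_read()/cdv_sb_write() pair above (made
 * non-static by this patch so the new DP code can share them) is a
 * read-modify-write of a PHY lane register:
 *
 *	u32 val;
 *
 *	cdv_sb_read(dev, PSB_LANE0, &val);
 *	val &= ~LANE_PLL_MASK;
 *	val |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
 *	cdv_sb_write(dev, PSB_LANE0, val);
 *
 * as done for the DDI lanes in cdv_dpll_set_clock_cdv() below.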
*/ -static void cdv_sb_reset(struct drm_device *dev) +void cdv_sb_reset(struct drm_device *dev) { REG_WRITE(DPIO_CFG, 0); @@ -216,7 +255,7 @@ static void cdv_sb_reset(struct drm_device *dev) */ static int cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, - struct cdv_intel_clock_t *clock, bool is_lvds) + struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select) { struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); int pipe = psb_crtc->pipe; @@ -259,7 +298,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, ref_value &= ~(REF_CLK_MASK); /* use DPLL_A for pipeB on CRT/HDMI */ - if (pipe == 1 && !is_lvds) { + if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) { DRM_DEBUG_KMS("use DPLLA for pipe B\n"); ref_value |= REF_CLK_DPLLA; } else { @@ -336,30 +375,33 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, if (ret) return ret; - lane_reg = PSB_LANE0; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - - lane_reg = PSB_LANE1; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - - lane_reg = PSB_LANE2; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - - lane_reg = PSB_LANE3; - cdv_sb_read(dev, lane_reg, &lane_value); - lane_value &= ~(LANE_PLL_MASK); - lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); - cdv_sb_write(dev, lane_reg, lane_value); - + if (ddi_select) { + if ((ddi_select & DDI_MASK) == DDI0_SELECT) { + lane_reg = PSB_LANE0; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + + lane_reg = PSB_LANE1; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + } else { + lane_reg = PSB_LANE2; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + + lane_reg = PSB_LANE3; + cdv_sb_read(dev, lane_reg, &lane_value); + lane_value &= ~(LANE_PLL_MASK); + lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); + cdv_sb_write(dev, lane_reg, lane_value); + } + } return 0; } @@ -396,6 +438,12 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc, limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; else limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; + } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || + psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { + if (refclk == 27000) + limit = &cdv_intel_limits[CDV_LIMIT_DP_27]; + else + limit = &cdv_intel_limits[CDV_LIMIT_DP_100]; } else { if (refclk == 27000) limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27]; @@ -438,13 +486,12 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc, return true; } -static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target, - int refclk, - struct cdv_intel_clock_t *best_clock) +static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit, + struct drm_crtc *crtc, int target, int refclk, + struct cdv_intel_clock_t *best_clock) { struct 
drm_device *dev = crtc->dev; struct cdv_intel_clock_t clock; - const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk); int err = target; @@ -498,6 +545,49 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target, return err != target; } +static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target, + int refclk, + struct cdv_intel_clock_t *best_clock) +{ + struct cdv_intel_clock_t clock; + if (refclk == 27000) { + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 0; + clock.m2 = 118; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 1; + clock.m1 = 0; + clock.m2 = 98; + } + } else if (refclk == 100000) { + if (target < 200000) { + clock.p1 = 2; + clock.p2 = 10; + clock.n = 5; + clock.m1 = 0; + clock.m2 = 160; + } else { + clock.p1 = 1; + clock.p2 = 10; + clock.n = 5; + clock.m1 = 0; + clock.m2 = 133; + } + } else + return false; + clock.m = clock.m2 + 2; + clock.p = clock.p1 * clock.p2; + clock.vco = (refclk * clock.m) / clock.n; + clock.dot = clock.vco / clock.p; + memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t)); + return true; +} + static int cdv_intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -791,7 +881,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: if (psb_intel_crtc->active) - return; + break; psb_intel_crtc->active = true; @@ -835,17 +925,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(map->status, temp); REG_READ(map->status); - cdv_intel_update_watermark(dev, crtc); cdv_intel_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable * if it's on this pipe */ /* psb_intel_crtc_dpms_video(crtc, true); TODO */ - psb_intel_crtc->crtc_enable = true; break; case DRM_MODE_DPMS_OFF: if (!psb_intel_crtc->active) - return; + break; psb_intel_crtc->active = false; @@ -892,10 +980,9 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode) /* Wait for the clocks to turn off. 
 */
 	udelay(150);

-	cdv_intel_update_watermark(dev, crtc);
-	psb_intel_crtc->crtc_enable = false;
 		break;
 	}
+	cdv_intel_update_watermark(dev, crtc);
 	/*Set FIFO Watermarks*/
 	REG_WRITE(DSPARB, 0x3F3E);
 }
@@ -952,9 +1039,12 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	u32 dpll = 0, dspcntr, pipeconf;
 	bool ok;
 	bool is_crt = false, is_lvds = false, is_tv = false;
-	bool is_hdmi = false;
+	bool is_hdmi = false, is_dp = false;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
+	const struct cdv_intel_limit_t *limit;
+	u32 ddi_select = 0;
+	bool is_edp = false;

 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 		    || connector->encoder->crtc != crtc)
 			continue;

+		ddi_select = psb_intel_encoder->ddi_select;
 		switch (psb_intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -977,6 +1068,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 		case INTEL_OUTPUT_HDMI:
 			is_hdmi = true;
 			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_edp = true;
+			break;
+		default:
+			DRM_ERROR("invalid output type.\n");
+			return 0;
 		}
 	}
@@ -986,6 +1086,20 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		/* high-end sku, 27/100 mhz */
 		refclk = 27000;
+	if (is_dp || is_edp) {
+		/*
+		 * Based on the spec the low-end SKU has only CRT/LVDS. So it is
+		 * unnecessary to consider it for DP/eDP.
+		 * On the high-end SKU, it will use the 27/100M reference clk
+		 * for DP/eDP. When using the SSC clock, the ref clk is 100MHz;
+		 * otherwise it will be 27MHz. From the VBIOS code it seems that
+		 * pipe A chooses 27MHz for DP/eDP while pipe B chooses 100MHz.
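+		 *
+		 * In short, on this reading of the VBIOS, the choice below
+		 * reduces to:
+		 *
+		 *	refclk = (pipe == 0) ? 27000 : 100000;	(in kHz)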
+ */ + if (pipe == 0) + refclk = 27000; + else + refclk = 100000; + } if (is_lvds && dev_priv->lvds_use_ssc) { refclk = dev_priv->lvds_ssc_freq * 1000; @@ -993,8 +1107,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, } drm_mode_debug_printmodeline(adjusted_mode); + + limit = cdv_intel_limit(crtc, refclk); - ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, + ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); @@ -1009,6 +1125,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, } /* dpll |= PLL_REF_INPUT_DREFCLK; */ + if (is_dp || is_edp) { + cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode); + } else { + REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0); + REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0); + REG_WRITE(PIPE_DP_LINK_M(pipe), 0); + REG_WRITE(PIPE_DP_LINK_N(pipe), 0); + } + dpll |= DPLL_SYNCLOCK_ENABLE; /* if (is_lvds) dpll |= DPLLB_MODE_LVDS; @@ -1019,6 +1144,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, /* setup pipeconf */ pipeconf = REG_READ(map->conf); + pipeconf &= ~(PIPE_BPC_MASK); + if (is_edp) { + switch (dev_priv->edp.bpp) { + case 24: + pipeconf |= PIPE_8BPC; + break; + case 18: + pipeconf |= PIPE_6BPC; + break; + case 30: + pipeconf |= PIPE_10BPC; + break; + default: + pipeconf |= PIPE_8BPC; + break; + } + } else if (is_lvds) { + /* the BPC will be 6 if it is 18-bit LVDS panel */ + if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) + pipeconf |= PIPE_8BPC; + else + pipeconf |= PIPE_6BPC; + } else + pipeconf |= PIPE_8BPC; + /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -1033,7 +1183,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc, REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE); REG_READ(map->dpll); - cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds); + cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select); udelay(150); diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c new file mode 100644 index 000000000000..c9abc06ef680 --- /dev/null +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -0,0 +1,1951 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ *	Keith Packard
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "drm_dp_helper.h"
+
+#define _wait_for(COND, MS, W) ({ \
+	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+	int ret__ = 0;							\
+	while (! (COND)) {						\
+		if (time_after(jiffies, timeout__)) {			\
+			ret__ = -ETIMEDOUT;				\
+			break;						\
+		}							\
+		if (W && !in_dbg_master()) msleep(W);			\
+	}								\
+	ret__;								\
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+#define DP_LINK_STATUS_SIZE	6
+#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+
+#define DP_LINK_CONFIGURATION_SIZE	9
+
+#define CDV_FAST_LINK_TRAIN	1
+
+struct cdv_intel_dp {
+	uint32_t output_reg;
+	uint32_t DP;
+	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	bool has_audio;
+	int force_audio;
+	uint32_t color_range;
+	uint8_t link_bw;
+	uint8_t lane_count;
+	uint8_t dpcd[4];
+	struct psb_intel_encoder *encoder;
+	struct i2c_adapter adapter;
+	struct i2c_algo_dp_aux_data algo;
+	uint8_t train_set[4];
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+	int panel_power_up_delay;
+	int panel_power_down_delay;
+	int panel_power_cycle_delay;
+	int backlight_on_delay;
+	int backlight_off_delay;
+	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
+	bool panel_on;
+};
+
+struct ddi_regoff {
+	uint32_t	PreEmph1;
+	uint32_t	PreEmph2;
+	uint32_t	VSwing1;
+	uint32_t	VSwing2;
+	uint32_t	VSwing3;
+	uint32_t	VSwing4;
+	uint32_t	VSwing5;
+};
+
+static struct ddi_regoff ddi_DP_train_table[] = {
+	{.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
+	.VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
+	.VSwing5 = 0x8158,},
+	{.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
+	.VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
+	.VSwing5 = 0x8258,},
+};
+
+static uint32_t dp_vswing_premph_table[] = {
+	0x55338954,	0x4000,
+	0x554d8954,	0x2000,
+	0x55668954,	0,
+	0x559ac0d4,	0x6000,
+};
+/**
+ * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * @intel_dp: DP struct
+ *
+ * If a CPU or PCH DP output is attached to an eDP panel, this function
+ * will return true, and false otherwise.
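+ *
+ * On CDV there is no PCH, so in practice this reduces to checking the
+ * encoder type assigned at init time (INTEL_OUTPUT_EDP vs
+ * INTEL_OUTPUT_DISPLAYPORT), as the function body below shows.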
+ */ +static bool is_edp(struct psb_intel_encoder *encoder) +{ + return encoder->type == INTEL_OUTPUT_EDP; +} + + +static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder); +static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder); +static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder); + +static int +cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int max_lane_count = 4; + + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { + max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; + switch (max_lane_count) { + case 1: case 2: case 4: + break; + default: + max_lane_count = 4; + } + } + return max_lane_count; +} + +static int +cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; + + switch (max_link_bw) { + case DP_LINK_BW_1_62: + case DP_LINK_BW_2_7: + break; + default: + max_link_bw = DP_LINK_BW_1_62; + break; + } + return max_link_bw; +} + +static int +cdv_intel_dp_link_clock(uint8_t link_bw) +{ + if (link_bw == DP_LINK_BW_2_7) + return 270000; + else + return 162000; +} + +static int +cdv_intel_dp_link_required(int pixel_clock, int bpp) +{ + return (pixel_clock * bpp + 7) / 8; +} + +static int +cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes) +{ + return (max_link_clock * max_lanes * 19) / 20; +} + +static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + u32 pp; + + if (intel_dp->panel_on) { + DRM_DEBUG_KMS("Skip VDD on because of panel on\n"); + return; + } + DRM_DEBUG_KMS("\n"); + + pp = REG_READ(PP_CONTROL); + + pp |= EDP_FORCE_VDD; + REG_WRITE(PP_CONTROL, pp); + REG_READ(PP_CONTROL); + msleep(intel_dp->panel_power_up_delay); +} + +static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + u32 pp; + + DRM_DEBUG_KMS("\n"); + pp = REG_READ(PP_CONTROL); + + pp &= ~EDP_FORCE_VDD; + REG_WRITE(PP_CONTROL, pp); + REG_READ(PP_CONTROL); + +} + +/* Returns true if the panel was already on when called */ +static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE; + + if (intel_dp->panel_on) + return true; + + DRM_DEBUG_KMS("\n"); + pp = REG_READ(PP_CONTROL); + pp &= ~PANEL_UNLOCK_MASK; + + pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON); + REG_WRITE(PP_CONTROL, pp); + REG_READ(PP_CONTROL); + + if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) { + DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS)); + intel_dp->panel_on = false; + } else + intel_dp->panel_on = true; + msleep(intel_dp->panel_power_up_delay); + + return false; +} + +static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + u32 pp, idle_off_mask = PP_ON ; + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + + DRM_DEBUG_KMS("\n"); + + pp = REG_READ(PP_CONTROL); + + if ((pp & POWER_TARGET_ON) == 0) + return; + + intel_dp->panel_on = false; + pp &= ~PANEL_UNLOCK_MASK; + /* ILK workaround: disable reset around power sequence */ + + pp &= ~POWER_TARGET_ON; 
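+	/* Drop panel power, forced VDD and the backlight enable in a single
+	 * PP_CONTROL write; the PP_STATUS wait below then gives the power
+	 * sequencer time to reach its off state. */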
+ pp &= ~EDP_FORCE_VDD; + pp &= ~EDP_BLC_ENABLE; + REG_WRITE(PP_CONTROL, pp); + REG_READ(PP_CONTROL); + DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS)); + + if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) { + DRM_DEBUG_KMS("Error in turning off Panel\n"); + } + + msleep(intel_dp->panel_power_cycle_delay); + DRM_DEBUG_KMS("Over\n"); +} + +static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + u32 pp; + + DRM_DEBUG_KMS("\n"); + /* + * If we enable the backlight right away following a panel power + * on, we may see slight flicker as the panel syncs with the eDP + * link. So delay a bit to make sure the image is solid before + * allowing it to appear. + */ + msleep(300); + pp = REG_READ(PP_CONTROL); + + pp |= EDP_BLC_ENABLE; + REG_WRITE(PP_CONTROL, pp); + gma_backlight_enable(dev); +} + +static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + u32 pp; + + DRM_DEBUG_KMS("\n"); + gma_backlight_disable(dev); + msleep(10); + pp = REG_READ(PP_CONTROL); + + pp &= ~EDP_BLC_ENABLE; + REG_WRITE(PP_CONTROL, pp); + msleep(intel_dp->backlight_off_delay); +} + +static int +cdv_intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder)); + int max_lanes = cdv_intel_dp_max_lane_count(encoder); + struct drm_psb_private *dev_priv = connector->dev->dev_private; + + if (is_edp(encoder) && intel_dp->panel_fixed_mode) { + if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) + return MODE_PANEL; + } + + /* only refuse the mode on non eDP since we have seen some weird eDP panels + which are outside spec tolerances but somehow work by magic */ + if (!is_edp(encoder) && + (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp) + > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))) + return MODE_CLOCK_HIGH; + + if (is_edp(encoder)) { + if (cdv_intel_dp_link_required(mode->clock, 24) + > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)) + return MODE_CLOCK_HIGH; + + } + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + return MODE_OK; +} + +static uint32_t +pack_aux(uint8_t *src, int src_bytes) +{ + int i; + uint32_t v = 0; + + if (src_bytes > 4) + src_bytes = 4; + for (i = 0; i < src_bytes; i++) + v |= ((uint32_t) src[i]) << ((3-i) * 8); + return v; +} + +static void +unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +{ + int i; + if (dst_bytes > 4) + dst_bytes = 4; + for (i = 0; i < dst_bytes; i++) + dst[i] = src >> ((3-i) * 8); +} + +static int +cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + uint32_t output_reg = intel_dp->output_reg; + struct drm_device *dev = encoder->base.dev; + uint32_t ch_ctl = output_reg + 0x10; + uint32_t ch_data = ch_ctl + 4; + int i; + int recv_bytes; + uint32_t status; + uint32_t aux_clock_divider; + int try, precharge; + + /* The clock divider is based off the hrawclk, + * and would like to run at 2MHz. 
So, take the + * hrawclk value and divide by 2 and use that + * On CDV platform it uses 200MHz as hrawclk. + * + */ + aux_clock_divider = 200 / 2; + + precharge = 4; + if (is_edp(encoder)) + precharge = 10; + + if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { + DRM_ERROR("dp_aux_ch not started status 0x%08x\n", + REG_READ(ch_ctl)); + return -EBUSY; + } + + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) + REG_WRITE(ch_data + i, + pack_aux(send + i, send_bytes - i)); + + /* Send the command and wait for it to complete */ + REG_WRITE(ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + for (;;) { + status = REG_READ(ch_ctl); + if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) + break; + udelay(100); + } + + /* Clear done status and any errors */ + REG_WRITE(ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + if (status & DP_AUX_CH_CTL_DONE) + break; + } + + if ((status & DP_AUX_CH_CTL_DONE) == 0) { + DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); + return -EBUSY; + } + + /* Check for timeout or receive error. + * Timeouts occur when the sink is not connected + */ + if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { + DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); + return -EIO; + } + + /* Timeouts occur when the device isn't connected, so they're + * "normal" -- don't fill the kernel log with these */ + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { + DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); + return -ETIMEDOUT; + } + + /* Unload any bytes sent back from the other side */ + recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> + DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + if (recv_bytes > recv_size) + recv_bytes = recv_size; + + for (i = 0; i < recv_bytes; i += 4) + unpack_aux(REG_READ(ch_data + i), + recv + i, recv_bytes - i); + + return recv_bytes; +} + +/* Write data to the aux channel in native mode */ +static int +cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder, + uint16_t address, uint8_t *send, int send_bytes) +{ + int ret; + uint8_t msg[20]; + int msg_bytes; + uint8_t ack; + + if (send_bytes > 16) + return -1; + msg[0] = AUX_NATIVE_WRITE << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = send_bytes - 1; + memcpy(&msg[4], send, send_bytes); + msg_bytes = send_bytes + 4; + for (;;) { + ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1); + if (ret < 0) + return ret; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) + break; + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } + return send_bytes; +} + +/* Write a single byte to the aux channel in native mode */ +static int +cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder, + uint16_t address, uint8_t byte) +{ + return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1); +} + +/* read bytes from a native aux channel */ +static int +cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder, + uint16_t address, uint8_t *recv, int recv_bytes) +{ + uint8_t msg[4]; + int msg_bytes; + uint8_t reply[20]; + int 
reply_bytes; + uint8_t ack; + int ret; + + msg[0] = AUX_NATIVE_READ << 4; + msg[1] = address >> 8; + msg[2] = address & 0xff; + msg[3] = recv_bytes - 1; + + msg_bytes = 4; + reply_bytes = recv_bytes + 1; + + for (;;) { + ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, + reply, reply_bytes); + if (ret == 0) + return -EPROTO; + if (ret < 0) + return ret; + ack = reply[0]; + if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { + memcpy(recv, reply + 1, ret - 1); + return ret - 1; + } + else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) + udelay(100); + else + return -EIO; + } +} + +static int +cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, + uint8_t write_byte, uint8_t *read_byte) +{ + struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; + struct cdv_intel_dp *intel_dp = container_of(adapter, + struct cdv_intel_dp, + adapter); + struct psb_intel_encoder *encoder = intel_dp->encoder; + uint16_t address = algo_data->address; + uint8_t msg[5]; + uint8_t reply[2]; + unsigned retry; + int msg_bytes; + int reply_bytes; + int ret; + + /* Set up the command byte */ + if (mode & MODE_I2C_READ) + msg[0] = AUX_I2C_READ << 4; + else + msg[0] = AUX_I2C_WRITE << 4; + + if (!(mode & MODE_I2C_STOP)) + msg[0] |= AUX_I2C_MOT << 4; + + msg[1] = address >> 8; + msg[2] = address; + + switch (mode) { + case MODE_I2C_WRITE: + msg[3] = 0; + msg[4] = write_byte; + msg_bytes = 5; + reply_bytes = 1; + break; + case MODE_I2C_READ: + msg[3] = 0; + msg_bytes = 4; + reply_bytes = 2; + break; + default: + msg_bytes = 3; + reply_bytes = 1; + break; + } + + for (retry = 0; retry < 5; retry++) { + ret = cdv_intel_dp_aux_ch(encoder, + msg, msg_bytes, + reply, reply_bytes); + if (ret < 0) { + DRM_DEBUG_KMS("aux_ch failed %d\n", ret); + return ret; + } + + switch (reply[0] & AUX_NATIVE_REPLY_MASK) { + case AUX_NATIVE_REPLY_ACK: + /* I2C-over-AUX Reply field is only valid + * when paired with AUX ACK. 
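+			 * A native NACK or DEFER therefore leaves the
+			 * I2C reply field undefined; those cases return
+			 * or retry right below, before the I2C reply
+			 * switch is reached.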
+ */ + break; + case AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("aux_ch native nack\n"); + return -EREMOTEIO; + case AUX_NATIVE_REPLY_DEFER: + udelay(100); + continue; + default: + DRM_ERROR("aux_ch invalid native reply 0x%02x\n", + reply[0]); + return -EREMOTEIO; + } + + switch (reply[0] & AUX_I2C_REPLY_MASK) { + case AUX_I2C_REPLY_ACK: + if (mode == MODE_I2C_READ) { + *read_byte = reply[1]; + } + return reply_bytes - 1; + case AUX_I2C_REPLY_NACK: + DRM_DEBUG_KMS("aux_i2c nack\n"); + return -EREMOTEIO; + case AUX_I2C_REPLY_DEFER: + DRM_DEBUG_KMS("aux_i2c defer\n"); + udelay(100); + break; + default: + DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); + return -EREMOTEIO; + } + } + + DRM_ERROR("too many retries, giving up\n"); + return -EREMOTEIO; +} + +static int +cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int ret; + + DRM_DEBUG_KMS("i2c_init %s\n", name); + + intel_dp->algo.running = false; + intel_dp->algo.address = 0; + intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch; + + memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter)); + intel_dp->adapter.owner = THIS_MODULE; + intel_dp->adapter.class = I2C_CLASS_DDC; + strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); + intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; + intel_dp->adapter.algo_data = &intel_dp->algo; + intel_dp->adapter.dev.parent = &connector->base.kdev; + + if (is_edp(encoder)) + cdv_intel_edp_panel_vdd_on(encoder); + ret = i2c_dp_aux_add_bus(&intel_dp->adapter); + if (is_edp(encoder)) + cdv_intel_edp_panel_vdd_off(encoder); + + return ret; +} + +void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, + struct drm_display_mode *adjusted_mode) +{ + adjusted_mode->hdisplay = fixed_mode->hdisplay; + adjusted_mode->hsync_start = fixed_mode->hsync_start; + adjusted_mode->hsync_end = fixed_mode->hsync_end; + adjusted_mode->htotal = fixed_mode->htotal; + + adjusted_mode->vdisplay = fixed_mode->vdisplay; + adjusted_mode->vsync_start = fixed_mode->vsync_start; + adjusted_mode->vsync_end = fixed_mode->vsync_end; + adjusted_mode->vtotal = fixed_mode->vtotal; + + adjusted_mode->clock = fixed_mode->clock; + + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); +} + +static bool +cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_psb_private *dev_priv = encoder->dev->dev_private; + struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + int lane_count, clock; + int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder); + int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; + static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + int refclock = mode->clock; + int bpp = 24; + + if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) { + cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); + refclock = intel_dp->panel_fixed_mode->clock; + bpp = dev_priv->edp.bpp; + } + + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = max_clock; clock >= 0; clock--) { + int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count); + + if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) { + intel_dp->link_bw = bws[clock]; + intel_dp->lane_count = lane_count; + adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("Display port link bw %02x lane " + "count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + return true; + } + } + } + if (is_edp(intel_encoder)) { + /* okay we failed just pick the highest */ + intel_dp->lane_count = max_lane_count; + intel_dp->link_bw = bws[max_clock]; + adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("Force picking display port link bw %02x lane " + "count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + + return true; + } + return false; +} + +struct cdv_intel_dp_m_n { + uint32_t tu; + uint32_t gmch_m; + uint32_t gmch_n; + uint32_t link_m; + uint32_t link_n; +}; + +static void +cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den) +{ + /* + while (*num > 0xffffff || *den > 0xffffff) { + *num >>= 1; + *den >>= 1; + }*/ + uint64_t value, m; + m = *num; + value = m * (0x800000); + m = do_div(value, *den); + *num = value; + *den = 0x800000; +} + +static void +cdv_intel_dp_compute_m_n(int bpp, + int nlanes, + int pixel_clock, + int link_clock, + struct cdv_intel_dp_m_n *m_n) +{ + m_n->tu = 64; + m_n->gmch_m = (pixel_clock * bpp + 7) >> 3; + m_n->gmch_n = link_clock * nlanes; + cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); + m_n->link_m = pixel_clock; + m_n->link_n = link_clock; + cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n); +} + +void +cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct drm_device *dev = crtc->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_encoder *encoder; + struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + int lane_count = 4, bpp = 24; + struct cdv_intel_dp_m_n m_n; + int pipe = intel_crtc->pipe; + + /* + * Find the lane count in the intel_encoder private + */ + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct psb_intel_encoder *intel_encoder; + struct cdv_intel_dp *intel_dp; + + if (encoder->crtc != crtc) + continue; + + intel_encoder = to_psb_intel_encoder(encoder); + intel_dp = intel_encoder->dev_priv; + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + lane_count = intel_dp->lane_count; + break; + } else if (is_edp(intel_encoder)) { + lane_count = intel_dp->lane_count; + bpp = dev_priv->edp.bpp; + break; + } + } + + /* + * Compute the GMCH and Link ratios. The '3' here is + * the number of bytes_per_pixel post-LUT, which we always + * set up for 8-bits of R/G/B, or 3 bytes total. 
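+ * + * For example (numbers illustrative only): a 148500 kHz pixel clock + * at 24bpp over 4 lanes of a 2.7 Gb/s link (link clock 270000 kHz) + * gives gmch_m/gmch_n = (148500 * 24 / 8) / (270000 * 4) = + * 445500 / 1080000, which cdv_intel_reduce_ratio() rescales to a + * fixed denominator of 0x800000 so both values fit the 24-bit M/N + * registers.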
+ */ + cdv_intel_dp_compute_m_n(bpp, lane_count, + mode->clock, adjusted_mode->clock, &m_n); + + { + REG_WRITE(PIPE_GMCH_DATA_M(pipe), + ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | + m_n.gmch_m); + REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); + REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); + REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); + } +} + +static void +cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct drm_crtc *crtc = encoder->crtc; + struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc); + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + struct drm_device *dev = encoder->dev; + + intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; + intel_dp->DP |= intel_dp->color_range; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + + intel_dp->DP |= DP_LINK_TRAIN_OFF; + + switch (intel_dp->lane_count) { + case 1: + intel_dp->DP |= DP_PORT_WIDTH_1; + break; + case 2: + intel_dp->DP |= DP_PORT_WIDTH_2; + break; + case 4: + intel_dp->DP |= DP_PORT_WIDTH_4; + break; + } + if (intel_dp->has_audio) + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; + + memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); + intel_dp->link_configuration[0] = intel_dp->link_bw; + intel_dp->link_configuration[1] = intel_dp->lane_count; + + /* + * Check for DPCD version > 1.1 and enhanced framing support + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { + intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + intel_dp->DP |= DP_ENHANCED_FRAMING; + } + + /* CPT DP's pipe select is decided in TRANS_DP_CTL */ + if (intel_crtc->pipe == 1) + intel_dp->DP |= DP_PIPEB_SELECT; + + REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN)); + DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP); + if (is_edp(intel_encoder)) { + uint32_t pfit_control; + cdv_intel_edp_panel_on(intel_encoder); + + if (mode->hdisplay != adjusted_mode->hdisplay || + mode->vdisplay != adjusted_mode->vdisplay) + pfit_control = PFIT_ENABLE; + else + pfit_control = 0; + + pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT; + + REG_WRITE(PFIT_CONTROL, pfit_control); + } +} + + +/* If the sink supports it, try to set the power state appropriately */ +static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int ret, i; + + /* Should have a valid DPCD by this point */ + if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + + if (mode != DRM_MODE_DPMS_ON) { + ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER, + DP_SET_POWER_D3); + if (ret != 1) + DRM_DEBUG_DRIVER("failed to write sink power state\n"); + } else { + /* + * When turning on, we need to retry for 1ms to give the sink + * time to wake up. 
+ */ + for (i = 0; i < 3; i++) { + ret = cdv_intel_dp_aux_native_write_1(encoder, + DP_SET_POWER, + DP_SET_POWER_D0); + if (ret == 1) + break; + udelay(1000); + } + } +} + +static void cdv_intel_dp_prepare(struct drm_encoder *encoder) +{ + struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + int edp = is_edp(intel_encoder); + + if (edp) { + cdv_intel_edp_backlight_off(intel_encoder); + cdv_intel_edp_panel_off(intel_encoder); + cdv_intel_edp_panel_vdd_on(intel_encoder); + } + /* Wake up the sink first */ + cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON); + cdv_intel_dp_link_down(intel_encoder); + if (edp) + cdv_intel_edp_panel_vdd_off(intel_encoder); +} + +static void cdv_intel_dp_commit(struct drm_encoder *encoder) +{ + struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + int edp = is_edp(intel_encoder); + + if (edp) + cdv_intel_edp_panel_on(intel_encoder); + cdv_intel_dp_start_link_train(intel_encoder); + cdv_intel_dp_complete_link_train(intel_encoder); + if (edp) + cdv_intel_edp_backlight_on(intel_encoder); +} + +static void +cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode) +{ + struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder); + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + struct drm_device *dev = encoder->dev; + uint32_t dp_reg = REG_READ(intel_dp->output_reg); + int edp = is_edp(intel_encoder); + + if (mode != DRM_MODE_DPMS_ON) { + if (edp) { + cdv_intel_edp_backlight_off(intel_encoder); + cdv_intel_edp_panel_vdd_on(intel_encoder); + } + cdv_intel_dp_sink_dpms(intel_encoder, mode); + cdv_intel_dp_link_down(intel_encoder); + if (edp) { + cdv_intel_edp_panel_vdd_off(intel_encoder); + cdv_intel_edp_panel_off(intel_encoder); + } + } else { + if (edp) + cdv_intel_edp_panel_on(intel_encoder); + cdv_intel_dp_sink_dpms(intel_encoder, mode); + if (!(dp_reg & DP_PORT_EN)) { + cdv_intel_dp_start_link_train(intel_encoder); + cdv_intel_dp_complete_link_train(intel_encoder); + } + if (edp) + cdv_intel_edp_backlight_on(intel_encoder); + } +} + +/* + * Native read with retry for link status and receiver capability reads for + * cases where the sink may still be asleep. + */ +static bool +cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address, + uint8_t *recv, int recv_bytes) +{ + int ret, i; + + /* + * Sinks are *supposed* to come up within 1ms from an off state, + * but we're also supposed to retry 3 times per the spec. + */ + for (i = 0; i < 3; i++) { + ret = cdv_intel_dp_aux_native_read(encoder, address, recv, + recv_bytes); + if (ret == recv_bytes) + return true; + udelay(1000); + } + + return false; +} + +/* + * Fetch AUX CH registers 0x202 - 0x207 which contain + * link status information + */ +static bool +cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + return cdv_intel_dp_aux_native_read_retry(encoder, + DP_LANE0_1_STATUS, + intel_dp->link_status, + DP_LINK_STATUS_SIZE); +} + +static uint8_t +cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int r) +{ + return link_status[r - DP_LANE0_1_STATUS]; +} + +static uint8_t +cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
+ DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : + DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); + uint8_t l = cdv_intel_dp_link_status(link_status, i); + + return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; +} + +static uint8_t +cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? + DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : + DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); + uint8_t l = cdv_intel_dp_link_status(link_status, i); + + return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; +} + + +#if 0 +static char *voltage_names[] = { + "0.4V", "0.6V", "0.8V", "1.2V" +}; +static char *pre_emph_names[] = { + "0dB", "3.5dB", "6dB", "9.5dB" +}; +static char *link_train_names[] = { + "pattern 1", "pattern 2", "idle", "off" +}; +#endif + +#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 +/* +static uint8_t +cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_400: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_600: + return DP_TRAIN_PRE_EMPHASIS_6; + case DP_TRAIN_VOLTAGE_SWING_800: + return DP_TRAIN_PRE_EMPHASIS_3_5; + case DP_TRAIN_VOLTAGE_SWING_1200: + default: + return DP_TRAIN_PRE_EMPHASIS_0; + } +} +*/ +static void +cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + uint8_t v = 0; + uint8_t p = 0; + int lane; + + for (lane = 0; lane < intel_dp->lane_count; lane++) { + uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane); + uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + if (v >= CDV_DP_VOLTAGE_MAX) + v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; + + if (p == DP_TRAIN_PRE_EMPHASIS_MASK) + p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + intel_dp->train_set[lane] = v | p; +} + + +static uint8_t +cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_LANE0_1_STATUS + (lane >> 1); + int s = (lane & 1) * 4; + uint8_t l = cdv_intel_dp_link_status(link_status, i); + + return (l >> s) & 0xf; +} + +/* Check that clock recovery is done on all channels */ +static bool +cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count) +{ + int lane; + uint8_t lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = cdv_intel_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return false; + } + return true; +} + +/* Check to see if channel eq is done on all channels */ +#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ + DP_LANE_CHANNEL_EQ_DONE|\ + DP_LANE_SYMBOL_LOCKED) +static bool +cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + uint8_t lane_align; + uint8_t lane_status; + int lane; + + lane_align = cdv_intel_dp_link_status(intel_dp->link_status, + DP_LANE_ALIGN_STATUS_UPDATED); + if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) + return false; + for (lane = 0; lane < intel_dp->lane_count; lane++) { + lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane); + if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) + return false; + } + return true; +} + +static bool +cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder, + uint32_t dp_reg_value, + uint8_t
dp_train_pat) +{ + + struct drm_device *dev = encoder->base.dev; + int ret; + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + + REG_WRITE(intel_dp->output_reg, dp_reg_value); + REG_READ(intel_dp->output_reg); + + ret = cdv_intel_dp_aux_native_write_1(encoder, + DP_TRAINING_PATTERN_SET, + dp_train_pat); + + if (ret != 1) { + DRM_DEBUG_KMS("Failure in setting link pattern %x\n", + dp_train_pat); + return false; + } + + return true; +} + + +static bool +cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder, + uint8_t dp_train_pat) +{ + + int ret; + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + + ret = cdv_intel_dp_aux_native_write(encoder, + DP_TRAINING_LANE0_SET, + intel_dp->train_set, + intel_dp->lane_count); + + if (ret != intel_dp->lane_count) { + DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt = %d\n", + intel_dp->train_set[0], intel_dp->lane_count); + return false; + } + return true; +} + +static void +cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level) +{ + struct drm_device *dev = encoder->base.dev; + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + struct ddi_regoff *ddi_reg; + int vswing, premph, index; + + if (intel_dp->output_reg == DP_B) + ddi_reg = &ddi_DP_train_table[0]; + else + ddi_reg = &ddi_DP_train_table[1]; + + vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK); + premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + if (vswing + premph > 3) + return; +#ifdef CDV_FAST_LINK_TRAIN + return; +#endif + cdv_sb_reset(dev); + /* ;Swing voltage programming + ;gfx_dpio_set_reg(0xc058, 0x0505313A) */ + cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A); + + /* ;gfx_dpio_set_reg(0x8154, 0x43406055) */ + cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055); + + /* ;gfx_dpio_set_reg(0x8148, 0x55338954) + * The VSwing_PreEmph table is also considered based on the vswing/premph + */ + index = (vswing + premph) * 2; + if (premph == 1 && vswing == 1) { + cdv_sb_write(dev, ddi_reg->VSwing2, 0x55738954); + } else + cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]); + + /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */ + if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200) + cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040); + else + cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040); + + /* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */ + /* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */ + + /* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */ + cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055); + + /* ;Pre emphasis programming + * ;gfx_dpio_set_reg(0xc02c, 0x1f030040) + */ + cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040); + + /* ;gfx_dpio_set_reg(0x8124, 0x00004000) */ + index = 2 * premph + 1; + cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]); +} + + +/* Enable corresponding port and start training pattern 1 */ +static void +cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int i; + uint8_t voltage; + bool clock_recovery = false; + int tries; + u32 reg; + uint32_t DP = intel_dp->DP; + + DP |= DP_PORT_EN; + DP &= ~DP_LINK_TRAIN_MASK; + + reg = DP; + reg |= DP_LINK_TRAIN_PAT_1; + /* Enable output, wait for it to become active */ + REG_WRITE(intel_dp->output_reg, reg); + REG_READ(intel_dp->output_reg); + psb_intel_wait_for_vblank(dev); + + DRM_DEBUG_KMS("Link config\n"); + /* Write the link configuration data */
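+ /* Byte 0 of link_configuration is the DPCD link rate (per the + * standard DPCD encoding, DP_LINK_BW_1_62 is 0x06 and DP_LINK_BW_2_7 + * is 0x0a) and byte 1 is the lane count, with + * DP_LANE_COUNT_ENHANCED_FRAME_EN (0x80) OR'd in by + * cdv_intel_dp_mode_set() when the sink advertises enhanced + * framing. */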
+ cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET, + intel_dp->link_configuration, + 2); + + memset(intel_dp->train_set, 0, 4); + voltage = 0; + tries = 0; + clock_recovery = false; + + DRM_DEBUG_KMS("Start train\n"); + reg = DP | DP_LINK_TRAIN_PAT_1; + + + for (;;) { + /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */ + DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n", + intel_dp->train_set[0], + intel_dp->link_configuration[0], + intel_dp->link_configuration[1]); + + if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) { + DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n"); + } + cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]); + /* Write the voltage swing and pre-emphasis levels for each lane */ + + cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1); + + udelay(200); + if (!cdv_intel_dp_get_link_status(encoder)) + break; + + DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n", + intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2], + intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]); + + if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("PT1 train is done\n"); + clock_recovery = true; + break; + } + + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == intel_dp->lane_count) + break; + + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new intel_dp->train_set as requested by target */ + cdv_intel_get_adjust_train(encoder); + + } + + if (!clock_recovery) { + DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n", intel_dp->train_set[0]); + } + + intel_dp->DP = DP; +} + +static void +cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + bool channel_eq = false; + int tries, cr_tries; + u32 reg; + uint32_t DP = intel_dp->DP; + + /* channel equalization */ + tries = 0; + cr_tries = 0; + channel_eq = false; + + DRM_DEBUG_KMS("\n"); + reg = DP | DP_LINK_TRAIN_PAT_2; + + for (;;) { + + DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n", + intel_dp->train_set[0], + intel_dp->link_configuration[0], + intel_dp->link_configuration[1]); + /* channel eq pattern */ + + if (!cdv_intel_dp_set_link_train(encoder, reg, + DP_TRAINING_PATTERN_2)) { + DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n"); + } + /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */ + + if (cr_tries > 5) { + DRM_ERROR("failed to train DP, aborting\n"); + cdv_intel_dp_link_down(encoder); + break; + } + + cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]); + + cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2); + + udelay(1000); + if (!cdv_intel_dp_get_link_status(encoder)) + break; + + DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n", + intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2], + intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]); + + /* Make sure clock is still ok */ + if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { +
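/* Clock recovery was lost while running channel EQ: go back and + * redo the full pattern 1 training rather than keep equalizing a + * link that has lost lock. */ +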
cdv_intel_dp_start_link_train(encoder); + cr_tries++; + continue; + } + + if (cdv_intel_channel_eq_ok(encoder)) { + DRM_DEBUG_KMS("PT2 train is done\n"); + channel_eq = true; + break; + } + + /* Try 5 times, then try clock recovery if that fails */ + if (tries > 5) { + cdv_intel_dp_link_down(encoder); + cdv_intel_dp_start_link_train(encoder); + tries = 0; + cr_tries++; + continue; + } + + /* Compute new intel_dp->train_set as requested by target */ + cdv_intel_get_adjust_train(encoder); + ++tries; + + } + + reg = DP | DP_LINK_TRAIN_OFF; + + REG_WRITE(intel_dp->output_reg, reg); + REG_READ(intel_dp->output_reg); + cdv_intel_dp_aux_native_write_1(encoder, + DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); +} + +static void +cdv_intel_dp_link_down(struct psb_intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + uint32_t DP = intel_dp->DP; + + if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) + return; + + DRM_DEBUG_KMS("\n"); + + DP &= ~DP_LINK_TRAIN_MASK; + REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); + REG_READ(intel_dp->output_reg); + + msleep(17); + + REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); + REG_READ(intel_dp->output_reg); +} + +static enum drm_connector_status +cdv_dp_detect(struct psb_intel_encoder *encoder) +{ + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + enum drm_connector_status status; + + status = connector_status_disconnected; + if (cdv_intel_dp_aux_native_read(encoder, 0x000, intel_dp->dpcd, + sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) { + if (intel_dp->dpcd[DP_DPCD_REV] != 0) + status = connector_status_connected; + } + if (status == connector_status_connected) + DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], + intel_dp->dpcd[2], intel_dp->dpcd[3]); + return status; +} + +/** + * Detect a DP connection by reading the sink's DPCD receiver + * capability block over the AUX channel. + * + * \return connector_status_connected if a sink responds. + * \return connector_status_disconnected otherwise.
+ */ +static enum drm_connector_status +cdv_intel_dp_detect(struct drm_connector *connector, bool force) +{ + struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + enum drm_connector_status status; + struct edid *edid = NULL; + int edp = is_edp(encoder); + + intel_dp->has_audio = false; + + if (edp) + cdv_intel_edp_panel_vdd_on(encoder); + status = cdv_dp_detect(encoder); + if (status != connector_status_connected) { + if (edp) + cdv_intel_edp_panel_vdd_off(encoder); + return status; + } + + if (intel_dp->force_audio) { + intel_dp->has_audio = intel_dp->force_audio > 0; + } else { + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + intel_dp->has_audio = drm_detect_monitor_audio(edid); + kfree(edid); + } + } + if (edp) + cdv_intel_edp_panel_vdd_off(encoder); + + return connector_status_connected; +} + +static int cdv_intel_dp_get_modes(struct drm_connector *connector) +{ + struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv; + struct edid *edid = NULL; + int ret = 0; + int edp = is_edp(intel_encoder); + + + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + } + + if (is_edp(intel_encoder)) { + struct drm_device *dev = connector->dev; + struct drm_psb_private *dev_priv = dev->dev_private; + + cdv_intel_edp_panel_vdd_off(intel_encoder); + if (ret) { + if (edp && !intel_dp->panel_fixed_mode) { + struct drm_display_mode *newmode; + list_for_each_entry(newmode, &connector->probed_modes, + head) { + if (newmode->type & DRM_MODE_TYPE_PREFERRED) { + intel_dp->panel_fixed_mode = + drm_mode_duplicate(dev, newmode); + break; + } + } + } + + return ret; + } + if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) { + intel_dp->panel_fixed_mode = + drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); + if (intel_dp->panel_fixed_mode) { + intel_dp->panel_fixed_mode->type |= + DRM_MODE_TYPE_PREFERRED; + } + } + if (intel_dp->panel_fixed_mode != NULL) { + struct drm_display_mode *mode; + mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); + drm_mode_probed_add(connector, mode); + return 1; + } + } + + return ret; +} + +static bool +cdv_intel_dp_detect_audio(struct drm_connector *connector) +{ + struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + struct edid *edid; + bool has_audio = false; + int edp = is_edp(encoder); + + if (edp) + cdv_intel_edp_panel_vdd_on(encoder); + + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + has_audio = drm_detect_monitor_audio(edid); + kfree(edid); + } + if (edp) + cdv_intel_edp_panel_vdd_off(encoder); + + return has_audio; +} + +static int +cdv_intel_dp_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct drm_psb_private *dev_priv = connector->dev->dev_private; + struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = encoder->dev_priv; + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == dev_priv->force_audio_property) { + int i = val; + bool has_audio; + + if (i == intel_dp->force_audio) + return 0; + + intel_dp->force_audio = i; + + if (i == 0) + has_audio = 
cdv_intel_dp_detect_audio(connector); + else + has_audio = i > 0; + + if (has_audio == intel_dp->has_audio) + return 0; + + intel_dp->has_audio = has_audio; + goto done; + } + + if (property == dev_priv->broadcast_rgb_property) { + if (val == !!intel_dp->color_range) + return 0; + + intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; + goto done; + } + + return -EINVAL; + +done: + if (encoder->base.crtc) { + struct drm_crtc *crtc = encoder->base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; +} + +static void +cdv_intel_dp_destroy(struct drm_connector *connector) +{ + struct psb_intel_encoder *psb_intel_encoder = + psb_intel_attached_encoder(connector); + struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv; + + if (is_edp(psb_intel_encoder)) { + /* cdv_intel_panel_destroy_backlight(connector->dev); */ + if (intel_dp->panel_fixed_mode) { + kfree(intel_dp->panel_fixed_mode); + intel_dp->panel_fixed_mode = NULL; + } + } + i2c_del_adapter(&intel_dp->adapter); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = { + .dpms = cdv_intel_dp_dpms, + .mode_fixup = cdv_intel_dp_mode_fixup, + .prepare = cdv_intel_dp_prepare, + .mode_set = cdv_intel_dp_mode_set, + .commit = cdv_intel_dp_commit, +}; + +static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = cdv_intel_dp_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = cdv_intel_dp_set_property, + .destroy = cdv_intel_dp_destroy, +}; + +static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = { + .get_modes = cdv_intel_dp_get_modes, + .mode_valid = cdv_intel_dp_mode_valid, + .best_encoder = psb_intel_best_encoder, +}; + +static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = { + .destroy = cdv_intel_dp_encoder_destroy, +}; + + +static void cdv_intel_dp_add_properties(struct drm_connector *connector) +{ + cdv_intel_attach_force_audio_property(connector); + cdv_intel_attach_broadcast_rgb_property(connector); +} + +/* check the VBT to see whether the eDP is on the DP-C port */ +static bool cdv_intel_dpc_is_edp(struct drm_device *dev) +{ + struct drm_psb_private *dev_priv = dev->dev_private; + struct child_device_config *p_child; + int i; + + if (!dev_priv->child_dev_num) + return false; + + for (i = 0; i < dev_priv->child_dev_num; i++) { + p_child = dev_priv->child_dev + i; + + if (p_child->dvo_port == PORT_IDPC && + p_child->device_type == DEVICE_TYPE_eDP) + return true; + } + return false; +} + +/* Cedarview display clock gating + + We need to disable this to get correct behaviour while enabling + DP/eDP.
TODO - investigate if we can turn it back to normality + after enabling */ +static void cdv_disable_intel_clock_gating(struct drm_device *dev) +{ + u32 reg_value; + reg_value = REG_READ(DSPCLK_GATE_D); + + reg_value |= (DPUNIT_PIPEB_GATE_DISABLE | + DPUNIT_PIPEA_GATE_DISABLE | + DPCUNIT_CLOCK_GATE_DISABLE | + DPLSUNIT_CLOCK_GATE_DISABLE | + DPOUNIT_CLOCK_GATE_DISABLE | + DPIOUNIT_CLOCK_GATE_DISABLE); + + REG_WRITE(DSPCLK_GATE_D, reg_value); + + udelay(500); +} + +void +cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg) +{ + struct psb_intel_encoder *psb_intel_encoder; + struct psb_intel_connector *psb_intel_connector; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct cdv_intel_dp *intel_dp; + const char *name = NULL; + int type = DRM_MODE_CONNECTOR_DisplayPort; + + psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL); + if (!psb_intel_encoder) + return; + psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL); + if (!psb_intel_connector) + goto err_connector; + intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL); + if (!intel_dp) + goto err_priv; + + if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev)) + type = DRM_MODE_CONNECTOR_eDP; + + connector = &psb_intel_connector->base; + encoder = &psb_intel_encoder->base; + + drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type); + drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); + + psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder); + + if (type == DRM_MODE_CONNECTOR_DisplayPort) + psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + else + psb_intel_encoder->type = INTEL_OUTPUT_EDP; + + + psb_intel_encoder->dev_priv=intel_dp; + intel_dp->encoder = psb_intel_encoder; + intel_dp->output_reg = output_reg; + + drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs); + drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs); + + connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + + drm_sysfs_connector_add(connector); + + /* Set up the DDC bus. 
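The AUX channel is registered as an i2c adapter by + * cdv_intel_dp_i2c_init() below, so DDC/EDID transactions are carried + * over AUX rather than a GPIO GMBUS pin.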
*/ + switch (output_reg) { + case DP_B: + name = "DPDDC-B"; + psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT); + break; + case DP_C: + name = "DPDDC-C"; + psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT); + break; + } + + cdv_disable_intel_clock_gating(dev); + + cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name); + /* FIXME: check for failure */ + cdv_intel_dp_add_properties(connector); + + if (is_edp(psb_intel_encoder)) { + int ret; + struct edp_power_seq cur; + u32 pp_on, pp_off, pp_div; + u32 pwm_ctrl; + + pp_on = REG_READ(PP_CONTROL); + pp_on &= ~PANEL_UNLOCK_MASK; + pp_on |= PANEL_UNLOCK_REGS; + + REG_WRITE(PP_CONTROL, pp_on); + + pwm_ctrl = REG_READ(BLC_PWM_CTL2); + pwm_ctrl |= PWM_PIPE_B; + REG_WRITE(BLC_PWM_CTL2, pwm_ctrl); + + pp_on = REG_READ(PP_ON_DELAYS); + pp_off = REG_READ(PP_OFF_DELAYS); + pp_div = REG_READ(PP_DIVISOR); + + /* Pull timing values out of registers */ + cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; + + cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; + + cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; + + cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; + + cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> + PANEL_POWER_CYCLE_DELAY_SHIFT); + + DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); + + + intel_dp->panel_power_up_delay = cur.t1_t3 / 10; + intel_dp->backlight_on_delay = cur.t8 / 10; + intel_dp->backlight_off_delay = cur.t9 / 10; + intel_dp->panel_power_down_delay = cur.t10 / 10; + intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100; + + DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", + intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, + intel_dp->panel_power_cycle_delay); + + DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", + intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + + + cdv_intel_edp_panel_vdd_on(psb_intel_encoder); + ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV, + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + cdv_intel_edp_panel_vdd_off(psb_intel_encoder); + if (ret == 0) { + /* if this fails, presume the device is a ghost */ + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + cdv_intel_dp_encoder_destroy(encoder); + cdv_intel_dp_destroy(connector); + goto err_priv; + } else { + DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], + intel_dp->dpcd[2], intel_dp->dpcd[3]); + + } + /* The CDV reference driver moves panel backlight setup into the displays that + have a backlight: this is a good idea and one we should probably adopt; however, + we need to migrate all the drivers before we can do that */ + /*cdv_intel_panel_setup_backlight(dev); */ + } + return; + +err_priv: + kfree(psb_intel_connector); +err_connector: + kfree(psb_intel_encoder); +} diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index a86f87b9ddde..7272a461edfe 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -139,8 +139,6 @@ static enum drm_connector_status cdv_hdmi_detect( { struct psb_intel_encoder *psb_intel_encoder = psb_intel_attached_encoder(connector); - struct psb_intel_connector *psb_intel_connector = - to_psb_intel_connector(connector); struct
mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; struct edid *edid = NULL; enum drm_connector_status status = connector_status_disconnected; @@ -157,8 +155,6 @@ static enum drm_connector_status cdv_hdmi_detect( hdmi_priv->has_hdmi_audio = drm_detect_monitor_audio(edid); } - - psb_intel_connector->base.display_info.raw_edid = NULL; kfree(edid); } return status; @@ -352,9 +348,11 @@ void cdv_hdmi_init(struct drm_device *dev, switch (reg) { case SDVOB: ddc_bus = GPIOE; + psb_intel_encoder->ddi_select = DDI0_SELECT; break; case SDVOC: ddc_bus = GPIOD; + psb_intel_encoder->ddi_select = DDI1_SELECT; break; default: DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); diff --git a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c index c7f9468b74ba..b362dd39bf5a 100644 --- a/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -506,16 +506,8 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector, property, value)) return -1; - else { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct drm_psb_private *dev_priv = - encoder->dev->dev_private; - struct backlight_device *bd = - dev_priv->backlight_device; - bd->props.brightness = value; - backlight_update_status(bd); -#endif - } + else + gma_backlight_set(encoder->dev, value); } else if (!strcmp(property->name, "DPMS") && encoder) { struct drm_encoder_helper_funcs *helpers = encoder->helper_private; diff --git a/trunk/drivers/gpu/drm/gma500/framebuffer.c b/trunk/drivers/gpu/drm/gma500/framebuffer.c index 5732b5702e1c..884ba73ac6ce 100644 --- a/trunk/drivers/gpu/drm/gma500/framebuffer.c +++ b/trunk/drivers/gpu/drm/gma500/framebuffer.c @@ -764,6 +764,13 @@ static void psb_setup_outputs(struct drm_device *dev) crtc_mask = dev_priv->ops->hdmi_mask; clone_mask = (1 << INTEL_OUTPUT_HDMI); break; + case INTEL_OUTPUT_DISPLAYPORT: + crtc_mask = (1 << 0) | (1 << 1); + clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT); + break; + case INTEL_OUTPUT_EDP: + crtc_mask = (1 << 1); + clone_mask = (1 << INTEL_OUTPUT_EDP); } encoder->possible_crtcs = crtc_mask; encoder->possible_clones = diff --git a/trunk/drivers/gpu/drm/gma500/gem.c b/trunk/drivers/gpu/drm/gma500/gem.c index fc7d144bc2d3..df20546a2a34 100644 --- a/trunk/drivers/gpu/drm/gma500/gem.c +++ b/trunk/drivers/gpu/drm/gma500/gem.c @@ -36,7 +36,12 @@ int psb_gem_init_object(struct drm_gem_object *obj) void psb_gem_free_object(struct drm_gem_object *obj) { struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); - drm_gem_object_release_wrap(obj); + + /* Remove the list map if one is present */ + if (obj->map_list.map) + drm_gem_free_mmap_offset(obj); + drm_gem_object_release(obj); + /* This must occur last as it frees up the memory of the GEM object */ psb_gtt_free_range(obj->dev, gtt); } @@ -77,7 +82,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, /* Make it mmapable */ if (!obj->map_list.map) { - ret = gem_create_mmap_offset(obj); + ret = drm_gem_create_mmap_offset(obj); if (ret) goto out; } diff --git a/trunk/drivers/gpu/drm/gma500/gem_glue.c b/trunk/drivers/gpu/drm/gma500/gem_glue.c deleted file mode 100644 index 3c17634f6061..000000000000 --- a/trunk/drivers/gpu/drm/gma500/gem_glue.c +++ /dev/null @@ -1,90 +0,0 @@ -/************************************************************************** - * Copyright (c) 2011, Intel Corporation. - * All Rights Reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - **************************************************************************/ - -#include -#include -#include "gem_glue.h" - -void drm_gem_object_release_wrap(struct drm_gem_object *obj) -{ - /* Remove the list map if one is present */ - if (obj->map_list.map) { - struct drm_gem_mm *mm = obj->dev->mm_private; - struct drm_map_list *list = &obj->map_list; - drm_ht_remove_item(&mm->offset_hash, &list->hash); - drm_mm_put_block(list->file_offset_node); - kfree(list->map); - list->map = NULL; - } - drm_gem_object_release(obj); -} - -/** - * gem_create_mmap_offset - invent an mmap offset - * @obj: our object - * - * Standard implementation of offset generation for mmap as is - * duplicated in several drivers. This belongs in GEM. - */ -int gem_create_mmap_offset(struct drm_gem_object *obj) -{ - struct drm_device *dev = obj->dev; - struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list; - struct drm_local_map *map; - int ret; - - list = &obj->map_list; - list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); - if (list->map == NULL) - return -ENOMEM; - map = list->map; - map->type = _DRM_GEM; - map->size = obj->size; - map->handle = obj; - - list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - obj->size / PAGE_SIZE, 0, 0); - if (!list->file_offset_node) { - dev_err(dev->dev, "failed to allocate offset for bo %d\n", - obj->name); - ret = -ENOSPC; - goto free_it; - } - list->file_offset_node = drm_mm_get_block(list->file_offset_node, - obj->size / PAGE_SIZE, 0); - if (!list->file_offset_node) { - ret = -ENOMEM; - goto free_it; - } - list->hash.key = list->file_offset_node->start; - ret = drm_ht_insert_item(&mm->offset_hash, &list->hash); - if (ret) { - dev_err(dev->dev, "failed to add to map hash\n"); - goto free_mm; - } - return 0; - -free_mm: - drm_mm_put_block(list->file_offset_node); -free_it: - kfree(list->map); - list->map = NULL; - return ret; -} diff --git a/trunk/drivers/gpu/drm/gma500/gem_glue.h b/trunk/drivers/gpu/drm/gma500/gem_glue.h deleted file mode 100644 index ce5ce30f74db..000000000000 --- a/trunk/drivers/gpu/drm/gma500/gem_glue.h +++ /dev/null @@ -1,2 +0,0 @@ -extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); -extern int gem_create_mmap_offset(struct drm_gem_object *obj); diff --git a/trunk/drivers/gpu/drm/gma500/intel_bios.c b/trunk/drivers/gpu/drm/gma500/intel_bios.c index 8d7caf0f363e..4fb79cf00ed8 100644 --- a/trunk/drivers/gpu/drm/gma500/intel_bios.c +++ b/trunk/drivers/gpu/drm/gma500/intel_bios.c @@ -54,6 +54,98 @@ static void *find_section(struct bdb_header *bdb, int section_id) return NULL; } +static void +parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb) +{ + struct bdb_edp *edp; + struct edp_power_seq *edp_pps; + struct edp_link_params *edp_link_params; + uint8_t panel_type; + + edp = find_section(bdb, BDB_EDP); + + 
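/* Default to 18bpp so the eDP bandwidth math in + * cdv_intel_dp_mode_fixup() has a sane floor even when the VBT + * carries no eDP block. */ +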
dev_priv->edp.bpp = 18; + if (!edp) { + if (dev_priv->edp.support) { + DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n", + dev_priv->edp.bpp); + } + return; + } + + panel_type = dev_priv->panel_type; + switch ((edp->color_depth >> (panel_type * 2)) & 3) { + case EDP_18BPP: + dev_priv->edp.bpp = 18; + break; + case EDP_24BPP: + dev_priv->edp.bpp = 24; + break; + case EDP_30BPP: + dev_priv->edp.bpp = 30; + break; + } + + /* Get the eDP sequencing and link info */ + edp_pps = &edp->power_seqs[panel_type]; + edp_link_params = &edp->link_params[panel_type]; + + dev_priv->edp.pps = *edp_pps; + + DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, + dev_priv->edp.pps.t9, dev_priv->edp.pps.t10, + dev_priv->edp.pps.t11_t12); + + dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : + DP_LINK_BW_1_62; + switch (edp_link_params->lanes) { + case 0: + dev_priv->edp.lanes = 1; + break; + case 1: + dev_priv->edp.lanes = 2; + break; + case 3: + default: + dev_priv->edp.lanes = 4; + break; + } + DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n", + dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp); + + switch (edp_link_params->preemphasis) { + case 0: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; + break; + case 1: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; + break; + case 2: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; + break; + case 3: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; + break; + } + switch (edp_link_params->vswing) { + case 0: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; + break; + case 1: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; + break; + case 2: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; + break; + case 3: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; + break; + } + DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n", + dev_priv->edp.vswing, dev_priv->edp.preemphasis); +} + static u16 get_blocksize(void *p) { @@ -154,6 +246,8 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv, return; dev_priv->lvds_dither = lvds_options->pixel_dither; + dev_priv->panel_type = lvds_options->panel_type; + if (lvds_options->panel_type == 0xff) return; @@ -340,6 +434,9 @@ parse_driver_features(struct drm_psb_private *dev_priv, if (!driver) return; + if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) + dev_priv->edp.support = 1; + /* This bit means to use 96Mhz for DPLL_A or not */ if (driver->primary_lfp_id) dev_priv->dplla_96mhz = true; @@ -437,6 +534,9 @@ int psb_intel_init_bios(struct drm_device *dev) size_t size; int i; + + dev_priv->panel_type = 0xff; + /* XXX Should this validation be moved to intel_opregion.c? 
*/ if (dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; @@ -477,6 +577,7 @@ int psb_intel_init_bios(struct drm_device *dev) parse_sdvo_device_mapping(dev_priv, bdb); parse_device_mapping(dev_priv, bdb); parse_backlight_data(dev_priv, bdb); + parse_edp(dev_priv, bdb); if (bios) pci_unmap_rom(pdev, bios); diff --git a/trunk/drivers/gpu/drm/gma500/intel_bios.h b/trunk/drivers/gpu/drm/gma500/intel_bios.h index 2e95523b84b1..c6267c98c9e7 100644 --- a/trunk/drivers/gpu/drm/gma500/intel_bios.h +++ b/trunk/drivers/gpu/drm/gma500/intel_bios.h @@ -23,6 +23,7 @@ #define _I830_BIOS_H_ #include +#include struct vbt_header { u8 signature[20]; /**< Always starts with 'VBT$' */ @@ -93,6 +94,7 @@ struct vbios_data { #define BDB_SDVO_LVDS_PNP_IDS 24 #define BDB_SDVO_LVDS_POWER_SEQ 25 #define BDB_TV_OPTIONS 26 +#define BDB_EDP 27 #define BDB_LVDS_OPTIONS 40 #define BDB_LVDS_LFP_DATA_PTRS 41 #define BDB_LVDS_LFP_DATA 42 @@ -391,6 +393,11 @@ struct bdb_sdvo_lvds_options { u8 panel_misc_bits_4; } __attribute__((packed)); +#define BDB_DRIVER_FEATURE_NO_LVDS 0 +#define BDB_DRIVER_FEATURE_INT_LVDS 1 +#define BDB_DRIVER_FEATURE_SDVO_LVDS 2 +#define BDB_DRIVER_FEATURE_EDP 3 + struct bdb_driver_features { u8 boot_dev_algorithm:1; u8 block_display_switch:1; @@ -431,6 +438,45 @@ struct bdb_driver_features { u8 custom_vbt_version; } __attribute__((packed)); +#define EDP_18BPP 0 +#define EDP_24BPP 1 +#define EDP_30BPP 2 +#define EDP_RATE_1_62 0 +#define EDP_RATE_2_7 1 +#define EDP_LANE_1 0 +#define EDP_LANE_2 1 +#define EDP_LANE_4 3 +#define EDP_PREEMPHASIS_NONE 0 +#define EDP_PREEMPHASIS_3_5dB 1 +#define EDP_PREEMPHASIS_6dB 2 +#define EDP_PREEMPHASIS_9_5dB 3 +#define EDP_VSWING_0_4V 0 +#define EDP_VSWING_0_6V 1 +#define EDP_VSWING_0_8V 2 +#define EDP_VSWING_1_2V 3 + +struct edp_power_seq { + u16 t1_t3; + u16 t8; + u16 t9; + u16 t10; + u16 t11_t12; +} __attribute__ ((packed)); + +struct edp_link_params { + u8 rate:4; + u8 lanes:4; + u8 preemphasis:4; + u8 vswing:4; +} __attribute__ ((packed)); + +struct bdb_edp { + struct edp_power_seq power_seqs[16]; + u32 color_depth; + u32 sdrrs_msa_timing_delay; + struct edp_link_params link_params[16]; +} __attribute__ ((packed)); + extern int psb_intel_init_bios(struct drm_device *dev); extern void psb_intel_destroy_bios(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 5675d93b4205..32dba2ab53e1 100644 --- a/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/trunk/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -299,17 +299,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector, if (drm_connector_property_set_value(connector, property, value)) goto set_prop_error; - else { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct backlight_device *psb_bd; - - psb_bd = mdfld_get_backlight_device(); - if (psb_bd) { - psb_bd->props.brightness = value; - mdfld_set_brightness(psb_bd); - } -#endif - } + else + gma_backlight_set(encoder->dev, value); } set_prop_done: return 0; diff --git a/trunk/drivers/gpu/drm/gma500/mid_bios.c b/trunk/drivers/gpu/drm/gma500/mid_bios.c index b2a790bd9899..850cd3fbb969 100644 --- a/trunk/drivers/gpu/drm/gma500/mid_bios.c +++ b/trunk/drivers/gpu/drm/gma500/mid_bios.c @@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv) dev_priv->platform_rev_id); } -struct vbt_header { +struct mid_vbt_header { u32 signature; u8 revision; } __packed; /* The same for r0 and r1 */ struct vbt_r0 { - struct 
vbt_header vbt_header; + struct mid_vbt_header vbt_header; u8 size; u8 checksum; } __packed; struct vbt_r10 { - struct vbt_header vbt_header; + struct mid_vbt_header vbt_header; u8 checksum; u16 size; u8 panel_count; @@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv) struct drm_device *dev = dev_priv->dev; u32 addr; u8 __iomem *vbt_virtual; - struct vbt_header vbt_header; + struct mid_vbt_header vbt_header; struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); int ret = -1; diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c index 2eb3dc4e9c9b..69e51e903f35 100644 --- a/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c +++ b/trunk/drivers/gpu/drm/gma500/oaktrail_hdmi.c @@ -252,7 +252,6 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector) if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); - connector->display_info.raw_edid = NULL; } /* diff --git a/trunk/drivers/gpu/drm/gma500/opregion.c b/trunk/drivers/gpu/drm/gma500/opregion.c index c430bd424681..ad0d6de938f3 100644 --- a/trunk/drivers/gpu/drm/gma500/opregion.c +++ b/trunk/drivers/gpu/drm/gma500/opregion.c @@ -166,8 +166,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) { int max = bd->props.max_brightness; - bd->props.brightness = bclp * max / 255; - backlight_update_status(bd); + gma_backlight_set(dev, bclp * max / 255); } asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID; diff --git a/trunk/drivers/gpu/drm/gma500/psb_device.c b/trunk/drivers/gpu/drm/gma500/psb_device.c index 5971bc82b765..f1432f096e58 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_device.c +++ b/trunk/drivers/gpu/drm/gma500/psb_device.c @@ -290,6 +290,7 @@ static void psb_get_core_freq(struct drm_device *dev) case 6: case 7: dev_priv->core_freq = 266; + break; default: dev_priv->core_freq = 0; } diff --git a/trunk/drivers/gpu/drm/gma500/psb_drv.h b/trunk/drivers/gpu/drm/gma500/psb_drv.h index 1bd115ecefe1..223ff5b1b5c4 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_drv.h +++ b/trunk/drivers/gpu/drm/gma500/psb_drv.h @@ -24,10 +24,10 @@ #include #include "drm_global.h" -#include "gem_glue.h" #include "gma_drm.h" #include "psb_reg.h" #include "psb_intel_drv.h" +#include "intel_bios.h" #include "gtt.h" #include "power.h" #include "opregion.h" @@ -613,6 +613,8 @@ struct drm_psb_private { */ struct backlight_device *backlight_device; struct drm_property *backlight_property; + bool backlight_enabled; + int backlight_level; uint32_t blc_adj1; uint32_t blc_adj2; @@ -640,6 +642,19 @@ struct drm_psb_private { int mdfld_panel_id; bool dplla_96mhz; /* DPLL data from the VBT */ + + struct { + int rate; + int lanes; + int preemphasis; + int vswing; + + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; + } edp; + uint8_t panel_type; }; @@ -796,6 +811,9 @@ extern int psb_fbdev_init(struct drm_device *dev); /* backlight.c */ int gma_backlight_init(struct drm_device *dev); void gma_backlight_exit(struct drm_device *dev); +void gma_backlight_disable(struct drm_device *dev); +void gma_backlight_enable(struct drm_device *dev); +void gma_backlight_set(struct drm_device *dev, int v); /* oaktrail_crtc.c */ extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs; diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h b/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h index ebe1a28f60e1..90f2d11e686b 100644 --- 
a/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -29,10 +29,6 @@ * Display related stuff */ -/* store information about an Ixxx DVO */ -/* The i830->i865 use multiple DVOs with multiple i2cs */ -/* the i915, i945 have a single sDVO i2c bus - which is different */ -#define MAX_OUTPUTS 6 /* maximum connectors per crtcs in the mode set */ #define INTELFB_CONN_LIMIT 4 @@ -69,6 +65,8 @@ #define INTEL_OUTPUT_HDMI 6 #define INTEL_OUTPUT_MIPI 7 #define INTEL_OUTPUT_MIPI2 8 +#define INTEL_OUTPUT_DISPLAYPORT 9 +#define INTEL_OUTPUT_EDP 10 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -133,6 +131,11 @@ struct psb_intel_encoder { void (*hot_plug)(struct psb_intel_encoder *); int crtc_mask; int clone_mask; + u32 ddi_select; /* Channel info */ +#define DDI0_SELECT 0x01 +#define DDI1_SELECT 0x02 +#define DP_MASK 0x8000 +#define DDI_MASK 0x03 void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */ /* FIXME: Either make SDVO and LVDS store it's i2c here or give CDV it's @@ -190,7 +193,6 @@ struct psb_intel_crtc { u32 mode_flags; bool active; - bool crtc_enable; /* Saved Crtc HW states */ struct psb_intel_crtc_state *crtc_state; @@ -285,4 +287,20 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); extern void gma_intel_teardown_gmbus(struct drm_device *dev); +/* DP support */ +extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg); +extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + +extern void psb_intel_attach_force_audio_property(struct drm_connector *connector); +extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector); + +extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val); +extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val); +extern void cdv_sb_reset(struct drm_device *dev); + +extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector); +extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector); + #endif /* __INTEL_DRV_H__ */ diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c index 37adc9edf974..2a4c3a9e33e3 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -630,17 +630,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector, property, value)) goto set_prop_error; - else { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - struct drm_psb_private *devp = - encoder->dev->dev_private; - struct backlight_device *bd = devp->backlight_device; - if (bd) { - bd->props.brightness = value; - backlight_update_status(bd); - } -#endif - } + else + gma_backlight_set(encoder->dev, value); } else if (!strcmp(property->name, "DPMS")) { struct drm_encoder_helper_funcs *hfuncs = encoder->helper_private; diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h b/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h index 8e8c8efb0a89..d914719c4b60 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_reg.h @@ -173,15 +173,46 @@ #define PP_SEQUENCE_ON (1 << 28) #define PP_SEQUENCE_OFF (2 << 28) #define PP_SEQUENCE_MASK 0x30000000 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) +#define 
PP_SEQUENCE_STATE_MASK 0x0000000f + #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) - +#define PANEL_UNLOCK_REGS (0xabcd << 16) +#define PANEL_UNLOCK_MASK (0xffff << 16) +#define EDP_FORCE_VDD (1 << 3) +#define EDP_BLC_ENABLE (1 << 2) +#define PANEL_POWER_RESET (1 << 1) +#define PANEL_POWER_OFF (0 << 0) +#define PANEL_POWER_ON (1 << 0) + +/* Poulsbo/Oaktrail */ #define LVDSPP_ON 0x61208 #define LVDSPP_OFF 0x6120c #define PP_CYCLE 0x61210 +/* Cedartrail */ #define PP_ON_DELAYS 0x61208 /* Cedartrail */ +#define PANEL_PORT_SELECT_MASK (3 << 30) +#define PANEL_PORT_SELECT_LVDS (0 << 30) +#define PANEL_PORT_SELECT_EDP (1 << 30) +#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) +#define PANEL_POWER_UP_DELAY_SHIFT 16 +#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) +#define PANEL_LIGHT_ON_DELAY_SHIFT 0 + #define PP_OFF_DELAYS 0x6120c /* Cedartrail */ +#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) +#define PANEL_POWER_DOWN_DELAY_SHIFT 16 +#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) +#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 + +#define PP_DIVISOR 0x61210 /* Cedartrail */ +#define PP_REFERENCE_DIVIDER_MASK (0xffffff00) +#define PP_REFERENCE_DIVIDER_SHIFT 8 +#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) +#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 #define PFIT_CONTROL 0x61230 #define PFIT_ENABLE (1 << 31) @@ -1282,6 +1313,10 @@ No status bits are changed. # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */ # define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) # define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) +# define DPUNIT_PIPEB_GATE_DISABLE (1 << 30) +# define DPUNIT_PIPEA_GATE_DISABLE (1 << 25) +# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) +# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) #define RAMCLK_GATE_D 0x6210 @@ -1347,5 +1382,165 @@ No status bits are changed. #define LANE_PLL_ENABLE (0x3 << 20) #define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21)) +#define DP_B 0x64100 +#define DP_C 0x64200 + +#define DP_PORT_EN (1 << 31) +#define DP_PIPEB_SELECT (1 << 30) +#define DP_PIPE_MASK (1 << 30) + +/* Link training mode - select a suitable mode for each stage */ +#define DP_LINK_TRAIN_PAT_1 (0 << 28) +#define DP_LINK_TRAIN_PAT_2 (1 << 28) +#define DP_LINK_TRAIN_PAT_IDLE (2 << 28) +#define DP_LINK_TRAIN_OFF (3 << 28) +#define DP_LINK_TRAIN_MASK (3 << 28) +#define DP_LINK_TRAIN_SHIFT 28 + +/* Signal voltages. These are mostly controlled by the other end */ +#define DP_VOLTAGE_0_4 (0 << 25) +#define DP_VOLTAGE_0_6 (1 << 25) +#define DP_VOLTAGE_0_8 (2 << 25) +#define DP_VOLTAGE_1_2 (3 << 25) +#define DP_VOLTAGE_MASK (7 << 25) +#define DP_VOLTAGE_SHIFT 25 + +/* Signal pre-emphasis levels, like voltages, the other end tells us what + * they want + */ +#define DP_PRE_EMPHASIS_0 (0 << 22) +#define DP_PRE_EMPHASIS_3_5 (1 << 22) +#define DP_PRE_EMPHASIS_6 (2 << 22) +#define DP_PRE_EMPHASIS_9_5 (3 << 22) +#define DP_PRE_EMPHASIS_MASK (7 << 22) +#define DP_PRE_EMPHASIS_SHIFT 22 + +/* How many wires to use. 
I guess 3 was too hard */ +#define DP_PORT_WIDTH_1 (0 << 19) +#define DP_PORT_WIDTH_2 (1 << 19) +#define DP_PORT_WIDTH_4 (3 << 19) +#define DP_PORT_WIDTH_MASK (7 << 19) + +/* Mystic DPCD version 1.1 special mode */ +#define DP_ENHANCED_FRAMING (1 << 18) + +/** locked once port is enabled */ +#define DP_PORT_REVERSAL (1 << 15) + +/** sends the clock on lane 15 of the PEG for debug */ +#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) + +#define DP_SCRAMBLING_DISABLE (1 << 12) +#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7) + +/** limit RGB values to avoid confusing TVs */ +#define DP_COLOR_RANGE_16_235 (1 << 8) + +/** Turn on the audio link */ +#define DP_AUDIO_OUTPUT_ENABLE (1 << 6) + +/** vs and hs sync polarity */ +#define DP_SYNC_VS_HIGH (1 << 4) +#define DP_SYNC_HS_HIGH (1 << 3) + +/** A fantasy */ +#define DP_DETECTED (1 << 2) + +/** The aux channel provides a way to talk to the + * signal sink for DDC etc. Max packet size supported + * is 20 bytes in each direction, hence the 5 fixed + * data registers + */ +#define DPB_AUX_CH_CTL 0x64110 +#define DPB_AUX_CH_DATA1 0x64114 +#define DPB_AUX_CH_DATA2 0x64118 +#define DPB_AUX_CH_DATA3 0x6411c +#define DPB_AUX_CH_DATA4 0x64120 +#define DPB_AUX_CH_DATA5 0x64124 + +#define DPC_AUX_CH_CTL 0x64210 +#define DPC_AUX_CH_DATA1 0x64214 +#define DPC_AUX_CH_DATA2 0x64218 +#define DPC_AUX_CH_DATA3 0x6421c +#define DPC_AUX_CH_DATA4 0x64220 +#define DPC_AUX_CH_DATA5 0x64224 + +#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) +#define DP_AUX_CH_CTL_DONE (1 << 30) +#define DP_AUX_CH_CTL_INTERRUPT (1 << 29) +#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) +#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26) +#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) +#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) +#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 +#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) +#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 +#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) +#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) +#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) +#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) +#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) +#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) +#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 + +/* + * Computing GMCH M and N values for the Display Port link + * + * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes + * + * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz) + * + * The GMCH value is used internally + * + * bytes_per_pixel is the number of bytes coming out of the plane, + * which is after the LUTs, so we want the bytes for our color format. + * For our current usage, this is always 3, one byte for R, G and B. + */ + +#define _PIPEA_GMCH_DATA_M 0x70050 +#define _PIPEB_GMCH_DATA_M 0x71050 + +/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ +#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25) +#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25 + +#define PIPE_GMCH_DATA_M_MASK (0xffffff) + +#define _PIPEA_GMCH_DATA_N 0x70054 +#define _PIPEB_GMCH_DATA_N 0x71054 +#define PIPE_GMCH_DATA_N_MASK (0xffffff) + +/* + * Computing Link M and N values for the Display Port link + * + * Link M / N = pixel_clock / ls_clk + * + * (the DP spec calls pixel_clock the 'strm_clk') + * + * The Link value is transmitted in the Main Stream + * Attributes and VB-ID. 
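+ * + * Illustrative worked example (not taken from this patch): with a + * 148.5 MHz pixel clock on a 2.7 GHz link (270 MHz symbol rate), + * Link M / N = 148500 / 270000 = 0.55; any 24-bit pair with that + * ratio, e.g. M = 22275 and N = 40500, encodes the same value. The + * GMCH M / N for the same mode at 3 bytes per pixel on 4 lanes is + * (148500 * 3) / (270000 * 4) = 0.4125.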
+ */ + +#define _PIPEA_DP_LINK_M 0x70060 +#define _PIPEB_DP_LINK_M 0x71060 +#define PIPEA_DP_LINK_M_MASK (0xffffff) + +#define _PIPEA_DP_LINK_N 0x70064 +#define _PIPEB_DP_LINK_N 0x71064 +#define PIPEA_DP_LINK_N_MASK (0xffffff) + +#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M) +#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N) +#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M) +#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N) + +#define PIPE_BPC_MASK (7 << 5) +#define PIPE_8BPC (0 << 5) +#define PIPE_10BPC (1 << 5) +#define PIPE_6BPC (2 << 5) #endif diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 0466c7b985f8..d35f93ba3a85 100644 --- a/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/trunk/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -1292,7 +1292,6 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector) return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); - return NULL; } static enum drm_connector_status @@ -1343,7 +1342,6 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) } } else status = connector_status_disconnected; - connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1404,7 +1402,6 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force) ret = connector_status_disconnected; else ret = connector_status_connected; - connector->display_info.raw_edid = NULL; kfree(edid); } else ret = connector_status_connected; @@ -1453,7 +1450,6 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector) drm_add_edid_modes(connector, edid); } - connector->display_info.raw_edid = NULL; kfree(edid); } } diff --git a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c index 36d952280c50..599099fe76e3 100644 --- a/trunk/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/trunk/drivers/gpu/drm/i2c/ch7006_drv.c @@ -427,15 +427,10 @@ static int ch7006_remove(struct i2c_client *client) return 0; } -static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg) +static int ch7006_resume(struct device *dev) { - ch7006_dbg(client, "\n"); - - return 0; -} + struct i2c_client *client = to_i2c_client(dev); -static int ch7006_resume(struct i2c_client *client) -{ ch7006_dbg(client, "\n"); ch7006_write(client, 0x3d, 0x0); @@ -499,15 +494,18 @@ static struct i2c_device_id ch7006_ids[] = { }; MODULE_DEVICE_TABLE(i2c, ch7006_ids); +static const struct dev_pm_ops ch7006_pm_ops = { + .resume = ch7006_resume, +}; + static struct drm_i2c_encoder_driver ch7006_driver = { .i2c_driver = { .probe = ch7006_probe, .remove = ch7006_remove, - .suspend = ch7006_suspend, - .resume = ch7006_resume, .driver = { .name = "ch7006", + .pm = &ch7006_pm_ops, }, .id_table = ch7006_ids, diff --git a/trunk/drivers/gpu/drm/i915/Makefile b/trunk/drivers/gpu/drm/i915/Makefile index b0bacdba6d7e..0f2c5493242b 100644 --- a/trunk/drivers/gpu/drm/i915/Makefile +++ b/trunk/drivers/gpu/drm/i915/Makefile @@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ dvo_ivch.o \ dvo_tfp410.o \ dvo_sil164.o \ + dvo_ns2501.o \ i915_gem_dmabuf.o i915-$(CONFIG_COMPAT) += i915_ioc32.o diff --git a/trunk/drivers/gpu/drm/i915/dvo.h b/trunk/drivers/gpu/drm/i915/dvo.h index 58914691a77b..74b5efccfdb1 100644 --- a/trunk/drivers/gpu/drm/i915/dvo.h +++ b/trunk/drivers/gpu/drm/i915/dvo.h @@ -58,13 +58,12 @@ struct intel_dvo_dev_ops { void (*create_resources)(struct 
intel_dvo_device *dvo); /* - * Turn on/off output or set intermediate power levels if available. + * Turn on/off output. * - * Unsupported intermediate modes drop to the lower power setting. - * If the mode is DPMSModeOff, the output must be disabled, - * as the DPLL may be disabled afterwards. + * Because none of our dvo drivers support intermediate power levels, + * we don't expose this in the interface. */ - void (*dpms)(struct intel_dvo_device *dvo, int mode); + void (*dpms)(struct intel_dvo_device *dvo, bool enable); /* * Callback for testing a video mode for a given output. @@ -115,6 +114,12 @@ struct intel_dvo_dev_ops { */ enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); + /* + * Probe the current hw status, returning true if the connected output + * is active. + */ + bool (*get_hw_state)(struct intel_dvo_device *dev); + /** * Query the device for the modes it provides. * @@ -140,5 +145,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops; extern struct intel_dvo_dev_ops ivch_ops; extern struct intel_dvo_dev_ops tfp410_ops; extern struct intel_dvo_dev_ops ch7017_ops; +extern struct intel_dvo_dev_ops ns2501_ops; #endif /* _INTEL_DVO_H */ diff --git a/trunk/drivers/gpu/drm/i915/dvo_ch7017.c b/trunk/drivers/gpu/drm/i915/dvo_ch7017.c index 1ca799a1e1fc..86b27d1d90c2 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/trunk/drivers/gpu/drm/i915/dvo_ch7017.c @@ -163,7 +163,7 @@ struct ch7017_priv { }; static void ch7017_dump_regs(struct intel_dvo_device *dvo); -static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); +static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) { @@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | (mode->hdisplay & 0x0700) >> 8; - ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); + ch7017_dpms(dvo, false); ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, horizontal_active_pixel_input); ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, @@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, } /* set the CH7017 power state */ -static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) +static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) { uint8_t val; @@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) CH7017_DAC3_POWER_DOWN | CH7017_TV_POWER_DOWN_EN); - if (mode == DRM_MODE_DPMS_ON) { + if (enable) { /* Turn on the LVDS */ ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, val & ~CH7017_LVDS_POWER_DOWN_EN); @@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) msleep(20); } +static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) +{ + uint8_t val; + + ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); + + if (val & CH7017_LVDS_POWER_DOWN_EN) + return false; + else + return true; +} + static void ch7017_dump_regs(struct intel_dvo_device *dvo) { uint8_t val; @@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = { .mode_valid = ch7017_mode_valid, .mode_set = ch7017_mode_set, .dpms = ch7017_dpms, + .get_hw_state = ch7017_get_hw_state, .dump_regs = ch7017_dump_regs, .destroy = ch7017_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c b/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c index 4a036600e806..38f3a6cb8c7d 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/trunk/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -289,14 +289,26 @@ static void
ch7xxx_mode_set(struct intel_dvo_device *dvo, } /* set the CH7xxx power state */ -static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) +static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) { - if (mode == DRM_MODE_DPMS_ON) + if (enable) ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); else ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); } +static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo) +{ + u8 val; + + ch7xxx_readb(dvo, CH7xxx_PM, &val); + + if (val & CH7xxx_PM_FPD) + return false; + else + return true; +} + static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) { int i; @@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = { .mode_valid = ch7xxx_mode_valid, .mode_set = ch7xxx_mode_set, .dpms = ch7xxx_dpms, + .get_hw_state = ch7xxx_get_hw_state, .dump_regs = ch7xxx_dump_regs, .destroy = ch7xxx_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/dvo_ivch.c b/trunk/drivers/gpu/drm/i915/dvo_ivch.c index 04f2893d5e3c..baaf65bf0bdd 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_ivch.c +++ b/trunk/drivers/gpu/drm/i915/dvo_ivch.c @@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, } /** Sets the power state of the panel connected to the ivch */ -static void ivch_dpms(struct intel_dvo_device *dvo, int mode) +static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) { int i; uint16_t vr01, vr30, backlight; @@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode) if (!ivch_read(dvo, VR01, &vr01)) return; - if (mode == DRM_MODE_DPMS_ON) + if (enable) backlight = 1; else backlight = 0; ivch_write(dvo, VR80, backlight); - if (mode == DRM_MODE_DPMS_ON) + if (enable) vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; else vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); @@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode) if (!ivch_read(dvo, VR30, &vr30)) break; - if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) + if (((vr30 & VR30_PANEL_ON) != 0) == enable) break; udelay(1000); } @@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode) udelay(16 * 1000); } +static bool ivch_get_hw_state(struct intel_dvo_device *dvo) +{ + uint16_t vr01; + + /* Read back the current power state of the panel. */ + if (!ivch_read(dvo, VR01, &vr01)) + return false; + + if (vr01 & VR01_LCD_ENABLE) + return true; + else + return false; +} + static void ivch_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo) struct intel_dvo_dev_ops ivch_ops = { .init = ivch_init, .dpms = ivch_dpms, + .get_hw_state = ivch_get_hw_state, .mode_valid = ivch_mode_valid, .mode_set = ivch_mode_set, .detect = ivch_detect, diff --git a/trunk/drivers/gpu/drm/i915/dvo_ns2501.c b/trunk/drivers/gpu/drm/i915/dvo_ns2501.c new file mode 100644 index 000000000000..c4a255be6979 --- /dev/null +++ b/trunk/drivers/gpu/drm/i915/dvo_ns2501.c @@ -0,0 +1,588 @@ +/* + * + * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter + * + * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "dvo.h" +#include "i915_reg.h" +#include "i915_drv.h" + +#define NS2501_VID 0x1305 +#define NS2501_DID 0x6726 + +#define NS2501_VID_LO 0x00 +#define NS2501_VID_HI 0x01 +#define NS2501_DID_LO 0x02 +#define NS2501_DID_HI 0x03 +#define NS2501_REV 0x04 +#define NS2501_RSVD 0x05 +#define NS2501_FREQ_LO 0x06 +#define NS2501_FREQ_HI 0x07 + +#define NS2501_REG8 0x08 +#define NS2501_8_VEN (1<<5) +#define NS2501_8_HEN (1<<4) +#define NS2501_8_DSEL (1<<3) +#define NS2501_8_BPAS (1<<2) +#define NS2501_8_RSVD (1<<1) +#define NS2501_8_PD (1<<0) + +#define NS2501_REG9 0x09 +#define NS2501_9_VLOW (1<<7) +#define NS2501_9_MSEL_MASK (0x7<<4) +#define NS2501_9_TSEL (1<<3) +#define NS2501_9_RSEN (1<<2) +#define NS2501_9_RSVD (1<<1) +#define NS2501_9_MDI (1<<0) + +#define NS2501_REGC 0x0c + +struct ns2501_priv { + //I2CDevRec d; + bool quiet; + int reg_8_shadow; + int reg_8_set; + // Shadow registers for i915 + int dvoc; + int pll_a; + int srcdim; + int fw_blc; +}; + +#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) + +/* + * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens + * laptops does not react on the i2c bus unless + * both the PLL is running and the display is configured in its native + * resolution. + * This function forces the DVO on, and stores the registers it touches. + * Afterwards, registers are restored to regular values. + * + * This is pretty much a hack, though it works. + * Without that, ns2501_readb and ns2501_writeb fail + * when switching the resolution. + */ + +static void enable_dvo(struct intel_dvo_device *dvo) +{ + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__); + + ns->dvoc = I915_READ(DVO_C); + ns->pll_a = I915_READ(_DPLL_A); + ns->srcdim = I915_READ(DVOC_SRCDIM); + ns->fw_blc = I915_READ(FW_BLC); + + I915_WRITE(DVOC, 0x10004084); + I915_WRITE(_DPLL_A, 0xd0820000); + I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768 + I915_WRITE(FW_BLC, 0x1080304); + + I915_WRITE(DVOC, 0x90004084); +} + +/* + * Restore the I915 registers modified by the above + * trigger function. 
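+ * + * enable_dvo() saves the i915 registers before clobbering them, so + * the pair acts as a bracket around failed i2c transfers; see the + * retry loops in ns2501_mode_set() and ns2501_dpms() below.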
+ */ +static void restore_dvo(struct intel_dvo_device *dvo) +{ + struct i2c_adapter *adapter = dvo->i2c_bus; + struct intel_gmbus *bus = container_of(adapter, + struct intel_gmbus, + adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + + I915_WRITE(DVOC, ns->dvoc); + I915_WRITE(_DPLL_A, ns->pll_a); + I915_WRITE(DVOC_SRCDIM, ns->srcdim); + I915_WRITE(FW_BLC, ns->fw_blc); +} + +/* +** Read a register from the ns2501. +** Returns true if successful, false otherwise. +** If it returns false, it might be wise to enable the +** DVO with the above function. +*/ +static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) +{ + struct ns2501_priv *ns = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + }; + + if (!ns->quiet) { + DRM_DEBUG_KMS + ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, + adapter->name, dvo->slave_addr); + } + + return false; +} + +/* +** Write a register to the ns2501. +** Returns true if successful, false otherwise. +** If it returns false, it might be wise to enable the +** DVO with the above function. +*/ +static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +{ + struct ns2501_priv *ns = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + uint8_t out_buf[2]; + + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) { + return true; + } + + if (!ns->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/* National Semiconductor 2501 driver for chip on i2c bus + * scan for the chip on the bus. + * Hope the VBIOS initialized the PLL correctly so we can + * talk to it. If not, it will not be seen and not detected. + * Bummer! + */ +static bool ns2501_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the NS2501 chip on the specified i2c bus */ + struct ns2501_priv *ns; + unsigned char ch; + + ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); + if (ns == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = ns; + ns->quiet = true; + + if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) + goto out; + + if (ch != (NS2501_VID & 0xff)) { + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + + if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) + goto out; + + if (ch != (NS2501_DID & 0xff)) { + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + ns->quiet = false; + ns->reg_8_set = 0; + ns->reg_8_shadow = + NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN; + + DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); + return true; + +out: + kfree(ns); + return false; +} + +static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) +{ + /* + * This is a Laptop display, it doesn't have hotplugging. 
+ * Even if it did, the detection bit of the 2501 is unreliable, as + * it only works for some display types. + * It is even less reliable, as the PLL must be active to + * allow reading from the chip. + */ + return connector_status_connected; +} + +static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + DRM_DEBUG_KMS + ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", + __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, + mode->vtotal); + + /* + * Currently, these are all the modes I have data for. + * More might exist. It is unclear how to find the native + * resolution of the panel from here; knowing it, we could always + * accept a mode by disabling the scaler. + */ + if ((mode->hdisplay == 800 && mode->vdisplay == 600) || + (mode->hdisplay == 640 && mode->vdisplay == 480) || + (mode->hdisplay == 1024 && mode->vdisplay == 768)) { + return MODE_OK; + } else { + return MODE_ONE_SIZE; /* Is this a reasonable error? */ + } +} + +static void ns2501_mode_set(struct intel_dvo_device *dvo, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + bool ok; + bool restore = false; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + + DRM_DEBUG_KMS + ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", + __FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay, + mode->vtotal); + + /* + * Where do I find the native resolution for which scaling is not required? + * + * First trigger the DVO on, as otherwise the chip does not appear on the i2c + * bus. + */ + do { + ok = true; + + if (mode->hdisplay == 800 && mode->vdisplay == 600) { + /* mode 277 */ + ns->reg_8_shadow &= ~NS2501_8_BPAS; + DRM_DEBUG_KMS("%s: switching to 800x600\n", + __FUNCTION__); + + /* + * No, I do not know where this data comes from. + * It is just what the video bios left in the DVO, so + * I'm just copying it over here. + * This also means that I cannot support any other modes + * except the ones supported by the bios. + */ + ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works. + ok &= ns2501_writeb(dvo, 0x1b, 0x19); + ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer + ok &= ns2501_writeb(dvo, 0x1d, 0x02); + + ok &= ns2501_writeb(dvo, 0x34, 0x03); + ok &= ns2501_writeb(dvo, 0x35, 0xff); + + ok &= ns2501_writeb(dvo, 0x80, 0x27); + ok &= ns2501_writeb(dvo, 0x81, 0x03); + ok &= ns2501_writeb(dvo, 0x82, 0x41); + ok &= ns2501_writeb(dvo, 0x83, 0x05); + + ok &= ns2501_writeb(dvo, 0x8d, 0x02); + ok &= ns2501_writeb(dvo, 0x8e, 0x04); + ok &= ns2501_writeb(dvo, 0x8f, 0x00); + + ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */ + ok &= ns2501_writeb(dvo, 0x91, 0x07); + ok &= ns2501_writeb(dvo, 0x94, 0x00); + ok &= ns2501_writeb(dvo, 0x95, 0x00); + + ok &= ns2501_writeb(dvo, 0x96, 0x00); + + ok &= ns2501_writeb(dvo, 0x99, 0x00); + ok &= ns2501_writeb(dvo, 0x9a, 0x88); + + ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */ + ok &= ns2501_writeb(dvo, 0x9d, 0x00); + ok &= ns2501_writeb(dvo, 0x9e, 0x25); + ok &= ns2501_writeb(dvo, 0x9f, 0x03); + + ok &= ns2501_writeb(dvo, 0xa4, 0x80); + + ok &= ns2501_writeb(dvo, 0xb6, 0x00); + + ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */ + ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ + + ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal?
*/ + ok &= ns2501_writeb(dvo, 0xc1, 0xd7); + + ok &= ns2501_writeb(dvo, 0xc2, 0x00); + ok &= ns2501_writeb(dvo, 0xc3, 0xf8); + + ok &= ns2501_writeb(dvo, 0xc4, 0x03); + ok &= ns2501_writeb(dvo, 0xc5, 0x1a); + + ok &= ns2501_writeb(dvo, 0xc6, 0x00); + ok &= ns2501_writeb(dvo, 0xc7, 0x73); + ok &= ns2501_writeb(dvo, 0xc8, 0x02); + + } else if (mode->hdisplay == 640 && mode->vdisplay == 480) { + /* mode 274 */ + DRM_DEBUG_KMS("%s: switching to 640x480\n", + __FUNCTION__); + /* + * No, I do not know where this data comes from. + * It is just what the video bios left in the DVO, so + * I'm just copying it over here. + * This also means that I cannot support any other modes + * except the ones supported by the bios. + */ + ns->reg_8_shadow &= ~NS2501_8_BPAS; + + ok &= ns2501_writeb(dvo, 0x11, 0xa0); + ok &= ns2501_writeb(dvo, 0x1b, 0x11); + ok &= ns2501_writeb(dvo, 0x1c, 0x54); + ok &= ns2501_writeb(dvo, 0x1d, 0x03); + + ok &= ns2501_writeb(dvo, 0x34, 0x03); + ok &= ns2501_writeb(dvo, 0x35, 0xff); + + ok &= ns2501_writeb(dvo, 0x80, 0xff); + ok &= ns2501_writeb(dvo, 0x81, 0x07); + ok &= ns2501_writeb(dvo, 0x82, 0x3d); + ok &= ns2501_writeb(dvo, 0x83, 0x05); + + ok &= ns2501_writeb(dvo, 0x8d, 0x02); + ok &= ns2501_writeb(dvo, 0x8e, 0x10); + ok &= ns2501_writeb(dvo, 0x8f, 0x00); + + ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */ + ok &= ns2501_writeb(dvo, 0x91, 0x07); + ok &= ns2501_writeb(dvo, 0x94, 0x00); + ok &= ns2501_writeb(dvo, 0x95, 0x00); + + ok &= ns2501_writeb(dvo, 0x96, 0x05); + + ok &= ns2501_writeb(dvo, 0x99, 0x00); + ok &= ns2501_writeb(dvo, 0x9a, 0x88); + + ok &= ns2501_writeb(dvo, 0x9c, 0x24); + ok &= ns2501_writeb(dvo, 0x9d, 0x00); + ok &= ns2501_writeb(dvo, 0x9e, 0x25); + ok &= ns2501_writeb(dvo, 0x9f, 0x03); + + ok &= ns2501_writeb(dvo, 0xa4, 0x84); + + ok &= ns2501_writeb(dvo, 0xb6, 0x09); + + ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */ + ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */ + + ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */ + ok &= ns2501_writeb(dvo, 0xc1, 0x90); + + ok &= ns2501_writeb(dvo, 0xc2, 0x00); + ok &= ns2501_writeb(dvo, 0xc3, 0x0f); + + ok &= ns2501_writeb(dvo, 0xc4, 0x03); + ok &= ns2501_writeb(dvo, 0xc5, 0x16); + + ok &= ns2501_writeb(dvo, 0xc6, 0x00); + ok &= ns2501_writeb(dvo, 0xc7, 0x02); + ok &= ns2501_writeb(dvo, 0xc8, 0x02); + + } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) { + /* mode 280 */ + DRM_DEBUG_KMS("%s: switching to 1024x768\n", + __FUNCTION__); + /* + * This might or might not work, actually. I'm silently + * assuming here that the native panel resolution is + * 1024x768. If not, this leaves the scaler disabled, + * generating a picture that is likely not the one expected. + * + * The problem is that I do not know where to take the panel + * dimensions from. + * + * Enable the bypass, scaling not required. + * + * The scaler registers are irrelevant here.... + * + */ + ns->reg_8_shadow |= NS2501_8_BPAS; + ok &= ns2501_writeb(dvo, 0x37, 0x44); + } else { + /* + * Data not known. Bummer! + * Hopefully the code never gets here, as + * ns2501_mode_valid accepted no other modes. + */ + ns->reg_8_shadow |= NS2501_8_BPAS; + } + ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow); + + if (!ok) { + if (restore) + restore_dvo(dvo); + enable_dvo(dvo); + restore = true; + } + } while (!ok); + /* + * Restore the old i915 registers before + * forcing the ns2501 on.
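+ * The output power state itself is programmed separately, + * in ns2501_dpms().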
+ */ + if (restore) + restore_dvo(dvo); +} + +/* read back the NS2501 power state */ +static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) +{ + unsigned char ch; + + if (!ns2501_readb(dvo, NS2501_REG8, &ch)) + return false; + + if (ch & NS2501_8_PD) + return true; + else + return false; +} + +/* set the NS2501 power state */ +static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) +{ + bool ok; + bool restore = false; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + unsigned char ch; + + DRM_DEBUG_KMS("%s: Trying to set the dpms of the DVO to %i\n", + __FUNCTION__, enable); + + ch = ns->reg_8_shadow; + + if (enable) + ch |= NS2501_8_PD; + else + ch &= ~NS2501_8_PD; + + if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) { + ns->reg_8_set = 1; + ns->reg_8_shadow = ch; + + do { + ok = true; + ok &= ns2501_writeb(dvo, NS2501_REG8, ch); + ok &= + ns2501_writeb(dvo, 0x34, + enable ? 0x03 : 0x00); + ok &= + ns2501_writeb(dvo, 0x35, + enable ? 0xff : 0x00); + if (!ok) { + if (restore) + restore_dvo(dvo); + enable_dvo(dvo); + restore = true; + } + } while (!ok); + + if (restore) + restore_dvo(dvo); + } +} + +static void ns2501_dump_regs(struct intel_dvo_device *dvo) +{ + uint8_t val; + + ns2501_readb(dvo, NS2501_FREQ_LO, &val); + DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_FREQ_HI, &val); + DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_REG8, &val); + DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_REG9, &val); + DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val); + ns2501_readb(dvo, NS2501_REGC, &val); + DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val); +} + +static void ns2501_destroy(struct intel_dvo_device *dvo) +{ + struct ns2501_priv *ns = dvo->dev_priv; + + if (ns) { + kfree(ns); + dvo->dev_priv = NULL; + } +} + +struct intel_dvo_dev_ops ns2501_ops = { + .init = ns2501_init, + .detect = ns2501_detect, + .mode_valid = ns2501_mode_valid, + .mode_set = ns2501_mode_set, + .dpms = ns2501_dpms, + .get_hw_state = ns2501_get_hw_state, + .dump_regs = ns2501_dump_regs, + .destroy = ns2501_destroy, +}; diff --git a/trunk/drivers/gpu/drm/i915/dvo_sil164.c b/trunk/drivers/gpu/drm/i915/dvo_sil164.c index a0b13a6f619d..4debd32e3e4c 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_sil164.c +++ b/trunk/drivers/gpu/drm/i915/dvo_sil164.c @@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo, } /* set the SIL164 power state */ -static void sil164_dpms(struct intel_dvo_device *dvo, int mode) +static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) { int ret; unsigned char ch; @@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode) if (ret == false) return; - if (mode == DRM_MODE_DPMS_ON) + if (enable) ch |= SIL164_8_PD; else ch &= ~SIL164_8_PD; @@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode) return; } +static bool sil164_get_hw_state(struct intel_dvo_device *dvo) +{ + int ret; + unsigned char ch; + + ret = sil164_readb(dvo, SIL164_REG8, &ch); + if (ret == false) + return false; + + if (ch & SIL164_8_PD) + return true; + else + return false; +} + static void sil164_dump_regs(struct intel_dvo_device *dvo) { uint8_t val; @@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = { .mode_valid = sil164_mode_valid, .mode_set = sil164_mode_set, .dpms = sil164_dpms, + .get_hw_state = sil164_get_hw_state, .dump_regs = sil164_dump_regs, .destroy = sil164_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/dvo_tfp410.c
b/trunk/drivers/gpu/drm/i915/dvo_tfp410.c index aa2cd3ec54aa..e17f1b07e915 100644 --- a/trunk/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/trunk/drivers/gpu/drm/i915/dvo_tfp410.c @@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo, } /* set the tfp410 power state */ -static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) +static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) { uint8_t ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; - if (mode == DRM_MODE_DPMS_ON) + if (enable) ctl1 |= TFP410_CTL_1_PD; else ctl1 &= ~TFP410_CTL_1_PD; @@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) tfp410_writeb(dvo, TFP410_CTL_1, ctl1); } +static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) +{ + uint8_t ctl1; + + if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) + return false; + + if (ctl1 & TFP410_CTL_1_PD) + return true; + else + return false; +} + static void tfp410_dump_regs(struct intel_dvo_device *dvo) { uint8_t val, val2; @@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = { .mode_valid = tfp410_mode_valid, .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, + .get_hw_state = tfp410_get_hw_state, .dump_regs = tfp410_dump_regs, .destroy = tfp410_destroy, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_debugfs.c b/trunk/drivers/gpu/drm/i915/i915_debugfs.c index 359f6e8b9b00..7a4b3f3aad3f 100644 --- a/trunk/drivers/gpu/drm/i915/i915_debugfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_debugfs.c @@ -44,7 +44,6 @@ enum { ACTIVE_LIST, - FLUSHING_LIST, INACTIVE_LIST, PINNED_LIST, }; @@ -62,28 +61,11 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "gen: %d\n", info->gen); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); -#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) - B(is_mobile); - B(is_i85x); - B(is_i915g); - B(is_i945gm); - B(is_g33); - B(need_gfx_hws); - B(is_g4x); - B(is_pineview); - B(is_broadwater); - B(is_crestline); - B(has_fbc); - B(has_pipe_cxsr); - B(has_hotplug); - B(cursor_needs_physical); - B(has_overlay); - B(overlay_needs_physical); - B(supports_tv); - B(has_bsd_ring); - B(has_blt_ring); - B(has_llc); -#undef B +#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) +#define DEV_INFO_SEP ; + DEV_INFO_FLAGS; +#undef DEV_INFO_FLAG +#undef DEV_INFO_SEP return 0; } @@ -121,20 +103,23 @@ static const char *cache_level_str(int type) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", + seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain, - obj->last_rendering_seqno, + obj->last_read_seqno, + obj->last_write_seqno, obj->last_fenced_seqno, cache_level_str(obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); + if (obj->pin_count) + seq_printf(m, " (pinned x %d)", obj->pin_count); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) @@ -177,10 +162,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) seq_printf(m, "Inactive:\n"); head = &dev_priv->mm.inactive_list; break; - case FLUSHING_LIST: - seq_printf(m, "Flushing:\n"); - head = &dev_priv->mm.flushing_list; - break; default: mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -218,8 +199,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 count, mappable_count; - size_t size, mappable_size; + u32 count, mappable_count, purgeable_count; + size_t size, mappable_size, purgeable_size; struct drm_i915_gem_object *obj; int ret; @@ -232,13 +213,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->mm.object_memory); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.gtt_list, gtt_list); + count_objects(&dev_priv->mm.bound_list, gtt_list); seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; count_objects(&dev_priv->mm.active_list, mm_list); - count_objects(&dev_priv->mm.flushing_list, mm_list); seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); @@ -247,8 +227,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); + size = count = purgeable_size = purgeable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { + size += obj->base.size, ++count; + if (obj->madv == I915_MADV_DONTNEED) + purgeable_size += obj->base.size, ++purgeable_count; + } + seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); + size = count = mappable_size = mappable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { if (obj->fault_mappable) { size += obj->gtt_space->size; ++count; @@ -257,7 +245,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data) mappable_size += obj->gtt_space->size; ++mappable_count; } + if (obj->madv == I915_MADV_DONTNEED) { + purgeable_size += obj->base.size; + ++purgeable_count; + } } + seq_printf(m, "%u purgeable objects, %zu bytes\n", + purgeable_count, purgeable_size); seq_printf(m, "%u pinned mappable objects, %zu bytes\n", mappable_count, mappable_size); seq_printf(m, "%u fault mappable objects, %zu bytes\n", @@ -286,7 +280,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) return ret; total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { if (list == PINNED_LIST && obj->pin_count == 0) continue; @@ -359,40 +353,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; struct drm_i915_gem_request 
*gem_request; - int ret, count; + int ret, count, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; count = 0; - if (!list_empty(&dev_priv->ring[RCS].request_list)) { - seq_printf(m, "Render requests:\n"); - list_for_each_entry(gem_request, - &dev_priv->ring[RCS].request_list, - list) { - seq_printf(m, " %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); - } - count++; - } - if (!list_empty(&dev_priv->ring[VCS].request_list)) { - seq_printf(m, "BSD requests:\n"); - list_for_each_entry(gem_request, - &dev_priv->ring[VCS].request_list, - list) { - seq_printf(m, " %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); - } - count++; - } - if (!list_empty(&dev_priv->ring[BCS].request_list)) { - seq_printf(m, "BLT requests:\n"); + for_each_ring(ring, dev_priv, i) { + if (list_empty(&ring->request_list)) + continue; + + seq_printf(m, "%s requests:\n", ring->name); list_for_each_entry(gem_request, - &dev_priv->ring[BCS].request_list, + &ring->request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -413,7 +389,7 @@ static void i915_ring_seqno_info(struct seq_file *m, { if (ring->get_seqno) { seq_printf(m, "Current sequence (%s): %d\n", - ring->name, ring->get_seqno(ring)); + ring->name, ring->get_seqno(ring, false)); } } @@ -422,14 +398,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - for (i = 0; i < I915_NUM_RINGS; i++) - i915_ring_seqno_info(m, &dev_priv->ring[i]); + for_each_ring(ring, dev_priv, i) + i915_ring_seqno_info(m, ring); mutex_unlock(&dev->struct_mutex); @@ -442,6 +419,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; int ret, i, pipe; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -519,13 +497,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - for (i = 0; i < I915_NUM_RINGS; i++) { + for_each_ring(ring, dev_priv, i) { if (IS_GEN6(dev) || IS_GEN7(dev)) { - seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", - dev_priv->ring[i].name, - I915_READ_IMR(&dev_priv->ring[i])); + seq_printf(m, + "Graphics Interrupt mask (%s): %08x\n", + ring->name, I915_READ_IMR(ring)); } - i915_ring_seqno_info(m, &dev_priv->ring[i]); + i915_ring_seqno_info(m, ring); } mutex_unlock(&dev->struct_mutex); @@ -548,7 +526,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; - seq_printf(m, "Fenced object[%2d] = ", i); + seq_printf(m, "Fence %d, pin count = %d, object = ", + i, dev_priv->fence_regs[i].pin_count); if (obj == NULL) seq_printf(m, "unused"); else @@ -630,12 +609,12 @@ static void print_error_buffers(struct seq_file *m, seq_printf(m, "%s [%d]:\n", name, count); while (count--) { - seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", + seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", err->gtt_offset, err->size, err->read_domains, err->write_domain, 
- err->seqno, + err->rseqno, err->wseqno, pin_flag(err->pinned), tiling_flag(err->tiling), dirty_flag(err->dirty), @@ -667,10 +646,9 @@ static void i915_ring_error_state(struct seq_file *m, seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); - if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { - seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + if (ring == RCS && INTEL_INFO(dev)->gen >= 4) seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); - } + if (INTEL_INFO(dev)->gen >= 4) seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); @@ -719,11 +697,17 @@ static int i915_error_state(struct seq_file *m, void *unused) for (i = 0; i < dev_priv->num_fence_regs; i++) seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) + seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); + if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "ERROR: 0x%08x\n", error->error); seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } + if (INTEL_INFO(dev)->gen == 7) + seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); + for_each_ring(ring, dev_priv, i) i915_ring_error_state(m, dev, error, i); @@ -799,10 +783,14 @@ i915_error_state_write(struct file *filp, struct seq_file *m = filp->private_data; struct i915_error_state_file_priv *error_priv = m->private; struct drm_device *dev = error_priv->dev; + int ret; DRM_DEBUG_DRIVER("Resetting error state\n"); - mutex_lock(&dev->struct_mutex); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + i915_destroy_error_state(dev); mutex_unlock(&dev->struct_mutex); @@ -926,7 +914,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) seq_printf(m, "Render p-state limit: %d\n", rp_state_limits & 0xff); seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> - GEN6_CAGF_SHIFT) * 50); + GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER); seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & GEN6_CURICONT_MASK); seq_printf(m, "RP CUR UP: %dus\n", rpcurup & @@ -942,15 +930,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) max_freq = (rp_state_cap & 0xff0000) >> 16; seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", - max_freq * 50); + max_freq * GT_FREQUENCY_MULTIPLIER); max_freq = (rp_state_cap & 0xff00) >> 8; seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", - max_freq * 50); + max_freq * GT_FREQUENCY_MULTIPLIER); max_freq = rp_state_cap & 0xff; seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", - max_freq * 50); + max_freq * GT_FREQUENCY_MULTIPLIER); } else { seq_printf(m, "no P-state info available\n"); } @@ -1292,7 +1280,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); - for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; + for (gpu_freq = dev_priv->rps.min_delay; + gpu_freq <= dev_priv->rps.max_delay; gpu_freq++) { I915_WRITE(GEN6_PCODE_DATA, gpu_freq); I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | @@ -1303,7 +1292,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) continue; } ia_freq = I915_READ(GEN6_PCODE_DATA); - seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); + seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); } mutex_unlock(&dev->struct_mutex); @@ -1472,8 +1461,12 @@ static int 
i915_swizzle_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; - mutex_lock(&dev->struct_mutex); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", @@ -1520,9 +1513,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) if (INTEL_INFO(dev)->gen == 6) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); - for (i = 0; i < I915_NUM_RINGS; i++) { - ring = &dev_priv->ring[i]; - + for_each_ring(ring, dev_priv, i) { seq_printf(m, "%s\n", ring->name); if (INTEL_INFO(dev)->gen == 7) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); @@ -1674,7 +1665,7 @@ i915_ring_stop_write(struct file *filp, struct drm_device *dev = filp->private_data; struct drm_i915_private *dev_priv = dev->dev_private; char buf[20]; - int val = 0; + int val = 0, ret; if (cnt > 0) { if (cnt > sizeof(buf) - 1) @@ -1689,7 +1680,10 @@ i915_ring_stop_write(struct file *filp, DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); - mutex_lock(&dev->struct_mutex); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + dev_priv->stop_rings = val; mutex_unlock(&dev->struct_mutex); @@ -1713,10 +1707,18 @@ i915_max_freq_read(struct file *filp, struct drm_device *dev = filp->private_data; drm_i915_private_t *dev_priv = dev->dev_private; char buf[80]; - int len; + int len, ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; len = snprintf(buf, sizeof(buf), - "max freq: %d\n", dev_priv->max_delay * 50); + "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); + mutex_unlock(&dev->struct_mutex); if (len > sizeof(buf)) len = sizeof(buf); @@ -1733,7 +1735,10 @@ i915_max_freq_write(struct file *filp, struct drm_device *dev = filp->private_data; struct drm_i915_private *dev_priv = dev->dev_private; char buf[20]; - int val = 1; + int val = 1, ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; if (cnt > 0) { if (cnt > sizeof(buf) - 1) @@ -1748,12 +1753,17 @@ i915_max_freq_write(struct file *filp, DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + /* * Turbo will still be enabled, but won't go above the set value. 
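+ * val arrives in MHz while the limit is tracked in units of + * GT_FREQUENCY_MULTIPLIER (50 MHz steps), hence the division below.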
*/ - dev_priv->max_delay = val / 50; + dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; - gen6_set_rps(dev, val / 50); + gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); + mutex_unlock(&dev->struct_mutex); return cnt; } @@ -1773,10 +1783,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, struct drm_device *dev = filp->private_data; drm_i915_private_t *dev_priv = dev->dev_private; char buf[80]; - int len; + int len, ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; len = snprintf(buf, sizeof(buf), - "min freq: %d\n", dev_priv->min_delay * 50); + "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); + mutex_unlock(&dev->struct_mutex); if (len > sizeof(buf)) len = sizeof(buf); @@ -1791,7 +1809,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, struct drm_device *dev = filp->private_data; struct drm_i915_private *dev_priv = dev->dev_private; char buf[20]; - int val = 1; + int val = 1, ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; if (cnt > 0) { if (cnt > sizeof(buf) - 1) @@ -1806,12 +1827,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + /* * Turbo will still be enabled, but won't go below the set value. */ - dev_priv->min_delay = val / 50; + dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; - gen6_set_rps(dev, val / 50); + gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); + mutex_unlock(&dev->struct_mutex); return cnt; } @@ -1834,9 +1860,15 @@ i915_cache_sharing_read(struct file *filp, drm_i915_private_t *dev_priv = dev->dev_private; char buf[80]; u32 snpcr; - int len; + int len, ret; + + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; - mutex_lock(&dev_priv->dev->struct_mutex); snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); mutex_unlock(&dev_priv->dev->struct_mutex); @@ -1862,6 +1894,9 @@ i915_cache_sharing_write(struct file *filp, u32 snpcr; int val = 1; + if (!(IS_GEN6(dev) || IS_GEN7(dev))) + return -ENODEV; + if (cnt > 0) { if (cnt > sizeof(buf) - 1) return -EINVAL; @@ -1925,16 +1960,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; struct drm_i915_private *dev_priv = dev->dev_private; - int ret; if (INTEL_INFO(dev)->gen < 6) return 0; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; gen6_gt_force_wake_get(dev_priv); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -1947,16 +1977,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) if (INTEL_INFO(dev)->gen < 6) return 0; - /* - * It's bad that we can potentially hang userspace if struct_mutex gets - * forever stuck. However, if we cannot acquire this lock it means that - * almost certainly the driver has hung, is not unload-able. Therefore - * hanging here is probably a minor inconvenience not to be seen my - * almost every user. 
- */ - mutex_lock(&dev->struct_mutex); gen6_gt_force_wake_put(dev_priv); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -2006,7 +2027,6 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_gtt", i915_gem_gtt_info, 0}, {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, - {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, @@ -2067,6 +2087,7 @@ int i915_debugfs_init(struct drm_minor *minor) &i915_cache_sharing_fops); if (ret) return ret; + ret = i915_debugfs_create(minor->debugfs_root, minor, "i915_ring_stop", &i915_ring_stop_fops); diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index 914c0dfabe60..ffbc9156c792 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -235,10 +235,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } } - dev_priv->cpp = init->cpp; - dev_priv->back_offset = init->back_offset; - dev_priv->front_offset = init->front_offset; - dev_priv->current_page = 0; + dev_priv->dri1.cpp = init->cpp; + dev_priv->dri1.back_offset = init->back_offset; + dev_priv->dri1.front_offset = init->front_offset; + dev_priv->dri1.current_page = 0; if (master_priv->sarea_priv) master_priv->sarea_priv->pf_current_page = 0; @@ -575,7 +575,7 @@ static int i915_dispatch_flip(struct drm_device * dev) DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", __func__, - dev_priv->current_page, + dev_priv->dri1.current_page, master_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); @@ -589,12 +589,12 @@ static int i915_dispatch_flip(struct drm_device * dev) OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); - if (dev_priv->current_page == 0) { - OUT_RING(dev_priv->back_offset); - dev_priv->current_page = 1; + if (dev_priv->dri1.current_page == 0) { + OUT_RING(dev_priv->dri1.back_offset); + dev_priv->dri1.current_page = 1; } else { - OUT_RING(dev_priv->front_offset); - dev_priv->current_page = 0; + OUT_RING(dev_priv->dri1.front_offset); + dev_priv->dri1.current_page = 0; } OUT_RING(0); @@ -613,7 +613,7 @@ static int i915_dispatch_flip(struct drm_device * dev) ADVANCE_LP_RING(); } - master_priv->sarea_priv->pf_current_page = dev_priv->current_page; + master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page; return 0; } @@ -1009,6 +1009,12 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_WAIT_TIMEOUT: value = 1; break; + case I915_PARAM_HAS_SEMAPHORES: + value = i915_semaphore_is_enabled(dev); + break; + case I915_PARAM_HAS_PRIME_VMAP_FLUSH: + value = 1; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -1425,6 +1431,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) kfree(ap); } +static void i915_dump_device_info(struct drm_i915_private *dev_priv) +{ + const struct intel_device_info *info = dev_priv->info; + +#define DEV_INFO_FLAG(name) info->name ? 
#name "," : "" +#define DEV_INFO_SEP , + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags=" + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + info->gen, + dev_priv->dev->pdev->device, + DEV_INFO_FLAGS); +#undef DEV_INFO_FLAG +#undef DEV_INFO_SEP +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1440,7 +1461,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; struct intel_device_info *info; - int ret = 0, mmio_bar; + int ret = 0, mmio_bar, mmio_size; uint32_t aperture_size; info = (struct intel_device_info *) flags; @@ -1449,7 +1470,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) return -ENODEV; - /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -1465,6 +1485,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->dev = dev; dev_priv->info = info; + i915_dump_device_info(dev_priv); + if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; @@ -1504,7 +1526,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); mmio_bar = IS_GEN2(dev) ? 1 : 0; - dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); + /* Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (info->gen < 5) + mmio_size = 512*1024; + else + mmio_size = 2*1024*1024; + + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); ret = -EIO; @@ -1536,11 +1570,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * * All tasks on the workqueue are expected to acquire the dev mutex * so there is no point in running more than one instance of the - * workqueue at any time: max_active = 1 and NON_REENTRANT. + * workqueue at any time. Use an ordered one. 
*/ - dev_priv->wq = alloc_workqueue("i915", - WQ_UNBOUND | WQ_NON_REENTRANT, - 1); + dev_priv->wq = alloc_ordered_workqueue("i915", 0); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); ret = -ENOMEM; @@ -1586,7 +1618,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); - spin_lock_init(&dev_priv->rps_lock); + spin_lock_init(&dev_priv->rps.lock); spin_lock_init(&dev_priv->dpio_lock); if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) @@ -1836,6 +1868,8 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), @@ -1858,6 +1892,7 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index a24ffbe97c01..a7837e556945 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -470,6 +470,9 @@ static int i915_drm_freeze(struct drm_device *dev) "GEM idle failed, resume might fail\n"); return error; } + + intel_modeset_disable(dev); + drm_irq_uninstall(dev); } @@ -543,13 +546,9 @@ static int i915_drm_thaw(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); intel_modeset_init_hw(dev); + intel_modeset_setup_hw_state(dev); drm_mode_config_reset(dev); drm_irq_install(dev); - - /* Resume the modeset for every activated CRTC */ - mutex_lock(&dev->mode_config.mutex); - drm_helper_resume_force_mode(dev); - mutex_unlock(&dev->mode_config.mutex); } intel_opregion_init(dev); @@ -1060,7 +1059,7 @@ static bool IS_DISPLAYREG(u32 reg) * This should make it easier to transition modules over to the * new register block scheme, since we can do it incrementally. */ - if (reg >= 0x180000) + if (reg >= VLV_DISPLAY_BASE) return false; if (reg >= RENDER_RING_BASE && @@ -1174,9 +1173,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ + if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ + DRM_ERROR("Unclaimed write to %x\n", reg); \ + writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \ + } \ } __i915_write(8, b) __i915_write(16, w) __i915_write(32, l) __i915_write(64, q) #undef __i915_write + +static const struct register_whitelist { + uint64_t offset; + uint32_t size; + uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
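The lookup below matches this against 1 << INTEL_INFO(dev)->gen.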
*/ +} whitelist[] = { + { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, +}; + +int i915_reg_read_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reg_read *reg = data; + struct register_whitelist const *entry = whitelist; + int i; + + for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { + if (entry->offset == reg->offset && + (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + break; + } + + if (i == ARRAY_SIZE(whitelist)) + return -EINVAL; + + switch (entry->size) { + case 8: + reg->val = I915_READ64(reg->offset); + break; + case 4: + reg->val = I915_READ(reg->offset); + break; + case 2: + reg->val = I915_READ16(reg->offset); + break; + case 1: + reg->val = I915_READ8(reg->offset); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + return 0; +} diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.h b/trunk/drivers/gpu/drm/i915/i915_drv.h index 627fe35781b4..4f2831aa5fed 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.h +++ b/trunk/drivers/gpu/drm/i915/i915_drv.h @@ -109,6 +109,7 @@ struct intel_pch_pll { #define WATCH_COHERENCY 0 #define WATCH_LISTS 0 +#define WATCH_GTT 0 #define I915_GEM_PHYS_CURSOR_0 1 #define I915_GEM_PHYS_CURSOR_1 2 @@ -195,9 +196,10 @@ struct drm_i915_error_state { u32 cpu_ring_head[I915_NUM_RINGS]; u32 cpu_ring_tail[I915_NUM_RINGS]; u32 error; /* gen6+ */ + u32 err_int; /* gen7 */ u32 instpm[I915_NUM_RINGS]; u32 instps[I915_NUM_RINGS]; - u32 instdone1; + u32 extra_instdone[I915_NUM_INSTDONE_REG]; u32 seqno[I915_NUM_RINGS]; u64 bbaddr; u32 fault_reg[I915_NUM_RINGS]; @@ -221,7 +223,7 @@ struct drm_i915_error_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 seqno; + u32 rseqno, wseqno; u32 gtt_offset; u32 read_domains; u32 write_domain; @@ -239,7 +241,6 @@ struct drm_i915_error_state { }; struct drm_i915_display_funcs { - void (*dpms)(struct drm_crtc *crtc, int mode); bool (*fbc_enabled)(struct drm_device *dev); void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); void (*disable_fbc)(struct drm_device *dev); @@ -248,7 +249,6 @@ struct drm_i915_display_funcs { void (*update_wm)(struct drm_device *dev); void (*update_sprite_wm)(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size); - void (*sanitize_pm)(struct drm_device *dev); void (*update_linetime_wm)(struct drm_device *dev, int pipe, struct drm_display_mode *mode); int (*crtc_mode_set)(struct drm_crtc *crtc, @@ -256,6 +256,8 @@ struct drm_i915_display_funcs { struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); + void (*crtc_enable)(struct drm_crtc *crtc); + void (*crtc_disable)(struct drm_crtc *crtc); void (*off)(struct drm_crtc *crtc); void (*write_eld)(struct drm_connector *connector, struct drm_crtc *crtc); @@ -279,6 +281,32 @@ struct drm_i915_gt_funcs { void (*force_wake_put)(struct drm_i915_private *dev_priv); }; +#define DEV_INFO_FLAGS \ + DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \ + DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \ + DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP 
\ + DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \ + DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \ + DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \ + DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \ + DEV_INFO_FLAG(has_llc) + struct intel_device_info { u8 gen; u8 is_mobile:1; @@ -402,12 +430,6 @@ typedef struct drm_i915_private { struct resource mch_res; - unsigned int cpp; - int back_offset; - int front_offset; - int current_page; - int page_flipping; - atomic_t irq_received; /* protects the irq masks */ @@ -425,7 +447,6 @@ typedef struct drm_i915_private { u32 hotplug_supported_mask; struct work_struct hotplug_work; - unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int num_pipe; int num_pch_pll; @@ -434,8 +455,7 @@ typedef struct drm_i915_private { struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd[I915_NUM_RINGS]; - uint32_t last_instdone; - uint32_t last_instdone1; + uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; unsigned int stop_rings; @@ -666,7 +686,13 @@ typedef struct drm_i915_private { struct drm_mm gtt_space; /** List of all objects in gtt_space. Used to restore gtt * mappings on resume */ - struct list_head gtt_list; + struct list_head bound_list; + /** + * List of objects which are not bound to the GTT (thus + * are idle and not used by the GPU) but still have + * (presumably uncached) pages still attached. + */ + struct list_head unbound_list; /** Usable portion of the GTT for GEM */ unsigned long gtt_start; @@ -695,17 +721,6 @@ typedef struct drm_i915_private { */ struct list_head active_list; - /** - * List of objects which are not in the ringbuffer but which - * still have a write_domain which needs to be flushed before - * unbinding. - * - * last_rendering_seqno is 0 while an object is in this list. - * - * A reference is held on the buffer while on this list. - */ - struct list_head flushing_list; - /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. @@ -775,6 +790,12 @@ typedef struct drm_i915_private { struct { unsigned allow_batchbuffer : 1; u32 __iomem *gfx_hws_cpu_addr; + + unsigned int cpp; + int back_offset; + int front_offset; + int current_page; + int page_flipping; } dri1; /* Kernel Modesetting */ @@ -796,9 +817,6 @@ typedef struct drm_i915_private { bool lvds_downclock_avail; /* indicates the reduced downclock for LVDS*/ int lvds_downclock; - struct work_struct idle_work; - struct timer_list idle_timer; - bool busy; u16 orig_clock; int child_dev_num; struct child_device_config *child_dev; @@ -807,26 +825,41 @@ typedef struct drm_i915_private { bool mchbar_need_disable; - struct work_struct rps_work; - spinlock_t rps_lock; - u32 pm_iir; - - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; - struct timespec last_time2; - unsigned long gfx_power; - int c_m; - int r_t; - u8 corr; - spinlock_t *mchdev_lock; + /* gen6+ rps state */ + struct { + struct work_struct work; + u32 pm_iir; + /* lock - irqsave spinlock that protects the work_struct and + * pm_iir. */ + spinlock_t lock; + + /* The variables below and all the rps hw state are protected by + * dev->struct_mutex.
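+ *
+ * A sketch of the intended locking (pm_iir here being freshly read
+ * interrupt bits; assumed usage, shown only for illustration):
+ *
+ *	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+ *	dev_priv->rps.pm_iir |= pm_iir;
+ *	queue_work(dev_priv->wq, &dev_priv->rps.work);
+ *	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);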
*/ + u8 cur_delay; + u8 min_delay; + u8 max_delay; + } rps; + + /* ilk-only ips/rps state. Everything in here is protected by the global + * mchdev_lock in intel_pm.c */ + struct { + u8 cur_delay; + u8 min_delay; + u8 max_delay; + u8 fmax; + u8 fstart; + + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + u8 corr; + + int c_m; + int r_t; + } ips; enum no_fbc_reason no_fbc_reason; @@ -861,30 +894,48 @@ enum hdmi_force_audio { }; enum i915_cache_level { - I915_CACHE_NONE, + I915_CACHE_NONE = 0, I915_CACHE_LLC, - I915_CACHE_LLC_MLC, /* gen6+ */ + I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ +}; + +struct drm_i915_gem_object_ops { + /* Interface between the GEM object and its backing storage. + * get_pages() is called once prior to the use of the associated set + * of pages (before binding them into the GTT), and put_pages() is + * called after we no longer need them. As we expect there to be + * an associated cost with migrating pages between the backing storage + * and making them available for the GPU (e.g. clflush), we may hold + * onto the pages after they are no longer referenced by the GPU + * in case they may be used again shortly (for example migrating the + * pages to a different memory domain within the GTT). put_pages() + * will therefore most likely be called when the object itself is + * being released or under memory pressure (where we attempt to + * reap pages for the shrinker). + */ + int (*get_pages)(struct drm_i915_gem_object *); + void (*put_pages)(struct drm_i915_gem_object *); }; struct drm_i915_gem_object { struct drm_gem_object base; + const struct drm_i915_gem_object_ops *ops; + /** Current space allocated to this object in the GTT, if any. */ struct drm_mm_node *gtt_space; struct list_head gtt_list; - /** This object's place on the active/flushing/inactive lists */ + /** This object's place on the active/inactive lists */ struct list_head ring_list; struct list_head mm_list; - /** This object's place on GPU write list */ - struct list_head gpu_write_list; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; /** - * This is set if the object is on the active or flushing lists - * (has pending rendering), and is not set if it's on inactive (ready - * to be unbound). + * This is set if the object is on the active lists (has pending + * rendering and so a non-zero seqno), and is not set if it is on + * the inactive (ready to be unbound) list. */ unsigned int active:1; @@ -894,12 +945,6 @@ struct drm_i915_gem_object { */ unsigned int dirty:1; - /** - * This is set if the object has been written to since the last - * GPU flush. - */ - unsigned int pending_gpu_write:1; - /** * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT. @@ -961,17 +1006,12 @@ struct drm_i915_gem_object { unsigned int has_aliasing_ppgtt_mapping:1; unsigned int has_global_gtt_mapping:1; + unsigned int has_dma_mapping:1; - struct page **pages; - - /** - * DMAR support - */ - struct scatterlist *sg_list; - int num_sg; + struct sg_table *pages; + int pages_pin_count; /* prime dma-buf support */ - struct sg_table *sg_table; void *dma_buf_vmapping; int vmapping_count; @@ -992,7 +1032,8 @@ struct drm_i915_gem_object { struct intel_ring_buffer *ring; /** Breadcrumb of last rendering to the buffer.
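Split below into separate read and write seqnos, so that a read-only wait need only wait for the last write, not the last read.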
*/ - uint32_t last_rendering_seqno; + uint32_t last_read_seqno; + uint32_t last_write_seqno; /** Breadcrumb of last fenced GPU access to the buffer. */ uint32_t last_fenced_seqno; @@ -1135,6 +1176,10 @@ struct drm_i915_file_private { #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) +#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + +#define GT_FREQUENCY_MULTIPLIER 50 + #include "i915_trace.h" /** @@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, @@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); -int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, - uint32_t invalidate_domains, - uint32_t flush_domains); +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, - bool map_and_fenceable); + bool map_and_fenceable, + bool nonblocking); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); -int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, - gfp_t gfpmask); +int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); +static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) +{ + struct scatterlist *sg = obj->pages->sgl; + while (n >= SG_MAX_SINGLE_ALLOC) { + sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); + n -= SG_MAX_SINGLE_ALLOC - 1; + } + return sg_page(sg+n); +} +static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages == NULL); + obj->pages_pin_count++; +} +static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + BUG_ON(obj->pages_pin_count == 0); + obj->pages_pin_count--; +} + int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); -int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_ring_buffer *to); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, @@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev); -int __must_check i915_add_request(struct intel_ring_buffer *ring, - struct drm_file *file, - struct drm_i915_gem_request *request); +int i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + struct drm_i915_gem_request *request); int __must_check 
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); @@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev, /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, bool mappable); -int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); + unsigned alignment, + unsigned cache_level, + bool mappable, + bool nonblock); +int i915_gem_evict_everything(struct drm_device *dev); /* i915_gem_stolen.c */ int i915_gem_init_stolen(struct drm_device *dev); @@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_gem_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); +extern void intel_modeset_setup_hw_state(struct drm_device *dev); extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); @@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); extern int intel_enable_rc6(const struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev); +int i915_reg_read_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* overlay */ #ifdef CONFIG_DEBUG_FS diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 489e2b162b27..365a7dc8a4a8 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -37,12 +37,12 @@ #include #include -static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, - bool map_and_fenceable); + bool map_and_fenceable, + bool nonblocking); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -56,6 +56,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc); +static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); +static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) @@ -141,7 +143,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return !obj->active; + return obj->gtt_space && !obj->active; } int @@ -180,7 +182,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned = 0; mutex_lock(&dev->struct_mutex); - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) if (obj->pin_count) pinned += obj->gtt_space->size; mutex_unlock(&dev->struct_mutex); @@ -341,7 +343,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length, page_length); kunmap_atomic(vaddr); - return ret; + return ret ? 
-EFAULT : 0; } static void @@ -392,7 +394,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length, page_length); kunmap(page); - return ret; + return ret ? - EFAULT : 0; } static int @@ -401,7 +403,6 @@ i915_gem_shmem_pread(struct drm_device *dev, struct drm_i915_gem_pread *args, struct drm_file *file) { - struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; char __user *user_data; ssize_t remain; loff_t offset; @@ -410,7 +411,8 @@ i915_gem_shmem_pread(struct drm_device *dev, int hit_slowpath = 0; int prefaulted = 0; int needs_clflush = 0; - int release_page; + struct scatterlist *sg; + int i; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -424,16 +426,30 @@ i915_gem_shmem_pread(struct drm_device *dev, * anyway again before the next pread happens. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush = 1; - ret = i915_gem_object_set_to_gtt_domain(obj, false); - if (ret) - return ret; + if (obj->gtt_space) { + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + return ret; + } } + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + offset = args->offset; - while (remain > 0) { + for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { struct page *page; + if (i < offset >> PAGE_SHIFT) + continue; + + if (remain <= 0) + break; + /* Operation in this page * * shmem_page_offset = offset within page in shmem file @@ -444,18 +460,7 @@ i915_gem_shmem_pread(struct drm_device *dev, if ((shmem_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - shmem_page_offset; - if (obj->pages) { - page = obj->pages[offset >> PAGE_SHIFT]; - release_page = 0; - } else { - page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto out; - } - release_page = 1; - } - + page = sg_page(sg); page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; @@ -466,7 +471,6 @@ i915_gem_shmem_pread(struct drm_device *dev, goto next_page; hit_slowpath = 1; - page_cache_get(page); mutex_unlock(&dev->struct_mutex); if (!prefaulted) { @@ -484,16 +488,12 @@ i915_gem_shmem_pread(struct drm_device *dev, needs_clflush); mutex_lock(&dev->struct_mutex); - page_cache_release(page); + next_page: mark_page_accessed(page); - if (release_page) - page_cache_release(page); - if (ret) { - ret = -EFAULT; + if (ret) goto out; - } remain -= page_length; user_data += page_length; @@ -501,6 +501,8 @@ i915_gem_shmem_pread(struct drm_device *dev, } out: + i915_gem_object_unpin_pages(obj); + if (hit_slowpath) { /* Fixup: Kill any reinstated backing storage pages */ if (obj->madv == __I915_MADV_PURGED) @@ -606,7 +608,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, char __user *user_data; int page_offset, page_length, ret; - ret = i915_gem_object_pin(obj, 0, true); + ret = i915_gem_object_pin(obj, 0, true, true); if (ret) goto out; @@ -686,7 +688,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length, page_length); kunmap_atomic(vaddr); - return ret; + return ret ? -EFAULT : 0; } /* Only difference to the fast-path function is that this can handle bit17 @@ -720,7 +722,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length, page_do_bit17_swizzling); kunmap(page); - return ret; + return ret ? 
-EFAULT : 0; } static int @@ -729,7 +731,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; loff_t offset; char __user *user_data; @@ -738,7 +739,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int hit_slowpath = 0; int needs_clflush_after = 0; int needs_clflush_before = 0; - int release_page; + int i; + struct scatterlist *sg; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; @@ -752,9 +754,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * right away and we therefore have to clflush anyway. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush_after = 1; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; + if (obj->gtt_space) { + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + } } /* Same trick applies for invalidate partially written cachelines before * writing. */ @@ -762,13 +766,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev, && obj->cache_level == I915_CACHE_NONE) needs_clflush_before = 1; + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + offset = args->offset; obj->dirty = 1; - while (remain > 0) { + for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { struct page *page; int partial_cacheline_write; + if (i < offset >> PAGE_SHIFT) + continue; + + if (remain <= 0) + break; + /* Operation in this page * * shmem_page_offset = offset within page in shmem file @@ -787,18 +803,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ((shmem_page_offset | page_length) & (boot_cpu_data.x86_clflush_size - 1)); - if (obj->pages) { - page = obj->pages[offset >> PAGE_SHIFT]; - release_page = 0; - } else { - page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto out; - } - release_page = 1; - } - + page = sg_page(sg); page_do_bit17_swizzling = obj_do_bit17_swizzling && (page_to_phys(page) & (1 << 17)) != 0; @@ -810,26 +815,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev, goto next_page; hit_slowpath = 1; - page_cache_get(page); mutex_unlock(&dev->struct_mutex); - ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, partial_cacheline_write, needs_clflush_after); mutex_lock(&dev->struct_mutex); - page_cache_release(page); + next_page: set_page_dirty(page); mark_page_accessed(page); - if (release_page) - page_cache_release(page); - if (ret) { - ret = -EFAULT; + if (ret) goto out; - } remain -= page_length; user_data += page_length; @@ -837,6 +836,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev, } out: + i915_gem_object_unpin_pages(obj); + if (hit_slowpath) { /* Fixup: Kill any reinstated backing storage pages */ if (obj->madv == __I915_MADV_PURGED) @@ -920,10 +921,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } - if (obj->gtt_space && - obj->cache_level == I915_CACHE_NONE && + if (obj->cache_level == I915_CACHE_NONE && obj->tiling_mode == I915_TILING_NONE && - obj->map_and_fenceable && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user @@ -931,7 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * textures). Fallback to the shmem path in that case. 
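* The fast path can now also fail with -ENOSPC, since the pin it
* performs is nonblocking and may refuse to evict; that case falls
* back to the shmem path below as well.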
*/ } - if (ret == -EFAULT) + if (ret == -EFAULT || ret == -ENOSPC) ret = i915_gem_shmem_pwrite(dev, obj, args, file); out: @@ -941,6 +940,240 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return ret; } +int +i915_gem_check_wedge(struct drm_i915_private *dev_priv, + bool interruptible) +{ + if (atomic_read(&dev_priv->mm.wedged)) { + struct completion *x = &dev_priv->error_completion; + bool recovery_complete; + unsigned long flags; + + /* Give the error handler a chance to run. */ + spin_lock_irqsave(&x->wait.lock, flags); + recovery_complete = x->done > 0; + spin_unlock_irqrestore(&x->wait.lock, flags); + + /* Non-interruptible callers can't handle -EAGAIN, hence return + * -EIO unconditionally for these. */ + if (!interruptible) + return -EIO; + + /* Recovery complete, but still wedged means reset failure. */ + if (recovery_complete) + return -EIO; + + return -EAGAIN; + } + + return 0; +} + +/* + * Compare seqno against outstanding lazy request. Emit a request if they are + * equal. + */ +static int +i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) +{ + int ret; + + BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + + ret = 0; + if (seqno == ring->outstanding_lazy_request) + ret = i915_add_request(ring, NULL, NULL); + + return ret; +} + +/** + * __wait_seqno - wait until execution of seqno has finished + * @ring: the ring expected to report seqno + * @seqno: the seqno to wait for + * @interruptible: do an interruptible wait (normally yes) + * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * + * Returns 0 if the seqno was found within the allotted time. Else returns the + * errno with remaining time filled in the timeout argument. + */ +static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, + bool interruptible, struct timespec *timeout) +{ + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct timespec before, now, wait_time={1,0}; + unsigned long timeout_jiffies; + long end; + bool wait_forever = true; + int ret; + + if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) + return 0; + + trace_i915_gem_request_wait_begin(ring, seqno); + + if (timeout != NULL) { + wait_time = *timeout; + wait_forever = false; + } + + timeout_jiffies = timespec_to_jiffies(&wait_time); + + if (WARN_ON(!ring->irq_get(ring))) + return -ENODEV; + + /* Record current time in case interrupted by signal, or wedged */ + getrawmonotonic(&before); + +#define EXIT_COND \ + (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ + atomic_read(&dev_priv->mm.wedged)) + do { + if (interruptible) + end = wait_event_interruptible_timeout(ring->irq_queue, + EXIT_COND, + timeout_jiffies); + else + end = wait_event_timeout(ring->irq_queue, EXIT_COND, + timeout_jiffies); + + ret = i915_gem_check_wedge(dev_priv, interruptible); + if (ret) + end = ret; + } while (end == 0 && wait_forever); + + getrawmonotonic(&now); + + ring->irq_put(ring); + trace_i915_gem_request_wait_end(ring, seqno); +#undef EXIT_COND + + if (timeout) { + struct timespec sleep_time = timespec_sub(now, before); + *timeout = timespec_sub(*timeout, sleep_time); + } + + switch (end) { + case -EIO: + case -EAGAIN: /* Wedged */ + case -ERESTARTSYS: /* Signal */ + return (int)end; + case 0: /* Timeout */ + if (timeout) + set_normalized_timespec(timeout, 0, 0); + return -ETIME; + default: /* Completed */ + WARN_ON(end < 0); /* We're not aware of other errors */ + return 0; + } +} + +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists
appropriately for that event. + */ +int +i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + bool interruptible = dev_priv->mm.interruptible; + int ret; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(seqno == 0); + + ret = i915_gem_check_wedge(dev_priv, interruptible); + if (ret) + return ret; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + return ret; + + return __wait_seqno(ring, seqno, interruptible, NULL); +} + +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + */ +static __must_check int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, + bool readonly) +{ + struct intel_ring_buffer *ring = obj->ring; + u32 seqno; + int ret; + + seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; + if (seqno == 0) + return 0; + + ret = i915_wait_seqno(ring, seqno); + if (ret) + return ret; + + i915_gem_retire_requests_ring(ring); + + /* Manually manage the write flush as we may have not yet + * retired the buffer. + */ + if (obj->last_write_seqno && + i915_seqno_passed(seqno, obj->last_write_seqno)) { + obj->last_write_seqno = 0; + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + } + + return 0; +} + +/* A nonblocking variant of the above wait. This is a highly dangerous routine + * as the object state may change during this call. + */ +static __must_check int +i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, + bool readonly) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = obj->ring; + u32 seqno; + int ret; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + BUG_ON(!dev_priv->mm.interruptible); + + seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; + if (seqno == 0) + return 0; + + ret = i915_gem_check_wedge(dev_priv, true); + if (ret) + return ret; + + ret = i915_gem_check_olr(ring, seqno); + if (ret) + return ret; + + mutex_unlock(&dev->struct_mutex); + ret = __wait_seqno(ring, seqno, true, NULL); + mutex_lock(&dev->struct_mutex); + + i915_gem_retire_requests_ring(ring); + + /* Manually manage the write flush as we may have not yet + * retired the buffer. + */ + if (obj->last_write_seqno && + i915_seqno_passed(seqno, obj->last_write_seqno)) { + obj->last_write_seqno = 0; + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + } + + return ret; +} + /** * Called when user space prepares to use an object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. @@ -978,6 +1211,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, goto unlock; } + /* Try to flush the object off the GPU without holding the lock. + * We will repeat the flush holding the lock in the normal manner + * to catch cases where we are gazumped. 
+ */ + ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); + if (ret) + goto unref; + if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); @@ -991,6 +1232,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } +unref: drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); @@ -1110,7 +1352,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto unlock; } if (!obj->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, 0, true); + ret = i915_gem_object_bind_to_gtt(obj, 0, true, false); if (ret) goto unlock; @@ -1271,8 +1513,44 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, return i915_gem_get_gtt_size(dev, size, tiling_mode); } -int -i915_gem_mmap_gtt(struct drm_file *file, +static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int ret; + + if (obj->base.map_list.map) + return 0; + + ret = drm_gem_create_mmap_offset(&obj->base); + if (ret != -ENOSPC) + return ret; + + /* Badly fragmented mmap space? The only way we can recover + * space is by destroying unwanted objects. We can't randomly release + * mmap_offsets as userspace expects them to be persistent for the + * lifetime of the objects. The closest we can do is to release the + * offsets on purgeable objects by truncating them and marking them + * purged, which prevents userspace from ever using those objects again. + */ + i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); + ret = drm_gem_create_mmap_offset(&obj->base); + if (ret != -ENOSPC) + return ret; + + i915_gem_shrink_all(dev_priv); + return drm_gem_create_mmap_offset(&obj->base); +} + +static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) +{ + if (!obj->base.map_list.map) + return; + + drm_gem_free_mmap_offset(&obj->base); +} + +int +i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, uint32_t handle, uint64_t *offset) @@ -1302,11 +1580,9 @@ i915_gem_mmap_gtt(struct drm_file *file, goto out; } - if (!obj->base.map_list.map) { - ret = drm_gem_create_mmap_offset(&obj->base); - if (ret) - goto out; - } + ret = i915_gem_object_create_mmap_offset(obj); + if (ret) + goto out; *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; @@ -1341,83 +1617,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); } -int -i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, - gfp_t gfpmask) +/* Immediately discard the backing storage */ +static void +i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - int page_count, i; - struct address_space *mapping; struct inode *inode; - struct page *page; - if (obj->pages || obj->sg_table) - return 0; + i915_gem_object_free_mmap_offset(obj); - /* Get the list of pages out of our struct file. They'll be pinned - * at this point until we release them. - */ - page_count = obj->base.size / PAGE_SIZE; - BUG_ON(obj->pages != NULL); - obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); - if (obj->pages == NULL) - return -ENOMEM; + if (obj->base.filp == NULL) + return; + /* Our goal here is to return as much of the memory as + * is possible back to the system as we are called from OOM. + * To do this we must instruct the shmfs to drop all of its + * backing pages, *now*.
+ */ inode = obj->base.filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - gfpmask |= mapping_gfp_mask(mapping); - - for (i = 0; i < page_count; i++) { - page = shmem_read_mapping_page_gfp(mapping, i, gfpmask); - if (IS_ERR(page)) - goto err_pages; - - obj->pages[i] = page; - } - - if (i915_gem_object_needs_bit17_swizzle(obj)) - i915_gem_object_do_bit_17_swizzle(obj); - - return 0; + shmem_truncate_range(inode, 0, (loff_t)-1); -err_pages: - while (i--) - page_cache_release(obj->pages[i]); + obj->madv = __I915_MADV_PURGED; +} - drm_free_large(obj->pages); - obj->pages = NULL; - return PTR_ERR(page); +static inline int +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) +{ + return obj->madv == I915_MADV_DONTNEED; } static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { int page_count = obj->base.size / PAGE_SIZE; - int i; - - if (!obj->pages) - return; + struct scatterlist *sg; + int ret, i; BUG_ON(obj->madv == __I915_MADV_PURGED); + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) { + /* In the event of a disaster, abandon all caches and + * hope for the best. + */ + WARN_ON(ret != -EIO); + i915_gem_clflush_object(obj); + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj); if (obj->madv == I915_MADV_DONTNEED) obj->dirty = 0; - for (i = 0; i < page_count; i++) { + for_each_sg(obj->pages->sgl, sg, page_count, i) { + struct page *page = sg_page(sg); + if (obj->dirty) - set_page_dirty(obj->pages[i]); + set_page_dirty(page); if (obj->madv == I915_MADV_WILLNEED) - mark_page_accessed(obj->pages[i]); + mark_page_accessed(page); - page_cache_release(obj->pages[i]); + page_cache_release(page); } obj->dirty = 0; - drm_free_large(obj->pages); + sg_free_table(obj->pages); + kfree(obj->pages); +} + +static int +i915_gem_object_put_pages(struct drm_i915_gem_object *obj) +{ + const struct drm_i915_gem_object_ops *ops = obj->ops; + + if (obj->pages == NULL) + return 0; + + BUG_ON(obj->gtt_space); + + if (obj->pages_pin_count) + return -EBUSY; + + ops->put_pages(obj); obj->pages = NULL; + + list_del(&obj->gtt_list); + if (i915_gem_object_is_purgeable(obj)) + i915_gem_object_truncate(obj); + + return 0; +} + +static long +i915_gem_purge(struct drm_i915_private *dev_priv, long target) +{ + struct drm_i915_gem_object *obj, *next; + long count = 0; + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.unbound_list, + gtt_list) { + if (i915_gem_object_is_purgeable(obj) && + i915_gem_object_put_pages(obj) == 0) { + count += obj->base.size >> PAGE_SHIFT; + if (count >= target) + return count; + } + } + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (i915_gem_object_is_purgeable(obj) && + i915_gem_object_unbind(obj) == 0 && + i915_gem_object_put_pages(obj) == 0) { + count += obj->base.size >> PAGE_SHIFT; + if (count >= target) + return count; + } + } + + return count; +} + +static void +i915_gem_shrink_all(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj, *next; + + i915_gem_evict_everything(dev_priv->dev); + + list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) + i915_gem_object_put_pages(obj); +} + +static int +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int page_count, i; + struct address_space *mapping; + struct sg_table *st; + struct scatterlist *sg; + struct page 
*page; + gfp_t gfp; + + /* Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return -ENOMEM; + + page_count = obj->base.size / PAGE_SIZE; + if (sg_alloc_table(st, page_count, GFP_KERNEL)) { + sg_free_table(st); + kfree(st); + return -ENOMEM; + } + + /* Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. + * + * Fail silently without starting the shrinker + */ + mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; + gfp = mapping_gfp_mask(mapping); + gfp |= __GFP_NORETRY | __GFP_NOWARN; + gfp &= ~(__GFP_IO | __GFP_WAIT); + for_each_sg(st->sgl, sg, page_count, i) { + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (IS_ERR(page)) { + i915_gem_purge(dev_priv, page_count); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + } + if (IS_ERR(page)) { + /* We've tried hard to allocate the memory by reaping + * our own buffer, now let the real VM do its job and + * go down in flames if truly OOM. + */ + gfp &= ~(__GFP_NORETRY | __GFP_NOWARN); + gfp |= __GFP_IO | __GFP_WAIT; + + i915_gem_shrink_all(dev_priv); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (IS_ERR(page)) + goto err_pages; + + gfp |= __GFP_NORETRY | __GFP_NOWARN; + gfp &= ~(__GFP_IO | __GFP_WAIT); + } + + sg_set_page(sg, page, PAGE_SIZE, 0); + } + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_do_bit_17_swizzle(obj); + + obj->pages = st; + return 0; + +err_pages: + for_each_sg(st->sgl, sg, i, page_count) + page_cache_release(sg_page(sg)); + sg_free_table(st); + kfree(st); + return PTR_ERR(page); +} + +/* Ensure that the associated pages are gathered from the backing storage + * and pinned into our object. i915_gem_object_get_pages() may be called + * multiple times before they are released by a single call to + * i915_gem_object_put_pages() - once the pages are no longer referenced + * either as a result of memory pressure (reaping pages under the shrinker) + * or as the object is itself released. 
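+ *
+ * Typical caller pattern (a sketch; locking and error handling
+ * elided):
+ *
+ *	ret = i915_gem_object_get_pages(obj);
+ *	if (ret)
+ *		return ret;
+ *	i915_gem_object_pin_pages(obj);
+ *	... access the obj->pages sg_table ...
+ *	i915_gem_object_unpin_pages(obj);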
+ */ +int +i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + const struct drm_i915_gem_object_ops *ops = obj->ops; + int ret; + + if (obj->pages) + return 0; + + BUG_ON(obj->pages_pin_count); + + ret = ops->get_pages(obj); + if (ret) + return ret; + + list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + return 0; } void @@ -1441,7 +1879,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); list_move_tail(&obj->ring_list, &ring->active_list); - obj->last_rendering_seqno = seqno; + obj->last_read_seqno = seqno; if (obj->fenced_gpu_access) { obj->last_fenced_seqno = seqno; @@ -1458,97 +1896,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, } static void -i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) -{ - list_del_init(&obj->ring_list); - obj->last_rendering_seqno = 0; - obj->last_fenced_seqno = 0; -} - -static void -i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; + BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); - list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); - i915_gem_object_move_off_active(obj); -} - -static void -i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + if (obj->pin_count) /* are we a framebuffer? */ + intel_mark_fb_idle(obj); list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - BUG_ON(!list_empty(&obj->gpu_write_list)); - BUG_ON(!obj->active); + list_del_init(&obj->ring_list); obj->ring = NULL; - i915_gem_object_move_off_active(obj); + obj->last_read_seqno = 0; + obj->last_write_seqno = 0; + obj->base.write_domain = 0; + + obj->last_fenced_seqno = 0; obj->fenced_gpu_access = false; obj->active = 0; - obj->pending_gpu_write = false; drm_gem_object_unreference(&obj->base); WARN_ON(i915_verify_lists(dev)); } -/* Immediately discard the backing storage */ -static void -i915_gem_object_truncate(struct drm_i915_gem_object *obj) -{ - struct inode *inode; - - /* Our goal here is to return as much of the memory as - * is possible back to the system as we are called from OOM. - * To do this we must instruct the shmfs to drop all of its - * backing pages, *now*. 
- */ - inode = obj->base.filp->f_path.dentry->d_inode; - shmem_truncate_range(inode, 0, (loff_t)-1); - - if (obj->base.map_list.map) - drm_gem_free_mmap_offset(&obj->base); - - obj->madv = __I915_MADV_PURGED; -} - -static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) -{ - return obj->madv == I915_MADV_DONTNEED; -} - -static void -i915_gem_process_flushing_list(struct intel_ring_buffer *ring, - uint32_t flush_domains) -{ - struct drm_i915_gem_object *obj, *next; - - list_for_each_entry_safe(obj, next, - &ring->gpu_write_list, - gpu_write_list) { - if (obj->base.write_domain & flush_domains) { - uint32_t old_write_domain = obj->base.write_domain; - - obj->base.write_domain = 0; - list_del_init(&obj->gpu_write_list); - i915_gem_object_move_to_active(obj, ring, - i915_gem_next_request_seqno(ring)); - - trace_i915_gem_object_change_domain(obj, - obj->base.read_domains, - old_write_domain); - } - } -} - static u32 i915_gem_get_seqno(struct drm_device *dev) { @@ -1589,15 +1965,16 @@ i915_add_request(struct intel_ring_buffer *ring, * is that the flush _must_ happen before the next request, no matter * what. */ - if (ring->gpu_caches_dirty) { - ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); - if (ret) - return ret; + ret = intel_ring_flush_all_caches(ring); + if (ret) + return ret; - ring->gpu_caches_dirty = false; + if (request == NULL) { + request = kmalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; } - BUG_ON(request == NULL); seqno = i915_gem_next_request_seqno(ring); /* Record the position of the start of the request so that @@ -1608,8 +1985,10 @@ i915_add_request(struct intel_ring_buffer *ring, request_ring_position = intel_ring_get_tail(ring); ret = ring->add_request(ring, &seqno); - if (ret) - return ret; + if (ret) { + kfree(request); + return ret; + } trace_i915_gem_request_add(ring, seqno); @@ -1619,6 +1998,7 @@ i915_add_request(struct intel_ring_buffer *ring, request->emitted_jiffies = jiffies; was_empty = list_empty(&ring->request_list); list_add_tail(&request->list, &ring->request_list); + request->file_priv = NULL; if (file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -1638,13 +2018,13 @@ i915_add_request(struct intel_ring_buffer *ring, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } - if (was_empty) + if (was_empty) { queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + intel_mark_busy(dev_priv->dev); + } } - WARN_ON(!list_empty(&ring->gpu_write_list)); - return 0; } @@ -1686,8 +2066,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, struct drm_i915_gem_object, ring_list); - obj->base.write_domain = 0; - list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_inactive(obj); } } @@ -1723,20 +2101,6 @@ void i915_gem_reset(struct drm_device *dev) for_each_ring(ring, dev_priv, i) i915_gem_reset_ring_lists(dev_priv, ring); - /* Remove anything from the flushing lists. The GPU cache is likely - * to be lost on reset along with the data, so simply move the - * lost bo to the inactive list. - */ - while (!list_empty(&dev_priv->mm.flushing_list)) { - obj = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - mm_list); - - obj->base.write_domain = 0; - list_del_init(&obj->gpu_write_list); - i915_gem_object_move_to_inactive(obj); - } - /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. 
*/ @@ -1765,7 +2129,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) WARN_ON(i915_verify_lists(ring->dev)); - seqno = ring->get_seqno(ring); + seqno = ring->get_seqno(ring, true); for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) if (seqno >= ring->sync_seqno[i]) @@ -1804,13 +2168,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) struct drm_i915_gem_object, ring_list); - if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) + if (!i915_seqno_passed(seqno, obj->last_read_seqno)) break; - if (obj->base.write_domain != 0) - i915_gem_object_move_to_flushing(obj); - else - i915_gem_object_move_to_inactive(obj); + i915_gem_object_move_to_inactive(obj); } if (unlikely(ring->trace_irq_seqno && @@ -1848,225 +2209,29 @@ i915_gem_retire_work_handler(struct work_struct *work) /* Come back later if the device is busy... */ if (!mutex_trylock(&dev->struct_mutex)) { - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); - return; - } - - i915_gem_retire_requests(dev); - - /* Send a periodic flush down the ring so we don't hold onto GEM - * objects indefinitely. - */ - idle = true; - for_each_ring(ring, dev_priv, i) { - if (ring->gpu_caches_dirty) { - struct drm_i915_gem_request *request; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL || - i915_add_request(ring, NULL, request)) - kfree(request); - } - - idle &= list_empty(&ring->request_list); - } - - if (!dev_priv->mm.suspended && !idle) - queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); - - mutex_unlock(&dev->struct_mutex); -} - -int -i915_gem_check_wedge(struct drm_i915_private *dev_priv, - bool interruptible) -{ - if (atomic_read(&dev_priv->mm.wedged)) { - struct completion *x = &dev_priv->error_completion; - bool recovery_complete; - unsigned long flags; - - /* Give the error handler a chance to run. */ - spin_lock_irqsave(&x->wait.lock, flags); - recovery_complete = x->done > 0; - spin_unlock_irqrestore(&x->wait.lock, flags); - - /* Non-interruptible callers can't handle -EAGAIN, hence return - * -EIO unconditionally for these. */ - if (!interruptible) - return -EIO; - - /* Recovery complete, but still wedged means reset failure. */ - if (recovery_complete) - return -EIO; - - return -EAGAIN; - } - - return 0; -} - -/* - * Compare seqno against outstanding lazy request. Emit a request if they are - * equal. - */ -static int -i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) -{ - int ret = 0; - - BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - - if (seqno == ring->outstanding_lazy_request) { - struct drm_i915_gem_request *request; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return -ENOMEM; - - ret = i915_add_request(ring, NULL, request); - if (ret) { - kfree(request); - return ret; - } - - BUG_ON(seqno != request->seqno); - } - - return ret; -} - -/** - * __wait_seqno - wait until execution of seqno has finished - * @ring: the ring expected to report seqno - * @seqno: duh! - * @interruptible: do an interruptible wait (normally yes) - * @timeout: in - how long to wait (NULL forever); out - how much time remaining - * - * Returns 0 if the seqno was found within the alloted time. Else returns the - * errno with remaining time filled in timeout argument. 
- */ -static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, - bool interruptible, struct timespec *timeout) -{ - drm_i915_private_t *dev_priv = ring->dev->dev_private; - struct timespec before, now, wait_time={1,0}; - unsigned long timeout_jiffies; - long end; - bool wait_forever = true; - int ret; - - if (i915_seqno_passed(ring->get_seqno(ring), seqno)) - return 0; - - trace_i915_gem_request_wait_begin(ring, seqno); - - if (timeout != NULL) { - wait_time = *timeout; - wait_forever = false; - } - - timeout_jiffies = timespec_to_jiffies(&wait_time); - - if (WARN_ON(!ring->irq_get(ring))) - return -ENODEV; - - /* Record current time in case interrupted by signal, or wedged * */ - getrawmonotonic(&before); - -#define EXIT_COND \ - (i915_seqno_passed(ring->get_seqno(ring), seqno) || \ - atomic_read(&dev_priv->mm.wedged)) - do { - if (interruptible) - end = wait_event_interruptible_timeout(ring->irq_queue, - EXIT_COND, - timeout_jiffies); - else - end = wait_event_timeout(ring->irq_queue, EXIT_COND, - timeout_jiffies); - - ret = i915_gem_check_wedge(dev_priv, interruptible); - if (ret) - end = ret; - } while (end == 0 && wait_forever); - - getrawmonotonic(&now); - - ring->irq_put(ring); - trace_i915_gem_request_wait_end(ring, seqno); -#undef EXIT_COND - - if (timeout) { - struct timespec sleep_time = timespec_sub(now, before); - *timeout = timespec_sub(*timeout, sleep_time); - } - - switch (end) { - case -EIO: - case -EAGAIN: /* Wedged */ - case -ERESTARTSYS: /* Signal */ - return (int)end; - case 0: /* Timeout */ - if (timeout) - set_normalized_timespec(timeout, 0, 0); - return -ETIME; - default: /* Completed */ - WARN_ON(end < 0); /* We're not aware of other errors */ - return 0; - } -} - -/** - * Waits for a sequence number to be signaled, and cleans up the - * request and object lists appropriately for that event. - */ -int -i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno) -{ - drm_i915_private_t *dev_priv = ring->dev->dev_private; - int ret = 0; - - BUG_ON(seqno == 0); - - ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); - if (ret) - return ret; - - ret = i915_gem_check_olr(ring, seqno); - if (ret) - return ret; - - ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL); - - return ret; -} - -/** - * Ensures that all rendering to the object has completed and the object is - * safe to unbind from the GTT or access from the CPU. - */ -int -i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj) -{ - int ret; + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + return; + } - /* This function only exists to support waiting for existing rendering, - * not for emitting required flushes. - */ - BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); + i915_gem_retire_requests(dev); - /* If there is rendering queued on the buffer being evicted, wait for - * it. + /* Send a periodic flush down the ring so we don't hold onto GEM + * objects indefinitely. 
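+ *
+ * The handler re-arms itself while there is outstanding work, the
+ * usual self-requeueing delayed-work pattern:
+ *
+ *	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);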
*/ - if (obj->active) { - ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno); - if (ret) - return ret; - i915_gem_retire_requests_ring(obj->ring); + idle = true; + for_each_ring(ring, dev_priv, i) { + if (ring->gpu_caches_dirty) + i915_add_request(ring, NULL, NULL); + + idle &= list_empty(&ring->request_list); } - return 0; + if (!dev_priv->mm.suspended && !idle) + queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); + if (idle) + intel_mark_idle(dev); + + mutex_unlock(&dev->struct_mutex); } /** @@ -2080,14 +2245,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) int ret; if (obj->active) { - ret = i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno); if (ret) return ret; - ret = i915_gem_check_olr(obj->ring, - obj->last_rendering_seqno); - if (ret) - return ret; i915_gem_retire_requests_ring(obj->ring); } @@ -2147,7 +2308,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto out; if (obj->active) { - seqno = obj->last_rendering_seqno; + seqno = obj->last_read_seqno; ring = obj->ring; } @@ -2202,11 +2363,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, return 0; if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) - return i915_gem_object_wait_rendering(obj); + return i915_gem_object_wait_rendering(obj, false); idx = intel_ring_sync_index(from, to); - seqno = obj->last_rendering_seqno; + seqno = obj->last_read_seqno; if (seqno <= from->sync_seqno[idx]) return 0; @@ -2260,6 +2421,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (obj->pin_count) return -EBUSY; + BUG_ON(obj->pages == NULL); + ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -2270,22 +2433,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) i915_gem_object_finish_gtt(obj); - /* Move the object to the CPU domain to ensure that - * any possible CPU writes while it's not in the GTT - * are flushed when we go to remap it. - */ - if (ret == 0) - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret == -ERESTARTSYS) - return ret; - if (ret) { - /* In the event of a disaster, abandon all caches and - * hope for the best. - */ - i915_gem_clflush_object(obj); - obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; - } - /* release the fence reg _after_ flushing */ ret = i915_gem_object_put_fence(obj); if (ret) @@ -2301,10 +2448,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) } i915_gem_gtt_finish_object(obj); - i915_gem_object_put_pages_gtt(obj); - - list_del_init(&obj->gtt_list); - list_del_init(&obj->mm_list); + list_del(&obj->mm_list); + list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); /* Avoid an unnecessary call to unbind on rebind. 
*/ obj->map_and_fenceable = true; @@ -2312,48 +2457,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) obj->gtt_space = NULL; obj->gtt_offset = 0; - if (i915_gem_object_is_purgeable(obj)) - i915_gem_object_truncate(obj); - - return ret; -} - -int -i915_gem_flush_ring(struct intel_ring_buffer *ring, - uint32_t invalidate_domains, - uint32_t flush_domains) -{ - int ret; - - if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0) - return 0; - - trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains); - - ret = ring->flush(ring, invalidate_domains, flush_domains); - if (ret) - return ret; - - if (flush_domains & I915_GEM_GPU_DOMAINS) - i915_gem_process_flushing_list(ring, flush_domains); - return 0; } static int i915_ring_idle(struct intel_ring_buffer *ring) { - int ret; - - if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) + if (list_empty(&ring->active_list)) return 0; - if (!list_empty(&ring->gpu_write_list)) { - ret = i915_gem_flush_ring(ring, - I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - if (ret) - return ret; - } - return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); } @@ -2372,10 +2483,6 @@ int i915_gpu_idle(struct drm_device *dev) ret = i915_ring_idle(ring); if (ret) return ret; - - /* Is the device fubar? */ - if (WARN_ON(!list_empty(&ring->gpu_write_list))) - return -EBUSY; } return 0; @@ -2548,21 +2655,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, static int i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) { - int ret; - - if (obj->fenced_gpu_access) { - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->ring, - 0, obj->base.write_domain); - if (ret) - return ret; - } - - obj->fenced_gpu_access = false; - } - if (obj->last_fenced_seqno) { - ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); + int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); if (ret) return ret; @@ -2575,6 +2669,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) mb(); + obj->fenced_gpu_access = false; return 0; } @@ -2694,18 +2789,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) return 0; } +static bool i915_gem_valid_gtt_space(struct drm_device *dev, + struct drm_mm_node *gtt_space, + unsigned long cache_level) +{ + struct drm_mm_node *other; + + /* On non-LLC machines we have to be careful when putting differing + * types of snoopable memory together to avoid the prefetcher + * crossing memory domains and dying.
+ */
+ if (HAS_LLC(dev))
+ return true;
+
+ if (gtt_space == NULL)
+ return true;
+
+ if (list_empty(&gtt_space->node_list))
+ return true;
+
+ other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
+ if (other->allocated && !other->hole_follows && other->color != cache_level)
+ return false;
+
+ other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
+ if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ return false;
+
+ return true;
+}
+
+static void i915_gem_verify_gtt(struct drm_device *dev)
+{
+#if WATCH_GTT
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ int err = 0;
+
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->gtt_space == NULL) {
+ printk(KERN_ERR "object found on GTT list with no space reserved\n");
+ err++;
+ continue;
+ }
+
+ if (obj->cache_level != obj->gtt_space->color) {
+ printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level,
+ obj->gtt_space->color);
+ err++;
+ continue;
+ }
+
+ if (!i915_gem_valid_gtt_space(dev,
+ obj->gtt_space,
+ obj->cache_level)) {
+ printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
+ obj->gtt_space->start,
+ obj->gtt_space->start + obj->gtt_space->size,
+ obj->cache_level);
+ err++;
+ continue;
+ }
+ }
+
+ WARN_ON(err);
+#endif
+}
+
 /**
 * Finds free space in the GTT aperture and binds the object there.
 */
 static int
 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 unsigned alignment,
- bool map_and_fenceable)
+ bool map_and_fenceable,
+ bool nonblocking)
 {
 struct drm_device *dev = obj->base.dev;
 drm_i915_private_t *dev_priv = dev->dev_private;
 struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 u32 size, fence_size, fence_alignment, unfenced_alignment;
 bool mappable, fenceable;
 int ret;
@@ -2745,89 +2910,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 return -E2BIG;
 }
 
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ret;
+
 search_free:
 if (map_and_fenceable)
 free_space =
- drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- size, alignment,
- 0, dev_priv->mm.gtt_mappable_end,
- 0);
+ drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ 0, dev_priv->mm.gtt_mappable_end,
+ false);
 else
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- size, alignment, 0);
+ free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
+ size, alignment, obj->cache_level,
+ false);
 
 if (free_space != NULL) {
 if (map_and_fenceable)
 obj->gtt_space =
 drm_mm_get_block_range_generic(free_space,
- size, alignment, 0,
+ size, alignment, obj->cache_level, 0,
 dev_priv->mm.gtt_mappable_end,
- 0);
+ false);
 else
 obj->gtt_space =
- drm_mm_get_block(free_space, size, alignment);
+ drm_mm_get_block_generic(free_space,
+ size, alignment, obj->cache_level,
+ false);
 }
 if (obj->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
 * fitting our object in, we're out of memory. 
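i915_gem_valid_gtt_space() above captures the coloring rule: on machines without a shared last-level cache, allocations of differing cache levels must be separated by a hole, or the prefetcher can walk across a snoop boundary. A self-contained sketch of that neighbour check, with fields that loosely mirror struct drm_mm_node (hypothetical, not the kernel API):

#include <stdbool.h>
#include <stddef.h>

struct node {
    struct node *prev, *next;   /* neighbours in GTT address order */
    bool allocated;
    bool hole_follows;          /* free gap between this node and the next */
    unsigned long color;        /* the object's cache level */
};

/* Placement is valid only if every allocated neighbour of a different
 * color is separated from us by a hole (the guard gap). */
static bool valid_placement(const struct node *n)
{
    if (n->prev && n->prev->allocated &&
        n->prev->color != n->color && !n->prev->hole_follows)
        return false;
    if (n->next && n->next->allocated &&
        n->next->color != n->color && !n->hole_follows)
        return false;
    return true;
}

The allocator side of this contract is the color_adjust hook added to i915_gem_gtt.c later in this patch, which is what creates the guard holes this test expects.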
- */ ret = i915_gem_evict_something(dev, size, alignment, - map_and_fenceable); + obj->cache_level, + map_and_fenceable, + nonblocking); if (ret) return ret; goto search_free; } - - ret = i915_gem_object_get_pages_gtt(obj, gfpmask); - if (ret) { + if (WARN_ON(!i915_gem_valid_gtt_space(dev, + obj->gtt_space, + obj->cache_level))) { drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; - - if (ret == -ENOMEM) { - /* first try to reclaim some memory by clearing the GTT */ - ret = i915_gem_evict_everything(dev, false); - if (ret) { - /* now try to shrink everyone else */ - if (gfpmask) { - gfpmask = 0; - goto search_free; - } - - return -ENOMEM; - } - - goto search_free; - } - - return ret; + return -EINVAL; } + ret = i915_gem_gtt_prepare_object(obj); if (ret) { - i915_gem_object_put_pages_gtt(obj); drm_mm_put_block(obj->gtt_space); obj->gtt_space = NULL; - - if (i915_gem_evict_everything(dev, false)) - return ret; - - goto search_free; + return ret; } if (!dev_priv->mm.aliasing_ppgtt) i915_gem_gtt_bind_object(obj, obj->cache_level); - list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); + list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - /* Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); - BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - obj->gtt_offset = obj->gtt_space->start; fenceable = @@ -2840,6 +2983,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, obj->map_and_fenceable = mappable && fenceable; trace_i915_gem_object_bind(obj, map_and_fenceable); + i915_gem_verify_gtt(dev); return 0; } @@ -2866,18 +3010,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) trace_i915_gem_object_clflush(obj); - drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); -} - -/** Flushes any GPU write domain for the object if it's dirty. */ -static int -i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj) -{ - if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) - return 0; - - /* Queue the GPU write cache flushing we need. */ - return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); + drm_clflush_sg(obj->pages); } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2946,16 +3079,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) return 0; - ret = i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_wait_rendering(obj, !write); if (ret) return ret; - if (obj->pending_gpu_write || write) { - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; - } - i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -2998,6 +3125,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } + if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + } + if (obj->gtt_space) { ret = i915_gem_object_finish_gpu(obj); if (ret) @@ -3009,7 +3142,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * registers with snooped memory, so relinquish any fences * currently pointing to our region in the aperture. 
*/ - if (INTEL_INFO(obj->base.dev)->gen < 6) { + if (INTEL_INFO(dev)->gen < 6) { ret = i915_gem_object_put_fence(obj); if (ret) return ret; @@ -3020,6 +3153,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); + + obj->gtt_space->color = cache_level; } if (cache_level == I915_CACHE_NONE) { @@ -3046,9 +3181,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, } obj->cache_level = cache_level; + i915_gem_verify_gtt(dev); return 0; } +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + args->caching = obj->cache_level != I915_CACHE_NONE; + + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + enum i915_cache_level level; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + switch (args->caching) { + case I915_CACHING_NONE: + level = I915_CACHE_NONE; + break; + case I915_CACHING_CACHED: + level = I915_CACHE_LLC; + break; + default: + return -EINVAL; + } + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); + if (&obj->base == NULL) { + ret = -ENOENT; + goto unlock; + } + + ret = i915_gem_object_set_cache_level(obj, level); + + drm_gem_object_unreference(&obj->base); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + /* * Prepare buffer for display plane (scanout, cursors, etc). * Can be called from an uninterruptible phase (modesetting) and allows @@ -3062,10 +3260,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 old_read_domains, old_write_domain; int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj); - if (ret) - return ret; - if (pipelined != obj->ring) { ret = i915_gem_object_sync(obj, pipelined); if (ret) @@ -3089,7 +3283,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_object_pin(obj, alignment, true); + ret = i915_gem_object_pin(obj, alignment, true, false); if (ret) return ret; @@ -3101,7 +3295,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, /* It should now be out of any other write domains, and we can update * the domain values for our changes. 
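The new get/set caching ioctls above expose the object cache level to userspace. A hedged usage sketch, assuming the drm_i915_gem_caching uapi this series adds (a handle plus caching field, with I915_CACHING_CACHED selecting LLC-snooped memory):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Request LLC-snooped backing for a GEM object; fd is an open DRM fd.
 * Returns 0 on success, -1 with errno set otherwise. */
static int set_cached(int fd, unsigned int handle)
{
    struct drm_i915_gem_caching arg;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;
    arg.caching = I915_CACHING_CACHED;  /* kernel maps this to I915_CACHE_LLC */

    return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}

On non-LLC hardware the kernel may have to unbind and rebind the object to satisfy the coloring constraint, which is why i915_gem_object_set_cache_level() above begins with a validity check against the current placement.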
*/ - BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->base.write_domain = 0; obj->base.read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, @@ -3119,13 +3313,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) return 0; - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) { - ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain); - if (ret) - return ret; - } - - ret = i915_gem_object_wait_rendering(obj); + ret = i915_gem_object_wait_rendering(obj, false); if (ret) return ret; @@ -3149,16 +3337,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) return 0; - ret = i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_wait_rendering(obj, !write); if (ret) return ret; - if (write || obj->pending_gpu_write) { - ret = i915_gem_object_wait_rendering(obj); - if (ret) - return ret; - } - i915_gem_object_flush_gtt_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -3238,11 +3420,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, - bool map_and_fenceable) + bool map_and_fenceable, + bool nonblocking) { int ret; - BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) + return -EBUSY; if (obj->gtt_space != NULL) { if ((alignment && obj->gtt_offset & (alignment - 1)) || @@ -3262,7 +3446,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (obj->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment, - map_and_fenceable); + map_and_fenceable, + nonblocking); if (ret) return ret; } @@ -3320,7 +3505,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj->user_pin_count++; obj->pin_filp = file; if (obj->user_pin_count == 1) { - ret = i915_gem_object_pin(obj, args->alignment, true); + ret = i915_gem_object_pin(obj, args->alignment, true, false); if (ret) goto out; } @@ -3400,6 +3585,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_flush_active(obj); args->busy = obj->active; + if (obj->ring) { + BUILD_BUG_ON(I915_NUM_RINGS > 16); + args->busy |= intel_ring_flag(obj->ring) << 16; + } drm_gem_object_unreference(&obj->base); unlock: @@ -3448,9 +3637,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (obj->madv != __I915_MADV_PURGED) obj->madv = args->madv; - /* if the object is no longer bound, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj) && - obj->gtt_space == NULL) + /* if the object is no longer attached, discard its backing storage */ + if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) i915_gem_object_truncate(obj); args->retained = obj->madv != __I915_MADV_PURGED; @@ -3462,10 +3650,32 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return ret; } +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops) +{ + INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->gtt_list); + INIT_LIST_HEAD(&obj->ring_list); + INIT_LIST_HEAD(&obj->exec_list); + + obj->ops = ops; + + obj->fence_reg = I915_FENCE_REG_NONE; + obj->madv = I915_MADV_WILLNEED; + /* Avoid an unnecessary call to unbind on the first bind. 
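The busy-ioctl hunk above starts reporting which ring last touched the object in the upper 16 bits of args->busy, while bit 0 keeps its historical meaning; the BUILD_BUG_ON guards exactly this packing. A small decoding sketch for userspace, assuming ring numbering follows the kernel's ring ids:

#include <stdbool.h>
#include <stdint.h>

/* args->busy layout after this change: bit 0 = object still busy,
 * bits 16+ = intel_ring_flag() (1 << ring id) of the object's ring. */
static bool busy_on_ring(uint32_t busy, unsigned int ring_id)
{
    if (!(busy & 1))
        return false;
    return (busy >> 16) & (1u << ring_id);
}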
*/ + obj->map_and_fenceable = true; + + i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); +} + +static const struct drm_i915_gem_object_ops i915_gem_object_ops = { + .get_pages = i915_gem_object_get_pages_gtt, + .put_pages = i915_gem_object_put_pages_gtt, +}; + struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct address_space *mapping; u32 mask; @@ -3489,7 +3699,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; mapping_set_gfp_mask(mapping, mask); - i915_gem_info_add_obj(dev_priv, size); + i915_gem_object_init(obj, &i915_gem_object_ops); obj->base.write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU; @@ -3511,17 +3721,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, } else obj->cache_level = I915_CACHE_NONE; - obj->base.driver_private = NULL; - obj->fence_reg = I915_FENCE_REG_NONE; - INIT_LIST_HEAD(&obj->mm_list); - INIT_LIST_HEAD(&obj->gtt_list); - INIT_LIST_HEAD(&obj->ring_list); - INIT_LIST_HEAD(&obj->exec_list); - INIT_LIST_HEAD(&obj->gpu_write_list); - obj->madv = I915_MADV_WILLNEED; - /* Avoid an unnecessary call to unbind on the first bind. */ - obj->map_and_fenceable = true; - return obj; } @@ -3540,9 +3739,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); - if (gem_obj->import_attach) - drm_prime_gem_destroy(gem_obj, obj->sg_table); - if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); @@ -3558,8 +3754,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) dev_priv->mm.interruptible = was_interruptible; } - if (obj->base.map_list.map) - drm_gem_free_mmap_offset(&obj->base); + obj->pages_pin_count = 0; + i915_gem_object_put_pages(obj); + i915_gem_object_free_mmap_offset(obj); + + BUG_ON(obj->pages); + + if (obj->base.import_attach) + drm_prime_gem_destroy(&obj->base, NULL); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(dev_priv, obj->base.size); @@ -3590,7 +3792,7 @@ i915_gem_idle(struct drm_device *dev) /* Under UMS, be paranoid and evict. 
*/ if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_gem_evict_everything(dev, false); + i915_gem_evict_everything(dev); i915_gem_reset_fences(dev); @@ -3891,7 +4093,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); mutex_unlock(&dev->struct_mutex); @@ -3939,7 +4140,6 @@ init_ring_lists(struct intel_ring_buffer *ring) { INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); - INIT_LIST_HEAD(&ring->gpu_write_list); } void @@ -3949,10 +4149,10 @@ i915_gem_load(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; INIT_LIST_HEAD(&dev_priv->mm.active_list); - INIT_LIST_HEAD(&dev_priv->mm.flushing_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->mm.unbound_list); + INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); - INIT_LIST_HEAD(&dev_priv->mm.gtt_list); for (i = 0; i < I915_NUM_RINGS; i++) init_ring_lists(&dev_priv->ring[i]); for (i = 0; i < I915_MAX_NUM_FENCES; i++) @@ -4196,18 +4396,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) spin_unlock(&file_priv->mm.lock); } -static int -i915_gpu_is_active(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - int lists_empty; - - lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->mm.active_list); - - return !lists_empty; -} - static int i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) { @@ -4216,60 +4404,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) struct drm_i915_private, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj, *next; + struct drm_i915_gem_object *obj; int nr_to_scan = sc->nr_to_scan; int cnt; if (!mutex_trylock(&dev->struct_mutex)) return 0; - /* "fast-path" to count number of available objects */ - if (nr_to_scan == 0) { - cnt = 0; - list_for_each_entry(obj, - &dev_priv->mm.inactive_list, - mm_list) - cnt++; - mutex_unlock(&dev->struct_mutex); - return cnt / 100 * sysctl_vfs_cache_pressure; - } - -rescan: - /* first scan for clean buffers */ - i915_gem_retire_requests(dev); - - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, - mm_list) { - if (i915_gem_object_is_purgeable(obj)) { - if (i915_gem_object_unbind(obj) == 0 && - --nr_to_scan == 0) - break; - } + if (nr_to_scan) { + nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); + if (nr_to_scan > 0) + i915_gem_shrink_all(dev_priv); } - /* second pass, evict/count anything still on the inactive list */ cnt = 0; - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, - mm_list) { - if (nr_to_scan && - i915_gem_object_unbind(obj) == 0) - nr_to_scan--; - else - cnt++; - } + list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) + if (obj->pages_pin_count == 0) + cnt += obj->base.size >> PAGE_SHIFT; + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + if (obj->pin_count == 0 && obj->pages_pin_count == 0) + cnt += obj->base.size >> PAGE_SHIFT; - if (nr_to_scan && i915_gpu_is_active(dev)) { - /* - * We are desperate for pages, so as a last resort, wait - * for the GPU to finish and discard whatever we can. - * This has a dramatic impact to reduce the number of - * OOM-killer events whilst running the GPU aggressively. 
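The rewritten shrinker above stops rescanning after idling the GPU; it purges first, shrinks everything only if that was not enough, and reports capacity in pages rather than a scaled object count. A self-contained model of the new accounting rule, with flat arrays standing in for the kernel's lists:

#include <stddef.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

struct gem_obj {
    size_t size;            /* object size in bytes */
    int pin_count;          /* GTT pin count */
    int pages_pin_count;    /* backing-storage pin count */
};

/* Pages the shrinker may reclaim: every unpinned unbound object, plus
 * bound objects that are neither GTT-pinned nor page-pinned. */
static long count_reclaimable(const struct gem_obj *unbound, size_t n_unbound,
                              const struct gem_obj *bound, size_t n_bound)
{
    long cnt = 0;
    size_t i;

    for (i = 0; i < n_unbound; i++)
        if (unbound[i].pages_pin_count == 0)
            cnt += unbound[i].size >> PAGE_SHIFT;
    for (i = 0; i < n_bound; i++)
        if (bound[i].pin_count == 0 && bound[i].pages_pin_count == 0)
            cnt += bound[i].size >> PAGE_SHIFT;
    return cnt;
}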
- */ - if (i915_gpu_idle(dev) == 0) - goto rescan; - } mutex_unlock(&dev->struct_mutex); - return cnt / 100 * sysctl_vfs_cache_pressure; + return cnt; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c index a9d58d72bb4d..4aa7ecf77ede 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c @@ -97,8 +97,7 @@ static struct i915_hw_context * i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); -static int do_switch(struct drm_i915_gem_object *from_obj, - struct i915_hw_context *to, u32 seqno); +static int do_switch(struct i915_hw_context *to); static int get_context_size(struct drm_device *dev) { @@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev) break; case 7: reg = I915_READ(GEN7_CXT_SIZE); - ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; + if (IS_HASWELL(dev)) + ret = HSW_CXT_TOTAL_SIZE(reg) * 64; + else + ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; break; default: BUG(); @@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv) * default context. */ dev_priv->ring[RCS].default_context = ctx; - ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); - if (ret) { - do_destroy(ctx); - return ret; - } + ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); + if (ret) + goto err_destroy; - ret = do_switch(NULL, ctx, 0); - if (ret) { - i915_gem_object_unpin(ctx->obj); - do_destroy(ctx); - } else { - DRM_DEBUG_DRIVER("Default HW context loaded\n"); - } + ret = do_switch(ctx); + if (ret) + goto err_unpin; + DRM_DEBUG_DRIVER("Default HW context loaded\n"); + return 0; + +err_unpin: + i915_gem_object_unpin(ctx->obj); +err_destroy: + do_destroy(ctx); return ret; } @@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring, return ret; } -static int do_switch(struct drm_i915_gem_object *from_obj, - struct i915_hw_context *to, - u32 seqno) +static int do_switch(struct i915_hw_context *to) { - struct intel_ring_buffer *ring = NULL; + struct intel_ring_buffer *ring = to->ring; + struct drm_i915_gem_object *from_obj = ring->last_context_obj; u32 hw_flags = 0; int ret; - BUG_ON(to == NULL); BUG_ON(from_obj != NULL && from_obj->pin_count == 0); - ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); + if (from_obj == to->obj) + return 0; + + ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); if (ret) return ret; @@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj, else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */ hw_flags |= MI_FORCE_RESTORE; - ring = to->ring; ret = mi_set_context(ring, to, hw_flags); if (ret) { i915_gem_object_unpin(to->obj); @@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj, * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from_obj != NULL) { + u32 seqno = i915_gem_next_request_seqno(ring); from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; i915_gem_object_move_to_active(from_obj, ring, seqno); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the @@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj, * swapped, but there is no way to do that yet. 
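do_switch() now takes only the target context: the ring remembers its last context object, and a switch to the same context bails out before any pinning. A minimal sketch of that control flow with stand-in types (hw_set_context() below is a stub for mi_set_context(), not the kernel function):

struct ctx { int dummy; };
struct ring { struct ctx *last_ctx; };

/* Stub standing in for mi_set_context(); always succeeds here. */
static int hw_set_context(struct ring *r, struct ctx *to)
{
    (void)r; (void)to;
    return 0;
}

static int do_switch_model(struct ring *r, struct ctx *to)
{
    int ret;

    if (r->last_ctx == to)      /* same context, nothing to emit */
        return 0;

    ret = hw_set_context(r, to);
    if (ret)
        return ret;             /* caller unwinds pin/destroy as above */

    r->last_ctx = to;
    return 0;
}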
*/ from_obj->dirty = 1; - BUG_ON(from_obj->ring != to->ring); + BUG_ON(from_obj->ring != ring); i915_gem_object_unpin(from_obj); drm_gem_object_unreference(&from_obj->base); @@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring, int to_id) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct drm_i915_file_private *file_priv = NULL; struct i915_hw_context *to; - struct drm_i915_gem_object *from_obj = ring->last_context_obj; if (dev_priv->hw_contexts_disabled) return 0; @@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring, if (ring != &dev_priv->ring[RCS]) return 0; - if (file) - file_priv = file->driver_priv; - if (to_id == DEFAULT_CONTEXT_ID) { to = ring->default_context; } else { - to = i915_gem_context_get(file_priv, to_id); + if (file == NULL) + return -EINVAL; + + to = i915_gem_context_get(file->driver_priv, to_id); if (to == NULL) return -ENOENT; } - if (from_obj == to->obj) - return 0; - - return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring)); + return do_switch(to); } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c index aa308e1337db..ca3497e1108c 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -28,35 +28,62 @@ #include static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction dir) + enum dma_data_direction dir) { struct drm_i915_gem_object *obj = attachment->dmabuf->priv; - struct drm_device *dev = obj->base.dev; - int npages = obj->base.size / PAGE_SIZE; - struct sg_table *sg = NULL; - int ret; - int nents; + struct sg_table *st; + struct scatterlist *src, *dst; + int ret, i; - ret = i915_mutex_lock_interruptible(dev); + ret = i915_mutex_lock_interruptible(obj->base.dev); if (ret) return ERR_PTR(ret); - if (!obj->pages) { - ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); - if (ret) - goto out; + ret = i915_gem_object_get_pages(obj); + if (ret) { + st = ERR_PTR(ret); + goto out; + } + + /* Copy sg so that we make an independent mapping */ + st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (st == NULL) { + st = ERR_PTR(-ENOMEM); + goto out; } - /* link the pages into an SG then map the sg */ - sg = drm_prime_pages_to_sg(obj->pages, npages); - nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir); + ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); + if (ret) { + kfree(st); + st = ERR_PTR(ret); + goto out; + } + + src = obj->pages->sgl; + dst = st->sgl; + for (i = 0; i < obj->pages->nents; i++) { + sg_set_page(dst, sg_page(src), PAGE_SIZE, 0); + dst = sg_next(dst); + src = sg_next(src); + } + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { + sg_free_table(st); + kfree(st); + st = ERR_PTR(-ENOMEM); + goto out; + } + + i915_gem_object_pin_pages(obj); + out: - mutex_unlock(&dev->struct_mutex); - return sg; + mutex_unlock(&obj->base.dev->struct_mutex); + return st; } static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, - struct sg_table *sg, enum dma_data_direction dir) + struct sg_table *sg, + enum dma_data_direction dir) { dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); sg_free_table(sg); @@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) { struct drm_i915_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->base.dev; - int ret; + struct 
scatterlist *sg; + struct page **pages; + int ret, i; ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) goto out_unlock; } - if (!obj->pages) { - ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ERR_PTR(ret); - } - } + ret = i915_gem_object_get_pages(obj); + if (ret) + goto error; - obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); - if (!obj->dma_buf_vmapping) { - DRM_ERROR("failed to vmap object\n"); - goto out_unlock; - } + ret = -ENOMEM; + + pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *)); + if (pages == NULL) + goto error; + + for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) + pages[i] = sg_page(sg); + + obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL); + drm_free_large(pages); + + if (!obj->dma_buf_vmapping) + goto error; obj->vmapping_count = 1; + i915_gem_object_pin_pages(obj); out_unlock: mutex_unlock(&dev->struct_mutex); return obj->dma_buf_vmapping; + +error: + mutex_unlock(&dev->struct_mutex); + return ERR_PTR(ret); } static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) @@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) if (ret) return; - --obj->vmapping_count; - if (obj->vmapping_count == 0) { + if (--obj->vmapping_count == 0) { vunmap(obj->dma_buf_vmapping); obj->dma_buf_vmapping = NULL; + + i915_gem_object_unpin_pages(obj); } mutex_unlock(&dev->struct_mutex); } @@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * return -EINVAL; } +static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) +{ + struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_device *dev = obj->base.dev; + int ret; + bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = i915_gem_object_set_to_cpu_domain(obj, write); + mutex_unlock(&dev->struct_mutex); + return ret; +} + static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, @@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .mmap = i915_gem_dmabuf_mmap, .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, + .begin_cpu_access = i915_gem_begin_cpu_access, }; struct dma_buf *i915_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *gem_obj, int flags) + struct drm_gem_object *gem_obj, int flags) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - return dma_buf_export(obj, &i915_dmabuf_ops, - obj->base.size, 0600); + return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600); +} + +static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) +{ + struct sg_table *sg; + + sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sg)) + return PTR_ERR(sg); + + obj->pages = sg; + obj->has_dma_mapping = true; + return 0; } +static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj) +{ + dma_buf_unmap_attachment(obj->base.import_attach, + obj->pages, DMA_BIDIRECTIONAL); + obj->has_dma_mapping = false; +} + +static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { + .get_pages = i915_gem_object_get_pages_dmabuf, + 
.put_pages = i915_gem_object_put_pages_dmabuf, +}; + struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) + struct dma_buf *dma_buf) { struct dma_buf_attachment *attach; - struct sg_table *sg; struct drm_i915_gem_object *obj; - int npages; - int size; int ret; /* is this one of own objects? */ @@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, if (IS_ERR(attach)) return ERR_CAST(attach); - sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); - goto fail_detach; - } - - size = dma_buf->size; - npages = size / PAGE_SIZE; obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (obj == NULL) { ret = -ENOMEM; - goto fail_unmap; + goto fail_detach; } - ret = drm_gem_private_object_init(dev, &obj->base, size); + ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); if (ret) { kfree(obj); - goto fail_unmap; + goto fail_detach; } - obj->sg_table = sg; + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); obj->base.import_attach = attach; return &obj->base; -fail_unmap: - dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); return ERR_PTR(ret); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_evict.c b/trunk/drivers/gpu/drm/i915/i915_gem_evict.c index eba0308f10e3..a2d8acde8550 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_evict.c @@ -44,7 +44,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) int i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, bool mappable) + unsigned alignment, unsigned cache_level, + bool mappable, bool nonblocking) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; @@ -79,11 +80,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, INIT_LIST_HEAD(&unwind_list); if (mappable) drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, - min_size, alignment, 0, + min_size, alignment, cache_level, 0, dev_priv->mm.gtt_mappable_end); else drm_mm_init_scan(&dev_priv->mm.gtt_space, - min_size, alignment, 0); + min_size, alignment, cache_level); /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { @@ -91,29 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, goto found; } - /* Now merge in the soon-to-be-expired objects... */ - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - /* Does the object require an outstanding flush? */ - if (obj->base.write_domain) - continue; - - if (mark_free(obj, &unwind_list)) - goto found; - } + if (nonblocking) + goto none; - /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { - if (mark_free(obj, &unwind_list)) - goto found; - } + /* Now merge in the soon-to-be-expired objects... */ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - if (!obj->base.write_domain) - continue; - if (mark_free(obj, &unwind_list)) goto found; } +none: /* Nothing found, clean up and bail out! 
*/ while (!list_empty(&unwind_list)) { obj = list_first_entry(&unwind_list, @@ -164,7 +152,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, } int -i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) +i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj, *next; @@ -172,12 +160,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) int ret; lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.active_list)); if (lists_empty) return -ENOSPC; - trace_i915_gem_evict_everything(dev, purgeable_only); + trace_i915_gem_evict_everything(dev); /* The gpu_idle will flush everything in the write domain to the * active list. Then we must move everything off the active list @@ -189,16 +176,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) i915_gem_retire_requests(dev); - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); - /* Having flushed everything, unbind() should never raise an error */ list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, mm_list) { - if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { - if (obj->pin_count == 0) - WARN_ON(i915_gem_object_unbind(obj)); - } - } + &dev_priv->mm.inactive_list, mm_list) + if (obj->pin_count == 0) + WARN_ON(i915_gem_object_unbind(obj)); return 0; } diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index ff2819ea0813..6a2f3e50c714 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -34,180 +34,6 @@ #include "intel_drv.h" #include -struct change_domains { - uint32_t invalidate_domains; - uint32_t flush_domains; - uint32_t flush_rings; - uint32_t flips; -}; - -/* - * Set the next domain for the specified object. This - * may not actually perform the necessary flushing/invaliding though, - * as that may want to be batched with other set_domain operations - * - * This is (we hope) the only really tricky part of gem. The goal - * is fairly simple -- track which caches hold bits of the object - * and make sure they remain coherent. A few concrete examples may - * help to explain how it works. For shorthand, we use the notation - * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the - * a pair of read and write domain masks. - * - * Case 1: the batch buffer - * - * 1. Allocated - * 2. Written by CPU - * 3. Mapped to GTT - * 4. Read by GPU - * 5. Unmapped from GTT - * 6. Freed - * - * Let's take these a step at a time - * - * 1. Allocated - * Pages allocated from the kernel may still have - * cache contents, so we set them to (CPU, CPU) always. - * 2. Written by CPU (using pwrite) - * The pwrite function calls set_domain (CPU, CPU) and - * this function does nothing (as nothing changes) - * 3. Mapped by GTT - * This function asserts that the object is not - * currently in any GPU-based read or write domains - * 4. Read by GPU - * i915_gem_execbuffer calls set_domain (COMMAND, 0). - * As write_domain is zero, this function adds in the - * current read domains (CPU+COMMAND, 0). - * flush_domains is set to CPU. - * invalidate_domains is set to COMMAND - * clflush is run to get data out of the CPU caches - * then i915_dev_set_domain calls i915_gem_flush to - * emit an MI_FLUSH and drm_agp_chipset_flush - * 5. 
Unmapped from GTT - * i915_gem_object_unbind calls set_domain (CPU, CPU) - * flush_domains and invalidate_domains end up both zero - * so no flushing/invalidating happens - * 6. Freed - * yay, done - * - * Case 2: The shared render buffer - * - * 1. Allocated - * 2. Mapped to GTT - * 3. Read/written by GPU - * 4. set_domain to (CPU,CPU) - * 5. Read/written by CPU - * 6. Read/written by GPU - * - * 1. Allocated - * Same as last example, (CPU, CPU) - * 2. Mapped to GTT - * Nothing changes (assertions find that it is not in the GPU) - * 3. Read/written by GPU - * execbuffer calls set_domain (RENDER, RENDER) - * flush_domains gets CPU - * invalidate_domains gets GPU - * clflush (obj) - * MI_FLUSH and drm_agp_chipset_flush - * 4. set_domain (CPU, CPU) - * flush_domains gets GPU - * invalidate_domains gets CPU - * wait_rendering (obj) to make sure all drawing is complete. - * This will include an MI_FLUSH to get the data from GPU - * to memory - * clflush (obj) to invalidate the CPU cache - * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) - * 5. Read/written by CPU - * cache lines are loaded and dirtied - * 6. Read written by GPU - * Same as last GPU access - * - * Case 3: The constant buffer - * - * 1. Allocated - * 2. Written by CPU - * 3. Read by GPU - * 4. Updated (written) by CPU again - * 5. Read by GPU - * - * 1. Allocated - * (CPU, CPU) - * 2. Written by CPU - * (CPU, CPU) - * 3. Read by GPU - * (CPU+RENDER, 0) - * flush_domains = CPU - * invalidate_domains = RENDER - * clflush (obj) - * MI_FLUSH - * drm_agp_chipset_flush - * 4. Updated (written) by CPU again - * (CPU, CPU) - * flush_domains = 0 (no previous write domain) - * invalidate_domains = 0 (no new read domains) - * 5. Read by GPU - * (CPU+RENDER, 0) - * flush_domains = CPU - * invalidate_domains = RENDER - * clflush (obj) - * MI_FLUSH - * drm_agp_chipset_flush - */ -static void -i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring, - struct change_domains *cd) -{ - uint32_t invalidate_domains = 0, flush_domains = 0; - - /* - * If the object isn't moving to a new write domain, - * let the object stay in multiple read domains - */ - if (obj->base.pending_write_domain == 0) - obj->base.pending_read_domains |= obj->base.read_domains; - - /* - * Flush the current write domain if - * the new read domains don't match. Invalidate - * any read domains which differ from the old - * write domain - */ - if (obj->base.write_domain && - (((obj->base.write_domain != obj->base.pending_read_domains || - obj->ring != ring)) || - (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { - flush_domains |= obj->base.write_domain; - invalidate_domains |= - obj->base.pending_read_domains & ~obj->base.write_domain; - } - /* - * Invalidate any read caches which may have - * stale data. That is, any new read domains. - */ - invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; - if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); - - if (obj->base.pending_write_domain) - cd->flips |= atomic_read(&obj->pending_flip); - - /* The actual obj->write_domain will be updated with - * pending_write_domain after we emit the accumulated flush for all - * of our domain changes in execbuffers (which clears objects' - * write_domains). So if we have a current write domain that we - * aren't changing, set pending_write_domain to that. 
- */ - if (flush_domains == 0 && obj->base.pending_write_domain == 0) - obj->base.pending_write_domain = obj->base.write_domain; - - cd->invalidate_domains |= invalidate_domains; - cd->flush_domains |= flush_domains; - if (flush_domains & I915_GEM_GPU_DOMAINS) - cd->flush_rings |= intel_ring_flag(obj->ring); - if (invalidate_domains & I915_GEM_GPU_DOMAINS) - cd->flush_rings |= intel_ring_flag(ring); -} - struct eb_objects { int and; struct hlist_head buckets[0]; @@ -218,6 +44,7 @@ eb_create(int size) { struct eb_objects *eb; int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; + BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); while (count > size) count >>= 1; eb = kzalloc(count*sizeof(struct hlist_head) + @@ -269,6 +96,7 @@ eb_destroy(struct eb_objects *eb) static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || + !obj->map_and_fenceable || obj->cache_level != I915_CACHE_NONE); } @@ -383,7 +211,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (ret) return ret; - vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); *(uint32_t *)(vaddr + page_offset) = reloc->delta; kunmap_atomic(vaddr); } else { @@ -504,7 +333,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, return ret; } -#define __EXEC_OBJECT_HAS_FENCE (1<<31) +#define __EXEC_OBJECT_HAS_PIN (1<<31) +#define __EXEC_OBJECT_HAS_FENCE (1<<30) static int need_reloc_mappable(struct drm_i915_gem_object *obj) @@ -514,9 +344,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) } static int -pin_and_fence_object(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) +i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence, need_mappable; @@ -528,15 +359,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); + ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); if (ret) return ret; + entry->flags |= __EXEC_OBJECT_HAS_PIN; + if (has_fenced_gpu_access) { if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { ret = i915_gem_object_get_fence(obj); if (ret) - goto err_unpin; + return ret; if (i915_gem_object_pin_fence(obj)) entry->flags |= __EXEC_OBJECT_HAS_FENCE; @@ -545,12 +378,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, } } + /* Ensure ppgtt mapping exists if needed */ + if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { + i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, + obj, obj->cache_level); + + obj->has_aliasing_ppgtt_mapping = 1; + } + entry->offset = obj->gtt_offset; return 0; +} -err_unpin: - i915_gem_object_unpin(obj); - return ret; +static void +i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) +{ + struct drm_i915_gem_exec_object2 *entry; + + if (!obj->gtt_space) + return; + + entry = obj->exec_entry; + + if (entry->flags & __EXEC_OBJECT_HAS_FENCE) + i915_gem_object_unpin_fence(obj); + + if (entry->flags & __EXEC_OBJECT_HAS_PIN) + i915_gem_object_unpin(obj); + + entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); 
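The __EXEC_OBJECT_HAS_PIN flag introduced above lets a single unreserve helper release exactly what reservation took, which is what collapses the old duplicated error paths. The rule, distilled (stub helpers, not the kernel functions):

#define HAS_PIN   (1u << 31)    /* stands in for __EXEC_OBJECT_HAS_PIN */
#define HAS_FENCE (1u << 30)    /* stands in for __EXEC_OBJECT_HAS_FENCE */

struct exec_entry {
    unsigned int flags;
};

static void unpin(struct exec_entry *e)   { (void)e; /* drop GTT pin */ }
static void unfence(struct exec_entry *e) { (void)e; /* drop fence pin */ }

/* Release only what reservation recorded, then clear the bits so a
 * second call from a shared error path is harmless. */
static void unreserve(struct exec_entry *e)
{
    if (e->flags & HAS_FENCE)
        unfence(e);
    if (e->flags & HAS_PIN)
        unpin(e);
    e->flags &= ~(HAS_FENCE | HAS_PIN);
}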
} static int @@ -558,11 +414,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct drm_file *file, struct list_head *objects) { - drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_i915_gem_object *obj; - int ret, retry; - bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; struct list_head ordered_objects; + bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; + int retry; INIT_LIST_HEAD(&ordered_objects); while (!list_empty(objects)) { @@ -587,6 +442,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; + obj->pending_fenced_gpu_access = false; } list_splice(&ordered_objects, objects); @@ -599,12 +455,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, * 2. Bind new objects. * 3. Decrement pin count. * - * This avoid unnecessary unbinding of later objects in order to makr + * This avoid unnecessary unbinding of later objects in order to make * room for the earlier objects *unless* we need to defragment. */ retry = 0; do { - ret = 0; + int ret = 0; /* Unbind any ill-fitting objects or pin. */ list_for_each_entry(obj, objects, exec_list) { @@ -624,7 +480,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); else - ret = pin_and_fence_object(obj, ring); + ret = i915_gem_execbuffer_reserve_object(obj, ring); if (ret) goto err; } @@ -634,77 +490,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, if (obj->gtt_space) continue; - ret = pin_and_fence_object(obj, ring); - if (ret) { - int ret_ignore; - - /* This can potentially raise a harmless - * -EINVAL if we failed to bind in the above - * call. It cannot raise -EINTR since we know - * that the bo is freshly bound and so will - * not need to be flushed or waited upon. - */ - ret_ignore = i915_gem_object_unbind(obj); - (void)ret_ignore; - WARN_ON(obj->gtt_space); - break; - } + ret = i915_gem_execbuffer_reserve_object(obj, ring); + if (ret) + goto err; } - /* Decrement pin count for bound objects */ - list_for_each_entry(obj, objects, exec_list) { - struct drm_i915_gem_exec_object2 *entry; - - if (!obj->gtt_space) - continue; - - entry = obj->exec_entry; - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - i915_gem_object_unpin_fence(obj); - entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; - } - - i915_gem_object_unpin(obj); - - /* ... and ensure ppgtt mapping exist if needed. */ - if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) { - i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, - obj, obj->cache_level); +err: /* Decrement pin count for bound objects */ + list_for_each_entry(obj, objects, exec_list) + i915_gem_execbuffer_unreserve_object(obj); - obj->has_aliasing_ppgtt_mapping = 1; - } - } - - if (ret != -ENOSPC || retry > 1) + if (ret != -ENOSPC || retry++) return ret; - /* First attempt, just clear anything that is purgeable. - * Second attempt, clear the entire GTT. 
- */ - ret = i915_gem_evict_everything(ring->dev, retry == 0); + ret = i915_gem_evict_everything(ring->dev); if (ret) return ret; - - retry++; } while (1); - -err: - list_for_each_entry_continue_reverse(obj, objects, exec_list) { - struct drm_i915_gem_exec_object2 *entry; - - if (!obj->gtt_space) - continue; - - entry = obj->exec_entry; - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - i915_gem_object_unpin_fence(obj); - entry->flags &= ~__EXEC_OBJECT_HAS_FENCE; - } - - i915_gem_object_unpin(obj); - } - - return ret; } static int @@ -810,18 +611,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, return ret; } -static void -i915_gem_execbuffer_flush(struct drm_device *dev, - uint32_t invalidate_domains, - uint32_t flush_domains) -{ - if (flush_domains & I915_GEM_DOMAIN_CPU) - intel_gtt_chipset_flush(); - - if (flush_domains & I915_GEM_DOMAIN_GTT) - wmb(); -} - static int i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) { @@ -854,48 +643,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) return 0; } - static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, struct list_head *objects) { struct drm_i915_gem_object *obj; - struct change_domains cd; + uint32_t flush_domains = 0; + uint32_t flips = 0; int ret; - memset(&cd, 0, sizeof(cd)); - list_for_each_entry(obj, objects, exec_list) - i915_gem_object_set_to_gpu_domain(obj, ring, &cd); - - if (cd.invalidate_domains | cd.flush_domains) { - i915_gem_execbuffer_flush(ring->dev, - cd.invalidate_domains, - cd.flush_domains); - } - - if (cd.flips) { - ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips); + list_for_each_entry(obj, objects, exec_list) { + ret = i915_gem_object_sync(obj, ring); if (ret) return ret; + + if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) + i915_gem_clflush_object(obj); + + if (obj->base.pending_write_domain) + flips |= atomic_read(&obj->pending_flip); + + flush_domains |= obj->base.write_domain; } - list_for_each_entry(obj, objects, exec_list) { - ret = i915_gem_object_sync(obj, ring); + if (flips) { + ret = i915_gem_execbuffer_wait_for_flips(ring, flips); if (ret) return ret; } + if (flush_domains & I915_GEM_DOMAIN_CPU) + intel_gtt_chipset_flush(); + + if (flush_domains & I915_GEM_DOMAIN_GTT) + wmb(); + /* Unconditionally invalidate gpu caches and ensure that we do flush * any residual writes from the previous batch. */ - ret = i915_gem_flush_ring(ring, - I915_GEM_GPU_DOMAINS, - ring->gpu_caches_dirty ? 
I915_GEM_GPU_DOMAINS : 0); - if (ret) - return ret; - - ring->gpu_caches_dirty = false; - return 0; + return intel_ring_invalidate_all_caches(ring); } static bool @@ -943,9 +729,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, struct drm_i915_gem_object *obj; list_for_each_entry(obj, objects, exec_list) { - u32 old_read = obj->base.read_domains; - u32 old_write = obj->base.write_domain; - + u32 old_read = obj->base.read_domains; + u32 old_write = obj->base.write_domain; obj->base.read_domains = obj->base.pending_read_domains; obj->base.write_domain = obj->base.pending_write_domain; @@ -954,17 +739,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, i915_gem_object_move_to_active(obj, ring, seqno); if (obj->base.write_domain) { obj->dirty = 1; - obj->pending_gpu_write = true; - list_move_tail(&obj->gpu_write_list, - &ring->gpu_write_list); + obj->last_write_seqno = seqno; if (obj->pin_count) /* check for potential scanout */ - intel_mark_busy(ring->dev, obj); + intel_mark_fb_busy(obj); } trace_i915_gem_object_change_domain(obj, old_read, old_write); } - - intel_mark_busy(ring->dev, NULL); } static void @@ -972,16 +753,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring) { - struct drm_i915_gem_request *request; - /* Unconditionally force add_request to emit a full flush. */ ring->gpu_caches_dirty = true; /* Add a breadcrumb for the completion of the batch buffer */ - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL || i915_add_request(ring, file, request)) { - kfree(request); - } + (void)i915_add_request(ring, file, NULL); } static int @@ -1327,8 +1103,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, return -ENOMEM; } ret = copy_from_user(exec_list, - (struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, + (void __user *)(uintptr_t)args->buffers_ptr, sizeof(*exec_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", @@ -1367,8 +1142,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) exec_list[i].offset = exec2_list[i].offset; /* ... and back out to userspace */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, + ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, exec_list, sizeof(*exec_list) * args->buffer_count); if (ret) { @@ -1422,8 +1196,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
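The execbuffer rework above replaces per-domain flush bookkeeping with one lazy flag: retiring a batch marks the ring's caches dirty, and the next batch starts with a single unconditional invalidate that also writes back anything still pending. A toy model of that handshake (hypothetical types, flush emission elided):

#include <stdbool.h>

struct ring_state {
    bool gpu_caches_dirty;
};

/* Retiring a batch defers the flush; it only records the debt. */
static void retire_commands(struct ring_state *r)
{
    r->gpu_caches_dirty = true;
}

/* The next batch pays it: one unconditional invalidate also writes back
 * whatever the previous batch left dirty. */
static int invalidate_all_caches(struct ring_state *r)
{
    /* a real implementation would emit the flush commands here */
    r->gpu_caches_dirty = false;
    return 0;
}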
*/ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, + ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, exec2_list, sizeof(*exec2_list) * args->buffer_count); if (ret) { diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index 60815b861ec2..47e427e4bc67 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -167,8 +167,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) } static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, - struct scatterlist *sg_list, - unsigned sg_len, + const struct sg_table *pages, unsigned first_entry, uint32_t pte_flags) { @@ -180,12 +179,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, struct scatterlist *sg; /* init sg walking */ - sg = sg_list; + sg = pages->sgl; i = 0; segment_len = sg_dma_len(sg) >> PAGE_SHIFT; m = 0; - while (i < sg_len) { + while (i < pages->nents) { pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { @@ -194,13 +193,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, pt_vaddr[j] = pte | pte_flags; /* grab the next page */ - m++; - if (m == segment_len) { - sg = sg_next(sg); - i++; - if (i == sg_len) + if (++m == segment_len) { + if (++i == pages->nents) break; + sg = sg_next(sg); segment_len = sg_dma_len(sg) >> PAGE_SHIFT; m = 0; } @@ -213,44 +210,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, } } -static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, - unsigned first_entry, unsigned num_entries, - struct page **pages, uint32_t pte_flags) -{ - uint32_t *pt_vaddr, pte; - unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; - unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; - unsigned last_pte, i; - dma_addr_t page_addr; - - while (num_entries) { - last_pte = first_pte + num_entries; - last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES); - - pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); - - for (i = first_pte; i < last_pte; i++) { - page_addr = page_to_phys(*pages); - pte = GEN6_PTE_ADDR_ENCODE(page_addr); - pt_vaddr[i] = pte | pte_flags; - - pages++; - } - - kunmap_atomic(pt_vaddr); - - num_entries -= last_pte - first_pte; - first_pte = 0; - act_pd++; - } -} - void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t pte_flags = GEN6_PTE_VALID; switch (cache_level) { @@ -261,7 +224,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, pte_flags |= GEN6_PTE_CACHE_LLC; break; case I915_CACHE_NONE: - if (IS_HASWELL(dev)) + if (IS_HASWELL(obj->base.dev)) pte_flags |= HSW_PTE_UNCACHED; else pte_flags |= GEN6_PTE_UNCACHED; @@ -270,26 +233,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, BUG(); } - if (obj->sg_table) { - i915_ppgtt_insert_sg_entries(ppgtt, - obj->sg_table->sgl, - obj->sg_table->nents, - obj->gtt_space->start >> PAGE_SHIFT, - pte_flags); - } else if (dev_priv->mm.gtt->needs_dmar) { - BUG_ON(!obj->sg_list); - - i915_ppgtt_insert_sg_entries(ppgtt, - obj->sg_list, - obj->num_sg, - obj->gtt_space->start >> PAGE_SHIFT, - pte_flags); - } else - i915_ppgtt_insert_pages(ppgtt, - obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT, - obj->pages, - pte_flags); + 
i915_ppgtt_insert_sg_entries(ppgtt, + obj->pages, + obj->gtt_space->start >> PAGE_SHIFT, + pte_flags); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, @@ -351,7 +298,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { i915_gem_clflush_object(obj); i915_gem_gtt_bind_object(obj, obj->cache_level); } @@ -361,44 +308,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - /* don't map imported dma buf objects */ - if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table) - return intel_gtt_map_memory(obj->pages, - obj->base.size >> PAGE_SHIFT, - &obj->sg_list, - &obj->num_sg); - else + if (obj->has_dma_mapping) return 0; + + if (!dma_map_sg(&obj->base.dev->pdev->dev, + obj->pages->sgl, obj->pages->nents, + PCI_DMA_BIDIRECTIONAL)) + return -ENOSPC; + + return 0; } void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); - if (obj->sg_table) { - intel_gtt_insert_sg_entries(obj->sg_table->sgl, - obj->sg_table->nents, - obj->gtt_space->start >> PAGE_SHIFT, - agp_type); - } else if (dev_priv->mm.gtt->needs_dmar) { - BUG_ON(!obj->sg_list); - - intel_gtt_insert_sg_entries(obj->sg_list, - obj->num_sg, - obj->gtt_space->start >> PAGE_SHIFT, - agp_type); - } else - intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT, - obj->pages, - agp_type); - + intel_gtt_insert_sg_entries(obj->pages, + obj->gtt_space->start >> PAGE_SHIFT, + agp_type); obj->has_global_gtt_mapping = 1; } @@ -418,14 +347,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) interruptible = do_idling(dev_priv); - if (obj->sg_list) { - intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); - obj->sg_list = NULL; - } + if (!obj->has_dma_mapping) + dma_unmap_sg(&dev->pdev->dev, + obj->pages->sgl, obj->pages->nents, + PCI_DMA_BIDIRECTIONAL); undo_idling(dev_priv, interruptible); } +static void i915_gtt_color_adjust(struct drm_mm_node *node, + unsigned long color, + unsigned long *start, + unsigned long *end) +{ + if (node->color != color) + *start += 4096; + + if (!list_empty(&node->node_list)) { + node = list_entry(node->node_list.next, + struct drm_mm_node, + node_list); + if (node->allocated && node->color != color) + *end -= 4096; + } +} + void i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start, unsigned long mappable_end, @@ -435,6 +381,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev, /* Substract the guard page ... 
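i915_gtt_color_adjust() above is the allocator half of the coloring scheme from i915_gem.c: when drm_mm evaluates a hole, the hook trims a page from any end whose neighbour carries a different color, creating the guard gap that i915_gem_valid_gtt_space() later checks. Reduced to its arithmetic (4096 assumed as the GTT page size, node walking elided):

/* Hole trimming for colored allocation: the usable range [start, end)
 * loses one 4 KiB page at each boundary shared with a neighbour of a
 * different color. */
static void color_adjust(unsigned long prev_color, unsigned long next_color,
                         unsigned long color,
                         unsigned long *start, unsigned long *end)
{
    if (prev_color != color)
        *start += 4096;
    if (next_color != color)
        *end -= 4096;
}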
@@ -435,6 +381,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 	/* Subtract the guard page ... */
 	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
 	dev_priv->mm.gtt_start = start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
index b964df51cec7..8093ecd2ea31 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -470,18 +470,20 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj->pages[i]);
-			set_page_dirty(obj->pages[i]);
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
 		}
 	}
 }
@@ -489,6 +491,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -502,8 +505,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj->pages[i]) & (1 << 17))
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c
index 5249640cce13..d7f0066538a9 100644
--- a/trunk/drivers/gpu/drm/i915/i915_irq.c
+++ b/trunk/drivers/gpu/drm/i915/i915_irq.c
@@ -296,11 +296,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_helper_hpd_irq_event(dev);
 }
 
-static void i915_handle_rps_change(struct drm_device *dev)
+/* defined in intel_pm.c */
+extern spinlock_t mchdev_lock;
+
+static void ironlake_handle_rps_change(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay = dev_priv->cur_delay;
+	u8 new_delay;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mchdev_lock, flags);
+
+	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
+
+	new_delay = dev_priv->ips.cur_delay;
 
 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
 	busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -310,19 +320,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
 
 	/* Handle RCS change request from hw */
 	if (busy_up > max_avg) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
+		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.cur_delay - 1;
+		if (new_delay < dev_priv->ips.max_delay)
+			new_delay = dev_priv->ips.max_delay;
 	} else if (busy_down < min_avg) {
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->min_delay)
-			new_delay = dev_priv->min_delay;
+		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.cur_delay + 1;
+		if (new_delay > dev_priv->ips.min_delay)
+			new_delay = dev_priv->ips.min_delay;
 	}
 
 	if
(ironlake_set_drps(dev, new_delay)) - dev_priv->cur_delay = new_delay; + dev_priv->ips.cur_delay = new_delay; + + spin_unlock_irqrestore(&mchdev_lock, flags); return; } @@ -335,7 +347,7 @@ static void notify_ring(struct drm_device *dev, if (ring->obj == NULL) return; - trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); + trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); wake_up_all(&ring->irq_queue); if (i915_enable_hangcheck) { @@ -349,16 +361,16 @@ static void notify_ring(struct drm_device *dev, static void gen6_pm_rps_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - rps_work); + rps.work); u32 pm_iir, pm_imr; u8 new_delay; - spin_lock_irq(&dev_priv->rps_lock); - pm_iir = dev_priv->pm_iir; - dev_priv->pm_iir = 0; + spin_lock_irq(&dev_priv->rps.lock); + pm_iir = dev_priv->rps.pm_iir; + dev_priv->rps.pm_iir = 0; pm_imr = I915_READ(GEN6_PMIMR); I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps_lock); + spin_unlock_irq(&dev_priv->rps.lock); if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) return; @@ -366,11 +378,17 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_lock(&dev_priv->dev->struct_mutex); if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) - new_delay = dev_priv->cur_delay + 1; + new_delay = dev_priv->rps.cur_delay + 1; else - new_delay = dev_priv->cur_delay - 1; + new_delay = dev_priv->rps.cur_delay - 1; - gen6_set_rps(dev_priv->dev, new_delay); + /* sysfs frequency interfaces may have snuck in while servicing the + * interrupt + */ + if (!(new_delay > dev_priv->rps.max_delay || + new_delay < dev_priv->rps.min_delay)) { + gen6_set_rps(dev_priv->dev, new_delay); + } mutex_unlock(&dev_priv->dev->struct_mutex); } @@ -444,7 +462,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long flags; - if (!IS_IVYBRIDGE(dev)) + if (!HAS_L3_GPU_CACHE(dev)) return; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -488,19 +506,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, * IIR bits should never already be set because IMR should * prevent an interrupt from being shown in IIR. The warning * displays a case where we've unsafely cleared - * dev_priv->pm_iir. Although missing an interrupt of the same + * dev_priv->rps.pm_iir. Although missing an interrupt of the same * type is not a problem, it displays a problem in the logic. * - * The mask bit in IMR is cleared by rps_work. + * The mask bit in IMR is cleared by dev_priv->rps.work. 
*/ - spin_lock_irqsave(&dev_priv->rps_lock, flags); - dev_priv->pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); + spin_lock_irqsave(&dev_priv->rps.lock, flags); + dev_priv->rps.pm_iir |= pm_iir; + I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps_lock, flags); + spin_unlock_irqrestore(&dev_priv->rps.lock, flags); - queue_work(dev_priv->wq, &dev_priv->rps_work); + queue_work(dev_priv->wq, &dev_priv->rps.work); } static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) @@ -793,10 +811,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) ibx_irq_handler(dev, pch_iir); } - if (de_iir & DE_PCU_EVENT) { - I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); - i915_handle_rps_change(dev); - } + if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) + ironlake_handle_rps_change(dev); if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) gen6_queue_rps_work(dev_priv, pm_iir); @@ -843,26 +859,55 @@ static void i915_error_work_func(struct work_struct *work) } } +/* NB: please notice the memset */ +static void i915_get_extra_instdone(struct drm_device *dev, + uint32_t *instdone) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); + + switch(INTEL_INFO(dev)->gen) { + case 2: + case 3: + instdone[0] = I915_READ(INSTDONE); + break; + case 4: + case 5: + case 6: + instdone[0] = I915_READ(INSTDONE_I965); + instdone[1] = I915_READ(INSTDONE1); + break; + default: + WARN_ONCE(1, "Unsupported platform\n"); + case 7: + instdone[0] = I915_READ(GEN7_INSTDONE_1); + instdone[1] = I915_READ(GEN7_SC_INSTDONE); + instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); + instdone[3] = I915_READ(GEN7_ROW_INSTDONE); + break; + } +} + #ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src) { struct drm_i915_error_object *dst; - int page, page_count; + int i, count; u32 reloc_offset; if (src == NULL || src->pages == NULL) return NULL; - page_count = src->base.size / PAGE_SIZE; + count = src->base.size / PAGE_SIZE; - dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC); + dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); if (dst == NULL) return NULL; reloc_offset = src->gtt_offset; - for (page = 0; page < page_count; page++) { + for (i = 0; i < count; i++) { unsigned long flags; void *d; @@ -885,30 +930,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv, memcpy_fromio(d, s, PAGE_SIZE); io_mapping_unmap_atomic(s); } else { + struct page *page; void *s; - drm_clflush_pages(&src->pages[page], 1); + page = i915_gem_object_get_page(src, i); + + drm_clflush_pages(&page, 1); - s = kmap_atomic(src->pages[page]); + s = kmap_atomic(page); memcpy(d, s, PAGE_SIZE); kunmap_atomic(s); - drm_clflush_pages(&src->pages[page], 1); + drm_clflush_pages(&page, 1); } local_irq_restore(flags); - dst->pages[page] = d; + dst->pages[i] = d; reloc_offset += PAGE_SIZE; } - dst->page_count = page_count; + dst->page_count = count; dst->gtt_offset = src->gtt_offset; return dst; unwind: - while (page--) - kfree(dst->pages[page]); + while (i--) + kfree(dst->pages[i]); kfree(dst); return NULL; } @@ -949,7 +997,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, { err->size = obj->base.size; err->name = obj->base.name; - err->seqno = obj->last_rendering_seqno; + err->rseqno = obj->last_read_seqno; + err->wseqno = obj->last_write_seqno; err->gtt_offset = obj->gtt_offset; 
err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; @@ -1039,12 +1088,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, if (!ring->get_seqno) return NULL; - seqno = ring->get_seqno(ring); + seqno = ring->get_seqno(ring, false); list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (obj->ring != ring) continue; - if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) + if (i915_seqno_passed(seqno, obj->last_read_seqno)) continue; if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) @@ -1080,10 +1129,8 @@ static void i915_record_ring_state(struct drm_device *dev, error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); - if (ring->id == RCS) { - error->instdone1 = I915_READ(INSTDONE1); + if (ring->id == RCS) error->bbaddr = I915_READ64(BB_ADDR); - } } else { error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); error->ipeir[ring->id] = I915_READ(IPEIR); @@ -1093,7 +1140,7 @@ static void i915_record_ring_state(struct drm_device *dev, error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); - error->seqno[ring->id] = ring->get_seqno(ring); + error->seqno[ring->id] = ring->get_seqno(ring, false); error->acthd[ring->id] = intel_ring_get_active_head(ring); error->head[ring->id] = I915_READ_HEAD(ring); error->tail[ring->id] = I915_READ_TAIL(ring); @@ -1199,6 +1246,11 @@ static void i915_capture_error_state(struct drm_device *dev) error->done_reg = I915_READ(DONE_REG); } + if (INTEL_INFO(dev)->gen == 7) + error->err_int = I915_READ(GEN7_ERR_INT); + + i915_get_extra_instdone(dev, error->extra_instdone); + i915_gem_record_fences(dev, error); i915_gem_record_rings(dev, error); @@ -1210,7 +1262,7 @@ static void i915_capture_error_state(struct drm_device *dev) list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) i++; error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) if (obj->pin_count) i++; error->pinned_bo_count = i - error->active_bo_count; @@ -1235,7 +1287,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->pinned_bo_count = capture_pinned_bo(error->pinned_bo, error->pinned_bo_count, - &dev_priv->mm.gtt_list); + &dev_priv->mm.bound_list); do_gettimeofday(&error->time); @@ -1274,24 +1326,26 @@ void i915_destroy_error_state(struct drm_device *dev) static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t instdone[I915_NUM_INSTDONE_REG]; u32 eir = I915_READ(EIR); - int pipe; + int pipe, i; if (!eir) return; pr_err("render error detected, EIR: 0x%08x\n", eir); + i915_get_extra_instdone(dev, instdone); + if (IS_G4X(dev)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); - pr_err(" INSTDONE: 0x%08x\n", - I915_READ(INSTDONE_I965)); + for (i = 0; i < ARRAY_SIZE(instdone); i++) + pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); - pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); @@ -1325,12 +1379,13 @@ static void 
i915_report_and_clear_eir(struct drm_device *dev) if (eir & I915_ERROR_INSTRUCTION) { pr_err("instruction error\n"); pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); + for (i = 0; i < ARRAY_SIZE(instdone); i++) + pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); if (INTEL_INFO(dev)->gen < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); - pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); POSTING_READ(IPEIR); @@ -1339,10 +1394,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); - pr_err(" INSTDONE: 0x%08x\n", - I915_READ(INSTDONE_I965)); pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); - pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1)); pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); POSTING_READ(IPEIR_I965); @@ -1590,7 +1642,8 @@ ring_last_seqno(struct intel_ring_buffer *ring) static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) { if (list_empty(&ring->request_list) || - i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { + i915_seqno_passed(ring->get_seqno(ring, false), + ring_last_seqno(ring))) { /* Issue a wake-up to catch stuck h/w. */ if (waitqueue_active(&ring->irq_queue)) { DRM_ERROR("Hangcheck timer elapsed... %s idle\n", @@ -1656,7 +1709,7 @@ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; + uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; struct intel_ring_buffer *ring; bool err = false, idle; int i; @@ -1684,25 +1737,16 @@ void i915_hangcheck_elapsed(unsigned long data) return; } - if (INTEL_INFO(dev)->gen < 4) { - instdone = I915_READ(INSTDONE); - instdone1 = 0; - } else { - instdone = I915_READ(INSTDONE_I965); - instdone1 = I915_READ(INSTDONE1); - } - + i915_get_extra_instdone(dev, instdone); if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && - dev_priv->last_instdone == instdone && - dev_priv->last_instdone1 == instdone1) { + memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { if (i915_hangcheck_hung(dev)) return; } else { dev_priv->hangcheck_count = 0; memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); - dev_priv->last_instdone = instdone; - dev_priv->last_instdone1 = instdone1; + memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); } repeat: @@ -2647,7 +2691,7 @@ void intel_irq_init(struct drm_device *dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); - INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); + INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); dev->driver->get_vblank_counter = i915_get_vblank_counter; diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 28725ce5b82c..7637824c6a7d 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -450,6 +450,7 @@ #define RING_ACTHD(base) ((base)+0x74) #define RING_NOPID(base) ((base)+0x94) #define RING_IMR(base) ((base)+0xa8) +#define RING_TIMESTAMP(base) ((base)+0x358) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 
0x00200000
@@ -478,6 +479,11 @@
 #define IPEIR_I965	0x02064
 #define IPEHR_I965	0x02068
 #define INSTDONE_I965	0x0206c
+#define GEN7_INSTDONE_1		0x0206c
+#define GEN7_SC_INSTDONE	0x07100
+#define GEN7_SAMPLER_INSTDONE	0x0e160
+#define GEN7_ROW_INSTDONE	0x0e164
+#define I915_NUM_INSTDONE_REG	4
 #define RING_IPEIR(base)	((base)+0x64)
 #define RING_IPEHR(base)	((base)+0x68)
 #define RING_INSTDONE(base)	((base)+0x6c)
@@ -500,6 +506,8 @@
 #define DMA_FADD_I8XX	0x020d0
 
 #define ERROR_GEN6	0x040a0
+#define GEN7_ERR_INT	0x44040
+#define   ERR_INT_MMIO_UNCLAIMED (1<<13)
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior. The top 16 bits of each are
@@ -529,6 +537,8 @@
 #define   GFX_PSMI_GRANULARITY	(1<<10)
 #define   GFX_PPGTT_ENABLE	(1<<9)
 
+#define VLV_DISPLAY_BASE 0x180000
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -1496,6 +1506,14 @@
 			GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
 			GEN7_CXT_GT1_SIZE(ctx_reg) + \
 			GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+#define HSW_CXT_POWER_SIZE(ctx_reg)	((ctx_reg >> 26) & 0x3f)
+#define HSW_CXT_RING_SIZE(ctx_reg)	((ctx_reg >> 23) & 0x7)
+#define HSW_CXT_RENDER_SIZE(ctx_reg)	((ctx_reg >> 15) & 0xff)
+#define HSW_CXT_TOTAL_SIZE(ctx_reg)	(HSW_CXT_POWER_SIZE(ctx_reg) + \
+					 HSW_CXT_RING_SIZE(ctx_reg) + \
+					 HSW_CXT_RENDER_SIZE(ctx_reg) + \
+					 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
 /*
  * Overlay regs
@@ -1549,12 +1567,35 @@
 
 /* VGA port control */
 #define ADPA			0x61100
+#define PCH_ADPA		0xe1100
+#define VLV_ADPA		(VLV_DISPLAY_BASE + ADPA)
+
 #define   ADPA_DAC_ENABLE	(1<<31)
 #define   ADPA_DAC_DISABLE	0
 #define   ADPA_PIPE_SELECT_MASK	(1<<30)
 #define   ADPA_PIPE_A_SELECT	0
 #define   ADPA_PIPE_B_SELECT	(1<<30)
 #define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
+/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_CRT_HOTPLUG_MASK	0x03ff0000 /* bit 25-16 */
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE	(0<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK	(3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO	(2<<24)
+#define   ADPA_CRT_HOTPLUG_ENABLE	(1<<23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64	(0<<22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128	(1<<22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS	(0<<21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS	(1<<21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S	(0<<20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S	(1<<20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40	(0<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50	(1<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60	(2<<18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70	(3<<18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV	(0<<17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV	(1<<17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY	0
 #define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
 
 /* Video Data Island Packet control */
 #define VIDEO_DIP_DATA		0x61178
+/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
+ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+ * of the infoframe structure specified by CEA-861.
*/ +#define VIDEO_DIP_DATA_SIZE 32 #define VIDEO_DIP_CTL 0x61170 /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) @@ -3889,31 +3934,6 @@ #define FDI_PLL_CTL_1 0xfe000 #define FDI_PLL_CTL_2 0xfe004 -/* CRT */ -#define PCH_ADPA 0xe1100 -#define ADPA_TRANS_SELECT_MASK (1<<30) -#define ADPA_TRANS_A_SELECT 0 -#define ADPA_TRANS_B_SELECT (1<<30) -#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ -#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) -#define ADPA_CRT_HOTPLUG_ENABLE (1<<23) -#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) -#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) -#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) -#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) -#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) -#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) -#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) -#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) -#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) -#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) - /* or SDVOB */ #define HDMIB 0xe1140 #define PORT_ENABLE (1 << 31) @@ -4021,6 +4041,8 @@ #define PORT_TRANS_C_SEL_CPT (2<<29) #define PORT_TRANS_SEL_MASK (3<<29) #define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) +#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) +#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 @@ -4239,7 +4261,15 @@ #define G4X_HDMIW_HDMIEDID 0x6210C #define IBX_HDMIW_HDMIEDID_A 0xE2050 +#define IBX_HDMIW_HDMIEDID_B 0xE2150 +#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ + IBX_HDMIW_HDMIEDID_A, \ + IBX_HDMIW_HDMIEDID_B) #define IBX_AUD_CNTL_ST_A 0xE20B4 +#define IBX_AUD_CNTL_ST_B 0xE21B4 +#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ + IBX_AUD_CNTL_ST_A, \ + IBX_AUD_CNTL_ST_B) #define IBX_ELD_BUFFER_SIZE (0x1f << 10) #define IBX_ELD_ADDRESS (0x1f << 5) #define IBX_ELD_ACK (1 << 4) @@ -4248,7 +4278,15 @@ #define IBX_CP_READYB (1 << 1) #define CPT_HDMIW_HDMIEDID_A 0xE5050 +#define CPT_HDMIW_HDMIEDID_B 0xE5150 +#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ + CPT_HDMIW_HDMIEDID_A, \ + CPT_HDMIW_HDMIEDID_B) #define CPT_AUD_CNTL_ST_A 0xE50B4 +#define CPT_AUD_CNTL_ST_B 0xE51B4 +#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ + CPT_AUD_CNTL_ST_A, \ + CPT_AUD_CNTL_ST_B) #define CPT_AUD_CNTRL_ST2 0xE50C0 /* These are the 4 32-bit write offset registers for each stream @@ -4258,7 +4296,15 @@ #define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) #define IBX_AUD_CONFIG_A 0xe2000 +#define IBX_AUD_CONFIG_B 0xe2100 +#define IBX_AUD_CFG(pipe) _PIPE(pipe, \ + IBX_AUD_CONFIG_A, \ + IBX_AUD_CONFIG_B) #define CPT_AUD_CONFIG_A 0xe5000 +#define CPT_AUD_CONFIG_B 0xe5100 +#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ + CPT_AUD_CONFIG_A, \ + CPT_AUD_CONFIG_B) #define AUD_CONFIG_N_VALUE_INDEX (1 << 29) #define AUD_CONFIG_N_PROG_ENABLE (1 << 28) #define AUD_CONFIG_UPPER_N_SHIFT 20 @@ -4269,195 +4315,233 @@ #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) #define AUD_CONFIG_DISABLE_NCTS (1 << 3) +/* HSW Audio */ +#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ +#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ +#define HSW_AUD_CFG(pipe) _PIPE(pipe, \ + HSW_AUD_CONFIG_A, \ + HSW_AUD_CONFIG_B) + +#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ +#define 
HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ +#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ + HSW_AUD_MISC_CTRL_A, \ + HSW_AUD_MISC_CTRL_B) + +#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ +#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ +#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ + HSW_AUD_DIP_ELD_CTRL_ST_A, \ + HSW_AUD_DIP_ELD_CTRL_ST_B) + +/* Audio Digital Converter */ +#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ +#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */ +#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ + HSW_AUD_DIG_CNVT_1, \ + HSW_AUD_DIG_CNVT_2) +#define DIP_PORT_SEL_MASK 0x3 + +#define HSW_AUD_EDID_DATA_A 0x65050 +#define HSW_AUD_EDID_DATA_B 0x65150 +#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ + HSW_AUD_EDID_DATA_A, \ + HSW_AUD_EDID_DATA_B) + +#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ +#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ +#define AUDIO_INACTIVE_C (1<<11) +#define AUDIO_INACTIVE_B (1<<7) +#define AUDIO_INACTIVE_A (1<<3) +#define AUDIO_OUTPUT_ENABLE_A (1<<2) +#define AUDIO_OUTPUT_ENABLE_B (1<<6) +#define AUDIO_OUTPUT_ENABLE_C (1<<10) +#define AUDIO_ELD_VALID_A (1<<0) +#define AUDIO_ELD_VALID_B (1<<4) +#define AUDIO_ELD_VALID_C (1<<8) +#define AUDIO_CP_READY_A (1<<1) +#define AUDIO_CP_READY_B (1<<5) +#define AUDIO_CP_READY_C (1<<9) + /* HSW Power Wells */ -#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ -#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ -#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ -#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ -#define HSW_PWR_WELL_ENABLE (1<<31) -#define HSW_PWR_WELL_STATE (1<<30) -#define HSW_PWR_WELL_CTL5 0x45410 +#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ +#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ +#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ +#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ +#define HSW_PWR_WELL_ENABLE (1<<31) +#define HSW_PWR_WELL_STATE (1<<30) +#define HSW_PWR_WELL_CTL5 0x45410 #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) -#define HSW_PWR_WELL_FORCE_ON (1<<19) -#define HSW_PWR_WELL_CTL6 0x45414 +#define HSW_PWR_WELL_FORCE_ON (1<<19) +#define HSW_PWR_WELL_CTL6 0x45414 /* Per-pipe DDI Function Control */ -#define PIPE_DDI_FUNC_CTL_A 0x60400 -#define PIPE_DDI_FUNC_CTL_B 0x61400 -#define PIPE_DDI_FUNC_CTL_C 0x62400 +#define PIPE_DDI_FUNC_CTL_A 0x60400 +#define PIPE_DDI_FUNC_CTL_B 0x61400 +#define PIPE_DDI_FUNC_CTL_C 0x62400 #define PIPE_DDI_FUNC_CTL_EDP 0x6F400 -#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \ - PIPE_DDI_FUNC_CTL_A, \ - PIPE_DDI_FUNC_CTL_B) +#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ + PIPE_DDI_FUNC_CTL_B) #define PIPE_DDI_FUNC_ENABLE (1<<31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define PIPE_DDI_PORT_MASK (7<<28) -#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) -#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) -#define PIPE_DDI_MODE_SELECT_DVI (1<<24) +#define PIPE_DDI_PORT_MASK (7<<28) +#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) +#define PIPE_DDI_MODE_SELECT_MASK (7<<24) +#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) +#define PIPE_DDI_MODE_SELECT_DVI (1<<24) #define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) #define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) -#define PIPE_DDI_MODE_SELECT_FDI (4<<24) -#define PIPE_DDI_BPC_8 (0<<20) -#define PIPE_DDI_BPC_10 (1<<20) -#define PIPE_DDI_BPC_6 (2<<20) -#define PIPE_DDI_BPC_12 (3<<20) -#define 
PIPE_DDI_BFI_ENABLE (1<<4) -#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) -#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) -#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) +#define PIPE_DDI_MODE_SELECT_FDI (4<<24) +#define PIPE_DDI_BPC_MASK (7<<20) +#define PIPE_DDI_BPC_8 (0<<20) +#define PIPE_DDI_BPC_10 (1<<20) +#define PIPE_DDI_BPC_6 (2<<20) +#define PIPE_DDI_BPC_12 (3<<20) +#define PIPE_DDI_PVSYNC (1<<17) +#define PIPE_DDI_PHSYNC (1<<16) +#define PIPE_DDI_BFI_ENABLE (1<<4) +#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) +#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) +#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) /* DisplayPort Transport Control */ #define DP_TP_CTL_A 0x64040 #define DP_TP_CTL_B 0x64140 -#define DP_TP_CTL(port) _PORT(port, \ - DP_TP_CTL_A, \ - DP_TP_CTL_B) -#define DP_TP_CTL_ENABLE (1<<31) -#define DP_TP_CTL_MODE_SST (0<<27) -#define DP_TP_CTL_MODE_MST (1<<27) +#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B) +#define DP_TP_CTL_ENABLE (1<<31) +#define DP_TP_CTL_MODE_SST (0<<27) +#define DP_TP_CTL_MODE_MST (1<<27) #define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) -#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) +#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) -#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) +#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) /* DisplayPort Transport Status */ #define DP_TP_STATUS_A 0x64044 #define DP_TP_STATUS_B 0x64144 -#define DP_TP_STATUS(port) _PORT(port, \ - DP_TP_STATUS_A, \ - DP_TP_STATUS_B) +#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) /* DDI Buffer Control */ #define DDI_BUF_CTL_A 0x64000 #define DDI_BUF_CTL_B 0x64100 -#define DDI_BUF_CTL(port) _PORT(port, \ - DDI_BUF_CTL_A, \ - DDI_BUF_CTL_B) -#define DDI_BUF_CTL_ENABLE (1<<31) +#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) +#define DDI_BUF_CTL_ENABLE (1<<31) #define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ -#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ +#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ #define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ -#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ +#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ #define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ -#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ +#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ #define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ -#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ -#define DDI_BUF_EMP_MASK (0xf<<24) -#define DDI_BUF_IS_IDLE (1<<7) -#define DDI_PORT_WIDTH_X1 (0<<1) -#define DDI_PORT_WIDTH_X2 (1<<1) -#define DDI_PORT_WIDTH_X4 (3<<1) +#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ +#define DDI_BUF_EMP_MASK (0xf<<24) +#define DDI_BUF_IS_IDLE (1<<7) +#define DDI_PORT_WIDTH_X1 (0<<1) +#define DDI_PORT_WIDTH_X2 (1<<1) +#define DDI_PORT_WIDTH_X4 (3<<1) #define DDI_INIT_DISPLAY_DETECTED (1<<0) /* DDI Buffer Translations */ #define DDI_BUF_TRANS_A 0x64E00 #define DDI_BUF_TRANS_B 0x64E60 -#define DDI_BUF_TRANS(port) _PORT(port, \ - DDI_BUF_TRANS_A, \ - DDI_BUF_TRANS_B) +#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) /* Sideband Interface (SBI) is programmed indirectly, via * SBI_ADDR, which contains the register offset; and SBI_DATA, * which contains the payload */ -#define SBI_ADDR 0xC6000 -#define SBI_DATA 0xC6004 +#define SBI_ADDR 0xC6000 +#define SBI_DATA 0xC6004 #define 
SBI_CTL_STAT 0xC6008 #define SBI_CTL_OP_CRRD (0x6<<8) #define SBI_CTL_OP_CRWR (0x7<<8) #define SBI_RESPONSE_FAIL (0x1<<1) -#define SBI_RESPONSE_SUCCESS (0x0<<1) -#define SBI_BUSY (0x1<<0) -#define SBI_READY (0x0<<0) +#define SBI_RESPONSE_SUCCESS (0x0<<1) +#define SBI_BUSY (0x1<<0) +#define SBI_READY (0x0<<0) /* SBI offsets */ -#define SBI_SSCDIVINTPHASE6 0x0600 +#define SBI_SSCDIVINTPHASE6 0x0600 #define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) #define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) -#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) +#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) -#define SBI_SSCCTL 0x020c +#define SBI_SSCCTL 0x020c #define SBI_SSCCTL6 0x060C -#define SBI_SSCCTL_DISABLE (1<<0) +#define SBI_SSCCTL_DISABLE (1<<0) #define SBI_SSCAUXDIV6 0x0610 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) -#define SBI_DBUFF0 0x2a00 +#define SBI_DBUFF0 0x2a00 /* LPT PIXCLK_GATE */ -#define PIXCLK_GATE 0xC6020 -#define PIXCLK_GATE_UNGATE 1<<0 -#define PIXCLK_GATE_GATE 0<<0 +#define PIXCLK_GATE 0xC6020 +#define PIXCLK_GATE_UNGATE (1<<0) +#define PIXCLK_GATE_GATE (0<<0) /* SPLL */ -#define SPLL_CTL 0x46020 +#define SPLL_CTL 0x46020 #define SPLL_PLL_ENABLE (1<<31) #define SPLL_PLL_SCC (1<<28) #define SPLL_PLL_NON_SCC (2<<28) -#define SPLL_PLL_FREQ_810MHz (0<<26) -#define SPLL_PLL_FREQ_1350MHz (1<<26) +#define SPLL_PLL_FREQ_810MHz (0<<26) +#define SPLL_PLL_FREQ_1350MHz (1<<26) /* WRPLL */ -#define WRPLL_CTL1 0x46040 -#define WRPLL_CTL2 0x46060 -#define WRPLL_PLL_ENABLE (1<<31) -#define WRPLL_PLL_SELECT_SSC (0x01<<28) -#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) +#define WRPLL_CTL1 0x46040 +#define WRPLL_CTL2 0x46060 +#define WRPLL_PLL_ENABLE (1<<31) +#define WRPLL_PLL_SELECT_SSC (0x01<<28) +#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) /* WRPLL divider programming */ -#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) -#define WRPLL_DIVIDER_POST(x) ((x)<<8) -#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) +#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) +#define WRPLL_DIVIDER_POST(x) ((x)<<8) +#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) /* Port clock selection */ #define PORT_CLK_SEL_A 0x46100 #define PORT_CLK_SEL_B 0x46104 -#define PORT_CLK_SEL(port) _PORT(port, \ - PORT_CLK_SEL_A, \ - PORT_CLK_SEL_B) +#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B) #define PORT_CLK_SEL_LCPLL_2700 (0<<29) #define PORT_CLK_SEL_LCPLL_1350 (1<<29) #define PORT_CLK_SEL_LCPLL_810 (2<<29) -#define PORT_CLK_SEL_SPLL (3<<29) +#define PORT_CLK_SEL_SPLL (3<<29) #define PORT_CLK_SEL_WRPLL1 (4<<29) #define PORT_CLK_SEL_WRPLL2 (5<<29) /* Pipe clock selection */ #define PIPE_CLK_SEL_A 0x46140 #define PIPE_CLK_SEL_B 0x46144 -#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \ - PIPE_CLK_SEL_A, \ - PIPE_CLK_SEL_B) +#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) /* For each pipe, we need to select the corresponding port clock */ -#define PIPE_CLK_SEL_DISABLED (0x0<<29) -#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) +#define PIPE_CLK_SEL_DISABLED (0x0<<29) +#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) /* LCPLL Control */ -#define LCPLL_CTL 0x130040 +#define LCPLL_CTL 0x130040 #define LCPLL_PLL_DISABLE (1<<31) #define LCPLL_PLL_LOCK (1<<30) -#define LCPLL_CD_CLOCK_DISABLE (1<<25) +#define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) /* Pipe WM_LINETIME - watermark line time */ #define PIPE_WM_LINETIME_A 0x45270 
#define PIPE_WM_LINETIME_B 0x45274 -#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ - PIPE_WM_LINETIME_A, \ - PIPE_WM_LINETIME_B) -#define PIPE_WM_LINETIME_MASK (0x1ff) -#define PIPE_WM_LINETIME_TIME(x) ((x)) +#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \ + PIPE_WM_LINETIME_B) +#define PIPE_WM_LINETIME_MASK (0x1ff) +#define PIPE_WM_LINETIME_TIME(x) ((x)) #define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) -#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) +#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) /* SFUSE_STRAP */ -#define SFUSE_STRAP 0xc2014 +#define SFUSE_STRAP 0xc2014 #define SFUSE_STRAP_DDIB_DETECTED (1<<2) #define SFUSE_STRAP_DDIC_DETECTED (1<<1) #define SFUSE_STRAP_DDID_DETECTED (1<<0) diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c b/trunk/drivers/gpu/drm/i915/i915_sysfs.c index 7631807a2788..903eebd2117a 100644 --- a/trunk/drivers/gpu/drm/i915/i915_sysfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c @@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg) } static ssize_t -show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf) +show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); } static ssize_t -show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf) +show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); } static ssize_t -show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf) +show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); } static ssize_t -show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf) +show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); + struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); } @@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = { .name = power_group_name, .attrs = rc6_attrs }; +#endif static int l3_access_valid(struct drm_device *dev, loff_t offset) { @@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = { .mmap = NULL }; +static ssize_t gt_cur_freq_mhz_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; + mutex_unlock(&dev->struct_mutex); + + return snprintf(buf, PAGE_SIZE, "%d", ret); +} + +static ssize_t 
gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_max_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay > val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.max_delay = val;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+	mutex_unlock(&dev->struct_mutex);
+
+	return snprintf(buf, PAGE_SIZE, "%d", ret);
+}
+
+static ssize_t gt_min_freq_mhz_store(struct device *kdev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap, hw_max, hw_min;
+	ssize_t ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	val /= GT_FREQUENCY_MULTIPLIER;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	hw_max = (rp_state_cap & 0xff);
+	hw_min = ((rp_state_cap & 0xff0000) >> 16);
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev_priv->rps.cur_delay < val)
+		gen6_set_rps(dev_priv->dev, val);
+
+	dev_priv->rps.min_delay = val;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+
+}
+
+static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
+static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
+static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
+
+
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
+static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
+
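The attributes declared above hang off the DRM minor's device, so once registered they are ordinary sysfs files reporting MHz values. A small userspace sketch for exercising them follows; the /sys/class/drm/card0 path assumes the primary node and is not guaranteed on every system.

#include <stdio.h>

#define CARD_DIR "/sys/class/drm/card0"	/* primary minor; an assumption */

static int read_mhz(const char *attr)
{
	char path[128];
	FILE *f;
	int mhz = -1;

	snprintf(path, sizeof(path), CARD_DIR "/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &mhz) != 1)
		mhz = -1;
	fclose(f);
	return mhz;
}

int main(void)
{
	printf("cur %d MHz, min %d MHz, max %d MHz\n",
	       read_mhz("gt_cur_freq_mhz"),
	       read_mhz("gt_min_freq_mhz"),
	       read_mhz("gt_max_freq_mhz"));
	printf("RP0 %d MHz, RP1 %d MHz, RPn %d MHz\n",
	       read_mhz("gt_RP0_freq_mhz"),
	       read_mhz("gt_RP1_freq_mhz"),
	       read_mhz("gt_RPn_freq_mhz"));

	/* Writes go through the store handlers above (root only): the
	 * value is divided by GT_FREQUENCY_MULTIPLIER and checked
	 * against the hardware range read from GEN6_RP_STATE_CAP
	 * before rps.max_delay or rps.min_delay is updated. */
	return 0;
}

Note how the store handlers clamp against both the RP_STATE_CAP hardware limits and the opposite software limit, which is also why the interrupt path above rechecks rps.min_delay/rps.max_delay before calling gen6_set_rps().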
+/* For now we have a static number of RP states */
+static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+	struct drm_device *dev = minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, rp_state_cap;
+	ssize_t ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (attr == &dev_attr_gt_RP0_freq_mhz) {
+		val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
+		val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+	} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
+		val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+	} else {
+		BUG();
+	}
+	return snprintf(buf, PAGE_SIZE, "%d", val);
+}
+
+static const struct attribute *gen6_attrs[] = {
+	&dev_attr_gt_cur_freq_mhz.attr,
+	&dev_attr_gt_max_freq_mhz.attr,
+	&dev_attr_gt_min_freq_mhz.attr,
+	&dev_attr_gt_RP0_freq_mhz.attr,
+	&dev_attr_gt_RP1_freq_mhz.attr,
+	&dev_attr_gt_RPn_freq_mhz.attr,
+	NULL,
+};
+
 void i915_setup_sysfs(struct drm_device *dev)
 {
 	int ret;
 
+#ifdef CONFIG_PM
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ret = sysfs_merge_group(&dev->primary->kdev.kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
-
-	if (IS_IVYBRIDGE(dev)) {
+#endif
+	if (HAS_L3_GPU_CACHE(dev)) {
 		ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
 		if (ret)
 			DRM_ERROR("l3 parity sysfs setup failed\n");
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+		if (ret)
+			DRM_ERROR("gen6 sysfs setup failed\n");
+	}
 }
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
+	sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
 	device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+#ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+#endif
 }
-#else
-void i915_setup_sysfs(struct drm_device *dev)
-{
-	return;
-}
-
-void i915_teardown_sysfs(struct drm_device *dev)
-{
-	return;
-}
-#endif /* CONFIG_PM */
diff --git a/trunk/drivers/gpu/drm/i915/i915_trace.h b/trunk/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d..8134421b89a6 100644
--- a/trunk/drivers/gpu/drm/i915/i915_trace.h
+++ b/trunk/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
-	    TP_PROTO(struct drm_device *dev, bool purgeable),
-	    TP_ARGS(dev, purgeable),
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(bool, purgeable)
 			    ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
-			   __entry->purgeable = purgeable;
 			  ),
 
-	    TP_printk("dev=%d%s",
-		      __entry->dev,
-		      __entry->purgeable ?
", purgeable only" : "") + TP_printk("dev=%d", __entry->dev) ); TRACE_EVENT(i915_gem_ring_dispatch, @@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw, (u32)(__entry->val >> 32)) ); +TRACE_EVENT(intel_gpu_freq_change, + TP_PROTO(u32 freq), + TP_ARGS(freq), + + TP_STRUCT__entry( + __field(u32, freq) + ), + + TP_fast_assign( + __entry->freq = freq; + ), + + TP_printk("new_freq=%u", __entry->freq) +); + #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 23bdc8cd1458..c42b9809f86d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -47,6 +47,7 @@ struct intel_crt { struct intel_encoder base; bool force_hotplug_required; + u32 adpa_reg; }; static struct intel_crt *intel_attached_crt(struct drm_connector *connector) @@ -55,42 +56,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector) struct intel_crt, base); } -static void pch_crt_dpms(struct drm_encoder *encoder, int mode) +static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + return container_of(encoder, struct intel_crt, base); +} + +static bool intel_crt_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 tmp; + + tmp = I915_READ(crt->adpa_reg); + + if (!(tmp & ADPA_DAC_ENABLE)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_disable_crt(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 temp; - temp = I915_READ(PCH_ADPA); + temp = I915_READ(crt->adpa_reg); + temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp &= ~ADPA_DAC_ENABLE; + I915_WRITE(crt->adpa_reg, temp); +} - switch (mode) { - case DRM_MODE_DPMS_ON: - temp |= ADPA_DAC_ENABLE; - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - /* Just leave port enable cleared */ - break; - } +static void intel_enable_crt(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 temp; - I915_WRITE(PCH_ADPA, temp); + temp = I915_READ(crt->adpa_reg); + temp |= ADPA_DAC_ENABLE; + I915_WRITE(crt->adpa_reg, temp); } -static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) +/* Note: The caller is required to filter out dpms modes not supported by the + * platform. 
*/ +static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 temp; - temp = I915_READ(ADPA); + temp = I915_READ(crt->adpa_reg); temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); temp &= ~ADPA_DAC_ENABLE; - if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - switch (mode) { case DRM_MODE_DPMS_ON: temp |= ADPA_DAC_ENABLE; @@ -106,7 +133,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) break; } - I915_WRITE(ADPA, temp); + I915_WRITE(crt->adpa_reg, temp); +} + +static void intel_crt_dpms(struct drm_connector *connector, int mode) +{ + struct drm_device *dev = connector->dev; + struct intel_encoder *encoder = intel_attached_encoder(connector); + struct drm_crtc *crtc; + int old_dpms; + + /* PCH platforms and VLV only support on/off. */ + if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + old_dpms = connector->dpms; + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + crtc = encoder->base.crtc; + if (!crtc) { + encoder->connectors_active = false; + return; + } + + /* We need the pipe to run for anything but OFF. */ + if (mode == DRM_MODE_DPMS_OFF) + encoder->connectors_active = false; + else + encoder->connectors_active = true; + + if (mode < old_dpms) { + /* From off to on, enable the pipe first. */ + intel_crtc_update_dpms(crtc); + + intel_crt_set_dpms(encoder, mode); + } else { + intel_crt_set_dpms(encoder, mode); + + intel_crtc_update_dpms(crtc); + } + + intel_modeset_check_state(connector->dev); } static int intel_crt_mode_valid(struct drm_connector *connector, @@ -145,19 +216,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_crtc *crtc = encoder->crtc; + struct intel_crt *crt = + intel_encoder_to_crt(to_intel_encoder(encoder)); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = dev->dev_private; int dpll_md_reg; u32 adpa, dpll_md; - u32 adpa_reg; dpll_md_reg = DPLL_MD(intel_crtc->pipe); - if (HAS_PCH_SPLIT(dev)) - adpa_reg = PCH_ADPA; - else - adpa_reg = ADPA; - /* * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning @@ -185,7 +252,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); - I915_WRITE(adpa_reg, adpa); + I915_WRITE(crt->adpa_reg, adpa); } static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) @@ -545,14 +612,12 @@ intel_crt_detect(struct drm_connector *connector, bool force) return connector->status; /* for pre-945g platforms use load detect */ - if (intel_get_load_detect_pipe(&crt->base, connector, NULL, - &tmp)) { + if (intel_get_load_detect_pipe(connector, NULL, &tmp)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(&crt->base, connector, - &tmp); + intel_release_load_detect_pipe(connector, &tmp); } else status = connector_status_unknown; @@ -603,25 +668,15 @@ static void intel_crt_reset(struct drm_connector *connector) * Routines for controlling stuff on the analog port */ -static const struct 
drm_encoder_helper_funcs pch_encoder_funcs = { +static const struct drm_encoder_helper_funcs crt_encoder_funcs = { .mode_fixup = intel_crt_mode_fixup, - .prepare = intel_encoder_prepare, - .commit = intel_encoder_commit, .mode_set = intel_crt_mode_set, - .dpms = pch_crt_dpms, -}; - -static const struct drm_encoder_helper_funcs gmch_encoder_funcs = { - .mode_fixup = intel_crt_mode_fixup, - .prepare = intel_encoder_prepare, - .commit = intel_encoder_commit, - .mode_set = intel_crt_mode_set, - .dpms = gmch_crt_dpms, + .disable = intel_encoder_noop, }; static const struct drm_connector_funcs intel_crt_connector_funcs = { .reset = intel_crt_reset, - .dpms = drm_helper_connector_dpms, + .dpms = intel_crt_dpms, .detect = intel_crt_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = intel_crt_destroy, @@ -662,7 +717,6 @@ void intel_crt_init(struct drm_device *dev) struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; - const struct drm_encoder_helper_funcs *encoder_helper_funcs; /* Skip machines without VGA that falsely report hotplug events */ if (dmi_check_system(intel_no_crt)) @@ -688,13 +742,11 @@ void intel_crt_init(struct drm_device *dev) intel_connector_attach_encoder(intel_connector, &crt->base); crt->base.type = INTEL_OUTPUT_ANALOG; - crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | - 1 << INTEL_ANALOG_CLONE_BIT | - 1 << INTEL_SDVO_LVDS_CLONE_BIT); + crt->base.cloneable = true; if (IS_HASWELL(dev)) crt->base.crtc_mask = (1 << 0); else - crt->base.crtc_mask = (1 << 0) | (1 << 1); + crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); if (IS_GEN2(dev)) connector->interlace_allowed = 0; @@ -703,11 +755,18 @@ void intel_crt_init(struct drm_device *dev) connector->doublescan_allowed = 0; if (HAS_PCH_SPLIT(dev)) - encoder_helper_funcs = &pch_encoder_funcs; + crt->adpa_reg = PCH_ADPA; + else if (IS_VALLEYVIEW(dev)) + crt->adpa_reg = VLV_ADPA; else - encoder_helper_funcs = &gmch_encoder_funcs; + crt->adpa_reg = ADPA; + + crt->base.disable = intel_disable_crt; + crt->base.enable = intel_enable_crt; + crt->base.get_hw_state = intel_crt_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; - drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); + drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c index 933c74859172..bfe375466a0e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ddi.c +++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c @@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) case PORT_B: case PORT_C: case PORT_D: - intel_hdmi_init(dev, DDI_BUF_CTL(port)); + intel_hdmi_init(dev, DDI_BUF_CTL(port), port); break; default: DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", @@ -267,7 +267,8 @@ struct wrpll_tmds_clock { u16 r2; /* Reference divider */ }; -/* Table of matching values for WRPLL clocks programming for each frequency */ +/* Table of matching values for WRPLL clocks programming for each frequency. + * The code assumes this table is sorted. 
*/ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {19750, 38, 25, 18}, {20000, 48, 32, 18}, @@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {23000, 36, 23, 15}, {23500, 40, 40, 23}, {23750, 26, 16, 14}, - {23750, 26, 16, 14}, {24000, 36, 24, 15}, {25000, 36, 25, 15}, {25175, 26, 40, 33}, @@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {108000, 8, 24, 15}, {108108, 8, 173, 108}, {109000, 6, 23, 19}, - {109000, 6, 23, 19}, {110000, 6, 22, 18}, {110013, 6, 22, 18}, {110250, 8, 49, 30}, @@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { {218250, 4, 42, 26}, {218750, 4, 34, 21}, {219000, 4, 47, 29}, - {219000, 4, 47, 29}, {220000, 4, 44, 27}, {220640, 4, 49, 30}, {220750, 4, 36, 22}, @@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int port = intel_hdmi->ddi_port; int pipe = intel_crtc->pipe; - int p, n2, r2, valid=0; + int p, n2, r2; u32 temp, i; /* On Haswell, we need to enable the clocks and prepare DDI function to @@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, */ DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); - for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) { - if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) { - p = wrpll_tmds_clock_table[i].p; - n2 = wrpll_tmds_clock_table[i].n2; - r2 = wrpll_tmds_clock_table[i].r2; + for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) + if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) + break; - DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n", - crtc->mode.clock, - p, n2, r2); + if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) + i--; - valid = 1; - break; - } - } + p = wrpll_tmds_clock_table[i].p; + n2 = wrpll_tmds_clock_table[i].n2; + r2 = wrpll_tmds_clock_table[i].r2; - if (!valid) { - DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n", - crtc->mode.clock); - return; - } + if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) + DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", + wrpll_tmds_clock_table[i].clock, crtc->mode.clock); + + DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", + crtc->mode.clock, p, n2, r2); /* Enable LCPLL if disabled */ temp = I915_READ(LCPLL_CTL); @@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder, /* Proper support for digital audio needs a new logic and a new set * of registers, so we leave it for future patch bombing. */ - DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n", + DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", pipe_name(intel_crtc->pipe)); + + /* write eld */ + DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); + intel_write_eld(encoder, adjusted_mode); } /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ - temp = I915_READ(DDI_FUNC_CTL(pipe)); - temp &= ~PIPE_DDI_PORT_MASK; - temp &= ~PIPE_DDI_BPC_12; - temp |= PIPE_DDI_SELECT_PORT(port) | - PIPE_DDI_MODE_SELECT_HDMI | - ((intel_crtc->bpp > 24) ? 
- PIPE_DDI_BPC_12 : - PIPE_DDI_BPC_8) | - PIPE_DDI_FUNC_ENABLE; + temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); + + switch (intel_crtc->bpp) { + case 18: + temp |= PIPE_DDI_BPC_6; + break; + case 24: + temp |= PIPE_DDI_BPC_8; + break; + case 30: + temp |= PIPE_DDI_BPC_10; + break; + case 36: + temp |= PIPE_DDI_BPC_12; + break; + default: + WARN(1, "%d bpp unsupported by pipe DDI function\n", + intel_crtc->bpp); + } + + if (intel_hdmi->has_hdmi_sink) + temp |= PIPE_DDI_MODE_SELECT_HDMI; + else + temp |= PIPE_DDI_MODE_SELECT_DVI; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + temp |= PIPE_DDI_PVSYNC; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + temp |= PIPE_DDI_PHSYNC; I915_WRITE(DDI_FUNC_CTL(pipe), temp); intel_hdmi->set_infoframes(encoder, adjusted_mode); } -void intel_ddi_dpms(struct drm_encoder *encoder, int mode) +bool intel_ddi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + u32 tmp; + int i; + + tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); + + if (!(tmp & DDI_BUF_CTL_ENABLE)) + return false; + + for_each_pipe(i) { + tmp = I915_READ(DDI_FUNC_CTL(i)); + + if ((tmp & PIPE_DDI_PORT_MASK) + == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { + *pipe = i; + return true; + } + } + + DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); + + return true; +} + +void intel_enable_ddi(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); int port = intel_hdmi->ddi_port; u32 temp; temp = I915_READ(DDI_BUF_CTL(port)); - - if (mode != DRM_MODE_DPMS_ON) { - temp &= ~DDI_BUF_CTL_ENABLE; - } else { - temp |= DDI_BUF_CTL_ENABLE; - } + temp |= DDI_BUF_CTL_ENABLE; /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, * and swing/emphasis values are ignored so nothing special needs * to be done besides enabling the port. 
+
+void intel_disable_ddi(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+	int port = intel_hdmi->ddi_port;
+	u32 temp;
+
+	temp = I915_READ(DDI_BUF_CTL(port));
+	temp &= ~DDI_BUF_CTL_ENABLE;
+
+	I915_WRITE(DDI_BUF_CTL(port), temp);
 }
diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c
index bc2ad348e5d8..6f5aafa1b633 100644
--- a/trunk/drivers/gpu/drm/i915/intel_display.c
+++ b/trunk/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 		/* Wait for the Pipe State to go off */
 		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
 			     100))
-			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+			WARN(1, "pipe_off wait timed out\n");
 	} else {
 		u32 last_line, line_mask;
 		int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 		} while (((I915_READ(reg) & line_mask) != last_line) &&
 			 time_after(timeout, jiffies));
 		if (time_after(jiffies, timeout))
-			DRM_DEBUG_KMS("pipe_off wait timed out\n");
+			WARN(1, "pipe_off wait timed out\n");
 	}
 }
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
  * protect mechanism may be enabled.
  *
  * Note! This is for pre-ILK only.
+ *
+ * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
  */
 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
 	intel_wait_for_vblank(dev_priv->dev, pipe);
 }

-static void disable_pch_dp(struct drm_i915_private *dev_priv,
-			   enum pipe pipe, int reg, u32 port_sel)
-{
-	u32 val = I915_READ(reg);
-	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
-		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
-		I915_WRITE(reg, val & ~DP_PORT_EN);
-	}
-}
-
-static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
-			     enum pipe pipe, int reg)
-{
-	u32 val = I915_READ(reg);
-	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
-		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
-			      reg, pipe);
-		I915_WRITE(reg, val & ~PORT_ENABLE);
-	}
-}
-
-/* Disable any ports connected to this transcoder */
-static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
-				    enum pipe pipe)
-{
-	u32 reg, val;
-
-	val = I915_READ(PCH_PP_CONTROL);
-	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
-
-	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
-	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
-	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
-
-	reg = PCH_ADPA;
-	val = I915_READ(reg);
-	if (adpa_pipe_enabled(dev_priv, pipe, val))
-		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
-
-	reg = PCH_LVDS;
-	val = I915_READ(reg);
-	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
-		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
-		I915_WRITE(reg, val & ~LVDS_PORT_EN);
-		POSTING_READ(reg);
-		udelay(100);
-	}
-
-	disable_pch_hdmi(dev_priv, pipe, HDMIB);
-	disable_pch_hdmi(dev_priv, pipe, HDMIC);
-	disable_pch_hdmi(dev_priv, pipe, HDMID);
-}
-
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)

 static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) + struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_framebuffer *old_fb; int ret; /* no fb bound */ - if (!crtc->fb) { + if (!fb) { DRM_ERROR("No FB bound\n"); return 0; } @@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, - to_intel_framebuffer(crtc->fb)->obj, + to_intel_framebuffer(fb)->obj, NULL); if (ret != 0) { mutex_unlock(&dev->struct_mutex); @@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - if (old_fb) - intel_finish_fb(old_fb); + if (crtc->fb) + intel_finish_fb(crtc->fb); - ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y); + ret = dev_priv->display.update_plane(crtc, fb, x, y); if (ret) { - intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); + intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); mutex_unlock(&dev->struct_mutex); DRM_ERROR("failed to update base address\n"); return ret; } + old_fb = crtc->fb; + crtc->fb = fb; + crtc->x = x; + crtc->y = y; + if (old_fb) { intel_wait_for_vblank(dev, intel_crtc->pipe); intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); @@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train done.\n"); } -static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) +static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; u32 reg, temp; @@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) } } +static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* Switch from PCDclk to Rawclk */ + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_PCDCLK); + + /* Disable CPU FDI TX PLL */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); + + POSTING_READ(reg); + udelay(100); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); + + /* Wait for the clocks to turn off. */ + POSTING_READ(reg); + udelay(100); +} + static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) static bool intel_crtc_driving_pch(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - struct intel_encoder *encoder; + struct intel_encoder *intel_encoder; /* * If there's a non-PCH eDP on this crtc, it must be DP_A, and that * must be driven by its own crtc; no sharing is possible. 
*/ - for_each_encoder_on_crtc(dev, crtc, encoder) { + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell * CPU handles all others */ @@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc) /* It is still unclear how this will work on PPT, so throw up a warning */ WARN_ON(!HAS_PCH_LPT(dev)); - if (encoder->type == DRM_MODE_ENCODER_DAC) { + if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); return true; } else { DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", - encoder->type); + intel_encoder->type); return false; } } - switch (encoder->type) { + switch (intel_encoder->type) { case INTEL_OUTPUT_EDP: - if (!intel_encoder_is_pch_edp(&encoder->base)) + if (!intel_encoder_is_pch_edp(&intel_encoder->base)) return false; continue; } @@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 temp; bool is_pch_port; + WARN_ON(!crtc->enabled); + if (intel_crtc->active) return; @@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) is_pch_port = intel_crtc_driving_pch(crtc); - if (is_pch_port) - ironlake_fdi_pll_enable(crtc); - else - ironlake_fdi_disable(crtc); + if (is_pch_port) { + ironlake_fdi_pll_enable(intel_crtc); + } else { + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + } + + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->pre_enable) + encoder->pre_enable(encoder); /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && @@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); intel_crtc_update_cursor(crtc, true); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); + + if (HAS_PCH_CPT(dev)) + intel_cpt_verify_modeset(dev, intel_crtc->pipe); } static void ironlake_crtc_disable(struct drm_crtc *crtc) @@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; u32 reg, temp; + if (!intel_crtc->active) return; + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->disable(encoder); + intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); intel_crtc_update_cursor(crtc, false); @@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) I915_WRITE(PF_CTL(pipe), 0); I915_WRITE(PF_WIN_SZ(pipe), 0); - ironlake_fdi_disable(crtc); + for_each_encoder_on_crtc(dev, crtc, encoder) + if (encoder->post_disable) + encoder->post_disable(encoder); - /* This is a horrible layering violation; we should be doing this in - * the connector/encoder ->prepare instead, but we don't always have - * enough information there about the config to know whether it will - * actually be necessary or just cause undesired flicker. 
- */ - intel_disable_pch_ports(dev_priv, pipe); + ironlake_fdi_disable(crtc); intel_disable_transcoder(dev_priv, pipe); @@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) /* disable PCH DPLL */ intel_disable_pch_pll(intel_crtc); - /* Switch from PCDclk to Rawclk */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_PCDCLK); - - /* Disable CPU FDI TX PLL */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); - - POSTING_READ(reg); - udelay(100); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); - - /* Wait for the clocks to turn off. */ - POSTING_READ(reg); - udelay(100); + ironlake_fdi_pll_disable(intel_crtc); intel_crtc->active = false; intel_update_watermarks(dev); @@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) mutex_unlock(&dev->struct_mutex); } -static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - - /* XXX: When our outputs are all unaware of DPMS modes other than off - * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. - */ - switch (mode) { - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); - ironlake_crtc_enable(crtc); - break; - - case DRM_MODE_DPMS_OFF: - DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); - ironlake_crtc_disable(crtc); - break; - } -} - static void ironlake_crtc_off(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + WARN_ON(!crtc->enabled); + if (intel_crtc->active) return; @@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) /* Give the overlay scaler a chance to enable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, true); intel_crtc_update_cursor(crtc, true); + + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); } static void i9xx_crtc_disable(struct drm_crtc *crtc) @@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + if (!intel_crtc->active) return; + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->disable(encoder); + /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); @@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_update_watermarks(dev); } -static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - /* XXX: When our outputs are all unaware of DPMS modes other than off - * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 
- */ - switch (mode) { - case DRM_MODE_DPMS_ON: - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - i9xx_crtc_enable(crtc); - break; - case DRM_MODE_DPMS_OFF: - i9xx_crtc_disable(crtc); - break; - } -} - static void i9xx_crtc_off(struct drm_crtc *crtc) { } -/** - * Sets the power management mode of the pipe and plane. - */ -static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) +static void intel_crtc_update_sarea(struct drm_crtc *crtc, + bool enabled) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - bool enabled; - - if (intel_crtc->dpms_mode == mode) - return; - - intel_crtc->dpms_mode = mode; - - dev_priv->display.dpms(crtc, mode); if (!dev->primary->master) return; @@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) if (!master_priv->sarea_priv) return; - enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; - switch (pipe) { case 0: master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; @@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) } } +/** + * Sets the power management mode of the pipe and plane. + */ +void intel_crtc_update_dpms(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + bool enable = false; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) + enable |= intel_encoder->connectors_active; + + if (enable) + dev_priv->display.crtc_enable(crtc); + else + dev_priv->display.crtc_disable(crtc); + + intel_crtc_update_sarea(crtc, enable); +} + +static void intel_crtc_noop(struct drm_crtc *crtc) +{ +} + static void intel_crtc_disable(struct drm_crtc *crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; + struct drm_connector *connector; struct drm_i915_private *dev_priv = dev->dev_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + /* crtc should still be enabled when we disable it. */ + WARN_ON(!crtc->enabled); + + dev_priv->display.crtc_disable(crtc); + intel_crtc_update_sarea(crtc, false); dev_priv->display.off(crtc); assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); @@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc) mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); + crtc->fb = NULL; + } + + /* Update computed state. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (!connector->encoder || !connector->encoder->crtc) + continue; + + if (connector->encoder->crtc != crtc) + continue; + + connector->dpms = DRM_MODE_DPMS_OFF; + to_intel_encoder(connector->encoder)->connectors_active = false; } } -/* Prepare for a mode set. - * - * Note we could be a lot smarter here. We need to figure out which outputs - * will be enabled, which disabled (in short, how the config will changes) - * and perform the minimum necessary steps to accomplish that, e.g. updating - * watermarks, FBC configuration, making sure PLLs are programmed correctly, - * panel fitting is in the proper state, etc. 
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
 {
-	i9xx_crtc_disable(crtc);
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled)
+			intel_crtc_disable(crtc);
+	}
 }

-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
 {
-	i9xx_crtc_enable(crtc);
 }

-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
 {
-	ironlake_crtc_disable(crtc);
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }

-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
 {
-	ironlake_crtc_enable(crtc);
+	if (mode == DRM_MODE_DPMS_ON) {
+		encoder->connectors_active = true;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	} else {
+		encoder->connectors_active = false;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	}
 }

-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
 {
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	/* lvds has its own version of prepare see intel_lvds_prepare */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+	if (connector->get_hw_state(connector)) {
+		struct intel_encoder *encoder = connector->encoder;
+		struct drm_crtc *crtc;
+		bool encoder_enabled;
+		enum pipe pipe;
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base));
+
+		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+		     "wrong connector dpms state\n");
+		WARN(connector->base.encoder != &encoder->base,
+		     "active connector not linked to encoder\n");
+		WARN(!encoder->connectors_active,
+		     "encoder->connectors_active not set\n");
+
+		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+		WARN(!encoder_enabled, "encoder not enabled\n");
+		if (WARN_ON(!encoder->base.crtc))
+			return;
+
+		crtc = encoder->base.crtc;
+
+		WARN(!crtc->enabled, "crtc not enabled\n");
+		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+		WARN(pipe != to_intel_crtc(crtc)->pipe,
+		     "encoder active on the wrong pipe\n");
+	}
 }

-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_encoder *encoder = intel_attached_encoder(connector);

-	/* lvds has its own version of commit see intel_lvds_commit */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+	/* All the simple cases only support two dpms states.
+	 */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;

-	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	if (encoder->base.crtc)
+		intel_encoder_dpms(encoder, mode);
+	else
+		WARN_ON(encoder->connectors_active != false);
+
+	intel_modeset_check_state(connector->dev);
 }
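
intel_encoder_dpms() above only flips connectors_active and then recomputes the whole pipe; the actual on/off decision is the OR-reduction over every encoder feeding the crtc, done in intel_crtc_update_dpms() earlier in this patch. A toy model of that reduction (simplified structures assumed for illustration, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy model: a crtc stays on iff any encoder feeding it still has
	 * active connectors, mirroring intel_crtc_update_dpms(). */
	struct toy_encoder { int crtc_id; bool connectors_active; };

	static bool crtc_should_be_on(const struct toy_encoder *encs, int n,
				      int crtc_id)
	{
		bool enable = false;
		int i;

		for (i = 0; i < n; i++)
			if (encs[i].crtc_id == crtc_id)
				enable |= encs[i].connectors_active;
		return enable;
	}

	int main(void)
	{
		struct toy_encoder encs[] = {
			{ .crtc_id = 0, .connectors_active = false },
			{ .crtc_id = 0, .connectors_active = true },	/* keeps crtc 0 on */
			{ .crtc_id = 1, .connectors_active = false },
		};

		printf("crtc0: %d, crtc1: %d\n",
		       crtc_should_be_on(encs, 3, 0), crtc_should_be_on(encs, 3, 1));
		return 0;
	}

This is why the helper can call intel_crtc_update_dpms() unconditionally in both branches: turning one connector off only turns the pipe off when no other encoder on it is still active.
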

-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;

-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
+	return encoder->get_hw_state(encoder, &pipe);
 }

 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
 		drm_mode_set_crtcinfo(adjusted_mode, 0);

+	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+	 * with a hsync front porch of 0.
+	 */
+	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+		return false;
+
 	return true;
 }

@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
 					 unsigned int *pipe_bpp,
 					 struct drm_display_mode *mode)
 {
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 	 * also stays within the max display bpc discovered above.
 	 */

-	switch (crtc->fb->depth) {
+	switch (fb->depth) {
 	case 8:
 		bpc = 8; /* since we go through a colormap */
 		break;
@@ -4191,12 +4231,6 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	POSTING_READ(DPLL(pipe));
 	udelay(150);

-	I915_WRITE(DPLL(pipe), dpll);
-
-	/* Wait for the clocks to stabilize. */
-	POSTING_READ(DPLL(pipe));
-	udelay(150);
-
 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
 	 * This is an exception to the general rule that mode_set doesn't turn
 	 * things on.
@@ -4204,6 +4238,12 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
 		intel_update_lvds(crtc, clock, adjusted_mode);

+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
 	/* The pixel multiplier can only be updated once the
 	 * DPLL is enabled and the clocks are stable.
* @@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb) + struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); - ret = intel_pipe_set_base(crtc, x, y, old_fb); + ret = intel_pipe_set_base(crtc, x, y, fb); intel_update_watermarks(dev); @@ -4560,63 +4600,78 @@ static int ironlake_get_refclk(struct drm_crtc *crtc) return 120000; } -static int ironlake_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, +static void ironlake_set_pipeconf(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) + bool dither) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; - int refclk, num_connectors = 0; - intel_clock_t clock, reduced_clock; - u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; - bool ok, has_reduced_clock = false, is_sdvo = false; - bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; - struct intel_encoder *encoder, *edp_encoder = NULL; - const intel_limit_t *limit; - int ret; - struct fdi_m_n m_n = {0}; - u32 temp; - int target_clock, pixel_multiplier, lane, link_bw, factor; - unsigned int pipe_bpp; - bool dither; - bool is_cpu_edp = false, is_pch_edp = false; + uint32_t val; - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - case INTEL_OUTPUT_SDVO: - case INTEL_OUTPUT_HDMI: - is_sdvo = true; - if (encoder->needs_tv_clock) - is_tv = true; - break; - case INTEL_OUTPUT_TVOUT: - is_tv = true; - break; - case INTEL_OUTPUT_ANALOG: - is_crt = true; - break; - case INTEL_OUTPUT_DISPLAYPORT: - is_dp = true; - break; - case INTEL_OUTPUT_EDP: - is_dp = true; - if (intel_encoder_is_pch_edp(&encoder->base)) - is_pch_edp = true; - else - is_cpu_edp = true; - edp_encoder = encoder; + val = I915_READ(PIPECONF(pipe)); + + val &= ~PIPE_BPC_MASK; + switch (intel_crtc->bpp) { + case 18: + val |= PIPE_6BPC; + break; + case 24: + val |= PIPE_8BPC; + break; + case 30: + val |= PIPE_10BPC; + break; + case 36: + val |= PIPE_12BPC; + break; + default: + val |= PIPE_8BPC; + break; + } + + val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); + if (dither) + val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + + val &= ~PIPECONF_INTERLACE_MASK; + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + val |= PIPECONF_INTERLACED_ILK; + else + val |= PIPECONF_PROGRESSIVE; + + I915_WRITE(PIPECONF(pipe), val); + POSTING_READ(PIPECONF(pipe)); +} + +static bool ironlake_compute_clocks(struct drm_crtc *crtc, + struct drm_display_mode *adjusted_mode, + intel_clock_t *clock, + bool *has_reduced_clock, + intel_clock_t *reduced_clock) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *intel_encoder; + int refclk; + const intel_limit_t *limit; + bool ret, is_sdvo = false, is_tv = false, is_lvds = false; + + for_each_encoder_on_crtc(dev, crtc, intel_encoder) { + switch (intel_encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = 
true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (intel_encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; break; } - - num_connectors++; } refclk = ironlake_get_refclk(crtc); @@ -4627,15 +4682,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ limit = intel_limit(crtc, refclk); - ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, - &clock); - if (!ok) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; - } - - /* Ensure that the cursor is valid for the new mode before changing... */ - intel_crtc_update_cursor(crtc, true); + ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, + clock); + if (!ret) + return false; if (is_lvds && dev_priv->lvds_downclock_avail) { /* @@ -4644,16 +4694,86 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, * by using the FP0/FP1. In such case we will disable the LVDS * downclock feature. */ - has_reduced_clock = limit->find_pll(limit, crtc, - dev_priv->lvds_downclock, - refclk, - &clock, - &reduced_clock); + *has_reduced_clock = limit->find_pll(limit, crtc, + dev_priv->lvds_downclock, + refclk, + clock, + reduced_clock); } if (is_sdvo && is_tv) - i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); + i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock); + + return true; +} + +static int ironlake_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, + struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int plane = intel_crtc->plane; + int num_connectors = 0; + intel_clock_t clock, reduced_clock; + u32 dpll, fp = 0, fp2 = 0; + bool ok, has_reduced_clock = false, is_sdvo = false; + bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; + struct intel_encoder *encoder, *edp_encoder = NULL; + int ret; + struct fdi_m_n m_n = {0}; + u32 temp; + int target_clock, pixel_multiplier, lane, link_bw, factor; + unsigned int pipe_bpp; + bool dither; + bool is_cpu_edp = false, is_pch_edp = false; + + for_each_encoder_on_crtc(dev, crtc, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + is_lvds = true; + break; + case INTEL_OUTPUT_SDVO: + case INTEL_OUTPUT_HDMI: + is_sdvo = true; + if (encoder->needs_tv_clock) + is_tv = true; + break; + case INTEL_OUTPUT_TVOUT: + is_tv = true; + break; + case INTEL_OUTPUT_ANALOG: + is_crt = true; + break; + case INTEL_OUTPUT_DISPLAYPORT: + is_dp = true; + break; + case INTEL_OUTPUT_EDP: + is_dp = true; + if (intel_encoder_is_pch_edp(&encoder->base)) + is_pch_edp = true; + else + is_cpu_edp = true; + edp_encoder = encoder; + break; + } + + num_connectors++; + } + + ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, + &has_reduced_clock, &reduced_clock); + if (!ok) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + /* Ensure that the cursor is valid for the new mode before changing... 
*/ + intel_crtc_update_cursor(crtc, true); /* FDI link */ pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); @@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, target_clock = adjusted_mode->clock; /* determine panel color depth */ - temp = I915_READ(PIPECONF(pipe)); - temp &= ~PIPE_BPC_MASK; - dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); - switch (pipe_bpp) { - case 18: - temp |= PIPE_6BPC; - break; - case 24: - temp |= PIPE_8BPC; - break; - case 30: - temp |= PIPE_10BPC; - break; - case 36: - temp |= PIPE_12BPC; - break; - default: + dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode); + if (is_lvds && dev_priv->lvds_dither) + dither = true; + + if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 && + pipe_bpp != 36) { WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", - pipe_bpp); - temp |= PIPE_8BPC; + pipe_bpp); pipe_bpp = 24; - break; } - intel_crtc->bpp = pipe_bpp; - I915_WRITE(PIPECONF(pipe), temp); if (!lane) { /* @@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, else dpll |= PLL_REF_INPUT_DREFCLK; - /* setup pipeconf */ - pipeconf = I915_READ(PIPECONF(pipe)); - - /* Set up the display plane register */ - dspcntr = DISPPLANE_GAMMA_ENABLE; - DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); drm_mode_debug_printmodeline(mode); @@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(PCH_LVDS, temp); } - pipeconf &= ~PIPECONF_DITHER_EN; - pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; - if ((is_lvds && dev_priv->lvds_dither) || dither) { - pipeconf |= PIPECONF_DITHER_EN; - pipeconf |= PIPECONF_DITHER_TYPE_SP; - } if (is_dp && !is_cpu_edp) { intel_dp_set_m_n(crtc, mode, adjusted_mode); } else { @@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, } } - pipeconf &= ~PIPECONF_INTERLACE_MASK; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - pipeconf |= PIPECONF_INTERLACED_ILK; /* the chip adds 2 halflines automatically */ adjusted_mode->crtc_vtotal -= 1; adjusted_mode->crtc_vblank_end -= 1; @@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_htotal/2); } else { - pipeconf |= PIPECONF_PROGRESSIVE; I915_WRITE(VSYNCSHIFT(pipe), 0); } @@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (is_cpu_edp) ironlake_set_pll_edp(crtc, adjusted_mode->clock); - I915_WRITE(PIPECONF(pipe), pipeconf); - POSTING_READ(PIPECONF(pipe)); + ironlake_set_pipeconf(crtc, adjusted_mode, dither); intel_wait_for_vblank(dev, pipe); - I915_WRITE(DSPCNTR(plane), dspcntr); + /* Set up the display plane register */ + I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); POSTING_READ(DSPCNTR(plane)); - ret = intel_pipe_set_base(crtc, x, y, old_fb); + ret = intel_pipe_set_base(crtc, x, y, fb); intel_update_watermarks(dev); @@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb) + struct drm_framebuffer *fb) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, drm_vblank_pre_modeset(dev, pipe); ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, - x, y, old_fb); + x, y, fb); drm_vblank_post_modeset(dev, pipe); - if (ret) - intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; - else - 
intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; - return ret; } @@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector, I915_WRITE(G4X_AUD_CNTL_ST, i); } +static void haswell_write_eld(struct drm_connector *connector, + struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = connector->dev->dev_private; + uint8_t *eld = connector->eld; + struct drm_device *dev = crtc->dev; + uint32_t eldv; + uint32_t i; + int len; + int pipe = to_intel_crtc(crtc)->pipe; + int tmp; + + int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); + int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); + int aud_config = HSW_AUD_CFG(pipe); + int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; + + + DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); + + /* Audio output enable */ + DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); + tmp = I915_READ(aud_cntrl_st2); + tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); + I915_WRITE(aud_cntrl_st2, tmp); + + /* Wait for 1 vertical blank */ + intel_wait_for_vblank(dev, pipe); + + /* Set ELD valid state */ + tmp = I915_READ(aud_cntrl_st2); + DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); + tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); + I915_WRITE(aud_cntrl_st2, tmp); + tmp = I915_READ(aud_cntrl_st2); + DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); + + /* Enable HDMI mode */ + tmp = I915_READ(aud_config); + DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); + /* clear N_programing_enable and N_value_index */ + tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); + I915_WRITE(aud_config, tmp); + + DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); + + eldv = AUDIO_ELD_VALID_A << (pipe * 4); + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { + DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); + eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ + I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ + } else + I915_WRITE(aud_config, 0); + + if (intel_eld_uptodate(connector, + aud_cntrl_st2, eldv, + aud_cntl_st, IBX_ELD_ADDRESS, + hdmiw_hdmiedid)) + return; + + i = I915_READ(aud_cntrl_st2); + i &= ~eldv; + I915_WRITE(aud_cntrl_st2, i); + + if (!eld[0]) + return; + + i = I915_READ(aud_cntl_st); + i &= ~IBX_ELD_ADDRESS; + I915_WRITE(aud_cntl_st, i); + i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ + DRM_DEBUG_DRIVER("port num:%d\n", i); + + len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ + DRM_DEBUG_DRIVER("ELD size %d\n", len); + for (i = 0; i < len; i++) + I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); + + i = I915_READ(aud_cntrl_st2); + i |= eldv; + I915_WRITE(aud_cntrl_st2, i); + +} + static void ironlake_write_eld(struct drm_connector *connector, struct drm_crtc *crtc) { @@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector, int aud_config; int aud_cntl_st; int aud_cntrl_st2; + int pipe = to_intel_crtc(crtc)->pipe; if (HAS_PCH_IBX(connector->dev)) { - hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A; - aud_config = IBX_AUD_CONFIG_A; - aud_cntl_st = IBX_AUD_CNTL_ST_A; + hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); + aud_config = IBX_AUD_CFG(pipe); + aud_cntl_st = IBX_AUD_CNTL_ST(pipe); aud_cntrl_st2 = IBX_AUD_CNTL_ST2; } else { - hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A; - aud_config = CPT_AUD_CONFIG_A; - aud_cntl_st = CPT_AUD_CNTL_ST_A; + hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); + aud_config = CPT_AUD_CFG(pipe); + aud_cntl_st = CPT_AUD_CNTL_ST(pipe); aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; } - i = to_intel_crtc(crtc)->pipe; - 
hdmiw_hdmiedid += i * 0x100; - aud_cntl_st += i * 0x100; - aud_config += i * 0x100; - - DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); + DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); i = I915_READ(aud_cntl_st); - i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ + i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ if (!i) { DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); /* operate blindly on all ports */ @@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, uint32_t addr; int ret; - DRM_DEBUG_KMS("\n"); - /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG_KMS("cursor off\n"); @@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev, return fb; } -bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, - struct drm_connector *connector, +bool intel_get_load_detect_pipe(struct drm_connector *connector, struct drm_display_mode *mode, struct intel_load_detect_pipe *old) { struct intel_crtc *intel_crtc; + struct intel_encoder *intel_encoder = + intel_attached_encoder(connector); struct drm_crtc *possible_crtc; struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; - struct drm_framebuffer *old_fb; + struct drm_framebuffer *fb; int i = -1; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", @@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, if (encoder->crtc) { crtc = encoder->crtc; - intel_crtc = to_intel_crtc(crtc); - old->dpms_mode = intel_crtc->dpms_mode; + old->dpms_mode = connector->dpms; old->load_detect_temp = false; /* Make sure the crtc and connector are running */ - if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { - struct drm_encoder_helper_funcs *encoder_funcs; - struct drm_crtc_helper_funcs *crtc_funcs; - - crtc_funcs = crtc->helper_private; - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); - - encoder_funcs = encoder->helper_private; - encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); - } + if (connector->dpms != DRM_MODE_DPMS_ON) + connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); return true; } @@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, return false; } - encoder->crtc = crtc; - connector->encoder = encoder; + intel_encoder->new_crtc = to_intel_crtc(crtc); + to_intel_connector(connector)->new_encoder = intel_encoder; intel_crtc = to_intel_crtc(crtc); - old->dpms_mode = intel_crtc->dpms_mode; + old->dpms_mode = connector->dpms; old->load_detect_temp = true; old->release_fb = NULL; if (!mode) mode = &load_detect_mode; - old_fb = crtc->fb; - /* We need a framebuffer large enough to accommodate all accesses * that the plane may generate whilst we perform load detection. * We can not rely on the fbcon either being present (we get called @@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, * not even exist) or that it is large enough to satisfy the * requested mode. 
*/ - crtc->fb = mode_fits_in_fbdev(dev, mode); - if (crtc->fb == NULL) { + fb = mode_fits_in_fbdev(dev, mode); + if (fb == NULL) { DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); - crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); - old->release_fb = crtc->fb; + fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); + old->release_fb = fb; } else DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); - if (IS_ERR(crtc->fb)) { + if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); - crtc->fb = old_fb; - return false; + goto fail; } - if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { + if (!intel_set_mode(crtc, mode, 0, 0, fb)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); - crtc->fb = old_fb; - return false; + goto fail; } /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); return true; +fail: + connector->encoder = NULL; + encoder->crtc = NULL; + return false; } -void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, - struct drm_connector *connector, +void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old) { + struct intel_encoder *intel_encoder = + intel_attached_encoder(connector); struct drm_encoder *encoder = &intel_encoder->base; - struct drm_device *dev = encoder->dev; - struct drm_crtc *crtc = encoder->crtc; - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, drm_get_connector_name(connector), encoder->base.id, drm_get_encoder_name(encoder)); if (old->load_detect_temp) { - connector->encoder = NULL; - drm_helper_disable_unused_functions(dev); + struct drm_crtc *crtc = encoder->crtc; + + to_intel_connector(connector)->new_encoder = NULL; + intel_encoder->new_crtc = NULL; + intel_set_mode(crtc, NULL, 0, 0, NULL); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); @@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, } /* Switch crtc and encoder back off if necessary */ - if (old->dpms_mode != DRM_MODE_DPMS_ON) { - encoder_funcs->dpms(encoder, old->dpms_mode); - crtc_funcs->dpms(crtc, old->dpms_mode); - } + if (old->dpms_mode != DRM_MODE_DPMS_ON) + connector->funcs->dpms(connector, old->dpms_mode); } /* Returns the clock of the currently programmed mode of the given pipe. */ @@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -#define GPU_IDLE_TIMEOUT 500 /* ms */ - -/* When this timer fires, we've been idle for awhile */ -static void intel_gpu_idle_timer(unsigned long arg) -{ - struct drm_device *dev = (struct drm_device *)arg; - drm_i915_private_t *dev_priv = dev->dev_private; - - if (!list_empty(&dev_priv->mm.active_list)) { - /* Still processing requests, so just re-arm the timer. 
*/ - mod_timer(&dev_priv->idle_timer, jiffies + - msecs_to_jiffies(GPU_IDLE_TIMEOUT)); - return; - } - - dev_priv->busy = false; - queue_work(dev_priv->wq, &dev_priv->idle_work); -} - -#define CRTC_IDLE_TIMEOUT 1000 /* ms */ - -static void intel_crtc_idle_timer(unsigned long arg) -{ - struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; - struct drm_crtc *crtc = &intel_crtc->base; - drm_i915_private_t *dev_priv = crtc->dev->dev_private; - struct intel_framebuffer *intel_fb; - - intel_fb = to_intel_framebuffer(crtc->fb); - if (intel_fb && intel_fb->obj->active) { - /* The framebuffer is still being accessed by the GPU. */ - mod_timer(&intel_crtc->idle_timer, jiffies + - msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); - return; - } - - intel_crtc->busy = false; - queue_work(dev_priv->wq, &dev_priv->idle_work); -} - static void intel_increase_pllclock(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc) if (dpll & DISPLAY_RATE_SELECT_FPA1) DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); } - - /* Schedule downclock */ - mod_timer(&intel_crtc->idle_timer, jiffies + - msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); } static void intel_decrease_pllclock(struct drm_crtc *crtc) @@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) } -/** - * intel_idle_update - adjust clocks for idleness - * @work: work struct - * - * Either the GPU or display (or both) went idle. Check the busy status - * here and adjust the CRTC and GPU clocks as necessary. - */ -static void intel_idle_update(struct work_struct *work) +void intel_mark_busy(struct drm_device *dev) { - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, - idle_work); - struct drm_device *dev = dev_priv->dev; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + i915_update_gfx_val(dev->dev_private); +} - if (!i915_powersave) - return; +void intel_mark_idle(struct drm_device *dev) +{ +} - mutex_lock(&dev->struct_mutex); +void intel_mark_fb_busy(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_crtc *crtc; - i915_update_gfx_val(dev_priv); + if (!i915_powersave) + return; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - /* Skip inactive CRTCs */ if (!crtc->fb) continue; - intel_crtc = to_intel_crtc(crtc); - if (!intel_crtc->busy) - intel_decrease_pllclock(crtc); + if (to_intel_framebuffer(crtc->fb)->obj == obj) + intel_increase_pllclock(crtc); } - - - mutex_unlock(&dev->struct_mutex); } -/** - * intel_mark_busy - mark the GPU and possibly the display busy - * @dev: drm device - * @obj: object we're operating on - * - * Callers can use this function to indicate that the GPU is busy processing - * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout - * buffer), we'll also mark the display as busy, so we know to increase its - * clock frequency. 
- */ -void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) +void intel_mark_fb_idle(struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_crtc *crtc = NULL; - struct intel_framebuffer *intel_fb; - struct intel_crtc *intel_crtc; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - if (!dev_priv->busy) { - intel_sanitize_pm(dev); - dev_priv->busy = true; - } else - mod_timer(&dev_priv->idle_timer, jiffies + - msecs_to_jiffies(GPU_IDLE_TIMEOUT)); + struct drm_device *dev = obj->base.dev; + struct drm_crtc *crtc; - if (obj == NULL) + if (!i915_powersave) return; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (!crtc->fb) continue; - intel_crtc = to_intel_crtc(crtc); - intel_fb = to_intel_framebuffer(crtc->fb); - if (intel_fb->obj == obj) { - if (!intel_crtc->busy) { - /* Non-busy -> busy, upclock */ - intel_increase_pllclock(crtc); - intel_crtc->busy = true; - } else { - /* Busy -> busy, put off timer */ - mod_timer(&intel_crtc->idle_timer, jiffies + - msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); - } - } + if (to_intel_framebuffer(crtc->fb)->obj == obj) + intel_decrease_pllclock(crtc); } } @@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, default: WARN_ONCE(1, "unknown plane in flip command\n"); ret = -ENODEV; - goto err; + goto err_unpin; } ret = intel_ring_begin(ring, 4); @@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_pending; intel_disable_fbc(dev); - intel_mark_busy(dev, obj); + intel_mark_fb_busy(obj); mutex_unlock(&dev->struct_mutex); trace_i915_flip_request(intel_crtc->plane, obj); @@ -6527,81 +6594,807 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return ret; } -static void intel_sanitize_modesetting(struct drm_device *dev, - int pipe, int plane) +static struct drm_crtc_helper_funcs intel_helper_funcs = { + .mode_set_base_atomic = intel_pipe_set_base_atomic, + .load_lut = intel_crtc_load_lut, + .disable = intel_crtc_noop, +}; + +bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = dev->dev_private; - u32 reg, val; - int i; + struct intel_encoder *other_encoder; + struct drm_crtc *crtc = &encoder->new_crtc->base; - /* Clear any frame start delays used for debugging left by the BIOS */ - for_each_pipe(i) { - reg = PIPECONF(i); - I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + if (WARN_ON(!crtc)) + return false; + + list_for_each_entry(other_encoder, + &crtc->dev->mode_config.encoder_list, + base.head) { + + if (&other_encoder->new_crtc->base != crtc || + encoder == other_encoder) + continue; + else + return true; } - if (HAS_PCH_SPLIT(dev)) - return; + return false; +} - /* Who knows what state these registers were left in by the BIOS or - * grub? - * - * If we leave the registers in a conflicting state (e.g. with the - * display plane reading from the other pipe than the one we intend - * to use) then when we attempt to teardown the active mode, we will - * not disable the pipes and planes in the correct order -- leaving - * a plane reading from a disabled pipe and possibly leading to - * undefined behaviour. 
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+				  struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct drm_crtc *tmp;
+	int crtc_mask = 1;
+
+	WARN(!crtc, "checking null crtc?\n");
+
+	dev = crtc->dev;
+
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			break;
+		crtc_mask <<= 1;
+	}
+
+	if (encoder->possible_crtcs & crtc_mask)
+		return true;
+	return false;
+}
+
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->new_encoder =
+			to_intel_encoder(connector->base.encoder);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(encoder->base.crtc);
+	}
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->base.encoder = &connector->new_encoder->base;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->base.crtc = &encoder->new_crtc->base;
+	}
+}
+
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
+
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+	if (!adjusted_mode)
+		return ERR_PTR(-ENOMEM);
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely. */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
-	reg = DSPCNTR(plane);
-	val = I915_READ(reg);
+		if (&encoder->new_crtc->base != crtc)
+			continue;
+		encoder_funcs = encoder->base.helper_private;
+		if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+						adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
+			goto fail;
+		}
+	}

-	if ((val & DISPLAY_PLANE_ENABLE) == 0)
-		return;
-	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
-		return;
+	if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto fail;
+	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);

-	/* This display plane is active and attached to the other CPU pipe. */
-	pipe = !pipe;
+	return adjusted_mode;
+fail:
+	drm_mode_destroy(dev, adjusted_mode);
+	return ERR_PTR(-EINVAL);
+}

-	/* Disable the plane and wait for it to stop reading from the pipe. */
-	intel_disable_plane(dev_priv, plane, pipe);
-	intel_disable_pipe(dev_priv, pipe);
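
The staged/committed split introduced above keeps user-requested routing in the new_encoder/new_crtc pointers until the modeset has succeeded, and only then copies it into the pointers the rest of the driver reads. A minimal sketch of that pattern (toy structures assumed for illustration only):

	#include <stdio.h>

	/* Toy model of the stage/commit split: requests land in the "new_"
	 * pointer first; a successful modeset commits them. */
	struct toy_crtc { int id; };
	struct toy_encoder {
		struct toy_crtc *crtc;		/* committed state */
		struct toy_crtc *new_crtc;	/* staged state */
	};

	static void commit(struct toy_encoder *enc)
	{
		enc->crtc = enc->new_crtc;	/* point of no return */
	}

	int main(void)
	{
		struct toy_crtc pipe_a = { .id = 0 };
		struct toy_encoder enc = { .crtc = NULL, .new_crtc = &pipe_a };

		/* On failure we would simply drop new_crtc; committed state
		 * is untouched until commit() runs. */
		commit(&enc);
		printf("encoder now driven by crtc %d\n", enc.crtc->id);
		return 0;
	}
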
+/* Computes which crtcs are affected and sets the relevant bits in the mask. For
+ * simplicity we use the crtc's pipe number (because it's easier to obtain).
+ */
+static void
+intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
+			     unsigned *prepare_pipes, unsigned *disable_pipes)
+{
+	struct intel_crtc *intel_crtc;
+	struct drm_device *dev = crtc->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct drm_crtc *tmp_crtc;
+
+	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
+
+	/* Check which crtcs have changed outputs connected to them, these need
+	 * to be part of the prepare_pipes mask. We don't (yet) support global
+	 * modeset across multiple crtcs, so modeset_pipes will only have one
+	 * bit set at most. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		if (connector->base.encoder == &connector->new_encoder->base)
+			continue;
+
+		if (connector->base.encoder) {
+			tmp_crtc = connector->base.encoder->crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (connector->new_encoder)
+			*prepare_pipes |=
+				1 << connector->new_encoder->new_crtc->pipe;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->base.crtc == &encoder->new_crtc->base)
+			continue;
+
+		if (encoder->base.crtc) {
+			tmp_crtc = encoder->base.crtc;
+
+			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
+		}
+
+		if (encoder->new_crtc)
+			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
+	}
+
+	/* Check for any pipes that will be fully disabled ... */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		bool used = false;
+
+		/* Don't try to disable disabled crtcs. */
+		if (!intel_crtc->base.enabled)
+			continue;
+
+		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+				    base.head) {
+			if (encoder->new_crtc == intel_crtc)
+				used = true;
+		}
+
+		if (!used)
+			*disable_pipes |= 1 << intel_crtc->pipe;
+	}
+
+	/* set_mode is also used to update properties on live display pipes. */
+	intel_crtc = to_intel_crtc(crtc);
+	if (crtc->enabled)
+		*prepare_pipes |= 1 << intel_crtc->pipe;
+
+	/* We only support modeset on one single crtc, hence we need to do that
+	 * only for the passed in crtc iff we change anything else than just
+	 * disable crtcs.
+	 *
+	 * This is actually not true, to be fully compatible with the old crtc
+	 * helper we automatically disable _any_ output (i.e. doesn't need to be
+	 * connected to the crtc we're modesetting on) if it's disconnected.
+	 * Which is a rather nutty api (since changing the output configuration
+	 * without userspace's explicit request can lead to confusion), but
+	 * alas. Hence we currently need to modeset on all pipes we prepare. */
+	if (*prepare_pipes)
+		*modeset_pipes = *prepare_pipes;
+
+	/* ... and mask these out. */
+	*modeset_pipes &= ~(*disable_pipes);
+	*prepare_pipes &= ~(*disable_pipes);
 }
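
To make the three masks concrete, here is a small worked example of the bookkeeping above: pipes whose output configuration changes go into prepare_pipes, pipes losing all outputs into disable_pipes, every prepared pipe is (for now) also fully modeset, and the disabled pipes are masked back out at the end. The values are toy inputs, not derived from real hardware state:

	#include <stdio.h>

	int main(void)
	{
		unsigned prepare_pipes = (1 << 0) | (1 << 2);	/* outputs changed on pipes 0 and 2 */
		unsigned disable_pipes = 1 << 2;		/* pipe 2 loses all of its outputs */
		unsigned modeset_pipes = prepare_pipes;		/* currently: modeset all prepared pipes */

		/* ... and mask the disabled pipes out, as the driver does. */
		modeset_pipes &= ~disable_pipes;
		prepare_pipes &= ~disable_pipes;

		/* Prints: modeset: 1, prepare: 1, disable: 4 */
		printf("modeset: %x, prepare: %x, disable: %x\n",
		       modeset_pipes, prepare_pipes, disable_pipes);
		return 0;
	}
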

-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
 {
+	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

-	/* Reset flags back to the 'unknown' status so that they
-	 * will be correctly set on the initial modeset.
-	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc)
+			return true;
+
+	return false;
+}
+
+static void
+intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+{
+	struct intel_encoder *intel_encoder;
+	struct intel_crtc *intel_crtc;
+	struct drm_connector *connector;
+
+	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (!intel_encoder->base.crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe))
+			intel_encoder->connectors_active = false;
+	}
+
+	intel_modeset_commit_output_state(dev);
+
+	/* Update computed state. */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
+	}
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		intel_crtc = to_intel_crtc(connector->encoder->crtc);
+
+		if (prepare_pipes & (1 << intel_crtc->pipe)) {
+			struct drm_property *dpms_property =
+				dev->mode_config.dpms_property;
+
+			connector->dpms = DRM_MODE_DPMS_ON;
+			drm_connector_property_set_value(connector,
+							 dpms_property,
+							 DRM_MODE_DPMS_ON);
+
+			intel_encoder = to_intel_encoder(connector->encoder);
+			intel_encoder->connectors_active = true;
+		}
+	}
+}
+
+#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
+	list_for_each_entry((intel_crtc), \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
+		if (mask & (1 <<(intel_crtc)->pipe))
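
for_each_intel_crtc_masked() relies on the dangling-if idiom: the trailing if filters the loop body, so the macro must be followed by exactly one statement and is unsafe directly before an else. A self-contained sketch of the same idiom (hypothetical names, not the driver's macro):

	#include <stdio.h>

	/* Toy version of a mask-filtered iteration macro. The trailing `if`
	 * filters the loop body; never put an `else` right after a use of
	 * this macro, it would bind to the hidden `if`. */
	#define for_each_masked(i, n, mask) \
		for ((i) = 0; (i) < (n); (i)++) \
			if ((mask) & (1 << (i)))

	int main(void)
	{
		int pipe;
		unsigned prepare_pipes = 0x5;	/* pipes 0 and 2 */

		for_each_masked(pipe, 3, prepare_pipes)
			printf("preparing pipe %d\n", pipe);
		return 0;
	}
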
*/ + intel_connector_check_state(connector); + + WARN(&connector->new_encoder->base != connector->base.encoder, + "connector's staged encoder doesn't match current encoder\n"); + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + bool enabled = false; + bool active = false; + enum pipe pipe, tracked_pipe; + + DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base)); + + WARN(&encoder->new_crtc->base != encoder->base.crtc, + "encoder's staged crtc doesn't match current crtc\n"); + WARN(encoder->connectors_active && !encoder->base.crtc, + "encoder's active_connectors set, but no crtc\n"); + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->base.encoder != &encoder->base) + continue; + enabled = true; + if (connector->base.dpms != DRM_MODE_DPMS_OFF) + active = true; + } + WARN(!!encoder->base.crtc != enabled, + "encoder's enabled state mismatch " + "(expected %i, found %i)\n", + !!encoder->base.crtc, enabled); + WARN(active && !encoder->base.crtc, + "active encoder with no crtc\n"); + + WARN(encoder->connectors_active != active, + "encoder's computed active state doesn't match tracked active state " + "(expected %i, found %i)\n", active, encoder->connectors_active); + + active = encoder->get_hw_state(encoder, &pipe); + WARN(active != encoder->connectors_active, + "encoder's hw state doesn't match sw tracking " + "(expected %i, found %i)\n", + encoder->connectors_active, active); + + if (!encoder->base.crtc) + continue; + + tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; + WARN(active && pipe != tracked_pipe, + "active encoder's pipe doesn't match " + "(expected %i, found %i)\n", + tracked_pipe, pipe); + + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + bool enabled = false; + bool active = false; + + DRM_DEBUG_KMS("[CRTC:%d]\n", + crtc->base.base.id); + + WARN(crtc->active && !crtc->base.enabled, + "active crtc, but not enabled in sw tracking\n"); + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->base.crtc != &crtc->base) + continue; + enabled = true; + if (encoder->connectors_active) + active = true; + } + WARN(active != crtc->active, + "crtc's computed active state doesn't match tracked active state " + "(expected %i, found %i)\n", active, crtc->active); + WARN(enabled != crtc->base.enabled, + "crtc's computed enabled state doesn't match tracked enabled state " + "(expected %i, found %i)\n", enabled, crtc->base.enabled); + + assert_pipe(dev->dev_private, crtc->pipe, crtc->active); + } +} + +bool intel_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb) +{ + struct drm_device *dev = crtc->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; + struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_encoder *encoder; + struct intel_crtc *intel_crtc; + unsigned disable_pipes, prepare_pipes, modeset_pipes; + bool ret = true; + + intel_modeset_affected_pipes(crtc, &modeset_pipes, + &prepare_pipes, &disable_pipes); + + DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n", + modeset_pipes, prepare_pipes, disable_pipes); + + for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) + intel_crtc_disable(&intel_crtc->base); + + saved_hwmode = crtc->hwmode; + saved_mode = crtc->mode; + + /* Hack: Because we don't (yet) support global modeset on
multiple + * crtcs, we don't keep track of the new mode for more than one crtc. + * Hence simply check whether any bit is set in modeset_pipes in all the + * pieces of code that are not yet converted to deal with multiple crtcs + * changing their mode at the same time. */ + adjusted_mode = NULL; + if (modeset_pipes) { + adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); + if (IS_ERR(adjusted_mode)) { + return false; + } + } + + for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { + if (intel_crtc->base.enabled) + dev_priv->display.crtc_disable(&intel_crtc->base); + } + + /* crtc->mode is already used by the ->mode_set callbacks, hence we need + * to set it here already even though we pass it down the callchain. */ - intel_crtc->dpms_mode = -1; + if (modeset_pipes) + crtc->mode = *mode; + + /* Only after disabling all output pipelines that will be changed can we + * update the output configuration. */ + intel_modeset_update_state(dev, prepare_pipes); - /* We need to fix up any BIOS configuration that conflicts with - * our expectations. + /* Set up the DPLL and any encoder state that needs to adjust or depend + * on the DPLL. */ - intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); + for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { + ret = !intel_crtc_mode_set(&intel_crtc->base, + mode, adjusted_mode, + x, y, fb); + if (!ret) + goto done; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + + if (encoder->crtc != &intel_crtc->base) + continue; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", + encoder->base.id, drm_get_encoder_name(encoder), + mode->base.id, mode->name); + encoder_funcs = encoder->helper_private; + encoder_funcs->mode_set(encoder, mode, adjusted_mode); + } + } + + /* Now enable the clocks, plane, pipe, and connectors that we set up. */ + for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) + dev_priv->display.crtc_enable(&intel_crtc->base); + + if (modeset_pipes) { + /* Store real post-adjustment hardware mode. */ + crtc->hwmode = *adjusted_mode; + + /* Calculate and store various constants which + * are later needed by vblank and swap-completion + * timestamping. They are derived from true hwmode.
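/*
 * Editor's sketch, not part of the patch: the ordering that intel_set_mode()
 * enforces, reduced to a toy -- disable first, program while everything is
 * off, then re-enable. The toy_pipe struct and callbacks are hypothetical.
 */
#include <stdio.h>

struct toy_pipe {
	int enabled;
	void (*disable)(struct toy_pipe *);
	void (*mode_set)(struct toy_pipe *);
	void (*enable)(struct toy_pipe *);
};

static void off(struct toy_pipe *p)  { p->enabled = 0; puts("disable"); }
static void prog(struct toy_pipe *p) { (void)p; puts("mode_set"); }
static void on(struct toy_pipe *p)   { p->enabled = 1; puts("enable"); }

static void toy_set_mode(struct toy_pipe *p)
{
	if (p->enabled)		/* 1. shut down before reconfiguring */
		p->disable(p);
	p->mode_set(p);		/* 2. program clocks/encoders while off */
	p->enable(p);		/* 3. light everything back up */
}

int main(void)
{
	struct toy_pipe p = { 1, off, prog, on };
	toy_set_mode(&p);
	return 0;
}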
+ */ + drm_calc_timestamping_constants(crtc); + } + + /* FIXME: add subpixel order */ +done: + drm_mode_destroy(dev, adjusted_mode); + if (!ret && crtc->enabled) { + crtc->hwmode = saved_hwmode; + crtc->mode = saved_mode; + } else { + intel_modeset_check_state(dev); + } + + return ret; } -static struct drm_crtc_helper_funcs intel_helper_funcs = { - .dpms = intel_crtc_dpms, - .mode_fixup = intel_crtc_mode_fixup, - .mode_set = intel_crtc_mode_set, - .mode_set_base = intel_pipe_set_base, - .mode_set_base_atomic = intel_pipe_set_base_atomic, - .load_lut = intel_crtc_load_lut, - .disable = intel_crtc_disable, -}; +#undef for_each_intel_crtc_masked + +static void intel_set_config_free(struct intel_set_config *config) +{ + if (!config) + return; + + kfree(config->save_connector_encoders); + kfree(config->save_encoder_crtcs); + kfree(config); +} + +static int intel_set_config_save_state(struct drm_device *dev, + struct intel_set_config *config) +{ + struct drm_encoder *encoder; + struct drm_connector *connector; + int count; + + config->save_encoder_crtcs = + kcalloc(dev->mode_config.num_encoder, + sizeof(struct drm_crtc *), GFP_KERNEL); + if (!config->save_encoder_crtcs) + return -ENOMEM; + + config->save_connector_encoders = + kcalloc(dev->mode_config.num_connector, + sizeof(struct drm_encoder *), GFP_KERNEL); + if (!config->save_connector_encoders) + return -ENOMEM; + + /* Copy data. Note that driver private data is not affected. + * Should anything bad happen only the expected state is + * restored, not the drivers personal bookkeeping. + */ + count = 0; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + config->save_encoder_crtcs[count++] = encoder->crtc; + } + + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + config->save_connector_encoders[count++] = connector->encoder; + } + + return 0; +} + +static void intel_set_config_restore_state(struct drm_device *dev, + struct intel_set_config *config) +{ + struct intel_encoder *encoder; + struct intel_connector *connector; + int count; + + count = 0; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { + encoder->new_crtc = + to_intel_crtc(config->save_encoder_crtcs[count++]); + } + + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + connector->new_encoder = + to_intel_encoder(config->save_connector_encoders[count++]); + } +} + +static void +intel_set_config_compute_mode_changes(struct drm_mode_set *set, + struct intel_set_config *config) +{ + + /* We should be able to check here if the fb has the same properties + * and then just flip_or_move it */ + if (set->crtc->fb != set->fb) { + /* If we have no fb then treat it as a full mode set */ + if (set->crtc->fb == NULL) { + DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); + config->mode_changed = true; + } else if (set->fb == NULL) { + config->mode_changed = true; + } else if (set->fb->depth != set->crtc->fb->depth) { + config->mode_changed = true; + } else if (set->fb->bits_per_pixel != + set->crtc->fb->bits_per_pixel) { + config->mode_changed = true; + } else + config->fb_changed = true; + } + + if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) + config->fb_changed = true; + + if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { + DRM_DEBUG_KMS("modes are different, full mode set\n"); + drm_mode_debug_printmodeline(&set->crtc->mode); + drm_mode_debug_printmodeline(set->mode); + config->mode_changed = true; + } +} + +static int 
+intel_modeset_stage_output_state(struct drm_device *dev, + struct drm_mode_set *set, + struct intel_set_config *config) +{ + struct drm_crtc *new_crtc; + struct intel_connector *connector; + struct intel_encoder *encoder; + int count, ro; + + /* The upper layers ensure that we either disable a crtc or have a list + * of connectors. For paranoia, double-check this. */ + WARN_ON(!set->fb && (set->num_connectors != 0)); + WARN_ON(set->fb && (set->num_connectors == 0)); + + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + /* Otherwise traverse the passed-in connector list and get encoders + * for them. */ + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro] == &connector->base) { + connector->new_encoder = connector->encoder; + break; + } + } + + /* If we disable the crtc, disable all its connectors. Also, if + * the connector is on the changing crtc but not on the new + * connector list, disable it. */ + if ((!set->fb || ro == set->num_connectors) && + connector->base.encoder && + connector->base.encoder->crtc == set->crtc) { + connector->new_encoder = NULL; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", + connector->base.base.id, + drm_get_connector_name(&connector->base)); + } + + + if (&connector->new_encoder->base != connector->base.encoder) { + DRM_DEBUG_KMS("encoder changed, full mode switch\n"); + config->mode_changed = true; + } + + /* Disable all disconnected encoders. */ + if (connector->base.status == connector_status_disconnected) + connector->new_encoder = NULL; + } + /* connector->new_encoder is now updated for all connectors. */ + + /* Update crtc of enabled connectors. */ + count = 0; + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (!connector->new_encoder) + continue; + + new_crtc = connector->new_encoder->base.crtc; + + for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro] == &connector->base) + new_crtc = set->crtc; + } + + /* Make sure the new CRTC will work with the encoder */ + if (!intel_encoder_crtc_ok(&connector->new_encoder->base, + new_crtc)) { + return -EINVAL; + } + connector->encoder->new_crtc = to_intel_crtc(new_crtc); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", + connector->base.base.id, + drm_get_connector_name(&connector->base), + new_crtc->base.id); + } + + /* Check for any encoders that need to be disabled. */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + list_for_each_entry(connector, + &dev->mode_config.connector_list, + base.head) { + if (connector->new_encoder == encoder) { + WARN_ON(!connector->new_encoder->new_crtc); + + goto next_encoder; + } + } + encoder->new_crtc = NULL; +next_encoder: + /* Only now check for crtc changes so we don't miss encoders + * that will be disabled. */ + if (&encoder->new_crtc->base != encoder->base.crtc) { + DRM_DEBUG_KMS("crtc changed, full mode switch\n"); + config->mode_changed = true; + } + } + /* Now we've also updated encoder->new_crtc for all encoders. */ + + return 0; +} + +static int intel_crtc_set_config(struct drm_mode_set *set) +{ + struct drm_device *dev; + struct drm_mode_set save_set; + struct intel_set_config *config; + int ret; + + BUG_ON(!set); + BUG_ON(!set->crtc); + BUG_ON(!set->crtc->helper_private); + + if (!set->mode) + set->fb = NULL; + + /* The fb helper likes to play gross jokes with ->mode_set_config.
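/*
 * Editor's sketch, not part of the patch: the "staged link" idea used by
 * intel_modeset_stage_output_state() above, on plain structs. The new_*
 * pointer describes the wanted configuration; the current pointer is only
 * touched when the staged state is committed. Names are illustrative.
 */
#include <assert.h>
#include <stddef.h>

struct enc { int id; };
struct conn { struct enc *encoder, *new_encoder; };

static void commit(struct conn *c)
{
	/* Nothing touches ->encoder until the commit step. */
	c->encoder = c->new_encoder;
}

int main(void)
{
	struct enc e = { 1 };
	struct conn c = { NULL, &e };

	assert(c.encoder != c.new_encoder);	/* a modeset is pending */
	commit(&c);
	assert(c.encoder == &e);
	return 0;
}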
+ * Unfortunately the crtc helper doesn't do much at all for this case, + * so we have to cope with this madness until the fb helper is fixed up. */ + if (set->fb && set->num_connectors == 0) + return 0; + + if (set->fb) { + DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->fb->base.id, + (int)set->num_connectors, set->x, set->y); + } else { + DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); + } + + dev = set->crtc->dev; + + ret = -ENOMEM; + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) + goto out_config; + + ret = intel_set_config_save_state(dev, config); + if (ret) + goto out_config; + + save_set.crtc = set->crtc; + save_set.mode = &set->crtc->mode; + save_set.x = set->crtc->x; + save_set.y = set->crtc->y; + save_set.fb = set->crtc->fb; + + /* Compute whether we need a full modeset, only an fb base update or no + * change at all. In the future we might also check whether only the + * mode changed, e.g. for LVDS where we only change the panel fitter in + * such cases. */ + intel_set_config_compute_mode_changes(set, config); + + ret = intel_modeset_stage_output_state(dev, set, config); + if (ret) + goto fail; + + if (config->mode_changed) { + if (set->mode) { + DRM_DEBUG_KMS("attempting to set mode from" + " userspace\n"); + drm_mode_debug_printmodeline(set->mode); + } + + if (!intel_set_mode(set->crtc, set->mode, + set->x, set->y, set->fb)) { + DRM_ERROR("failed to set mode on [CRTC:%d]\n", + set->crtc->base.id); + ret = -EINVAL; + goto fail; + } + } else if (config->fb_changed) { + ret = intel_pipe_set_base(set->crtc, + set->x, set->y, set->fb); + } + + intel_set_config_free(config); + + return 0; + +fail: + intel_set_config_restore_state(dev, config); + + /* Try to restore the config */ + if (config->mode_changed && + !intel_set_mode(save_set.crtc, save_set.mode, + save_set.x, save_set.y, save_set.fb)) + DRM_ERROR("failed to restore config after modeset failure\n"); + +out_config: + intel_set_config_free(config); + return ret; +} static const struct drm_crtc_funcs intel_crtc_funcs = { - .reset = intel_crtc_reset, .cursor_set = intel_crtc_cursor_set, .cursor_move = intel_crtc_cursor_move, .gamma_set = intel_crtc_gamma_set, - .set_config = drm_crtc_helper_set_config, + .set_config = intel_crtc_set_config, .destroy = intel_crtc_destroy, .page_flip = intel_crtc_page_flip, }; @@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; - intel_crtc_reset(&intel_crtc->base); - intel_crtc->active = true; /* force the pipe off on setup_init_config */ intel_crtc->bpp = 24; /* default for pre-Ironlake */ - if (HAS_PCH_SPLIT(dev)) { - intel_helper_funcs.prepare = ironlake_crtc_prepare; - intel_helper_funcs.commit = ironlake_crtc_commit; - } else { - intel_helper_funcs.prepare = i9xx_crtc_prepare; - intel_helper_funcs.commit = i9xx_crtc_commit; - } - drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); - - intel_crtc->busy = false; - - setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, - (unsigned long)intel_crtc); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, @@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, return 0; } -static int intel_encoder_clones(struct drm_device *dev, int type_mask) +static int intel_encoder_clones(struct intel_encoder *encoder) { - struct intel_encoder *encoder; 
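/*
 * Editor's sketch, not part of the patch: the save/try/restore shape of
 * intel_crtc_set_config() above, with apply() standing in for
 * intel_set_mode(). All of it is invented for illustration.
 */
#include <stdio.h>

struct state { int mode; };

static int apply(struct state *s, int mode)
{
	if (mode < 0)
		return -1;	/* simulated modeset failure */
	s->mode = mode;
	return 0;
}

int main(void)
{
	struct state s = { .mode = 1 };
	struct state saved = s;		/* snapshot before touching anything */

	if (apply(&s, -1) < 0) {
		/* Roll back so userspace keeps the old, working config. */
		s = saved;
		fprintf(stderr, "modeset failed, restored mode %d\n", s.mode);
	}
	return 0;
}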
+ struct drm_device *dev = encoder->base.dev; + struct intel_encoder *source_encoder; int index_mask = 0; int entry = 0; - list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { - if (type_mask & encoder->clone_mask) + list_for_each_entry(source_encoder, + &dev->mode_config.encoder_list, base.head) { + + if (encoder == source_encoder) + index_mask |= (1 << entry); + + /* Intel hw has only one MUX where encoders could be cloned. */ + if (encoder->cloneable && source_encoder->cloneable) index_mask |= (1 << entry); + entry++; } @@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev) dpd_is_edp = intel_dpd_is_edp(dev); if (has_edp_a(dev)) - intel_dp_init(dev, DP_A); + intel_dp_init(dev, DP_A, PORT_A); if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_D); + intel_dp_init(dev, PCH_DP_D, PORT_D); } intel_crt_init(dev); @@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev) /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, PCH_SDVOB, true); if (!found) - intel_hdmi_init(dev, HDMIB); + intel_hdmi_init(dev, HDMIB, PORT_B); if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_B); + intel_dp_init(dev, PCH_DP_B, PORT_B); } if (I915_READ(HDMIC) & PORT_DETECTED) - intel_hdmi_init(dev, HDMIC); + intel_hdmi_init(dev, HDMIC, PORT_C); if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) - intel_hdmi_init(dev, HDMID); + intel_hdmi_init(dev, HDMID, PORT_D); if (I915_READ(PCH_DP_C) & DP_DETECTED) - intel_dp_init(dev, PCH_DP_C); + intel_dp_init(dev, PCH_DP_C, PORT_C); if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) - intel_dp_init(dev, PCH_DP_D); + intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { int found; @@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev) /* SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, SDVOB, true); if (!found) - intel_hdmi_init(dev, SDVOB); + intel_hdmi_init(dev, SDVOB, PORT_B); if (!found && (I915_READ(DP_B) & DP_DETECTED)) - intel_dp_init(dev, DP_B); + intel_dp_init(dev, DP_B, PORT_B); } if (I915_READ(SDVOC) & PORT_DETECTED) - intel_hdmi_init(dev, SDVOC); + intel_hdmi_init(dev, SDVOC, PORT_C); /* Shares lanes with HDMI on SDVOC */ if (I915_READ(DP_C) & DP_DETECTED) - intel_dp_init(dev, DP_C); + intel_dp_init(dev, DP_C, PORT_C); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; @@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev) found = intel_sdvo_init(dev, SDVOB, true); if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); - intel_hdmi_init(dev, SDVOB); + intel_hdmi_init(dev, SDVOB, PORT_B); } if (!found && SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_B\n"); - intel_dp_init(dev, DP_B); + intel_dp_init(dev, DP_B, PORT_B); } } @@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_INTEGRATED_HDMI(dev)) { DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); - intel_hdmi_init(dev, SDVOC); + intel_hdmi_init(dev, SDVOC, PORT_C); } if (SUPPORTS_INTEGRATED_DP(dev)) { DRM_DEBUG_KMS("probing DP_C\n"); - intel_dp_init(dev, DP_C); + intel_dp_init(dev, DP_C, PORT_C); } } if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) { DRM_DEBUG_KMS("probing DP_D\n"); - intel_dp_init(dev, DP_D); + intel_dp_init(dev, DP_D, PORT_D); } } else if (IS_GEN2(dev)) intel_dvo_init(dev); @@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { encoder->base.possible_crtcs = encoder->crtc_mask; encoder->base.possible_clones = - intel_encoder_clones(dev, encoder->clone_mask); + intel_encoder_clones(encoder); } - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) ironlake_init_pch_refclk(dev); } @@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev) /* We always want a DPMS function */ if (HAS_PCH_SPLIT(dev)) { - dev_priv->display.dpms = ironlake_crtc_dpms; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; + dev_priv->display.crtc_enable = ironlake_crtc_enable; + dev_priv->display.crtc_disable = ironlake_crtc_disable; dev_priv->display.off = ironlake_crtc_off; dev_priv->display.update_plane = ironlake_update_plane; } else { - dev_priv->display.dpms = i9xx_crtc_dpms; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; + dev_priv->display.crtc_enable = i9xx_crtc_enable; + dev_priv->display.crtc_disable = i9xx_crtc_disable; dev_priv->display.off = i9xx_crtc_off; dev_priv->display.update_plane = i9xx_update_plane; } @@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.write_eld = ironlake_write_eld; } else if (IS_HASWELL(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - dev_priv->display.write_eld = ironlake_write_eld; + dev_priv->display.write_eld = haswell_write_eld; } else dev_priv->display.update_wm = NULL; } else if (IS_G4X(dev)) { @@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = { /* HP Mini needs pipe A force quirk (LP: #322104) */ { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, - /* Thinkpad R31 needs pipe A force quirk */ - { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, - /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ - { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, - /* ThinkPad X40 needs pipe A force quirk */ - /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, /* 855 & before need to leave pipe A & dpll A up */ { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, + { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, /* Lenovo U160 cannot use SSC on LVDS */ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, @@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev) /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); +} + +static void +intel_connector_break_all_links(struct intel_connector *connector) +{ + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; + connector->encoder->connectors_active = false; + connector->encoder->base.crtc = NULL; +} + +static void intel_enable_pipe_a(struct drm_device *dev) +{ + struct intel_connector *connector; + struct drm_connector *crt = NULL; + struct intel_load_detect_pipe load_detect_temp; + + /* We can't just switch on the pipe A, we need to set things up with a + * proper mode and output configuration. As a gross hack, enable pipe A + * by enabling the load detect pipe once. 
*/ + list_for_each_entry(connector, + &dev->mode_config.connector_list, + base.head) { + if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { + crt = &connector->base; + break; + } + } + + if (!crt) + return; + + if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) + intel_release_load_detect_pipe(crt, &load_detect_temp); + + +} + +static void intel_sanitize_crtc(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg, val; + + /* Clear any frame start delays used for debugging left by the BIOS */ + reg = PIPECONF(crtc->pipe); + I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + + /* We need to sanitize the plane -> pipe mapping first because this will + * disable the crtc (and hence change the state) if it is wrong. */ + if (!HAS_PCH_SPLIT(dev)) { + struct intel_connector *connector; + bool plane; + + reg = DSPCNTR(crtc->plane); + val = I915_READ(reg); + + if ((val & DISPLAY_PLANE_ENABLE) == 0 && + (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) + goto ok; + + DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", + crtc->base.base.id); + + /* Pipe has the wrong plane attached and the plane is active. + * Temporarily change the plane mapping and disable everything + * ... */ + plane = crtc->plane; + crtc->plane = !plane; + dev_priv->display.crtc_disable(&crtc->base); + crtc->plane = plane; + + /* ... and break all links. */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->encoder->base.crtc != &crtc->base) + continue; + + intel_connector_break_all_links(connector); + } + + WARN_ON(crtc->active); + crtc->base.enabled = false; + } +ok: + + if (dev_priv->quirks & QUIRK_PIPEA_FORCE && + crtc->pipe == PIPE_A && !crtc->active) { + /* BIOS forgot to enable pipe A, this mostly happens after + * resume. Force-enable the pipe to fix this; the update_dpms + * call below will restore the pipe to the right state, but leave + * the required bits on. */ + intel_enable_pipe_a(dev); + } + + /* Adjust the state of the output pipe according to whether we + * have active connectors/encoders. */ + intel_crtc_update_dpms(&crtc->base); - INIT_WORK(&dev_priv->idle_work, intel_idle_update); - setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, - (unsigned long)dev); + if (crtc->active != crtc->base.enabled) { + struct intel_encoder *encoder; + + /* This can happen either due to bugs in the get_hw_state + * functions or because the pipe is force-enabled due to the + * pipe A quirk. */ + DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", + crtc->base.base.id, + crtc->base.enabled ? "enabled" : "disabled", + crtc->active ? "enabled" : "disabled"); + + crtc->base.enabled = crtc->active; + + /* Because we only establish the connector -> encoder -> + * crtc links if something is active, this means the + * crtc is now deactivated. Break the links. connector + * -> encoder links are only established when things are + * actually up, hence no need to break them. */ + WARN_ON(crtc->active); + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + WARN_ON(encoder->connectors_active); + encoder->base.crtc = NULL; + } + } +} + +static void intel_sanitize_encoder(struct intel_encoder *encoder) +{ + struct intel_connector *connector; + struct drm_device *dev = encoder->base.dev; + + /* We need to check both for a crtc link (meaning that the + * encoder is active and trying to read from a pipe) and the + * pipe itself being active.
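/*
 * Editor's sketch, not part of the patch: the sanitize pattern used by
 * intel_sanitize_crtc() above -- read back what the hardware really does,
 * compare it with the software bookkeeping, and clamp the tracking to a
 * consistent state. Illustrative names only.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_crtc {
	bool active;	/* what the hardware readout says */
	bool enabled;	/* what the software tracking says */
};

static void sanitize(struct toy_crtc *c)
{
	if (c->active == c->enabled)
		return;
	printf("hw/sw mismatch: tracked %d, hw %d\n", c->enabled, c->active);
	/* Trust the hardware readout and fix the tracking. */
	c->enabled = c->active;
}

int main(void)
{
	struct toy_crtc c = { .active = false, .enabled = true };
	sanitize(&c);
	return 0;
}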
*/ + bool has_active_crtc = encoder->base.crtc && + to_intel_crtc(encoder->base.crtc)->active; + + if (encoder->connectors_active && !has_active_crtc) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base)); + + /* Connector is active, but has no active pipe. This is + * fallout from our resume register restoring. Disable + * the encoder manually again. */ + if (encoder->base.crtc) { + DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base)); + encoder->disable(encoder); + } + + /* Inconsistent output/port/pipe state happens presumably due to + * a bug in one of the get_hw_state functions. Or someplace else + * in our code, like the register restore mess on resume. Clamp + * things to off as a safer default. */ + list_for_each_entry(connector, + &dev->mode_config.connector_list, + base.head) { + if (connector->encoder != encoder) + continue; + + intel_connector_break_all_links(connector); + } + } + /* Enabled encoders without active connectors will be fixed in + * the crtc fixup. */ +} + +/* Scan out the current hw modeset state, sanitizes it and maps it into the drm + * and i915 state tracking structures. */ +void intel_modeset_setup_hw_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + u32 tmp; + struct intel_crtc *crtc; + struct intel_encoder *encoder; + struct intel_connector *connector; + + for_each_pipe(pipe) { + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + + tmp = I915_READ(PIPECONF(pipe)); + if (tmp & PIPECONF_ENABLE) + crtc->active = true; + else + crtc->active = false; + + crtc->base.enabled = crtc->active; + + DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", + crtc->base.base.id, + crtc->active ? "enabled" : "disabled"); + } + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + pipe = 0; + + if (encoder->get_hw_state(encoder, &pipe)) { + encoder->base.crtc = + dev_priv->pipe_to_crtc_mapping[pipe]; + } else { + encoder->base.crtc = NULL; + } + + encoder->connectors_active = false; + DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", + encoder->base.base.id, + drm_get_encoder_name(&encoder->base), + encoder->base.crtc ? "enabled" : "disabled", + pipe); + } + + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) { + if (connector->get_hw_state(connector)) { + connector->base.dpms = DRM_MODE_DPMS_ON; + connector->encoder->connectors_active = true; + connector->base.encoder = &connector->encoder->base; + } else { + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; + } + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", + connector->base.base.id, + drm_get_connector_name(&connector->base), + connector->base.encoder ? "enabled" : "disabled"); + } + + /* HW state is read out, now we need to sanitize this mess. 
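/*
 * Editor's sketch, not part of the patch: the readout step of
 * intel_modeset_setup_hw_state() reduced to a toy. Each encoder reports
 * whether it is on and which pipe it drives, and the tracking is rebuilt
 * purely from those answers. Hypothetical names throughout.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_enc {
	int crtc;	/* -1 == not driving anything */
	bool (*get_hw_state)(const struct toy_enc *, int *pipe);
};

static bool hw_says_pipe0(const struct toy_enc *e, int *pipe)
{
	(void)e;
	*pipe = 0;	/* pretend the port register selects pipe 0 */
	return true;
}

int main(void)
{
	struct toy_enc e = { .crtc = -1, .get_hw_state = hw_says_pipe0 };
	int pipe;

	e.crtc = e.get_hw_state(&e, &pipe) ? pipe : -1;
	printf("encoder drives crtc %d\n", e.crtc);
	return 0;
}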
*/ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + intel_sanitize_encoder(encoder); + } + + for_each_pipe(pipe) { + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + intel_sanitize_crtc(crtc); + } + + intel_modeset_update_staged_output_state(dev); + + intel_modeset_check_state(dev); } void intel_modeset_gem_init(struct drm_device *dev) @@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev) intel_modeset_init_hw(dev); intel_setup_overlay(dev); + + intel_modeset_setup_hw_state(dev); } void intel_modeset_cleanup(struct drm_device *dev) @@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev) * enqueue unpin/hotplug work. */ drm_irq_uninstall(dev); cancel_work_sync(&dev_priv->hotplug_work); - cancel_work_sync(&dev_priv->rps_work); + cancel_work_sync(&dev_priv->rps.work); /* flush any delayed tasks or pending work */ flush_scheduled_work(); - /* Shut off idle work before the crtcs get freed. */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - intel_crtc = to_intel_crtc(crtc); - del_timer_sync(&intel_crtc->idle_timer); - } - del_timer_sync(&dev_priv->idle_timer); - cancel_work_sync(&dev_priv->idle_work); - drm_mode_config_cleanup(dev); } @@ -7338,7 +8353,7 @@ struct intel_display_error_state { u32 position; u32 base; u32 size; - } cursor[2]; + } cursor[I915_MAX_PIPES]; struct intel_pipe_error_state { u32 conf; @@ -7350,7 +8365,7 @@ struct intel_display_error_state { u32 vtotal; u32 vblank; u32 vsync; - } pipe[2]; + } pipe[I915_MAX_PIPES]; struct intel_plane_error_state { u32 control; @@ -7360,7 +8375,7 @@ struct intel_display_error_state { u32 addr; u32 surface; u32 tile_offset; - } plane[2]; + } plane[I915_MAX_PIPES]; }; struct intel_display_error_state * @@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev) if (error == NULL) return NULL; - for (i = 0; i < 2; i++) { + for_each_pipe(i) { error->cursor[i].control = I915_READ(CURCNTR(i)); error->cursor[i].position = I915_READ(CURPOS(i)); error->cursor[i].base = I915_READ(CURBASE(i)); @@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m, struct drm_device *dev, struct intel_display_error_state *error) { + drm_i915_private_t *dev_priv = dev->dev_private; int i; - for (i = 0; i < 2; i++) { + seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); + for_each_pipe(i) { seq_printf(m, "Pipe [%d]:\n", i); seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); seq_printf(m, " SRC: %08x\n", error->pipe[i].source); diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index ace757af9133..1474f84fdbd0 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -36,42 +36,10 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" -#include "drm_dp_helper.h" -#define DP_RECEIVER_CAP_SIZE 0xf #define DP_LINK_STATUS_SIZE 6 #define DP_LINK_CHECK_TIMEOUT (10 * 1000) -#define DP_LINK_CONFIGURATION_SIZE 9 - -struct intel_dp { - struct intel_encoder base; - uint32_t output_reg; - uint32_t DP; - uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; - bool has_audio; - enum hdmi_force_audio force_audio; - uint32_t color_range; - int dpms_mode; - uint8_t link_bw; - uint8_t lane_count; - uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; - struct i2c_adapter adapter; - struct i2c_algo_dp_aux_data algo; - bool is_pch_edp; - uint8_t train_set[4]; - int panel_power_up_delay; - int panel_power_down_delay; - int panel_power_cycle_delay; - int backlight_on_delay; 
- int backlight_off_delay; - struct drm_display_mode *panel_fixed_mode; /* for eDP */ - struct delayed_work panel_vdd_work; - bool want_panel_vdd; - struct edid *edid; /* cached EDID for eDP */ - int edid_mode_count; -}; - /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct @@ -840,9 +808,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, } } -static void ironlake_edp_pll_on(struct drm_encoder *encoder); -static void ironlake_edp_pll_off(struct drm_encoder *encoder); - static void intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -853,14 +818,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_crtc *crtc = intel_dp->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - /* Turn on the eDP PLL if needed */ - if (is_edp(intel_dp)) { - if (!is_pch_edp(intel_dp)) - ironlake_edp_pll_on(encoder); - else - ironlake_edp_pll_off(encoder); - } - /* * There are four kinds of DP registers: * @@ -882,10 +839,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, * supposed to be read-only. */ intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; - intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; /* Handle DP bits in common between all three register formats */ - intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; switch (intel_dp->lane_count) { @@ -932,7 +887,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp->DP |= intel_crtc->pipe << 29; /* don't miss out required setting for eDP */ - intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else @@ -954,7 +908,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (is_cpu_edp(intel_dp)) { /* don't miss out required setting for eDP */ - intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) intel_dp->DP |= DP_PLL_FREQ_160MHZ; else @@ -1225,27 +1178,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) msleep(intel_dp->backlight_off_delay); } -static void ironlake_edp_pll_on(struct drm_encoder *encoder) +static void ironlake_edp_pll_on(struct intel_dp *intel_dp) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; + assert_pipe_disabled(dev_priv, + to_intel_crtc(crtc)->pipe); + DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); - dpa_ctl |= DP_PLL_ENABLE; - I915_WRITE(DP_A, dpa_ctl); + WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); + WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); + + /* We don't adjust intel_dp->DP while tearing down the link, to + * facilitate link retraining (e.g. after hotplug). Hence clear all + * enable bits here to ensure that we don't enable too much. 
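/*
 * Editor's sketch, not part of the patch: keeping a software shadow of a
 * write-mostly control register, as intel_dp->DP does above -- edit bits in
 * the shadow, then write it out in one go. The register layout is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PORT_EN	(1u << 31)
#define FAKE_PLL_EN	(1u << 14)
#define FAKE_AUDIO_EN	(1u << 6)

static uint32_t fake_mmio;	/* stands in for the hardware register */

int main(void)
{
	uint32_t shadow = FAKE_PORT_EN | FAKE_AUDIO_EN;	/* leftover config */

	/* Enable only the PLL: clear everything else first, exactly so we
	 * "don't enable too much". */
	shadow &= ~(FAKE_PORT_EN | FAKE_AUDIO_EN);
	shadow |= FAKE_PLL_EN;
	fake_mmio = shadow;				/* one posted write */

	printf("reg = %#x\n", (unsigned)fake_mmio);
	return 0;
}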
*/ + intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); + intel_dp->DP |= DP_PLL_ENABLE; + I915_WRITE(DP_A, intel_dp->DP); POSTING_READ(DP_A); udelay(200); } -static void ironlake_edp_pll_off(struct drm_encoder *encoder) +static void ironlake_edp_pll_off(struct intel_dp *intel_dp) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_crtc *crtc = intel_dp->base.base.crtc; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; + assert_pipe_disabled(dev_priv, + to_intel_crtc(crtc)->pipe); + dpa_ctl = I915_READ(DP_A); + WARN((dpa_ctl & DP_PLL_ENABLE) == 0, + "dp pll off, should be on\n"); + WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); + + /* We can't rely on the value tracked for the DP register in + * intel_dp->DP because link_down must not change that (otherwise link + * re-training will fail). */ dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); @@ -1282,10 +1257,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) } } -static void intel_dp_prepare(struct drm_encoder *encoder) +static bool intel_dp_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ(intel_dp->output_reg); + if (!(tmp & DP_PORT_EN)) + return false; + + if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { + *pipe = PORT_TO_PIPE_CPT(tmp); + } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { + *pipe = PORT_TO_PIPE(tmp); + } else { + u32 trans_sel; + u32 trans_dp; + int i; + + switch (intel_dp->output_reg) { + case PCH_DP_B: + trans_sel = TRANS_DP_PORT_SEL_B; + break; + case PCH_DP_C: + trans_sel = TRANS_DP_PORT_SEL_C; + break; + case PCH_DP_D: + trans_sel = TRANS_DP_PORT_SEL_D; + break; + default: + return true; + } + + for_each_pipe(i) { + trans_dp = I915_READ(TRANS_DP_CTL(i)); + if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { + *pipe = i; + return true; + } + } + } + + DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); + + return true; +} + +static void intel_disable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); /* Make sure the panel is off before trying to change the mode. But also * ensure that we have vdd while we switch off the panel. */ @@ -1293,14 +1315,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder) ironlake_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); ironlake_edp_panel_off(intel_dp); - intel_dp_link_down(intel_dp); + + /* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled.
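/*
 * Editor's sketch, not part of the patch: recovering "which pipe drives this
 * port" from a port register, in the spirit of intel_dp_get_hw_state()
 * above. The bit layout here is invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_PORT_EN	(1u << 31)
#define FAKE_PIPE_SEL	(1u << 30)	/* 0 == pipe A, 1 == pipe B */

static int port_get_pipe(uint32_t val, int *pipe)
{
	if (!(val & FAKE_PORT_EN))
		return 0;		/* port off, no pipe */
	*pipe = !!(val & FAKE_PIPE_SEL);
	return 1;
}

int main(void)
{
	int pipe;

	if (port_get_pipe(FAKE_PORT_EN | FAKE_PIPE_SEL, &pipe))
		printf("port active on pipe %d\n", pipe);
	return 0;
}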
*/ + if (!is_cpu_edp(intel_dp)) + intel_dp_link_down(intel_dp); } -static void intel_dp_commit(struct drm_encoder *encoder) +static void intel_post_disable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = encoder->dev; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + if (is_cpu_edp(intel_dp)) { + intel_dp_link_down(intel_dp); + ironlake_edp_pll_off(intel_dp); + } +} + +static void intel_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t dp_reg = I915_READ(intel_dp->output_reg); + + if (WARN_ON(dp_reg & DP_PORT_EN)) + return; ironlake_edp_panel_vdd_on(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); @@ -1309,47 +1348,14 @@ static void intel_dp_commit(struct drm_encoder *encoder) ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); - - intel_dp->dpms_mode = DRM_MODE_DPMS_ON; - - if (HAS_PCH_CPT(dev)) - intel_cpt_verify_modeset(dev, intel_crtc->pipe); } -static void -intel_dp_dpms(struct drm_encoder *encoder, int mode) +static void intel_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - if (mode != DRM_MODE_DPMS_ON) { - /* Switching the panel off requires vdd. */ - ironlake_edp_panel_vdd_on(intel_dp); - ironlake_edp_backlight_off(intel_dp); - intel_dp_sink_dpms(intel_dp, mode); - ironlake_edp_panel_off(intel_dp); - intel_dp_link_down(intel_dp); - - if (is_cpu_edp(intel_dp)) - ironlake_edp_pll_off(encoder); - } else { - if (is_cpu_edp(intel_dp)) - ironlake_edp_pll_on(encoder); - - ironlake_edp_panel_vdd_on(intel_dp); - intel_dp_sink_dpms(intel_dp, mode); - if (!(dp_reg & DP_PORT_EN)) { - intel_dp_start_link_train(intel_dp); - ironlake_edp_panel_on(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, true); - intel_dp_complete_link_train(intel_dp); - } else - ironlake_edp_panel_vdd_off(intel_dp, false); - ironlake_edp_backlight_on(intel_dp); - } - intel_dp->dpms_mode = mode; + if (is_cpu_edp(intel_dp)) + ironlake_edp_pll_on(intel_dp); } /* @@ -1668,6 +1674,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; int ret; + if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { + dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; + break; + case DP_TRAINING_PATTERN_1: + dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; + break; + case DP_TRAINING_PATTERN_2: + dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; + break; + case DP_TRAINING_PATTERN_3: + DRM_ERROR("DP training pattern 3 not supported\n"); + dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; + break; + } + + } else { + dp_reg_value &= ~DP_LINK_TRAIN_MASK; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + dp_reg_value |= DP_LINK_TRAIN_OFF; + break; + case DP_TRAINING_PATTERN_1: + dp_reg_value |= DP_LINK_TRAIN_PAT_1; + break; + case DP_TRAINING_PATTERN_2: + dp_reg_value |= 
DP_LINK_TRAIN_PAT_2; + break; + case DP_TRAINING_PATTERN_3: + DRM_ERROR("DP training pattern 3 not supported\n"); + dp_reg_value |= DP_LINK_TRAIN_PAT_2; + break; + } + } + I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); @@ -1675,12 +1720,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); - ret = intel_dp_aux_native_write(intel_dp, - DP_TRAINING_LANE0_SET, - intel_dp->train_set, - intel_dp->lane_count); - if (ret != intel_dp->lane_count) - return false; + if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != + DP_TRAINING_PATTERN_DISABLE) { + ret = intel_dp_aux_native_write(intel_dp, + DP_TRAINING_LANE0_SET, + intel_dp->train_set, + intel_dp->lane_count); + if (ret != intel_dp->lane_count) + return false; + } return true; } @@ -1690,26 +1738,12 @@ static void intel_dp_start_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); int i; uint8_t voltage; bool clock_recovery = false; int voltage_tries, loop_tries; - u32 reg; uint32_t DP = intel_dp->DP; - /* - * On CPT we have to enable the port in training pattern 1, which - * will happen below in intel_dp_set_link_train. Otherwise, enable - * the port and wait for it to become active. - */ - if (!HAS_PCH_CPT(dev)) { - I915_WRITE(intel_dp->output_reg, intel_dp->DP); - POSTING_READ(intel_dp->output_reg); - intel_wait_for_vblank(dev, intel_crtc->pipe); - } - /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, intel_dp->link_configuration, @@ -1717,10 +1751,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP |= DP_PORT_EN; - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) - DP &= ~DP_LINK_TRAIN_MASK_CPT; - else - DP &= ~DP_LINK_TRAIN_MASK; memset(intel_dp->train_set, 0, 4); voltage = 0xff; voltage_tries = 0; @@ -1744,12 +1774,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) - reg = DP | DP_LINK_TRAIN_PAT_1_CPT; - else - reg = DP | DP_LINK_TRAIN_PAT_1; - - if (!intel_dp_set_link_train(intel_dp, reg, + if (!intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) break; @@ -1804,10 +1829,8 @@ static void intel_dp_complete_link_train(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; bool channel_eq = false; int tries, cr_tries; - u32 reg; uint32_t DP = intel_dp->DP; /* channel equalization */ @@ -1836,13 +1859,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) - reg = DP | DP_LINK_TRAIN_PAT_2_CPT; - else - reg = DP | DP_LINK_TRAIN_PAT_2; - /* channel eq pattern */ - if (!intel_dp_set_link_train(intel_dp, reg, + if (!intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_2 | DP_LINK_SCRAMBLING_DISABLE)) break; @@ -1877,15 +1895,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) - reg = DP | DP_LINK_TRAIN_OFF_CPT; - else - reg = DP | DP_LINK_TRAIN_OFF; - - I915_WRITE(intel_dp->output_reg, reg); - POSTING_READ(intel_dp->output_reg); - 
intel_dp_aux_native_write_1(intel_dp, - DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); + intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); } static void @@ -1895,18 +1905,11 @@ intel_dp_link_down(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; - if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) + if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) return; DRM_DEBUG_KMS("\n"); - if (is_edp(intel_dp)) { - DP &= ~DP_PLL_ENABLE; - I915_WRITE(intel_dp->output_reg, DP); - POSTING_READ(intel_dp->output_reg); - udelay(100); - } - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); @@ -1918,13 +1921,6 @@ intel_dp_link_down(struct intel_dp *intel_dp) msleep(17); - if (is_edp(intel_dp)) { - if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) - DP |= DP_LINK_TRAIN_OFF_CPT; - else - DP |= DP_LINK_TRAIN_OFF; - } - if (HAS_PCH_IBX(dev) && I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { struct drm_crtc *crtc = intel_dp->base.base.crtc; @@ -2033,10 +2029,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) u8 sink_irq_vector; u8 link_status[DP_LINK_STATUS_SIZE]; - if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) + if (!intel_dp->base.connectors_active) return; - if (!intel_dp->base.base.crtc) + if (WARN_ON(!intel_dp->base.base.crtc)) return; /* Try to read receiver status if the link appears to be up */ @@ -2160,7 +2156,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada ret = drm_add_edid_modes(connector, intel_dp->edid); drm_edid_to_eld(connector, intel_dp->edid); - connector->display_info.raw_edid = NULL; return intel_dp->edid_mode_count; } @@ -2206,7 +2201,6 @@ intel_dp_detect(struct drm_connector *connector, bool force) edid = intel_dp_get_edid(connector, &intel_dp->adapter); if (edid) { intel_dp->has_audio = drm_detect_monitor_audio(edid); - connector->display_info.raw_edid = NULL; kfree(edid); } } @@ -2271,8 +2265,6 @@ intel_dp_detect_audio(struct drm_connector *connector) edid = intel_dp_get_edid(connector, &intel_dp->adapter); if (edid) { has_audio = drm_detect_monitor_audio(edid); - - connector->display_info.raw_edid = NULL; kfree(edid); } @@ -2326,9 +2318,8 @@ intel_dp_set_property(struct drm_connector *connector, done: if (intel_dp->base.base.crtc) { struct drm_crtc *crtc = intel_dp->base.base.crtc; - drm_crtc_helper_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, - crtc->fb); + intel_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); } return 0; @@ -2362,15 +2353,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) } static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { - .dpms = intel_dp_dpms, .mode_fixup = intel_dp_mode_fixup, - .prepare = intel_dp_prepare, .mode_set = intel_dp_mode_set, - .commit = intel_dp_commit, + .disable = intel_encoder_noop, }; static const struct drm_connector_funcs intel_dp_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = intel_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_dp_set_property, @@ -2441,7 +2430,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect } void -intel_dp_init(struct drm_device *dev, int output_reg) +intel_dp_init(struct drm_device *dev, int output_reg, enum port port) { struct drm_i915_private *dev_priv = 
dev->dev_private; struct drm_connector *connector; @@ -2456,7 +2445,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) return; intel_dp->output_reg = output_reg; - intel_dp->dpms_mode = -1; + intel_dp->port = port; + /* Preserve the current hw state. */ + intel_dp->DP = I915_READ(intel_dp->output_reg); intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { @@ -2483,18 +2474,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) connector->polled = DRM_CONNECTOR_POLL_HPD; - if (output_reg == DP_B || output_reg == PCH_DP_B) - intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); - else if (output_reg == DP_C || output_reg == PCH_DP_C) - intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); - else if (output_reg == DP_D || output_reg == PCH_DP_D) - intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); + intel_encoder->cloneable = false; - if (is_edp(intel_dp)) { - intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); - INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, - ironlake_panel_vdd_work); - } + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, + ironlake_panel_vdd_work); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); @@ -2508,29 +2491,33 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); + intel_encoder->enable = intel_enable_dp; + intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->disable = intel_disable_dp; + intel_encoder->post_disable = intel_post_disable_dp; + intel_encoder->get_hw_state = intel_dp_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; + /* Set up the DDC bus. */ - switch (output_reg) { - case DP_A: - name = "DPDDC-A"; - break; - case DP_B: - case PCH_DP_B: - dev_priv->hotplug_supported_mask |= - DPB_HOTPLUG_INT_STATUS; - name = "DPDDC-B"; - break; - case DP_C: - case PCH_DP_C: - dev_priv->hotplug_supported_mask |= - DPC_HOTPLUG_INT_STATUS; - name = "DPDDC-C"; - break; - case DP_D: - case PCH_DP_D: - dev_priv->hotplug_supported_mask |= - DPD_HOTPLUG_INT_STATUS; - name = "DPDDC-D"; - break; + switch (port) { + case PORT_A: + name = "DPDDC-A"; + break; + case PORT_B: + dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; + name = "DPDDC-B"; + break; + case PORT_C: + dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; + name = "DPDDC-C"; + break; + case PORT_D: + dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; + name = "DPDDC-D"; + break; + default: + WARN(1, "Invalid port %c\n", port_name(port)); + break; } /* Cache some DPCD data in the eDP case */ diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index cd54cf88a28f..351fd7179cd7 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -31,6 +31,7 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" #include "drm_fb_helper.h" +#include "drm_dp_helper.h" #define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ @@ -40,7 +41,11 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if (W && drm_can_sleep()) msleep(W); \ + if (W && drm_can_sleep()) { \ + msleep(W); \ + } else { \ + cpu_relax(); \ + } \ } \ ret__; \ }) @@ -90,25 +95,6 @@ #define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_EDP 8 -/* Intel Pipe Clone Bit */ -#define INTEL_HDMIB_CLONE_BIT 1 -#define INTEL_HDMIC_CLONE_BIT 2 -#define INTEL_HDMID_CLONE_BIT 3 -#define INTEL_HDMIE_CLONE_BIT 4 -#define 
INTEL_HDMIF_CLONE_BIT 5 -#define INTEL_SDVO_NON_TV_CLONE_BIT 6 -#define INTEL_SDVO_TV_CLONE_BIT 7 -#define INTEL_SDVO_LVDS_CLONE_BIT 8 -#define INTEL_ANALOG_CLONE_BIT 9 -#define INTEL_TV_CLONE_BIT 10 -#define INTEL_DP_B_CLONE_BIT 11 -#define INTEL_DP_C_CLONE_BIT 12 -#define INTEL_DP_D_CLONE_BIT 13 -#define INTEL_LVDS_CLONE_BIT 14 -#define INTEL_DVO_TMDS_CLONE_BIT 15 -#define INTEL_DVO_LVDS_CLONE_BIT 16 -#define INTEL_EDP_CLONE_BIT 17 - #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_TMDS 2 @@ -151,16 +137,48 @@ struct intel_fbdev { struct intel_encoder { struct drm_encoder base; + /* + * The new crtc this encoder will be driven from. Only differs from + * base->crtc while a modeset is in progress. + */ + struct intel_crtc *new_crtc; + int type; bool needs_tv_clock; + /* + * Intel hw has only one MUX where encoders could be cloned, hence a + * simple flag is enough to compute the possible_clones mask. + */ + bool cloneable; + bool connectors_active; void (*hot_plug)(struct intel_encoder *); + void (*pre_enable)(struct intel_encoder *); + void (*enable)(struct intel_encoder *); + void (*disable)(struct intel_encoder *); + void (*post_disable)(struct intel_encoder *); + /* Read out the current hw state of this encoder, returning true if + * the encoder is active. If the encoder is enabled it also sets the pipe + * it is connected to in the pipe parameter. */ + bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); int crtc_mask; - int clone_mask; }; struct intel_connector { struct drm_connector base; + /* + * The fixed encoder this connector is connected to. + */ struct intel_encoder *encoder; + + /* + * The new encoder this connector will be driven by. Only differs from + * encoder while a modeset is in progress. + */ + struct intel_encoder *new_encoder; + + /* Reads out the current hw state, returning true if the connector is enabled + * and active (i.e. dpms ON state). */ + bool (*get_hw_state)(struct intel_connector *); }; struct intel_crtc { @@ -168,11 +186,13 @@ struct intel_crtc { enum pipe pipe; enum plane plane; u8 lut_r[256], lut_g[256], lut_b[256]; - int dpms_mode; - bool active; /* is the crtc on? independent of the dpms mode */ + /* + * Whether the crtc and the connected output pipeline are active. Implies + * that crtc->enabled is set, i.e. the current mode configuration has + * some outputs connected to this crtc. + */ + bool active; bool primary_disabled; /* is the crtc obscured by a plane? */ - bool busy; /* is scanout buffer being updated frequently?
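/*
 * Editor's sketch, not part of the patch: what the new-style encoder vtable
 * above looks like to a driver. The toy_ and fake_ names are invented; only
 * the shape (enable/disable plus a get_hw_state readout) mirrors the struct
 * intel_encoder declaration.
 */
#include <stdbool.h>

enum pipe_id { PIPE_A_ID, PIPE_B_ID };

struct toy_encoder {
	void (*enable)(struct toy_encoder *);
	void (*disable)(struct toy_encoder *);
	bool (*get_hw_state)(struct toy_encoder *, enum pipe_id *pipe);
	bool on;
};

static void fake_enable(struct toy_encoder *e)  { e->on = true; }
static void fake_disable(struct toy_encoder *e) { e->on = false; }
static bool fake_get_hw_state(struct toy_encoder *e, enum pipe_id *pipe)
{
	*pipe = PIPE_A_ID;	/* hardwired for the sketch */
	return e->on;
}

int main(void)
{
	struct toy_encoder e = { fake_enable, fake_disable,
				 fake_get_hw_state, false };
	enum pipe_id p;

	e.enable(&e);
	return e.get_hw_state(&e, &p) ? 0 : 1;	/* 0 == active, as expected */
}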
*/ - struct timer_list idle_timer; bool lowfreq_avail; struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; @@ -311,6 +331,37 @@ struct intel_hdmi { struct drm_display_mode *adjusted_mode); }; +#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_LINK_CONFIGURATION_SIZE 9 + +struct intel_dp { + struct intel_encoder base; + uint32_t output_reg; + uint32_t DP; + uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; + bool has_audio; + enum hdmi_force_audio force_audio; + enum port port; + uint32_t color_range; + uint8_t link_bw; + uint8_t lane_count; + uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; + struct i2c_adapter adapter; + struct i2c_algo_dp_aux_data algo; + bool is_pch_edp; + uint8_t train_set[4]; + int panel_power_up_delay; + int panel_power_down_delay; + int panel_power_cycle_delay; + int backlight_on_delay; + int backlight_off_delay; + struct drm_display_mode *panel_fixed_mode; /* for eDP */ + struct delayed_work panel_vdd_work; + bool want_panel_vdd; + struct edid *edid; /* cached EDID for eDP */ + int edid_mode_count; +}; + static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { @@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector); extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); extern void intel_crt_init(struct drm_device *dev); -extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); +extern void intel_hdmi_init(struct drm_device *dev, + int sdvox_reg, enum port port); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); -extern void intel_mark_busy(struct drm_device *dev, - struct drm_i915_gem_object *obj); +extern void intel_mark_busy(struct drm_device *dev); +extern void intel_mark_idle(struct drm_device *dev); +extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); +extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); extern bool intel_lvds_init(struct drm_device *dev); -extern void intel_dp_init(struct drm_device *dev, int dp_reg); +extern void intel_dp_init(struct drm_device *dev, int output_reg, + enum port port); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); @@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe); extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, enum plane plane); -void intel_sanitize_pm(struct drm_device *dev); - /* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); @@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev); extern void intel_panel_destroy_backlight(struct drm_device *dev); extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); +struct intel_set_config { + struct drm_encoder **save_connector_encoders; + struct drm_crtc **save_encoder_crtcs; + + bool fb_changed; + bool mode_changed; +}; + +extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *old_fb); +extern void intel_modeset_disable(struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); -extern void 
intel_encoder_prepare(struct drm_encoder *encoder); -extern void intel_encoder_commit(struct drm_encoder *encoder); +extern void intel_crtc_update_dpms(struct drm_crtc *crtc); +extern void intel_encoder_noop(struct drm_encoder *encoder); extern void intel_encoder_destroy(struct drm_encoder *encoder); +extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); +extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder); +extern void intel_connector_dpms(struct drm_connector *, int mode); +extern bool intel_connector_get_hw_state(struct intel_connector *connector); +extern void intel_modeset_check_state(struct drm_device *dev); + static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) { @@ -417,12 +487,10 @@ struct intel_load_detect_pipe { bool load_detect_temp; int dpms_mode; }; -extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, - struct drm_connector *connector, +extern bool intel_get_load_detect_pipe(struct drm_connector *connector, struct drm_display_mode *mode, struct intel_load_detect_pipe *old); -extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, - struct drm_connector *connector, +extern void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old); extern void intelfb_restore(void); @@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev); extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void ironlake_teardown_rc6(struct drm_device *dev); -extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); +extern void intel_enable_ddi(struct intel_encoder *encoder); +extern void intel_disable_ddi(struct intel_encoder *encoder); +extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe); extern void intel_ddi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); diff --git a/trunk/drivers/gpu/drm/i915/intel_dvo.c b/trunk/drivers/gpu/drm/i915/intel_dvo.c index 36c542e5036b..4f1fdcc44005 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_dvo.c @@ -37,6 +37,7 @@ #define SIL164_ADDR 0x38 #define CH7xxx_ADDR 0x76 #define TFP410_ADDR 0x38 +#define NS2501_ADDR 0x38 static const struct intel_dvo_device intel_dvo_devices[] = { { @@ -74,7 +75,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .slave_addr = 0x75, .gpio = GMBUS_PORT_DPB, .dev_ops = &ch7017_ops, - } + }, + { + .type = INTEL_DVO_CHIP_TMDS, + .name = "ns2501", + .dvo_reg = DVOC, + .slave_addr = NS2501_ADDR, + .dev_ops = &ns2501_ops, + } }; struct intel_dvo { @@ -97,22 +105,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) struct intel_dvo, base); } -static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) +static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) { - struct drm_i915_private *dev_priv = encoder->dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); + + return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev); +} + +static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + u32 tmp; + + tmp = 
I915_READ(intel_dvo->dev.dvo_reg); + + if (!(tmp & DVO_ENABLE)) + return false; + + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_disable_dvo(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + u32 dvo_reg = intel_dvo->dev.dvo_reg; + u32 temp = I915_READ(dvo_reg); + + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); + I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); + I915_READ(dvo_reg); +} + +static void intel_enable_dvo(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); + I915_WRITE(dvo_reg, temp | DVO_ENABLE); + I915_READ(dvo_reg); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); +} + +static void intel_dvo_dpms(struct drm_connector *connector, int mode) +{ + struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + struct drm_crtc *crtc; + + /* dvo supports only 2 dpms states. */ + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + crtc = intel_dvo->base.base.crtc; + if (!crtc) { + intel_dvo->base.connectors_active = false; + return; + } + if (mode == DRM_MODE_DPMS_ON) { - I915_WRITE(dvo_reg, temp | DVO_ENABLE); - I915_READ(dvo_reg); - intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); + intel_dvo->base.connectors_active = true; + + intel_crtc_update_dpms(crtc); + + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); } else { - intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); - I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); - I915_READ(dvo_reg); + intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); + + intel_dvo->base.connectors_active = false; + + intel_crtc_update_dpms(crtc); } + + intel_modeset_check_state(connector->dev); } static int intel_dvo_mode_valid(struct drm_connector *connector, @@ -267,15 +344,13 @@ static void intel_dvo_destroy(struct drm_connector *connector) } static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { - .dpms = intel_dvo_dpms, .mode_fixup = intel_dvo_mode_fixup, - .prepare = intel_encoder_prepare, .mode_set = intel_dvo_mode_set, - .commit = intel_encoder_commit, + .disable = intel_encoder_noop, }; static const struct drm_connector_funcs intel_dvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = intel_dvo_dpms, .detect = intel_dvo_detect, .destroy = intel_dvo_destroy, .fill_modes = drm_helper_probe_single_connector_modes, @@ -364,6 +439,11 @@ void intel_dvo_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_dvo_enc_funcs, encoder_type); + intel_encoder->disable = intel_disable_dvo; + intel_encoder->enable = intel_enable_dvo; + intel_encoder->get_hw_state = intel_dvo_get_hw_state; + intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; + /* Now, try to find a controller */ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { struct drm_connector *connector = &intel_connector->base; @@ -396,17 +476,14 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: - intel_encoder->clone_mask = - (1 << INTEL_DVO_TMDS_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT); + intel_encoder->cloneable = true; drm_connector_init(dev, connector, 
&intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; case INTEL_DVO_CHIP_LVDS: - intel_encoder->clone_mask = - (1 << INTEL_DVO_LVDS_CLONE_BIT); + intel_encoder->cloneable = false; drm_connector_init(dev, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c index 98f602427eb8..08f2b63d740a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c +++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c @@ -151,6 +151,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VIDEO_DIP_DATA, *data); data++; } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(VIDEO_DIP_DATA, 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -186,6 +189,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -224,6 +230,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -259,6 +268,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } + /* Write every possible data byte to force correct ECC calculation. */ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); val |= g4x_infoframe_enable(frame); @@ -292,6 +304,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, I915_WRITE(data_reg + i, *data); data++; } + /* Write every possible data byte to force correct ECC calculation. 
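+ * The hardware appears to compute the ECC over the entire DIP data + * buffer rather than over just the bytes of the current frame, so stale + * data left behind by a previously written, longer infoframe would make + * the transmitted ECC invalid.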
*/ + for (; i < VIDEO_DIP_DATA_SIZE; i += 4) + I915_WRITE(data_reg + i, 0); mmiowb(); val |= hsw_infoframe_enable(frame); @@ -377,6 +392,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, port = VIDEO_DIP_PORT_C; break; default: + BUG(); return; } @@ -435,6 +451,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, port = VIDEO_DIP_PORT_D; break; default: + BUG(); return; } @@ -601,11 +618,32 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, intel_hdmi->set_infoframes(encoder, adjusted_mode); } -static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) +static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + u32 tmp; + + tmp = I915_READ(intel_hdmi->sdvox_reg); + + if (!(tmp & SDVO_ENABLE)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_enable_hdmi(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 temp; u32 enable_bits = SDVO_ENABLE; @@ -617,31 +655,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) /* HW workaround for IBX, we need to move the port to transcoder A * before disabling it. */ if (HAS_PCH_IBX(dev)) { - struct drm_crtc *crtc = encoder->crtc; + struct drm_crtc *crtc = encoder->base.crtc; int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; - if (mode != DRM_MODE_DPMS_ON) { - if (temp & SDVO_PIPE_B_SELECT) { - temp &= ~SDVO_PIPE_B_SELECT; - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); - - /* Again we need to write this twice. */ - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); - - /* Transcoder selection bits only update - * effectively on vblank. */ - if (crtc) - intel_wait_for_vblank(dev, pipe); - else - msleep(50); - } - } else { - /* Restore the transcoder select bit. */ - if (pipe == PIPE_B) - enable_bits |= SDVO_PIPE_B_SELECT; - } + /* Restore the transcoder select bit. */ + if (pipe == PIPE_B) + enable_bits |= SDVO_PIPE_B_SELECT; } /* HW workaround, need to toggle enable bit off and on for 12bpc, but @@ -652,12 +671,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) POSTING_READ(intel_hdmi->sdvox_reg); } - if (mode != DRM_MODE_DPMS_ON) { - temp &= ~enable_bits; - } else { - temp |= enable_bits; + temp |= enable_bits; + + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* HW workaround, need to write this twice for issue that may result + * in first write getting masked. 
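+ * The exact failure mode is not understood; empirically the first write + * is sometimes ignored on PCH-split platforms, hence the repeated write + * with a posting read in between.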
+ */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + } +} + +static void intel_disable_hdmi(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + u32 temp; + u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; + + temp = I915_READ(intel_hdmi->sdvox_reg); + + /* HW workaround for IBX, we need to move the port to transcoder A + * before disabling it. */ + if (HAS_PCH_IBX(dev)) { + struct drm_crtc *crtc = encoder->base.crtc; + int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; + + if (temp & SDVO_PIPE_B_SELECT) { + temp &= ~SDVO_PIPE_B_SELECT; + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* Again we need to write this twice. */ + I915_WRITE(intel_hdmi->sdvox_reg, temp); + POSTING_READ(intel_hdmi->sdvox_reg); + + /* Transcoder selection bits only update + * effectively on vblank. */ + if (crtc) + intel_wait_for_vblank(dev, pipe); + else + msleep(50); + } + } + + /* HW workaround, need to toggle enable bit off and on for 12bpc, but + * we do this anyway, which has proven more stable in testing. + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); + POSTING_READ(intel_hdmi->sdvox_reg); } + temp &= ~enable_bits; + I915_WRITE(intel_hdmi->sdvox_reg, temp); POSTING_READ(intel_hdmi->sdvox_reg); @@ -737,7 +808,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) drm_detect_hdmi_monitor(edid); intel_hdmi->has_audio = drm_detect_monitor_audio(edid); } - connector->display_info.raw_edid = NULL; kfree(edid); } @@ -778,8 +848,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector) if (edid) { if (edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); - - connector->display_info.raw_edid = NULL; kfree(edid); } @@ -833,9 +901,8 @@ intel_hdmi_set_property(struct drm_connector *connector, done: if (intel_hdmi->base.base.crtc) { struct drm_crtc *crtc = intel_hdmi->base.base.crtc; - drm_crtc_helper_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, - crtc->fb); + intel_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); } return 0; @@ -849,23 +916,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector) } static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { - .dpms = intel_ddi_dpms, .mode_fixup = intel_hdmi_mode_fixup, - .prepare = intel_encoder_prepare, .mode_set = intel_ddi_mode_set, - .commit = intel_encoder_commit, + .disable = intel_encoder_noop, }; static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { - .dpms = intel_hdmi_dpms, .mode_fixup = intel_hdmi_mode_fixup, - .prepare = intel_encoder_prepare, .mode_set = intel_hdmi_mode_set, - .commit = intel_encoder_commit, + .disable = intel_encoder_noop, }; static const struct drm_connector_funcs intel_hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = intel_connector_dpms, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_hdmi_set_property, @@ -889,7 +952,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_broadcast_rgb_property(connector); } -void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) +void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct
drm_connector *connector; @@ -923,48 +986,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) connector->doublescan_allowed = 0; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - /* Set up the DDC bus. */ - if (sdvox_reg == SDVOB) { - intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); - intel_hdmi->ddc_bus = GMBUS_PORT_DPB; - dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == SDVOC) { - intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); - intel_hdmi->ddc_bus = GMBUS_PORT_DPC; - dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == HDMIB) { - intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); - intel_hdmi->ddc_bus = GMBUS_PORT_DPB; - dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == HDMIC) { - intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); - intel_hdmi->ddc_bus = GMBUS_PORT_DPC; - dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == HDMID) { - intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); - intel_hdmi->ddc_bus = GMBUS_PORT_DPD; - dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) { - DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n"); - intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); + intel_encoder->cloneable = false; + + intel_hdmi->ddi_port = port; + switch (port) { + case PORT_B: intel_hdmi->ddc_bus = GMBUS_PORT_DPB; - intel_hdmi->ddi_port = PORT_B; dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) { - DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n"); - intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); + break; + case PORT_C: intel_hdmi->ddc_bus = GMBUS_PORT_DPC; - intel_hdmi->ddi_port = PORT_C; dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; - } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) { - DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n"); - intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); + break; + case PORT_D: intel_hdmi->ddc_bus = GMBUS_PORT_DPD; - intel_hdmi->ddi_port = PORT_D; dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; - } else { - /* If we got an unknown sdvox_reg, things are pretty much broken - * in a way that we should let the kernel know about it */ + break; + case PORT_A: + /* Internal port only for eDP. 
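+ * HDMI on port A is therefore a programming error, and it falls through + * to the BUG() below together with any unknown port value.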
*/ + default: BUG(); } @@ -987,10 +1027,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) intel_hdmi->set_infoframes = cpt_set_infoframes; } - if (IS_HASWELL(dev)) - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw); - else - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); + if (IS_HASWELL(dev)) { + intel_encoder->enable = intel_enable_ddi; + intel_encoder->disable = intel_disable_ddi; + intel_encoder->get_hw_state = intel_ddi_get_hw_state; + drm_encoder_helper_add(&intel_encoder->base, + &intel_hdmi_helper_funcs_hsw); + } else { + intel_encoder->enable = intel_enable_hdmi; + intel_encoder->disable = intel_disable_hdmi; + intel_encoder->get_hw_state = intel_hdmi_get_hw_state; + drm_encoder_helper_add(&intel_encoder->base, + &intel_hdmi_helper_funcs); + } + intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_hdmi_add_properties(intel_hdmi, connector); diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index e9a6f6aaed85..40d72bd64e11 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -65,13 +65,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) struct intel_lvds, base); } +static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 lvds_reg, tmp; + + if (HAS_PCH_SPLIT(dev)) { + lvds_reg = PCH_LVDS; + } else { + lvds_reg = LVDS; + } + + tmp = I915_READ(lvds_reg); + + if (!(tmp & LVDS_PORT_EN)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + /** * Sets the power state for the panel. */ -static void intel_lvds_enable(struct intel_lvds *intel_lvds) +static void intel_enable_lvds(struct intel_encoder *encoder) { - struct drm_device *dev = intel_lvds->base.base.dev; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc); + struct drm_device *dev = encoder->base.dev; + struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -111,9 +138,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) intel_panel_enable_backlight(dev, intel_crtc->pipe); } -static void intel_lvds_disable(struct intel_lvds *intel_lvds) +static void intel_disable_lvds(struct intel_encoder *encoder) { - struct drm_device *dev = intel_lvds->base.base.dev; + struct drm_device *dev = encoder->base.dev; + struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); struct drm_i915_private *dev_priv = dev->dev_private; u32 ctl_reg, lvds_reg, stat_reg; @@ -142,18 +170,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds) POSTING_READ(lvds_reg); } -static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) -{ - struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - - if (mode == DRM_MODE_DPMS_ON) - intel_lvds_enable(intel_lvds); - else - intel_lvds_disable(intel_lvds); - - /* XXX: We never power down the LVDS pairs. 
*/ -} - static int intel_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { @@ -234,9 +250,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - struct intel_encoder *tmp_encoder; + struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; int pipe; @@ -246,14 +261,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return false; } - /* Should never happen!! */ - for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) { - if (&tmp_encoder->base != encoder) { - DRM_ERROR("Can't enable LVDS and another " - "encoder on the same pipe\n"); - return false; - } - } + if (intel_encoder_check_is_cloned(&intel_lvds->base)) + return false; /* * We have timings from the BIOS for the panel, put them in @@ -405,23 +414,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, return true; } -static void intel_lvds_prepare(struct drm_encoder *encoder) -{ - struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - - intel_lvds_disable(intel_lvds); -} - -static void intel_lvds_commit(struct drm_encoder *encoder) -{ - struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - - /* Always do a full power on as we do not know what state - * we were left in. - */ - intel_lvds_enable(intel_lvds); -} - static void intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -535,7 +527,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, dev_priv->modeset_on_lid = 0; mutex_lock(&dev->mode_config.mutex); - drm_helper_resume_force_mode(dev); + intel_modeset_check_state(dev); mutex_unlock(&dev->mode_config.mutex); return NOTIFY_OK; @@ -587,8 +579,8 @@ static int intel_lvds_set_property(struct drm_connector *connector, * If the CRTC is enabled, the display will be changed * according to the new panel fitting mode. 
*/ - drm_crtc_helper_set_mode(crtc, &crtc->mode, - crtc->x, crtc->y, crtc->fb); + intel_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); } } @@ -596,11 +588,9 @@ static int intel_lvds_set_property(struct drm_connector *connector, } static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { - .dpms = intel_lvds_dpms, .mode_fixup = intel_lvds_mode_fixup, - .prepare = intel_lvds_prepare, .mode_set = intel_lvds_mode_set, - .commit = intel_lvds_commit, + .disable = intel_encoder_noop, }; static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { @@ -610,7 +600,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = intel_connector_dpms, .detect = intel_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_lvds_set_property, @@ -972,10 +962,15 @@ bool intel_lvds_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); + intel_encoder->enable = intel_enable_lvds; + intel_encoder->disable = intel_disable_lvds; + intel_encoder->get_hw_state = intel_lvds_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); + intel_encoder->cloneable = false; if (HAS_PCH_SPLIT(dev)) intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); else if (IS_GEN4(dev)) diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c index 29b72593fbb2..4bc1c0fc342a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_modes.c +++ b/trunk/drivers/gpu/drm/i915/intel_modes.c @@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector, drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); - connector->display_info.raw_edid = NULL; kfree(edid); return ret; diff --git a/trunk/drivers/gpu/drm/i915/intel_opregion.c b/trunk/drivers/gpu/drm/i915/intel_opregion.c index 18bd0af855dc..e27c17012628 100644 --- a/trunk/drivers/gpu/drm/i915/intel_opregion.c +++ b/trunk/drivers/gpu/drm/i915/intel_opregion.c @@ -427,6 +427,25 @@ static void intel_didl_outputs(struct drm_device *dev) goto end; } +static void intel_setup_cadls(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + int i = 0; + u32 disp_id; + + /* Initialize the CADL field by duplicating the DIDL values. + * Technically, this is not always correct as display outputs may exist + * but not be active. This initialization is necessary for some Clevo + * laptops that check this field before processing the brightness and + * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if + * there are fewer than eight devices.
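+ * The copy loop below follows the same convention: it always copies at + * least one entry and stops after the first zero ID or once all eight + * slots have been written.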
*/ + do { + disp_id = ioread32(&opregion->acpi->didl[i]); + iowrite32(disp_id, &opregion->acpi->cadl[i]); + } while (++i < 8 && disp_id != 0); +} + void intel_opregion_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev) return; if (opregion->acpi) { - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_didl_outputs(dev); + intel_setup_cadls(dev); + } /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. diff --git a/trunk/drivers/gpu/drm/i915/intel_overlay.c b/trunk/drivers/gpu/drm/i915/intel_overlay.c index 830d0dd610e1..afd0f30ab882 100644 --- a/trunk/drivers/gpu/drm/i915/intel_overlay.c +++ b/trunk/drivers/gpu/drm/i915/intel_overlay.c @@ -235,54 +235,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, return 0; } -/* Workaround for i830 bug where pipe a must be enable to change control regs */ -static int -i830_activate_pipe_a(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_crtc *crtc; - struct drm_crtc_helper_funcs *crtc_funcs; - struct drm_display_mode vesa_640x480 = { - DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, - 752, 800, 0, 480, 489, 492, 525, 0, - DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) - }, *mode; - - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]); - if (crtc->dpms_mode == DRM_MODE_DPMS_ON) - return 0; - - /* most i8xx have pipe a forced on, so don't trust dpms mode */ - if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE) - return 0; - - crtc_funcs = crtc->base.helper_private; - if (crtc_funcs->dpms == NULL) - return 0; - - DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n"); - - mode = drm_mode_duplicate(dev, &vesa_640x480); - - if (!drm_crtc_helper_set_mode(&crtc->base, mode, - crtc->base.x, crtc->base.y, - crtc->base.fb)) - return 0; - - crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON); - return 1; -} - -static void -i830_deactivate_pipe_a(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0]; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); -} - /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { @@ -290,17 +242,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; struct drm_i915_gem_request *request; - int pipe_a_quirk = 0; int ret; BUG_ON(overlay->active); overlay->active = 1; - if (IS_I830(dev)) { - pipe_a_quirk = i830_activate_pipe_a(dev); - if (pipe_a_quirk < 0) - return pipe_a_quirk; - } + WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); request = kzalloc(sizeof(*request), GFP_KERNEL); if (request == NULL) { @@ -322,9 +269,6 @@ static int intel_overlay_on(struct intel_overlay *overlay) ret = intel_overlay_do_wait_request(overlay, request, NULL); out: - if (pipe_a_quirk) - i830_deactivate_pipe_a(dev); - return ret; } @@ -1439,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev) } overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); if (ret) { DRM_ERROR("failed to pin 
overlay register bo\n"); goto out_free_bo; diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index ba8a27b1757a..d69f8f49beb5 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -31,6 +31,8 @@ #include "../../../platform/x86/intel_ips.h" #include +#define FORCEWAKE_ACK_TIMEOUT_MS 2 + /* FBC, or Frame Buffer Compression, is a technique employed to compress the * framebuffer contents in-memory, aiming at reducing the required bandwidth * during in-memory transfers and, therefore, reduce the power packet. @@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) break; } - dev_priv->r_t = dev_priv->mem_freq; + dev_priv->ips.r_t = dev_priv->mem_freq; switch (csipll & 0x3ff) { case 0x00c: @@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) } if (dev_priv->fsb_freq == 3200) { - dev_priv->c_m = 0; + dev_priv->ips.c_m = 0; } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { - dev_priv->c_m = 1; + dev_priv->ips.c_m = 1; } else { - dev_priv->c_m = 2; + dev_priv->ips.c_m = 2; } } @@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev) return NULL; } - ret = i915_gem_object_pin(ctx, 4096, true); + ret = i915_gem_object_pin(ctx, 4096, true, false); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; @@ -2160,11 +2162,22 @@ intel_alloc_context_page(struct drm_device *dev) return NULL; } +/** + * Lock protecting IPS related data structures + */ +DEFINE_SPINLOCK(mchdev_lock); + +/* Global for IPS driver to get at the current i915 device. Protected by + * mchdev_lock. */ +static struct drm_i915_private *i915_mch_dev; + bool ironlake_set_drps(struct drm_device *dev, u8 val) { struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; + assert_spin_locked(&mchdev_lock); + rgvswctl = I915_READ16(MEMSWCTL); if (rgvswctl & MEMCTL_CMD_STS) { DRM_DEBUG("gpu busy, RCS change rejected\n"); @@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev) u32 rgvmodectl = I915_READ(MEMMODECTL); u8 fmax, fmin, fstart, vstart; + spin_lock_irq(&mchdev_lock); + /* Enable temp reporting */ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); @@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev) vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; - dev_priv->fmax = fmax; /* IPS callback will increase this */ - dev_priv->fstart = fstart; + dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ + dev_priv->ips.fstart = fstart; - dev_priv->max_delay = fstart; - dev_priv->min_delay = fmin; - dev_priv->cur_delay = fstart; + dev_priv->ips.max_delay = fstart; + dev_priv->ips.min_delay = fmin; + dev_priv->ips.cur_delay = fstart; DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, fstart); @@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev) rgvmodectl |= MEMMODE_SWMODE_EN; I915_WRITE(MEMMODECTL, rgvmodectl); - if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) + if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) DRM_ERROR("stuck trying to change perf mode\n"); - msleep(1); + mdelay(1); ironlake_set_drps(dev, fstart); - dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + + dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + I915_READ(0x112e0); - dev_priv->last_time1 = 
jiffies_to_msecs(jiffies); - dev_priv->last_count2 = I915_READ(0x112f4); - getrawmonotonic(&dev_priv->last_time2); + dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); + dev_priv->ips.last_count2 = I915_READ(0x112f4); + getrawmonotonic(&dev_priv->ips.last_time2); + + spin_unlock_irq(&mchdev_lock); } static void ironlake_disable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvswctl; + + spin_lock_irq(&mchdev_lock); + + rgvswctl = I915_READ16(MEMSWCTL); /* Ack interrupts, disable EFC interrupt */ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); @@ -2259,31 +2280,54 @@ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->fstart); - msleep(1); + ironlake_set_drps(dev, dev_priv->ips.fstart); + mdelay(1); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE(MEMSWCTL, rgvswctl); - msleep(1); + mdelay(1); + spin_unlock_irq(&mchdev_lock); } -void gen6_set_rps(struct drm_device *dev, u8 val) +/* There's a funny hw issue where the hw returns all 0 when reading from + * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value + * ourselves, instead of doing a rmw cycle (which might result in us clearing + * all limits and the gpu being stuck at whatever frequency it is at atm). + */ +static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 limits; limits = 0; - if (val >= dev_priv->max_delay) - val = dev_priv->max_delay; - else - limits |= dev_priv->max_delay << 24; - if (val <= dev_priv->min_delay) - val = dev_priv->min_delay; - else - limits |= dev_priv->min_delay << 16; + if (*val >= dev_priv->rps.max_delay) + *val = dev_priv->rps.max_delay; + limits |= dev_priv->rps.max_delay << 24; + + /* Only set the down limit when we've reached the lowest level to avoid + * getting more interrupts, otherwise leave this clear. This prevents a + * race in the hw when coming out of rc6: There's a tiny window where + * the hw runs at the minimal clock before selecting the desired + * frequency; if the down threshold expires in that window we will not + * receive a down interrupt. */ + if (*val <= dev_priv->rps.min_delay) { + *val = dev_priv->rps.min_delay; + limits |= dev_priv->rps.min_delay << 16; + } - if (val == dev_priv->cur_delay) + return limits; +} + +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 limits = gen6_rps_limits(dev_priv, &val); + + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + WARN_ON(val > dev_priv->rps.max_delay); + WARN_ON(val < dev_priv->rps.min_delay); + + if (val == dev_priv->rps.cur_delay) return; I915_WRITE(GEN6_RPNSWREQ, @@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val) */ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); - dev_priv->cur_delay = val; + POSTING_READ(GEN6_RPNSWREQ); + + dev_priv->rps.cur_delay = val; + + trace_intel_gpu_freq_change(val * 50); } static void gen6_disable_rps(struct drm_device *dev) @@ -2312,40 +2360,40 @@ * register (PMIMR) to mask PM interrupts. The only risk is in leaving * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up.
*/ - spin_lock_irq(&dev_priv->rps_lock); - dev_priv->pm_iir = 0; - spin_unlock_irq(&dev_priv->rps_lock); + spin_lock_irq(&dev_priv->rps.lock); + dev_priv->rps.pm_iir = 0; + spin_unlock_irq(&dev_priv->rps.lock); I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); } int intel_enable_rc6(const struct drm_device *dev) { - /* - * Respect the kernel parameter if it is set - */ + /* Respect the kernel parameter if it is set */ if (i915_enable_rc6 >= 0) return i915_enable_rc6; - /* - * Disable RC6 on Ironlake - */ - if (INTEL_INFO(dev)->gen == 5) - return 0; + if (INTEL_INFO(dev)->gen == 5) { +#ifdef CONFIG_INTEL_IOMMU + /* Disable rc6 on ilk if VT-d is on. */ + if (intel_iommu_gfx_mapped) + return false; +#endif + DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n"); + return INTEL_RC6_ENABLE; + } - /* On Haswell, only RC6 is available. So let's enable it by default to - * provide better testing and coverage since the beginning. - */ - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev)) { + DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); return INTEL_RC6_ENABLE; + } - /* - * Disable rc6 on Sandybridge - */ + /* snb/ivb have more than one rc6 state. */ if (INTEL_INFO(dev)->gen == 6) { DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); return INTEL_RC6_ENABLE; } + DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } @@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev) gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); /* In units of 100MHz */ - dev_priv->max_delay = rp_state_cap & 0xff; - dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16; - dev_priv->cur_delay = 0; + dev_priv->rps.max_delay = rp_state_cap & 0xff; + dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; + dev_priv->rps.cur_delay = 0; /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - dev_priv->max_delay << 24 | - dev_priv->min_delay << 16); + dev_priv->rps.max_delay << 24 | + dev_priv->rps.min_delay << 16); I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); @@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev) 500)) DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); if (pcu_mbox & (1<<31)) { /* OC supported */ - dev_priv->max_delay = pcu_mbox & 0xff; + dev_priv->rps.max_delay = pcu_mbox & 0xff; DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); } @@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev) /* requires MSI enabled */ I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); - spin_lock_irq(&dev_priv->rps_lock); - WARN_ON(dev_priv->pm_iir != 0); + spin_lock_irq(&dev_priv->rps.lock); + WARN_ON(dev_priv->rps.pm_iir != 0); I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps_lock); + spin_unlock_irq(&dev_priv->rps.lock); /* enable all PM interrupts */ I915_WRITE(GEN6_PMINTRMSK, 0); @@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev) * to use for memory access. We do this by specifying the IA frequency * the PCU should use as a reference to determine the ring frequency. 
*/ - for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; + for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; gpu_freq--) { - int diff = dev_priv->max_delay - gpu_freq; + int diff = dev_priv->rps.max_delay - gpu_freq; /* * For GPU frequencies less than 750MHz, just use the lowest @@ -2686,14 +2734,16 @@ static const struct cparams { { 0, 800, 231, 23784 }, }; -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) +static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) { u64 total_count, diff, ret; u32 count1, count2, count3, m = 0, c = 0; unsigned long now = jiffies_to_msecs(jiffies), diff1; int i; - diff1 = now - dev_priv->last_time1; + assert_spin_locked(&mchdev_lock); + + diff1 = now - dev_priv->ips.last_time1; /* Prevent division-by-zero if we are asking too fast. * Also, we don't get interesting results if we are polling @@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) * in such cases. */ if (diff1 <= 10) - return dev_priv->chipset_power; + return dev_priv->ips.chipset_power; count1 = I915_READ(DMIEC); count2 = I915_READ(DDREC); @@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) total_count = count1 + count2 + count3; /* FIXME: handle per-counter overflow */ - if (total_count < dev_priv->last_count1) { - diff = ~0UL - dev_priv->last_count1; + if (total_count < dev_priv->ips.last_count1) { + diff = ~0UL - dev_priv->ips.last_count1; diff += total_count; } else { - diff = total_count - dev_priv->last_count1; + diff = total_count - dev_priv->ips.last_count1; } for (i = 0; i < ARRAY_SIZE(cparams); i++) { - if (cparams[i].i == dev_priv->c_m && - cparams[i].t == dev_priv->r_t) { + if (cparams[i].i == dev_priv->ips.c_m && + cparams[i].t == dev_priv->ips.r_t) { m = cparams[i].m; c = cparams[i].c; break; @@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) ret = ((m * diff) + c); ret = div_u64(ret, 10); - dev_priv->last_count1 = total_count; - dev_priv->last_time1 = now; + dev_priv->ips.last_count1 = total_count; + dev_priv->ips.last_time1 = now; - dev_priv->chipset_power = ret; + dev_priv->ips.chipset_power = ret; return ret; } +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) +{ + unsigned long val; + + if (dev_priv->info->gen != 5) + return 0; + + spin_lock_irq(&mchdev_lock); + + val = __i915_chipset_val(dev_priv); + + spin_unlock_irq(&mchdev_lock); + + return val; +} + unsigned long i915_mch_val(struct drm_i915_private *dev_priv) { unsigned long m, x, b; @@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) return v_table[pxvid].vd; } -void i915_update_gfx_val(struct drm_i915_private *dev_priv) +static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) { struct timespec now, diff1; u64 diff; unsigned long diffms; u32 count; - if (dev_priv->info->gen != 5) - return; + assert_spin_locked(&mchdev_lock); getrawmonotonic(&now); - diff1 = timespec_sub(now, dev_priv->last_time2); + diff1 = timespec_sub(now, dev_priv->ips.last_time2); /* Don't divide by 0 */ diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; @@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) count = I915_READ(GFXEC); - if (count < dev_priv->last_count2) { - diff = ~0UL - dev_priv->last_count2; + if (count < dev_priv->ips.last_count2) { + diff = ~0UL - dev_priv->ips.last_count2; diff += count; } else { - diff = count - 
dev_priv->last_count2; + diff = count - dev_priv->ips.last_count2; } - dev_priv->last_count2 = count; - dev_priv->last_time2 = now; + dev_priv->ips.last_count2 = count; + dev_priv->ips.last_time2 = now; /* More magic constants... */ diff = diff * 1181; diff = div_u64(diff, diffms * 10); - dev_priv->gfx_power = diff; + dev_priv->ips.gfx_power = diff; } -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) +void i915_update_gfx_val(struct drm_i915_private *dev_priv) +{ + if (dev_priv->info->gen != 5) + return; + + spin_lock_irq(&mchdev_lock); + + __i915_update_gfx_val(dev_priv); + + spin_unlock_irq(&mchdev_lock); +} + +static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) { unsigned long t, corr, state1, corr2, state2; u32 pxvid, ext_v; - pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); + assert_spin_locked(&mchdev_lock); + + pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4)); pxvid = (pxvid >> 24) & 0x7f; ext_v = pvid_to_extvid(dev_priv, pxvid); @@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) corr = corr * ((150142 * state1) / 10000 - 78642); corr /= 100000; - corr2 = (corr * dev_priv->corr); + corr2 = (corr * dev_priv->ips.corr); state2 = (corr2 * state1) / 10000; state2 /= 100; /* convert to mW */ - i915_update_gfx_val(dev_priv); + __i915_update_gfx_val(dev_priv); - return dev_priv->gfx_power + state2; + return dev_priv->ips.gfx_power + state2; } -/* Global for IPS driver to get at the current i915 device */ -static struct drm_i915_private *i915_mch_dev; -/* - * Lock protecting IPS related data structures - * - i915_mch_dev - * - dev_priv->max_delay - * - dev_priv->min_delay - * - dev_priv->fmax - * - dev_priv->gpu_busy - */ -static DEFINE_SPINLOCK(mchdev_lock); +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) +{ + unsigned long val; + + if (dev_priv->info->gen != 5) + return 0; + + spin_lock_irq(&mchdev_lock); + + val = __i915_gfx_val(dev_priv); + + spin_unlock_irq(&mchdev_lock); + + return val; +} /** * i915_read_mch_val - return value for IPS use @@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void) struct drm_i915_private *dev_priv; unsigned long chipset_val, graphics_val, ret = 0; - spin_lock(&mchdev_lock); + spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; - chipset_val = i915_chipset_val(dev_priv); - graphics_val = i915_gfx_val(dev_priv); + chipset_val = __i915_chipset_val(dev_priv); + graphics_val = __i915_gfx_val(dev_priv); ret = chipset_val + graphics_val; out_unlock: - spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); return ret; } @@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void) struct drm_i915_private *dev_priv; bool ret = true; - spin_lock(&mchdev_lock); + spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; - if (dev_priv->max_delay > dev_priv->fmax) - dev_priv->max_delay--; + if (dev_priv->ips.max_delay > dev_priv->ips.fmax) + dev_priv->ips.max_delay--; out_unlock: - spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); return ret; } @@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void) struct drm_i915_private *dev_priv; bool ret = true; - spin_lock(&mchdev_lock); + spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; - if (dev_priv->max_delay < dev_priv->min_delay) - dev_priv->max_delay++; + if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) + dev_priv->ips.max_delay++; out_unlock: - 
spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); return ret; } @@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); bool i915_gpu_busy(void) { struct drm_i915_private *dev_priv; + struct intel_ring_buffer *ring; bool ret = false; + int i; - spin_lock(&mchdev_lock); + spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; - ret = dev_priv->busy; + for_each_ring(ring, dev_priv, i) + ret |= !list_empty(&ring->request_list); out_unlock: - spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); return ret; } @@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void) struct drm_i915_private *dev_priv; bool ret = true; - spin_lock(&mchdev_lock); + spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; - dev_priv->max_delay = dev_priv->fstart; + dev_priv->ips.max_delay = dev_priv->ips.fstart; - if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) + if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) ret = false; out_unlock: - spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); return ret; } @@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void) void intel_gpu_ips_init(struct drm_i915_private *dev_priv) { - spin_lock(&mchdev_lock); + /* We only register the i915 ips part with intel-ips once everything is + * set up, to avoid intel-ips sneaking in and reading bogus values. */ + spin_lock_irq(&mchdev_lock); i915_mch_dev = dev_priv; - dev_priv->mchdev_lock = &mchdev_lock; - spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); ips_ping_for_i915_load(); } void intel_gpu_ips_teardown(void) { - spin_lock(&mchdev_lock); + spin_lock_irq(&mchdev_lock); i915_mch_dev = NULL; - spin_unlock(&mchdev_lock); + spin_unlock_irq(&mchdev_lock); } static void intel_init_emon(struct drm_device *dev) { @@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev) lcfuse = I915_READ(LCFUSE02); - dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); + dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } void intel_disable_gt_powersave(struct drm_device *dev) @@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev) dev_priv->display.init_pch_clock_gating(dev); } -static void gen6_sanitize_pm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 limits, delay, old; - - gen6_gt_force_wake_get(dev_priv); - - old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS); - /* Make sure we continue to get interrupts - * until we hit the minimum or maximum frequencies. - */ - limits &= ~(0x3f << 16 | 0x3f << 24); - delay = dev_priv->cur_delay; - if (delay < dev_priv->max_delay) - limits |= (dev_priv->max_delay & 0x3f) << 24; - if (delay > dev_priv->min_delay) - limits |= (dev_priv->min_delay & 0x3f) << 16; - - if (old != limits) { - /* Note that the known failure case is to read back 0. */ - DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS " - "expected %08x, was %08x\n", limits, old); - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); - } - - gen6_gt_force_wake_put(dev_priv); -} - -void intel_sanitize_pm(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->display.sanitize_pm) - dev_priv->display.sanitize_pm(dev); -} - /* Starting with Haswell, we have different power wells for * different parts of the GPU. This attempts to enable them all. 
*/ @@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = gen6_init_clock_gating; - dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ if (SNB_READ_WM0_LATENCY()) { @@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; - dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else if (IS_HASWELL(dev)) { if (SNB_READ_WM0_LATENCY()) { dev_priv->display.update_wm = sandybridge_update_wm; @@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } dev_priv->display.init_clock_gating = haswell_init_clock_gating; - dev_priv->display.sanitize_pm = gen6_sanitize_pm; } else dev_priv->display.update_wm = NULL; } else if (IS_VALLEYVIEW(dev)) { @@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) else forcewake_ack = FORCEWAKE_ACK; - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) - DRM_ERROR("Force wake wait timed out\n"); + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(FORCEWAKE); + POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) - DRM_ERROR("Force wake wait timed out\n"); + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); __gen6_gt_wait_for_thread_c0(dev_priv); } @@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) else forcewake_ack = FORCEWAKE_MT_ACK; - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) - DRM_ERROR("Force wake wait timed out\n"); + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); - POSTING_READ(FORCEWAKE_MT); + POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ - if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) - DRM_ERROR("Force wake wait timed out\n"); + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); __gen6_gt_wait_for_thread_c0(dev_priv); } @@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - POSTING_READ(FORCEWAKE); + /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - POSTING_READ(FORCEWAKE_MT); + /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ gen6_gt_check_fifodbg(dev_priv); } @@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) static void vlv_force_wake_get(struct drm_i915_private *dev_priv) { - /* Already awake? 
*/ - if ((I915_READ(0x130094) & 0xa1) == 0xa1) - return; + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); - POSTING_READ(FORCEWAKE_VLV); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); - if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) - DRM_ERROR("Force wake wait timed out\n"); + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); __gen6_gt_wait_for_thread_c0(dev_priv); } static void vlv_force_wake_put(struct drm_i915_private *dev_priv) { - I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); - /* FIXME: confirm VLV behavior with Punit folks */ - POSTING_READ(FORCEWAKE_VLV); + I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); + /* The below doubles as a POSTING_READ */ + gen6_gt_check_fifodbg(dev_priv); } void intel_gt_init(struct drm_device *dev) diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index e2a73b38abe9..984a0c5fbf5d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -262,6 +262,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, return 0; } +static int +gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + +static int +gen7_render_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, u32 flush_domains) +{ + u32 flags = 0; + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + /* + * Ensure that any following seqno writes only happen when the render + * cache is indeed flushed. + * + * Workaround: 4th PIPE_CONTROL command (except the ones with only + * read-cache invalidate bits set) must have the CS_STALL bit set. We + * don't try to be clever and just set it unconditionally. + */ + flags |= PIPE_CONTROL_CS_STALL; + + /* Just flush everything. Experiments have shown that reducing the + * number of bits based on the write domains has little performance + * impact. + */ + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + + /* Workaround: we must issue a pipe_control with CS-stall bit + * set before a pipe_control command that has the state cache + * invalidate bit set. 
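+ * gen7_render_ring_cs_stall_wa() above emits exactly that: a PIPE_CONTROL + * with only CS_STALL and STALL_AT_SCOREBOARD set, purely to satisfy this + * restriction.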
*/ + gen7_render_ring_cs_stall_wa(ring); + } + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + return 0; +} + static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { @@ -382,12 +459,12 @@ init_pipe_control(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true); + ret = i915_gem_object_pin(obj, 4096, true, false); if (ret) goto err_unref; pc->gtt_offset = obj->gtt_offset; - pc->cpu_page = kmap(obj->pages[0]); + pc->cpu_page = kmap(sg_page(obj->pages->sgl)); if (pc->cpu_page == NULL) goto err_unpin; @@ -414,7 +491,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) return; obj = pc->obj; - kunmap(obj->pages[0]); + + kunmap(sg_page(obj->pages->sgl)); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); @@ -462,7 +540,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 6) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (IS_IVYBRIDGE(dev)) + if (HAS_L3_GPU_CACHE(dev)) I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); return ret; @@ -628,26 +706,24 @@ pc_render_add_request(struct intel_ring_buffer *ring, } static u32 -gen6_ring_get_seqno(struct intel_ring_buffer *ring) +gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) { - struct drm_device *dev = ring->dev; - /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like * ACTHD) before reading the status page. */ - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (!lazy_coherency) intel_ring_get_active_head(ring); return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static u32 -ring_get_seqno(struct intel_ring_buffer *ring) +ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring) +pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) { struct pipe_control *pc = ring->private; return pc->cpu_page[0]; @@ -852,7 +928,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (ring->irq_refcount++ == 0) { - if (IS_IVYBRIDGE(dev) && ring->id == RCS) + if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | GEN6_RENDER_L3_PARITY_ERROR)); else @@ -875,7 +951,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--ring->irq_refcount == 0) { - if (IS_IVYBRIDGE(dev) && ring->id == RCS) + if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); else I915_WRITE_IMR(ring, ~0); @@ -951,7 +1027,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring) if (obj == NULL) return; - kunmap(obj->pages[0]); + kunmap(sg_page(obj->pages->sgl)); i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; @@ -972,13 +1048,13 @@ static int init_status_page(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true); + ret = i915_gem_object_pin(obj, 4096, true, false); if (ret != 0) { goto err_unref; } ring->status_page.gfx_addr = 
obj->gtt_offset; - ring->status_page.page_addr = kmap(obj->pages[0]); + ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); if (ring->status_page.page_addr == NULL) { ret = -ENOMEM; goto err_unpin; @@ -1010,7 +1086,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); - INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = 32 * PAGE_SIZE; init_waitqueue_head(&ring->irq_queue); @@ -1030,7 +1105,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->obj = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); if (ret) goto err_unref; @@ -1379,7 +1454,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; - ring->flush = gen6_render_ring_flush; + ring->flush = gen7_render_ring_flush; + if (INTEL_INFO(dev)->gen == 6) + ring->flush = gen6_render_ring_flush; ring->irq_get = gen6_ring_get_irq; ring->irq_put = gen6_ring_put_irq; ring->irq_enable_mask = GT_USER_INTERRUPT; @@ -1481,7 +1558,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); - INIT_LIST_HEAD(&ring->gpu_write_list); ring->size = size; ring->effective_size = ring->size; @@ -1574,3 +1650,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) return intel_init_ring_buffer(dev, ring); } + +int +intel_ring_flush_all_caches(struct intel_ring_buffer *ring) +{ + int ret; + + if (!ring->gpu_caches_dirty) + return 0; + + ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS); + if (ret) + return ret; + + trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS); + + ring->gpu_caches_dirty = false; + return 0; +} + +int +intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring) +{ + uint32_t flush_domains; + int ret; + + flush_domains = 0; + if (ring->gpu_caches_dirty) + flush_domains = I915_GEM_GPU_DOMAINS; + + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); + if (ret) + return ret; + + trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains); + + ring->gpu_caches_dirty = false; + return 0; +} diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h index 1d3c81fdad92..2ea7a311a1f0 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -72,7 +72,14 @@ struct intel_ring_buffer { u32 flush_domains); int (*add_request)(struct intel_ring_buffer *ring, u32 *seqno); - u32 (*get_seqno)(struct intel_ring_buffer *ring); + /* Some chipsets are not quite as coherent as advertised and need + * an expensive kick to force a true read of the up-to-date seqno. + * However, the up-to-date seqno is not always required and the last + * seen value is good enough. Note that the seqno will always be + * monotonic, even if not coherent. + */ + u32 (*get_seqno)(struct intel_ring_buffer *ring, + bool lazy_coherency); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, u32 offset, u32 length); void (*cleanup)(struct intel_ring_buffer *ring); @@ -100,15 +107,6 @@ struct intel_ring_buffer { */ struct list_head request_list; - /** - * List of objects currently pending a GPU write flush. - * - * All elements on this list will belong to either the - * active_list or flushing_list, last_rendering_seqno can - * be used to differentiate between the two elements. 
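 * (With the explicit gpu_caches_dirty flag and the
 * intel_ring_flush_all_caches()/intel_ring_invalidate_all_caches()
 * helpers added above, pending GPU writes are now flushed per ring,
 * which is why this per-object write list can be dropped.)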
- */ - struct list_head gpu_write_list; - /** * Do we have some not yet emitted requests outstanding? */ @@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, void intel_ring_advance(struct intel_ring_buffer *ring); u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); +int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); +int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 123afd357611..39c319827f91 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -97,7 +97,7 @@ struct intel_sdvo { /* * Hotplug activation bits for this device */ - uint8_t hotplug_active[2]; + uint16_t hotplug_active; /** * This is used to select the color range of RGB outputs in HDMI mode. @@ -628,6 +628,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo, &outputs, sizeof(outputs)); } +static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo, + u16 *outputs) +{ + return intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_ACTIVE_OUTPUTS, + outputs, sizeof(*outputs)); +} + static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, int mode) { @@ -1142,51 +1150,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo_write_sdvox(intel_sdvo, sdvox); } -static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) +static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) { - struct drm_device *dev = encoder->dev; + struct intel_sdvo_connector *intel_sdvo_connector = + to_intel_sdvo_connector(&connector->base); + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); + u16 active_outputs; + + intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); + + if (active_outputs & intel_sdvo_connector->output_flag) + return true; + else + return false; +} + +static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + u32 tmp; + + tmp = I915_READ(intel_sdvo->sdvo_reg); + + if (!(tmp & SDVO_ENABLE)) + return false; + + if (HAS_PCH_CPT(dev)) + *pipe = PORT_TO_PIPE_CPT(tmp); + else + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + +static void intel_disable_sdvo(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + u32 temp; + + intel_sdvo_set_active_outputs(intel_sdvo, 0); + if (0) + intel_sdvo_set_encoder_power_state(intel_sdvo, + DRM_MODE_DPMS_OFF); + + temp = I915_READ(intel_sdvo->sdvo_reg); + if ((temp & SDVO_ENABLE) != 0) { + intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); + } +} + +static void intel_enable_sdvo(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); u32 temp; + bool input1, input2; + int i; + u8 status; + + temp = 
I915_READ(intel_sdvo->sdvo_reg); + if ((temp & SDVO_ENABLE) == 0) + intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); + for (i = 0; i < 2; i++) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); + /* Warn if the device reported failure to sync. + * A lot of SDVO devices fail to notify of sync, but it's + * a given that if the status is a success, we succeeded. + */ + if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { + DRM_DEBUG_KMS("First %s output reported failure to " + "sync\n", SDVO_NAME(intel_sdvo)); + } + + if (0) + intel_sdvo_set_encoder_power_state(intel_sdvo, + DRM_MODE_DPMS_ON); + intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); +} + +static void intel_sdvo_dpms(struct drm_connector *connector, int mode) +{ + struct drm_crtc *crtc; + struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + + /* dvo supports only 2 dpms states. */ + if (mode != DRM_MODE_DPMS_ON) + mode = DRM_MODE_DPMS_OFF; + + if (mode == connector->dpms) + return; + + connector->dpms = mode; + + /* Only need to change hw state when actually enabled */ + crtc = intel_sdvo->base.base.crtc; + if (!crtc) { + intel_sdvo->base.connectors_active = false; + return; + } if (mode != DRM_MODE_DPMS_ON) { intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); - if (mode == DRM_MODE_DPMS_OFF) { - temp = I915_READ(intel_sdvo->sdvo_reg); - if ((temp & SDVO_ENABLE) != 0) { - intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); - } - } + intel_sdvo->base.connectors_active = false; + + intel_crtc_update_dpms(crtc); } else { - bool input1, input2; - int i; - u8 status; - - temp = I915_READ(intel_sdvo->sdvo_reg); - if ((temp & SDVO_ENABLE) == 0) - intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); - for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev, intel_crtc->pipe); - - status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); - /* Warn if the device reported failure to sync. - * A lot of SDVO devices fail to notify of sync, but it's - * a given it the status is a success, we succeeded. - */ - if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { - DRM_DEBUG_KMS("First %s output reported failure to " - "sync\n", SDVO_NAME(intel_sdvo)); - } + intel_sdvo->base.connectors_active = true; + + intel_crtc_update_dpms(crtc); if (0) intel_sdvo_set_encoder_power_state(intel_sdvo, mode); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); } - return; + + intel_modeset_check_state(connector->dev); } static int intel_sdvo_mode_valid(struct drm_connector *connector, @@ -1251,25 +1340,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } -static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) +static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) { struct drm_device *dev = intel_sdvo->base.base.dev; - u8 response[2]; + uint16_t hotplug; /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise * on the line. 
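 * Returning 0 here means intel_sdvo_dvi_init() sees no hotplug-capable
 * outputs on these chips and falls back to connector polling
 * (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT).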
*/ if (IS_I945G(dev) || IS_I945GM(dev)) - return false; + return 0; - return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, - &response, 2) && response[0]; + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, + &hotplug, sizeof(hotplug))) + return 0; + + return hotplug; } static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); + intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, + &intel_sdvo->hotplug_active, 2); } static bool @@ -1345,7 +1438,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector) } } else status = connector_status_disconnected; - connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1419,7 +1511,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) else ret = connector_status_disconnected; - connector->display_info.raw_edid = NULL; kfree(edid); } else ret = connector_status_connected; @@ -1465,7 +1556,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) drm_add_edid_modes(connector, edid); } - connector->display_info.raw_edid = NULL; kfree(edid); } } @@ -1837,8 +1927,8 @@ intel_sdvo_set_property(struct drm_connector *connector, done: if (intel_sdvo->base.base.crtc) { struct drm_crtc *crtc = intel_sdvo->base.base.crtc; - drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, - crtc->y, crtc->fb); + intel_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); } return 0; @@ -1846,15 +1936,13 @@ intel_sdvo_set_property(struct drm_connector *connector, } static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { - .dpms = intel_sdvo_dpms, .mode_fixup = intel_sdvo_mode_fixup, - .prepare = intel_encoder_prepare, .mode_set = intel_sdvo_mode_set, - .commit = intel_encoder_commit, + .disable = intel_encoder_noop, }; static const struct drm_connector_funcs intel_sdvo_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = intel_sdvo_dpms, .detect = intel_sdvo_detect, .fill_modes = drm_helper_probe_single_connector_modes, .set_property = intel_sdvo_set_property, @@ -2026,6 +2114,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, connector->base.base.interlace_allowed = 1; connector->base.base.doublescan_allowed = 0; connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; + connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; intel_connector_attach_encoder(&connector->base, &encoder->base); drm_sysfs_connector_add(&connector->base.base); @@ -2064,17 +2153,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; - if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { + if (intel_sdvo_get_hotplug_support(intel_sdvo) & + intel_sdvo_connector->output_flag) { connector->polled = DRM_CONNECTOR_POLL_HPD; - intel_sdvo->hotplug_active[0] |= 1 << device; + intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; /* Some SDVO devices have one-shot hotplug interrupts. * Ensure that they get re-enabled when an interrupt happens. 
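 * The hot_plug hook installed just below re-arms them by rewriting
 * SDVO_CMD_SET_ACTIVE_HOT_PLUG with the cached hotplug_active mask.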
*/ intel_encoder->hot_plug = intel_sdvo_enable_hotplug; intel_sdvo_enable_hotplug(intel_encoder); - } - else + } else { connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + } encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; @@ -2082,8 +2172,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo->is_hdmi = true; } - intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT)); + intel_sdvo->base.cloneable = true; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (intel_sdvo->is_hdmi) @@ -2114,7 +2203,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) intel_sdvo->is_tv = true; intel_sdvo->base.needs_tv_clock = true; - intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; + intel_sdvo->base.cloneable = false; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); @@ -2157,8 +2246,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; } - intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT)); + intel_sdvo->base.cloneable = true; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); @@ -2190,8 +2278,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; } - intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT)); + /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling, + * as opposed to native LVDS, where we upscale with the panel-fitter + * (and hence only the native LVDS resolution could be cloned). */ + intel_sdvo->base.cloneable = true; intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) @@ -2576,6 +2666,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); + intel_encoder->disable = intel_disable_sdvo; + intel_encoder->enable = intel_enable_sdvo; + intel_encoder->get_hw_state = intel_sdvo_get_hw_state; + /* In default case sdvo lvds is false */ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; @@ -2590,7 +2684,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) /* Only enable the hotplug irq if we need it, to work around noisy * hotplug lines. 
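 * (hotplug_active is now a single u16 output-flag mask rather than a
 * two-byte array, so any nonzero value means at least one connector
 * requested hotplug interrupts.)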
*/ - if (intel_sdvo->hotplug_active[0]) + if (intel_sdvo->hotplug_active) dev_priv->hotplug_supported_mask |= hotplug_mask; intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); diff --git a/trunk/drivers/gpu/drm/i915/intel_tv.c b/trunk/drivers/gpu/drm/i915/intel_tv.c index befce6c49704..d2c5c8f3baf3 100644 --- a/trunk/drivers/gpu/drm/i915/intel_tv.c +++ b/trunk/drivers/gpu/drm/i915/intel_tv.c @@ -836,22 +836,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector) base); } +static bool +intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ(TV_CTL); + + if (!(tmp & TV_ENC_ENABLE)) + return false; + + *pipe = PORT_TO_PIPE(tmp); + + return true; +} + static void -intel_tv_dpms(struct drm_encoder *encoder, int mode) +intel_enable_tv(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - switch (mode) { - case DRM_MODE_DPMS_ON: - I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); - break; - } + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); +} + +static void +intel_disable_tv(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); } static const struct tv_mode * @@ -895,17 +910,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = encoder->dev; struct intel_tv *intel_tv = enc_to_intel_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); - struct intel_encoder *other_encoder; if (!tv_mode) return false; - for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder) - if (&other_encoder->base != encoder) - return false; + if (intel_encoder_check_is_cloned(&intel_tv->base)) + return false; adjusted_mode->clock = tv_mode->clock; return true; @@ -1303,12 +1315,9 @@ intel_tv_detect(struct drm_connector *connector, bool force) if (force) { struct intel_load_detect_pipe tmp; - if (intel_get_load_detect_pipe(&intel_tv->base, connector, - &mode, &tmp)) { + if (intel_get_load_detect_pipe(connector, &mode, &tmp)) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(&intel_tv->base, - connector, - &tmp); + intel_release_load_detect_pipe(connector, &tmp); } else return connector_status_unknown; } else @@ -1474,22 +1483,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop } if (changed && crtc) - drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, - crtc->y, crtc->fb); + intel_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); out: return ret; } static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { - .dpms = intel_tv_dpms, .mode_fixup = intel_tv_mode_fixup, - .prepare = intel_encoder_prepare, .mode_set = intel_tv_mode_set, - .commit = intel_encoder_commit, + .disable = intel_encoder_noop, }; static const struct drm_connector_funcs intel_tv_connector_funcs = { - .dpms = drm_helper_connector_dpms, + .dpms = intel_connector_dpms, .detect = intel_tv_detect, .destroy = intel_tv_destroy, .set_property = 
intel_tv_set_property, @@ -1619,10 +1626,15 @@ intel_tv_init(struct drm_device *dev) drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, DRM_MODE_ENCODER_TVDAC); + intel_encoder->enable = intel_enable_tv; + intel_encoder->disable = intel_disable_tv; + intel_encoder->get_hw_state = intel_tv_get_hw_state; + intel_connector->get_hw_state = intel_connector_get_hw_state; + intel_connector_attach_encoder(intel_connector, intel_encoder); intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); - intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); + intel_encoder->cloneable = false; intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h index 6f13b3563234..d22cbbf3a202 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -195,7 +195,6 @@ struct mga_device { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; - atomic_t validate_sequence; } ttm; u32 reg_1e24; /* SE model number */ diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index b69642d5d850..c7420e83c0b9 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1399,7 +1399,6 @@ static int mga_vga_get_modes(struct drm_connector *connector) if (edid) { drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); - connector->display_info.raw_edid = NULL; kfree(edid); } return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c index 7e289d2ad8e4..e754aa32edf1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -289,9 +289,7 @@ dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay) if (ret) return ret; - NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n", - dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3], - dp->stat[4], dp->stat[5]); + NV_DEBUG_KMS(dev, "status %*ph\n", 6, dp->stat); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c b/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c index f03490534893..c399d510b27a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ +#include <linux/dmi.h> #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_hw.h" @@ -110,13 +111,25 @@ nv50_gpio_isr(struct drm_device *dev) nv_wr32(dev, 0xe074, intr1); } +static struct dmi_system_id gpio_reset_ids[] = { + { + .ident = "Apple Macbook 10,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"), + } + }, + { } +}; + int nv50_gpio_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; /* initialise gpios and routing to vbios defaults */ - nouveau_gpio_reset(dev); + if (dmi_check_system(gpio_reset_ids)) + nouveau_gpio_reset(dev); /* disable, and ack any pending gpio interrupts */ nv_wr32(dev, 0xe050, 0x00000000); diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index 3623b98ed3fe..d48224b0d6b4 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ 
b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -653,9 +653,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector, return false; } - DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n", - link_status[0], link_status[1], link_status[2], - link_status[3], link_status[4], link_status[5]); + DRM_DEBUG_KMS("link status %*ph\n", 6, link_status); return true; } diff --git a/trunk/drivers/gpu/drm/shmobile/Kconfig b/trunk/drivers/gpu/drm/shmobile/Kconfig new file mode 100644 index 000000000000..7e7d52b2a2fc --- /dev/null +++ b/trunk/drivers/gpu/drm/shmobile/Kconfig @@ -0,0 +1,10 @@ +config DRM_SHMOBILE + tristate "DRM Support for SH Mobile" + depends on DRM && (SUPERH || ARCH_SHMOBILE) + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER + help + Choose this option if you have an SH Mobile chipset. + If M is selected the module will be called shmob-drm. + diff --git a/trunk/drivers/gpu/drm/shmobile/Makefile b/trunk/drivers/gpu/drm/shmobile/Makefile new file mode 100644 index 000000000000..4c3eeb355630 --- /dev/null +++ b/trunk/drivers/gpu/drm/shmobile/Makefile @@ -0,0 +1,7 @@ +shmob-drm-y := shmob_drm_backlight.o \ + shmob_drm_crtc.o \ + shmob_drm_drv.o \ + shmob_drm_kms.o \ + shmob_drm_plane.o + +obj-$(CONFIG_DRM_SHMOBILE) += shmob-drm.o diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.c new file mode 100644 index 000000000000..463aee18f774 --- /dev/null +++ b/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.c @@ -0,0 +1,90 @@ +/* + * shmob_drm_backlight.c -- SH Mobile DRM Backlight + * + * Copyright (C) 2012 Renesas Corporation + * + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/backlight.h> + +#include "shmob_drm_backlight.h" +#include "shmob_drm_crtc.h" +#include "shmob_drm_drv.h" + +static int shmob_drm_backlight_update(struct backlight_device *bdev) +{ + struct shmob_drm_connector *scon = bl_get_data(bdev); + struct shmob_drm_device *sdev = scon->connector.dev->dev_private; + const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; + int brightness = bdev->props.brightness; + + if (bdev->props.power != FB_BLANK_UNBLANK || + bdev->props.state & BL_CORE_SUSPENDED) + brightness = 0; + + return bdata->set_brightness(brightness); +} + +static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev) +{ + struct shmob_drm_connector *scon = bl_get_data(bdev); + struct shmob_drm_device *sdev = scon->connector.dev->dev_private; + const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; + + return bdata->get_brightness(); +} + +static const struct backlight_ops shmob_drm_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, + .update_status = shmob_drm_backlight_update, + .get_brightness = shmob_drm_backlight_get_brightness, +}; + +void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode) +{ + if (scon->backlight == NULL) + return; + + scon->backlight->props.power = mode == DRM_MODE_DPMS_ON + ? 
FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + backlight_update_status(scon->backlight); +} + +int shmob_drm_backlight_init(struct shmob_drm_connector *scon) +{ + struct shmob_drm_device *sdev = scon->connector.dev->dev_private; + const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight; + struct drm_connector *connector = &scon->connector; + struct drm_device *dev = connector->dev; + struct backlight_device *backlight; + + if (!bdata->max_brightness) + return 0; + + backlight = backlight_device_register(bdata->name, dev->dev, scon, + &shmob_drm_backlight_ops, NULL); + if (IS_ERR(backlight)) { + dev_err(dev->dev, "unable to register backlight device: %ld\n", + PTR_ERR(backlight)); + return PTR_ERR(backlight); + } + + backlight->props.max_brightness = bdata->max_brightness; + backlight->props.brightness = bdata->max_brightness; + backlight->props.power = FB_BLANK_POWERDOWN; + backlight_update_status(backlight); + + scon->backlight = backlight; + return 0; +} + +void shmob_drm_backlight_exit(struct shmob_drm_connector *scon) +{ + backlight_device_unregister(scon->backlight); +} diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.h new file mode 100644 index 000000000000..9477595d2ff3 --- /dev/null +++ b/trunk/drivers/gpu/drm/shmobile/shmob_drm_backlight.h @@ -0,0 +1,23 @@ +/* + * shmob_drm_backlight.h -- SH Mobile DRM Backlight + * + * Copyright (C) 2012 Renesas Corporation + * + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __SHMOB_DRM_BACKLIGHT_H__ +#define __SHMOB_DRM_BACKLIGHT_H__ + +struct shmob_drm_connector; + +void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode); +int shmob_drm_backlight_init(struct shmob_drm_connector *scon); +void shmob_drm_backlight_exit(struct shmob_drm_connector *scon); + +#endif /* __SHMOB_DRM_BACKLIGHT_H__ */ diff --git a/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c new file mode 100644 index 000000000000..0e7a9306bd0c --- /dev/null +++ b/trunk/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -0,0 +1,763 @@ +/* + * shmob_drm_crtc.c -- SH Mobile DRM CRTCs + * + * Copyright (C) 2012 Renesas Corporation + * + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include