From 171c5f641031c0eee424ba79a3db4736c32e18ca Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Sat, 21 Oct 2023 00:30:17 +0200 Subject: [PATCH 001/117] dt-bindings: display: ssd132x: Remove '-' before compatible enum This is a leftover from when the binding schema had the compatible string property enum as a 'oneOf' child and the '-' was not removed when 'oneOf' got dropped during the binding review process. Reported-by: Rob Herring Closes: https://lore.kernel.org/dri-devel/CAL_Jsq+h8DcnpKqhokQOODCc8+Qi3M0PrxRFKz_Y4v37yMJvvA@mail.gmail.com/ Signed-off-by: Javier Martinez Canillas Acked-by: Conor Dooley Reviewed-by: Rob Herring Link: https://patchwork.freedesktop.org/patch/msgid/20231020223029.1667190-1-javierm@redhat.com --- .../devicetree/bindings/display/solomon,ssd132x.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml b/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml index 0aa41bd9ddca4..37975ee61c5ad 100644 --- a/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml +++ b/Documentation/devicetree/bindings/display/solomon,ssd132x.yaml @@ -11,10 +11,10 @@ maintainers: properties: compatible: - - enum: - - solomon,ssd1322 - - solomon,ssd1325 - - solomon,ssd1327 + enum: + - solomon,ssd1322 + - solomon,ssd1325 + - solomon,ssd1327 required: - compatible From 9e4db199e66d427c50458f4d72734cc4f0b92948 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Sat, 21 Oct 2023 00:52:57 +0200 Subject: [PATCH 002/117] drm/ssd130x: Fix possible uninitialized usage of crtc_state variable Avoid a possible uninitialized use of the crtc_state variable in function ssd132x_primary_plane_atomic_check() and avoid the following Smatch warn: drivers/gpu/drm/solomon/ssd130x.c:921 ssd132x_primary_plane_atomic_check() error: uninitialized symbol 'crtc_state'. Fixes: fdd591e00a9c ("drm/ssd130x: Add support for the SSD132x OLED controller family") Reported-by: Dan Carpenter Closes: https://lore.kernel.org/dri-devel/7dd6ca45-8263-44fe-a318-2fd9d761425d@moroto.mountain/ Signed-off-by: Javier Martinez Canillas Acked-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20231020225338.1686974-1-javierm@redhat.com --- drivers/gpu/drm/solomon/ssd130x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index 32f0857aec9f1..e0174f82e3537 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -910,7 +910,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state); struct drm_crtc *crtc = plane_state->crtc; - struct drm_crtc_state *crtc_state; + struct drm_crtc_state *crtc_state = NULL; const struct drm_format_info *fi; unsigned int pitch; int ret; From 80683bf48afcdbebbaf51057e71b2701aa07826d Mon Sep 17 00:00:00 2001 From: Kunwu Chan Date: Fri, 27 Oct 2023 10:44:59 +0800 Subject: [PATCH 003/117] drm/atomic-helper: Fix spelling mistake "preceeding" -> "preceding" There is a typo in the kernel documentation for function drm_atomic_helper_wait_for_dependencies. Fix it. 
Signed-off-by: Kunwu Chan Signed-off-by: Hamza Mahfooz Link: https://patchwork.freedesktop.org/patch/msgid/20231027024459.12793-1-chentao@kylinos.cn --- drivers/gpu/drm/drm_atomic_helper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 71d3993971075..10aadd324cc37 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2373,10 +2373,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_helper_setup_commit); /** - * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits + * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits * @old_state: atomic state object with old state structures * - * This function waits for all preceeding commits that touch the same CRTC as + * This function waits for all preceding commits that touch the same CRTC as * @old_state to both be committed to the hardware (as signalled by * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event). From 0226ba393eb1a90d63955cc407340c5d506ecacf Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Fri, 20 Oct 2023 10:19:38 +0000 Subject: [PATCH 004/117] drm: extract closefb logic in separate function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_mode_rmfb performs two operations: drop the FB from the file_priv->fbs list, and make sure the FB is no longer used on a plane. In the next commit an IOCTL which only does so former will be introduced, so let's split it into a separate function. No functional change, only refactoring. 
v2: no change Signed-off-by: Simon Ser Cc: Hans de Goede Cc: Dennis Filder Cc: Daniel Vetter Cc: Pekka Paalanen Cc: Rob Clark Cc: Sean Paul Reviewed-by: Daniel Stone Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20231020101926.145327-1-contact@emersion.fr --- drivers/gpu/drm/drm_framebuffer.c | 53 ++++++++++++++++++------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index d3ba0698b84b4..ef3f1e4bc334a 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -394,6 +394,31 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w) } } +static int drm_mode_closefb(struct drm_framebuffer *fb, + struct drm_file *file_priv) +{ + struct drm_framebuffer *fbl; + bool found = false; + + mutex_lock(&file_priv->fbs_lock); + list_for_each_entry(fbl, &file_priv->fbs, filp_head) + if (fb == fbl) + found = true; + + if (!found) { + mutex_unlock(&file_priv->fbs_lock); + return -ENOENT; + } + + list_del_init(&fb->filp_head); + mutex_unlock(&file_priv->fbs_lock); + + /* Drop the reference that was stored in the fbs list */ + drm_framebuffer_put(fb); + + return 0; +} + /** * drm_mode_rmfb - remove an FB from the configuration * @dev: drm device @@ -410,9 +435,8 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w) int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, struct drm_file *file_priv) { - struct drm_framebuffer *fb = NULL; - struct drm_framebuffer *fbl = NULL; - int found = 0; + struct drm_framebuffer *fb; + int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; @@ -421,24 +445,13 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, if (!fb) return -ENOENT; - mutex_lock(&file_priv->fbs_lock); - list_for_each_entry(fbl, &file_priv->fbs, filp_head) - if (fb == fbl) - found = 1; - if (!found) { - mutex_unlock(&file_priv->fbs_lock); - goto fail_unref; + ret = drm_mode_closefb(fb, file_priv); + if (ret != 0) { + drm_framebuffer_put(fb); + return ret; } - list_del_init(&fb->filp_head); - mutex_unlock(&file_priv->fbs_lock); - - /* drop the reference we picked up in framebuffer lookup */ - drm_framebuffer_put(fb); - /* - * we now own the reference that was stored in the fbs list - * * drm_framebuffer_remove may fail with -EINTR on pending signals, * so run this in a separate stack as there's no way to correctly * handle this after the fb is already removed from the lookup table. @@ -457,10 +470,6 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id, drm_framebuffer_put(fb); return 0; - -fail_unref: - drm_framebuffer_put(fb); - return -ENOENT; } int drm_mode_rmfb_ioctl(struct drm_device *dev, From d208d875667e2a29beeec5d475f4b6b164b632fa Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Fri, 20 Oct 2023 10:19:45 +0000 Subject: [PATCH 005/117] drm: introduce CLOSEFB IOCTL This new IOCTL allows callers to close a framebuffer without disabling planes or CRTCs. This takes inspiration from Rob Clark's unref_fb IOCTL [1] and DRM_MODE_FB_PERSIST [2]. User-space patch for wlroots available at [3]. IGT test available at [4]. v2: add an extra pad field just in case we want to extend this IOCTL in the future (Pekka, Sima). 
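For illustration only (not part of the patch itself): a minimal user-space
sketch of the intended usage, assuming UAPI headers new enough to carry the
DRM_IOCTL_MODE_CLOSEFB and struct drm_mode_closefb definitions added below.
The closefb() helper name and header paths are illustrative; a real client
would typically go through libdrm's drmIoctl() wrapper so the call is
restarted on EINTR.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Drop our reference to fb_id without touching the planes/CRTCs that may
 * still be scanning it out. The kernel keeps the framebuffer alive until
 * the plane moves to another FB or is disabled, so the screen keeps its
 * last contents across the hand-off. */
static int closefb(int drm_fd, unsigned int fb_id)
{
	struct drm_mode_closefb arg;

	memset(&arg, 0, sizeof(arg));	/* pad must stay zero, else -EINVAL */
	arg.fb_id = fb_id;

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CLOSEFB, &arg) != 0) {
		perror("DRM_IOCTL_MODE_CLOSEFB");
		return -1;
	}
	return 0;
}
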
[1]: https://lore.kernel.org/dri-devel/20170509153654.23464-1-robdclark@gmail.com/ [2]: https://lore.kernel.org/dri-devel/20211006151921.312714-1-contact@emersion.fr/ [3]: https://gitlab.freedesktop.org/wlroots/wlroots/-/merge_requests/4394 [4]: https://lists.freedesktop.org/archives/igt-dev/2023-October/063294.html Signed-off-by: Simon Ser Reviewed-by: Daniel Stone Acked-by: Pekka Paalanen Cc: Hans de Goede Cc: Dennis Filder Cc: Daniel Vetter Cc: Rob Clark Cc: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20231020101926.145327-2-contact@emersion.fr --- drivers/gpu/drm/drm_crtc_internal.h | 2 ++ drivers/gpu/drm/drm_framebuffer.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/drm_ioctl.c | 1 + include/uapi/drm/drm.h | 20 ++++++++++++++++++++ include/uapi/drm/drm_mode.h | 10 ++++++++++ 5 files changed, 55 insertions(+) diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 8556c3b3ff88a..6b646e0783be9 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -222,6 +222,8 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_rmfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_mode_closefb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_getfb2_ioctl(struct drm_device *dev, diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index ef3f1e4bc334a..09e289fca5c31 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -480,6 +480,28 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev, return drm_mode_rmfb(dev, *fb_id, file_priv); } +int drm_mode_closefb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_closefb *r = data; + struct drm_framebuffer *fb; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EOPNOTSUPP; + + if (r->pad) + return -EINVAL; + + fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id); + if (!fb) + return -ENOENT; + + ret = drm_mode_closefb(fb, file_priv); + drm_framebuffer_put(fb); + return ret; +} + /** * drm_mode_getfb - get FB info * @dev: drm device for the ioctl diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 77590b0f38fa3..44fda68c28aeb 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -675,6 +675,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CLOSEFB, drm_mode_closefb_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0), diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index de723566c5ae8..8662b5aeea0c8 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -1218,6 +1218,26 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) +/** + * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer. + * + * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL + * argument is a framebuffer object ID. 
+ * + * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable + * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept + * alive. When the plane no longer uses the framebuffer (because the + * framebuffer is replaced with another one, or the plane is disabled), the + * framebuffer is cleaned up. + * + * This is useful to implement flicker-free transitions between two processes. + * + * Depending on the threat model, user-space may want to ensure that the + * framebuffer doesn't expose any sensitive user information: closed + * framebuffers attached to a plane can be read back by the next DRM master. + */ +#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb) + /* * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 128d09138ceb3..09e7a471ee30b 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -1323,6 +1323,16 @@ struct drm_mode_rect { __s32 y2; }; +/** + * struct drm_mode_closefb + * @fb_id: Framebuffer ID. + * @pad: Must be zero. + */ +struct drm_mode_closefb { + __u32 fb_id; + __u32 pad; +}; + #if defined(__cplusplus) } #endif From 88b02ebca8b6ea7457bed6809b1dd575420b7544 Mon Sep 17 00:00:00 2001 From: Simon Ser Date: Mon, 23 Oct 2023 20:36:39 +0000 Subject: [PATCH 006/117] drm/doc: describe PATH format for DP MST This is already uAPI, xserver parses it. It's useful to document since user-space might want to lookup the parent connector. Additionally, people (me included) have misunderstood the PATH property for being stable across reboots, but since a KMS object ID is baked in there that's not the case. So PATH shouldn't be used as-is in config files and such. Signed-off-by: Simon Ser Reviewed-by: Dmitry Baryshkov Acked-by: Pekka Paalanen Cc: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231023203629.198109-1-contact@emersion.fr --- drivers/gpu/drm/drm_connector.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index c3725086f4132..b0516505f7ae9 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1198,6 +1198,12 @@ static const u32 dp_colorspaces = * drm_connector_set_path_property(), in the case of DP MST with the * path property the MST manager created. Userspace cannot change this * property. + * + * In the case of DP MST, the property has the format + * ``mst:-`` where ```` is the KMS object ID of the + * parent connector and ```` is a hyphen-separated list of DP MST + * port numbers. Note, KMS object IDs are not guaranteed to be stable + * across reboots. * TILE: * Connector tile group property to indicate how a set of DRM connector * compose together into one logical screen. This is used by both high-res From bb8e97e26ce6437d2f57f37e8ba767a2b9cf0d65 Mon Sep 17 00:00:00 2001 From: Carl Vanderlip Date: Mon, 16 Oct 2023 11:00:36 -0600 Subject: [PATCH 007/117] accel/qaic: Enable 1 MSI fallback mode Several virtualization use-cases either don't support 32 MultiMSIs (Xen/VMware) or have significant drawbacks to their use (KVM's vIOMMU, which is required to support 32 MSI, needs to allocate an alternate system memory space for each device using vIOMMU (e.g. 8GB VM mem and 2 cards => 8 + 2 * 8 = 24GB host memory required)). Support these cases by enabling a 1 MSI fallback mode. 
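For reference, the heart of the fallback as implemented in init_msi() later
in this patch (condensed here, with the informational print trimmed):

	ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
	if (ret == -ENOSPC) {
		/* Host cannot provide 32 vectors; retry with a single MSI
		 * shared by MHI and all of the DBCs. */
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (ret < 0)
			return ret;
		qdev->single_msi = true;
	} else if (ret < 0) {
		return ret;
	}
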
Whenever all 32 MSIs requested are not available, a second request for a single MSI is made. Its success is the initiator of single MSI mode. This mode causes all interrupts generated by the device to be directed to the 0th MSI (firmware >=v1.10 will do this as a response to the PCIe MSI capability configuration). Likewise, all interrupt handlers for the device are registered to the 0th MSI. Since the DBC interrupt handler checks if the DBC is in use or if there is any pending changes, the 'spurious' interrupts are disregarded. If there is work to be done, the standard threaded IRQ handler is dispatched. On every interrupt, the MHI handler wakes up its threaded interrupt handler, and attempts to wake any waiters for MHI state events. Performance is within +-0.6% for test cases that typify real world use. Larger differences ([-4,+132]%, avg +47%) exist for very simple tasks (e.g. addition) compiled for single NSPs. It is assumed that the small work and many interrupts typically cause contention (e.g. 16 NSPs vs 4 CPUs), as evidenced by the standard deviation between runs also decreasing (r=-0.48 between delta(Performace_test) and delta(StdDev_test/Avg_test)) Signed-off-by: Carl Vanderlip Reviewed-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Signed-off-by: Jeffrey Hugo Reviewed-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231016170036.5409-1-quic_jhugo@quicinc.com --- Documentation/accel/qaic/aic100.rst | 5 +++-- Documentation/accel/qaic/qaic.rst | 23 +++++++++++++++++++ drivers/accel/qaic/mhi_controller.c | 6 ++++- drivers/accel/qaic/mhi_controller.h | 2 +- drivers/accel/qaic/qaic.h | 2 ++ drivers/accel/qaic/qaic_data.c | 25 +++++++++++++++------ drivers/accel/qaic/qaic_drv.c | 35 ++++++++++++++++++++--------- 7 files changed, 76 insertions(+), 22 deletions(-) diff --git a/Documentation/accel/qaic/aic100.rst b/Documentation/accel/qaic/aic100.rst index c80d0f1307dbb..a5fef0869aabc 100644 --- a/Documentation/accel/qaic/aic100.rst +++ b/Documentation/accel/qaic/aic100.rst @@ -36,8 +36,9 @@ AIC100 DID (0xa100). AIC100 does not implement FLR (function level reset). -AIC100 implements MSI but does not implement MSI-X. AIC100 requires 17 MSIs to -operate (1 for MHI, 16 for the DMA Bridge). +AIC100 implements MSI but does not implement MSI-X. AIC100 prefers 17 MSIs to +operate (1 for MHI, 16 for the DMA Bridge). Falling back to 1 MSI is possible in +scenarios where reserving 32 MSIs isn't feasible. As a PCIe device, AIC100 utilizes BARs to provide host interfaces to the device hardware. AIC100 provides 3, 64-bit BARs. diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst index c885023831367..9ccbfea86f5a7 100644 --- a/Documentation/accel/qaic/qaic.rst +++ b/Documentation/accel/qaic/qaic.rst @@ -10,6 +10,9 @@ accelerator products. Interrupts ========== +IRQ Storm Mitigation +-------------------- + While the AIC100 DMA Bridge hardware implements an IRQ storm mitigation mechanism, it is still possible for an IRQ storm to occur. A storm can happen if the workload is particularly quick, and the host is responsive. If the host @@ -35,6 +38,26 @@ generates 100k IRQs per second (per /proc/interrupts) is reduced to roughly 64 IRQs over 5 minutes while keeping the host system stable, and having the same workload throughput performance (within run to run noise variation). +Single MSI Mode +--------------- + +MultiMSI is not well supported on all systems; virtualized ones even less so +(circa 2023). 
Between hypervisors masking the PCIe MSI capability structure to +large memory requirements for vIOMMUs (required for supporting MultiMSI), it is +useful to be able to fall back to a single MSI when needed. + +To support this fallback, we allow the case where only one MSI is able to be +allocated, and share that one MSI between MHI and the DBCs. The device detects +when only one MSI has been configured and directs the interrupts for the DBCs +to the interrupt normally used for MHI. Unfortunately this means that the +interrupt handlers for every DBC and MHI wake up for every interrupt that +arrives; however, the DBC threaded irq handlers only are started when work to be +done is detected (MHI will always start its threaded handler). + +If the DBC is configured to force MSI interrupts, this can circumvent the +software IRQ storm mitigation mentioned above. Since the MSI is shared it is +never disabled, allowing each new entry to the FIFO to trigger a new interrupt. + Neural Network Control (NNC) Protocol ===================================== diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index 5036e58e7235b..41be3626587dc 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -468,7 +468,7 @@ static int mhi_reset_and_async_power_up(struct mhi_controller *mhi_cntrl) } struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar, - int mhi_irq) + int mhi_irq, bool shared_msi) { struct mhi_controller *mhi_cntrl; int ret; @@ -500,6 +500,10 @@ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, voi return ERR_PTR(-ENOMEM); mhi_cntrl->irq[0] = mhi_irq; + + if (shared_msi) /* MSI shared with data path, no IRQF_NO_SUSPEND */ + mhi_cntrl->irq_flags = IRQF_SHARED; + mhi_cntrl->fw_image = "qcom/aic100/sbl.bin"; /* use latest configured timeout */ diff --git a/drivers/accel/qaic/mhi_controller.h b/drivers/accel/qaic/mhi_controller.h index 2ae45d768e247..500e7f4af2afe 100644 --- a/drivers/accel/qaic/mhi_controller.h +++ b/drivers/accel/qaic/mhi_controller.h @@ -8,7 +8,7 @@ #define MHICONTROLLERQAIC_H_ struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar, - int mhi_irq); + int mhi_irq, bool shared_msi); void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up); void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl); void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl); diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index e3f4c30f3ffd2..01e9dda0cc378 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -123,6 +123,8 @@ struct qaic_device { struct srcu_struct dev_lock; /* true: Device under reset; false: Device not under reset */ bool in_reset; + /* true: single MSI is used to operate device */ + bool single_msi; /* * true: A tx MHI transaction has failed and a rx buffer is still queued * in control device. 
Such a buffer is considered lost rx buffer diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 4a8e43a7a6a40..ebc3cca1b0947 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -1466,6 +1466,16 @@ irqreturn_t dbc_irq_handler(int irq, void *data) rcu_id = srcu_read_lock(&dbc->ch_lock); + if (datapath_polling) { + srcu_read_unlock(&dbc->ch_lock, rcu_id); + /* + * Normally datapath_polling will not have irqs enabled, but + * when running with only one MSI the interrupt is shared with + * MHI so it cannot be disabled. Return ASAP instead. + */ + return IRQ_HANDLED; + } + if (!dbc->usr) { srcu_read_unlock(&dbc->ch_lock, rcu_id); return IRQ_HANDLED; @@ -1488,7 +1498,8 @@ irqreturn_t dbc_irq_handler(int irq, void *data) return IRQ_NONE; } - disable_irq_nosync(irq); + if (!dbc->qdev->single_msi) + disable_irq_nosync(irq); srcu_read_unlock(&dbc->ch_lock, rcu_id); return IRQ_WAKE_THREAD; } @@ -1559,12 +1570,12 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data) u32 tail; rcu_id = srcu_read_lock(&dbc->ch_lock); + qdev = dbc->qdev; head = readl(dbc->dbc_base + RSPHP_OFF); if (head == U32_MAX) /* PCI link error */ goto error_out; - qdev = dbc->qdev; read_fifo: if (!event_count) { @@ -1645,14 +1656,14 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data) goto read_fifo; normal_out: - if (likely(!datapath_polling)) + if (!qdev->single_msi && likely(!datapath_polling)) enable_irq(irq); - else + else if (unlikely(datapath_polling)) schedule_work(&dbc->poll_work); /* checking the fifo and enabling irqs is a race, missed event check */ tail = readl(dbc->dbc_base + RSPTP_OFF); if (tail != U32_MAX && head != tail) { - if (likely(!datapath_polling)) + if (!qdev->single_msi && likely(!datapath_polling)) disable_irq_nosync(irq); goto read_fifo; } @@ -1661,9 +1672,9 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data) error_out: srcu_read_unlock(&dbc->ch_lock, rcu_id); - if (likely(!datapath_polling)) + if (!qdev->single_msi && likely(!datapath_polling)) enable_irq(irq); - else + else if (unlikely(datapath_polling)) schedule_work(&dbc->poll_work); return IRQ_HANDLED; diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 6f58095767df6..5eba5f03d3bf7 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -424,14 +424,24 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev) int i; /* Managed release since we use pcim_enable_device */ - ret = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); - if (ret < 0) - return ret; + ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI); + if (ret == -ENOSPC) { + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) + return ret; - if (ret < 32) { - pci_err(pdev, "%s: Requested 32 MSIs. Obtained %d MSIs which is less than the 32 required.\n", - __func__, ret); - return -ENODEV; + /* + * Operate in one MSI mode. All interrupts will be directed to + * MSI0; every interrupt will wake up all the interrupt handlers + * (MHI and DBC[0-15]). Since the interrupt is now shared, it is + * not disabled during DBC threaded handler, but only one thread + * will be allowed to run per DBC, so while it can be + * interrupted, it shouldn't race with itself. + */ + qdev->single_msi = true; + pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. 
Performance may be impacted.\n"); + } else if (ret < 0) { + return ret; } mhi_irq = pci_irq_vector(pdev, 0); @@ -439,15 +449,17 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev) return mhi_irq; for (i = 0; i < qdev->num_dbc; ++i) { - ret = devm_request_threaded_irq(&pdev->dev, pci_irq_vector(pdev, i + 1), + ret = devm_request_threaded_irq(&pdev->dev, + pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1), dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED, "qaic_dbc", &qdev->dbc[i]); if (ret) return ret; if (datapath_polling) { - qdev->dbc[i].irq = pci_irq_vector(pdev, i + 1); - disable_irq_nosync(qdev->dbc[i].irq); + qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1); + if (!qdev->single_msi) + disable_irq_nosync(qdev->dbc[i].irq); INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work); } } @@ -479,7 +491,8 @@ static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto cleanup_qdev; } - qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq); + qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq, + qdev->single_msi); if (IS_ERR(qdev->mhi_cntrl)) { ret = PTR_ERR(qdev->mhi_cntrl); goto cleanup_qdev; From 6216fb03f8bd0b1439e9e799a306bafd5b462622 Mon Sep 17 00:00:00 2001 From: Ajit Pal Singh Date: Mon, 16 Oct 2023 11:01:13 -0600 Subject: [PATCH 008/117] accel/qaic: Add support for periodic timesync Device and Host have a time synchronization mechanism that happens once during boot when device is in SBL mode. After that, in mission-mode there is no timesync. In an experiment after continuous operation, device time drifted w.r.t. host by approximately 3 seconds per day. This drift leads to mismatch in timestamp of device and Host logs. To correct this implement periodic timesync in driver. This timesync is carried out via QAIC_TIMESYNC_PERIODIC MHI channel. Signed-off-by: Ajit Pal Singh Signed-off-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Reviewed-by: Carl Vanderlip Reviewed-by: Pranjal Ramajor Asha Kanojiya Signed-off-by: Jeffrey Hugo Reviewed-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231016170114.5446-2-quic_jhugo@quicinc.com --- Documentation/accel/qaic/aic100.rst | 4 + Documentation/accel/qaic/qaic.rst | 5 + drivers/accel/qaic/Makefile | 3 +- drivers/accel/qaic/mhi_controller.c | 32 ++++ drivers/accel/qaic/qaic_drv.c | 6 + drivers/accel/qaic/qaic_timesync.c | 245 ++++++++++++++++++++++++++++ drivers/accel/qaic/qaic_timesync.h | 11 ++ 7 files changed, 305 insertions(+), 1 deletion(-) create mode 100644 drivers/accel/qaic/qaic_timesync.c create mode 100644 drivers/accel/qaic/qaic_timesync.h diff --git a/Documentation/accel/qaic/aic100.rst b/Documentation/accel/qaic/aic100.rst index a5fef0869aabc..2ed32be7ae251 100644 --- a/Documentation/accel/qaic/aic100.rst +++ b/Documentation/accel/qaic/aic100.rst @@ -225,6 +225,10 @@ of the defined channels, and their uses. | | | | device side logs with the host time | | | | | source. | +----------------+---------+----------+----------------------------------------+ +| QAIC_TIMESYNC | 22 & 23 | AMSS | Used to periodically synchronize | +| _PERIODIC | | | timestamps in the device side logs with| +| | | | the host time source. 
| ++----------------+---------+----------+----------------------------------------+ DMA Bridge ========== diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst index 9ccbfea86f5a7..f81020736ebfb 100644 --- a/Documentation/accel/qaic/qaic.rst +++ b/Documentation/accel/qaic/qaic.rst @@ -201,3 +201,8 @@ overrides this for that call. Default is 5000 (5 seconds). Sets the polling interval in microseconds (us) when datapath polling is active. Takes effect at the next polling interval. Default is 100 (100 us). + +**timesync_delay_ms (unsigned int)** + +Sets the time interval in milliseconds (ms) between two consecutive timesync +operations. Default is 1000 (1000 ms). diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile index 2418418f7a505..3f7f6dfde7f2c 100644 --- a/drivers/accel/qaic/Makefile +++ b/drivers/accel/qaic/Makefile @@ -9,4 +9,5 @@ qaic-y := \ mhi_controller.o \ qaic_control.o \ qaic_data.o \ - qaic_drv.o + qaic_drv.o \ + qaic_timesync.o diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index 41be3626587dc..705898777ec52 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -373,6 +373,38 @@ static struct mhi_channel_config aic100_channels[] = { .auto_queue = false, .wake_capable = false, }, + { + .name = "QAIC_TIMESYNC_PERIODIC", + .num = 22, + .num_elements = 32, + .local_elements = 0, + .event_ring = 0, + .dir = DMA_TO_DEVICE, + .ee_mask = MHI_CH_EE_AMSS, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + .wake_capable = false, + }, + { + .num = 23, + .name = "QAIC_TIMESYNC_PERIODIC", + .num_elements = 32, + .local_elements = 0, + .event_ring = 0, + .dir = DMA_FROM_DEVICE, + .ee_mask = MHI_CH_EE_AMSS, + .pollcfg = 0, + .doorbell = MHI_DB_BRST_DISABLE, + .lpm_notify = false, + .offload_channel = false, + .doorbell_mode_switch = false, + .auto_queue = false, + .wake_capable = false, + }, }; static struct mhi_event_config aic100_events[] = { diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 5eba5f03d3bf7..08d940d3da466 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -27,6 +27,7 @@ #include "mhi_controller.h" #include "qaic.h" +#include "qaic_timesync.h" MODULE_IMPORT_NS(DMA_BUF); @@ -599,6 +600,10 @@ static int __init qaic_init(void) goto free_pci; } + ret = qaic_timesync_init(); + if (ret) + pr_debug("qaic: qaic_timesync_init failed %d\n", ret); + return 0; free_pci: @@ -624,6 +629,7 @@ static void __exit qaic_exit(void) * reinitializing the link_up state after the cleanup is done. */ link_up = true; + qaic_timesync_deinit(); mhi_driver_unregister(&qaic_mhi_driver); pci_unregister_driver(&qaic_pci_driver); } diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c new file mode 100644 index 0000000000000..769250272c210 --- /dev/null +++ b/drivers/accel/qaic/qaic_timesync.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qaic.h" +#include "qaic_timesync.h" + +#define QTIMER_REG_OFFSET 0xa28 +#define QAIC_TIMESYNC_SIGNATURE 0x55aa +#define QAIC_CONV_QTIMER_TO_US(qtimer) (mul_u64_u32_div(qtimer, 10, 192)) + +static unsigned int timesync_delay_ms = 1000; /* 1 sec default */ +module_param(timesync_delay_ms, uint, 0600); +MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations"); + +enum qts_msg_type { + QAIC_TS_SYNC_REQ = 1, + QAIC_TS_ACK_TO_HOST, + QAIC_TS_MSG_TYPE_MAX +}; + +/** + * struct qts_hdr - Timesync message header structure. + * @signature: Unique signature to identify the timesync message. + * @reserved_1: Reserved for future use. + * @reserved_2: Reserved for future use. + * @msg_type: sub-type of the timesync message. + * @reserved_3: Reserved for future use. + */ +struct qts_hdr { + __le16 signature; + __le16 reserved_1; + u8 reserved_2; + u8 msg_type; + __le16 reserved_3; +} __packed; + +/** + * struct qts_timeval - Structure to carry time information. + * @tv_sec: Seconds part of the time. + * @tv_usec: uS (microseconds) part of the time. + */ +struct qts_timeval { + __le64 tv_sec; + __le64 tv_usec; +} __packed; + +/** + * struct qts_host_time_sync_msg_data - Structure to denote the timesync message. + * @header: Header of the timesync message. + * @data: Time information. + */ +struct qts_host_time_sync_msg_data { + struct qts_hdr header; + struct qts_timeval data; +} __packed; + +/** + * struct mqts_dev - MHI QAIC Timesync Control device. + * @qdev: Pointer to the root device struct driven by QAIC driver. + * @mhi_dev: Pointer to associated MHI device. + * @timer: Timer handle used for timesync. + * @qtimer_addr: Device QTimer register pointer. + * @buff_in_use: atomic variable to track if the sync_msg buffer is in use. + * @dev: Device pointer to qdev->pdev->dev stored for easy access. + * @sync_msg: Buffer used to send timesync message over MHI. 
+ */ +struct mqts_dev { + struct qaic_device *qdev; + struct mhi_device *mhi_dev; + struct timer_list timer; + void __iomem *qtimer_addr; + atomic_t buff_in_use; + struct device *dev; + struct qts_host_time_sync_msg_data *sync_msg; +}; + +#ifdef readq +static u64 read_qtimer(const volatile void __iomem *addr) +{ + return readq(addr); +} +#else +static u64 read_qtimer(const volatile void __iomem *addr) +{ + u64 low, high; + + low = readl(addr); + high = readl(addr + sizeof(u32)); + return low | (high << 32); +} +#endif + +static void qaic_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); + + dev_dbg(mqtsdev->dev, "%s status: %d xfer_len: %zu\n", __func__, + mhi_result->transaction_status, mhi_result->bytes_xferd); + + atomic_set(&mqtsdev->buff_in_use, 0); +} + +static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); + + dev_err(mqtsdev->dev, "%s no data expected on dl channel\n", __func__); +} + +static void qaic_timesync_timer(struct timer_list *t) +{ + struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer); + struct qts_host_time_sync_msg_data *sync_msg; + u64 device_qtimer_us; + u64 device_qtimer; + u64 host_time_us; + u64 offset_us; + u64 host_sec; + int ret; + + if (atomic_read(&mqtsdev->buff_in_use)) { + dev_dbg(mqtsdev->dev, "%s buffer not free, schedule next cycle\n", __func__); + goto mod_timer; + } + atomic_set(&mqtsdev->buff_in_use, 1); + + sync_msg = mqtsdev->sync_msg; + sync_msg->header.signature = cpu_to_le16(QAIC_TIMESYNC_SIGNATURE); + sync_msg->header.msg_type = QAIC_TS_SYNC_REQ; + /* Read host UTC time and convert to uS*/ + host_time_us = div_u64(ktime_get_real_ns(), NSEC_PER_USEC); + device_qtimer = read_qtimer(mqtsdev->qtimer_addr); + device_qtimer_us = QAIC_CONV_QTIMER_TO_US(device_qtimer); + /* Offset between host UTC and device time */ + offset_us = host_time_us - device_qtimer_us; + + host_sec = div_u64(offset_us, USEC_PER_SEC); + sync_msg->data.tv_usec = cpu_to_le64(offset_us - host_sec * USEC_PER_SEC); + sync_msg->data.tv_sec = cpu_to_le64(host_sec); + ret = mhi_queue_buf(mqtsdev->mhi_dev, DMA_TO_DEVICE, sync_msg, sizeof(*sync_msg), MHI_EOT); + if (ret && (ret != -EAGAIN)) { + dev_err(mqtsdev->dev, "%s unable to queue to mhi:%d\n", __func__, ret); + return; + } else if (ret == -EAGAIN) { + atomic_set(&mqtsdev->buff_in_use, 0); + } + +mod_timer: + ret = mod_timer(t, jiffies + msecs_to_jiffies(timesync_delay_ms)); + if (ret) + dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret); +} + +static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); + struct mqts_dev *mqtsdev; + struct timer_list *timer; + int ret; + + mqtsdev = kzalloc(sizeof(*mqtsdev), GFP_KERNEL); + if (!mqtsdev) { + ret = -ENOMEM; + goto out; + } + + timer = &mqtsdev->timer; + mqtsdev->mhi_dev = mhi_dev; + mqtsdev->qdev = qdev; + mqtsdev->dev = &qdev->pdev->dev; + + mqtsdev->sync_msg = kzalloc(sizeof(*mqtsdev->sync_msg), GFP_KERNEL); + if (!mqtsdev->sync_msg) { + ret = -ENOMEM; + goto free_mqts_dev; + } + atomic_set(&mqtsdev->buff_in_use, 0); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) + goto free_sync_msg; + + /* Qtimer register pointer */ + mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET; + timer_setup(timer, qaic_timesync_timer, 0); + timer->expires = 
jiffies + msecs_to_jiffies(timesync_delay_ms); + add_timer(timer); + dev_set_drvdata(&mhi_dev->dev, mqtsdev); + + return 0; + +free_sync_msg: + kfree(mqtsdev->sync_msg); +free_mqts_dev: + kfree(mqtsdev); +out: + return ret; +}; + +static void qaic_timesync_remove(struct mhi_device *mhi_dev) +{ + struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev); + + del_timer_sync(&mqtsdev->timer); + mhi_unprepare_from_transfer(mqtsdev->mhi_dev); + kfree(mqtsdev->sync_msg); + kfree(mqtsdev); +} + +static const struct mhi_device_id qaic_timesync_match_table[] = { + { .chan = "QAIC_TIMESYNC_PERIODIC"}, + {}, +}; + +MODULE_DEVICE_TABLE(mhi, qaic_timesync_match_table); + +static struct mhi_driver qaic_timesync_driver = { + .id_table = qaic_timesync_match_table, + .remove = qaic_timesync_remove, + .probe = qaic_timesync_probe, + .ul_xfer_cb = qaic_timesync_ul_xfer_cb, + .dl_xfer_cb = qaic_timesync_dl_xfer_cb, + .driver = { + .name = "qaic_timesync_periodic", + }, +}; + +int qaic_timesync_init(void) +{ + return mhi_driver_register(&qaic_timesync_driver); +} + +void qaic_timesync_deinit(void) +{ + mhi_driver_unregister(&qaic_timesync_driver); +} diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h new file mode 100644 index 0000000000000..851b7acd43bbb --- /dev/null +++ b/drivers/accel/qaic/qaic_timesync.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __QAIC_TIMESYNC_H__ +#define __QAIC_TIMESYNC_H__ + +int qaic_timesync_init(void); +void qaic_timesync_deinit(void); +#endif /* __QAIC_TIMESYNC_H__ */ From 41cfbaa47fd7d94df5faf1c416801600f26270d8 Mon Sep 17 00:00:00 2001 From: Pranjal Ramajor Asha Kanojiya Date: Mon, 16 Oct 2023 11:01:14 -0600 Subject: [PATCH 009/117] accel/qaic: Support MHI QAIC_TIMESYNC channel Use QAIC_TIMESYNC MHI channel to send UTC time to device in SBL environment. Remove support for QAIC_TIMESYNC MHI channel in AMSS environment as it is not used in that environment. Signed-off-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Reviewed-by: Carl Vanderlip Signed-off-by: Jeffrey Hugo Reviewed-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231016170114.5446-3-quic_jhugo@quicinc.com --- Documentation/accel/qaic/aic100.rst | 2 +- drivers/accel/qaic/mhi_controller.c | 4 +- drivers/accel/qaic/qaic.h | 4 + drivers/accel/qaic/qaic_drv.c | 7 ++ drivers/accel/qaic/qaic_timesync.c | 154 +++++++++++++++++++++++++++- 5 files changed, 166 insertions(+), 5 deletions(-) diff --git a/Documentation/accel/qaic/aic100.rst b/Documentation/accel/qaic/aic100.rst index 2ed32be7ae251..590dae77ea124 100644 --- a/Documentation/accel/qaic/aic100.rst +++ b/Documentation/accel/qaic/aic100.rst @@ -221,7 +221,7 @@ of the defined channels, and their uses. +----------------+---------+----------+----------------------------------------+ | QAIC_DEBUG | 18 & 19 | AMSS | Not used. | +----------------+---------+----------+----------------------------------------+ -| QAIC_TIMESYNC | 20 & 21 | SBL/AMSS | Used to synchronize timestamps in the | +| QAIC_TIMESYNC | 20 & 21 | SBL | Used to synchronize timestamps in the | | | | | device side logs with the host time | | | | | source. 
| +----------------+---------+----------+----------------------------------------+ diff --git a/drivers/accel/qaic/mhi_controller.c b/drivers/accel/qaic/mhi_controller.c index 705898777ec52..5d3cc30009cce 100644 --- a/drivers/accel/qaic/mhi_controller.c +++ b/drivers/accel/qaic/mhi_controller.c @@ -348,7 +348,7 @@ static struct mhi_channel_config aic100_channels[] = { .local_elements = 0, .event_ring = 0, .dir = DMA_TO_DEVICE, - .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS, + .ee_mask = MHI_CH_EE_SBL, .pollcfg = 0, .doorbell = MHI_DB_BRST_DISABLE, .lpm_notify = false, @@ -364,7 +364,7 @@ static struct mhi_channel_config aic100_channels[] = { .local_elements = 0, .event_ring = 0, .dir = DMA_FROM_DEVICE, - .ee_mask = MHI_CH_EE_SBL | MHI_CH_EE_AMSS, + .ee_mask = MHI_CH_EE_SBL, .pollcfg = 0, .doorbell = MHI_DB_BRST_DISABLE, .lpm_notify = false, diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index 01e9dda0cc378..bc40d52dc0104 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -139,6 +139,10 @@ struct qaic_device { u32 (*gen_crc)(void *msg); /* Validate the CRC of a control message */ bool (*valid_crc)(void *msg); + /* MHI "QAIC_TIMESYNC" channel device */ + struct mhi_device *qts_ch; + /* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */ + struct workqueue_struct *qts_wq; }; struct qaic_drm_device { diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index 08d940d3da466..b12226385003d 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -325,6 +325,7 @@ static void cleanup_qdev(struct qaic_device *qdev) cleanup_srcu_struct(&qdev->dev_lock); pci_set_drvdata(qdev->pdev, NULL); destroy_workqueue(qdev->cntl_wq); + destroy_workqueue(qdev->qts_wq); } static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id) @@ -348,6 +349,12 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de if (!qdev->cntl_wq) return NULL; + qdev->qts_wq = alloc_workqueue("qaic_ts", WQ_UNBOUND, 0); + if (!qdev->qts_wq) { + destroy_workqueue(qdev->cntl_wq); + return NULL; + } + pci_set_drvdata(pdev, qdev); qdev->pdev = pdev; diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c index 769250272c210..301f4462d51bd 100644 --- a/drivers/accel/qaic/qaic_timesync.c +++ b/drivers/accel/qaic/qaic_timesync.c @@ -22,7 +22,8 @@ module_param(timesync_delay_ms, uint, 0600); MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations"); enum qts_msg_type { - QAIC_TS_SYNC_REQ = 1, + QAIC_TS_CMD_TO_HOST, + QAIC_TS_SYNC_REQ, QAIC_TS_ACK_TO_HOST, QAIC_TS_MSG_TYPE_MAX }; @@ -83,6 +84,16 @@ struct mqts_dev { struct qts_host_time_sync_msg_data *sync_msg; }; +struct qts_resp_msg { + struct qts_hdr hdr; +} __packed; + +struct qts_resp { + struct qts_resp_msg data; + struct work_struct work; + struct qaic_device *qdev; +}; + #ifdef readq static u64 read_qtimer(const volatile void __iomem *addr) { @@ -234,12 +245,151 @@ static struct mhi_driver qaic_timesync_driver = { }, }; +static void qaic_boot_timesync_worker(struct work_struct *work) +{ + struct qts_resp *resp = container_of(work, struct qts_resp, work); + struct qts_host_time_sync_msg_data *req; + struct qts_resp_msg data = resp->data; + struct qaic_device *qdev = resp->qdev; + struct mhi_device *mhi_dev; + struct timespec64 ts; + int ret; + + mhi_dev = qdev->qts_ch; + /* Queue the response message beforehand to avoid race conditions */ + ret = mhi_queue_buf(mhi_dev, 
DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT); + if (ret) { + kfree(resp); + dev_warn(&mhi_dev->dev, "Failed to re-queue response buffer %d\n", ret); + return; + } + + switch (data.hdr.msg_type) { + case QAIC_TS_CMD_TO_HOST: + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + break; + + req->header = data.hdr; + req->header.msg_type = QAIC_TS_SYNC_REQ; + ktime_get_real_ts64(&ts); + req->data.tv_sec = cpu_to_le64(ts.tv_sec); + req->data.tv_usec = cpu_to_le64(div_u64(ts.tv_nsec, NSEC_PER_USEC)); + + ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, req, sizeof(*req), MHI_EOT); + if (ret) { + kfree(req); + dev_dbg(&mhi_dev->dev, "Failed to send request message. Error %d\n", ret); + } + break; + case QAIC_TS_ACK_TO_HOST: + dev_dbg(&mhi_dev->dev, "ACK received from device\n"); + break; + default: + dev_err(&mhi_dev->dev, "Invalid message type %u.\n", data.hdr.msg_type); + } +} + +static int qaic_boot_timesync_queue_resp(struct mhi_device *mhi_dev, struct qaic_device *qdev) +{ + struct qts_resp *resp; + int ret; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + resp->qdev = qdev; + INIT_WORK(&resp->work, qaic_boot_timesync_worker); + + ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT); + if (ret) { + kfree(resp); + dev_warn(&mhi_dev->dev, "Failed to queue response buffer %d\n", ret); + return ret; + } + + return 0; +} + +static void qaic_boot_timesync_remove(struct mhi_device *mhi_dev) +{ + struct qaic_device *qdev; + + qdev = dev_get_drvdata(&mhi_dev->dev); + mhi_unprepare_from_transfer(qdev->qts_ch); + qdev->qts_ch = NULL; +} + +static int qaic_boot_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) +{ + struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev)); + int ret; + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) + return ret; + + qdev->qts_ch = mhi_dev; + dev_set_drvdata(&mhi_dev->dev, qdev); + + ret = qaic_boot_timesync_queue_resp(mhi_dev, qdev); + if (ret) { + dev_set_drvdata(&mhi_dev->dev, NULL); + qdev->qts_ch = NULL; + mhi_unprepare_from_transfer(mhi_dev); + } + + return ret; +} + +static void qaic_boot_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + kfree(mhi_result->buf_addr); +} + +static void qaic_boot_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) +{ + struct qts_resp *resp = container_of(mhi_result->buf_addr, struct qts_resp, data); + + if (mhi_result->transaction_status || mhi_result->bytes_xferd != sizeof(resp->data)) { + kfree(resp); + return; + } + + queue_work(resp->qdev->qts_wq, &resp->work); +} + +static const struct mhi_device_id qaic_boot_timesync_match_table[] = { + { .chan = "QAIC_TIMESYNC"}, + {}, +}; + +static struct mhi_driver qaic_boot_timesync_driver = { + .id_table = qaic_boot_timesync_match_table, + .remove = qaic_boot_timesync_remove, + .probe = qaic_boot_timesync_probe, + .ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb, + .dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb, + .driver = { + .name = "qaic_timesync", + }, +}; + int qaic_timesync_init(void) { - return mhi_driver_register(&qaic_timesync_driver); + int ret; + + ret = mhi_driver_register(&qaic_timesync_driver); + if (ret) + return ret; + ret = mhi_driver_register(&qaic_boot_timesync_driver); + + return ret; } void qaic_timesync_deinit(void) { + mhi_driver_unregister(&qaic_boot_timesync_driver); mhi_driver_unregister(&qaic_timesync_driver); } From 3db2420422a5912d97966e0176050bb0fc9aa63e Mon Sep 17 
00:00:00 2001 From: Sheng-Liang Pan Date: Fri, 27 Oct 2023 11:04:56 +0800 Subject: [PATCH 010/117] drm/panel-edp: Add AUO B116XTN02, BOE NT116WHM-N21,836X2, NV116WHM-N49 V8.0 Add panel identification entry for - AUO B116XTN02 family (product ID:0x235c) - BOE NT116WHM-N21,836X2 (product ID:0x09c3) - BOE NV116WHM-N49 V8.0 (product ID:0x0979) Signed-off-by: Sheng-Liang Pan Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231027110435.1.Ia01fe9ec1c0953e0050a232eaa782fef2c037516@changeid --- drivers/gpu/drm/panel/panel-edp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index feb665df35a1d..9dce4c7024142 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1869,6 +1869,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), @@ -1877,8 +1878,10 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), From 1470acbef122c7e2e588f6346ce459c26d0568a2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 27 Oct 2023 17:26:23 +0200 Subject: [PATCH 011/117] accel/ivpu: avoid build failure with CONFIG_PM=n The usage count of struct dev_pm_info is an implementation detail that is only available if CONFIG_PM is enabled, so printing it in a debug message causes a build failure in configurations without PM: In file included from include/linux/device.h:15, from include/linux/pci.h:37, from drivers/accel/ivpu/ivpu_pm.c:8: drivers/accel/ivpu/ivpu_pm.c: In function 'ivpu_rpm_get_if_active': drivers/accel/ivpu/ivpu_pm.c:254:51: error: 'struct dev_pm_info' has no member named 'usage_count' 254 | atomic_read(&vdev->drm.dev->power.usage_count)); | ^ include/linux/dev_printk.h:129:48: note: in definition of macro 'dev_printk' 129 | _dev_printk(level, dev, fmt, ##__VA_ARGS__); \ | ^~~~~~~~~~~ drivers/accel/ivpu/ivpu_drv.h:75:17: note: in expansion of macro 'dev_dbg' 75 | dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args); \ | ^~~~~~~ drivers/accel/ivpu/ivpu_pm.c:253:9: note: in expansion of macro 'ivpu_dbg' 253 | ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n", | ^~~~~~~~ The print message does not seem essential, so the easiest workaround is to just remove it. 
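For context (paraphrased and simplified, not part of this patch): the field
the removed dev_dbg() was reading only exists in builds with power
management enabled, roughly:

	/* include/linux/pm.h, simplified sketch */
	struct dev_pm_info {
		/* ... fields present in every configuration ... */
	#ifdef CONFIG_PM
		/* ... runtime PM bookkeeping ... */
		atomic_t	usage_count;
		/* ... */
	#endif
	};

so an unconditional read of power.usage_count from driver code cannot build
with CONFIG_PM=n.
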
Fixes: c39dc15191c4 ("accel/ivpu: Read clock rate only if device is up") Signed-off-by: Arnd Bergmann Reviewed-by: Stanislaw Gruszka Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231027152633.528490-1-arnd@kernel.org --- drivers/accel/ivpu/ivpu_pm.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index d14b6fd796b4e..568e4295dc56b 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -250,9 +250,6 @@ int ivpu_rpm_get_if_active(struct ivpu_device *vdev) { int ret; - ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n", - atomic_read(&vdev->drm.dev->power.usage_count)); - ret = pm_runtime_get_if_active(vdev->drm.dev, false); drm_WARN_ON(&vdev->drm, ret < 0); From 8c63b47412ad3f015db47b380916722299511cc3 Mon Sep 17 00:00:00 2001 From: Krystian Pradzynski Date: Sat, 28 Oct 2023 15:34:05 +0200 Subject: [PATCH 012/117] accel/ivpu: Update FW API Bump boot API to 4.20 Bump JSM API to 3.15 Signed-off-by: Krystian Pradzynski Reviewed-by: Stanislaw Gruszka Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-2-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_jsm_msg.c | 17 ++ drivers/accel/ivpu/vpu_boot_api.h | 90 ++++++++- drivers/accel/ivpu/vpu_jsm_api.h | 309 ++++++++++++++++++++++++++++-- 3 files changed, 392 insertions(+), 24 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c index 0c2fe7142024c..35a689475c68d 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.c +++ b/drivers/accel/ivpu/ivpu_jsm_msg.c @@ -36,6 +36,17 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type) IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE); IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES); IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME); + IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP); + IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP); IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT); IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL); IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE); @@ -65,6 +76,12 @@ const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type) IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP); IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE); IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP); + IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER); + IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE); + IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE); } #undef IVPU_CASE_TO_STR diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h index 6b71be92ba653..04c954258563a 100644 --- a/drivers/accel/ivpu/vpu_boot_api.h +++ b/drivers/accel/ivpu/vpu_boot_api.h @@ -11,7 +11,10 @@ * The bellow values will be used to construct the version info this way: * fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) | 
* VPU_BOOT_API_VER_MINOR; - * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes. + * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes + * This information is collected by using vpuip_2/application/vpuFirmware/make_std_fw_image.py + * If a header is missing this info we ignore the header, if a header is missing or contains + * partial info a build error will be generated. */ /* @@ -24,12 +27,12 @@ * Minor version changes when API backward compatibility is preserved. * Resets to 0 if Major version is incremented. */ -#define VPU_BOOT_API_VER_MINOR 12 +#define VPU_BOOT_API_VER_MINOR 20 /* * API header changed (field names, documentation, formatting) but API itself has not been changed */ -#define VPU_BOOT_API_VER_PATCH 2 +#define VPU_BOOT_API_VER_PATCH 4 /* * Index in the API version table @@ -63,6 +66,12 @@ struct vpu_firmware_header { /* Size of memory require for firmware execution */ u32 runtime_size; u32 shave_nn_fw_size; + /* Size of primary preemption buffer. */ + u32 preemption_buffer_1_size; + /* Size of secondary preemption buffer. */ + u32 preemption_buffer_2_size; + /* Space reserved for future preemption-related fields. */ + u32 preemption_reserved[6]; }; /* @@ -89,6 +98,14 @@ enum VPU_BOOT_L2_CACHE_CFG_TYPE { VPU_BOOT_L2_CACHE_CFG_NUM = 2 }; +/** VPU MCA ECC signalling mode. By default, no signalling is used */ +enum VPU_BOOT_MCA_ECC_SIGNAL_TYPE { + VPU_BOOT_MCA_ECC_NONE = 0, + VPU_BOOT_MCA_ECC_CORR = 1, + VPU_BOOT_MCA_ECC_FATAL = 2, + VPU_BOOT_MCA_ECC_BOTH = 3 +}; + /** * Logging destinations. * @@ -131,9 +148,11 @@ enum vpu_trace_destination { #define VPU_TRACE_PROC_BIT_ACT_SHV_3 22 #define VPU_TRACE_PROC_NO_OF_HW_DEVS 23 -/* KMB HW component IDs are sequential, so define first and last IDs. */ -#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_LRT -#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_SHV_15 +/* VPU 30xx HW component IDs are sequential, so define first and last IDs. */ +#define VPU_TRACE_PROC_BIT_30XX_FIRST VPU_TRACE_PROC_BIT_LRT +#define VPU_TRACE_PROC_BIT_30XX_LAST VPU_TRACE_PROC_BIT_SHV_15 +#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_30XX_FIRST +#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_30XX_LAST struct vpu_boot_l2_cache_config { u8 use; @@ -148,6 +167,25 @@ struct vpu_warm_boot_section { u32 is_clear_op; }; +/* + * When HW scheduling mode is enabled, a present period is defined. + * It will be used by VPU to swap between normal and focus priorities + * to prevent starving of normal priority band (when implemented). + * Host must provide a valid value at boot time in + * `vpu_focus_present_timer_ms`. If the value provided by the host is not within the + * defined range a default value will be used. Here we define the min. and max. + * allowed values and the and default value of the present period. Units are milliseconds. + */ +#define VPU_PRESENT_CALL_PERIOD_MS_DEFAULT 50 +#define VPU_PRESENT_CALL_PERIOD_MS_MIN 16 +#define VPU_PRESENT_CALL_PERIOD_MS_MAX 10000 + +/** + * Macros to enable various operation modes within the VPU. + * To be defined as part of 32 bit mask. + */ +#define VPU_OP_MODE_SURVIVABILITY 0x1 + struct vpu_boot_params { u32 magic; u32 vpu_id; @@ -218,6 +256,7 @@ struct vpu_boot_params { * the threshold will not be logged); applies to every enabled logging * destination and loggable HW component. See 'mvLog_t' enum for acceptable * values. + * TODO: EISW-33556: Move log level definition (mvLog_t) to this file. 
*/ u32 default_trace_level; u32 boot_type; @@ -249,7 +288,36 @@ struct vpu_boot_params { u32 temp_sensor_period_ms; /** PLL ratio for efficient clock frequency */ u32 pn_freq_pll_ratio; - u32 pad4[28]; + /** DVFS Mode: Default: 0, Max Performance: 1, On Demand: 2, Power Save: 3 */ + u32 dvfs_mode; + /** + * Depending on DVFS Mode: + * On-demand: Default if 0. + * Bit 0-7 - uint8_t: Highest residency percent + * Bit 8-15 - uint8_t: High residency percent + * Bit 16-23 - uint8_t: Low residency percent + * Bit 24-31 - uint8_t: Lowest residency percent + * Bit 32-35 - unsigned 4b: PLL Ratio increase amount on highest residency + * Bit 36-39 - unsigned 4b: PLL Ratio increase amount on high residency + * Bit 40-43 - unsigned 4b: PLL Ratio decrease amount on low residency + * Bit 44-47 - unsigned 4b: PLL Ratio decrease amount on lowest frequency + * Bit 48-55 - uint8_t: Period (ms) for residency decisions + * Bit 56-63 - uint8_t: Averaging windows (as multiples of period. Max: 30 decimal) + * Power Save/Max Performance: Unused + */ + u64 dvfs_param; + /** + * D0i3 delayed entry + * Bit0: Disable CPU state save on D0i2 entry flow. + * 0: Every D0i2 entry saves state. Save state IPC message ignored. + * 1: IPC message required to save state on D0i3 entry flow. + */ + u32 d0i3_delayed_entry; + /* Time spent by VPU in D0i3 state */ + u64 d0i3_residency_time_us; + /* Value of VPU perf counter at the time of entering D0i3 state . */ + u64 d0i3_entry_vpu_ts; + u32 pad4[20]; /* Warm boot information: 0x400 - 0x43F */ u32 warm_boot_sections_count; u32 warm_boot_start_address_reference; @@ -274,8 +342,12 @@ struct vpu_boot_params { u32 vpu_scheduling_mode; /* Present call period in milliseconds. */ u32 vpu_focus_present_timer_ms; - /* Unused/reserved: 0x478 - 0xFFF */ - u32 pad6[738]; + /* VPU ECC Signaling */ + u32 vpu_uses_ecc_mca_signal; + /* Values defined by VPU_OP_MODE* macros */ + u32 vpu_operation_mode; + /* Unused/reserved: 0x480 - 0xFFF */ + u32 pad6[736]; }; /* diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 2949ec8365bd5..7da7622742bee 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -22,12 +22,12 @@ /* * Minor version changes when API backward compatibility is preserved. */ -#define VPU_JSM_API_VER_MINOR 0 +#define VPU_JSM_API_VER_MINOR 15 /* * API header changed (field names, documentation, formatting) but API itself has not been changed */ -#define VPU_JSM_API_VER_PATCH 1 +#define VPU_JSM_API_VER_PATCH 0 /* * Index in the API version table @@ -84,11 +84,13 @@ * Job flags bit masks. */ #define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001 +#define VPU_JOB_FLAGS_PRIVATE_DATA_MASK 0xFF000000 /* * Sizes of the reserved areas in jobs, in bytes. */ -#define VPU_JOB_RESERVED_BYTES 16 +#define VPU_JOB_RESERVED_BYTES 8 + /* * Sizes of the reserved areas in job queues, in bytes. */ @@ -108,6 +110,20 @@ */ #define VPU_DYNDBG_CMD_MAX_LEN 96 +/* + * For HWS command queue scheduling, we can prioritise command queues inside the + * same process with a relative in-process priority. Valid values for relative + * priority are given below - max and min. + */ +#define VPU_HWS_COMMAND_QUEUE_MAX_IN_PROCESS_PRIORITY 7 +#define VPU_HWS_COMMAND_QUEUE_MIN_IN_PROCESS_PRIORITY -7 + +/* + * For HWS priority scheduling, we can have multiple realtime priority bands. + * They are numbered 0 to a MAX. + */ +#define VPU_HWS_MAX_REALTIME_PRIORITY_LEVEL 31U + /* * Job format. 
*/ @@ -117,8 +133,14 @@ struct vpu_job_queue_entry { u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */ u64 root_page_table_addr; /**< Address of root page table to use for this job */ u64 root_page_table_update_counter; /**< Page tables update events counter */ - u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */ - u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */ + u64 primary_preempt_buf_addr; + /**< Address of the primary preemption buffer to use for this job */ + u32 primary_preempt_buf_size; + /**< Size of the primary preemption buffer to use for this job */ + u32 secondary_preempt_buf_size; + /**< Size of secondary preemption buffer to use for this job */ + u64 secondary_preempt_buf_addr; + /**< Address of secondary preemption buffer to use for this job */ u8 reserved_0[VPU_JOB_RESERVED_BYTES]; }; @@ -152,6 +174,46 @@ enum vpu_trace_entity_type { VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2, }; +/* + * HWS specific log buffer header details. + * Total size is 32 bytes. + */ +struct vpu_hws_log_buffer_header { + /* Written by VPU after adding a log entry. Initialised by host to 0. */ + u32 first_free_entry_index; + /* Incremented by VPU every time the VPU overwrites the 0th entry; + * initialised by host to 0. + */ + u32 wraparound_count; + /* + * This is the number of buffers that can be stored in the log buffer provided by the host. + * It is written by host before passing buffer to VPU. VPU should consider it read-only. + */ + u64 num_of_entries; + u64 reserved[2]; +}; + +/* + * HWS specific log buffer entry details. + * Total size is 32 bytes. + */ +struct vpu_hws_log_buffer_entry { + /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ + u64 vpu_timestamp; + /* + * Operation type: + * 0 - context state change + * 1 - queue new work + * 2 - queue unwait sync object + * 3 - queue no more work + * 4 - queue wait sync object + */ + u32 operation_type; + u32 reserved; + /* Operation data depends on operation type */ + u64 operation_data[2]; +}; + /* * Host <-> VPU IPC messages types. */ @@ -228,6 +290,23 @@ enum vpu_ipc_msg_type { * deallocated or reassigned to another context. */ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, + /** Control command: Log buffer setting */ + VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118, + /* Control command: Suspend command queue. */ + VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119, + /* Control command: Resume command queue */ + VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a, + /* Control command: Resume engine after reset */ + VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b, + /* Control command: Enable survivability/DCT mode */ + VPU_JSM_MSG_DCT_ENABLE = 0x111c, + /* Control command: Disable survivability/DCT mode */ + VPU_JSM_MSG_DCT_DISABLE = 0x111d, + /** + * Dump VPU state. To be used for debug purposes only. + * NOTE: Please introduce new ASYNC commands before this one. * + */ + VPU_JSM_MSG_STATE_DUMP = 0x11FF, /* IPC Host -> Device, General commands */ VPU_JSM_MSG_GENERAL_CMD = 0x1200, VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD, @@ -236,6 +315,10 @@ enum vpu_ipc_msg_type { * Linux command: `echo '' > /dynamic_debug/control`. 
*/ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, + /** + * Perform the save procedure for the D0i3 entry + */ + VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202, /* IPC Device -> Host, Job completion */ VPU_JSM_MSG_JOB_DONE = 0x2100, /* IPC Device -> Host, Async command completion */ @@ -304,11 +387,35 @@ enum vpu_ipc_msg_type { VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, /** Response to control command: Set context scheduling properties */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, + /** Response to control command: Log buffer setting */ + VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218, + /* IPC Device -> Host, HWS notify index entry of log buffer written */ + VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219, + /* IPC Device -> Host, HWS completion of a context suspend request */ + VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a, + /* Response to control command: Resume command queue */ + VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b, + /* Response to control command: Resume engine command response */ + VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c, + /* Response to control command: Enable survivability/DCT mode */ + VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d, + /* Response to control command: Disable survivability/DCT mode */ + VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e, + /** + * Response to state dump control command. + * NOTE: Please introduce new ASYNC responses before this one. * + */ + VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF, /* IPC Device -> Host, General command completion */ VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300, VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE, /** Response to VPU_JSM_MSG_DYNDBG_CONTROL. */ VPU_JSM_MSG_DYNDBG_CONTROL_RSP = 0x2301, + /** + * Acknowledgment of completion of the save procedure initiated by + * VPU_JSM_MSG_PWR_D0I3_ENTER + */ + VPU_JSM_MSG_PWR_D0I3_ENTER_DONE = 0x2302, }; enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED }; @@ -593,12 +700,12 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { * Default quantum in 100ns units for scheduling across processes * within a priority band */ - u64 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS]; + u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS]; /* * Default grace period in 100ns units for processes that preempt each * other within a priority band */ - u64 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS]; + u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS]; /* * For normal priority band, specifies the target VPU percentage * in situations when it's starved by the focus band. @@ -608,32 +715,51 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { u32 reserved_0; }; -/* HWS create command queue request */ +/* + * @brief HWS create command queue request. + * Host will create a command queue via this command. + * Note: Cmdq group is a handle of an object which + * may contain one or more command queues. + * @see VPU_JSM_MSG_CREATE_CMD_QUEUE + * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP + */ struct vpu_ipc_msg_payload_hws_create_cmdq { /* Process id */ u64 process_id; /* Host SSID */ u32 host_ssid; - /* Zero Padding */ - u32 reserved; + /* Engine for which queue is being created */ + u32 engine_idx; + /* + * Cmdq group may be set to 0 or equal to + * cmdq_id while each priority band contains + * only single engine instances. + */ + u64 cmdq_group; /* Command queue id */ u64 cmdq_id; /* Command queue base */ u64 cmdq_base; /* Command queue size */ u32 cmdq_size; - /* Reserved */ + /* Zero padding */ u32 reserved_0; }; -/* HWS create command queue response */ +/* + * @brief HWS create command queue response. 
+ * @see VPU_JSM_MSG_CREATE_CMD_QUEUE + * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP + */ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp { /* Process id */ u64 process_id; /* Host SSID */ u32 host_ssid; - /* Zero Padding */ - u32 reserved; + /* Engine for which queue is being created */ + u32 engine_idx; + /* Command queue group */ + u64 cmdq_group; /* Command queue id */ u64 cmdq_id; }; @@ -661,7 +787,7 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { /* Inside realtime band assigns a further priority */ u32 realtime_priority_level; /* Priority relative to other contexts in the same process */ - u32 in_process_priority; + s32 in_process_priority; /* Zero padding / Reserved */ u32 reserved_1; /* Context quantum relative to other contexts of same priority in the same process */ @@ -694,6 +820,123 @@ struct vpu_jsm_hws_register_db { u64 cmdq_size; }; +/* + * @brief Structure to set another buffer to be used for scheduling-related logging. + * The size of the logging buffer and the number of entries is defined as part of the + * buffer itself as described next. + * The log buffer received from the host is made up of; + * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'. + * The header contains the number of log entries in the buffer. + * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in + * 'struct vpu_hws_log_buffer_entry'. + * The entry contains the VPU timestamp, operation type and data. + * The host should provide the notify index value of log buffer to VPU. This is a + * value defined within the log buffer and when written to will generate the + * scheduling log notification. + * The host should set engine_idx and vpu_log_buffer_va to 0 to disable logging + * for a particular engine. + * VPU will handle one log buffer for each of supported engines. + * VPU should allow the logging to consume one host_ssid. + * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG + * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP + * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION + */ +struct vpu_ipc_msg_payload_hws_set_scheduling_log { + /* Engine ordinal */ + u32 engine_idx; + /* Host SSID */ + u32 host_ssid; + /* + * VPU log buffer virtual address. + * Set to 0 to disable logging for this engine. + */ + u64 vpu_log_buffer_va; + /* + * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION + * is generated when an event log is written to this index. + */ + u64 notify_index; +}; + +/* + * @brief The scheduling log notification is generated by VPU when it writes + * an event into the log buffer at the notify_index. VPU notifies host with + * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous + * message from VPU to host. + * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION + * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG + */ +struct vpu_ipc_msg_payload_hws_scheduling_log_notification { + /* Engine ordinal */ + u32 engine_idx; + /* Zero Padding */ + u32 reserved_0; +}; + +/* + * @brief HWS suspend command queue request and done structure. 
+ * Host will request the suspend of contexts and VPU will; + * - Suspend all work on this context + * - Preempt any running work + * - Asynchronously perform the above and return success immediately once + * all items above are started successfully + * - Notify the host of completion of these operations via + * VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE + * - Reject any other context operations on a context with an in-flight + * suspend request running + * Same structure used when VPU notifies host of completion of a context suspend + * request. The ids and suspend fence value reported in this command will match + * the one in the request from the host to suspend the context. Once suspend is + * complete, VPU will not access any data relating to this command queue until + * it is resumed. + * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ + * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE + */ +struct vpu_ipc_msg_payload_hws_suspend_cmdq { + /* Host SSID */ + u32 host_ssid; + /* Zero Padding */ + u32 reserved_0; + /* Command queue id */ + u64 cmdq_id; + /* + * Suspend fence value - reported by the VPU suspend context + * completed once suspend is complete. + */ + u64 suspend_fence_value; +}; + +/* + * @brief HWS Resume command queue request / response structure. + * Host will request the resume of a context; + * - VPU will resume all work on this context + * - Scheduler will allow this context to be scheduled + * @see VPU_JSM_MSG_HWS_RESUME_CMDQ + * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP + */ +struct vpu_ipc_msg_payload_hws_resume_cmdq { + /* Host SSID */ + u32 host_ssid; + /* Zero Padding */ + u32 reserved_0; + /* Command queue id */ + u64 cmdq_id; +}; + +/* + * @brief HWS Resume engine request / response structure. + * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume. + * Host shall send this command to resume scheduling of any valid queue. + * @see VPU_JSM_MSG_HWS_RESUME_ENGINE + * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE + */ +struct vpu_ipc_msg_payload_hws_resume_engine { + /* Engine to be resumed */ + u32 engine_idx; + /* Reserved */ + u32 reserved_0; +}; + /** * Payload for VPU_JSM_MSG_TRACE_SET_CONFIG[_RSP] and * VPU_JSM_MSG_TRACE_GET_CONFIG_RSP messages. @@ -938,6 +1181,35 @@ struct vpu_ipc_msg_payload_dyndbg_control { char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN]; }; +/** + * Payload for VPU_JSM_MSG_PWR_D0I3_ENTER + * + * This is a bi-directional payload. + */ +struct vpu_ipc_msg_payload_pwr_d0i3_enter { + /** + * 0: VPU_JSM_MSG_PWR_D0I3_ENTER_DONE is not sent to the host driver + * The driver will poll for D0i2 Idle state transitions. + * 1: VPU_JSM_MSG_PWR_D0I3_ENTER_DONE is sent after VPU state save is complete + */ + u32 send_response; + u32 reserved_0; +}; + +/** + * Payload for VPU_JSM_MSG_DCT_ENABLE message. + * + * Default values for DCT active/inactive times are 5.3ms and 30ms respectively, + * corresponding to a 85% duty cycle. This payload allows the host to tune these + * values according to application requirements. + */ +struct vpu_ipc_msg_payload_pwr_dct_control { + /** Duty cycle active time in microseconds */ + u32 dct_active_us; + /** Duty cycle inactive time in microseconds */ + u32 dct_inactive_us; +}; + /* * Payloads union, used to define complete message format. 
*/ @@ -974,6 +1246,13 @@ union vpu_ipc_msg_payload { struct vpu_ipc_msg_payload_hws_destroy_cmdq hws_destroy_cmdq; struct vpu_ipc_msg_payload_hws_set_context_sched_properties hws_set_context_sched_properties; + struct vpu_ipc_msg_payload_hws_set_scheduling_log hws_set_scheduling_log; + struct vpu_ipc_msg_payload_hws_scheduling_log_notification hws_scheduling_log_notification; + struct vpu_ipc_msg_payload_hws_suspend_cmdq hws_suspend_cmdq; + struct vpu_ipc_msg_payload_hws_resume_cmdq hws_resume_cmdq; + struct vpu_ipc_msg_payload_hws_resume_engine hws_resume_engine; + struct vpu_ipc_msg_payload_pwr_d0i3_enter pwr_d0i3_enter; + struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control; }; /* From 9692b1dcefe79ea47f7d5a94acff74303c6563c6 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sat, 28 Oct 2023 15:34:06 +0200 Subject: [PATCH 013/117] accel/ivpu: Remove unneeded drm_driver declaration Cleanup drm_driver declaration leftover. Reviewed-by: Krystian Pradzynski Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-3-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index b6aa8893ff1ce..8f5655dfd83fb 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -31,8 +31,6 @@ __stringify(DRM_IVPU_DRIVER_MINOR) "." #endif -static const struct drm_driver driver; - static struct lock_class_key submitted_jobs_xa_lock_class_key; int ivpu_dbg_mask; From f13108fc7bae7085616805ed176b2114cdf4bd55 Mon Sep 17 00:00:00 2001 From: Tomasz Rusinowicz Date: Sat, 28 Oct 2023 15:34:07 +0200 Subject: [PATCH 014/117] accel/ivpu: Add dvfs_mode file to debugfs Add new debugfs file to set dvfs_mode FW boot parameter and restart the FW to allow experimenting with DVFS (dynamic voltage & frequency scaling). 
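For reference, a minimal user-space sketch of exercising the new knob; the debugfs path is an assumption (it depends on where the driver's debugfs root lives on a given system) and the mode values follow the DVFS mode encoding documented in vpu_boot_api.h (0: default, 1: max performance, 2: on demand, 3: power save):

/*
 * Minimal sketch, not part of the patch: write a DVFS mode to the new
 * debugfs file. The path below is an assumption and varies by system;
 * writing a value schedules a recovery so the FW restarts with the
 * requested dvfs_mode boot parameter.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/accel/accel0/dvfs_mode"; /* assumed path */
	const char *mode = "2"; /* 0: default, 1: max perf, 2: on demand, 3: power save */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open dvfs_mode");
		return 1;
	}
	if (write(fd, mode, strlen(mode)) < 0)
		perror("write dvfs_mode");
	close(fd);
	return 0;
}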
Signed-off-by: Tomasz Rusinowicz Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-4-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_debugfs.c | 28 ++++++++++++++++++++++++++++ drivers/accel/ivpu/ivpu_fw.c | 5 +++++ drivers/accel/ivpu/ivpu_fw.h | 1 + 3 files changed, 34 insertions(+) diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index ea453b985b491..6e0d568230241 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -115,6 +115,31 @@ static const struct drm_debugfs_info vdev_debugfs_list[] = { {"reset_pending", reset_pending_show, 0}, }; +static ssize_t +dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + struct ivpu_fw_info *fw = vdev->fw; + u32 dvfs_mode; + int ret; + + ret = kstrtou32_from_user(user_buf, size, 0, &dvfs_mode); + if (ret < 0) + return ret; + + fw->dvfs_mode = dvfs_mode; + + ivpu_pm_schedule_recovery(vdev); + + return size; +} + +static const struct file_operations dvfs_mode_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dvfs_mode_fops_write, +}; + static int fw_log_show(struct seq_file *s, void *v) { struct ivpu_device *vdev = s->private; @@ -280,6 +305,9 @@ void ivpu_debugfs_init(struct ivpu_device *vdev) debugfs_create_file("force_recovery", 0200, debugfs_root, vdev, &ivpu_force_recovery_fops); + debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev, + &dvfs_mode_fops); + debugfs_create_file("fw_log", 0644, debugfs_root, vdev, &fw_log_fops); debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev, diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 6142b09cf55a4..b81827540db94 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -182,6 +182,8 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->trace_destination_mask = VPU_TRACE_DESTINATION_VERBOSE_TRACING; fw->trace_hw_component_mask = -1; + fw->dvfs_mode = 0; + ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", @@ -422,6 +424,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->punit_telemetry_sram_size); ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n", boot_params->vpu_telemetry_enable); + ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n", + boot_params->dvfs_mode); } void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) @@ -492,6 +496,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev); boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); + boot_params->dvfs_mode = vdev->fw->dvfs_mode; wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 10ae2847f0ef3..66b60fa161b52 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -27,6 +27,7 @@ struct ivpu_fw_info { u32 trace_level; u32 trace_destination_mask; u64 trace_hw_component_mask; + u32 dvfs_mode; }; int 
ivpu_fw_init(struct ivpu_device *vdev); From bacc130d4671a9568352b8d7606cfe7a50d38a56 Mon Sep 17 00:00:00 2001 From: Karol Wachowski Date: Sat, 28 Oct 2023 15:34:08 +0200 Subject: [PATCH 015/117] accel/ivpu: Remove reset from power up sequence Setting a non-zero work point resets the IP hence IP_RESET trigger is redundant. Signed-off-by: Karol Wachowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-5-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_hw_37xx.c | 4 ---- drivers/accel/ivpu/ivpu_hw_40xx.c | 6 ------ 2 files changed, 10 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index e5cb9d8acb828..8340c84ed6dea 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -651,10 +651,6 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev) { int ret; - ret = ivpu_hw_37xx_reset(vdev); - if (ret) - ivpu_warn(vdev, "Failed to reset HW: %d\n", ret); - ret = ivpu_hw_37xx_d0i3_disable(vdev); if (ret) ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index 4bf4c87800446..eb8218d15f01e 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -811,12 +811,6 @@ static int ivpu_hw_40xx_power_up(struct ivpu_device *vdev) { int ret; - ret = ivpu_hw_40xx_reset(vdev); - if (ret) { - ivpu_err(vdev, "Failed to reset HW: %d\n", ret); - return ret; - } - ret = ivpu_hw_40xx_d0i3_disable(vdev); if (ret) ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret); From 61ab485f0eb1e5b7e2be10ec657a054d36ef5035 Mon Sep 17 00:00:00 2001 From: Andrzej Kacprowski Date: Sat, 28 Oct 2023 15:34:09 +0200 Subject: [PATCH 016/117] accel/ivpu: Add support for VPU_JOB_FLAGS_NULL_SUBMISSION_MASK Add test_mode = 3 that add VPU_JOB_FLAGS_NULL_SUBMISSION_MASK flag to the job send to the VPU device. Then the VPU will process the job but won't execute commands (except the command to signal the fence). This can be used to estimate job processing overhead in the host software and VPU firmware. Unlike the null hardware mode, the null submission mode will still work even if UMD uses VPU fences to track job completion. Signed-off-by: Andrzej Kacprowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-6-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 2 +- drivers/accel/ivpu/ivpu_drv.h | 7 ++++--- drivers/accel/ivpu/ivpu_job.c | 2 ++ 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 8f5655dfd83fb..4ec8d25a120cf 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -39,7 +39,7 @@ MODULE_PARM_DESC(dbg_mask, "Driver debug mask. 
See IVPU_DBG_* macros."); int ivpu_test_mode; module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644); -MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw"); +MODULE_PARM_DESC(test_mode, "Test mode: 0 - disabled , 1 - fw unit test, 2 - null hw, 3 - null submission"); u8 ivpu_pll_min_ratio; module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 12a63f8a73e8a..fdec8272da8cf 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -147,9 +147,10 @@ extern u8 ivpu_pll_min_ratio; extern u8 ivpu_pll_max_ratio; extern bool ivpu_disable_mmu_cont_pages; -#define IVPU_TEST_MODE_DISABLED 0 -#define IVPU_TEST_MODE_FW_TEST 1 -#define IVPU_TEST_MODE_NULL_HW 2 +#define IVPU_TEST_MODE_DISABLED 0 +#define IVPU_TEST_MODE_FW_TEST 1 +#define IVPU_TEST_MODE_NULL_HW 2 +#define IVPU_TEST_MODE_NULL_SUBMISSION 3 extern int ivpu_test_mode; struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv); diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 689dc0d13b8fa..646b8f8129015 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -196,6 +196,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) entry->batch_buf_addr = job->cmd_buf_vpu_addr; entry->job_id = job->job_id; entry->flags = 0; + if (unlikely(ivpu_test_mode == IVPU_TEST_MODE_NULL_SUBMISSION)) + entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; wmb(); /* Ensure that tail is updated after filling entry */ header->tail = next_entry; wmb(); /* Flush WC buffer for jobq header */ From 8b5cec3c2ccf0b12121cb151560c2876219d87e5 Mon Sep 17 00:00:00 2001 From: Karol Wachowski Date: Sat, 28 Oct 2023 15:34:10 +0200 Subject: [PATCH 017/117] accel/ivpu: Change test_mode module param to bitmask Change meaning of test_mode module parameter from integer value to bitmask allowing setting different test features with corresponding bits. Signed-off-by: Karol Wachowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-7-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 4 ++-- drivers/accel/ivpu/ivpu_drv.h | 7 +++---- drivers/accel/ivpu/ivpu_job.c | 4 ++-- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 4ec8d25a120cf..39bac45d88b5e 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -39,7 +39,7 @@ MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros."); int ivpu_test_mode; module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644); -MODULE_PARM_DESC(test_mode, "Test mode: 0 - disabled , 1 - fw unit test, 2 - null hw, 3 - null submission"); +MODULE_PARM_DESC(test_mode, "Test mode mask. 
See IVPU_TEST_MODE_* macros."); u8 ivpu_pll_min_ratio; module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644); @@ -315,7 +315,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev) unsigned long timeout; int ret; - if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST) + if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST) return 0; ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index fdec8272da8cf..ada43ba565c4a 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -147,10 +147,9 @@ extern u8 ivpu_pll_min_ratio; extern u8 ivpu_pll_max_ratio; extern bool ivpu_disable_mmu_cont_pages; -#define IVPU_TEST_MODE_DISABLED 0 -#define IVPU_TEST_MODE_FW_TEST 1 -#define IVPU_TEST_MODE_NULL_HW 2 -#define IVPU_TEST_MODE_NULL_SUBMISSION 3 +#define IVPU_TEST_MODE_FW_TEST BIT(0) +#define IVPU_TEST_MODE_NULL_HW BIT(1) +#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2) extern int ivpu_test_mode; struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv); diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 646b8f8129015..6e96c921547d5 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -196,7 +196,7 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) entry->batch_buf_addr = job->cmd_buf_vpu_addr; entry->job_id = job->job_id; entry->flags = 0; - if (unlikely(ivpu_test_mode == IVPU_TEST_MODE_NULL_SUBMISSION)) + if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION)) entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; wmb(); /* Ensure that tail is updated after filling entry */ header->tail = next_entry; @@ -404,7 +404,7 @@ static int ivpu_direct_job_submission(struct ivpu_job *job) job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id, job->engine_idx, cmdq->jobq->header.tail); - if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) { + if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) { ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS); cmdq->jobq->header.head = cmdq->jobq->header.tail; wmb(); /* Flush WC buffer for jobq header */ From db37a5bfe97588992c75c29c1f0e19c88d364ce5 Mon Sep 17 00:00:00 2001 From: Andrzej Kacprowski Date: Sat, 28 Oct 2023 15:34:11 +0200 Subject: [PATCH 018/117] accel/ivpu/40xx: Capture D0i3 entry host and device timestamps The driver needs to capture the D0i3 entry timestamp to calculate D0i3 residency time. The D0i3 residency time and the VPU timestamp are passed to the firmware at D0i3 exit (warm boot). 
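For context, these captured values feed the warm-boot parameter setup added in the next patch of this series; a condensed sketch of that calculation (the helper name here is illustrative, the fields are the ones introduced by this series):

/*
 * Sketch of how the captured timestamps are consumed at warm boot
 * (see the ivpu_fw.c hunk in the following patch): residency is the
 * wall-clock delta since D0i3 entry, the VPU counter is passed as-is.
 */
static void ivpu_fw_fill_d0i3_params(struct ivpu_device *vdev,
				     struct vpu_boot_params *bp)
{
	bp->d0i3_residency_time_us =
		ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts);
	bp->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts;
}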
Signed-off-by: Andrzej Kacprowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-8-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_hw_40xx.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index eb8218d15f01e..0eb9c827f6dcf 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -879,10 +879,18 @@ static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev) REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val); } +static void ivpu_hw_40xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev) +{ + vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); + vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT); +} + static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev) { int ret = 0; + ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev); + if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev)) ivpu_warn(vdev, "Failed to reset the VPU\n"); From 3de6d9597892ef899d7e90f589e23c9298013134 Mon Sep 17 00:00:00 2001 From: Andrzej Kacprowski Date: Sat, 28 Oct 2023 15:34:12 +0200 Subject: [PATCH 019/117] accel/ivpu: Pass D0i3 residency time to the VPU firmware The firmware needs to know the time spent in D0i3/D3 to calculate telemetry data. The D0i3/D3 residency time is calculated by the driver and passed to the firmware in the boot parameters. The driver also passes VPU perf counter value captured right before entering D0i3 - this allows the VPU firmware to generate monotonic timestamps for the logs. Signed-off-by: Andrzej Kacprowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-9-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_fw.c | 17 ++++++++++++++++- drivers/accel/ivpu/ivpu_hw.h | 2 ++ drivers/accel/ivpu/ivpu_hw_37xx.c | 8 ++++++++ drivers/accel/ivpu/ivpu_hw_37xx_reg.h | 2 ++ 4 files changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index b81827540db94..383e4d9b97c85 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -426,14 +426,27 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->vpu_telemetry_enable); ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n", boot_params->dvfs_mode); + ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n", + boot_params->d0i3_residency_time_us); + ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n", + boot_params->d0i3_entry_vpu_ts); } void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) { struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx; - /* In case of warm boot we only have to reset the entrypoint addr */ + /* In case of warm boot only update variable params */ if (!ivpu_fw_is_cold_boot(vdev)) { + boot_params->d0i3_residency_time_us = + ktime_us_delta(ktime_get_boottime(), vdev->hw->d0i3_entry_host_ts); + boot_params->d0i3_entry_vpu_ts = vdev->hw->d0i3_entry_vpu_ts; + + ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n", + boot_params->d0i3_residency_time_us); + ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n", + boot_params->d0i3_entry_vpu_ts); + boot_params->save_restore_ret_address = 0; vdev->pm->is_warmboot = true; return; 
@@ -497,6 +510,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); boot_params->dvfs_mode = vdev->fw->dvfs_mode; + boot_params->d0i3_residency_time_us = 0; + boot_params->d0i3_entry_vpu_ts = 0; wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index ab341237bcf97..fd4809b56168f 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -57,6 +57,8 @@ struct ivpu_hw_info { u32 sku; u16 config; int dma_bits; + ktime_t d0i3_entry_host_ts; + u64 d0i3_entry_vpu_ts; }; extern const struct ivpu_hw_ops ivpu_hw_37xx_ops; diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index 8340c84ed6dea..451c9777b237b 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -714,10 +714,18 @@ static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev) REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val); } +static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev) +{ + vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); + vdev->hw->d0i3_entry_vpu_ts = REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT); +} + static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev) { int ret = 0; + ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev); + if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev)) ivpu_err(vdev, "Failed to reset the VPU\n"); diff --git a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h index 4083beb5e9dba..f6fec19192020 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_37xx_reg.h @@ -240,6 +240,8 @@ #define VPU_37XX_CPU_SS_TIM_GEN_CONFIG 0x06021008u #define VPU_37XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9) +#define VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT 0x06029000u + #define VPU_37XX_CPU_SS_DOORBELL_0 0x06300000u #define VPU_37XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0) From 45e45362e0955fc3b0b622e8a0d788097f3de902 Mon Sep 17 00:00:00 2001 From: Karol Wachowski Date: Sat, 28 Oct 2023 15:34:13 +0200 Subject: [PATCH 020/117] accel/ivpu: Introduce ivpu_ipc_send_receive_active() Split ivpu_ipc_send_receive() implementation to have a version that does not call pm_runtime_resume_and_get(). That implementation can be invoked when device is up and runtime resume is prohibited (for example at the end of boot sequence). The new function will be used for D0i3 entry IPC message addition in the separate change. 
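Put differently, after this split the existing helper reduces to a runtime-PM wrapper around the new _active variant; a condensed view of the resulting code (the diff below is authoritative):

/*
 * Condensed view of the split introduced below: the wrapper keeps the
 * runtime-PM reference handling, the _active variant assumes the device
 * is already resumed and only warns if that assumption is violated.
 */
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp,
					   channel, timeout_ms);

	ivpu_rpm_put(vdev);
	return ret;
}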
Signed-off-by: Karol Wachowski Reviewed-by: Stanislaw Gruszka Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-10-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_ipc.c | 33 ++++++++++++++++++++++----------- drivers/accel/ivpu/ivpu_ipc.h | 8 +++++--- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 6e213a5afb8cf..d069d1e1f91d5 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -288,23 +288,20 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req return ret; } -int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, - enum vpu_ipc_msg_type expected_resp_type, - struct vpu_jsm_msg *resp, u32 channel, - unsigned long timeout_ms) +int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp, + u32 channel, unsigned long timeout_ms) { struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB }; struct vpu_jsm_msg hb_resp; int ret, hb_ret; - ret = ivpu_rpm_get(vdev); - if (ret < 0) - return ret; + drm_WARN_ON(&vdev->drm, + vdev->drm.dev->power.runtime_status == RPM_SUSPENDED); - ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp, - channel, timeout_ms); + ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms); if (ret != -ETIMEDOUT) - goto rpm_put; + return ret; hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &hb_resp, VPU_IPC_CHAN_ASYNC_CMD, @@ -314,7 +311,21 @@ int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, ivpu_pm_schedule_recovery(vdev); } -rpm_put: + return ret; +} + +int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp, + u32 channel, unsigned long timeout_ms) +{ + int ret; + + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + + ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms); + ivpu_rpm_put(vdev); return ret; } diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h index 68f5b6668e00b..6918db23daa4b 100644 --- a/drivers/accel/ivpu/ivpu_ipc.h +++ b/drivers/accel/ivpu/ivpu_ipc.h @@ -85,9 +85,11 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms); +int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req, + enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp, + u32 channel, unsigned long timeout_ms); int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req, - enum vpu_ipc_msg_type expected_resp_type, - struct vpu_jsm_msg *resp, u32 channel, - unsigned long timeout_ms); + enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp, + u32 channel, unsigned long timeout_ms); #endif /* __IVPU_IPC_H__ */ From cc19fedab8bdbe0f81d320ff9a6fdb3b5b01be83 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sat, 28 Oct 2023 15:34:14 +0200 Subject: [PATCH 021/117] accel/ivpu/37xx: Print warning when VPUIP is not idle during power down Print warning if VPUIP is not idle during power down. Use warn log level also when we fail to enter reset state as this is not really an error but unexpected behavior. 
Reviewed-by: Krystian Pradzynski Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-11-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_hw_37xx.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index 451c9777b237b..1b47d77b4c6ee 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -726,8 +726,11 @@ static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev) ivpu_hw_37xx_save_d0i3_entry_timestamp(vdev); - if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev)) - ivpu_err(vdev, "Failed to reset the VPU\n"); + if (!ivpu_hw_37xx_is_idle(vdev)) { + ivpu_warn(vdev, "VPU not idle during power down\n"); + if (ivpu_hw_37xx_reset(vdev)) + ivpu_warn(vdev, "Failed to reset the VPU\n"); + } if (ivpu_pll_disable(vdev)) { ivpu_err(vdev, "Failed to disable PLL\n"); From 3198a62eb8f8411b605501cb78c2298c67ecee91 Mon Sep 17 00:00:00 2001 From: Andrzej Kacprowski Date: Sat, 28 Oct 2023 15:34:15 +0200 Subject: [PATCH 022/117] accel/ivpu: Add support for delayed D0i3 entry message Currently the VPU firmware prepares for D0i3 every time the VPU is entering D0i2 Idle state. This is not optimal as we might not enter D0i3 every time we enter D0i2 Idle and this preparation is quite costly. This optimization moves D0i3 preparation to a dedicated message sent from the host driver only when the driver is about to enter D0i3 - this reduces power consumption and latency for certain workloads, for example audio workloads that submit inference every 10 ms. The VPU needs non zero time to enter IDLE state after responding to D0i3 entry message. If the driver does not wait for the VPU to enter IDLE state it could cause warm boot failures. 
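A condensed view of the new entry helper (the authoritative version is in the ivpu_jsm_msg.c hunk below): the driver sends VPU_JSM_MSG_PWR_D0I3_ENTER on the general-command channel, requests a completion response, and then waits for the IDLE state before power down continues:

/*
 * Condensed view of the helper added below: skip the message when the
 * firmware does not support it (workaround), otherwise request a response
 * and wait for the VPU to become IDLE so the warm boot state is consistent.
 */
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE,
					   &resp, VPU_IPC_CHAN_GEN_CMD,
					   vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}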
Signed-off-by: Andrzej Kacprowski Reviewed-by: Stanislaw Gruszka Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028133415.1169975-12-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.h | 10 +++++-- drivers/accel/ivpu/ivpu_fw.c | 48 +++++++++++++++++++++++++++++-- drivers/accel/ivpu/ivpu_hw.h | 6 ++++ drivers/accel/ivpu/ivpu_hw_37xx.c | 9 +++++- drivers/accel/ivpu/ivpu_hw_40xx.c | 10 +++++++ drivers/accel/ivpu/ivpu_jsm_msg.c | 21 ++++++++++++++ drivers/accel/ivpu/ivpu_jsm_msg.h | 1 + drivers/accel/ivpu/ivpu_pm.c | 11 ++++++- 8 files changed, 108 insertions(+), 8 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index ada43ba565c4a..1b482d1d66d95 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -87,6 +87,7 @@ struct ivpu_wa_table { bool d3hot_after_power_off; bool interrupt_clear_with_0; bool disable_clock_relinquish; + bool disable_d0i3_msg; }; struct ivpu_hw_info; @@ -125,6 +126,7 @@ struct ivpu_device { int tdr; int reschedule_suspend; int autosuspend; + int d0i3_entry_msg; } timeout; }; @@ -147,9 +149,11 @@ extern u8 ivpu_pll_min_ratio; extern u8 ivpu_pll_max_ratio; extern bool ivpu_disable_mmu_cont_pages; -#define IVPU_TEST_MODE_FW_TEST BIT(0) -#define IVPU_TEST_MODE_NULL_HW BIT(1) -#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2) +#define IVPU_TEST_MODE_FW_TEST BIT(0) +#define IVPU_TEST_MODE_NULL_HW BIT(1) +#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2) +#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4) +#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5) extern int ivpu_test_mode; struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 383e4d9b97c85..4a21be3a0c59b 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -33,12 +33,17 @@ #define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31) -#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \ +/* Check if FW API is compatible with the driver */ +#define IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, name, min_major) \ ivpu_fw_check_api(vdev, fw_hdr, #name, \ VPU_##name##_API_VER_INDEX, \ VPU_##name##_API_VER_MAJOR, \ VPU_##name##_API_VER_MINOR, min_major) +/* Check if API version is lower that the given version */ +#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \ + ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor) + static char *ivpu_firmware; module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644); MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/.."); @@ -105,6 +110,19 @@ ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw return 0; } +static bool +ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr, + const char *str, int index, u16 major, u16 minor) +{ + u16 fw_major = (u16)(fw_hdr->api_version[index] >> 16); + u16 fw_minor = (u16)(fw_hdr->api_version[index]); + + if (fw_major < major || (fw_major == major && fw_minor < minor)) + return true; + + return false; +} + static int ivpu_fw_parse(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; @@ -164,9 +182,9 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) ivpu_info(vdev, "Firmware: %s, version: %s", fw->name, (const char *)fw_hdr + VPU_FW_HEADER_SIZE); - if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3)) + if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, BOOT, 3)) return -EINVAL; - if 
(IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3)) + if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3)) return -EINVAL; fw->runtime_addr = runtime_addr; @@ -197,6 +215,24 @@ static void ivpu_fw_release(struct ivpu_device *vdev) release_firmware(vdev->fw->file); } +/* Initialize workarounds that depend on FW version */ +static void +ivpu_fw_init_wa(struct ivpu_device *vdev) +{ + const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data; + + if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) || + (ivpu_hw_gen(vdev) > IVPU_HW_37XX) || + (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE)) + vdev->wa.disable_d0i3_msg = true; + + /* Force enable the feature for testing purposes */ + if (ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_ENABLE) + vdev->wa.disable_d0i3_msg = false; + + IVPU_PRINT_WA(disable_d0i3_msg); +} + static int ivpu_fw_update_global_range(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; @@ -299,6 +335,8 @@ int ivpu_fw_init(struct ivpu_device *vdev) if (ret) goto err_fw_release; + ivpu_fw_init_wa(vdev); + ret = ivpu_fw_mem_init(vdev); if (ret) goto err_fw_release; @@ -426,6 +464,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->vpu_telemetry_enable); ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n", boot_params->dvfs_mode); + ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n", + boot_params->d0i3_delayed_entry); ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_residency_time_us = %lld\n", boot_params->d0i3_residency_time_us); ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_entry_vpu_ts = %llu\n", @@ -510,6 +550,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev); boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev); boot_params->dvfs_mode = vdev->fw->dvfs_mode; + if (!IVPU_WA(disable_d0i3_msg)) + boot_params->d0i3_delayed_entry = 1; boot_params->d0i3_residency_time_us = 0; boot_params->d0i3_entry_vpu_ts = 0; diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index fd4809b56168f..b7694b1cbc02f 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -14,6 +14,7 @@ struct ivpu_hw_ops { int (*boot_fw)(struct ivpu_device *vdev); int (*power_down)(struct ivpu_device *vdev); bool (*is_idle)(struct ivpu_device *vdev); + int (*wait_for_idle)(struct ivpu_device *vdev); void (*wdt_disable)(struct ivpu_device *vdev); void (*diagnose_failure)(struct ivpu_device *vdev); u32 (*reg_pll_freq_get)(struct ivpu_device *vdev); @@ -86,6 +87,11 @@ static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev) return vdev->hw->ops->is_idle(vdev); }; +static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev) +{ + return vdev->hw->ops->wait_for_idle(vdev); +}; + static inline int ivpu_hw_power_down(struct ivpu_device *vdev) { ivpu_dbg(vdev, PM, "HW power down\n"); diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index 1b47d77b4c6ee..1c8c5715095be 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -37,7 +37,7 @@ #define TIMEOUT_US (150 * USEC_PER_MSEC) #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) -#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC) +#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) #define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \ (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, 
MMU_IRQ_0_INT)) | \ @@ -90,6 +90,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) vdev->timeout.tdr = 2000; vdev->timeout.reschedule_suspend = 10; vdev->timeout.autosuspend = 10; + vdev->timeout.d0i3_entry_msg = 5; } static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev) @@ -714,6 +715,11 @@ static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev) REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val); } +static int ivpu_hw_37xx_wait_for_idle(struct ivpu_device *vdev) +{ + return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); +} + static void ivpu_hw_37xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev) { vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); @@ -1001,6 +1007,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = { .info_init = ivpu_hw_37xx_info_init, .power_up = ivpu_hw_37xx_power_up, .is_idle = ivpu_hw_37xx_is_idle, + .wait_for_idle = ivpu_hw_37xx_wait_for_idle, .power_down = ivpu_hw_37xx_power_down, .boot_fw = ivpu_hw_37xx_boot_fw, .wdt_disable = ivpu_hw_37xx_wdt_disable, diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index 0eb9c827f6dcf..6a9672f650d18 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -39,6 +39,7 @@ #define TIMEOUT_US (150 * USEC_PER_MSEC) #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC) #define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC) +#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC) #define WEIGHTS_DEFAULT 0xf711f711u #define WEIGHTS_ATS_DEFAULT 0x0000f711u @@ -140,18 +141,21 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev) vdev->timeout.tdr = 2000000; vdev->timeout.reschedule_suspend = 1000; vdev->timeout.autosuspend = -1; + vdev->timeout.d0i3_entry_msg = 500; } else if (ivpu_is_simics(vdev)) { vdev->timeout.boot = 50; vdev->timeout.jsm = 500; vdev->timeout.tdr = 10000; vdev->timeout.reschedule_suspend = 10; vdev->timeout.autosuspend = -1; + vdev->timeout.d0i3_entry_msg = 100; } else { vdev->timeout.boot = 1000; vdev->timeout.jsm = 500; vdev->timeout.tdr = 2000; vdev->timeout.reschedule_suspend = 10; vdev->timeout.autosuspend = 10; + vdev->timeout.d0i3_entry_msg = 5; } } @@ -879,6 +883,11 @@ static bool ivpu_hw_40xx_is_idle(struct ivpu_device *vdev) REG_TEST_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, val); } +static int ivpu_hw_40xx_wait_for_idle(struct ivpu_device *vdev) +{ + return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US); +} + static void ivpu_hw_40xx_save_d0i3_entry_timestamp(struct ivpu_device *vdev) { vdev->hw->d0i3_entry_host_ts = ktime_get_boottime(); @@ -1168,6 +1177,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = { .info_init = ivpu_hw_40xx_info_init, .power_up = ivpu_hw_40xx_power_up, .is_idle = ivpu_hw_40xx_is_idle, + .wait_for_idle = ivpu_hw_40xx_wait_for_idle, .power_down = ivpu_hw_40xx_power_down, .boot_fw = ivpu_hw_40xx_boot_fw, .wdt_disable = ivpu_hw_40xx_wdt_disable, diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c index 35a689475c68d..8cea0dd731b91 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.c +++ b/drivers/accel/ivpu/ivpu_jsm_msg.c @@ -4,6 +4,7 @@ */ #include "ivpu_drv.h" +#include "ivpu_hw.h" #include "ivpu_ipc.h" #include "ivpu_jsm_msg.h" @@ -260,3 +261,23 @@ int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid) return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm); } + +int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev) +{ + struct vpu_jsm_msg 
req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER }; + struct vpu_jsm_msg resp; + int ret; + + if (IVPU_WA(disable_d0i3_msg)) + return 0; + + req.payload.pwr_d0i3_enter.send_response = 1; + + ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, + &resp, VPU_IPC_CHAN_GEN_CMD, + vdev->timeout.d0i3_entry_msg); + if (ret) + return ret; + + return ivpu_hw_wait_for_idle(vdev); +} diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h index 66979a948c7c6..ae75e5dbcc41d 100644 --- a/drivers/accel/ivpu/ivpu_jsm_msg.h +++ b/drivers/accel/ivpu/ivpu_jsm_msg.h @@ -22,4 +22,5 @@ int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destinati int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask, u64 trace_hw_component_mask); int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid); +int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev); #endif diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 568e4295dc56b..2d05fb9e31970 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -15,6 +15,7 @@ #include "ivpu_fw.h" #include "ivpu_ipc.h" #include "ivpu_job.h" +#include "ivpu_jsm_msg.h" #include "ivpu_mmu.h" #include "ivpu_pm.h" @@ -153,6 +154,8 @@ int ivpu_pm_suspend_cb(struct device *dev) } } + ivpu_jsm_pwr_d0i3_enter(vdev); + ivpu_suspend(vdev); ivpu_pm_prepare_warm_boot(vdev); @@ -188,6 +191,7 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct ivpu_device *vdev = to_ivpu_device(drm); + bool hw_is_idle = true; int ret; ivpu_dbg(vdev, PM, "Runtime suspend..\n"); @@ -200,11 +204,16 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev) return -EAGAIN; } + if (!vdev->pm->suspend_reschedule_counter) + hw_is_idle = false; + else if (ivpu_jsm_pwr_d0i3_enter(vdev)) + hw_is_idle = false; + ret = ivpu_suspend(vdev); if (ret) ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret); - if (!vdev->pm->suspend_reschedule_counter) { + if (!hw_is_idle) { ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n"); ivpu_pm_prepare_cold_boot(vdev); } else { From 79d94360d50fcd487edcfe118a47a2881534923f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 23 Oct 2023 07:58:33 -0300 Subject: [PATCH 023/117] drm/v3d: wait for all jobs to finish before unregistering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we are only warning the user if the BIN or RENDER jobs don't finish before we unregister V3D. We must wait for all jobs to finish before unregistering. Therefore, warn the user if TFU or CSD jobs are not done by the time the driver is unregistered. 
Signed-off-by: Maíra Canal Reviewed-by: Iago Toral Quiroga Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20231023105927.101502-1-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_gem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 2e94ce788c714..afa7d170d1fff 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -1072,6 +1072,8 @@ v3d_gem_destroy(struct drm_device *dev) */ WARN_ON(v3d->bin_job); WARN_ON(v3d->render_job); + WARN_ON(v3d->tfu_job); + WARN_ON(v3d->csd_job); drm_mm_takedown(&v3d->mm); From b2139fb5051554a7f297e4ad584ef1bc26c76d5d Mon Sep 17 00:00:00 2001 From: Steven Price Date: Fri, 20 Oct 2023 11:44:05 +0100 Subject: [PATCH 024/117] drm/panfrost: Remove incorrect IS_ERR() check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sg_page_iter_page() doesn't return an error code, so the IS_ERR() check is wrong and the error path will never be executed. This also allows simplifying the code to remove the local variable 'page'. CC: Adrián Larumbe Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/376713ff-9a4f-4ea3-b097-fb5efb685d95@moroto.mountain Signed-off-by: Steven Price Reviewed-by: Adrián Larumbe Tested-by: Adrián Larumbe Link: https://patchwork.freedesktop.org/patch/msgid/20231020104405.53992-1-steven.price@arm.com --- drivers/gpu/drm/panfrost/panfrost_dump.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index e7942ac449c68..47751302f1bc9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -220,16 +220,8 @@ void panfrost_core_dump(struct panfrost_job *job) iter.hdr->bomap.data[0] = bomap - bomap_start; - for_each_sgtable_page(bo->base.sgt, &page_iter, 0) { - struct page *page = sg_page_iter_page(&page_iter); - - if (!IS_ERR(page)) { - *bomap++ = page_to_phys(page); - } else { - dev_err(pfdev->dev, "Panfrost Dump: wrong page\n"); - *bomap++ = 0; - } - } + for_each_sgtable_page(bo->base.sgt, &page_iter, 0) + *bomap++ = page_to_phys(sg_page_iter_page(&page_iter)); iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT; From ca34d816558c3e4c3f8fe037b5a6b16c944693de Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Wed, 20 Sep 2023 15:57:16 +0300 Subject: [PATCH 025/117] Revert "drm/tidss: Annotate dma-fence critical section in commit path" This reverts commit 4d56a4f08391857ba93465de489707b66adad114. The DMA-fence annotations cause a lockdep warning (see below). As per https://patchwork.freedesktop.org/patch/462170/ it sounds like the annotations don't work correctly. ====================================================== WARNING: possible circular locking dependency detected 6.6.0-rc2+ #1 Not tainted ------------------------------------------------------ kmstest/733 is trying to acquire lock: ffff8000819377f0 (fs_reclaim){+.+.}-{0:0}, at: __kmem_cache_alloc_node+0x58/0x2d4 but task is already holding lock: ffff800081a06aa0 (dma_fence_map){++++}-{0:0}, at: tidss_atomic_commit_tail+0x20/0xc0 [tidss] which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (dma_fence_map){++++}-{0:0}: __dma_fence_might_wait+0x5c/0xd0 dma_resv_lockdep+0x1a4/0x32c do_one_initcall+0x84/0x2fc kernel_init_freeable+0x28c/0x4c4 kernel_init+0x24/0x1dc ret_from_fork+0x10/0x20 -> #1 (mmu_notifier_invalidate_range_start){+.+.}-{0:0}: fs_reclaim_acquire+0x70/0xe4 __kmem_cache_alloc_node+0x58/0x2d4 kmalloc_trace+0x38/0x78 __kthread_create_worker+0x3c/0x150 kthread_create_worker+0x64/0x8c workqueue_init+0x1e8/0x2f0 kernel_init_freeable+0x11c/0x4c4 kernel_init+0x24/0x1dc ret_from_fork+0x10/0x20 -> #0 (fs_reclaim){+.+.}-{0:0}: __lock_acquire+0x1370/0x20d8 lock_acquire+0x1e8/0x308 fs_reclaim_acquire+0xd0/0xe4 __kmem_cache_alloc_node+0x58/0x2d4 __kmalloc_node_track_caller+0x58/0xf0 kmemdup+0x34/0x60 regmap_bulk_write+0x64/0x2c0 tc358768_bridge_pre_enable+0x8c/0x12d0 [tc358768] drm_atomic_bridge_call_pre_enable+0x68/0x80 [drm] drm_atomic_bridge_chain_pre_enable+0x50/0x158 [drm] drm_atomic_helper_commit_modeset_enables+0x164/0x264 [drm_kms_helper] tidss_atomic_commit_tail+0x58/0xc0 [tidss] commit_tail+0xa0/0x188 [drm_kms_helper] drm_atomic_helper_commit+0x1a8/0x1c0 [drm_kms_helper] drm_atomic_commit+0xa8/0xe0 [drm] drm_mode_atomic_ioctl+0x9ec/0xc80 [drm] drm_ioctl_kernel+0xc4/0x170 [drm] drm_ioctl+0x234/0x4b0 [drm] drm_compat_ioctl+0x110/0x12c [drm] __arm64_compat_sys_ioctl+0x128/0x150 invoke_syscall+0x48/0x110 el0_svc_common.constprop.0+0x40/0xe0 do_el0_svc_compat+0x1c/0x38 el0_svc_compat+0x48/0xb4 el0t_32_sync_handler+0xb0/0x138 el0t_32_sync+0x194/0x198 other info that might help us debug this: Chain exists of: fs_reclaim --> mmu_notifier_invalidate_range_start --> dma_fence_map Possible unsafe locking scenario: CPU0 CPU1 ---- ---- rlock(dma_fence_map); lock(mmu_notifier_invalidate_range_start); lock(dma_fence_map); lock(fs_reclaim); *** DEADLOCK *** 3 locks held by kmstest/733: #0: ffff800082e5bba0 (crtc_ww_class_acquire){+.+.}-{0:0}, at: drm_mode_atomic_ioctl+0x118/0xc80 [drm] #1: ffff000004224c88 (crtc_ww_class_mutex){+.+.}-{3:3}, at: modeset_lock+0xdc/0x1a0 [drm] #2: ffff800081a06aa0 (dma_fence_map){++++}-{0:0}, at: tidss_atomic_commit_tail+0x20/0xc0 [tidss] stack backtrace: CPU: 0 PID: 733 Comm: kmstest Not tainted 6.6.0-rc2+ #1 Hardware name: Toradex Verdin AM62 on Verdin Development Board (DT) Call trace: dump_backtrace+0x98/0x118 show_stack+0x18/0x24 dump_stack_lvl+0x60/0xac dump_stack+0x18/0x24 print_circular_bug+0x288/0x368 check_noncircular+0x168/0x17c __lock_acquire+0x1370/0x20d8 lock_acquire+0x1e8/0x308 fs_reclaim_acquire+0xd0/0xe4 __kmem_cache_alloc_node+0x58/0x2d4 __kmalloc_node_track_caller+0x58/0xf0 kmemdup+0x34/0x60 regmap_bulk_write+0x64/0x2c0 tc358768_bridge_pre_enable+0x8c/0x12d0 [tc358768] drm_atomic_bridge_call_pre_enable+0x68/0x80 [drm] drm_atomic_bridge_chain_pre_enable+0x50/0x158 [drm] drm_atomic_helper_commit_modeset_enables+0x164/0x264 [drm_kms_helper] tidss_atomic_commit_tail+0x58/0xc0 [tidss] commit_tail+0xa0/0x188 [drm_kms_helper] drm_atomic_helper_commit+0x1a8/0x1c0 [drm_kms_helper] drm_atomic_commit+0xa8/0xe0 [drm] drm_mode_atomic_ioctl+0x9ec/0xc80 [drm] drm_ioctl_kernel+0xc4/0x170 [drm] drm_ioctl+0x234/0x4b0 [drm] drm_compat_ioctl+0x110/0x12c [drm] __arm64_compat_sys_ioctl+0x128/0x150 invoke_syscall+0x48/0x110 el0_svc_common.constprop.0+0x40/0xe0 do_el0_svc_compat+0x1c/0x38 el0_svc_compat+0x48/0xb4 el0t_32_sync_handler+0xb0/0x138 el0t_32_sync+0x194/0x198 Fixes: 4d56a4f08391 ("drm/tidss: Annotate dma-fence critical section in commit path") Reviewed-by: Aradhya Bhatia Signed-off-by: 
Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20230920-dma-fence-annotation-revert-v1-1-7ebf6f7f5bf6@ideasonboard.com --- drivers/gpu/drm/tidss/tidss_kms.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index c979ad1af2366..d096d8d2bc8f8 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -4,8 +4,6 @@ * Author: Tomi Valkeinen */ -#include - #include #include #include @@ -25,7 +23,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *ddev = old_state->dev; struct tidss_device *tidss = to_tidss(ddev); - bool fence_cookie = dma_fence_begin_signalling(); dev_dbg(ddev->dev, "%s\n", __func__); @@ -36,7 +33,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_enables(ddev, old_state); drm_atomic_helper_commit_hw_done(old_state); - dma_fence_end_signalling(fence_cookie); drm_atomic_helper_wait_for_flip_done(ddev, old_state); drm_atomic_helper_cleanup_planes(ddev, old_state); From 9d7c8c066916f231ca0ed4e4fce6c4b58ca3e451 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Wed, 20 Sep 2023 15:57:17 +0300 Subject: [PATCH 026/117] Revert "drm/omapdrm: Annotate dma-fence critical section in commit path" This reverts commit 250aa22920cd5d956a5d3e9c6a43d671c2bae217. The DMA-fence annotations cause a lockdep warning (see below). As per https://patchwork.freedesktop.org/patch/462170/ it sounds like the annotations don't work correctly. ====================================================== WARNING: possible circular locking dependency detected 6.5.0-rc2+ #2 Not tainted ------------------------------------------------------ kmstest/219 is trying to acquire lock: c4705838 (&hdmi->lock){+.+.}-{3:3}, at: hdmi5_bridge_mode_set+0x1c/0x50 but task is already holding lock: c11e1128 (dma_fence_map){++++}-{0:0}, at: omap_atomic_commit_tail+0x14/0xbc which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (dma_fence_map){++++}-{0:0}: __dma_fence_might_wait+0x48/0xb4 dma_resv_lockdep+0x1b8/0x2bc do_one_initcall+0x68/0x3b0 kernel_init_freeable+0x260/0x34c kernel_init+0x14/0x140 ret_from_fork+0x14/0x28 -> #1 (fs_reclaim){+.+.}-{0:0}: fs_reclaim_acquire+0x70/0xa8 __kmem_cache_alloc_node+0x3c/0x368 kmalloc_trace+0x28/0x58 _drm_do_get_edid+0x7c/0x35c hdmi5_bridge_get_edid+0xc8/0x1ac drm_bridge_connector_get_modes+0x64/0xc0 drm_helper_probe_single_connector_modes+0x170/0x528 drm_client_modeset_probe+0x208/0x1334 __drm_fb_helper_initial_config_and_unlock+0x30/0x548 omap_fbdev_client_hotplug+0x3c/0x6c drm_client_register+0x58/0x94 pdev_probe+0x544/0x6b0 platform_probe+0x58/0xbc really_probe+0xd8/0x3fc __driver_probe_device+0x94/0x1f4 driver_probe_device+0x2c/0xc4 __device_attach_driver+0xa4/0x11c bus_for_each_drv+0x84/0xdc __device_attach+0xac/0x20c bus_probe_device+0x8c/0x90 device_add+0x588/0x7e0 platform_device_add+0x110/0x24c platform_device_register_full+0x108/0x15c dss_bind+0x90/0xc0 try_to_bring_up_aggregate_device+0x1e0/0x2c8 __component_add+0xa4/0x174 hdmi5_probe+0x1c8/0x270 platform_probe+0x58/0xbc really_probe+0xd8/0x3fc __driver_probe_device+0x94/0x1f4 driver_probe_device+0x2c/0xc4 __device_attach_driver+0xa4/0x11c bus_for_each_drv+0x84/0xdc __device_attach+0xac/0x20c bus_probe_device+0x8c/0x90 deferred_probe_work_func+0x8c/0xd8 process_one_work+0x2ac/0x6e4 worker_thread+0x30/0x4ec kthread+0x100/0x124 ret_from_fork+0x14/0x28 -> #0 (&hdmi->lock){+.+.}-{3:3}: __lock_acquire+0x145c/0x29cc lock_acquire.part.0+0xb4/0x258 __mutex_lock+0x90/0x950 mutex_lock_nested+0x1c/0x24 hdmi5_bridge_mode_set+0x1c/0x50 drm_bridge_chain_mode_set+0x48/0x5c crtc_set_mode+0x188/0x1d0 omap_atomic_commit_tail+0x2c/0xbc commit_tail+0x9c/0x188 drm_atomic_helper_commit+0x158/0x18c drm_atomic_commit+0xa4/0xe8 drm_mode_atomic_ioctl+0x9a4/0xc38 drm_ioctl+0x210/0x4a8 sys_ioctl+0x138/0xf00 ret_fast_syscall+0x0/0x1c other info that might help us debug this: Chain exists of: &hdmi->lock --> fs_reclaim --> dma_fence_map Possible unsafe locking scenario: CPU0 CPU1 ---- ---- rlock(dma_fence_map); lock(fs_reclaim); lock(dma_fence_map); lock(&hdmi->lock); *** DEADLOCK *** 3 locks held by kmstest/219: #0: f1011de4 (crtc_ww_class_acquire){+.+.}-{0:0}, at: drm_mode_atomic_ioctl+0xf0/0xc38 #1: c47059c8 (crtc_ww_class_mutex){+.+.}-{3:3}, at: modeset_lock+0xf8/0x230 #2: c11e1128 (dma_fence_map){++++}-{0:0}, at: omap_atomic_commit_tail+0x14/0xbc stack backtrace: CPU: 1 PID: 219 Comm: kmstest Not tainted 6.5.0-rc2+ #2 Hardware name: Generic DRA74X (Flattened Device Tree) unwind_backtrace from show_stack+0x10/0x14 show_stack from dump_stack_lvl+0x58/0x70 dump_stack_lvl from check_noncircular+0x164/0x198 check_noncircular from __lock_acquire+0x145c/0x29cc __lock_acquire from lock_acquire.part.0+0xb4/0x258 lock_acquire.part.0 from __mutex_lock+0x90/0x950 __mutex_lock from mutex_lock_nested+0x1c/0x24 mutex_lock_nested from hdmi5_bridge_mode_set+0x1c/0x50 hdmi5_bridge_mode_set from drm_bridge_chain_mode_set+0x48/0x5c drm_bridge_chain_mode_set from crtc_set_mode+0x188/0x1d0 crtc_set_mode from omap_atomic_commit_tail+0x2c/0xbc omap_atomic_commit_tail from commit_tail+0x9c/0x188 commit_tail from drm_atomic_helper_commit+0x158/0x18c drm_atomic_helper_commit from drm_atomic_commit+0xa4/0xe8 drm_atomic_commit from drm_mode_atomic_ioctl+0x9a4/0xc38 drm_mode_atomic_ioctl from drm_ioctl+0x210/0x4a8 drm_ioctl from sys_ioctl+0x138/0xf00 sys_ioctl from ret_fast_syscall+0x0/0x1c Exception 
stack(0xf1011fa8 to 0xf1011ff0) 1fa0: 00466d58 be9ab510 00000003 c03864bc be9ab510 be9ab4e0 1fc0: 00466d58 be9ab510 c03864bc 00000036 00466ef0 00466fc0 00467020 00466f20 1fe0: b6bc7ef4 be9ab4d0 b6bbbb00 b6cb2cc0 Fixes: 250aa22920cd ("drm/omapdrm: Annotate dma-fence critical section in commit path") Reviewed-by: Aradhya Bhatia Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20230920-dma-fence-annotation-revert-v1-2-7ebf6f7f5bf6@ideasonboard.com --- drivers/gpu/drm/omapdrm/omap_drv.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index b2835b3ea6f58..6598c9c08ba11 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -69,7 +69,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; struct omap_drm_private *priv = dev->dev_private; - bool fence_cookie = dma_fence_begin_signalling(); dispc_runtime_get(priv->dispc); @@ -92,6 +91,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) omap_atomic_wait_for_completion(dev, old_state); drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_hw_done(old_state); } else { /* * OMAP3 DSS seems to have issues with the work-around above, @@ -101,11 +102,9 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_planes(dev, old_state, 0); drm_atomic_helper_commit_modeset_enables(dev, old_state); - } - drm_atomic_helper_commit_hw_done(old_state); - - dma_fence_end_signalling(fence_cookie); + drm_atomic_helper_commit_hw_done(old_state); + } /* * Wait for completion of the page flips to ensure that old buffers From bfc87f90614523612ae7e94d06798e82bc78ade6 Mon Sep 17 00:00:00 2001 From: Krystian Pradzynski Date: Sat, 28 Oct 2023 17:59:29 +0200 Subject: [PATCH 027/117] accel/ivpu/40xx: Allow to change profiling frequency Profiling freq is a debug firmware feature. It switches default clock to higher resolution for fine-grained and more accurate firmware task profiling. We already configure it during boot up of VPU4. Add debugfs knob and helpers per HW generation that allow to change it. For vpu37xx the implementation is empty as profiling frequency can only be changed on VPU4 or newer. 
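The flow added here is: a boolean written to the debugfs file selects the profiling clock, the per-generation hook records it, and a recovery is scheduled so the firmware reboots with the new boot parameter. A condensed sketch of that write path (names taken from the hunks below; illustrative only):

  /* Sketch of the debugfs write path: parse a bool, drive the per-generation
   * hook, then schedule recovery so the FW picks up the new clock setting. */
  static ssize_t example_profiling_freq_write(struct file *file, const char __user *ubuf,
                                              size_t size, loff_t *pos)
  {
          struct ivpu_device *vdev = file->private_data;
          bool enable;
          int ret;

          ret = kstrtobool_from_user(ubuf, size, &enable);
          if (ret < 0)
                  return ret;

          ivpu_hw_profiling_freq_drive(vdev, enable); /* empty stub on 37xx */
          ivpu_pm_schedule_recovery(vdev);            /* reboot FW with new boot param */

          return size;
  }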
Signed-off-by: Krystian Pradzynski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-2-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_debugfs.c | 29 +++++++++++++++++++++++++++++ drivers/accel/ivpu/ivpu_fw.c | 7 +++++++ drivers/accel/ivpu/ivpu_hw.h | 12 ++++++++++++ drivers/accel/ivpu/ivpu_hw_37xx.c | 13 +++++++++++++ drivers/accel/ivpu/ivpu_hw_40xx.c | 15 +++++++++++++++ 5 files changed, 76 insertions(+) diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index 6e0d568230241..19035230563d7 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -14,6 +14,7 @@ #include "ivpu_fw.h" #include "ivpu_fw_log.h" #include "ivpu_gem.h" +#include "ivpu_hw.h" #include "ivpu_jsm_msg.h" #include "ivpu_pm.h" @@ -176,6 +177,30 @@ static const struct file_operations fw_log_fops = { .release = single_release, }; +static ssize_t +fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf, + size_t size, loff_t *pos) +{ + struct ivpu_device *vdev = file->private_data; + bool enable; + int ret; + + ret = kstrtobool_from_user(user_buf, size, &enable); + if (ret < 0) + return ret; + + ivpu_hw_profiling_freq_drive(vdev, enable); + ivpu_pm_schedule_recovery(vdev); + + return size; +} + +static const struct file_operations fw_profiling_freq_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = fw_profiling_freq_fops_write, +}; + static ssize_t fw_trace_destination_mask_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos) @@ -319,4 +344,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev) debugfs_create_file("reset_engine", 0200, debugfs_root, vdev, &ivpu_reset_engine_fops); + + if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) + debugfs_create_file("fw_profiling_freq_drive", 0200, + debugfs_root, vdev, &fw_profiling_freq_fops); } diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 4a21be3a0c59b..3fd74cd4205f9 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -498,6 +498,13 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number; boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev); + /* + * This param is a debug firmware feature. It switches default clock + * to higher resolution one for fine-grained and more accurate firmware + * task profiling. 
+ */ + boot_params->perf_clk_frequency = ivpu_hw_profiling_freq_get(vdev); + /* * Uncached region of VPU address space, covers IPC buffers, job queues * and log buffers, programmable to L2$ Uncached by VPU MTRR diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index b7694b1cbc02f..aa52e5c29a65f 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -17,6 +17,8 @@ struct ivpu_hw_ops { int (*wait_for_idle)(struct ivpu_device *vdev); void (*wdt_disable)(struct ivpu_device *vdev); void (*diagnose_failure)(struct ivpu_device *vdev); + u32 (*profiling_freq_get)(struct ivpu_device *vdev); + void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable); u32 (*reg_pll_freq_get)(struct ivpu_device *vdev); u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev); u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev); @@ -104,6 +106,16 @@ static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev) vdev->hw->ops->wdt_disable(vdev); }; +static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev) +{ + return vdev->hw->ops->profiling_freq_get(vdev); +}; + +static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable) +{ + return vdev->hw->ops->profiling_freq_drive(vdev, enable); +}; + /* Register indirect accesses */ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev) { diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index 1c8c5715095be..81f81046d39a3 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -29,6 +29,7 @@ #define PLL_REF_CLK_FREQ (50 * 1000000) #define PLL_SIMULATION_FREQ (10 * 1000000) +#define PLL_PROF_CLK_FREQ (38400 * 1000) #define PLL_DEFAULT_EPP_VALUE 0x80 #define TIM_SAFE_ENABLE 0xf1d0dead @@ -769,6 +770,16 @@ static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev) REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val); } +static u32 ivpu_hw_37xx_profiling_freq_get(struct ivpu_device *vdev) +{ + return PLL_PROF_CLK_FREQ; +} + +static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable) +{ + /* Profiling freq - is a debug feature. Unavailable on VPU 37XX. 
*/ +} + static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config) { u32 pll_clock = PLL_REF_CLK_FREQ * ratio; @@ -1012,6 +1023,8 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = { .boot_fw = ivpu_hw_37xx_boot_fw, .wdt_disable = ivpu_hw_37xx_wdt_disable, .diagnose_failure = ivpu_hw_37xx_diagnose_failure, + .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get, + .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive, .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get, .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get, .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get, diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index 6a9672f650d18..a779b905f8b15 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -931,6 +931,19 @@ static void ivpu_hw_40xx_wdt_disable(struct ivpu_device *vdev) REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val); } +static u32 ivpu_hw_40xx_profiling_freq_get(struct ivpu_device *vdev) +{ + return vdev->hw->pll.profiling_freq; +} + +static void ivpu_hw_40xx_profiling_freq_drive(struct ivpu_device *vdev, bool enable) +{ + if (enable) + vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH; + else + vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT; +} + /* Register indirect accesses */ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev) { @@ -1182,6 +1195,8 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = { .boot_fw = ivpu_hw_40xx_boot_fw, .wdt_disable = ivpu_hw_40xx_wdt_disable, .diagnose_failure = ivpu_hw_40xx_diagnose_failure, + .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get, + .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive, .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get, .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get, .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get, From a06eb9be49a66e16e0c7819c630c1267c25b36dc Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sat, 28 Oct 2023 17:59:30 +0200 Subject: [PATCH 028/117] accel/ivpu: Assure device is off if power up sequence fail We should not leave device half enabled if there is failure somewhere it power up sequence. Fix device init and resume paths. 
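The resume-path part of the fix follows the usual goto-unwind pattern: every block that was brought up gets a matching label in reverse order, so a failure at any step leaves the device fully powered down. A reduced sketch (mirrors the ivpu_pm.c hunk below, without the cold-boot retry):

  /* Sketch: unwind in reverse order so a failed resume powers the HW back down. */
  static int example_resume(struct ivpu_device *vdev)
  {
          int ret;

          ret = ivpu_hw_power_up(vdev);
          if (ret)
                  goto err_power_down;

          ret = ivpu_mmu_enable(vdev);
          if (ret)
                  goto err_power_down;

          ret = ivpu_boot(vdev);
          if (ret)
                  goto err_mmu_disable;

          return 0;

  err_mmu_disable:
          ivpu_mmu_disable(vdev);
  err_power_down:
          ivpu_hw_power_down(vdev);
          return ret;
  }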
Reviewed-by: Karol Wachowski Signed-off-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-3-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 2 +- drivers/accel/ivpu/ivpu_pm.c | 30 +++++++++++++++++------------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 39bac45d88b5e..064cabef41bb4 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -543,7 +543,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev) /* Power up early so the rest of init code can access VPU registers */ ret = ivpu_hw_power_up(vdev); if (ret) - goto err_xa_destroy; + goto err_power_down; ret = ivpu_mmu_global_context_init(vdev); if (ret) diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 2d05fb9e31970..33d7c7731fe32 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -70,27 +70,31 @@ static int ivpu_resume(struct ivpu_device *vdev) ret = ivpu_hw_power_up(vdev); if (ret) { ivpu_err(vdev, "Failed to power up HW: %d\n", ret); - return ret; + goto err_power_down; } ret = ivpu_mmu_enable(vdev); if (ret) { ivpu_err(vdev, "Failed to resume MMU: %d\n", ret); - ivpu_hw_power_down(vdev); - return ret; + goto err_power_down; } ret = ivpu_boot(vdev); - if (ret) { - ivpu_mmu_disable(vdev); - ivpu_hw_power_down(vdev); - if (!ivpu_fw_is_cold_boot(vdev)) { - ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret); - ivpu_pm_prepare_cold_boot(vdev); - goto retry; - } else { - ivpu_err(vdev, "Failed to resume the FW: %d\n", ret); - } + if (ret) + goto err_mmu_disable; + + return 0; + +err_mmu_disable: + ivpu_mmu_disable(vdev); +err_power_down: + ivpu_hw_power_down(vdev); + + if (!ivpu_fw_is_cold_boot(vdev)) { + ivpu_pm_prepare_cold_boot(vdev); + goto retry; + } else { + ivpu_err(vdev, "Failed to resume the FW: %d\n", ret); } return ret; From 57c7e3e4800ad65048a7044b03723cd85d9595af Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sat, 28 Oct 2023 17:59:31 +0200 Subject: [PATCH 029/117] accel/ivpu: Stop job_done_thread on suspend Stop job_done thread when going to suspend. Use kthread_park() instead of kthread_stop() to avoid memory allocation and potential failure on resume. Use separate function as thread wake up condition. Use spin lock to assure rx_msg_list is properly protected against concurrent access. This avoid race condition when the rx_msg_list list is modified and read in ivpu_ipc_recive() at the same time. 
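The subtle part is the wake-up condition: it reads rx_msg_list, so it must take the same spin lock as the writers, otherwise the wait condition can evaluate the list while it is being modified. A minimal sketch of such a locked condition helper (mirrors ivpu_ipc_rx_need_wakeup() in the hunk below):

  /* Sketch: evaluate the wake-up condition with rx_msg_lock held. */
  static int example_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
  {
          int ret = 0;

          if (IS_KTHREAD())
                  ret |= (kthread_should_stop() || kthread_should_park());

          spin_lock_irq(&cons->rx_msg_lock);
          ret |= !list_empty(&cons->rx_msg_list);
          spin_unlock_irq(&cons->rx_msg_lock);

          return ret;
  }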
Reviewed-by: Karol Wachowski Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-4-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 2 ++ drivers/accel/ivpu/ivpu_ipc.c | 17 +++++++++++++++-- drivers/accel/ivpu/ivpu_job.c | 20 ++++++++++++++++---- drivers/accel/ivpu/ivpu_job.h | 2 ++ 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 064cabef41bb4..60277ff6af69c 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -378,6 +378,7 @@ int ivpu_boot(struct ivpu_device *vdev) enable_irq(vdev->irq); ivpu_hw_irq_enable(vdev); ivpu_ipc_enable(vdev); + ivpu_job_done_thread_enable(vdev); return 0; } @@ -389,6 +390,7 @@ int ivpu_shutdown(struct ivpu_device *vdev) disable_irq(vdev->irq); ivpu_ipc_disable(vdev); ivpu_mmu_disable(vdev); + ivpu_job_done_thread_disable(vdev); ret = ivpu_hw_power_down(vdev); if (ret) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index d069d1e1f91d5..270caef789bf8 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -202,6 +202,20 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v return ret; } +static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons) +{ + int ret = 0; + + if (IS_KTHREAD()) + ret |= (kthread_should_stop() || kthread_should_park()); + + spin_lock_irq(&cons->rx_msg_lock); + ret |= !list_empty(&cons->rx_msg_list); + spin_unlock_irq(&cons->rx_msg_lock); + + return ret; +} + int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms) @@ -211,8 +225,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, int wait_ret, ret = 0; wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq, - (IS_KTHREAD() && kthread_should_stop()) || - !list_empty(&cons->rx_msg_list), + ivpu_ipc_rx_need_wakeup(cons), msecs_to_jiffies(timeout_ms)); if (IS_KTHREAD() && kthread_should_stop()) diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 6e96c921547d5..a245b2d44db77 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -590,6 +590,11 @@ static int ivpu_job_done_thread(void *arg) ivpu_pm_schedule_recovery(vdev); } } + if (kthread_should_park()) { + ivpu_dbg(vdev, JOB, "Parked %s\n", __func__); + kthread_parkme(); + ivpu_dbg(vdev, JOB, "Unparked %s\n", __func__); + } } ivpu_ipc_consumer_del(vdev, &cons); @@ -610,9 +615,6 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev) return -EIO; } - get_task_struct(thread); - wake_up_process(thread); - vdev->job_done_thread = thread; return 0; @@ -620,6 +622,16 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev) void ivpu_job_done_thread_fini(struct ivpu_device *vdev) { + kthread_unpark(vdev->job_done_thread); kthread_stop(vdev->job_done_thread); - put_task_struct(vdev->job_done_thread); +} + +void ivpu_job_done_thread_disable(struct ivpu_device *vdev) +{ + kthread_park(vdev->job_done_thread); +} + +void ivpu_job_done_thread_enable(struct ivpu_device *vdev) +{ + kthread_unpark(vdev->job_done_thread); } diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index aa1f0b9479b0b..a8e914e5affcd 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -61,6 +61,8 @@ void 
ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev); int ivpu_job_done_thread_init(struct ivpu_device *vdev); void ivpu_job_done_thread_fini(struct ivpu_device *vdev); +void ivpu_job_done_thread_disable(struct ivpu_device *vdev); +void ivpu_job_done_thread_enable(struct ivpu_device *vdev); void ivpu_jobs_abort_all(struct ivpu_device *vdev); From ba6b035daac8c4fa5da1f14f262d97e0ffa45a6d Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Sat, 28 Oct 2023 17:59:32 +0200 Subject: [PATCH 030/117] accel/ivpu: Abort pending rx ipc on reset Waking up process, which wait for particular condition, will go to sleep again on wake_up() if the condition is not met. Add abort flag to wake up IPC receivers, which will finish with -ECANCELED error. This is only needed for reset, run time power management prevent to suspend VPU when there is pending IPC processing or pending job. Reviewed-by: Karol Wachowski Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-5-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_ipc.c | 20 +++++++++++++++++--- drivers/accel/ivpu/ivpu_ipc.h | 3 ++- drivers/accel/ivpu/ivpu_job.c | 1 + 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 270caef789bf8..255f2b8b0b5e3 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -148,6 +148,7 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, cons->channel = channel; cons->tx_vpu_addr = 0; cons->request_id = 0; + cons->aborted = false; spin_lock_init(&cons->rx_msg_lock); INIT_LIST_HEAD(&cons->rx_msg_list); init_waitqueue_head(&cons->rx_msg_wq); @@ -169,7 +170,8 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c spin_lock_irq(&cons->rx_msg_lock); list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) { list_del(&rx_msg->link); - ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); + if (!cons->aborted) + ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); atomic_dec(&ipc->rx_msg_count); kfree(rx_msg); } @@ -210,7 +212,7 @@ static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons) ret |= (kthread_should_stop() || kthread_should_park()); spin_lock_irq(&cons->rx_msg_lock); - ret |= !list_empty(&cons->rx_msg_list); + ret |= !list_empty(&cons->rx_msg_list) || cons->aborted; spin_unlock_irq(&cons->rx_msg_lock); return ret; @@ -244,6 +246,12 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, return -EAGAIN; } list_del(&rx_msg->link); + if (cons->aborted) { + spin_unlock_irq(&cons->rx_msg_lock); + ret = -ECANCELED; + goto out; + } + spin_unlock_irq(&cons->rx_msg_lock); if (ipc_buf) @@ -261,6 +269,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, } ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); +out: atomic_dec(&ipc->rx_msg_count); kfree(rx_msg); @@ -522,8 +531,12 @@ void ivpu_ipc_disable(struct ivpu_device *vdev) mutex_unlock(&ipc->lock); spin_lock_irqsave(&ipc->cons_list_lock, flags); - list_for_each_entry_safe(cons, c, &ipc->cons_list, link) + list_for_each_entry_safe(cons, c, &ipc->cons_list, link) { + spin_lock(&cons->rx_msg_lock); + cons->aborted = true; + spin_unlock(&cons->rx_msg_lock); wake_up(&cons->rx_msg_wq); + } spin_unlock_irqrestore(&ipc->cons_list_lock, flags); } @@ -532,6 +545,7 @@ void ivpu_ipc_reset(struct ivpu_device *vdev) struct ivpu_ipc_info 
*ipc = vdev->ipc; mutex_lock(&ipc->lock); + drm_WARN_ON(&vdev->drm, ipc->on); memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx)); memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx)); diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h index 6918db23daa4b..a380787f7222a 100644 --- a/drivers/accel/ivpu/ivpu_ipc.h +++ b/drivers/accel/ivpu/ivpu_ipc.h @@ -47,8 +47,9 @@ struct ivpu_ipc_consumer { u32 channel; u32 tx_vpu_addr; u32 request_id; + bool aborted; - spinlock_t rx_msg_lock; /* Protects rx_msg_list */ + spinlock_t rx_msg_lock; /* Protects rx_msg_list and aborted */ struct list_head rx_msg_list; wait_queue_head_t rx_msg_wq; }; diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index a245b2d44db77..15a408fad4945 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -578,6 +578,7 @@ static int ivpu_job_done_thread(void *arg) ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET); while (!kthread_should_stop()) { + cons.aborted = false; timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr; jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa); ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout); From e013aa9ab01b400cccc6c3e8b969b8e7f10bc6cb Mon Sep 17 00:00:00 2001 From: Karol Wachowski Date: Sat, 28 Oct 2023 17:59:33 +0200 Subject: [PATCH 031/117] accel/ivpu: Print CMDQ errors after consumer timeout Add checking of error reason bits in IVPU_MMU_CMDQ_CONS register when waiting for consumer timeout occurred. Signed-off-by: Karol Wachowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-6-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_mmu.c | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 2538c78fbebe2..b15cf9cc9c433 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -230,7 +230,12 @@ (REG_FLD(IVPU_MMU_REG_GERROR, MSI_PRIQ_ABT)) | \ (REG_FLD(IVPU_MMU_REG_GERROR, MSI_ABT))) -static char *ivpu_mmu_event_to_str(u32 cmd) +#define IVPU_MMU_CERROR_NONE 0x0 +#define IVPU_MMU_CERROR_ILL 0x1 +#define IVPU_MMU_CERROR_ABT 0x2 +#define IVPU_MMU_CERROR_ATC_INV_SYNC 0x3 + +static const char *ivpu_mmu_event_to_str(u32 cmd) { switch (cmd) { case IVPU_MMU_EVT_F_UUT: @@ -276,6 +281,22 @@ static char *ivpu_mmu_event_to_str(u32 cmd) } } +static const char *ivpu_mmu_cmdq_err_to_str(u32 err) +{ + switch (err) { + case IVPU_MMU_CERROR_NONE: + return "No CMDQ Error"; + case IVPU_MMU_CERROR_ILL: + return "Illegal command"; + case IVPU_MMU_CERROR_ABT: + return "External abort on CMDQ read"; + case IVPU_MMU_CERROR_ATC_INV_SYNC: + return "Sync failed to complete ATS invalidation"; + default: + return "Unknown CMDQ Error"; + } +} + static void ivpu_mmu_config_check(struct ivpu_device *vdev) { u32 val_ref; @@ -492,8 +513,15 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev) REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod); ret = ivpu_mmu_cmdq_wait_for_cons(vdev); - if (ret) - ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret); + if (ret) { + u32 err; + + val = REGV_RD32(IVPU_MMU_REG_CMDQ_CONS); + err = REG_GET_FLD(IVPU_MMU_REG_CMDQ_CONS, ERR, val); + + ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret, + ivpu_mmu_cmdq_err_to_str(err)); + } return ret; } From 
3bcc5209ba6af1019487b54b30fc226ecf79193d Mon Sep 17 00:00:00 2001 From: Karol Wachowski Date: Sat, 28 Oct 2023 17:59:34 +0200 Subject: [PATCH 032/117] accel/ivpu: Make DMA allocations for MMU600 write combined Previously using dma_alloc_wc() API we created cache coherent (mapped as write-back) mappings. Because we disable MMU600 snooping it was required to do costly page walk and cache flushes after each page table modification. With write-combined buffers it's possible to do a single write memory barrier to flush write-combined buffer to memory which simplifies the driver and significantly reduce time of map/unmap operations. Mapping time of 255 MB is reduced from 2.5 ms to 500 us. Signed-off-by: Karol Wachowski Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-7-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_mmu_context.c | 115 ++++++++++++++------------ 1 file changed, 63 insertions(+), 52 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index 0c8c653519193..7d8005bc78d2f 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -5,6 +5,9 @@ #include #include +#include + +#include #include "ivpu_drv.h" #include "ivpu_hw.h" @@ -38,12 +41,57 @@ #define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \ IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID) +static void *ivpu_pgtable_alloc_page(struct ivpu_device *vdev, dma_addr_t *dma) +{ + dma_addr_t dma_addr; + struct page *page; + void *cpu; + + page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + if (!page) + return NULL; + + set_pages_array_wc(&page, 1); + + dma_addr = dma_map_page(vdev->drm.dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(vdev->drm.dev, dma_addr)) + goto err_free_page; + + cpu = vmap(&page, 1, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (!cpu) + goto err_dma_unmap_page; + + + *dma = dma_addr; + return cpu; + +err_dma_unmap_page: + dma_unmap_page(vdev->drm.dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + +err_free_page: + put_page(page); + return NULL; +} + +static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr) +{ + struct page *page; + + if (cpu_addr) { + page = vmalloc_to_page(cpu_addr); + vunmap(cpu_addr); + dma_unmap_page(vdev->drm.dev, dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK, PAGE_SIZE, + DMA_BIDIRECTIONAL); + set_pages_array_wb(&page, 1); + put_page(page); + } +} + static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) { dma_addr_t pgd_dma; - pgtable->pgd_dma_ptr = dma_alloc_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, - GFP_KERNEL); + pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma); if (!pgtable->pgd_dma_ptr) return -ENOMEM; @@ -52,13 +100,6 @@ static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtab return 0; } -static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, u64 *cpu_addr, dma_addr_t dma_addr) -{ - if (cpu_addr) - dma_free_coherent(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, cpu_addr, - dma_addr & ~IVPU_MMU_ENTRY_FLAGS_MASK); -} - static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable) { int pgd_idx, pud_idx, pmd_idx; @@ -83,19 +124,19 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt pte_dma_ptr = 
pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx]; pte_dma = pgtable->pmd_ptrs[pgd_idx][pud_idx][pmd_idx]; - ivpu_mmu_pgtable_free(vdev, pte_dma_ptr, pte_dma); + ivpu_pgtable_free_page(vdev, pte_dma_ptr, pte_dma); } kfree(pgtable->pte_ptrs[pgd_idx][pud_idx]); - ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma); + ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma); } kfree(pgtable->pmd_ptrs[pgd_idx]); kfree(pgtable->pte_ptrs[pgd_idx]); - ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma); + ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma); } - ivpu_mmu_pgtable_free(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma); + ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma); } static u64* @@ -107,7 +148,7 @@ ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, if (pud_dma_ptr) return pud_dma_ptr; - pud_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL); + pud_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pud_dma); if (!pud_dma_ptr) return NULL; @@ -130,7 +171,7 @@ ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, kfree(pgtable->pmd_ptrs[pgd_idx]); err_free_pud_dma_ptr: - ivpu_mmu_pgtable_free(vdev, pud_dma_ptr, pud_dma); + ivpu_pgtable_free_page(vdev, pud_dma_ptr, pud_dma); return NULL; } @@ -144,7 +185,7 @@ ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, if (pmd_dma_ptr) return pmd_dma_ptr; - pmd_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL); + pmd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pmd_dma); if (!pmd_dma_ptr) return NULL; @@ -159,7 +200,7 @@ ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, return pmd_dma_ptr; err_free_pmd_dma_ptr: - ivpu_mmu_pgtable_free(vdev, pmd_dma_ptr, pmd_dma); + ivpu_pgtable_free_page(vdev, pmd_dma_ptr, pmd_dma); return NULL; } @@ -173,7 +214,7 @@ ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, if (pte_dma_ptr) return pte_dma_ptr; - pte_dma_ptr = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL); + pte_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pte_dma); if (!pte_dma_ptr) return NULL; @@ -248,38 +289,6 @@ static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_ad ctx->pgtable.pte_ptrs[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID; } -static void -ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size) -{ - struct ivpu_mmu_pgtable *pgtable = &ctx->pgtable; - u64 end_addr = vpu_addr + size; - - /* Align to PMD entry (2 MB) */ - vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1); - - while (vpu_addr < end_addr) { - int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr); - u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE; - - while (vpu_addr < end_addr && vpu_addr < pud_end) { - int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr); - u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE; - - while (vpu_addr < end_addr && vpu_addr < pmd_end) { - int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr); - - clflush_cache_range(pgtable->pte_ptrs[pgd_idx][pud_idx][pmd_idx], - IVPU_MMU_PGTABLE_SIZE); - vpu_addr += IVPU_MMU_PTE_MAP_SIZE; - } - clflush_cache_range(pgtable->pmd_ptrs[pgd_idx][pud_idx], - IVPU_MMU_PGTABLE_SIZE); - } - clflush_cache_range(pgtable->pud_ptrs[pgd_idx], IVPU_MMU_PGTABLE_SIZE); - } - clflush_cache_range(pgtable->pgd_dma_ptr, IVPU_MMU_PGTABLE_SIZE); -} - static int ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct 
ivpu_mmu_context *ctx, u64 vpu_addr, dma_addr_t dma_addr, size_t size, u64 prot) @@ -352,10 +361,11 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, mutex_unlock(&ctx->lock); return ret; } - ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size); vpu_addr += size; } + /* Ensure page table modifications are flushed from wc buffers to memory */ + wmb(); mutex_unlock(&ctx->lock); ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); @@ -381,10 +391,11 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct size_t size = sg_dma_len(sg) + sg->offset; ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size); - ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size); vpu_addr += size; } + /* Ensure page table modifications are flushed from wc buffers to memory */ + wmb(); mutex_unlock(&ctx->lock); ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); From 0c287c27fbffa2737f155af73646a794e540e1d3 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Sat, 28 Oct 2023 17:59:35 +0200 Subject: [PATCH 033/117] accel/ivpu: Simplify MMU SYNC command CMD_SYNC does not need any args as we poll for completion anyway. Signed-off-by: Jacek Lawrynowicz Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-8-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_mmu.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index b15cf9cc9c433..b46f3743c0e7d 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -500,10 +500,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev) u64 val; int ret; - val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) | - FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) | - FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) | - FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf); + val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC); ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0); if (ret) From 2fc1a50fa4472301a3b262fcfc78744c841d5587 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Sat, 28 Oct 2023 17:59:36 +0200 Subject: [PATCH 034/117] accel/ivpu: Rename VPU to NPU in product strings VPU was rebranded as NPU (Neural Processing Unit) so user facing strings have to be updated but the code remains as is and the module is still called intel_vpu.ko. Signed-off-by: Jacek Lawrynowicz Reviewed-by: Stanislaw Gruszka Reviewed-by: Jeffrey Hugo Signed-off-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-9-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/Kconfig | 9 +++++---- drivers/accel/ivpu/ivpu_drv.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig index 1a4c4ed9d1136..82749e0da524c 100644 --- a/drivers/accel/ivpu/Kconfig +++ b/drivers/accel/ivpu/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_ACCEL_IVPU - tristate "Intel VPU for Meteor Lake and newer" + tristate "Intel NPU (Neural Processing Unit)" depends on DRM_ACCEL depends on X86_64 && !UML depends on PCI && PCI_MSI @@ -9,8 +9,9 @@ config DRM_ACCEL_IVPU select SHMEM select GENERIC_ALLOCATOR help - Choose this option if you have a system that has an 14th generation Intel CPU - or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated - inference accelerator for Computer Vision and Deep Learning applications. 
+ Choose this option if you have a system with an 14th generation + Intel CPU (Meteor Lake) or newer. Intel NPU (formerly called Intel VPU) + is a CPU-integrated inference accelerator for Computer Vision + and Deep Learning applications. If "M" is selected, the module will be called intel_vpu. diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 1b482d1d66d95..79b193ffbc264 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -19,7 +19,7 @@ #include "ivpu_mmu_context.h" #define DRIVER_NAME "intel_vpu" -#define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)" +#define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)" #define DRIVER_DATE "20230117" #define PCI_DEVICE_ID_MTL 0x7d1d From 0da611a8702101814257a7c03f6caf0574c83b98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 8 Sep 2023 10:27:23 +0200 Subject: [PATCH 035/117] dma-buf: add dma_fence_timestamp helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a fence signals there is a very small race window where the timestamp isn't updated yet. sync_file solves this by busy waiting for the timestamp to appear, but on other ocassions didn't handled this correctly. Provide a dma_fence_timestamp() helper function for this and use it in all appropriate cases. Another alternative would be to grab the spinlock when that happens. v2 by teddy: add a wait parameter to wait for the timestamp to show up, in case the accurate timestamp is needed and/or the timestamp is not based on ktime (e.g. hw timestamp) v3 chk: drop the parameter again for unified handling Signed-off-by: Yunxiang Li Signed-off-by: Christian König Fixes: 1774baa64f93 ("drm/scheduler: Change scheduled fence track v2") Reviewed-by: Alex Deucher CC: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20230929104725.2358-1-christian.koenig@amd.com --- drivers/dma-buf/dma-fence-unwrap.c | 13 ++++--------- drivers/dma-buf/sync_file.c | 9 +++------ drivers/gpu/drm/scheduler/sched_main.c | 2 +- include/linux/dma-fence.h | 19 +++++++++++++++++++ 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c index c625bb2b5d563..628af51c81af3 100644 --- a/drivers/dma-buf/dma-fence-unwrap.c +++ b/drivers/dma-buf/dma-fence-unwrap.c @@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { if (!dma_fence_is_signaled(tmp)) { ++count; - } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, - &tmp->flags)) { - if (ktime_after(tmp->timestamp, timestamp)) - timestamp = tmp->timestamp; } else { - /* - * Use the current time if the fence is - * currently signaling. - */ - timestamp = ktime_get(); + ktime_t t = dma_fence_timestamp(tmp); + + if (ktime_after(t, timestamp)) + timestamp = t; } } } diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index af57799c86cee..2e9a316c596a3 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence, sizeof(info->driver_name)); info->status = dma_fence_get_status(fence); - while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && - !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) - cpu_relax(); info->timestamp_ns = - test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? 
- ktime_to_ns(fence->timestamp) : - ktime_set(0, 0); + dma_fence_is_signaled(fence) ? + ktime_to_ns(dma_fence_timestamp(fence)) : + ktime_set(0, 0); return info->status; } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index fd755e9534878..99797a8c836ac 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -935,7 +935,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) if (next) { next->s_fence->scheduled.timestamp = - job->s_fence->finished.timestamp; + dma_fence_timestamp(&job->s_fence->finished); /* start TO timer for next job */ drm_sched_start_timeout(sched); } diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 0d678e9a7b248..ebe78bd3d121d 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence, fence->error = error; } +/** + * dma_fence_timestamp - helper to get the completion timestamp of a fence + * @fence: fence to get the timestamp from. + * + * After a fence is signaled the timestamp is updated with the signaling time, + * but setting the timestamp can race with tasks waiting for the signaling. This + * helper busy waits for the correct timestamp to appear. + */ +static inline ktime_t dma_fence_timestamp(struct dma_fence *fence) +{ + if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) + return ktime_get(); + + while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) + cpu_relax(); + + return fence->timestamp; +} + signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout); signed long dma_fence_wait_any_timeout(struct dma_fence **fences, From 35963cf2cd25eeea8bdb4d02853dac1e66fb13a0 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 30 Oct 2023 20:24:35 -0700 Subject: [PATCH 036/117] drm/sched: Add drm_sched_wqueue_* helpers Add scheduler wqueue ready, stop, and start helpers to hide the implementation details of the scheduler from the drivers. v2: - s/sched_wqueue/sched_wqueue (Luben) - Remove the extra white line after the return-statement (Luben) - update drm_sched_wqueue_ready comment (Luben) Cc: Luben Tuikov Signed-off-by: Matthew Brost Reviewed-by: Luben Tuikov Link: https://lore.kernel.org/r/20231031032439.1558703-2-matthew.brost@intel.com Signed-off-by: Luben Tuikov --- .../drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 15 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 +++--- drivers/gpu/drm/msm/adreno/adreno_device.c | 6 ++- drivers/gpu/drm/scheduler/sched_main.c | 39 ++++++++++++++++++- include/drm/gpu_scheduler.h | 3 ++ 6 files changed, 59 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 625db444df1cb..10d56979fe3b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -290,7 +290,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus for (i = 0; i < adev->gfx.num_compute_rings; i++) { struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; - if (!(ring && ring->sched.thread)) + if (!(ring && drm_sched_wqueue_ready(&ring->sched))) continue; /* stop secheduler and drain ring. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index a4faea4fa0b59..a4c0bb358db71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1659,9 +1659,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; - kthread_park(ring->sched.thread); + drm_sched_wqueue_stop(&ring->sched); } seq_puts(m, "run ib test:\n"); @@ -1675,9 +1675,9 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; - kthread_unpark(ring->sched.thread); + drm_sched_wqueue_start(&ring->sched); } up_write(&adev->reset_domain->sem); @@ -1897,7 +1897,8 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) ring = adev->rings[val]; - if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread) + if (!ring || !ring->funcs->preempt_ib || + !drm_sched_wqueue_ready(&ring->sched)) return -EINVAL; /* the last preemption failed */ @@ -1915,7 +1916,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) goto pro_end; /* stop the scheduler */ - kthread_park(ring->sched.thread); + drm_sched_wqueue_stop(&ring->sched); /* preempt the IB */ r = amdgpu_ring_preempt_ib(ring); @@ -1949,7 +1950,7 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) failure: /* restart the scheduler */ - kthread_unpark(ring->sched.thread); + drm_sched_wqueue_start(&ring->sched); up_read(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 19b539cab7fec..a7f7afcb2bf0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4601,7 +4601,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; spin_lock(&ring->sched.job_list_lock); @@ -4740,7 +4740,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; /* Clear job fence from fence drv to avoid force_completion @@ -5282,7 +5282,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; drm_sched_stop(&ring->sched, job ? 
&job->base : NULL); @@ -5357,7 +5357,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; drm_sched_start(&ring->sched, true); @@ -5683,7 +5683,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; drm_sched_stop(&ring->sched, NULL); @@ -5811,7 +5811,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; - if (!ring || !ring->sched.thread) + if (!ring || !drm_sched_wqueue_ready(&ring->sched)) continue; drm_sched_start(&ring->sched, true); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index fa527935ffd42..8fa9ce3746b60 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -809,7 +809,8 @@ static void suspend_scheduler(struct msm_gpu *gpu) */ for (i = 0; i < gpu->nr_rings; i++) { struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; - kthread_park(sched->thread); + + drm_sched_wqueue_stop(sched); } } @@ -819,7 +820,8 @@ static void resume_scheduler(struct msm_gpu *gpu) for (i = 0; i < gpu->nr_rings; i++) { struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched; - kthread_unpark(sched->thread); + + drm_sched_wqueue_start(sched); } } diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 99797a8c836ac..54c1c5fe01ba9 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -439,7 +439,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) { struct drm_sched_job *s_job, *tmp; - kthread_park(sched->thread); + drm_sched_wqueue_stop(sched); /* * Reinsert back the bad job here - now it's safe as @@ -552,7 +552,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) spin_unlock(&sched->job_list_lock); } - kthread_unpark(sched->thread); + drm_sched_wqueue_start(sched); } EXPORT_SYMBOL(drm_sched_start); @@ -1252,3 +1252,38 @@ void drm_sched_increase_karma(struct drm_sched_job *bad) } } EXPORT_SYMBOL(drm_sched_increase_karma); + +/** + * drm_sched_wqueue_ready - Is the scheduler ready for submission + * + * @sched: scheduler instance + * + * Returns true if submission is ready + */ +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched) +{ + return !!sched->thread; +} +EXPORT_SYMBOL(drm_sched_wqueue_ready); + +/** + * drm_sched_wqueue_stop - stop scheduler submission + * + * @sched: scheduler instance + */ +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) +{ + kthread_park(sched->thread); +} +EXPORT_SYMBOL(drm_sched_wqueue_stop); + +/** + * drm_sched_wqueue_start - start scheduler submission + * + * @sched: scheduler instance + */ +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) +{ + kthread_unpark(sched->thread); +} +EXPORT_SYMBOL(drm_sched_wqueue_start); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 653e0eec9743e..7e622c18d3365 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -553,6 +553,9 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, void 
drm_sched_job_cleanup(struct drm_sched_job *job); void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched); +bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched); +void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched); +void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched); void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); From a6149f0393699308fb00149be913044977bceb56 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 30 Oct 2023 20:24:36 -0700 Subject: [PATCH 037/117] drm/sched: Convert drm scheduler to use a work queue rather than kthread In Xe, the new Intel GPU driver, a choice has made to have a 1 to 1 mapping between a drm_gpu_scheduler and drm_sched_entity. At first this seems a bit odd but let us explain the reasoning below. 1. In Xe the submission order from multiple drm_sched_entity is not guaranteed to be the same completion even if targeting the same hardware engine. This is because in Xe we have a firmware scheduler, the GuC, which allowed to reorder, timeslice, and preempt submissions. If a using shared drm_gpu_scheduler across multiple drm_sched_entity, the TDR falls apart as the TDR expects submission order == completion order. Using a dedicated drm_gpu_scheduler per drm_sched_entity solve this problem. 2. In Xe submissions are done via programming a ring buffer (circular buffer), a drm_gpu_scheduler provides a limit on number of jobs, if the limit of number jobs is set to RING_SIZE / MAX_SIZE_PER_JOB we get flow control on the ring for free. A problem with this design is currently a drm_gpu_scheduler uses a kthread for submission / job cleanup. This doesn't scale if a large number of drm_gpu_scheduler are used. To work around the scaling issue, use a worker rather than kthread for submission / job cleanup. 
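In work-queue terms the conversion means submission is no longer a long-running kthread loop; instead a work item is (re)queued whenever there is something to run. A condensed sketch of that shape (names mirror the drm_sched_run_job_* helpers in the diff below; illustrative only):

  /* Sketch: instead of waking a dedicated kthread, enqueue the run-job work item. */
  static void example_run_job_queue(struct drm_gpu_scheduler *sched)
  {
          if (!READ_ONCE(sched->pause_submit))
                  queue_work(sched->submit_wq, &sched->work_run_job);
  }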
v2: - (Rob Clark) Fix msm build - Pass in run work queue v3: - (Boris) don't have loop in worker v4: - (Tvrtko) break out submit ready, stop, start helpers into own patch v5: - (Boris) default to ordered work queue v6: - (Luben / checkpatch) fix alignment in msm_ringbuffer.c - (Luben) s/drm_sched_submit_queue/drm_sched_wqueue_enqueue - (Luben) Update comment for drm_sched_wqueue_enqueue - (Luben) Positive check for submit_wq in drm_sched_init - (Luben) s/alloc_submit_wq/own_submit_wq v7: - (Luben) s/drm_sched_wqueue_enqueue/drm_sched_run_job_queue v8: - (Luben) Adjust var names / comments Signed-off-by: Matthew Brost Reviewed-by: Luben Tuikov Link: https://lore.kernel.org/r/20231031032439.1558703-3-matthew.brost@intel.com Signed-off-by: Luben Tuikov --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2 +- drivers/gpu/drm/lima/lima_sched.c | 2 +- drivers/gpu/drm/msm/msm_ringbuffer.c | 2 +- drivers/gpu/drm/nouveau/nouveau_sched.c | 2 +- drivers/gpu/drm/panfrost/panfrost_job.c | 2 +- drivers/gpu/drm/scheduler/sched_main.c | 131 +++++++++++---------- drivers/gpu/drm/v3d/v3d_sched.c | 10 +- include/drm/gpu_scheduler.h | 14 ++- 9 files changed, 86 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a7f7afcb2bf0e..203dfcea82b18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2279,7 +2279,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) break; } - r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, ring->num_hw_submission, 0, timeout, adev->reset_domain->wq, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 9b79f218e21af..c4b04b0dee16a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu) { int ret; - ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, + ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, etnaviv_hw_jobs_limit, etnaviv_job_hang_limit, msecs_to_jiffies(500), NULL, NULL, diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 295f0353a02e5..aa030e1f7cdae 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) INIT_WORK(&pipe->recover_work, lima_sched_recover_work); - return drm_sched_init(&pipe->base, &lima_sched_ops, + return drm_sched_init(&pipe->base, &lima_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, 1, lima_job_hang_limit, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 95257ab0185dc..4968568e3b546 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -94,7 +94,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, /* currently managing hangcheck ourselves: */ sched_timeout = MAX_SCHEDULE_TIMEOUT; - ret = drm_sched_init(&ring->sched, &msm_sched_ops, + ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, num_hw_submissions, 0, sched_timeout, NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index 
e62b04109b2f7..7e64b5ef90fb2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -429,7 +429,7 @@ int nouveau_sched_init(struct nouveau_drm *drm) if (!drm->sched_wq) return -ENOMEM; - return drm_sched_init(sched, &nouveau_sched_ops, + return drm_sched_init(sched, &nouveau_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit, NULL, NULL, "nouveau_sched", drm->dev->dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index ecd2e035147fd..6d89e24322dbf 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -852,7 +852,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) js->queue[j].fence_context = dma_fence_context_alloc(1); ret = drm_sched_init(&js->queue[j].sched, - &panfrost_sched_ops, + &panfrost_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, nentries, 0, msecs_to_jiffies(JOB_TIMEOUT_MS), diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 54c1c5fe01ba9..d1ae05bded15c 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -48,7 +48,6 @@ * through the jobs entity pointer. */ -#include #include #include #include @@ -256,6 +255,16 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL; } +/** + * drm_sched_run_job_queue - enqueue run-job work + * @sched: scheduler instance + */ +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) +{ + if (!READ_ONCE(sched->pause_submit)) + queue_work(sched->submit_wq, &sched->work_run_job); +} + /** * drm_sched_job_done - complete a job * @s_job: pointer to the job which is done @@ -275,7 +284,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result) dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence, result); dma_fence_put(&s_fence->finished); - wake_up_interruptible(&sched->wake_up_worker); + drm_sched_run_job_queue(sched); } /** @@ -874,7 +883,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) { if (drm_sched_can_queue(sched)) - wake_up_interruptible(&sched->wake_up_worker); + drm_sched_run_job_queue(sched); } /** @@ -985,60 +994,41 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, EXPORT_SYMBOL(drm_sched_pick_best); /** - * drm_sched_blocked - check if the scheduler is blocked + * drm_sched_run_job_work - main scheduler thread * - * @sched: scheduler instance - * - * Returns true if blocked, otherwise false. + * @w: run job work */ -static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) +static void drm_sched_run_job_work(struct work_struct *w) { - if (kthread_should_park()) { - kthread_parkme(); - return true; - } - - return false; -} - -/** - * drm_sched_main - main scheduler thread - * - * @param: scheduler instance - * - * Returns 0. 
- */ -static int drm_sched_main(void *param) -{ - struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; + struct drm_gpu_scheduler *sched = + container_of(w, struct drm_gpu_scheduler, work_run_job); + struct drm_sched_entity *entity; + struct drm_sched_job *cleanup_job; int r; - sched_set_fifo_low(current); + if (READ_ONCE(sched->pause_submit)) + return; - while (!kthread_should_stop()) { - struct drm_sched_entity *entity = NULL; - struct drm_sched_fence *s_fence; - struct drm_sched_job *sched_job; - struct dma_fence *fence; - struct drm_sched_job *cleanup_job = NULL; + cleanup_job = drm_sched_get_cleanup_job(sched); + entity = drm_sched_select_entity(sched); - wait_event_interruptible(sched->wake_up_worker, - (cleanup_job = drm_sched_get_cleanup_job(sched)) || - (!drm_sched_blocked(sched) && - (entity = drm_sched_select_entity(sched))) || - kthread_should_stop()); + if (!entity && !cleanup_job) + return; /* No more work */ - if (cleanup_job) - sched->ops->free_job(cleanup_job); + if (cleanup_job) + sched->ops->free_job(cleanup_job); - if (!entity) - continue; + if (entity) { + struct dma_fence *fence; + struct drm_sched_fence *s_fence; + struct drm_sched_job *sched_job; sched_job = drm_sched_entity_pop_job(entity); - if (!sched_job) { complete_all(&entity->entity_idle); - continue; + if (!cleanup_job) + return; /* No more work */ + goto again; } s_fence = sched_job->s_fence; @@ -1069,7 +1059,9 @@ static int drm_sched_main(void *param) wake_up(&sched->job_scheduled); } - return 0; + +again: + drm_sched_run_job_queue(sched); } /** @@ -1077,6 +1069,8 @@ static int drm_sched_main(void *param) * * @sched: scheduler instance * @ops: backend operations for this scheduler + * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is + * allocated and used * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT * @hw_submission: number of hw submissions that can be in flight * @hang_limit: number of times to allow a job to hang before dropping it @@ -1091,6 +1085,7 @@ static int drm_sched_main(void *param) */ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, + struct workqueue_struct *submit_wq, u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev) @@ -1121,14 +1116,22 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, return 0; } + if (submit_wq) { + sched->submit_wq = submit_wq; + sched->own_submit_wq = false; + } else { + sched->submit_wq = alloc_ordered_workqueue(name, 0); + if (!sched->submit_wq) + return -ENOMEM; + + sched->own_submit_wq = true; + } + ret = -ENOMEM; sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq), GFP_KERNEL | __GFP_ZERO); - if (!sched->sched_rq) { - drm_err(sched, "%s: out of memory for sched_rq\n", __func__); - return -ENOMEM; - } + if (!sched->sched_rq) + goto Out_free; sched->num_rqs = num_rqs; - ret = -ENOMEM; for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) { sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); if (!sched->sched_rq[i]) @@ -1136,31 +1139,26 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, drm_sched_rq_init(sched, sched->sched_rq[i]); } - init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); INIT_LIST_HEAD(&sched->pending_list); spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, 
drm_sched_job_timedout); + INIT_WORK(&sched->work_run_job, drm_sched_run_job_work); atomic_set(&sched->_score, 0); atomic64_set(&sched->job_id_count, 0); - - /* Each scheduler will run on a seperate kernel thread */ - sched->thread = kthread_run(drm_sched_main, sched, sched->name); - if (IS_ERR(sched->thread)) { - ret = PTR_ERR(sched->thread); - sched->thread = NULL; - DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name); - goto Out_unroll; - } + sched->pause_submit = false; sched->ready = true; return 0; Out_unroll: for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--) kfree(sched->sched_rq[i]); +Out_free: kfree(sched->sched_rq); sched->sched_rq = NULL; + if (sched->own_submit_wq) + destroy_workqueue(sched->submit_wq); drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__); return ret; } @@ -1178,8 +1176,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) struct drm_sched_entity *s_entity; int i; - if (sched->thread) - kthread_stop(sched->thread); + drm_sched_wqueue_stop(sched); for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { struct drm_sched_rq *rq = sched->sched_rq[i]; @@ -1202,6 +1199,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) /* Confirm no work left behind accessing device structures */ cancel_delayed_work_sync(&sched->work_tdr); + if (sched->own_submit_wq) + destroy_workqueue(sched->submit_wq); sched->ready = false; kfree(sched->sched_rq); sched->sched_rq = NULL; @@ -1262,7 +1261,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma); */ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched) { - return !!sched->thread; + return sched->ready; } EXPORT_SYMBOL(drm_sched_wqueue_ready); @@ -1273,7 +1272,8 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready); */ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) { - kthread_park(sched->thread); + WRITE_ONCE(sched->pause_submit, true); + cancel_work_sync(&sched->work_run_job); } EXPORT_SYMBOL(drm_sched_wqueue_stop); @@ -1284,6 +1284,7 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop); */ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) { - kthread_unpark(sched->thread); + WRITE_ONCE(sched->pause_submit, false); + queue_work(sched->submit_wq, &sched->work_run_job); } EXPORT_SYMBOL(drm_sched_wqueue_start); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 038e1ae589c71..0b6696b0d882f 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -388,7 +388,7 @@ v3d_sched_init(struct v3d_dev *v3d) int ret; ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, - &v3d_bin_sched_ops, + &v3d_bin_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, @@ -397,7 +397,7 @@ v3d_sched_init(struct v3d_dev *v3d) return ret; ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, - &v3d_render_sched_ops, + &v3d_render_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, @@ -406,7 +406,7 @@ v3d_sched_init(struct v3d_dev *v3d) goto fail; ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, - &v3d_tfu_sched_ops, + &v3d_tfu_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, @@ -416,7 +416,7 @@ v3d_sched_init(struct v3d_dev *v3d) if (v3d_has_csd(v3d)) { ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, - &v3d_csd_sched_ops, + &v3d_csd_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, @@ -425,7 +425,7 @@ 
v3d_sched_init(struct v3d_dev *v3d) goto fail; ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, - &v3d_cache_clean_sched_ops, + &v3d_cache_clean_sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), NULL, diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 7e622c18d3365..4bf94aca26316 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -475,17 +475,16 @@ struct drm_sched_backend_ops { * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, * as there's usually one run-queue per priority, but could be less. * @sched_rq: An allocated array of run-queues of size @num_rqs; - * @wake_up_worker: the wait queue on which the scheduler sleeps until a job - * is ready to be scheduled. * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler * waits on this wait queue until all the scheduled jobs are * finished. * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. + * @submit_wq: workqueue used to queue @work_run_job * @timeout_wq: workqueue used to queue @work_tdr + * @work_run_job: work which calls run_job op of each scheduler. * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the * timeout interval is over. - * @thread: the kthread on which the scheduler which run. * @pending_list: the list of jobs which are currently in the job queue. * @job_list_lock: lock to protect the pending_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked @@ -494,6 +493,8 @@ struct drm_sched_backend_ops { * @_score: score used when the driver doesn't provide one * @ready: marks if the underlying HW is ready to work * @free_guilty: A hit to time out handler to free the guilty job. + * @pause_submit: pause queuing of @work_run_job on @submit_wq + * @own_submit_wq: scheduler owns allocation of @submit_wq * @dev: system &struct device * * One scheduler is implemented for each hardware ring. @@ -505,13 +506,13 @@ struct drm_gpu_scheduler { const char *name; u32 num_rqs; struct drm_sched_rq **sched_rq; - wait_queue_head_t wake_up_worker; wait_queue_head_t job_scheduled; atomic_t hw_rq_count; atomic64_t job_id_count; + struct workqueue_struct *submit_wq; struct workqueue_struct *timeout_wq; + struct work_struct work_run_job; struct delayed_work work_tdr; - struct task_struct *thread; struct list_head pending_list; spinlock_t job_list_lock; int hang_limit; @@ -519,11 +520,14 @@ struct drm_gpu_scheduler { atomic_t _score; bool ready; bool free_guilty; + bool pause_submit; + bool own_submit_wq; struct device *dev; }; int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, + struct workqueue_struct *submit_wq, u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev); From f7fe64ad0f22ff034f8ebcfbd7299ee9cc9b57d7 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 30 Oct 2023 20:24:37 -0700 Subject: [PATCH 038/117] drm/sched: Split free_job into own work item Rather than call free_job and run_job in same work item have a dedicated work item for each. This aligns with the design and intended use of work queues. 
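As a sketch of the shape this takes (made-up my_* names; reap_one_finished_job() and my_run_job_work() are hypothetical helpers, not scheduler code), two single-purpose work items can share one ordered workqueue, so they never run concurrently yet can be queued and cancelled independently:

    struct my_sched {
        struct workqueue_struct *wq;    /* ordered: serializes both items */
        struct work_struct run_work;    /* calls the run_job op */
        struct work_struct free_work;   /* calls the free_job op */
    };

    static void my_free_job_work(struct work_struct *w)
    {
        struct my_sched *s = container_of(w, struct my_sched, free_work);

        reap_one_finished_job(s);       /* hypothetical: free one finished job */
    }

    /* at init time: */
    s->wq = alloc_ordered_workqueue("my-sched", 0);
    INIT_WORK(&s->run_work, my_run_job_work);   /* analogous worker for run_job */
    INIT_WORK(&s->free_work, my_free_job_work);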
v2: - Test for DMA_FENCE_FLAG_TIMESTAMP_BIT before setting timestamp in free_job() work item (Danilo) v3: - Drop forward dec of drm_sched_select_entity (Boris) - Return in drm_sched_run_job_work if entity NULL (Boris) v4: - Replace dequeue with peek and invert logic (Luben) - Wrap to 100 lines (Luben) - Update comments for *_queue / *_queue_if_ready functions (Luben) v5: - Drop peek argument, blindly reinit idle (Luben) - s/drm_sched_free_job_queue_if_ready/drm_sched_free_job_queue_if_done (Luben) - Update work_run_job & work_free_job kernel doc (Luben) v6: - Do not move drm_sched_select_entity in file (Luben) Signed-off-by: Matthew Brost Link: https://lore.kernel.org/r/20231031032439.1558703-4-matthew.brost@intel.com Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 146 +++++++++++++++++-------- include/drm/gpu_scheduler.h | 4 +- 2 files changed, 101 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index d1ae05bded15c..3b1b2f8eafe83 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -265,6 +265,32 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) queue_work(sched->submit_wq, &sched->work_run_job); } +/** + * drm_sched_free_job_queue - enqueue free-job work + * @sched: scheduler instance + */ +static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched) +{ + if (!READ_ONCE(sched->pause_submit)) + queue_work(sched->submit_wq, &sched->work_free_job); +} + +/** + * drm_sched_free_job_queue_if_done - enqueue free-job work if ready + * @sched: scheduler instance + */ +static void drm_sched_free_job_queue_if_done(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_job *job; + + spin_lock(&sched->job_list_lock); + job = list_first_entry_or_null(&sched->pending_list, + struct drm_sched_job, list); + if (job && dma_fence_is_signaled(&job->s_fence->finished)) + drm_sched_free_job_queue(sched); + spin_unlock(&sched->job_list_lock); +} + /** * drm_sched_job_done - complete a job * @s_job: pointer to the job which is done @@ -284,7 +310,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result) dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence, result); dma_fence_put(&s_fence->finished); - drm_sched_run_job_queue(sched); + drm_sched_free_job_queue(sched); } /** @@ -943,8 +969,10 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) typeof(*next), list); if (next) { - next->s_fence->scheduled.timestamp = - dma_fence_timestamp(&job->s_fence->finished); + if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, + &next->s_fence->scheduled.flags)) + next->s_fence->scheduled.timestamp = + dma_fence_timestamp(&job->s_fence->finished); /* start TO timer for next job */ drm_sched_start_timeout(sched); } @@ -994,7 +1022,40 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, EXPORT_SYMBOL(drm_sched_pick_best); /** - * drm_sched_run_job_work - main scheduler thread + * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready + * @sched: scheduler instance + */ +static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched) +{ + if (drm_sched_select_entity(sched)) + drm_sched_run_job_queue(sched); +} + +/** + * drm_sched_free_job_work - worker to call free_job + * + * @w: free job work + */ +static void drm_sched_free_job_work(struct work_struct *w) +{ + struct drm_gpu_scheduler *sched = + container_of(w, struct drm_gpu_scheduler, work_free_job); + 
struct drm_sched_job *cleanup_job; + + if (READ_ONCE(sched->pause_submit)) + return; + + cleanup_job = drm_sched_get_cleanup_job(sched); + if (cleanup_job) { + sched->ops->free_job(cleanup_job); + + drm_sched_free_job_queue_if_done(sched); + drm_sched_run_job_queue_if_ready(sched); + } +} + +/** + * drm_sched_run_job_work - worker to call run_job * * @w: run job work */ @@ -1003,65 +1064,51 @@ static void drm_sched_run_job_work(struct work_struct *w) struct drm_gpu_scheduler *sched = container_of(w, struct drm_gpu_scheduler, work_run_job); struct drm_sched_entity *entity; - struct drm_sched_job *cleanup_job; + struct dma_fence *fence; + struct drm_sched_fence *s_fence; + struct drm_sched_job *sched_job; int r; if (READ_ONCE(sched->pause_submit)) return; - cleanup_job = drm_sched_get_cleanup_job(sched); entity = drm_sched_select_entity(sched); + if (!entity) + return; - if (!entity && !cleanup_job) + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) { + complete_all(&entity->entity_idle); return; /* No more work */ + } - if (cleanup_job) - sched->ops->free_job(cleanup_job); - - if (entity) { - struct dma_fence *fence; - struct drm_sched_fence *s_fence; - struct drm_sched_job *sched_job; - - sched_job = drm_sched_entity_pop_job(entity); - if (!sched_job) { - complete_all(&entity->entity_idle); - if (!cleanup_job) - return; /* No more work */ - goto again; - } - - s_fence = sched_job->s_fence; - - atomic_inc(&sched->hw_rq_count); - drm_sched_job_begin(sched_job); + s_fence = sched_job->s_fence; - trace_drm_run_job(sched_job, entity); - fence = sched->ops->run_job(sched_job); - complete_all(&entity->entity_idle); - drm_sched_fence_scheduled(s_fence, fence); + atomic_inc(&sched->hw_rq_count); + drm_sched_job_begin(sched_job); - if (!IS_ERR_OR_NULL(fence)) { - /* Drop for original kref_init of the fence */ - dma_fence_put(fence); + trace_drm_run_job(sched_job, entity); + fence = sched->ops->run_job(sched_job); + complete_all(&entity->entity_idle); + drm_sched_fence_scheduled(s_fence, fence); - r = dma_fence_add_callback(fence, &sched_job->cb, - drm_sched_job_done_cb); - if (r == -ENOENT) - drm_sched_job_done(sched_job, fence->error); - else if (r) - DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", - r); - } else { - drm_sched_job_done(sched_job, IS_ERR(fence) ? - PTR_ERR(fence) : 0); - } + if (!IS_ERR_OR_NULL(fence)) { + /* Drop for original kref_init of the fence */ + dma_fence_put(fence); - wake_up(&sched->job_scheduled); + r = dma_fence_add_callback(fence, &sched_job->cb, + drm_sched_job_done_cb); + if (r == -ENOENT) + drm_sched_job_done(sched_job, fence->error); + else if (r) + DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r); + } else { + drm_sched_job_done(sched_job, IS_ERR(fence) ? 
+ PTR_ERR(fence) : 0); } -again: - drm_sched_run_job_queue(sched); + wake_up(&sched->job_scheduled); + drm_sched_run_job_queue_if_ready(sched); } /** @@ -1145,6 +1192,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, atomic_set(&sched->hw_rq_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); INIT_WORK(&sched->work_run_job, drm_sched_run_job_work); + INIT_WORK(&sched->work_free_job, drm_sched_free_job_work); atomic_set(&sched->_score, 0); atomic64_set(&sched->job_id_count, 0); sched->pause_submit = false; @@ -1274,6 +1322,7 @@ void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched) { WRITE_ONCE(sched->pause_submit, true); cancel_work_sync(&sched->work_run_job); + cancel_work_sync(&sched->work_free_job); } EXPORT_SYMBOL(drm_sched_wqueue_stop); @@ -1286,5 +1335,6 @@ void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched) { WRITE_ONCE(sched->pause_submit, false); queue_work(sched->submit_wq, &sched->work_run_job); + queue_work(sched->submit_wq, &sched->work_free_job); } EXPORT_SYMBOL(drm_sched_wqueue_start); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 4bf94aca26316..6655aeeef9bb2 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -480,9 +480,10 @@ struct drm_sched_backend_ops { * finished. * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. - * @submit_wq: workqueue used to queue @work_run_job + * @submit_wq: workqueue used to queue @work_run_job and @work_free_job * @timeout_wq: workqueue used to queue @work_tdr * @work_run_job: work which calls run_job op of each scheduler. + * @work_free_job: work which calls free_job op of each scheduler. * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the * timeout interval is over. * @pending_list: the list of jobs which are currently in the job queue. @@ -512,6 +513,7 @@ struct drm_gpu_scheduler { struct workqueue_struct *submit_wq; struct workqueue_struct *timeout_wq; struct work_struct work_run_job; + struct work_struct work_free_job; struct delayed_work work_tdr; struct list_head pending_list; spinlock_t job_list_lock; From 7a36dcfa16a5a7a87f65e03e1a3eb2b5e2fca812 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 30 Oct 2023 20:24:38 -0700 Subject: [PATCH 039/117] drm/sched: Add drm_sched_start_timeout_unlocked helper Also add a lockdep assert to drm_sched_start_timeout. 
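In condensed form, the locked/unlocked helper pattern this adds looks roughly like the generic sketch below (my_* names invented, not the scheduler code): the inner helper documents its locking requirement via lockdep, and the _unlocked wrapper takes the lock for callers that do not already hold it:

    static void my_start_timeout(struct my_dev *md)
    {
        lockdep_assert_held(&md->list_lock);    /* caller must hold the lock */
        mod_delayed_work(system_wq, &md->tdr_work, md->timeout);
    }

    static void my_start_timeout_unlocked(struct my_dev *md)
    {
        spin_lock(&md->list_lock);
        my_start_timeout(md);
        spin_unlock(&md->list_lock);
    }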
Signed-off-by: Matthew Brost Reviewed-by: Luben Tuikov Link: https://lore.kernel.org/r/20231031032439.1558703-5-matthew.brost@intel.com Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 3b1b2f8eafe83..fc387de5a0c75 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -334,11 +334,20 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb) */ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) { + lockdep_assert_held(&sched->job_list_lock); + if (sched->timeout != MAX_SCHEDULE_TIMEOUT && !list_empty(&sched->pending_list)) queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); } +static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched) +{ + spin_lock(&sched->job_list_lock); + drm_sched_start_timeout(sched); + spin_unlock(&sched->job_list_lock); +} + /** * drm_sched_fault - immediately start timeout handler * @@ -451,11 +460,8 @@ static void drm_sched_job_timedout(struct work_struct *work) spin_unlock(&sched->job_list_lock); } - if (status != DRM_GPU_SCHED_STAT_ENODEV) { - spin_lock(&sched->job_list_lock); - drm_sched_start_timeout(sched); - spin_unlock(&sched->job_list_lock); - } + if (status != DRM_GPU_SCHED_STAT_ENODEV) + drm_sched_start_timeout_unlocked(sched); } /** @@ -581,11 +587,8 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) drm_sched_job_done(s_job, -ECANCELED); } - if (full_recovery) { - spin_lock(&sched->job_list_lock); - drm_sched_start_timeout(sched); - spin_unlock(&sched->job_list_lock); - } + if (full_recovery) + drm_sched_start_timeout_unlocked(sched); drm_sched_wqueue_start(sched); } From 3c6c7ca4508b6cb1a033ac954c50a1b2c97af883 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 30 Oct 2023 20:24:39 -0700 Subject: [PATCH 040/117] drm/sched: Add a helper to queue TDR immediately Add a helper whereby a driver can invoke TDR immediately. 
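No caller is added in this patch; a driver could use the helper roughly as in the sketch below, for example when its firmware reports a hang before the normal timeout elapses (the my_* names are invented for illustration):

    /* e.g. from a hang/error notification handler */
    static void my_gpu_hang_notify(struct my_gpu *gpu, unsigned int ring)
    {
        /*
         * Run the scheduler's timeout handling right away instead of
         * waiting for the configured timeout to expire.
         */
        drm_sched_tdr_queue_imm(&gpu->rings[ring].sched);
    }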
v2: - Drop timeout args, rename function, use mod delayed work (Luben) v3: - s/XE/Xe (Luben) - present tense in commit message (Luben) - Adjust comment for drm_sched_tdr_queue_imm (Luben) v4: - Adjust commit message (Luben) Cc: Luben Tuikov Signed-off-by: Matthew Brost Reviewed-by: Luben Tuikov Link: https://lore.kernel.org/r/20231031032439.1558703-6-matthew.brost@intel.com Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 18 +++++++++++++++++- include/drm/gpu_scheduler.h | 1 + 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index fc387de5a0c75..98b2ad54fc707 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -338,7 +338,7 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) if (sched->timeout != MAX_SCHEDULE_TIMEOUT && !list_empty(&sched->pending_list)) - queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); + mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout); } static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched) @@ -348,6 +348,22 @@ static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched) spin_unlock(&sched->job_list_lock); } +/** + * drm_sched_tdr_queue_imm: - immediately start job timeout handler + * + * @sched: scheduler for which the timeout handling should be started. + * + * Start timeout handling immediately for the named scheduler. + */ +void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched) +{ + spin_lock(&sched->job_list_lock); + sched->timeout = 0; + drm_sched_start_timeout(sched); + spin_unlock(&sched->job_list_lock); +} +EXPORT_SYMBOL(drm_sched_tdr_queue_imm); + /** * drm_sched_fault - immediately start timeout handler * diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 6655aeeef9bb2..9daa78d2c04c1 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -557,6 +557,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, struct drm_gpu_scheduler **sched_list, unsigned int num_sched_list); +void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched); void drm_sched_job_cleanup(struct drm_sched_job *job); void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched); bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched); From 8ddfc01ace51c85a2333fb9a9cbea34d9f87885d Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 1 Nov 2023 18:20:16 +0100 Subject: [PATCH 041/117] fbdev/simplefb: Support memory-region property The simple-framebuffer bindings specify that the "memory-region" property can be used as an alternative to the "reg" property to define the framebuffer memory used by the display hardware. Implement support for this in the simplefb driver. 
Reviewed-by: Hans de Goede Signed-off-by: Thierry Reding Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20231101172017.3872242-2-thierry.reding@gmail.com --- drivers/video/fbdev/simplefb.c | 35 +++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 62f99f6fccd3c..18025f34fde71 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -121,12 +122,13 @@ struct simplefb_params { u32 height; u32 stride; struct simplefb_format *format; + struct resource memory; }; static int simplefb_parse_dt(struct platform_device *pdev, struct simplefb_params *params) { - struct device_node *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node, *mem; int ret; const char *format; int i; @@ -166,6 +168,23 @@ static int simplefb_parse_dt(struct platform_device *pdev, return -EINVAL; } + mem = of_parse_phandle(np, "memory-region", 0); + if (mem) { + ret = of_address_to_resource(mem, 0, ¶ms->memory); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse memory-region\n"); + of_node_put(mem); + return ret; + } + + if (of_property_present(np, "reg")) + dev_warn(&pdev->dev, "preferring \"memory-region\" over \"reg\" property\n"); + + of_node_put(mem); + } else { + memset(¶ms->memory, 0, sizeof(params->memory)); + } + return 0; } @@ -193,6 +212,8 @@ static int simplefb_parse_pd(struct platform_device *pdev, return -EINVAL; } + memset(¶ms->memory, 0, sizeof(params->memory)); + return 0; } @@ -431,10 +452,14 @@ static int simplefb_probe(struct platform_device *pdev) if (ret) return ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "No memory resource\n"); - return -EINVAL; + if (params.memory.start == 0 && params.memory.end == 0) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "No memory resource\n"); + return -EINVAL; + } + } else { + res = ¶ms.memory; } mem = request_mem_region(res->start, resource_size(res), "simplefb"); From 92a511a568e44cf11681a2223cae4d576a1a515d Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 1 Nov 2023 18:20:17 +0100 Subject: [PATCH 042/117] fbdev/simplefb: Add support for generic power-domains The simple-framebuffer device tree bindings document the power-domains property, so make sure that simplefb supports it. This ensures that the power domains remain enabled as long as simplefb is active. 
v2: - remove unnecessary call to simplefb_detach_genpds() since that's already done automatically by devres - fix crash if power-domains property is missing in DT Signed-off-by: Thierry Reding Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20231101172017.3872242-3-thierry.reding@gmail.com --- drivers/video/fbdev/simplefb.c | 93 ++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 18025f34fde71..fe682af63827b 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -25,6 +25,7 @@ #include #include #include +#include #include static const struct fb_fix_screeninfo simplefb_fix = { @@ -78,6 +79,11 @@ struct simplefb_par { unsigned int clk_count; struct clk **clks; #endif +#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS + unsigned int num_genpds; + struct device **genpds; + struct device_link **genpd_links; +#endif #if defined CONFIG_OF && defined CONFIG_REGULATOR bool regulators_enabled; u32 regulator_count; @@ -432,6 +438,89 @@ static void simplefb_regulators_enable(struct simplefb_par *par, static void simplefb_regulators_destroy(struct simplefb_par *par) { } #endif +#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS +static void simplefb_detach_genpds(void *res) +{ + struct simplefb_par *par = res; + unsigned int i = par->num_genpds; + + if (par->num_genpds <= 1) + return; + + while (i--) { + if (par->genpd_links[i]) + device_link_del(par->genpd_links[i]); + + if (!IS_ERR_OR_NULL(par->genpds[i])) + dev_pm_domain_detach(par->genpds[i], true); + } +} + +static int simplefb_attach_genpds(struct simplefb_par *par, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned int i; + int err; + + err = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (err < 0) { + dev_info(dev, "failed to parse power-domains: %d\n", err); + return err; + } + + par->num_genpds = err; + + /* + * Single power-domain devices are handled by the driver core, so + * nothing to do here. 
+ */ + if (par->num_genpds <= 1) + return 0; + + par->genpds = devm_kcalloc(dev, par->num_genpds, sizeof(*par->genpds), + GFP_KERNEL); + if (!par->genpds) + return -ENOMEM; + + par->genpd_links = devm_kcalloc(dev, par->num_genpds, + sizeof(*par->genpd_links), + GFP_KERNEL); + if (!par->genpd_links) + return -ENOMEM; + + for (i = 0; i < par->num_genpds; i++) { + par->genpds[i] = dev_pm_domain_attach_by_id(dev, i); + if (IS_ERR(par->genpds[i])) { + err = PTR_ERR(par->genpds[i]); + if (err == -EPROBE_DEFER) { + simplefb_detach_genpds(par); + return err; + } + + dev_warn(dev, "failed to attach domain %u: %d\n", i, err); + continue; + } + + par->genpd_links[i] = device_link_add(dev, par->genpds[i], + DL_FLAG_STATELESS | + DL_FLAG_PM_RUNTIME | + DL_FLAG_RPM_ACTIVE); + if (!par->genpd_links[i]) + dev_warn(dev, "failed to link power-domain %u\n", i); + } + + return devm_add_action_or_reset(dev, simplefb_detach_genpds, par); +} +#else +static int simplefb_attach_genpds(struct simplefb_par *par, + struct platform_device *pdev) +{ + return 0; +} +#endif + static int simplefb_probe(struct platform_device *pdev) { int ret; @@ -518,6 +607,10 @@ static int simplefb_probe(struct platform_device *pdev) if (ret < 0) goto error_clocks; + ret = simplefb_attach_genpds(par, pdev); + if (ret < 0) + goto error_regulators; + simplefb_clocks_enable(par, pdev); simplefb_regulators_enable(par, pdev); From 1118d10f5e5ab544c489fad4da373f9988416ece Mon Sep 17 00:00:00 2001 From: Iago Toral Quiroga Date: Tue, 31 Oct 2023 08:38:56 +0100 Subject: [PATCH 043/117] drm/v3d: update UAPI to match user-space for V3D 7.x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V3D 7.x takes a new parameter to configure TFU jobs that needs to be provided by user space. Signed-off-by: Iago Toral Quiroga Reviewed-by: Maíra Canal Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20231031073859.25298-2-itoral@igalia.com --- include/uapi/drm/v3d_drm.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 3dfc0af8756aa..1a7d7a689de38 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -319,6 +319,11 @@ struct drm_v3d_submit_tfu { /* Pointer to an array of ioctl extensions*/ __u64 extensions; + + struct { + __u32 ioc; + __u32 pad; + } v71; }; /* Submits a compute shader for dispatch. This job will block on any From 0ad5bc1ce4634ce9b5eaf017b01399ec5e49a03d Mon Sep 17 00:00:00 2001 From: Iago Toral Quiroga Date: Tue, 31 Oct 2023 08:38:57 +0100 Subject: [PATCH 044/117] drm/v3d: fix up register addresses for V3D 7.x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch updates a number of register addresses that have been changed in Raspberry Pi 5 (V3D 7.1) and updates the code to use the corresponding registers and addresses based on the actual V3D version. 
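The pattern used throughout the diff below, in condensed form: a register that moved on V3D 7.x keeps a single name and selects its offset from the version, and the debugfs tables record the version range in which each register exists (the GMP offsets shown are the ones from this patch):

    /* One name, version-selected offset. */
    #define V3D_GMP_STATUS(ver)  ((ver) >= 71 ? 0x00600 : 0x00800)

    /* Register dumps only touch registers present on the running version. */
    struct v3d_reg_def {
        u32 min_ver;
        u32 max_ver;
        u32 reg;
        const char *name;
    };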
Signed-off-by: Iago Toral Quiroga Reviewed-by: Maíra Canal Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20231031073859.25298-3-itoral@igalia.com --- drivers/gpu/drm/v3d/v3d_debugfs.c | 178 +++++++++++++++++------------- drivers/gpu/drm/v3d/v3d_gem.c | 4 +- drivers/gpu/drm/v3d/v3d_irq.c | 46 ++++---- drivers/gpu/drm/v3d/v3d_regs.h | 94 +++++++++------- drivers/gpu/drm/v3d/v3d_sched.c | 38 ++++--- 5 files changed, 204 insertions(+), 156 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 330669f51fa79..f843a50d5dce6 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -12,69 +12,83 @@ #include "v3d_drv.h" #include "v3d_regs.h" -#define REGDEF(reg) { reg, #reg } +#define REGDEF(min_ver, max_ver, reg) { min_ver, max_ver, reg, #reg } struct v3d_reg_def { + u32 min_ver; + u32 max_ver; u32 reg; const char *name; }; static const struct v3d_reg_def v3d_hub_reg_defs[] = { - REGDEF(V3D_HUB_AXICFG), - REGDEF(V3D_HUB_UIFCFG), - REGDEF(V3D_HUB_IDENT0), - REGDEF(V3D_HUB_IDENT1), - REGDEF(V3D_HUB_IDENT2), - REGDEF(V3D_HUB_IDENT3), - REGDEF(V3D_HUB_INT_STS), - REGDEF(V3D_HUB_INT_MSK_STS), - - REGDEF(V3D_MMU_CTL), - REGDEF(V3D_MMU_VIO_ADDR), - REGDEF(V3D_MMU_VIO_ID), - REGDEF(V3D_MMU_DEBUG_INFO), + REGDEF(33, 42, V3D_HUB_AXICFG), + REGDEF(33, 71, V3D_HUB_UIFCFG), + REGDEF(33, 71, V3D_HUB_IDENT0), + REGDEF(33, 71, V3D_HUB_IDENT1), + REGDEF(33, 71, V3D_HUB_IDENT2), + REGDEF(33, 71, V3D_HUB_IDENT3), + REGDEF(33, 71, V3D_HUB_INT_STS), + REGDEF(33, 71, V3D_HUB_INT_MSK_STS), + + REGDEF(33, 71, V3D_MMU_CTL), + REGDEF(33, 71, V3D_MMU_VIO_ADDR), + REGDEF(33, 71, V3D_MMU_VIO_ID), + REGDEF(33, 71, V3D_MMU_DEBUG_INFO), + + REGDEF(71, 71, V3D_GMP_STATUS(71)), + REGDEF(71, 71, V3D_GMP_CFG(71)), + REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)), }; static const struct v3d_reg_def v3d_gca_reg_defs[] = { - REGDEF(V3D_GCA_SAFE_SHUTDOWN), - REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK), + REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN), + REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK), }; static const struct v3d_reg_def v3d_core_reg_defs[] = { - REGDEF(V3D_CTL_IDENT0), - REGDEF(V3D_CTL_IDENT1), - REGDEF(V3D_CTL_IDENT2), - REGDEF(V3D_CTL_MISCCFG), - REGDEF(V3D_CTL_INT_STS), - REGDEF(V3D_CTL_INT_MSK_STS), - REGDEF(V3D_CLE_CT0CS), - REGDEF(V3D_CLE_CT0CA), - REGDEF(V3D_CLE_CT0EA), - REGDEF(V3D_CLE_CT1CS), - REGDEF(V3D_CLE_CT1CA), - REGDEF(V3D_CLE_CT1EA), - - REGDEF(V3D_PTB_BPCA), - REGDEF(V3D_PTB_BPCS), - - REGDEF(V3D_GMP_STATUS), - REGDEF(V3D_GMP_CFG), - REGDEF(V3D_GMP_VIO_ADDR), - - REGDEF(V3D_ERR_FDBGO), - REGDEF(V3D_ERR_FDBGB), - REGDEF(V3D_ERR_FDBGS), - REGDEF(V3D_ERR_STAT), + REGDEF(33, 71, V3D_CTL_IDENT0), + REGDEF(33, 71, V3D_CTL_IDENT1), + REGDEF(33, 71, V3D_CTL_IDENT2), + REGDEF(33, 71, V3D_CTL_MISCCFG), + REGDEF(33, 71, V3D_CTL_INT_STS), + REGDEF(33, 71, V3D_CTL_INT_MSK_STS), + REGDEF(33, 71, V3D_CLE_CT0CS), + REGDEF(33, 71, V3D_CLE_CT0CA), + REGDEF(33, 71, V3D_CLE_CT0EA), + REGDEF(33, 71, V3D_CLE_CT1CS), + REGDEF(33, 71, V3D_CLE_CT1CA), + REGDEF(33, 71, V3D_CLE_CT1EA), + + REGDEF(33, 71, V3D_PTB_BPCA), + REGDEF(33, 71, V3D_PTB_BPCS), + + REGDEF(33, 41, V3D_GMP_STATUS(33)), + REGDEF(33, 41, V3D_GMP_CFG(33)), + REGDEF(33, 41, V3D_GMP_VIO_ADDR(33)), + + REGDEF(33, 71, V3D_ERR_FDBGO), + REGDEF(33, 71, V3D_ERR_FDBGB), + REGDEF(33, 71, V3D_ERR_FDBGS), + REGDEF(33, 71, V3D_ERR_STAT), }; static const struct v3d_reg_def v3d_csd_reg_defs[] = { - REGDEF(V3D_CSD_STATUS), - REGDEF(V3D_CSD_CURRENT_CFG0), - REGDEF(V3D_CSD_CURRENT_CFG1), - 
REGDEF(V3D_CSD_CURRENT_CFG2), - REGDEF(V3D_CSD_CURRENT_CFG3), - REGDEF(V3D_CSD_CURRENT_CFG4), - REGDEF(V3D_CSD_CURRENT_CFG5), - REGDEF(V3D_CSD_CURRENT_CFG6), + REGDEF(41, 71, V3D_CSD_STATUS), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG0(41)), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG1(41)), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG2(41)), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG3(41)), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG4(41)), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG5(41)), + REGDEF(41, 41, V3D_CSD_CURRENT_CFG6(41)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)), + REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)), + REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7), }; static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) @@ -85,38 +99,41 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) int i, core; for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) { - seq_printf(m, "%s (0x%04x): 0x%08x\n", - v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg, - V3D_READ(v3d_hub_reg_defs[i].reg)); + const struct v3d_reg_def *def = &v3d_hub_reg_defs[i]; + + if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) { + seq_printf(m, "%s (0x%04x): 0x%08x\n", + def->name, def->reg, V3D_READ(def->reg)); + } } - if (v3d->ver < 41) { - for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { + const struct v3d_reg_def *def = &v3d_gca_reg_defs[i]; + + if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) { seq_printf(m, "%s (0x%04x): 0x%08x\n", - v3d_gca_reg_defs[i].name, - v3d_gca_reg_defs[i].reg, - V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); + def->name, def->reg, V3D_GCA_READ(def->reg)); } } for (core = 0; core < v3d->cores; core++) { for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) { - seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", - core, - v3d_core_reg_defs[i].name, - v3d_core_reg_defs[i].reg, - V3D_CORE_READ(core, - v3d_core_reg_defs[i].reg)); + const struct v3d_reg_def *def = &v3d_core_reg_defs[i]; + + if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) { + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", + core, def->name, def->reg, + V3D_CORE_READ(core, def->reg)); + } } - if (v3d_has_csd(v3d)) { - for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) { + for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) { + const struct v3d_reg_def *def = &v3d_csd_reg_defs[i]; + + if (v3d->ver >= def->min_ver && v3d->ver <= def->max_ver) { seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", - core, - v3d_csd_reg_defs[i].name, - v3d_csd_reg_defs[i].reg, - V3D_CORE_READ(core, - v3d_csd_reg_defs[i].reg)); + core, def->name, def->reg, + V3D_CORE_READ(core, def->reg)); } } } @@ -147,8 +164,10 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU)); seq_printf(m, "TFU: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU)); - seq_printf(m, "TSY: %s\n", - str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); + if (v3d->ver <= 42) { + seq_printf(m, "TSY: %s\n", + str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); + } seq_printf(m, "MSO: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_MSO)); seq_printf(m, "L3C: %s (%dkb)\n", @@ -177,10 +196,14 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) seq_printf(m, " QPUs: %d\n", nslc * qups); seq_printf(m, " Semaphores: %d\n", V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); 
- seq_printf(m, " BCG int: %d\n", - (ident2 & V3D_IDENT2_BCG_INT) != 0); - seq_printf(m, " Override TMU: %d\n", - (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); + if (v3d->ver <= 42) { + seq_printf(m, " BCG int: %d\n", + (ident2 & V3D_IDENT2_BCG_INT) != 0); + } + if (v3d->ver < 40) { + seq_printf(m, " Override TMU: %d\n", + (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); + } } return 0; @@ -212,14 +235,15 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) int measure_ms = 1000; if (v3d->ver >= 40) { + int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3, - V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT, + V3D_SET_FIELD(cycle_count_reg, V3D_PCTR_S0)); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1); } else { V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0, - V3D_PCTR_CYCLE_COUNT); + V3D_PCTR_CYCLE_COUNT(v3d->ver)); V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1); V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN, V3D_V3_PCTR_0_EN_ENABLE | diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index afa7d170d1fff..5688d46d30e69 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -47,9 +47,9 @@ v3d_init_hw_state(struct v3d_dev *v3d) static void v3d_idle_axi(struct v3d_dev *v3d, int core) { - V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ); + V3D_CORE_WRITE(core, V3D_GMP_CFG(v3d->ver), V3D_GMP_CFG_STOP_REQ); - if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) & + if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS(v3d->ver)) & (V3D_GMP_STATUS_RD_COUNT_MASK | V3D_GMP_STATUS_WR_COUNT_MASK | V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) { diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index e714d5318f309..9705e0cb576d8 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -19,16 +19,17 @@ #include "v3d_regs.h" #include "v3d_trace.h" -#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ - V3D_INT_FLDONE | \ - V3D_INT_FRDONE | \ - V3D_INT_CSDDONE | \ - V3D_INT_GMPV)) - -#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ - V3D_HUB_INT_MMU_PTI | \ - V3D_HUB_INT_MMU_CAP | \ - V3D_HUB_INT_TFUC)) +#define V3D_CORE_IRQS(ver) ((u32)(V3D_INT_OUTOMEM | \ + V3D_INT_FLDONE | \ + V3D_INT_FRDONE | \ + V3D_INT_CSDDONE(ver) | \ + (ver < 71 ? V3D_INT_GMPV : 0))) + +#define V3D_HUB_IRQS(ver) ((u32)(V3D_HUB_INT_MMU_WRV | \ + V3D_HUB_INT_MMU_PTI | \ + V3D_HUB_INT_MMU_CAP | \ + V3D_HUB_INT_TFUC | \ + (ver >= 71 ? V3D_V7_HUB_INT_GMPV : 0))) static irqreturn_t v3d_hub_irq(int irq, void *arg); @@ -115,7 +116,7 @@ v3d_irq(int irq, void *arg) status = IRQ_HANDLED; } - if (intsts & V3D_INT_CSDDONE) { + if (intsts & V3D_INT_CSDDONE(v3d->ver)) { struct v3d_fence *fence = to_v3d_fence(v3d->csd_job->base.irq_fence); @@ -127,7 +128,7 @@ v3d_irq(int irq, void *arg) /* We shouldn't be triggering these if we have GMP in * always-allowed mode. */ - if (intsts & V3D_INT_GMPV) + if (v3d->ver < 71 && (intsts & V3D_INT_GMPV)) dev_err(v3d->drm.dev, "GMP violation\n"); /* V3D 4.2 wires the hub and core IRQs together, so if we & @@ -197,6 +198,11 @@ v3d_hub_irq(int irq, void *arg) status = IRQ_HANDLED; } + if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) { + dev_err(v3d->drm.dev, "GMP Violation\n"); + status = IRQ_HANDLED; + } + return status; } @@ -211,8 +217,8 @@ v3d_irq_init(struct v3d_dev *v3d) * for us. 
*/ for (core = 0; core < v3d->cores; core++) - V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); - V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver)); irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); if (irq1 == -EPROBE_DEFER) @@ -256,12 +262,12 @@ v3d_irq_enable(struct v3d_dev *v3d) /* Enable our set of interrupts, masking out any others. */ for (core = 0; core < v3d->cores; core++) { - V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS); - V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS); + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS(v3d->ver)); + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS(v3d->ver)); } - V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS); - V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS); + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS(v3d->ver)); + V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS(v3d->ver)); } void @@ -276,8 +282,8 @@ v3d_irq_disable(struct v3d_dev *v3d) /* Clear any pending interrupts we might have left. */ for (core = 0; core < v3d->cores; core++) - V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); - V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver)); cancel_work_sync(&v3d->overflow_mem_work); } diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 3663e0d6bf766..1b1a62ad95852 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -57,6 +57,7 @@ #define V3D_HUB_INT_MSK_STS 0x0005c #define V3D_HUB_INT_MSK_SET 0x00060 #define V3D_HUB_INT_MSK_CLR 0x00064 +# define V3D_V7_HUB_INT_GMPV BIT(6) # define V3D_HUB_INT_MMU_WRV BIT(5) # define V3D_HUB_INT_MMU_PTI BIT(4) # define V3D_HUB_INT_MMU_CAP BIT(3) @@ -64,6 +65,7 @@ # define V3D_HUB_INT_TFUC BIT(1) # define V3D_HUB_INT_TFUF BIT(0) +/* GCA registers only exist in V3D < 41 */ #define V3D_GCA_CACHE_CTRL 0x0000c # define V3D_GCA_CACHE_CTRL_FLUSH BIT(0) @@ -86,7 +88,8 @@ # define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c # define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) -#define V3D_TFU_CS 0x00400 +#define V3D_TFU_CS(ver) ((ver >= 71) ? 0x00700 : 0x00400) + /* Stops current job, empties input fifo. */ # define V3D_TFU_CS_TFURST BIT(31) # define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16) @@ -95,7 +98,7 @@ # define V3D_TFU_CS_NFREE_SHIFT 8 # define V3D_TFU_CS_BUSY BIT(0) -#define V3D_TFU_SU 0x00404 +#define V3D_TFU_SU(ver) ((ver >= 71) ? 0x00704 : 0x00404) /* Interrupt when FINTTHR input slots are free (0 = disabled) */ # define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8) # define V3D_TFU_SU_FINTTHR_SHIFT 8 @@ -106,39 +109,42 @@ # define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0) # define V3D_TFU_SU_THROTTLE_SHIFT 0 -#define V3D_TFU_ICFG 0x00408 +#define V3D_TFU_ICFG(ver) ((ver >= 71) ? 0x00708 : 0x00408) /* Interrupt when the conversion is complete. */ # define V3D_TFU_ICFG_IOC BIT(0) /* Input Image Address */ -#define V3D_TFU_IIA 0x0040c +#define V3D_TFU_IIA(ver) ((ver >= 71) ? 0x0070c : 0x0040c) /* Input Chroma Address */ -#define V3D_TFU_ICA 0x00410 +#define V3D_TFU_ICA(ver) ((ver >= 71) ? 0x00710 : 0x00410) /* Input Image Stride */ -#define V3D_TFU_IIS 0x00414 +#define V3D_TFU_IIS(ver) ((ver >= 71) ? 0x00714 : 0x00414) /* Input Image U-Plane Address */ -#define V3D_TFU_IUA 0x00418 +#define V3D_TFU_IUA(ver) ((ver >= 71) ? 
0x00718 : 0x00418) +/* Image output config (VD 7.x only) */ +#define V3D_V7_TFU_IOC 0x0071c /* Output Image Address */ -#define V3D_TFU_IOA 0x0041c +#define V3D_TFU_IOA(ver) ((ver >= 71) ? 0x00720 : 0x0041c) /* Image Output Size */ -#define V3D_TFU_IOS 0x00420 +#define V3D_TFU_IOS(ver) ((ver >= 71) ? 0x00724 : 0x00420) /* TFU YUV Coefficient 0 */ -#define V3D_TFU_COEF0 0x00424 -/* Use these regs instead of the defaults. */ +#define V3D_TFU_COEF0(ver) ((ver >= 71) ? 0x00728 : 0x00424) +/* Use these regs instead of the defaults (V3D 4.x only) */ # define V3D_TFU_COEF0_USECOEF BIT(31) /* TFU YUV Coefficient 1 */ -#define V3D_TFU_COEF1 0x00428 +#define V3D_TFU_COEF1(ver) ((ver >= 71) ? 0x0072c : 0x00428) /* TFU YUV Coefficient 2 */ -#define V3D_TFU_COEF2 0x0042c +#define V3D_TFU_COEF2(ver) ((ver >= 71) ? 0x00730 : 0x0042c) /* TFU YUV Coefficient 3 */ -#define V3D_TFU_COEF3 0x00430 +#define V3D_TFU_COEF3(ver) ((ver >= 71) ? 0x00734 : 0x00430) +/* V3D 4.x only */ #define V3D_TFU_CRC 0x00434 /* Per-MMU registers. */ #define V3D_MMUC_CONTROL 0x01000 -# define V3D_MMUC_CONTROL_CLEAR BIT(3) +#define V3D_MMUC_CONTROL_CLEAR(ver) ((ver >= 71) ? BIT(11) : BIT(3)) # define V3D_MMUC_CONTROL_FLUSHING BIT(2) # define V3D_MMUC_CONTROL_FLUSH BIT(1) # define V3D_MMUC_CONTROL_ENABLE BIT(0) @@ -246,7 +252,6 @@ #define V3D_CTL_L2TCACTL 0x00030 # define V3D_L2TCACTL_TMUWCF BIT(8) -# define V3D_L2TCACTL_L2T_NO_WM BIT(4) /* Invalidates cache lines. */ # define V3D_L2TCACTL_FLM_FLUSH 0 /* Removes cachelines without writing dirty lines back. */ @@ -267,8 +272,8 @@ #define V3D_CTL_INT_MSK_CLR 0x00064 # define V3D_INT_QPU_MASK V3D_MASK(27, 16) # define V3D_INT_QPU_SHIFT 16 -# define V3D_INT_CSDDONE BIT(7) -# define V3D_INT_PCTR BIT(6) +#define V3D_INT_CSDDONE(ver) ((ver >= 71) ? BIT(6) : BIT(7)) +#define V3D_INT_PCTR(ver) ((ver >= 71) ? BIT(5) : BIT(6)) # define V3D_INT_GMPV BIT(5) # define V3D_INT_TRFB BIT(4) # define V3D_INT_SPILLUSE BIT(3) @@ -350,21 +355,25 @@ #define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \ 4 * (x)) # define V3D_PCTR_S0_MASK V3D_MASK(6, 0) +# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0) # define V3D_PCTR_S0_SHIFT 0 # define V3D_PCTR_S1_MASK V3D_MASK(14, 8) +# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8) # define V3D_PCTR_S1_SHIFT 8 # define V3D_PCTR_S2_MASK V3D_MASK(22, 16) +# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16) # define V3D_PCTR_S2_SHIFT 16 # define V3D_PCTR_S3_MASK V3D_MASK(30, 24) +# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24) # define V3D_PCTR_S3_SHIFT 24 -# define V3D_PCTR_CYCLE_COUNT 32 +#define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32) /* Output values of the counters. */ #define V3D_PCTR_0_PCTR0 0x00680 #define V3D_PCTR_0_PCTR31 0x006fc #define V3D_PCTR_0_PCTRX(x) (V3D_PCTR_0_PCTR0 + \ 4 * (x)) -#define V3D_GMP_STATUS 0x00800 +#define V3D_GMP_STATUS(ver) ((ver >= 71) ? 0x00600 : 0x00800) # define V3D_GMP_STATUS_GMPRST BIT(31) # define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24) # define V3D_GMP_STATUS_WR_COUNT_SHIFT 24 @@ -377,13 +386,13 @@ # define V3D_GMP_STATUS_INVPROT BIT(1) # define V3D_GMP_STATUS_VIO BIT(0) -#define V3D_GMP_CFG 0x00804 +#define V3D_GMP_CFG(ver) ((ver >= 71) ? 0x00604 : 0x00804) # define V3D_GMP_CFG_LBURSTEN BIT(3) # define V3D_GMP_CFG_PGCRSEN BIT() # define V3D_GMP_CFG_STOP_REQ BIT(1) # define V3D_GMP_CFG_PROT_ENABLE BIT(0) -#define V3D_GMP_VIO_ADDR 0x00808 +#define V3D_GMP_VIO_ADDR(ver) ((ver >= 71) ? 
0x00608 : 0x00808) #define V3D_GMP_VIO_TYPE 0x0080c #define V3D_GMP_TABLE_ADDR 0x00810 #define V3D_GMP_CLEAR_LOAD 0x00814 @@ -398,25 +407,25 @@ # define V3D_CSD_STATUS_HAVE_CURRENT_DISPATCH BIT(1) # define V3D_CSD_STATUS_HAVE_QUEUED_DISPATCH BIT(0) -#define V3D_CSD_QUEUED_CFG0 0x00904 +#define V3D_CSD_QUEUED_CFG0(ver) ((ver >= 71) ? 0x00930 : 0x00904) # define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_MASK V3D_MASK(31, 16) # define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_SHIFT 16 # define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_MASK V3D_MASK(15, 0) # define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_SHIFT 0 -#define V3D_CSD_QUEUED_CFG1 0x00908 +#define V3D_CSD_QUEUED_CFG1(ver) ((ver >= 71) ? 0x00934 : 0x00908) # define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_MASK V3D_MASK(31, 16) # define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_SHIFT 16 # define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_MASK V3D_MASK(15, 0) # define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_SHIFT 0 -#define V3D_CSD_QUEUED_CFG2 0x0090c +#define V3D_CSD_QUEUED_CFG2(ver) ((ver >= 71) ? 0x00938 : 0x0090c) # define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_MASK V3D_MASK(31, 16) # define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_SHIFT 16 # define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_MASK V3D_MASK(15, 0) # define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_SHIFT 0 -#define V3D_CSD_QUEUED_CFG3 0x00910 +#define V3D_CSD_QUEUED_CFG3(ver) ((ver >= 71) ? 0x0093c : 0x00910) # define V3D_CSD_QUEUED_CFG3_OVERLAP_WITH_PREV BIT(26) # define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_MASK V3D_MASK(25, 20) # define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_SHIFT 20 @@ -428,23 +437,28 @@ # define V3D_CSD_QUEUED_CFG3_WG_SIZE_SHIFT 0 /* Number of batches, minus 1 */ -#define V3D_CSD_QUEUED_CFG4 0x00914 +#define V3D_CSD_QUEUED_CFG4(ver) ((ver >= 71) ? 0x00940 : 0x00914) /* Shader address, pnan, singleseg, threading, like a shader record. */ -#define V3D_CSD_QUEUED_CFG5 0x00918 +#define V3D_CSD_QUEUED_CFG5(ver) ((ver >= 71) ? 0x00944 : 0x00918) /* Uniforms address (4 byte aligned) */ -#define V3D_CSD_QUEUED_CFG6 0x0091c - -#define V3D_CSD_CURRENT_CFG0 0x00920 -#define V3D_CSD_CURRENT_CFG1 0x00924 -#define V3D_CSD_CURRENT_CFG2 0x00928 -#define V3D_CSD_CURRENT_CFG3 0x0092c -#define V3D_CSD_CURRENT_CFG4 0x00930 -#define V3D_CSD_CURRENT_CFG5 0x00934 -#define V3D_CSD_CURRENT_CFG6 0x00938 - -#define V3D_CSD_CURRENT_ID0 0x0093c +#define V3D_CSD_QUEUED_CFG6(ver) ((ver >= 71) ? 0x00948 : 0x0091c) + +/* V3D 7.x+ only */ +#define V3D_V7_CSD_QUEUED_CFG7 0x0094c + +#define V3D_CSD_CURRENT_CFG0(ver) ((ver >= 71) ? 0x00958 : 0x00920) +#define V3D_CSD_CURRENT_CFG1(ver) ((ver >= 71) ? 0x0095c : 0x00924) +#define V3D_CSD_CURRENT_CFG2(ver) ((ver >= 71) ? 0x00960 : 0x00928) +#define V3D_CSD_CURRENT_CFG3(ver) ((ver >= 71) ? 0x00964 : 0x0092c) +#define V3D_CSD_CURRENT_CFG4(ver) ((ver >= 71) ? 0x00968 : 0x00930) +#define V3D_CSD_CURRENT_CFG5(ver) ((ver >= 71) ? 0x0096c : 0x00934) +#define V3D_CSD_CURRENT_CFG6(ver) ((ver >= 71) ? 0x00970 : 0x00938) +/* V3D 7.x+ only */ +#define V3D_V7_CSD_CURRENT_CFG7 0x00974 + +#define V3D_CSD_CURRENT_ID0(ver) ((ver >= 71) ? 0x00978 : 0x0093c) # define V3D_CSD_CURRENT_ID0_WG_X_MASK V3D_MASK(31, 16) # define V3D_CSD_CURRENT_ID0_WG_X_SHIFT 16 # define V3D_CSD_CURRENT_ID0_WG_IN_SG_MASK V3D_MASK(11, 8) @@ -452,7 +466,7 @@ # define V3D_CSD_CURRENT_ID0_L_IDX_MASK V3D_MASK(7, 0) # define V3D_CSD_CURRENT_ID0_L_IDX_SHIFT 0 -#define V3D_CSD_CURRENT_ID1 0x00940 +#define V3D_CSD_CURRENT_ID1(ver) ((ver >= 71) ? 
0x0097c : 0x00940) # define V3D_CSD_CURRENT_ID0_WG_Z_MASK V3D_MASK(31, 16) # define V3D_CSD_CURRENT_ID0_WG_Z_SHIFT 16 # define V3D_CSD_CURRENT_ID0_WG_Y_MASK V3D_MASK(15, 0) diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 0b6696b0d882f..f59b63e51b14b 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -190,20 +190,22 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); - V3D_WRITE(V3D_TFU_IIA, job->args.iia); - V3D_WRITE(V3D_TFU_IIS, job->args.iis); - V3D_WRITE(V3D_TFU_ICA, job->args.ica); - V3D_WRITE(V3D_TFU_IUA, job->args.iua); - V3D_WRITE(V3D_TFU_IOA, job->args.ioa); - V3D_WRITE(V3D_TFU_IOS, job->args.ios); - V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]); - if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) { - V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]); - V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]); - V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]); + V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia); + V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis); + V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica); + V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua); + V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa); + if (v3d->ver >= 71) + V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc); + V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios); + V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]); + if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { + V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]); + V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]); + V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]); } /* ICFG kicks off the job. */ - V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC); + V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC); return fence; } @@ -215,7 +217,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) struct v3d_dev *v3d = job->base.v3d; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; - int i; + int i, csd_cfg0_reg, csd_cfg_reg_count; v3d->csd_job = job; @@ -233,10 +235,12 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) v3d_switch_perfmon(v3d, &job->base); - for (i = 1; i <= 6; i++) - V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); + csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver); + csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7; + for (i = 1; i <= csd_cfg_reg_count; i++) + V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]); /* CFG0 write kicks off the job. */ - V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]); + V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]); return fence; } @@ -336,7 +340,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) { struct v3d_csd_job *job = to_csd_job(sched_job); struct v3d_dev *v3d = job->base.v3d; - u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); + u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver)); /* If we've made progress, skip reset and let the timer get * rearmed. From ebb2f6eea688b9ffa46527c3e7570b2c347497b8 Mon Sep 17 00:00:00 2001 From: Iago Toral Quiroga Date: Tue, 31 Oct 2023 08:38:58 +0100 Subject: [PATCH 045/117] dt-bindings: gpu: v3d: Add BCM2712's compatible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BCM2712, Raspberry Pi 5's SoC, contains a V3D core. So add its specific compatible to the bindings. 
Signed-off-by: Iago Toral Quiroga Reviewed-by: Maíra Canal Acked-by: Conor Dooley Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20231031073859.25298-4-itoral@igalia.com --- Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml index dae55b8a267b0..dc078ceeca9ac 100644 --- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml +++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml @@ -17,6 +17,7 @@ properties: compatible: enum: - brcm,2711-v3d + - brcm,2712-v3d - brcm,7268-v3d - brcm,7278-v3d From 6fd9487147c4f18ad77eea00bd8c9189eec74a3e Mon Sep 17 00:00:00 2001 From: Iago Toral Quiroga Date: Tue, 31 Oct 2023 08:38:59 +0100 Subject: [PATCH 046/117] drm/v3d: add brcm,2712-v3d as a compatible V3D device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is required to get the V3D module to load with Raspberry Pi 5. Signed-off-by: Iago Toral Quiroga Reviewed-by: Stefan Wahren Reviewed-by: Maíra Canal Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20231031073859.25298-5-itoral@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index ffbbe9d527d32..1ab46bdf8ad71 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -187,6 +187,7 @@ static const struct drm_driver v3d_drm_driver = { static const struct of_device_id v3d_of_match[] = { { .compatible = "brcm,2711-v3d" }, + { .compatible = "brcm,2712-v3d" }, { .compatible = "brcm,7268-v3d" }, { .compatible = "brcm,7278-v3d" }, {}, From 44793c6a5b784f1f25608e3773fd40e011c63391 Mon Sep 17 00:00:00 2001 From: Carl Vanderlip Date: Fri, 27 Oct 2023 12:08:10 -0600 Subject: [PATCH 047/117] accel/qaic: Quiet array bounds check on DMA abort message Current wrapper is right-sized to the message being transferred; however, this is smaller than the structure defining message wrappers since the trailing element is a union of message/transfer headers of various sizes (8 and 32 bytes on 32-bit system where issue was reported). Using the smaller header with a small message (wire_trans_dma_xfer is 24 bytes including header) ends up being smaller than a wrapper with the larger header. There are no accesses outside of the defined size, however they are possible if the larger union member is referenced. Abort messages are outside of hot-path and changing the wrapper struct would require a larger rewrite, so having the memory allocated to the message be 8 bytes too big is acceptable. 
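To make the size mismatch easier to see, here is a minimal userspace sketch; the field sizes and layout are hypothetical stand-ins, not the real qaic wire structures:

  #include <stdio.h>
  #include <stddef.h>

  /* Hypothetical 8-byte and 32-byte header variants (stand-ins only). */
  struct small_hdr { unsigned char bytes[8]; };
  struct large_hdr { unsigned char bytes[32]; };

  struct wrapper {
          void *list_prev, *list_next;     /* placeholder for list/kref bookkeeping */
          union {                          /* trailing union of header types */
                  struct small_hdr trans;
                  struct large_hdr msg;
          };
  };

  int main(void)
  {
          /* An allocation right-sized to the small member... */
          size_t right_sized = offsetof(struct wrapper, trans) + sizeof(struct small_hdr);

          /* ...is smaller than the full wrapper, whose size is governed by the
           * largest union member, which is what the bounds check complains about. */
          printf("right-sized alloc: %zu bytes\n", right_sized);
          printf("sizeof(wrapper):   %zu bytes\n", sizeof(struct wrapper));
          return 0;
  }

Allocating sizeof(*wrapper) for the abort message therefore over-allocates by a few bytes, but keeps every union member within the bounds of the allocation.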
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202310182253.bcb9JcyJ-lkp@intel.com/ Signed-off-by: Carl Vanderlip Reviewed-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Signed-off-by: Jeffrey Hugo Reviewed-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231027180810.4873-1-quic_jhugo@quicinc.com --- drivers/accel/qaic/qaic_control.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index 388abd40024ba..84915824be543 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -1138,7 +1138,7 @@ static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrapper if (!list_is_first(&wrapper->list, &wrappers->list)) kref_put(&wrapper->ref_count, free_wrapper); - wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans)); + wrapper = add_wrapper(wrappers, sizeof(*wrapper)); if (!wrapper) return -ENOMEM; From 3b511278b6ef514b3ae3d99ff62947cddd434479 Mon Sep 17 00:00:00 2001 From: Pranjal Ramajor Asha Kanojiya Date: Fri, 27 Oct 2023 10:43:30 -0600 Subject: [PATCH 048/117] accel/qaic: Support for 0 resize slice execution in BO Add support to partially execute a slice which is resized to zero. Executing a zero size slice in a BO should mean that there is no DMA transfers involved but you should still configure doorbell and semaphores. For example consider a BO of size 18K and it is sliced into 3 6K slices and user calls partial execute ioctl with resize as 10K. slice 0 - size is 6k and offset is 0, so resize of 10K will not cut short this slice hence we send the entire slice for execution. slice 1 - size is 6k and offset is 6k, so resize of 10K will cut short this slice and only the first 4k should be DMA along with configuring doorbell and semaphores. slice 2 - size is 6k and offset is 12k, so resize of 10k will cut short this slice and no DMA transfer would be involved but we should would configure doorbell and semaphores. This change begs to change the behavior of 0 resize. Currently, 0 resize partial execute ioctl behaves exactly like execute ioctl i.e. no resize. After this patch all the slice in BO should behave exactly like slice 2 in above example. Refactor copy_partial_exec_reqs() to make it more readable and less complex. 
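As an illustration of these dispatch rules, a small userspace sketch (classify() is a hypothetical helper, not driver code) reproducing the 18K BO / 10K resize example above:

  #include <stdio.h>

  static const char *classify(unsigned long long resize,
                              unsigned long long offset,
                              unsigned long long size)
  {
          if (!resize || resize <= offset)
                  return "no DMA (doorbell/semaphores only)";
          if (resize < offset + size)
                  return "partial DMA";
          return "full DMA";
  }

  int main(void)
  {
          /* Three 6K slices of an 18K BO, partial execute with resize = 10K. */
          unsigned long long size = 6 * 1024, offset = 0, resize = 10 * 1024;

          for (int i = 0; i < 3; i++, offset += size)
                  printf("slice %d: %s\n", i, classify(resize, offset, size));
          return 0;
  }

With resize = 0 every slice falls into the first branch, which is the new meaning of a zero resize.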
Signed-off-by: Pranjal Ramajor Asha Kanojiya Reviewed-by: Jeffrey Hugo Signed-off-by: Jeffrey Hugo Reviewed-by: Stanislaw Gruszka Link: https://patchwork.freedesktop.org/patch/msgid/20231027164330.11978-1-quic_jhugo@quicinc.com --- drivers/accel/qaic/qaic_data.c | 104 ++++++++++++++------------------- include/uapi/drm/qaic_accel.h | 5 +- 2 files changed, 46 insertions(+), 63 deletions(-) diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index ebc3cca1b0947..8da81768f2abb 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -51,6 +51,7 @@ }) #define NUM_EVENTS 128 #define NUM_DELAYS 10 +#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size()) static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */ module_param(wait_exec_default_timeout_ms, uint, 0600); @@ -1058,6 +1059,16 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi return ret; } +static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size) +{ + u32 avail = head - tail - 1; + + if (head <= tail) + avail += q_size; + + return avail; +} + static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id, u32 head, u32 *ptail) { @@ -1066,27 +1077,20 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic u32 tail = *ptail; u32 avail; - avail = head - tail; - if (head <= tail) - avail += dbc->nelem; - - --avail; - + avail = fifo_space_avail(head, tail, dbc->nelem); if (avail < slice->nents) return -EAGAIN; if (tail + slice->nents > dbc->nelem) { avail = dbc->nelem - tail; avail = min_t(u32, avail, slice->nents); - memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, - sizeof(*reqs) * avail); + memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail); reqs += avail; avail = slice->nents - avail; if (avail) memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail); } else { - memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, - sizeof(*reqs) * slice->nents); + memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents); } *ptail = (tail + slice->nents) % dbc->nelem; @@ -1094,46 +1098,31 @@ static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slic return 0; } -/* - * Based on the value of resize we may only need to transmit first_n - * entries and the last entry, with last_bytes to send from the last entry. - * Note that first_n could be 0. - */ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, - u64 resize, u32 dbc_id, u32 head, u32 *ptail) + u64 resize, struct dma_bridge_chan *dbc, u32 head, + u32 *ptail) { - struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; struct dbc_req *reqs = slice->reqs; struct dbc_req *last_req; u32 tail = *ptail; - u64 total_bytes; u64 last_bytes; u32 first_n; u32 avail; - int ret; - int i; - - avail = head - tail; - if (head <= tail) - avail += dbc->nelem; - --avail; + avail = fifo_space_avail(head, tail, dbc->nelem); - total_bytes = 0; - for (i = 0; i < slice->nents; i++) { - total_bytes += le32_to_cpu(reqs[i].len); - if (total_bytes >= resize) + /* + * After this for loop is complete, first_n represents the index + * of the last DMA request of this slice that needs to be + * transferred after resizing and last_bytes represents DMA size + * of that request. 
+ */ + last_bytes = resize; + for (first_n = 0; first_n < slice->nents; first_n++) + if (last_bytes > le32_to_cpu(reqs[first_n].len)) + last_bytes -= le32_to_cpu(reqs[first_n].len); + else break; - } - - if (total_bytes < resize) { - /* User space should have used the full buffer path. */ - ret = -EINVAL; - return ret; - } - - first_n = i; - last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize; if (avail < (first_n + 1)) return -EAGAIN; @@ -1142,22 +1131,21 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli if (tail + first_n > dbc->nelem) { avail = dbc->nelem - tail; avail = min_t(u32, avail, first_n); - memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, - sizeof(*reqs) * avail); + memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail); last_req = reqs + avail; avail = first_n - avail; if (avail) memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail); } else { - memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs, - sizeof(*reqs) * first_n); + memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n); } } - /* Copy over the last entry. Here we need to adjust len to the left over + /* + * Copy over the last entry. Here we need to adjust len to the left over * size, and set src and dst to the entry it is copied to. */ - last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size(); + last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem); memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs)); /* @@ -1168,6 +1156,9 @@ static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_sli last_req->len = cpu_to_le32((u32)last_bytes); last_req->src_addr = reqs[first_n].src_addr; last_req->dest_addr = reqs[first_n].dest_addr; + if (!last_bytes) + /* Disable DMA transfer */ + last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd; *ptail = (tail + first_n + 1) % dbc->nelem; @@ -1227,26 +1218,17 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil bo->req_id = dbc->next_req_id++; list_for_each_entry(slice, &bo->slices, slice) { - /* - * If this slice does not fall under the given - * resize then skip this slice and continue the loop - */ - if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset) - continue; - for (j = 0; j < slice->nents; j++) slice->reqs[j].req_id = cpu_to_le16(bo->req_id); - /* - * If it is a partial execute ioctl call then check if - * resize has cut this slice short then do a partial copy - * else do complete copy - */ - if (is_partial && pexec[i].resize && - pexec[i].resize < slice->offset + slice->size) + if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset)) + /* Configure the slice for no DMA transfer */ + ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail); + else if (is_partial && pexec[i].resize < slice->offset + slice->size) + /* Configure the slice to be partially DMA transferred */ ret = copy_partial_exec_reqs(qdev, slice, - pexec[i].resize - slice->offset, - dbc->id, head, tail); + pexec[i].resize - slice->offset, dbc, + head, tail); else ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); if (ret) { diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h index 43ac5d8645125..9dab32316aee4 100644 --- a/include/uapi/drm/qaic_accel.h +++ b/include/uapi/drm/qaic_accel.h @@ -287,8 +287,9 @@ struct qaic_execute_entry { * struct qaic_partial_execute_entry - Defines a BO to resize and submit. * @handle: In. 
GEM handle of the BO to commit to the device. * @dir: In. Direction of data. 1 = to device, 2 = from device. - * @resize: In. New size of the BO. Must be <= the original BO size. 0 is - * short for no resize. + * @resize: In. New size of the BO. Must be <= the original BO size. + * @resize as 0 would be interpreted as no DMA transfer is + * involved. */ struct qaic_partial_execute_entry { __u32 handle; From 7abbbe2694b3d4fd366dc91934f42c047a6d282d Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 2 Nov 2023 10:55:34 +0000 Subject: [PATCH 049/117] drm/sched: Rename drm_sched_get_cleanup_job to be more descriptive "Get cleanup job" makes it sound like helper is returning a job which will execute some cleanup, or something, while the kerneldoc itself accurately says "fetch the next _finished_ job". So lets rename the helper to be self documenting. Signed-off-by: Tvrtko Ursulin Cc: Luben Tuikov Cc: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-2-tvrtko.ursulin@linux.intel.com Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 98b2ad54fc707..fb64b35451f5a 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -448,7 +448,7 @@ static void drm_sched_job_timedout(struct work_struct *work) sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); - /* Protects against concurrent deletion in drm_sched_get_cleanup_job */ + /* Protects against concurrent deletion in drm_sched_get_finished_job */ spin_lock(&sched->job_list_lock); job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); @@ -500,9 +500,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) /* * Reinsert back the bad job here - now it's safe as - * drm_sched_get_cleanup_job cannot race against us and release the + * drm_sched_get_finished_job cannot race against us and release the * bad job at this point - we parked (waited for) any in progress - * (earlier) cleanups and drm_sched_get_cleanup_job will not be called + * (earlier) cleanups and drm_sched_get_finished_job will not be called * now until the scheduler thread is unparked. */ if (bad && bad->sched == sched) @@ -960,7 +960,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) } /** - * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed + * drm_sched_get_finished_job - fetch the next finished job to be destroyed * * @sched: scheduler instance * @@ -968,7 +968,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) * ready for it to be destroyed. 
*/ static struct drm_sched_job * -drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) +drm_sched_get_finished_job(struct drm_gpu_scheduler *sched) { struct drm_sched_job *job, *next; @@ -1059,14 +1059,14 @@ static void drm_sched_free_job_work(struct work_struct *w) { struct drm_gpu_scheduler *sched = container_of(w, struct drm_gpu_scheduler, work_free_job); - struct drm_sched_job *cleanup_job; + struct drm_sched_job *job; if (READ_ONCE(sched->pause_submit)) return; - cleanup_job = drm_sched_get_cleanup_job(sched); - if (cleanup_job) { - sched->ops->free_job(cleanup_job); + job = drm_sched_get_finished_job(sched); + if (job) { + sched->ops->free_job(job); drm_sched_free_job_queue_if_done(sched); drm_sched_run_job_queue_if_ready(sched); From e608d9f7ac1a94a4a63d1ef2b37dd80669ad828d Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 2 Nov 2023 10:55:35 +0000 Subject: [PATCH 050/117] drm/sched: Move free worker re-queuing out of the if block Whether or not there are more jobs to clean up does not depend on the existance of the current job, given both drm_sched_get_finished_job and drm_sched_free_job_queue_if_done take and drop the job list lock. Therefore it is confusing to make it read like there is a dependency. Signed-off-by: Tvrtko Ursulin Cc: Luben Tuikov Cc: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-3-tvrtko.ursulin@linux.intel.com Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index fb64b35451f5a..e1658030613f5 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1065,12 +1065,11 @@ static void drm_sched_free_job_work(struct work_struct *w) return; job = drm_sched_get_finished_job(sched); - if (job) { + if (job) sched->ops->free_job(job); - drm_sched_free_job_queue_if_done(sched); - drm_sched_run_job_queue_if_ready(sched); - } + drm_sched_free_job_queue_if_done(sched); + drm_sched_run_job_queue_if_ready(sched); } /** From 67dd1d8c9f6543661720b9a89e28a25488cb8753 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 2 Nov 2023 10:55:36 +0000 Subject: [PATCH 051/117] drm/sched: Rename drm_sched_free_job_queue to be more descriptive The current name makes it sound like helper will free a queue, while what it does is it enqueues the free job worker. Rename it to drm_sched_run_free_queue to align with existing drm_sched_run_job_queue. Despite that creating an illusion there are two queues, while in reality there is only one, at least it creates a consistent naming for the two enqueuing helpers. At the same time simplify the "if done" helper by dropping the suffix and adding a double underscore prefix to the one which just enqueues. 
Signed-off-by: Tvrtko Ursulin Cc: Luben Tuikov Cc: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-4-tvrtko.ursulin@linux.intel.com Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index e1658030613f5..f9baca20b4383 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -266,20 +266,20 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) } /** - * drm_sched_free_job_queue - enqueue free-job work + * __drm_sched_run_free_queue - enqueue free-job work * @sched: scheduler instance */ -static void drm_sched_free_job_queue(struct drm_gpu_scheduler *sched) +static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) { if (!READ_ONCE(sched->pause_submit)) queue_work(sched->submit_wq, &sched->work_free_job); } /** - * drm_sched_free_job_queue_if_done - enqueue free-job work if ready + * drm_sched_run_free_queue - enqueue free-job work if ready * @sched: scheduler instance */ -static void drm_sched_free_job_queue_if_done(struct drm_gpu_scheduler *sched) +static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) { struct drm_sched_job *job; @@ -287,7 +287,7 @@ static void drm_sched_free_job_queue_if_done(struct drm_gpu_scheduler *sched) job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); if (job && dma_fence_is_signaled(&job->s_fence->finished)) - drm_sched_free_job_queue(sched); + __drm_sched_run_free_queue(sched); spin_unlock(&sched->job_list_lock); } @@ -310,7 +310,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result) dma_fence_get(&s_fence->finished); drm_sched_fence_finished(s_fence, result); dma_fence_put(&s_fence->finished); - drm_sched_free_job_queue(sched); + __drm_sched_run_free_queue(sched); } /** @@ -1068,7 +1068,7 @@ static void drm_sched_free_job_work(struct work_struct *w) if (job) sched->ops->free_job(job); - drm_sched_free_job_queue_if_done(sched); + drm_sched_run_free_queue(sched); drm_sched_run_job_queue_if_ready(sched); } From 35a4279d42db534ad71a3a598029a53f22856f93 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 2 Nov 2023 10:55:37 +0000 Subject: [PATCH 052/117] drm/sched: Rename drm_sched_run_job_queue_if_ready and clarify kerneldoc "If ready" is not immediately clear what it means - is the scheduler ready or something else? 
Drop the suffix, clarify kerneldoc, and employ the same naming scheme as in drm_sched_run_free_queue: - drm_sched_run_job_queue - enqueues if there is something to enqueue *and* scheduler is ready (can queue) - __drm_sched_run_job_queue - low-level helper to simply queue the job Signed-off-by: Tvrtko Ursulin Cc: Luben Tuikov Cc: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-5-tvrtko.ursulin@linux.intel.com Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index f9baca20b4383..d5ddbce68fb77 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -256,10 +256,10 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) } /** - * drm_sched_run_job_queue - enqueue run-job work + * __drm_sched_run_job_queue - enqueue run-job work * @sched: scheduler instance */ -static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) +static void __drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) { if (!READ_ONCE(sched->pause_submit)) queue_work(sched->submit_wq, &sched->work_run_job); @@ -928,7 +928,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) { if (drm_sched_can_queue(sched)) - drm_sched_run_job_queue(sched); + __drm_sched_run_job_queue(sched); } /** @@ -1041,13 +1041,13 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, EXPORT_SYMBOL(drm_sched_pick_best); /** - * drm_sched_run_job_queue_if_ready - enqueue run-job work if ready + * drm_sched_run_job_queue - enqueue run-job work if there are ready entities * @sched: scheduler instance */ -static void drm_sched_run_job_queue_if_ready(struct drm_gpu_scheduler *sched) +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) { if (drm_sched_select_entity(sched)) - drm_sched_run_job_queue(sched); + __drm_sched_run_job_queue(sched); } /** @@ -1069,7 +1069,7 @@ static void drm_sched_free_job_work(struct work_struct *w) sched->ops->free_job(job); drm_sched_run_free_queue(sched); - drm_sched_run_job_queue_if_ready(sched); + drm_sched_run_job_queue(sched); } /** @@ -1126,7 +1126,7 @@ static void drm_sched_run_job_work(struct work_struct *w) } wake_up(&sched->job_scheduled); - drm_sched_run_job_queue_if_ready(sched); + drm_sched_run_job_queue(sched); } /** From f12af4c461fb6cd5ed7b48f8b4d09b22eb19fcc5 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 2 Nov 2023 10:55:38 +0000 Subject: [PATCH 053/117] drm/sched: Drop suffix from drm_sched_wakeup_if_can_queue Because a) helper is exported to other parts of the scheduler and b) there isn't a plain drm_sched_wakeup to begin with, I think we can drop the suffix and by doing so separate the intimiate knowledge between the scheduler components a bit better. 
Signed-off-by: Tvrtko Ursulin Cc: Luben Tuikov Cc: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20231102105538.391648-6-tvrtko.ursulin@linux.intel.com Reviewed-by: Luben Tuikov Signed-off-by: Luben Tuikov --- drivers/gpu/drm/scheduler/sched_entity.c | 4 ++-- drivers/gpu/drm/scheduler/sched_main.c | 4 ++-- include/drm/gpu_scheduler.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 409e4256f6e7d..f1db63cc81981 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -370,7 +370,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, container_of(cb, struct drm_sched_entity, cb); drm_sched_entity_clear_dep(f, cb); - drm_sched_wakeup_if_can_queue(entity->rq->sched); + drm_sched_wakeup(entity->rq->sched); } /** @@ -602,7 +602,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) drm_sched_rq_update_fifo(entity, submit_ts); - drm_sched_wakeup_if_can_queue(entity->rq->sched); + drm_sched_wakeup(entity->rq->sched); } } EXPORT_SYMBOL(drm_sched_entity_push_job); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index d5ddbce68fb77..27843e37d9b76 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -920,12 +920,12 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) } /** - * drm_sched_wakeup_if_can_queue - Wake up the scheduler + * drm_sched_wakeup - Wake up the scheduler if it is ready to queue * @sched: scheduler instance * * Wake up the scheduler if we can queue jobs. */ -void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched) +void drm_sched_wakeup(struct drm_gpu_scheduler *sched) { if (drm_sched_can_queue(sched)) __drm_sched_run_job_queue(sched); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 9daa78d2c04c1..754fd2217334e 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -559,7 +559,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched); void drm_sched_job_cleanup(struct drm_sched_job *job); -void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched); bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched); void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched); void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched); From 5faf6e1853d30d113ebc9977e015d0152e5e1970 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 27 Sep 2023 14:38:37 +0100 Subject: [PATCH 054/117] drm: Do not round to megabytes for greater than 1MiB sizes in fdinfo stats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is better not to lose precision and not revert to 1 MiB size granularity for every size greater than 1 MiB. Sizes in KiB should not be so troublesome to read (and in fact machine parsing is I expect the norm here), they align with other api like /proc/meminfo, and they allow writing tests for the interface without having to embed drm.ko implementation knowledge into them. (Like knowing that minimum buffer size one can use for successful verification has to be 1MiB aligned, and on top account for any pre-existing memory utilisation outside of driver's control.) 
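For illustration, a small userspace sketch of the proposed rounding (made-up unit walk, not the drm_file.c helper): only step up to a bigger unit while the value stays an exact multiple of 1024, so a 1.5 MiB object is reported as 1536 KiB rather than 1 MiB.

  #include <stdio.h>

  int main(void)
  {
          const char *units[] = { "", " KiB", " MiB", " GiB" };
          unsigned long long sz = 1536 * 1024;   /* a 1.5 MiB object */
          unsigned int u = 0;

          /* Stop dividing as soon as precision would be lost. */
          while (u < 3 && sz && !(sz % 1024)) {
                  sz /= 1024;
                  u++;
          }
          printf("%llu%s\n", sz, units[u]);      /* prints "1536 KiB" */
          return 0;
  }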
But probably even more importantly I think that it is just better to show the accurate sizes and not arbitrary lose precision for a little bit of a stretched use case of eyeballing fdinfo text directly. Signed-off-by: Tvrtko Ursulin Cc: Rob Clark Cc: Adrián Larumbe Cc: steven.price@arm.com Reviewed-by: Steven Price Link: https://lore.kernel.org/r/20230927133843.247957-2-tvrtko.ursulin@linux.intel.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 446458aca8e99..5ddaffd325865 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -913,7 +913,7 @@ static void print_size(struct drm_printer *p, const char *stat, unsigned u; for (u = 0; u < ARRAY_SIZE(units) - 1; u++) { - if (sz < SZ_1K) + if (sz == 0 || !IS_ALIGNED(sz, SZ_1K)) break; sz = div_u64(sz, SZ_1K); } From 09a93cc4f7d1893777f6b788bffe60d64e4d5df7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Tue, 5 Sep 2023 18:06:34 -0300 Subject: [PATCH 055/117] drm/v3d: Implement show_fdinfo() callback for GPU usage stats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch exposes the accumulated amount of active time per client through the fdinfo infrastructure. The amount of active time is exposed for each V3D queue: BIN, RENDER, CSD, TFU and CACHE_CLEAN. In order to calculate the amount of active time per client, a CPU clock is used through the function local_clock(). The point where the jobs has started is marked and is finally compared with the time that the job had finished. Moreover, the number of jobs submitted to each queue is also exposed on fdinfo through the identifier "v3d-jobs-". Co-developed-by: Jose Maria Casanova Crespo Signed-off-by: Jose Maria Casanova Crespo Signed-off-by: Maíra Canal Acked-by: Jose Maria Casanova Crespo Reviewed-by: Melissa Wen Link: https://patchwork.freedesktop.org/patch/msgid/20230905213416.1290219-3-mcanal@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.c | 36 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/v3d/v3d_drv.h | 23 +++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_gem.c | 1 + drivers/gpu/drm/v3d/v3d_irq.c | 21 +++++++++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 20 ++++++++++++++++++ 5 files changed, 100 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 1ab46bdf8ad71..82984b4df2a2c 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -111,6 +112,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file) v3d_priv->v3d = v3d; for (i = 0; i < V3D_MAX_QUEUES; i++) { + v3d_priv->enabled_ns[i] = 0; + v3d_priv->start_ns[i] = 0; + v3d_priv->jobs_sent[i] = 0; + sched = &v3d->queue[i].sched; drm_sched_entity_init(&v3d_priv->sched_entity[i], DRM_SCHED_PRIORITY_NORMAL, &sched, @@ -136,7 +141,35 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) kfree(v3d_priv); } -DEFINE_DRM_GEM_FOPS(v3d_drm_fops); +static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file) +{ + struct v3d_file_priv *file_priv = file->driver_priv; + u64 timestamp = local_clock(); + enum v3d_queue queue; + + for (queue = 0; queue < V3D_MAX_QUEUES; queue++) { + /* Note that, in case of a GPU reset, the time spent during an + * attempt of executing the job is not computed in the runtime. 
+ */ + drm_printf(p, "drm-engine-%s: \t%llu ns\n", + v3d_queue_to_string(queue), + file_priv->start_ns[queue] ? file_priv->enabled_ns[queue] + + timestamp - file_priv->start_ns[queue] + : file_priv->enabled_ns[queue]); + + /* Note that we only count jobs that completed. Therefore, jobs + * that were resubmitted due to a GPU reset are not computed. + */ + drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n", + v3d_queue_to_string(queue), file_priv->jobs_sent[queue]); + } +} + +static const struct file_operations v3d_drm_fops = { + .owner = THIS_MODULE, + DRM_GEM_FOPS, + .show_fdinfo = drm_show_fdinfo, +}; /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP * protection between clients. Note that render nodes would be @@ -176,6 +209,7 @@ static const struct drm_driver v3d_drm_driver = { .ioctls = v3d_drm_ioctls, .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), .fops = &v3d_drm_fops, + .show_fdinfo = v3d_show_fdinfo, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 106454f28956b..8f9d93239a174 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -21,6 +21,18 @@ struct reset_control; #define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1) +static inline char *v3d_queue_to_string(enum v3d_queue queue) +{ + switch (queue) { + case V3D_BIN: return "bin"; + case V3D_RENDER: return "render"; + case V3D_TFU: return "tfu"; + case V3D_CSD: return "csd"; + case V3D_CACHE_CLEAN: return "cache_clean"; + } + return "UNKNOWN"; +} + struct v3d_queue_state { struct drm_gpu_scheduler sched; @@ -167,6 +179,12 @@ struct v3d_file_priv { } perfmon; struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; + + u64 start_ns[V3D_MAX_QUEUES]; + + u64 enabled_ns[V3D_MAX_QUEUES]; + + u64 jobs_sent[V3D_MAX_QUEUES]; }; struct v3d_bo { @@ -238,6 +256,11 @@ struct v3d_job { */ struct v3d_perfmon *perfmon; + /* File descriptor of the process that submitted the job that could be used + * for collecting stats by process of GPU usage. + */ + struct drm_file *file; + /* Callback for the freeing of the job on refcount going to 0. 
*/ void (*free)(struct kref *ref); }; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 5688d46d30e69..a33e90f29bd52 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -415,6 +415,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, job = *container; job->v3d = v3d; job->free = free; + job->file = file_priv; ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], v3d_priv); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 9705e0cb576d8..ba390b782103a 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -14,6 +14,7 @@ */ #include +#include #include "v3d_drv.h" #include "v3d_regs.h" @@ -101,6 +102,11 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_FLDONE) { struct v3d_fence *fence = to_v3d_fence(v3d->bin_job->base.irq_fence); + struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv; + + file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN]; + file->jobs_sent[V3D_BIN]++; + file->start_ns[V3D_BIN] = 0; trace_v3d_bcl_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -110,6 +116,11 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_FRDONE) { struct v3d_fence *fence = to_v3d_fence(v3d->render_job->base.irq_fence); + struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv; + + file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER]; + file->jobs_sent[V3D_RENDER]++; + file->start_ns[V3D_RENDER] = 0; trace_v3d_rcl_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -119,6 +130,11 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_CSDDONE(v3d->ver)) { struct v3d_fence *fence = to_v3d_fence(v3d->csd_job->base.irq_fence); + struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv; + + file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD]; + file->jobs_sent[V3D_CSD]++; + file->start_ns[V3D_CSD] = 0; trace_v3d_csd_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -155,6 +171,11 @@ v3d_hub_irq(int irq, void *arg) if (intsts & V3D_HUB_INT_TFUC) { struct v3d_fence *fence = to_v3d_fence(v3d->tfu_job->base.irq_fence); + struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv; + + file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU]; + file->jobs_sent[V3D_TFU]++; + file->start_ns[V3D_TFU] = 0; trace_v3d_tfu_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index f59b63e51b14b..19afceacda837 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -18,6 +18,7 @@ * semaphores to interlock between them. */ +#include #include #include "v3d_drv.h" @@ -76,6 +77,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) { struct v3d_bin_job *job = to_bin_job(sched_job); struct v3d_dev *v3d = job->base.v3d; + struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; unsigned long irqflags; @@ -107,6 +109,8 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, job->start, job->end); + file->start_ns[V3D_BIN] = local_clock(); + v3d_switch_perfmon(v3d, &job->base); /* Set the current and end address of the control list. 
@@ -131,6 +135,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) { struct v3d_render_job *job = to_render_job(sched_job); struct v3d_dev *v3d = job->base.v3d; + struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; @@ -158,6 +163,8 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, job->start, job->end); + file->start_ns[V3D_RENDER] = local_clock(); + v3d_switch_perfmon(v3d, &job->base); /* XXX: Set the QCFG */ @@ -176,6 +183,7 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) { struct v3d_tfu_job *job = to_tfu_job(sched_job); struct v3d_dev *v3d = job->base.v3d; + struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; @@ -190,6 +198,8 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); + file->start_ns[V3D_TFU] = local_clock(); + V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia); V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis); V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica); @@ -215,6 +225,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) { struct v3d_csd_job *job = to_csd_job(sched_job); struct v3d_dev *v3d = job->base.v3d; + struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; int i, csd_cfg0_reg, csd_cfg_reg_count; @@ -233,6 +244,8 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); + file->start_ns[V3D_CSD] = local_clock(); + v3d_switch_perfmon(v3d, &job->base); csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver); @@ -250,9 +263,16 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job) { struct v3d_job *job = to_v3d_job(sched_job); struct v3d_dev *v3d = job->v3d; + struct v3d_file_priv *file = job->file->driver_priv; + + file->start_ns[V3D_CACHE_CLEAN] = local_clock(); v3d_clean_caches(v3d); + file->enabled_ns[V3D_CACHE_CLEAN] += local_clock() - file->start_ns[V3D_CACHE_CLEAN]; + file->jobs_sent[V3D_CACHE_CLEAN]++; + file->start_ns[V3D_CACHE_CLEAN] = 0; + return NULL; } From 509433d8146c64ca9e0bcc370ec910821fffe80c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Tue, 5 Sep 2023 18:06:35 -0300 Subject: [PATCH 056/117] drm/v3d: Expose the total GPU usage stats on sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous patch exposed the accumulated amount of active time per client for each V3D queue. But this doesn't provide a global notion of the GPU usage. Therefore, provide the accumulated amount of active time for each V3D queue (BIN, RENDER, CSD, TFU and CACHE_CLEAN), considering all the jobs submitted to the queue, independent of the client. This data is exposed through the sysfs interface, so that if the interface is queried at two different points of time the usage percentage of each of the queues can be calculated. 
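For example, the per-queue busy percentage over a sampling window follows from two reads of the new gpu_stats attribute; a minimal sketch with made-up sample values for the render queue (all numbers in nanoseconds):

  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical timestamp/runtime pairs from two reads, 1 s apart. */
          unsigned long long ts1 = 239043069420ULL, rt1 = 27284814161ULL;
          unsigned long long ts2 = 240043069420ULL, rt2 = 27584814161ULL;

          /* Fraction of the window the queue spent running jobs. */
          double busy = 100.0 * (double)(rt2 - rt1) / (double)(ts2 - ts1);

          printf("render queue busy: %.1f%%\n", busy);   /* 30.0% */
          return 0;
  }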
Co-developed-by: Jose Maria Casanova Crespo Signed-off-by: Jose Maria Casanova Crespo Signed-off-by: Maíra Canal Acked-by: Jose Maria Casanova Crespo Reviewed-by: Melissa Wen Link: https://patchwork.freedesktop.org/patch/msgid/20230905213416.1290219-3-mcanal@igalia.com --- drivers/gpu/drm/v3d/Makefile | 3 +- drivers/gpu/drm/v3d/v3d_drv.c | 9 +++++ drivers/gpu/drm/v3d/v3d_drv.h | 8 ++++ drivers/gpu/drm/v3d/v3d_gem.c | 6 ++- drivers/gpu/drm/v3d/v3d_irq.c | 28 +++++++++++++ drivers/gpu/drm/v3d/v3d_sched.c | 15 ++++++- drivers/gpu/drm/v3d/v3d_sysfs.c | 69 +++++++++++++++++++++++++++++++++ 7 files changed, 135 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/v3d/v3d_sysfs.c diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index e8b3141370207..4b21b20e49981 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -11,7 +11,8 @@ v3d-y := \ v3d_mmu.o \ v3d_perfmon.o \ v3d_trace_points.o \ - v3d_sched.o + v3d_sched.o \ + v3d_sysfs.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 82984b4df2a2c..44a1ca57d6a44 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -316,8 +316,14 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) goto irq_disable; + ret = v3d_sysfs_init(dev); + if (ret) + goto drm_unregister; + return 0; +drm_unregister: + drm_dev_unregister(drm); irq_disable: v3d_irq_disable(v3d); gem_destroy: @@ -331,6 +337,9 @@ static void v3d_platform_drm_remove(struct platform_device *pdev) { struct drm_device *drm = platform_get_drvdata(pdev); struct v3d_dev *v3d = to_v3d_dev(drm); + struct device *dev = &pdev->dev; + + v3d_sysfs_destroy(dev); drm_dev_unregister(drm); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 8f9d93239a174..4c59fefaa0b4b 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -38,6 +38,10 @@ struct v3d_queue_state { u64 fence_context; u64 emit_seqno; + + u64 start_ns; + u64 enabled_ns; + u64 jobs_sent; }; /* Performance monitor object. 
The perform lifetime is controlled by userspace @@ -441,3 +445,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); + +/* v3d_sysfs.c */ +int v3d_sysfs_init(struct device *dev); +void v3d_sysfs_destroy(struct device *dev); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index a33e90f29bd52..712675134c048 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -1014,8 +1014,12 @@ v3d_gem_init(struct drm_device *dev) u32 pt_size = 4096 * 1024; int ret, i; - for (i = 0; i < V3D_MAX_QUEUES; i++) + for (i = 0; i < V3D_MAX_QUEUES; i++) { v3d->queue[i].fence_context = dma_fence_context_alloc(1); + v3d->queue[i].start_ns = 0; + v3d->queue[i].enabled_ns = 0; + v3d->queue[i].jobs_sent = 0; + } spin_lock_init(&v3d->mm_lock); spin_lock_init(&v3d->job_lock); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index ba390b782103a..afc76390a197a 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -103,10 +103,17 @@ v3d_irq(int irq, void *arg) struct v3d_fence *fence = to_v3d_fence(v3d->bin_job->base.irq_fence); struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv; + u64 runtime = local_clock() - file->start_ns[V3D_BIN]; file->enabled_ns[V3D_BIN] += local_clock() - file->start_ns[V3D_BIN]; file->jobs_sent[V3D_BIN]++; + v3d->queue[V3D_BIN].jobs_sent++; + file->start_ns[V3D_BIN] = 0; + v3d->queue[V3D_BIN].start_ns = 0; + + file->enabled_ns[V3D_BIN] += runtime; + v3d->queue[V3D_BIN].enabled_ns += runtime; trace_v3d_bcl_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -117,10 +124,17 @@ v3d_irq(int irq, void *arg) struct v3d_fence *fence = to_v3d_fence(v3d->render_job->base.irq_fence); struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv; + u64 runtime = local_clock() - file->start_ns[V3D_RENDER]; file->enabled_ns[V3D_RENDER] += local_clock() - file->start_ns[V3D_RENDER]; file->jobs_sent[V3D_RENDER]++; + v3d->queue[V3D_RENDER].jobs_sent++; + file->start_ns[V3D_RENDER] = 0; + v3d->queue[V3D_RENDER].start_ns = 0; + + file->enabled_ns[V3D_RENDER] += runtime; + v3d->queue[V3D_RENDER].enabled_ns += runtime; trace_v3d_rcl_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -131,10 +145,17 @@ v3d_irq(int irq, void *arg) struct v3d_fence *fence = to_v3d_fence(v3d->csd_job->base.irq_fence); struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv; + u64 runtime = local_clock() - file->start_ns[V3D_CSD]; file->enabled_ns[V3D_CSD] += local_clock() - file->start_ns[V3D_CSD]; file->jobs_sent[V3D_CSD]++; + v3d->queue[V3D_CSD].jobs_sent++; + file->start_ns[V3D_CSD] = 0; + v3d->queue[V3D_CSD].start_ns = 0; + + file->enabled_ns[V3D_CSD] += runtime; + v3d->queue[V3D_CSD].enabled_ns += runtime; trace_v3d_csd_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -172,10 +193,17 @@ v3d_hub_irq(int irq, void *arg) struct v3d_fence *fence = to_v3d_fence(v3d->tfu_job->base.irq_fence); struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv; + u64 runtime = local_clock() - file->start_ns[V3D_TFU]; file->enabled_ns[V3D_TFU] += local_clock() - file->start_ns[V3D_TFU]; file->jobs_sent[V3D_TFU]++; + v3d->queue[V3D_TFU].jobs_sent++; + file->start_ns[V3D_TFU] = 0; + v3d->queue[V3D_TFU].start_ns = 0; + + file->enabled_ns[V3D_TFU] += runtime; + v3d->queue[V3D_TFU].enabled_ns += runtime; 
trace_v3d_tfu_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 19afceacda837..fccbea2a5f2eb 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -110,6 +110,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) job->start, job->end); file->start_ns[V3D_BIN] = local_clock(); + v3d->queue[V3D_BIN].start_ns = file->start_ns[V3D_BIN]; v3d_switch_perfmon(v3d, &job->base); @@ -164,6 +165,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) job->start, job->end); file->start_ns[V3D_RENDER] = local_clock(); + v3d->queue[V3D_RENDER].start_ns = file->start_ns[V3D_RENDER]; v3d_switch_perfmon(v3d, &job->base); @@ -199,6 +201,7 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); file->start_ns[V3D_TFU] = local_clock(); + v3d->queue[V3D_TFU].start_ns = file->start_ns[V3D_TFU]; V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia); V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis); @@ -245,6 +248,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); file->start_ns[V3D_CSD] = local_clock(); + v3d->queue[V3D_CSD].start_ns = file->start_ns[V3D_CSD]; v3d_switch_perfmon(v3d, &job->base); @@ -264,14 +268,23 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job) struct v3d_job *job = to_v3d_job(sched_job); struct v3d_dev *v3d = job->v3d; struct v3d_file_priv *file = job->file->driver_priv; + u64 runtime; file->start_ns[V3D_CACHE_CLEAN] = local_clock(); + v3d->queue[V3D_CACHE_CLEAN].start_ns = file->start_ns[V3D_CACHE_CLEAN]; v3d_clean_caches(v3d); - file->enabled_ns[V3D_CACHE_CLEAN] += local_clock() - file->start_ns[V3D_CACHE_CLEAN]; + runtime = local_clock() - file->start_ns[V3D_CACHE_CLEAN]; + + file->enabled_ns[V3D_CACHE_CLEAN] += runtime; + v3d->queue[V3D_CACHE_CLEAN].enabled_ns += runtime; + file->jobs_sent[V3D_CACHE_CLEAN]++; + v3d->queue[V3D_CACHE_CLEAN].jobs_sent++; + file->start_ns[V3D_CACHE_CLEAN] = 0; + v3d->queue[V3D_CACHE_CLEAN].start_ns = 0; return NULL; } diff --git a/drivers/gpu/drm/v3d/v3d_sysfs.c b/drivers/gpu/drm/v3d/v3d_sysfs.c new file mode 100644 index 0000000000000..d106845ba890c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_sysfs.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Igalia S.L. 
+ */ + +#include +#include + +#include "v3d_drv.h" + +static ssize_t +gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct v3d_dev *v3d = to_v3d_dev(drm); + enum v3d_queue queue; + u64 timestamp = local_clock(); + u64 active_runtime; + ssize_t len = 0; + + len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n"); + + for (queue = 0; queue < V3D_MAX_QUEUES; queue++) { + if (v3d->queue[queue].start_ns) + active_runtime = timestamp - v3d->queue[queue].start_ns; + else + active_runtime = 0; + + /* Each line will display the queue name, timestamp, the number + * of jobs sent to that queue and the runtime, as can be seem here: + * + * queue timestamp jobs runtime + * bin 239043069420 22620 17438164056 + * render 239043069420 22619 27284814161 + * tfu 239043069420 8763 394592566 + * csd 239043069420 3168 10787905530 + * cache_clean 239043069420 6127 237375940 + */ + len += sysfs_emit_at(buf, len, "%s\t%llu\t%llu\t%llu\n", + v3d_queue_to_string(queue), + timestamp, + v3d->queue[queue].jobs_sent, + v3d->queue[queue].enabled_ns + active_runtime); + } + + return len; +} +static DEVICE_ATTR_RO(gpu_stats); + +static struct attribute *v3d_sysfs_entries[] = { + &dev_attr_gpu_stats.attr, + NULL, +}; + +static struct attribute_group v3d_sysfs_attr_group = { + .attrs = v3d_sysfs_entries, +}; + +int +v3d_sysfs_init(struct device *dev) +{ + return sysfs_create_group(&dev->kobj, &v3d_sysfs_attr_group); +} + +void +v3d_sysfs_destroy(struct device *dev) +{ + return sysfs_remove_group(&dev->kobj, &v3d_sysfs_attr_group); +} From 27d9620e9a9a6bc27a646b464b85860d91e21af3 Mon Sep 17 00:00:00 2001 From: Dario Binacchi Date: Mon, 23 Oct 2023 11:05:56 +0200 Subject: [PATCH 057/117] drm/panel: nt35510: fix typo Replace 'HFP' with 'HBP'. Fixes: 899f24ed8d3a ("drm/panel: Add driver for Novatek NT35510-based panels") Signed-off-by: Dario Binacchi Reviewed-by: Neil Armstrong Signed-off-by: Linus Walleij Link: https://patchwork.freedesktop.org/patch/msgid/20231023090613.1694133-1-dario.binacchi@amarulasolutions.com --- drivers/gpu/drm/panel/panel-novatek-nt35510.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index d6dceb8580081..83a9cf53d2690 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -1023,7 +1023,7 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = { .hdisplay = 480, .hsync_start = 480 + 2, /* HFP = 2 */ .hsync_end = 480 + 2 + 0, /* HSync = 0 */ - .htotal = 480 + 2 + 0 + 5, /* HFP = 5 */ + .htotal = 480 + 2 + 0 + 5, /* HBP = 5 */ .vdisplay = 800, .vsync_start = 800 + 2, /* VFP = 2 */ .vsync_end = 800 + 2 + 0, /* VSync = 0 */ From c015fb6d01adb616fb54824feb55ce5ab18e8ca1 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Mon, 6 Nov 2023 14:08:26 +0100 Subject: [PATCH 058/117] accel/ivpu: Fix compilation with CONFIG_PM=n Use pm_runtime_status_suspended() instead of dev->power.runtime_status field that is not available without PM. 
Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://lore.kernel.org/all/20231106130827.1600948-1-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/ivpu_ipc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 255f2b8b0b5e3..618dbc17df801 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "ivpu_drv.h" @@ -318,8 +319,7 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r struct vpu_jsm_msg hb_resp; int ret, hb_ret; - drm_WARN_ON(&vdev->drm, - vdev->drm.dev->power.runtime_status == RPM_SUSPENDED); + drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev)); ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms); if (ret != -ETIMEDOUT) From bc8d6a9df99038f61adf2881ad9f717abe414e06 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Thu, 2 Nov 2023 18:17:20 -0400 Subject: [PATCH 059/117] drm/sched: Don't disturb the entity when in RR-mode scheduling Don't call drm_sched_select_entity() in drm_sched_run_job_queue(). In fact, rename __drm_sched_run_job_queue() to just drm_sched_run_job_queue(), and let it do just that, schedule the work item for execution. The problem is that drm_sched_run_job_queue() calls drm_sched_select_entity() to determine if the scheduler has an entity ready in one of its run-queues, and in the case of the Round-Robin (RR) scheduling, the function drm_sched_rq_select_entity_rr() does just that, selects the _next_ entity which is ready, sets up the run-queue and completion and returns that entity. The FIFO scheduling algorithm is unaffected. Now, since drm_sched_run_job_work() also calls drm_sched_select_entity(), then in the case of RR scheduling, that would result in drm_sched_select_entity() having been called twice, which may result in skipping a ready entity if more than one entity is ready. This commit fixes this by eliminating the call to drm_sched_select_entity() from drm_sched_run_job_queue(), and leaves it only in drm_sched_run_job_work(). v2: Rebased on top of Tvrtko's renames series of patches. (Luben) Add fixes-tag. 
(Tvrtko) Signed-off-by: Luben Tuikov Fixes: f7fe64ad0f22ff ("drm/sched: Split free_job into own work item") Reviewed-by: Matthew Brost Reviewed-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231107041020.10035-2-ltuikov89@gmail.com --- drivers/gpu/drm/scheduler/sched_main.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 27843e37d9b76..cd0dc3f81d05f 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -256,10 +256,10 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) } /** - * __drm_sched_run_job_queue - enqueue run-job work + * drm_sched_run_job_queue - enqueue run-job work * @sched: scheduler instance */ -static void __drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) +static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) { if (!READ_ONCE(sched->pause_submit)) queue_work(sched->submit_wq, &sched->work_run_job); @@ -928,7 +928,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) void drm_sched_wakeup(struct drm_gpu_scheduler *sched) { if (drm_sched_can_queue(sched)) - __drm_sched_run_job_queue(sched); + drm_sched_run_job_queue(sched); } /** @@ -1040,16 +1040,6 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, } EXPORT_SYMBOL(drm_sched_pick_best); -/** - * drm_sched_run_job_queue - enqueue run-job work if there are ready entities - * @sched: scheduler instance - */ -static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched) -{ - if (drm_sched_select_entity(sched)) - __drm_sched_run_job_queue(sched); -} - /** * drm_sched_free_job_work - worker to call free_job * From 89d04995f76ce8318fe752feb33b1a45893b1a38 Mon Sep 17 00:00:00 2001 From: Emma Anholt Date: Tue, 31 Oct 2023 11:16:48 -0700 Subject: [PATCH 060/117] MAINTAINERS: Drop Emma Anholt from all M lines. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I am not active in the Linux kernel and don't want to see patches. 
Signed-off-by: Emma Anholt Acked-by: Maíra Canal Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20231031181648.48675-1-emma@anholt.net --- MAINTAINERS | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index f13e476ed8038..4e1a63479cad4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6405,8 +6405,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/sun4i/sun8i* DRM DRIVER FOR ARM PL111 CLCD -M: Emma Anholt -S: Supported +S: Orphan T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/pl111/ @@ -6521,8 +6520,7 @@ F: Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml F: drivers/gpu/drm/panel/panel-himax-hx8394.c DRM DRIVER FOR HX8357D PANELS -M: Emma Anholt -S: Maintained +S: Orphan T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/himax,hx8357d.txt F: drivers/gpu/drm/tiny/hx8357d.c @@ -7106,7 +7104,6 @@ F: Documentation/devicetree/bindings/display/ti/ F: drivers/gpu/drm/omapdrm/ DRM DRIVERS FOR V3D -M: Emma Anholt M: Melissa Wen S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc @@ -7115,7 +7112,6 @@ F: drivers/gpu/drm/v3d/ F: include/uapi/drm/v3d_drm.h DRM DRIVERS FOR VC4 -M: Emma Anholt M: Maxime Ripard S: Supported T: git git://github.com/anholt/linux From b035224134626f506c1af73ca32bbc64dd2925ad Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Tue, 31 Oct 2023 08:31:53 +0100 Subject: [PATCH 061/117] accel/ivpu: Allocate vpu_addr in gem->open() callback Use gem->open() callback to simplify the code and prepare for gem_shmem conversion. It is called during handle creation for a gem object, during prime import and in BO_CREATE ioctl. Hence can be used for vpu_addr allocation. On the way remove unused bo->user_ptr field. 
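For orientation, the change boils down to the new open() hook added in the ivpu_gem.c hunk below; this is only a restatement of that hunk with brief comments, the hunk itself is authoritative:

    static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
    {
            struct ivpu_file_priv *file_priv = file->driver_priv;
            struct ivpu_device *vdev = file_priv->vdev;
            struct ivpu_bo *bo = to_ivpu_bo(obj);
            struct ivpu_addr_range *range;

            /* Pick the VPU address range from the BO flags... */
            if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
                    range = &vdev->hw->ranges.shave;
            else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
                    range = &vdev->hw->ranges.dma;
            else
                    range = &vdev->hw->ranges.user;

            /* ...and reserve a vpu_addr in the opener's context. The hook runs
             * on handle creation, prime import and BO_CREATE alike, so every
             * handle gets an address without a dedicated call site. */
            return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range);
    }

The hook is registered through .open in ivpu_gem_funcs, see the hunk below.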
Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231031073156.1301669-2-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_gem.c | 66 ++++++++++++--------------- drivers/accel/ivpu/ivpu_gem.h | 1 - drivers/accel/ivpu/ivpu_mmu_context.c | 2 + 3 files changed, 32 insertions(+), 37 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index c91852f2edc82..d1077cf90b659 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -281,15 +281,6 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); int ret; - if (!range) { - if (bo->flags & DRM_IVPU_BO_SHAVE_MEM) - range = &vdev->hw->ranges.shave; - else if (bo->flags & DRM_IVPU_BO_DMA_MEM) - range = &vdev->hw->ranges.dma; - else - range = &vdev->hw->ranges.user; - } - mutex_lock(&ctx->lock); ret = ivpu_mmu_context_insert_node_locked(ctx, range, ivpu_bo_size(bo), &bo->mm_node); if (!ret) { @@ -299,6 +290,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, } mutex_unlock(&ctx->lock); + if (ret) + ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); + return ret; } @@ -337,9 +331,7 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx) } static struct ivpu_bo * -ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context, - u64 size, u32 flags, const struct ivpu_bo_ops *ops, - const struct ivpu_addr_range *range, u64 user_ptr) +ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_bo_ops *ops) { struct ivpu_bo *bo; int ret = 0; @@ -364,7 +356,6 @@ ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context, bo->base.funcs = &ivpu_gem_funcs; bo->flags = flags; bo->ops = ops; - bo->user_ptr = user_ptr; if (ops->type == IVPU_BO_TYPE_SHMEM) ret = drm_gem_object_init(&vdev->drm, &bo->base, size); @@ -384,14 +375,6 @@ ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context, } } - if (mmu_context) { - ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range); - if (ret) { - ivpu_err(vdev, "Failed to add BO to context: %d\n", ret); - goto err_release; - } - } - return bo; err_release: @@ -401,6 +384,23 @@ ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context, return ERR_PTR(ret); } +static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file) +{ + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; + struct ivpu_bo *bo = to_ivpu_bo(obj); + struct ivpu_addr_range *range; + + if (bo->flags & DRM_IVPU_BO_SHAVE_MEM) + range = &vdev->hw->ranges.shave; + else if (bo->flags & DRM_IVPU_BO_DMA_MEM) + range = &vdev->hw->ranges.dma; + else + range = &vdev->hw->ranges.user; + + return ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, range); +} + static void ivpu_bo_free(struct drm_gem_object *obj) { struct ivpu_bo *bo = to_ivpu_bo(obj); @@ -516,6 +516,7 @@ static const struct vm_operations_struct ivpu_vm_ops = { static const struct drm_gem_object_funcs ivpu_gem_funcs = { .free = ivpu_bo_free, + .open = ivpu_bo_open, .mmap = ivpu_bo_mmap, .vm_ops = &ivpu_vm_ops, .get_sg_table = ivpu_bo_get_sg_table, @@ -537,7 +538,7 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (size == 0) return -EINVAL; - bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0); + bo = ivpu_bo_alloc(vdev, size, args->flags, 
&shmem_ops); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)", bo, file_priv->ctx.id, args->size, args->flags); @@ -578,13 +579,17 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla range = &vdev->hw->ranges.global; } - bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0); + bo = ivpu_bo_alloc(vdev, size, flags, &internal_ops); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", bo, vpu_addr, size, flags); return NULL; } + ret = ivpu_bo_alloc_vpu_addr(bo, &vdev->gctx, range); + if (ret) + goto err_put; + ret = ivpu_bo_pin(bo); if (ret) goto err_put; @@ -631,7 +636,7 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_ get_dma_buf(buf); - bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0); + bo = ivpu_bo_alloc(vdev, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size); goto err_detach; @@ -651,8 +656,6 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct ivpu_file_priv *file_priv = file->driver_priv; - struct ivpu_device *vdev = to_ivpu_device(dev); struct drm_ivpu_bo_info *args = data; struct drm_gem_object *obj; struct ivpu_bo *bo; @@ -665,21 +668,12 @@ int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file bo = to_ivpu_bo(obj); mutex_lock(&bo->lock); - - if (!bo->ctx) { - ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL); - if (ret) { - ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret); - goto unlock; - } - } - args->flags = bo->flags; args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node); args->vpu_addr = bo->vpu_addr; args->size = obj->size; -unlock: mutex_unlock(&bo->lock); + drm_gem_object_put(obj); return ret; } diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index a0b4d4a32b3bf..9bbbcd7792658 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -29,7 +29,6 @@ struct ivpu_bo { u64 vpu_addr; u32 handle; u32 flags; - uintptr_t user_ptr; u32 job_status; }; diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index 7d8005bc78d2f..f6de6a4ea6088 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -410,6 +410,8 @@ ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx, { lockdep_assert_held(&ctx->lock); + WARN_ON(!range); + if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) { if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0, range->start, range->end, DRM_MM_INSERT_BEST)) From 48aea7f2a2efae6a1bd201061c71a81b3f3b7e55 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Tue, 31 Oct 2023 08:31:54 +0100 Subject: [PATCH 062/117] accel/ivpu: Fix locking in ivpu_bo_remove_all_bos_from_context() ivpu_bo_remove_all_bos_from_context() could race with ivpu_bo_free() when prime buffer was closed after vpu device was closed. Move the bo_list from context to vdev and use a dedicated lock to sync it. This list is not modified when BO is added/removed from a context. Also rename ivpu_bo_free_vpu_addr() to ivpu_bo_unbind() because this function does more then just free vpu_addr. 
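For orientation, the fixed teardown path distilled from the ivpu_bo_remove_all_bos_from_context() hunk below (comments added here only):

    mutex_lock(&vdev->bo_list_lock);            /* guards the device-wide BO list */
    list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
            mutex_lock(&bo->lock);              /* serializes against ivpu_bo_free() */
            if (bo->ctx == ctx)
                    ivpu_bo_unbind_locked(bo);  /* drops vpu_addr and the MMU mapping */
            mutex_unlock(&bo->lock);
    }
    mutex_unlock(&vdev->bo_list_lock);

Because the list now lives in vdev and is only touched when a BO is created or freed, the walk no longer depends on per-context state that a racing free could invalidate.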
Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231031073156.1301669-3-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 7 +- drivers/accel/ivpu/ivpu_drv.h | 3 + drivers/accel/ivpu/ivpu_gem.c | 131 +++++++++++++------------- drivers/accel/ivpu/ivpu_gem.h | 6 +- drivers/accel/ivpu/ivpu_mmu.c | 5 +- drivers/accel/ivpu/ivpu_mmu_context.c | 36 ++++--- drivers/accel/ivpu/ivpu_mmu_context.h | 11 +-- 7 files changed, 109 insertions(+), 90 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 60277ff6af69c..cc6a4dca2aaa4 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -91,8 +91,8 @@ static void file_priv_release(struct kref *ref) ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id); ivpu_cmdq_release_all(file_priv); - ivpu_bo_remove_all_bos_from_context(&file_priv->ctx); ivpu_jsm_context_release(vdev, file_priv->ctx.id); + ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx); ivpu_mmu_user_context_fini(vdev, &file_priv->ctx); drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv); mutex_destroy(&file_priv->lock); @@ -528,6 +528,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev) xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC); xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1); lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key); + INIT_LIST_HEAD(&vdev->bo_list); + + ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock); + if (ret) + goto err_xa_destroy; ret = ivpu_pci_init(vdev); if (ret) diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 79b193ffbc264..e0517ec4133d4 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -115,6 +115,9 @@ struct ivpu_device { struct xarray context_xa; struct xa_limit context_xa_limit; + struct mutex bo_list_lock; /* Protects bo_list */ + struct list_head bo_list; + struct xarray submitted_jobs_xa; struct task_struct *job_done_thread; diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index d1077cf90b659..915c53d7bb97b 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -26,6 +26,17 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs; static struct lock_class_key prime_bo_lock_class_key; +static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action) +{ + if (bo->ctx) + ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n", + action, bo->base.size, (bool)bo->pages, (bool)bo->sgt, bo->handle, + bo->ctx->id, bo->vpu_addr, bo->mmu_mapped); + else + ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n", + action, bo->base.size, (bool)bo->pages, (bool)bo->sgt, bo->handle); +} + static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo) { /* Pages are managed by the underlying dma-buf */ @@ -245,9 +256,8 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo) mutex_lock(&bo->lock); - if (!bo->vpu_addr) { - ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n", - bo->ctx->id, bo->handle); + if (!bo->ctx) { + ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle); ret = -EINVAL; goto unlock; } @@ -281,53 +291,68 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); int 
ret; - mutex_lock(&ctx->lock); - ret = ivpu_mmu_context_insert_node_locked(ctx, range, ivpu_bo_size(bo), &bo->mm_node); + mutex_lock(&bo->lock); + + ret = ivpu_mmu_context_insert_node(ctx, range, bo->base.size, &bo->mm_node); if (!ret) { bo->ctx = ctx; bo->vpu_addr = bo->mm_node.start; - list_add_tail(&bo->ctx_node, &ctx->bo_list); + } else { + ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); } - mutex_unlock(&ctx->lock); + ivpu_dbg_bo(vdev, bo, "alloc"); - if (ret) - ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); + mutex_unlock(&bo->lock); return ret; } -static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo) +static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - struct ivpu_mmu_context *ctx = bo->ctx; - ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n", - ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped); + lockdep_assert_held(&bo->lock); - mutex_lock(&bo->lock); + ivpu_dbg_bo(vdev, bo, "unbind"); + /* TODO: dma_unmap */ if (bo->mmu_mapped) { + drm_WARN_ON(&vdev->drm, !bo->ctx); + drm_WARN_ON(&vdev->drm, !bo->vpu_addr); drm_WARN_ON(&vdev->drm, !bo->sgt); - ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt); + ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt); bo->mmu_mapped = false; } - mutex_lock(&ctx->lock); - list_del(&bo->ctx_node); - bo->vpu_addr = 0; - bo->ctx = NULL; - ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node); - mutex_unlock(&ctx->lock); + if (bo->ctx) { + ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node); + bo->vpu_addr = 0; + bo->ctx = NULL; + } +} +static void ivpu_bo_unbind(struct ivpu_bo *bo) +{ + mutex_lock(&bo->lock); + ivpu_bo_unbind_locked(bo); mutex_unlock(&bo->lock); } -void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx) +void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx) { - struct ivpu_bo *bo, *tmp; + struct ivpu_bo *bo; + + if (drm_WARN_ON(&vdev->drm, !ctx)) + return; - list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node) - ivpu_bo_free_vpu_addr(bo); + mutex_lock(&vdev->bo_list_lock); + list_for_each_entry(bo, &vdev->bo_list, bo_list_node) { + mutex_lock(&bo->lock); + if (bo->ctx == ctx) + ivpu_bo_unbind_locked(bo); + mutex_unlock(&bo->lock); + } + mutex_unlock(&vdev->bo_list_lock); } static struct ivpu_bo * @@ -375,6 +400,10 @@ ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_b } } + mutex_lock(&vdev->bo_list_lock); + list_add_tail(&bo->bo_list_node, &vdev->bo_list); + mutex_unlock(&vdev->bo_list_lock); + return bo; err_release: @@ -406,19 +435,16 @@ static void ivpu_bo_free(struct drm_gem_object *obj) struct ivpu_bo *bo = to_ivpu_bo(obj); struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - if (bo->ctx) - ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n", - bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped); - else - ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n", - (bool)bo->sgt, bo->mmu_mapped); + mutex_lock(&vdev->bo_list_lock); + list_del(&bo->bo_list_node); + mutex_unlock(&vdev->bo_list_lock); drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); - vunmap(bo->kvaddr); + ivpu_dbg_bo(vdev, bo, "free"); - if (bo->ctx) - ivpu_bo_free_vpu_addr(bo); + ivpu_bo_unbind(bo); + vunmap(bo->kvaddr); if (bo->sgt) ivpu_bo_unmap_and_free_pages(bo); @@ -435,10 +461,6 @@ static void ivpu_bo_free(struct drm_gem_object 
*obj) static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct ivpu_bo *bo = to_ivpu_bo(obj); - struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - - ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s", - bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo), bo->ops->name); if (obj->import_attach) { /* Drop the reference drm_gem_mmap_obj() acquired.*/ @@ -553,9 +575,6 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) drm_gem_object_put(&bo->base); - ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n", - file_priv->ctx.id, bo->vpu_addr, ivpu_bo_size(bo), bo->flags); - return ret; } @@ -609,9 +628,6 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla goto err_put; } - ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n", - bo->vpu_addr, ivpu_bo_size(bo), flags); - return bo; err_put: @@ -708,41 +724,30 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p) { unsigned long dma_refcount = 0; + mutex_lock(&bo->lock); + if (bo->base.dma_buf && bo->base.dma_buf->file) dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count); drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n", - bo->ctx->id, bo->handle, bo->vpu_addr, ivpu_bo_size(bo), + bo->ctx ? bo->ctx->id : -1, bo->handle, bo->vpu_addr, bo->base.size, kref_read(&bo->base.refcount), dma_refcount, bo->ops->name); + + mutex_unlock(&bo->lock); } void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p) { struct ivpu_device *vdev = to_ivpu_device(dev); - struct ivpu_file_priv *file_priv; - unsigned long ctx_id; struct ivpu_bo *bo; drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n", "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type"); - mutex_lock(&vdev->gctx.lock); - list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node) + mutex_lock(&vdev->bo_list_lock); + list_for_each_entry(bo, &vdev->bo_list, bo_list_node) ivpu_bo_print_info(bo, p); - mutex_unlock(&vdev->gctx.lock); - - xa_for_each(&vdev->context_xa, ctx_id, file_priv) { - file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id); - if (!file_priv) - continue; - - mutex_lock(&file_priv->ctx.lock); - list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node) - ivpu_bo_print_info(bo, p); - mutex_unlock(&file_priv->ctx.lock); - - ivpu_file_priv_put(&file_priv); - } + mutex_unlock(&vdev->bo_list_lock); } void ivpu_bo_list_print(struct drm_device *dev) diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index 9bbbcd7792658..214d8fd143106 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -17,10 +17,10 @@ struct ivpu_bo { const struct ivpu_bo_ops *ops; struct ivpu_mmu_context *ctx; - struct list_head ctx_node; + struct list_head bo_list_node; struct drm_mm_node mm_node; - struct mutex lock; /* Protects: pages, sgt, mmu_mapped */ + struct mutex lock; /* Protects: pages, sgt, ctx, mmu_mapped, vpu_addr */ struct sg_table *sgt; struct page **pages; bool mmu_mapped; @@ -48,7 +48,7 @@ struct ivpu_bo_ops { }; int ivpu_bo_pin(struct ivpu_bo *bo); -void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx); +void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p); void ivpu_bo_list_print(struct drm_device *dev); diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 
b46f3743c0e7d..2228c44b115fa 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -775,9 +775,12 @@ int ivpu_mmu_init(struct ivpu_device *vdev) ivpu_dbg(vdev, MMU, "Init..\n"); - drmm_mutex_init(&vdev->drm, &mmu->lock); ivpu_mmu_config_check(vdev); + ret = drmm_mutex_init(&vdev->drm, &mmu->lock); + if (ret) + return ret; + ret = ivpu_mmu_structs_alloc(vdev); if (ret) return ret; diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index f6de6a4ea6088..122c90fecf665 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -335,6 +335,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 prot; u64 i; + if (drm_WARN_ON(&vdev->drm, !ctx)) + return -EINVAL; + if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) return -EINVAL; /* @@ -382,8 +385,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct int ret; u64 i; - if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE)) - ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr); + if (drm_WARN_ON(&vdev->drm, !ctx)) + return; mutex_lock(&ctx->lock); @@ -404,30 +407,34 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct } int -ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx, - const struct ivpu_addr_range *range, - u64 size, struct drm_mm_node *node) +ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range, + u64 size, struct drm_mm_node *node) { - lockdep_assert_held(&ctx->lock); + int ret; WARN_ON(!range); + mutex_lock(&ctx->lock); if (!ivpu_disable_mmu_cont_pages && size >= IVPU_MMU_CONT_PAGES_SIZE) { - if (!drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0, - range->start, range->end, DRM_MM_INSERT_BEST)) - return 0; + ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_CONT_PAGES_SIZE, 0, + range->start, range->end, DRM_MM_INSERT_BEST); + if (!ret) + goto unlock; } - return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0, - range->start, range->end, DRM_MM_INSERT_BEST); + ret = drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE, 0, + range->start, range->end, DRM_MM_INSERT_BEST); +unlock: + mutex_unlock(&ctx->lock); + return ret; } void -ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node) +ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node) { - lockdep_assert_held(&ctx->lock); - + mutex_lock(&ctx->lock); drm_mm_remove_node(node); + mutex_unlock(&ctx->lock); } static int @@ -437,7 +444,6 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3 int ret; mutex_init(&ctx->lock); - INIT_LIST_HEAD(&ctx->bo_list); ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable); if (ret) { diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h index f15d8c630d8ad..535db3a1fc74f 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.h +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -23,10 +23,9 @@ struct ivpu_mmu_pgtable { }; struct ivpu_mmu_context { - struct mutex lock; /* protects: mm, pgtable, bo_list */ + struct mutex lock; /* Protects: mm, pgtable */ struct drm_mm mm; struct ivpu_mmu_pgtable pgtable; - struct list_head bo_list; u32 id; }; @@ -39,11 +38,9 @@ int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct 
ivpu_mmu_context *ctx); void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid); -int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx, - const struct ivpu_addr_range *range, - u64 size, struct drm_mm_node *node); -void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, - struct drm_mm_node *node); +int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range, + u64 size, struct drm_mm_node *node); +void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node); int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, struct sg_table *sgt, bool llc_coherent); From 48d45fac3940347becd290b96b2fc6d5ad8171f7 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Tue, 31 Oct 2023 08:31:55 +0100 Subject: [PATCH 063/117] accel/ivpu: Remove support for uncached buffers Usages of DRM_IVPU_BO_UNCACHED should be replaced by DRM_IVPU_BO_WC. There is no functional benefit from DRM_IVPU_BO_UNCACHED if these buffers are never mapped to host VM. This allows to cut the buffer handling code in the kernel driver by half. Usage of DRM_IVPU_BO_UNCACHED buffers was removed from user-space driver and will not be part of first UMD release. Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231031073156.1301669-4-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/ivpu_fw.c | 2 +- drivers/accel/ivpu/ivpu_gem.c | 3 --- include/uapi/drm/ivpu_accel.h | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 3fd74cd4205f9..3d0c18d68f6c6 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -286,7 +286,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev) if (fw->shave_nn_size) { fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start, - fw->shave_nn_size, DRM_IVPU_BO_UNCACHED); + fw->shave_nn_size, DRM_IVPU_BO_WC); if (!fw->mem_shave_nn) { ivpu_err(vdev, "Failed to allocate shavenn buffer\n"); ret = -ENOMEM; diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 915c53d7bb97b..2a91eb1e36278 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -89,8 +89,6 @@ static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo) if (bo->flags & DRM_IVPU_BO_WC) set_pages_array_wc(pages, npages); - else if (bo->flags & DRM_IVPU_BO_UNCACHED) - set_pages_array_uc(pages, npages); bo->pages = pages; return 0; @@ -366,7 +364,6 @@ ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_b switch (flags & DRM_IVPU_BO_CACHE_MASK) { case DRM_IVPU_BO_CACHED: - case DRM_IVPU_BO_UNCACHED: case DRM_IVPU_BO_WC: break; default: diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index 262db0c3beeea..de1944e42c655 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -196,7 +196,7 @@ struct drm_ivpu_bo_create { * * %DRM_IVPU_BO_UNCACHED: * - * Allocated BO will not be cached on host side nor snooped on the VPU side. + * Not supported. Use DRM_IVPU_BO_WC instead. * * %DRM_IVPU_BO_WC: * From 8d88e4cdce4f5c56de55174a4d32ea9c06f7fa66 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Tue, 31 Oct 2023 08:31:56 +0100 Subject: [PATCH 064/117] accel/ivpu: Use GEM shmem helper for all buffers Use struct drm_gem_shmem_object as a base for struct ivpu_bo. 
This cuts by 50% the buffer management code. Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231031073156.1301669-5-stanislaw.gruszka@linux.intel.com --- drivers/accel/ivpu/Kconfig | 2 +- drivers/accel/ivpu/ivpu_drv.c | 6 +- drivers/accel/ivpu/ivpu_gem.c | 518 ++++++++-------------------------- drivers/accel/ivpu/ivpu_gem.h | 70 +---- drivers/accel/ivpu/ivpu_job.c | 8 +- 5 files changed, 144 insertions(+), 460 deletions(-) diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig index 82749e0da524c..682c532452863 100644 --- a/drivers/accel/ivpu/Kconfig +++ b/drivers/accel/ivpu/Kconfig @@ -6,7 +6,7 @@ config DRM_ACCEL_IVPU depends on X86_64 && !UML depends on PCI && PCI_MSI select FW_LOADER - select SHMEM + select DRM_GEM_SHMEM_HELPER select GENERIC_ALLOCATOR help Choose this option if you have a system with an 14th generation diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index cc6a4dca2aaa4..51fa60b6254ce 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -360,7 +360,7 @@ int ivpu_boot(struct ivpu_device *vdev) int ret; /* Update boot params located at first 4KB of FW memory */ - ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr); + ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem)); ret = ivpu_hw_boot_fw(vdev); if (ret) { @@ -409,7 +409,9 @@ static const struct drm_driver driver = { .open = ivpu_open, .postclose = ivpu_postclose, - .gem_prime_import = ivpu_gem_prime_import, + + .gem_create_object = ivpu_gem_create_object, + .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, .ioctls = ivpu_drm_ioctls, .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 2a91eb1e36278..1dda4f38ea25c 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -20,224 +20,18 @@ #include "ivpu_mmu.h" #include "ivpu_mmu_context.h" -MODULE_IMPORT_NS(DMA_BUF); - static const struct drm_gem_object_funcs ivpu_gem_funcs; -static struct lock_class_key prime_bo_lock_class_key; - static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action) { if (bo->ctx) ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n", - action, bo->base.size, (bool)bo->pages, (bool)bo->sgt, bo->handle, - bo->ctx->id, bo->vpu_addr, bo->mmu_mapped); + action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt, + bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped); else ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n", - action, bo->base.size, (bool)bo->pages, (bool)bo->sgt, bo->handle); -} - -static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo) -{ - /* Pages are managed by the underlying dma-buf */ - return 0; -} - -static void prime_free_pages_locked(struct ivpu_bo *bo) -{ - /* Pages are managed by the underlying dma-buf */ -} - -static int prime_map_pages_locked(struct ivpu_bo *bo) -{ - struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - struct sg_table *sgt; - - sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) { - ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt)); - return PTR_ERR(sgt); - } - - bo->sgt = sgt; - return 0; -} - -static void prime_unmap_pages_locked(struct ivpu_bo *bo) -{ - dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, 
DMA_BIDIRECTIONAL); - bo->sgt = NULL; -} - -static const struct ivpu_bo_ops prime_ops = { - .type = IVPU_BO_TYPE_PRIME, - .name = "prime", - .alloc_pages = prime_alloc_pages_locked, - .free_pages = prime_free_pages_locked, - .map_pages = prime_map_pages_locked, - .unmap_pages = prime_unmap_pages_locked, -}; - -static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo) -{ - int npages = ivpu_bo_size(bo) >> PAGE_SHIFT; - struct page **pages; - - pages = drm_gem_get_pages(&bo->base); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - if (bo->flags & DRM_IVPU_BO_WC) - set_pages_array_wc(pages, npages); - - bo->pages = pages; - return 0; -} - -static void shmem_free_pages_locked(struct ivpu_bo *bo) -{ - if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED) - set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT); - - drm_gem_put_pages(&bo->base, bo->pages, true, false); - bo->pages = NULL; -} - -static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo) -{ - int npages = ivpu_bo_size(bo) >> PAGE_SHIFT; - struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - struct sg_table *sgt; - int ret; - - sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages); - if (IS_ERR(sgt)) { - ivpu_err(vdev, "Failed to allocate sgtable\n"); - return PTR_ERR(sgt); - } - - ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0); - if (ret) { - ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); - goto err_free_sgt; - } - - bo->sgt = sgt; - return 0; - -err_free_sgt: - kfree(sgt); - return ret; -} - -static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo) -{ - struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - - dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0); - sg_free_table(bo->sgt); - kfree(bo->sgt); - bo->sgt = NULL; -} - -static const struct ivpu_bo_ops shmem_ops = { - .type = IVPU_BO_TYPE_SHMEM, - .name = "shmem", - .alloc_pages = shmem_alloc_pages_locked, - .free_pages = shmem_free_pages_locked, - .map_pages = ivpu_bo_map_pages_locked, - .unmap_pages = ivpu_bo_unmap_pages_locked, -}; - -static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo) -{ - unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT; - struct page **pages; - int ret; - - pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; - - for (i = 0; i < npages; i++) { - pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); - if (!pages[i]) { - ret = -ENOMEM; - goto err_free_pages; - } - cond_resched(); - } - - bo->pages = pages; - return 0; - -err_free_pages: - while (i--) - put_page(pages[i]); - kvfree(pages); - return ret; -} - -static void internal_free_pages_locked(struct ivpu_bo *bo) -{ - unsigned int i, npages = ivpu_bo_size(bo) >> PAGE_SHIFT; - - if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED) - set_pages_array_wb(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT); - - for (i = 0; i < npages; i++) - put_page(bo->pages[i]); - - kvfree(bo->pages); - bo->pages = NULL; -} - -static const struct ivpu_bo_ops internal_ops = { - .type = IVPU_BO_TYPE_INTERNAL, - .name = "internal", - .alloc_pages = internal_alloc_pages_locked, - .free_pages = internal_free_pages_locked, - .map_pages = ivpu_bo_map_pages_locked, - .unmap_pages = ivpu_bo_unmap_pages_locked, -}; - -static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo) -{ - struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - int ret; - - lockdep_assert_held(&bo->lock); - drm_WARN_ON(&vdev->drm, bo->sgt); - - ret = bo->ops->alloc_pages(bo); - if (ret) { - ivpu_err(vdev, "Failed to allocate 
pages for BO: %d", ret); - return ret; - } - - ret = bo->ops->map_pages(bo); - if (ret) { - ivpu_err(vdev, "Failed to map pages for BO: %d", ret); - goto err_free_pages; - } - return ret; - -err_free_pages: - bo->ops->free_pages(bo); - return ret; -} - -static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo) -{ - mutex_lock(&bo->lock); - - WARN_ON(!bo->sgt); - bo->ops->unmap_pages(bo); - WARN_ON(bo->sgt); - bo->ops->free_pages(bo); - WARN_ON(bo->pages); - - mutex_unlock(&bo->lock); + action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt, + bo->handle); } /* @@ -254,20 +48,24 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo) mutex_lock(&bo->lock); + ivpu_dbg_bo(vdev, bo, "pin"); + if (!bo->ctx) { ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle); ret = -EINVAL; goto unlock; } - if (!bo->sgt) { - ret = ivpu_bo_alloc_and_map_pages_locked(bo); - if (ret) + if (!bo->mmu_mapped) { + struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + + if (IS_ERR(sgt)) { + ret = PTR_ERR(sgt); + ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); goto unlock; - } + } - if (!bo->mmu_mapped) { - ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt, + ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, ivpu_bo_is_snooped(bo)); if (ret) { ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret); @@ -291,13 +89,14 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, mutex_lock(&bo->lock); - ret = ivpu_mmu_context_insert_node(ctx, range, bo->base.size, &bo->mm_node); + ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node); if (!ret) { bo->ctx = ctx; bo->vpu_addr = bo->mm_node.start; } else { ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); } + ivpu_dbg_bo(vdev, bo, "alloc"); mutex_unlock(&bo->lock); @@ -314,11 +113,12 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) ivpu_dbg_bo(vdev, bo, "unbind"); /* TODO: dma_unmap */ + if (bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); drm_WARN_ON(&vdev->drm, !bo->vpu_addr); - drm_WARN_ON(&vdev->drm, !bo->sgt); - ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt); + drm_WARN_ON(&vdev->drm, !bo->base.sgt); + ivpu_mmu_context_unmap_sgt(vdev, bo->ctx, bo->vpu_addr, bo->base.sgt); bo->mmu_mapped = false; } @@ -353,15 +153,32 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m mutex_unlock(&vdev->bo_list_lock); } -static struct ivpu_bo * -ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_bo_ops *ops) +struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size) { struct ivpu_bo *bo; - int ret = 0; - if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size))) + if (size == 0 || !PAGE_ALIGNED(size)) return ERR_PTR(-EINVAL); + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return ERR_PTR(-ENOMEM); + + bo->base.base.funcs = &ivpu_gem_funcs; + bo->base.pages_mark_dirty_on_put = true; /* VPU can dirty a BO anytime */ + + INIT_LIST_HEAD(&bo->bo_list_node); + mutex_init(&bo->lock); + + return &bo->base.base; +} + +static struct ivpu_bo * +ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags) +{ + struct drm_gem_shmem_object *shmem; + struct ivpu_bo *bo; + switch (flags & DRM_IVPU_BO_CACHE_MASK) { case DRM_IVPU_BO_CACHED: case DRM_IVPU_BO_WC: @@ -370,44 +187,22 @@ ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, const struct ivpu_b return ERR_PTR(-EINVAL); } - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return 
ERR_PTR(-ENOMEM); + shmem = drm_gem_shmem_create(&vdev->drm, size); + if (IS_ERR(shmem)) + return ERR_CAST(shmem); - mutex_init(&bo->lock); - bo->base.funcs = &ivpu_gem_funcs; + bo = to_ivpu_bo(&shmem->base); + bo->base.map_wc = flags & DRM_IVPU_BO_WC; bo->flags = flags; - bo->ops = ops; - - if (ops->type == IVPU_BO_TYPE_SHMEM) - ret = drm_gem_object_init(&vdev->drm, &bo->base, size); - else - drm_gem_private_object_init(&vdev->drm, &bo->base, size); - - if (ret) { - ivpu_err(vdev, "Failed to initialize drm object\n"); - goto err_free; - } - - if (flags & DRM_IVPU_BO_MAPPABLE) { - ret = drm_gem_create_mmap_offset(&bo->base); - if (ret) { - ivpu_err(vdev, "Failed to allocate mmap offset\n"); - goto err_release; - } - } mutex_lock(&vdev->bo_list_lock); list_add_tail(&bo->bo_list_node, &vdev->bo_list); mutex_unlock(&vdev->bo_list_lock); - return bo; + ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n", + bo->vpu_addr, bo->base.base.size, flags); -err_release: - drm_gem_object_release(&bo->base); -err_free: - kfree(bo); - return ERR_PTR(ret); + return bo; } static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file) @@ -429,8 +224,8 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file) static void ivpu_bo_free(struct drm_gem_object *obj) { + struct ivpu_device *vdev = to_ivpu_device(obj->dev); struct ivpu_bo *bo = to_ivpu_bo(obj); - struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); mutex_lock(&vdev->bo_list_lock); list_del(&bo->bo_list_node); @@ -441,108 +236,64 @@ static void ivpu_bo_free(struct drm_gem_object *obj) ivpu_dbg_bo(vdev, bo, "free"); ivpu_bo_unbind(bo); - vunmap(bo->kvaddr); - - if (bo->sgt) - ivpu_bo_unmap_and_free_pages(bo); - - if (bo->base.import_attach) - drm_prime_gem_destroy(&bo->base, bo->sgt); - - drm_gem_object_release(&bo->base); - mutex_destroy(&bo->lock); - kfree(bo); -} - -static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) -{ - struct ivpu_bo *bo = to_ivpu_bo(obj); - if (obj->import_attach) { - /* Drop the reference drm_gem_mmap_obj() acquired.*/ - drm_gem_object_put(obj); - vma->vm_private_data = NULL; - return dma_buf_mmap(obj->dma_buf, vma, 0); - } - - vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND); - vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags)); - - return 0; + drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1); + drm_gem_shmem_free(&bo->base); } -static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj) -{ - struct ivpu_bo *bo = to_ivpu_bo(obj); - loff_t npages = obj->size >> PAGE_SHIFT; - int ret = 0; - - mutex_lock(&bo->lock); - - if (!bo->sgt) - ret = ivpu_bo_alloc_and_map_pages_locked(bo); - - mutex_unlock(&bo->lock); - - if (ret) - return ERR_PTR(ret); - - return drm_prime_pages_to_sg(obj->dev, bo->pages, npages); -} +static const struct dma_buf_ops ivpu_bo_dmabuf_ops = { + .cache_sgt_mapping = true, + .attach = drm_gem_map_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .mmap = drm_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; -static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf) +static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags) { - struct vm_area_struct *vma = vmf->vma; - struct drm_gem_object *obj = vma->vm_private_data; - struct ivpu_bo *bo = to_ivpu_bo(obj); - loff_t npages = obj->size >> PAGE_SHIFT; - pgoff_t page_offset; - struct page *page; - 
vm_fault_t ret; - int err; + struct drm_device *dev = obj->dev; + struct dma_buf_export_info exp_info = { + .exp_name = KBUILD_MODNAME, + .owner = dev->driver->fops->owner, + .ops = &ivpu_bo_dmabuf_ops, + .size = obj->size, + .flags = flags, + .priv = obj, + .resv = obj->resv, + }; + void *sgt; - mutex_lock(&bo->lock); + /* + * Make sure that pages are allocated and dma-mapped before exporting the bo. + * DMA-mapping is required if the bo will be imported to the same device. + */ + sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj)); + if (IS_ERR(sgt)) + return sgt; - if (!bo->sgt) { - err = ivpu_bo_alloc_and_map_pages_locked(bo); - if (err) { - ret = vmf_error(err); - goto unlock; - } - } - - /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - if (page_offset >= npages) { - ret = VM_FAULT_SIGBUS; - } else { - page = bo->pages[page_offset]; - ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page)); - } - -unlock: - mutex_unlock(&bo->lock); - - return ret; + return drm_gem_dmabuf_export(dev, &exp_info); } -static const struct vm_operations_struct ivpu_vm_ops = { - .fault = ivpu_vm_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static const struct drm_gem_object_funcs ivpu_gem_funcs = { .free = ivpu_bo_free, .open = ivpu_bo_open, - .mmap = ivpu_bo_mmap, - .vm_ops = &ivpu_vm_ops, - .get_sg_table = ivpu_bo_get_sg_table, + .export = ivpu_bo_export, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, }; -int -ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; struct ivpu_device *vdev = file_priv->vdev; @@ -557,20 +308,20 @@ ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (size == 0) return -EINVAL; - bo = ivpu_bo_alloc(vdev, size, args->flags, &shmem_ops); + bo = ivpu_bo_create(vdev, size, args->flags); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)", bo, file_priv->ctx.id, args->size, args->flags); return PTR_ERR(bo); } - ret = drm_gem_handle_create(file, &bo->base, &bo->handle); + ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle); if (!ret) { args->vpu_addr = bo->vpu_addr; args->handle = bo->handle; } - drm_gem_object_put(&bo->base); + drm_gem_object_put(&bo->base.base); return ret; } @@ -580,8 +331,8 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla { const struct ivpu_addr_range *range; struct ivpu_addr_range fixed_range; + struct iosys_map map; struct ivpu_bo *bo; - pgprot_t prot; int ret; drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr)); @@ -595,7 +346,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla range = &vdev->hw->ranges.global; } - bo = ivpu_bo_alloc(vdev, size, flags, &internal_ops); + bo = ivpu_bo_create(vdev, size, flags); if (IS_ERR(bo)) { ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", bo, vpu_addr, size, flags); @@ -610,61 +361,23 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla if (ret) goto err_put; - if 
(ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED) - drm_clflush_pages(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT); - - if (bo->flags & DRM_IVPU_BO_WC) - set_pages_array_wc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT); - else if (bo->flags & DRM_IVPU_BO_UNCACHED) - set_pages_array_uc(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT); - - prot = ivpu_bo_pgprot(bo, PAGE_KERNEL); - bo->kvaddr = vmap(bo->pages, ivpu_bo_size(bo) >> PAGE_SHIFT, VM_MAP, prot); - if (!bo->kvaddr) { - ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n"); + ret = drm_gem_shmem_vmap(&bo->base, &map); + if (ret) goto err_put; - } return bo; err_put: - drm_gem_object_put(&bo->base); + drm_gem_object_put(&bo->base.base); return NULL; } void ivpu_bo_free_internal(struct ivpu_bo *bo) { - drm_gem_object_put(&bo->base); -} - -struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf) -{ - struct ivpu_device *vdev = to_ivpu_device(dev); - struct dma_buf_attachment *attach; - struct ivpu_bo *bo; - - attach = dma_buf_attach(buf, dev->dev); - if (IS_ERR(attach)) - return ERR_CAST(attach); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr); - get_dma_buf(buf); - - bo = ivpu_bo_alloc(vdev, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops); - if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size); - goto err_detach; - } - - lockdep_set_class(&bo->lock, &prime_bo_lock_class_key); - - bo->base.import_attach = attach; - - return &bo->base; - -err_detach: - dma_buf_detach(buf, attach); - dma_buf_put(buf); - return ERR_CAST(bo); + drm_gem_shmem_vunmap(&bo->base, &map); + drm_gem_object_put(&bo->base.base); } int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -723,12 +436,23 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p) mutex_lock(&bo->lock); - if (bo->base.dma_buf && bo->base.dma_buf->file) - dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count); + if (bo->base.base.dma_buf && bo->base.base.dma_buf->file) + dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count); + + drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu", + bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size, + bo->flags, kref_read(&bo->base.base.refcount), dma_refcount); + + if (bo->base.base.import_attach) + drm_printf(p, " imported"); + + if (bo->base.pages) + drm_printf(p, " has_pages"); + + if (bo->mmu_mapped) + drm_printf(p, " mmu_mapped"); - drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n", - bo->ctx ? 
bo->ctx->id : -1, bo->handle, bo->vpu_addr, bo->base.size, - kref_read(&bo->base.refcount), dma_refcount, bo->ops->name); + drm_printf(p, "\n"); mutex_unlock(&bo->lock); } @@ -738,8 +462,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p) struct ivpu_device *vdev = to_ivpu_device(dev); struct ivpu_bo *bo; - drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n", - "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type"); + drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n", + "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs"); mutex_lock(&vdev->bo_list_lock); list_for_each_entry(bo, &vdev->bo_list, bo_list_node) diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index 214d8fd143106..d75cad0d3c742 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -6,83 +6,52 @@ #define __IVPU_GEM_H__ #include +#include #include -struct dma_buf; -struct ivpu_bo_ops; struct ivpu_file_priv; struct ivpu_bo { - struct drm_gem_object base; - const struct ivpu_bo_ops *ops; - + struct drm_gem_shmem_object base; struct ivpu_mmu_context *ctx; struct list_head bo_list_node; struct drm_mm_node mm_node; - struct mutex lock; /* Protects: pages, sgt, ctx, mmu_mapped, vpu_addr */ - struct sg_table *sgt; - struct page **pages; - bool mmu_mapped; - - void *kvaddr; + struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */ u64 vpu_addr; u32 handle; u32 flags; - u32 job_status; -}; - -enum ivpu_bo_type { - IVPU_BO_TYPE_SHMEM = 1, - IVPU_BO_TYPE_INTERNAL, - IVPU_BO_TYPE_PRIME, -}; - -struct ivpu_bo_ops { - enum ivpu_bo_type type; - const char *name; - int (*alloc_pages)(struct ivpu_bo *bo); - void (*free_pages)(struct ivpu_bo *bo); - int (*map_pages)(struct ivpu_bo *bo); - void (*unmap_pages)(struct ivpu_bo *bo); + u32 job_status; /* Valid only for command buffer */ + bool mmu_mapped; }; int ivpu_bo_pin(struct ivpu_bo *bo); void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); -void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p); -void ivpu_bo_list_print(struct drm_device *dev); -struct ivpu_bo * -ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags); +struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size); +struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags); void ivpu_bo_free_internal(struct ivpu_bo *bo); -struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); -void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo); int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p); +void ivpu_bo_list_print(struct drm_device *dev); + static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj) { - return container_of(obj, struct ivpu_bo, base); + return container_of(obj, struct ivpu_bo, base.base); } static inline void *ivpu_bo_vaddr(struct ivpu_bo *bo) { - return bo->kvaddr; + return bo->base.vaddr; } static inline size_t ivpu_bo_size(struct ivpu_bo *bo) { - return bo->base.size; -} - -static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset) -{ - if (offset > ivpu_bo_size(bo) || !bo->pages) - return NULL; - - return 
bo->pages[offset / PAGE_SIZE]; + return bo->base.base.size; } static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo) @@ -95,20 +64,9 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo) return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED; } -static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot) -{ - if (bo->flags & DRM_IVPU_BO_WC) - return pgprot_writecombine(prot); - - if (bo->flags & DRM_IVPU_BO_UNCACHED) - return pgprot_noncached(prot); - - return prot; -} - static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo) { - return to_ivpu_device(bo->base.dev); + return to_ivpu_device(bo->base.base.dev); } static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr) diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 15a408fad4945..02acd8dba02aa 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -266,7 +266,7 @@ static void job_release(struct kref *ref) for (i = 0; i < job->bo_count; i++) if (job->bos[i]) - drm_gem_object_put(&job->bos[i]->base); + drm_gem_object_put(&job->bos[i]->base.base); dma_fence_put(job->done_fence); ivpu_file_priv_put(&job->file_priv); @@ -450,7 +450,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 } bo = job->bos[CMD_BUF_IDX]; - if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) { + if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) { ivpu_warn(vdev, "Buffer is already in use\n"); return -EBUSY; } @@ -470,7 +470,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 } for (i = 0; i < buf_count; i++) { - ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1); + ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1); if (ret) { ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); goto unlock_reservations; @@ -479,7 +479,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 for (i = 0; i < buf_count; i++) { usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP; - dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage); + dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage); } unlock_reservations: From 078a5b498d6a3e9c2acb637427258eb6b3079923 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 25 Oct 2023 15:24:27 +0200 Subject: [PATCH 065/117] drm/tests: Remove slow tests Both the drm_buddy and drm_mm tests have been converted from selftest to kunit recently. However, a significant portion of them are trying to exert some part of their API over a huge number of iterations and with random variations of their parameters. They are thus more a way to discover new bugs than actual unit tests. This is fine in itself but leads to very slow runtime (up to 25s for some drm_test_mm_reserve and drm_test_mm_insert on a Ryzen 7950x while running the tests in qemu) which make them a poor fit for kunit. Let's remove those tests from the drm_mm and drm_buddy test suites for now, and if it's ever needed we can always create proper unit tests for them later on. This made the entire DRM tests execution time (as of v6.6-rc1) come from 65s to less than .5s on a Ryzen 7950x system when running under qemu, and from 9 minutes to about 4s on a RaspberryPi4. 
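For context, the removed tests all follow roughly the shape below, abridged from the deleted drm_test_buddy_alloc_smoke() further down (checks and error handling elided): a randomized size/chunk configuration is exercised across every order until a wall-clock timeout, which is where most of the runtime went.

    mm_config(&mm_size, &chunk_size);            /* random total size and chunk size */
    for (i = 0; i <= mm.max_order; ++i) {
            do {
                    size = get_size(order, chunk_size);
                    err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
                                                 size, &tmp, flags);
                    ...
                    total += drm_buddy_block_size(&mm, block);
            } while (total < mm.size && !__timeout(end_time, NULL));
    }

If that coverage is ever wanted again, fixed-parameter unit tests would fit kunit better than these randomized stress loops.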
Acked-by: Daniel Vetter Suggested-by: Daniel Vetter Link: https://lore.kernel.org/r/20231025132428.723672-1-mripard@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/drm_buddy_test.c | 465 ------ drivers/gpu/drm/tests/drm_mm_test.c | 2016 +----------------------- 2 files changed, 56 insertions(+), 2425 deletions(-) diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index 09ee6f6af896b..ea2af6bd9abeb 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -13,315 +13,11 @@ #include "../lib/drm_random.h" -#define TIMEOUT(name__) \ - unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT - -static unsigned int random_seed; - static inline u64 get_size(int order, u64 chunk_size) { return (1 << order) * chunk_size; } -__printf(2, 3) -static bool __timeout(unsigned long timeout, const char *fmt, ...) -{ - va_list va; - - if (!signal_pending(current)) { - cond_resched(); - if (time_before(jiffies, timeout)) - return false; - } - - if (fmt) { - va_start(va, fmt); - vprintk(fmt, va); - va_end(va); - } - - return true; -} - -static void __dump_block(struct kunit *test, struct drm_buddy *mm, - struct drm_buddy_block *block, bool buddy) -{ - kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n", - block->header, drm_buddy_block_state(block), - drm_buddy_block_order(block), drm_buddy_block_offset(block), - drm_buddy_block_size(mm, block), !block->parent, buddy); -} - -static void dump_block(struct kunit *test, struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - struct drm_buddy_block *buddy; - - __dump_block(test, mm, block, false); - - buddy = drm_get_buddy(block); - if (buddy) - __dump_block(test, mm, buddy, true); -} - -static int check_block(struct kunit *test, struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - struct drm_buddy_block *buddy; - unsigned int block_state; - u64 block_size; - u64 offset; - int err = 0; - - block_state = drm_buddy_block_state(block); - - if (block_state != DRM_BUDDY_ALLOCATED && - block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) { - kunit_err(test, "block state mismatch\n"); - err = -EINVAL; - } - - block_size = drm_buddy_block_size(mm, block); - offset = drm_buddy_block_offset(block); - - if (block_size < mm->chunk_size) { - kunit_err(test, "block size smaller than min size\n"); - err = -EINVAL; - } - - /* We can't use is_power_of_2() for a u64 on 32-bit systems. 
*/ - if (block_size & (block_size - 1)) { - kunit_err(test, "block size not power of two\n"); - err = -EINVAL; - } - - if (!IS_ALIGNED(block_size, mm->chunk_size)) { - kunit_err(test, "block size not aligned to min size\n"); - err = -EINVAL; - } - - if (!IS_ALIGNED(offset, mm->chunk_size)) { - kunit_err(test, "block offset not aligned to min size\n"); - err = -EINVAL; - } - - if (!IS_ALIGNED(offset, block_size)) { - kunit_err(test, "block offset not aligned to block size\n"); - err = -EINVAL; - } - - buddy = drm_get_buddy(block); - - if (!buddy && block->parent) { - kunit_err(test, "buddy has gone fishing\n"); - err = -EINVAL; - } - - if (buddy) { - if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) { - kunit_err(test, "buddy has wrong offset\n"); - err = -EINVAL; - } - - if (drm_buddy_block_size(mm, buddy) != block_size) { - kunit_err(test, "buddy size mismatch\n"); - err = -EINVAL; - } - - if (drm_buddy_block_state(buddy) == block_state && - block_state == DRM_BUDDY_FREE) { - kunit_err(test, "block and its buddy are free\n"); - err = -EINVAL; - } - } - - return err; -} - -static int check_blocks(struct kunit *test, struct drm_buddy *mm, - struct list_head *blocks, u64 expected_size, bool is_contiguous) -{ - struct drm_buddy_block *block; - struct drm_buddy_block *prev; - u64 total; - int err = 0; - - block = NULL; - prev = NULL; - total = 0; - - list_for_each_entry(block, blocks, link) { - err = check_block(test, mm, block); - - if (!drm_buddy_block_is_allocated(block)) { - kunit_err(test, "block not allocated\n"); - err = -EINVAL; - } - - if (is_contiguous && prev) { - u64 prev_block_size; - u64 prev_offset; - u64 offset; - - prev_offset = drm_buddy_block_offset(prev); - prev_block_size = drm_buddy_block_size(mm, prev); - offset = drm_buddy_block_offset(block); - - if (offset != (prev_offset + prev_block_size)) { - kunit_err(test, "block offset mismatch\n"); - err = -EINVAL; - } - } - - if (err) - break; - - total += drm_buddy_block_size(mm, block); - prev = block; - } - - if (!err) { - if (total != expected_size) { - kunit_err(test, "size mismatch, expected=%llx, found=%llx\n", - expected_size, total); - err = -EINVAL; - } - return err; - } - - if (prev) { - kunit_err(test, "prev block, dump:\n"); - dump_block(test, mm, prev); - } - - kunit_err(test, "bad block, dump:\n"); - dump_block(test, mm, block); - - return err; -} - -static int check_mm(struct kunit *test, struct drm_buddy *mm) -{ - struct drm_buddy_block *root; - struct drm_buddy_block *prev; - unsigned int i; - u64 total; - int err = 0; - - if (!mm->n_roots) { - kunit_err(test, "n_roots is zero\n"); - return -EINVAL; - } - - if (mm->n_roots != hweight64(mm->size)) { - kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n", - mm->n_roots, hweight64(mm->size)); - return -EINVAL; - } - - root = NULL; - prev = NULL; - total = 0; - - for (i = 0; i < mm->n_roots; ++i) { - struct drm_buddy_block *block; - unsigned int order; - - root = mm->roots[i]; - if (!root) { - kunit_err(test, "root(%u) is NULL\n", i); - err = -EINVAL; - break; - } - - err = check_block(test, mm, root); - - if (!drm_buddy_block_is_free(root)) { - kunit_err(test, "root not free\n"); - err = -EINVAL; - } - - order = drm_buddy_block_order(root); - - if (!i) { - if (order != mm->max_order) { - kunit_err(test, "max order root missing\n"); - err = -EINVAL; - } - } - - if (prev) { - u64 prev_block_size; - u64 prev_offset; - u64 offset; - - prev_offset = drm_buddy_block_offset(prev); - prev_block_size = drm_buddy_block_size(mm, prev); - offset = 
drm_buddy_block_offset(root); - - if (offset != (prev_offset + prev_block_size)) { - kunit_err(test, "root offset mismatch\n"); - err = -EINVAL; - } - } - - block = list_first_entry_or_null(&mm->free_list[order], - struct drm_buddy_block, link); - if (block != root) { - kunit_err(test, "root mismatch at order=%u\n", order); - err = -EINVAL; - } - - if (err) - break; - - prev = root; - total += drm_buddy_block_size(mm, root); - } - - if (!err) { - if (total != mm->size) { - kunit_err(test, "expected mm size=%llx, found=%llx\n", - mm->size, total); - err = -EINVAL; - } - return err; - } - - if (prev) { - kunit_err(test, "prev root(%u), dump:\n", i - 1); - dump_block(test, mm, prev); - } - - if (root) { - kunit_err(test, "bad root(%u), dump:\n", i); - dump_block(test, mm, root); - } - - return err; -} - -static void mm_config(u64 *size, u64 *chunk_size) -{ - DRM_RND_STATE(prng, random_seed); - u32 s, ms; - - /* Nothing fancy, just try to get an interesting bit pattern */ - - prandom_seed_state(&prng, random_seed); - - /* Let size be a random number of pages up to 8 GB (2M pages) */ - s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng); - /* Let the chunk size be a random power of 2 less than size */ - ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng)); - /* Round size down to the chunk size */ - s &= -ms; - - /* Convert from pages to bytes */ - *chunk_size = (u64)ms << 12; - *size = (u64)s << 12; -} - static void drm_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start = 0; @@ -403,96 +99,6 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test) drm_buddy_fini(&mm); } -static void drm_test_buddy_alloc_smoke(struct kunit *test) -{ - u64 mm_size, chunk_size, start = 0; - unsigned long flags = 0; - struct drm_buddy mm; - int *order; - int i; - - DRM_RND_STATE(prng, random_seed); - TIMEOUT(end_time); - - mm_config(&mm_size, &chunk_size); - - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size), - "buddy_init failed\n"); - - order = drm_random_order(mm.max_order + 1, &prng); - KUNIT_ASSERT_TRUE(test, order); - - for (i = 0; i <= mm.max_order; ++i) { - struct drm_buddy_block *block; - int max_order = order[i]; - bool timeout = false; - LIST_HEAD(blocks); - u64 total, size; - LIST_HEAD(tmp); - int order, err; - - KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm), - "pre-mm check failed, abort\n"); - - order = max_order; - total = 0; - - do { -retry: - size = get_size(order, chunk_size); - err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags); - if (err) { - if (err == -ENOMEM) { - KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n", - order); - } else { - if (order--) { - err = 0; - goto retry; - } - - KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n", - order); - } - - break; - } - - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); - KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); - - list_move_tail(&block->link, &blocks); - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order, - "buddy_alloc order mismatch\n"); - - total += drm_buddy_block_size(&mm, block); - - if (__timeout(end_time, NULL)) { - timeout = true; - break; - } - } while (total < mm.size); - - if (!err) - err = check_blocks(test, &mm, &blocks, total, false); - - drm_buddy_free_list(&mm, &blocks); - - if (!err) { - KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), - "post-mm check failed\n"); - } - - if (err || timeout) - break; - - cond_resched(); - } - - kfree(order); - drm_buddy_fini(&mm); 
-} - static void drm_test_buddy_alloc_pessimistic(struct kunit *test) { u64 mm_size, size, start = 0; @@ -634,64 +240,6 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test) drm_buddy_fini(&mm); } -static void drm_test_buddy_alloc_range(struct kunit *test) -{ - unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION; - u64 offset, size, rem, chunk_size, end; - unsigned long page_num; - struct drm_buddy mm; - LIST_HEAD(blocks); - - mm_config(&size, &chunk_size); - - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size), - "buddy_init failed"); - - KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm), - "pre-mm check failed, abort!"); - - rem = mm.size; - offset = 0; - - for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) { - struct drm_buddy_block *block; - LIST_HEAD(tmp); - - size = min(page_num * mm.chunk_size, rem); - end = offset + size; - - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end, - size, mm.chunk_size, - &tmp, flags), - "alloc_range with offset=%llx, size=%llx failed\n", offset, size); - - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); - KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n"); - - KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset, - "alloc_range start offset mismatch, found=%llx, expected=%llx\n", - drm_buddy_block_offset(block), offset); - - KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true)); - - list_splice_tail(&tmp, &blocks); - - offset += size; - - rem -= size; - if (!rem) - break; - - cond_resched(); - } - - drm_buddy_free_list(&mm, &blocks); - - KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n"); - - drm_buddy_fini(&mm); -} - static void drm_test_buddy_alloc_limit(struct kunit *test) { u64 size = U64_MAX, start = 0; @@ -727,29 +275,16 @@ static void drm_test_buddy_alloc_limit(struct kunit *test) drm_buddy_fini(&mm); } -static int drm_buddy_suite_init(struct kunit_suite *suite) -{ - while (!random_seed) - random_seed = get_random_u32(); - - kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", random_seed); - - return 0; -} - static struct kunit_case drm_buddy_tests[] = { KUNIT_CASE(drm_test_buddy_alloc_limit), - KUNIT_CASE(drm_test_buddy_alloc_range), KUNIT_CASE(drm_test_buddy_alloc_optimistic), KUNIT_CASE(drm_test_buddy_alloc_pessimistic), - KUNIT_CASE(drm_test_buddy_alloc_smoke), KUNIT_CASE(drm_test_buddy_alloc_pathological), {} }; static struct kunit_suite drm_buddy_test_suite = { .name = "drm_buddy", - .suite_init = drm_buddy_suite_init, .test_cases = drm_buddy_tests, }; diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index 186b28dc70380..4e9247cf9977f 100644 --- a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -17,10 +17,6 @@ #include "../lib/drm_random.h" -static unsigned int random_seed; -static unsigned int max_iterations = 8192; -static unsigned int max_prime = 128; - enum { BEST, BOTTOMUP, @@ -37,10 +33,6 @@ static const struct insert_mode { [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH }, [EVICT] = { "evict", DRM_MM_INSERT_EVICT }, {} -}, evict_modes[] = { - { "bottom-up", DRM_MM_INSERT_LOW }, - { "top-down", DRM_MM_INSERT_HIGH }, - {} }; static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm) @@ -97,57 +89,6 @@ static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 sta return ok; } -static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size) -{ - struct drm_mm_node 
*node, *check, *found; - unsigned long n; - u64 addr; - - if (!assert_no_holes(test, mm)) - return false; - - n = 0; - addr = 0; - drm_mm_for_each_node(node, mm) { - if (node->start != addr) { - KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n", - n, addr, node->start); - return false; - } - - if (node->size != size) { - KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n", - n, size, node->size); - return false; - } - - if (drm_mm_hole_follows(node)) { - KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n); - return false; - } - - found = NULL; - drm_mm_for_each_node_in_range(check, mm, addr, addr + size) { - if (node != check) { - KUNIT_FAIL(test, - "lookup return wrong node, expected start %llx, found %llx\n", - node->start, check->start); - return false; - } - found = check; - } - if (!found) { - KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size); - return false; - } - - addr += size; - n++; - } - - return true; -} - static u64 misalignment(struct drm_mm_node *node, u64 alignment) { u64 rem; @@ -270,215 +211,6 @@ static void drm_test_mm_debug(struct kunit *test) nodes[0].start, nodes[0].size); } -static struct drm_mm_node *set_node(struct drm_mm_node *node, - u64 start, u64 size) -{ - node->start = start; - node->size = size; - return node; -} - -static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node) -{ - int err; - - err = drm_mm_reserve_node(mm, node); - if (likely(err == -ENOSPC)) - return true; - - if (!err) { - KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n", - node->start, node->size); - drm_mm_remove_node(node); - } else { - KUNIT_FAIL(test, - "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n", - err, -ENOSPC, node->start, node->size); - } - return false; -} - -static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm, - unsigned int count, - u64 size) -{ - const struct boundary { - u64 start, size; - const char *name; - } boundaries[] = { -#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" } - B(0, 0), - B(-size, 0), - B(size, 0), - B(size * count, 0), - B(-size, size), - B(-size, -size), - B(-size, 2 * size), - B(0, -size), - B(size, -size), - B(count * size, size), - B(count * size, -size), - B(count * size, count * size), - B(count * size, -count * size), - B(count * size, -(count + 1) * size), - B((count + 1) * size, size), - B((count + 1) * size, -size), - B((count + 1) * size, -2 * size), -#undef B - }; - struct drm_mm_node tmp = {}; - int n; - - for (n = 0; n < ARRAY_SIZE(boundaries); n++) { - if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start, - boundaries[n].size))) { - KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n", - n, boundaries[n].name, count, size); - return false; - } - } - - return true; -} - -static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size) -{ - DRM_RND_STATE(prng, random_seed); - struct drm_mm mm; - struct drm_mm_node tmp, *nodes, *node, *next; - unsigned int *order, n, m, o = 0; - int ret, err; - - /* For exercising drm_mm_reserve_node(), we want to check that - * reservations outside of the drm_mm range are rejected, and to - * overlapping and otherwise already occupied ranges. Afterwards, - * the tree and nodes should be intact. 
- */ - - DRM_MM_BUG_ON(!count); - DRM_MM_BUG_ON(!size); - - ret = -ENOMEM; - order = drm_random_order(count, &prng); - if (!order) - goto err; - - nodes = vzalloc(array_size(count, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - ret = -EINVAL; - drm_mm_init(&mm, 0, count * size); - - if (!check_reserve_boundaries(test, &mm, count, size)) - goto out; - - for (n = 0; n < count; n++) { - nodes[n].start = order[n] * size; - nodes[n].size = size; - - err = drm_mm_reserve_node(&mm, &nodes[n]); - if (err) { - KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n", - n, nodes[n].start); - ret = err; - goto out; - } - - if (!drm_mm_node_allocated(&nodes[n])) { - KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n", - n, nodes[n].start); - goto out; - } - - if (!expect_reserve_fail(test, &mm, &nodes[n])) - goto out; - } - - /* After random insertion the nodes should be in order */ - if (!assert_continuous(test, &mm, size)) - goto out; - - /* Repeated use should then fail */ - drm_random_reorder(order, count, &prng); - for (n = 0; n < count; n++) { - if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1))) - goto out; - - /* Remove and reinsert should work */ - drm_mm_remove_node(&nodes[order[n]]); - err = drm_mm_reserve_node(&mm, &nodes[order[n]]); - if (err) { - KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n", - n, nodes[n].start); - ret = err; - goto out; - } - } - - if (!assert_continuous(test, &mm, size)) - goto out; - - /* Overlapping use should then fail */ - for (n = 0; n < count; n++) { - if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count))) - goto out; - } - for (n = 0; n < count; n++) { - if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n)))) - goto out; - } - - /* Remove several, reinsert, check full */ - for_each_prime_number(n, min(max_prime, count)) { - for (m = 0; m < n; m++) { - node = &nodes[order[(o + m) % count]]; - drm_mm_remove_node(node); - } - - for (m = 0; m < n; m++) { - node = &nodes[order[(o + m) % count]]; - err = drm_mm_reserve_node(&mm, node); - if (err) { - KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n", - m, n, node->start); - ret = err; - goto out; - } - } - - o += n; - - if (!assert_continuous(test, &mm, size)) - goto out; - } - - ret = 0; -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - vfree(nodes); - kfree(order); -err: - return ret; -} - -static void drm_test_mm_reserve(struct kunit *test) -{ - const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); - int n; - - for_each_prime_number_from(n, 1, 54) { - u64 size = BIT_ULL(n); - - KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1)); - - cond_resched(); - } -} - static bool expect_insert(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color, const struct insert_mode *mode) @@ -503,1754 +235,118 @@ static bool expect_insert(struct kunit *test, struct drm_mm *mm, return true; } -static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size) -{ - struct drm_mm_node tmp = {}; - int err; - - err = drm_mm_insert_node(mm, &tmp, size); - if (likely(err == -ENOSPC)) - return true; - - if (!err) { - KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n", - tmp.start, tmp.size); - 
drm_mm_remove_node(&tmp); - } else { - KUNIT_FAIL(test, - "impossible insert failed with wrong error %d [expected %d], size %llu\n", - err, -ENOSPC, size); - } - return false; -} - -static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace) +static void drm_test_mm_align_pot(struct kunit *test, int max) { - DRM_RND_STATE(prng, random_seed); - const struct insert_mode *mode; struct drm_mm mm; - struct drm_mm_node *nodes, *node, *next; - unsigned int *order, n, m, o = 0; - int ret; - - /* Fill a range with lots of nodes, check it doesn't fail too early */ - - DRM_MM_BUG_ON(!count); - DRM_MM_BUG_ON(!size); - - ret = -ENOMEM; - nodes = vmalloc(array_size(count, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - order = drm_random_order(count, &prng); - if (!order) - goto err_nodes; - - ret = -EINVAL; - drm_mm_init(&mm, 0, count * size); - - for (mode = insert_modes; mode->name; mode++) { - for (n = 0; n < count; n++) { - struct drm_mm_node tmp; - - node = replace ? &tmp : &nodes[n]; - memset(node, 0, sizeof(*node)); - if (!expect_insert(test, &mm, node, size, 0, n, mode)) { - KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n", - mode->name, size, n); - goto out; - } - - if (replace) { - drm_mm_replace_node(&tmp, &nodes[n]); - if (drm_mm_node_allocated(&tmp)) { - KUNIT_FAIL(test, - "replaced old-node still allocated! step %d\n", - n); - goto out; - } - - if (!assert_node(test, &nodes[n], &mm, size, 0, n)) { - KUNIT_FAIL(test, - "replaced node did not inherit parameters, size %llu step %d\n", - size, n); - goto out; - } - - if (tmp.start != nodes[n].start) { - KUNIT_FAIL(test, - "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n", - tmp.start, size, nodes[n].start, nodes[n].size); - goto out; - } - } - } - - /* After random insertion the nodes should be in order */ - if (!assert_continuous(test, &mm, size)) - goto out; - - /* Repeated use should then fail */ - if (!expect_insert_fail(test, &mm, size)) - goto out; + struct drm_mm_node *node, *next; + int bit; - /* Remove one and reinsert, as the only hole it should refill itself */ - for (n = 0; n < count; n++) { - u64 addr = nodes[n].start; + /* Check that we can align to the full u64 address space */ - drm_mm_remove_node(&nodes[n]); - if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) { - KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n", - mode->name, size, n); - goto out; - } + drm_mm_init(&mm, 1, U64_MAX - 2); - if (nodes[n].start != addr) { - KUNIT_FAIL(test, - "%s reinsert node moved, step %d, expected %llx, found %llx\n", - mode->name, n, addr, nodes[n].start); - goto out; - } + for (bit = max - 1; bit; bit--) { + u64 align, size; - if (!assert_continuous(test, &mm, size)) - goto out; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + KUNIT_FAIL(test, "failed to allocate node"); + goto out; } - /* Remove several, reinsert, check full */ - for_each_prime_number(n, min(max_prime, count)) { - for (m = 0; m < n; m++) { - node = &nodes[order[(o + m) % count]]; - drm_mm_remove_node(node); - } - - for (m = 0; m < n; m++) { - node = &nodes[order[(o + m) % count]]; - if (!expect_insert(test, &mm, node, size, 0, n, mode)) { - KUNIT_FAIL(test, - "%s multiple reinsert failed, size %llu step %d\n", - mode->name, size, n); - goto out; - } - } - - o += n; - - if (!assert_continuous(test, &mm, size)) - goto out; - - if (!expect_insert_fail(test, &mm, size)) - goto out; + align = BIT_ULL(bit); + size = BIT_ULL(bit - 1) + 1; + if (!expect_insert(test, 
&mm, node, size, align, bit, &insert_modes[0])) { + KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit); + goto out; } - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - DRM_MM_BUG_ON(!drm_mm_clean(&mm)); - cond_resched(); } - ret = 0; out: - drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_for_each_node_safe(node, next, &mm) { drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_nodes: - vfree(nodes); - return ret; -} - -static void drm_test_mm_insert(struct kunit *test) -{ - const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); - unsigned int n; - - for_each_prime_number_from(n, 1, 54) { - u64 size = BIT_ULL(n); - - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false)); - - cond_resched(); + kfree(node); } + drm_mm_takedown(&mm); } -static void drm_test_mm_replace(struct kunit *test) +static void drm_test_mm_align32(struct kunit *test) { - const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); - unsigned int n; - - /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node, - * then replacing it with the intended node. We want to check that - * the tree is intact and all the information we need is carried - * across to the target node. - */ - - for_each_prime_number_from(n, 1, 54) { - u64 size = BIT_ULL(n); - - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true)); - - cond_resched(); - } + drm_test_mm_align_pot(test, 32); } -static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node, - u64 size, u64 alignment, unsigned long color, - u64 range_start, u64 range_end, const struct insert_mode *mode) +static void drm_test_mm_align64(struct kunit *test) { - int err; - - err = drm_mm_insert_node_in_range(mm, node, - size, alignment, color, - range_start, range_end, - mode->mode); - if (err) { - KUNIT_FAIL(test, - "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n", - size, alignment, color, mode->name, - range_start, range_end, err); - return false; - } - - if (!assert_node(test, node, mm, size, alignment, color)) { - drm_mm_remove_node(node); - return false; - } - - return true; + drm_test_mm_align_pot(test, 64); } -static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm, - u64 size, u64 range_start, u64 range_end) +static void drm_test_mm_once(struct kunit *test, unsigned int mode) { - struct drm_mm_node tmp = {}; - int err; + struct drm_mm mm; + struct drm_mm_node rsvd_lo, rsvd_hi, node; - err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end, - 0); - if (likely(err == -ENOSPC)) - return true; + drm_mm_init(&mm, 0, 7); - if (!err) { - KUNIT_FAIL(test, - "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n", - tmp.start, tmp.size, range_start, range_end); - drm_mm_remove_node(&tmp); - } else { - KUNIT_FAIL(test, - "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n", - err, -ENOSPC, size, range_start, range_end); + memset(&rsvd_lo, 0, sizeof(rsvd_lo)); + rsvd_lo.start = 1; + rsvd_lo.size = 1; + if (drm_mm_reserve_node(&mm, &rsvd_lo)) 
{ + KUNIT_FAIL(test, "Could not reserve low node\n"); + goto err; } - return false; -} - -static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm, - u64 size, u64 start, u64 end) -{ - struct drm_mm_node *node; - unsigned int n; - - if (!expect_insert_in_range_fail(test, mm, size, start, end)) - return false; - - n = div64_u64(start + size - 1, size); - drm_mm_for_each_node(node, mm) { - if (node->start < start || node->start + node->size > end) { - KUNIT_FAIL(test, - "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n", - n, node->start, node->start + node->size, start, end); - return false; - } - - if (node->start != n * size) { - KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n", - n, n * size, node->start); - return false; - } - - if (node->size != size) { - KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n", - n, size, node->size); - return false; - } - - if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) { - KUNIT_FAIL(test, "node %d is followed by a hole!\n", n); - return false; - } - - n++; + memset(&rsvd_hi, 0, sizeof(rsvd_hi)); + rsvd_hi.start = 5; + rsvd_hi.size = 1; + if (drm_mm_reserve_node(&mm, &rsvd_hi)) { + KUNIT_FAIL(test, "Could not reserve low node\n"); + goto err_lo; } - if (start > 0) { - node = __drm_mm_interval_first(mm, 0, start - 1); - if (drm_mm_node_allocated(node)) { - KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n", - node->start, node->size, start); - return false; - } + if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) { + KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n"); + goto err_hi; } - if (end < U64_MAX) { - node = __drm_mm_interval_first(mm, end, U64_MAX); - if (drm_mm_node_allocated(node)) { - KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n", - node->start, node->size, end); - return false; - } + memset(&node, 0, sizeof(node)); + if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) { + KUNIT_FAIL(test, "Could not insert the node into the available hole!\n"); + goto err_hi; } - return true; + drm_mm_remove_node(&node); +err_hi: + drm_mm_remove_node(&rsvd_hi); +err_lo: + drm_mm_remove_node(&rsvd_lo); +err: + drm_mm_takedown(&mm); } -static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size, - u64 start, u64 end) +static void drm_test_mm_lowest(struct kunit *test) { - const struct insert_mode *mode; - struct drm_mm mm; - struct drm_mm_node *nodes, *node, *next; - unsigned int n, start_n, end_n; - int ret; - - DRM_MM_BUG_ON(!count); - DRM_MM_BUG_ON(!size); - DRM_MM_BUG_ON(end <= start); - - /* Very similar to __drm_test_mm_insert(), but now instead of populating the - * full range of the drm_mm, we try to fill a small portion of it. 
- */ - - ret = -ENOMEM; - nodes = vzalloc(array_size(count, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - ret = -EINVAL; - drm_mm_init(&mm, 0, count * size); - - start_n = div64_u64(start + size - 1, size); - end_n = div64_u64(end - size, size); - - for (mode = insert_modes; mode->name; mode++) { - for (n = start_n; n <= end_n; n++) { - if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n, - start, end, mode)) { - KUNIT_FAIL(test, - "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n", - mode->name, size, n, start_n, end_n, start, end); - goto out; - } - } - - if (!assert_contiguous_in_range(test, &mm, size, start, end)) { - KUNIT_FAIL(test, - "%s: range [%llx, %llx] not full after initialisation, size=%llu\n", - mode->name, start, end, size); - goto out; - } - - /* Remove one and reinsert, it should refill itself */ - for (n = start_n; n <= end_n; n++) { - u64 addr = nodes[n].start; - - drm_mm_remove_node(&nodes[n]); - if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n, - start, end, mode)) { - KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n); - goto out; - } - - if (nodes[n].start != addr) { - KUNIT_FAIL(test, - "%s reinsert node moved, step %d, expected %llx, found %llx\n", - mode->name, n, addr, nodes[n].start); - goto out; - } - } - - if (!assert_contiguous_in_range(test, &mm, size, start, end)) { - KUNIT_FAIL(test, - "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n", - mode->name, start, end, size); - goto out; - } - - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - DRM_MM_BUG_ON(!drm_mm_clean(&mm)); - - cond_resched(); - } - - ret = 0; -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - vfree(nodes); - return ret; -} - -static int insert_outside_range(struct kunit *test) -{ - struct drm_mm mm; - const unsigned int start = 1024; - const unsigned int end = 2048; - const unsigned int size = end - start; - - drm_mm_init(&mm, start, size); - - if (!expect_insert_in_range_fail(test, &mm, 1, 0, start)) - return -EINVAL; - - if (!expect_insert_in_range_fail(test, &mm, size, - start - size / 2, start + (size + 1) / 2)) - return -EINVAL; - - if (!expect_insert_in_range_fail(test, &mm, size, - end - (size + 1) / 2, end + size / 2)) - return -EINVAL; - - if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size)) - return -EINVAL; - - drm_mm_takedown(&mm); - return 0; -} - -static void drm_test_mm_insert_range(struct kunit *test) -{ - const unsigned int count = min_t(unsigned int, BIT(13), max_iterations); - unsigned int n; - - /* Check that requests outside the bounds of drm_mm are rejected. 
*/ - KUNIT_ASSERT_FALSE(test, insert_outside_range(test)); - - for_each_prime_number_from(n, 1, 50) { - const u64 size = BIT_ULL(n); - const u64 max = count * size; - - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, - max / 2, max / 2)); - KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, - max / 4 + 1, 3 * max / 4 - 1)); - - cond_resched(); - } -} - -static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes, - unsigned int num_insert, const struct insert_mode *mode) -{ - unsigned int size = 4096; - unsigned int i; - - for (i = 0; i < num_insert; i++) { - if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) { - KUNIT_FAIL(test, "%s insert failed\n", mode->name); - return -EINVAL; - } - } - - /* introduce fragmentation by freeing every other node */ - for (i = 0; i < num_insert; i++) { - if (i % 2 == 0) - drm_mm_remove_node(&nodes[i]); - } - - return 0; -} - -static u64 get_insert_time(struct kunit *test, struct drm_mm *mm, - unsigned int num_insert, struct drm_mm_node *nodes, - const struct insert_mode *mode) -{ - unsigned int size = 8192; - ktime_t start; - unsigned int i; - - start = ktime_get(); - for (i = 0; i < num_insert; i++) { - if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) { - KUNIT_FAIL(test, "%s insert failed\n", mode->name); - return 0; - } - } - - return ktime_to_ns(ktime_sub(ktime_get(), start)); -} - -static void drm_test_mm_frag(struct kunit *test) -{ - struct drm_mm mm; - const struct insert_mode *mode; - struct drm_mm_node *nodes, *node, *next; - unsigned int insert_size = 10000; - unsigned int scale_factor = 4; - - /* We need 4 * insert_size nodes to hold intermediate allocated - * drm_mm nodes. 
- * 1 times for prepare_frag() - * 1 times for get_insert_time() - * 2 times for get_insert_time() - */ - nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - /* For BOTTOMUP and TOPDOWN, we first fragment the - * address space using prepare_frag() and then try to verify - * that insertions scale quadratically from 10k to 20k insertions - */ - drm_mm_init(&mm, 1, U64_MAX - 2); - for (mode = insert_modes; mode->name; mode++) { - u64 insert_time1, insert_time2; - - if (mode->mode != DRM_MM_INSERT_LOW && - mode->mode != DRM_MM_INSERT_HIGH) - continue; - - if (prepare_frag(test, &mm, nodes, insert_size, mode)) - goto err; - - insert_time1 = get_insert_time(test, &mm, insert_size, - nodes + insert_size, mode); - if (insert_time1 == 0) - goto err; - - insert_time2 = get_insert_time(test, &mm, (insert_size * 2), - nodes + insert_size * 2, mode); - if (insert_time2 == 0) - goto err; - - kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n", - mode->name, insert_size, insert_size * 2, insert_time1, insert_time2); - - if (insert_time2 > (scale_factor * insert_time1)) { - KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n", - mode->name, insert_time2 - (scale_factor * insert_time1)); - goto err; - } - - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - } - -err: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - vfree(nodes); -} - -static void drm_test_mm_align(struct kunit *test) -{ - const struct insert_mode *mode; - const unsigned int max_count = min(8192u, max_prime); - struct drm_mm mm; - struct drm_mm_node *nodes, *node, *next; - unsigned int prime; - - /* For each of the possible insertion modes, we pick a few - * arbitrary alignments and check that the inserted node - * meets our requirements. 
- */ - - nodes = vzalloc(array_size(max_count, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - drm_mm_init(&mm, 1, U64_MAX - 2); - - for (mode = insert_modes; mode->name; mode++) { - unsigned int i = 0; - - for_each_prime_number_from(prime, 1, max_count) { - u64 size = next_prime_number(prime); - - if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) { - KUNIT_FAIL(test, "%s insert failed with alignment=%d", - mode->name, prime); - goto out; - } - - i++; - } - - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - DRM_MM_BUG_ON(!drm_mm_clean(&mm)); - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - vfree(nodes); -} - -static void drm_test_mm_align_pot(struct kunit *test, int max) -{ - struct drm_mm mm; - struct drm_mm_node *node, *next; - int bit; - - /* Check that we can align to the full u64 address space */ - - drm_mm_init(&mm, 1, U64_MAX - 2); - - for (bit = max - 1; bit; bit--) { - u64 align, size; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) { - KUNIT_FAIL(test, "failed to allocate node"); - goto out; - } - - align = BIT_ULL(bit); - size = BIT_ULL(bit - 1) + 1; - if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) { - KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit); - goto out; - } - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) { - drm_mm_remove_node(node); - kfree(node); - } - drm_mm_takedown(&mm); -} - -static void drm_test_mm_align32(struct kunit *test) -{ - drm_test_mm_align_pot(test, 32); -} - -static void drm_test_mm_align64(struct kunit *test) -{ - drm_test_mm_align_pot(test, 64); -} - -static void show_scan(struct kunit *test, const struct drm_mm_scan *scan) -{ - kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n", - scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color); -} - -static void show_holes(struct kunit *test, const struct drm_mm *mm, int count) -{ - u64 hole_start, hole_end; - struct drm_mm_node *hole; - - drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { - struct drm_mm_node *next = list_next_entry(hole, node_list); - const char *node1 = NULL, *node2 = NULL; - - if (drm_mm_node_allocated(hole)) - node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ", - hole->start, hole->size, hole->color); - - if (drm_mm_node_allocated(next)) - node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]", - next->start, next->size, next->color); - - kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1, - hole_start, hole_end, hole_end - hole_start, node2); - - kfree(node2); - kfree(node1); - - if (!--count) - break; - } -} - -struct evict_node { - struct drm_mm_node node; - struct list_head link; -}; - -static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan, - struct evict_node *nodes, unsigned int *order, unsigned int count, - bool use_color, struct list_head *evict_list) -{ - struct evict_node *e, *en; - unsigned int i; - - for (i = 0; i < count; i++) { - e = &nodes[order ? 
order[i] : i]; - list_add(&e->link, evict_list); - if (drm_mm_scan_add_block(scan, &e->node)) - break; - } - list_for_each_entry_safe(e, en, evict_list, link) { - if (!drm_mm_scan_remove_block(scan, &e->node)) - list_del(&e->link); - } - if (list_empty(evict_list)) { - KUNIT_FAIL(test, - "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n", - scan->size, count, scan->alignment, scan->color); - return false; - } - - list_for_each_entry(e, evict_list, link) - drm_mm_remove_node(&e->node); - - if (use_color) { - struct drm_mm_node *node; - - while ((node = drm_mm_scan_color_evict(scan))) { - e = container_of(node, typeof(*e), node); - drm_mm_remove_node(&e->node); - list_add(&e->link, evict_list); - } - } else { - if (drm_mm_scan_color_evict(scan)) { - KUNIT_FAIL(test, - "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n"); - return false; - } - } - - return true; -} - -static bool evict_nothing(struct kunit *test, struct drm_mm *mm, - unsigned int total_size, struct evict_node *nodes) -{ - struct drm_mm_scan scan; - LIST_HEAD(evict_list); - struct evict_node *e; - struct drm_mm_node *node; - unsigned int n; - - drm_mm_scan_init(&scan, mm, 1, 0, 0, 0); - for (n = 0; n < total_size; n++) { - e = &nodes[n]; - list_add(&e->link, &evict_list); - drm_mm_scan_add_block(&scan, &e->node); - } - list_for_each_entry(e, &evict_list, link) - drm_mm_scan_remove_block(&scan, &e->node); - - for (n = 0; n < total_size; n++) { - e = &nodes[n]; - - if (!drm_mm_node_allocated(&e->node)) { - KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n); - return false; - } - - e->link.next = NULL; - } - - drm_mm_for_each_node(node, mm) { - e = container_of(node, typeof(*e), node); - e->link.next = &e->link; - } - - for (n = 0; n < total_size; n++) { - e = &nodes[n]; - - if (!e->link.next) { - KUNIT_FAIL(test, "node[%d] no longer connected!\n", n); - return false; - } - } - - return assert_continuous(test, mm, nodes[0].node.size); -} - -static bool evict_everything(struct kunit *test, struct drm_mm *mm, - unsigned int total_size, struct evict_node *nodes) -{ - struct drm_mm_scan scan; - LIST_HEAD(evict_list); - struct evict_node *e; - unsigned int n; - int err; - - drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0); - for (n = 0; n < total_size; n++) { - e = &nodes[n]; - list_add(&e->link, &evict_list); - if (drm_mm_scan_add_block(&scan, &e->node)) - break; - } - - err = 0; - list_for_each_entry(e, &evict_list, link) { - if (!drm_mm_scan_remove_block(&scan, &e->node)) { - if (!err) { - KUNIT_FAIL(test, "Node %lld not marked for eviction!\n", - e->node.start); - err = -EINVAL; - } - } - } - if (err) - return false; - - list_for_each_entry(e, &evict_list, link) - drm_mm_remove_node(&e->node); - - if (!assert_one_hole(test, mm, 0, total_size)) - return false; - - list_for_each_entry(e, &evict_list, link) { - err = drm_mm_reserve_node(mm, &e->node); - if (err) { - KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n", - e->node.start); - return false; - } - } - - return assert_continuous(test, mm, nodes[0].node.size); -} - -static int evict_something(struct kunit *test, struct drm_mm *mm, - u64 range_start, u64 range_end, struct evict_node *nodes, - unsigned int *order, unsigned int count, unsigned int size, - unsigned int alignment, const struct insert_mode *mode) -{ - struct drm_mm_scan scan; - LIST_HEAD(evict_list); - struct evict_node *e; - struct drm_mm_node tmp; - int err; - - drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start, - range_end, 
mode->mode); - if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list)) - return -EINVAL; - - memset(&tmp, 0, sizeof(tmp)); - err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0, - DRM_MM_INSERT_EVICT); - if (err) { - KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n", - size, alignment); - show_scan(test, &scan); - show_holes(test, mm, 3); - return err; - } - - if (tmp.start < range_start || tmp.start + tmp.size > range_end) { - KUNIT_FAIL(test, - "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n", - tmp.start, tmp.size, range_start, range_end); - err = -EINVAL; - } - - if (!assert_node(test, &tmp, mm, size, alignment, 0) || - drm_mm_hole_follows(&tmp)) { - KUNIT_FAIL(test, - "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n", - tmp.size, size, alignment, misalignment(&tmp, alignment), - tmp.start, drm_mm_hole_follows(&tmp)); - err = -EINVAL; - } - - drm_mm_remove_node(&tmp); - if (err) - return err; - - list_for_each_entry(e, &evict_list, link) { - err = drm_mm_reserve_node(mm, &e->node); - if (err) { - KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n", - e->node.start); - return err; - } - } - - if (!assert_continuous(test, mm, nodes[0].node.size)) { - KUNIT_FAIL(test, "range is no longer continuous\n"); - return -EINVAL; - } - - return 0; -} - -static void drm_test_mm_evict(struct kunit *test) -{ - DRM_RND_STATE(prng, random_seed); - const unsigned int size = 8192; - const struct insert_mode *mode; - struct drm_mm mm; - struct evict_node *nodes; - struct drm_mm_node *node, *next; - unsigned int *order, n; - - /* Here we populate a full drm_mm and then try and insert a new node - * by evicting other nodes in a random order. The drm_mm_scan should - * pick the first matching hole it finds from the random list. We - * repeat that for different allocation strategies, alignments and - * sizes to try and stress the hole finder. 
- */ - - nodes = vzalloc(array_size(size, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - order = drm_random_order(size, &prng); - if (!order) - goto err_nodes; - - drm_mm_init(&mm, 0, size); - for (n = 0; n < size; n++) { - if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) { - KUNIT_FAIL(test, "insert failed, step %d\n", n); - goto out; - } - } - - /* First check that using the scanner doesn't break the mm */ - if (!evict_nothing(test, &mm, size, nodes)) { - KUNIT_FAIL(test, "evict_nothing() failed\n"); - goto out; - } - if (!evict_everything(test, &mm, size, nodes)) { - KUNIT_FAIL(test, "evict_everything() failed\n"); - goto out; - } - - for (mode = evict_modes; mode->name; mode++) { - for (n = 1; n <= size; n <<= 1) { - drm_random_reorder(order, size, &prng); - if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1, - mode)) { - KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n", - mode->name, n); - goto out; - } - } - - for (n = 1; n < size; n <<= 1) { - drm_random_reorder(order, size, &prng); - if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, - size / 2, n, mode)) { - KUNIT_FAIL(test, - "%s evict_something(size=%u, alignment=%u) failed\n", - mode->name, size / 2, n); - goto out; - } - } - - for_each_prime_number_from(n, 1, min(size, max_prime)) { - unsigned int nsize = (size - n + 1) / 2; - - DRM_MM_BUG_ON(!nsize); - - drm_random_reorder(order, size, &prng); - if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, - nsize, n, mode)) { - KUNIT_FAIL(test, - "%s evict_something(size=%u, alignment=%u) failed\n", - mode->name, nsize, n); - goto out; - } - } - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_nodes: - vfree(nodes); -} - -static void drm_test_mm_evict_range(struct kunit *test) -{ - DRM_RND_STATE(prng, random_seed); - const unsigned int size = 8192; - const unsigned int range_size = size / 2; - const unsigned int range_start = size / 4; - const unsigned int range_end = range_start + range_size; - const struct insert_mode *mode; - struct drm_mm mm; - struct evict_node *nodes; - struct drm_mm_node *node, *next; - unsigned int *order, n; - - /* Like drm_test_mm_evict() but now we are limiting the search to a - * small portion of the full drm_mm. 
- */ - - nodes = vzalloc(array_size(size, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - order = drm_random_order(size, &prng); - if (!order) - goto err_nodes; - - drm_mm_init(&mm, 0, size); - for (n = 0; n < size; n++) { - if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) { - KUNIT_FAIL(test, "insert failed, step %d\n", n); - goto out; - } - } - - for (mode = evict_modes; mode->name; mode++) { - for (n = 1; n <= range_size; n <<= 1) { - drm_random_reorder(order, size, &prng); - if (evict_something(test, &mm, range_start, range_end, nodes, - order, size, n, 1, mode)) { - KUNIT_FAIL(test, - "%s evict_something(size=%u) failed with range [%u, %u]\n", - mode->name, n, range_start, range_end); - goto out; - } - } - - for (n = 1; n <= range_size; n <<= 1) { - drm_random_reorder(order, size, &prng); - if (evict_something(test, &mm, range_start, range_end, nodes, - order, size, range_size / 2, n, mode)) { - KUNIT_FAIL(test, - "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", - mode->name, range_size / 2, n, range_start, range_end); - goto out; - } - } - - for_each_prime_number_from(n, 1, min(range_size, max_prime)) { - unsigned int nsize = (range_size - n + 1) / 2; - - DRM_MM_BUG_ON(!nsize); - - drm_random_reorder(order, size, &prng); - if (evict_something(test, &mm, range_start, range_end, nodes, - order, size, nsize, n, mode)) { - KUNIT_FAIL(test, - "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", - mode->name, nsize, n, range_start, range_end); - goto out; - } - } - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_nodes: - vfree(nodes); -} - -static unsigned int node_index(const struct drm_mm_node *node) -{ - return div64_u64(node->start, node->size); -} - -static void drm_test_mm_topdown(struct kunit *test) -{ - const struct insert_mode *topdown = &insert_modes[TOPDOWN]; - - DRM_RND_STATE(prng, random_seed); - const unsigned int count = 8192; - unsigned int size; - unsigned long *bitmap; - struct drm_mm mm; - struct drm_mm_node *nodes, *node, *next; - unsigned int *order, n, m, o = 0; - - /* When allocating top-down, we expect to be returned a node - * from a suitable hole at the top of the drm_mm. We check that - * the returned node does match the highest available slot. 
- */ - - nodes = vzalloc(array_size(count, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - bitmap = bitmap_zalloc(count, GFP_KERNEL); - if (!bitmap) - goto err_nodes; - - order = drm_random_order(count, &prng); - if (!order) - goto err_bitmap; - - for (size = 1; size <= 64; size <<= 1) { - drm_mm_init(&mm, 0, size * count); - for (n = 0; n < count; n++) { - if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) { - KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n); - goto out; - } - - if (drm_mm_hole_follows(&nodes[n])) { - KUNIT_FAIL(test, - "hole after topdown insert %d, start=%llx\n, size=%u", - n, nodes[n].start, size); - goto out; - } - - if (!assert_one_hole(test, &mm, 0, size * (count - n - 1))) - goto out; - } - - if (!assert_continuous(test, &mm, size)) - goto out; - - drm_random_reorder(order, count, &prng); - for_each_prime_number_from(n, 1, min(count, max_prime)) { - for (m = 0; m < n; m++) { - node = &nodes[order[(o + m) % count]]; - drm_mm_remove_node(node); - __set_bit(node_index(node), bitmap); - } - - for (m = 0; m < n; m++) { - unsigned int last; - - node = &nodes[order[(o + m) % count]]; - if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) { - KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n); - goto out; - } - - if (drm_mm_hole_follows(node)) { - KUNIT_FAIL(test, - "hole after topdown insert %d/%d, start=%llx\n", - m, n, node->start); - goto out; - } - - last = find_last_bit(bitmap, count); - if (node_index(node) != last) { - KUNIT_FAIL(test, - "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n", - m, n, size, last, node_index(node)); - goto out; - } - - __clear_bit(last, bitmap); - } - - DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count); - - o += n; - } - - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - DRM_MM_BUG_ON(!drm_mm_clean(&mm)); - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_bitmap: - bitmap_free(bitmap); -err_nodes: - vfree(nodes); -} - -static void drm_test_mm_bottomup(struct kunit *test) -{ - const struct insert_mode *bottomup = &insert_modes[BOTTOMUP]; - - DRM_RND_STATE(prng, random_seed); - const unsigned int count = 8192; - unsigned int size; - unsigned long *bitmap; - struct drm_mm mm; - struct drm_mm_node *nodes, *node, *next; - unsigned int *order, n, m, o = 0; - - /* Like drm_test_mm_topdown, but instead of searching for the last hole, - * we search for the first. 
- */ - - nodes = vzalloc(array_size(count, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - bitmap = bitmap_zalloc(count, GFP_KERNEL); - if (!bitmap) - goto err_nodes; - - order = drm_random_order(count, &prng); - if (!order) - goto err_bitmap; - - for (size = 1; size <= 64; size <<= 1) { - drm_mm_init(&mm, 0, size * count); - for (n = 0; n < count; n++) { - if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) { - KUNIT_FAIL(test, - "bottomup insert failed, size %u step %d\n", size, n); - goto out; - } - - if (!assert_one_hole(test, &mm, size * (n + 1), size * count)) - goto out; - } - - if (!assert_continuous(test, &mm, size)) - goto out; - - drm_random_reorder(order, count, &prng); - for_each_prime_number_from(n, 1, min(count, max_prime)) { - for (m = 0; m < n; m++) { - node = &nodes[order[(o + m) % count]]; - drm_mm_remove_node(node); - __set_bit(node_index(node), bitmap); - } - - for (m = 0; m < n; m++) { - unsigned int first; - - node = &nodes[order[(o + m) % count]]; - if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) { - KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n); - goto out; - } - - first = find_first_bit(bitmap, count); - if (node_index(node) != first) { - KUNIT_FAIL(test, - "node %d/%d not inserted into bottom hole, expected %d, found %d\n", - m, n, first, node_index(node)); - goto out; - } - __clear_bit(first, bitmap); - } - - DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count); - - o += n; - } - - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - DRM_MM_BUG_ON(!drm_mm_clean(&mm)); - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_bitmap: - bitmap_free(bitmap); -err_nodes: - vfree(nodes); -} - -static void drm_test_mm_once(struct kunit *test, unsigned int mode) -{ - struct drm_mm mm; - struct drm_mm_node rsvd_lo, rsvd_hi, node; - - drm_mm_init(&mm, 0, 7); - - memset(&rsvd_lo, 0, sizeof(rsvd_lo)); - rsvd_lo.start = 1; - rsvd_lo.size = 1; - if (drm_mm_reserve_node(&mm, &rsvd_lo)) { - KUNIT_FAIL(test, "Could not reserve low node\n"); - goto err; - } - - memset(&rsvd_hi, 0, sizeof(rsvd_hi)); - rsvd_hi.start = 5; - rsvd_hi.size = 1; - if (drm_mm_reserve_node(&mm, &rsvd_hi)) { - KUNIT_FAIL(test, "Could not reserve low node\n"); - goto err_lo; - } - - if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) { - KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n"); - goto err_hi; - } - - memset(&node, 0, sizeof(node)); - if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) { - KUNIT_FAIL(test, "Could not insert the node into the available hole!\n"); - goto err_hi; - } - - drm_mm_remove_node(&node); -err_hi: - drm_mm_remove_node(&rsvd_hi); -err_lo: - drm_mm_remove_node(&rsvd_lo); -err: - drm_mm_takedown(&mm); -} - -static void drm_test_mm_lowest(struct kunit *test) -{ - drm_test_mm_once(test, DRM_MM_INSERT_LOW); -} + drm_test_mm_once(test, DRM_MM_INSERT_LOW); +} static void drm_test_mm_highest(struct kunit *test) { drm_test_mm_once(test, DRM_MM_INSERT_HIGH); } -static void separate_adjacent_colors(const struct drm_mm_node *node, - unsigned long color, u64 *start, u64 *end) -{ - if (drm_mm_node_allocated(node) && node->color != color) - ++*start; - - node = list_next_entry(node, node_list); - if (drm_mm_node_allocated(node) && node->color != color) - --*end; -} - -static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node) -{ - if (!drm_mm_hole_follows(node) && - 
drm_mm_node_allocated(list_next_entry(node, node_list))) { - KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n", - node->color, node->start, node->size, - list_next_entry(node, node_list)->color, - list_next_entry(node, node_list)->start, - list_next_entry(node, node_list)->size); - return true; - } - - return false; -} - -static void drm_test_mm_color(struct kunit *test) -{ - const unsigned int count = min(4096u, max_iterations); - const struct insert_mode *mode; - struct drm_mm mm; - struct drm_mm_node *node, *nn; - unsigned int n; - - /* Color adjustment complicates everything. First we just check - * that when we insert a node we apply any color_adjustment callback. - * The callback we use should ensure that there is a gap between - * any two nodes, and so after each insertion we check that those - * holes are inserted and that they are preserved. - */ - - drm_mm_init(&mm, 0, U64_MAX); - - for (n = 1; n <= count; n++) { - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - goto out; - - if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) { - KUNIT_FAIL(test, "insert failed, step %d\n", n); - kfree(node); - goto out; - } - } - - drm_mm_for_each_node_safe(node, nn, &mm) { - if (node->color != node->size) { - KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n", - node->size, node->color); - - goto out; - } - - drm_mm_remove_node(node); - kfree(node); - } - - /* Now, let's start experimenting with applying a color callback */ - mm.color_adjust = separate_adjacent_colors; - for (mode = insert_modes; mode->name; mode++) { - u64 last; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - goto out; - - node->size = 1 + 2 * count; - node->color = node->size; - - if (drm_mm_reserve_node(&mm, node)) { - KUNIT_FAIL(test, "initial reserve failed!\n"); - goto out; - } - - last = node->start + node->size; - - for (n = 1; n <= count; n++) { - int rem; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - goto out; - - node->start = last; - node->size = n + count; - node->color = node->size; - - if (drm_mm_reserve_node(&mm, node) != -ENOSPC) { - KUNIT_FAIL(test, "reserve %d did not report color overlap!", n); - goto out; - } - - node->start += n + 1; - rem = misalignment(node, n + count); - node->start += n + count - rem; - - if (drm_mm_reserve_node(&mm, node)) { - KUNIT_FAIL(test, "reserve %d failed", n); - goto out; - } - - last = node->start + node->size; - } - - for (n = 1; n <= count; n++) { - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - goto out; - - if (!expect_insert(test, &mm, node, n, n, n, mode)) { - KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n); - kfree(node); - goto out; - } - } - - drm_mm_for_each_node_safe(node, nn, &mm) { - u64 rem; - - if (node->color != node->size) { - KUNIT_FAIL(test, - "%s invalid color stored: expected %lld, found %ld\n", - mode->name, node->size, node->color); - - goto out; - } - - if (colors_abutt(test, node)) - goto out; - - div64_u64_rem(node->start, node->size, &rem); - if (rem) { - KUNIT_FAIL(test, - "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n", - mode->name, node->start, node->size, rem); - goto out; - } - - drm_mm_remove_node(node); - kfree(node); - } - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, nn, &mm) { - drm_mm_remove_node(node); - kfree(node); - } - drm_mm_takedown(&mm); -} - -static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start, - u64 range_end, struct evict_node 
*nodes, unsigned int *order, - unsigned int count, unsigned int size, unsigned int alignment, - unsigned long color, const struct insert_mode *mode) -{ - struct drm_mm_scan scan; - LIST_HEAD(evict_list); - struct evict_node *e; - struct drm_mm_node tmp; - int err; - - drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start, - range_end, mode->mode); - if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list)) - return -EINVAL; - - memset(&tmp, 0, sizeof(tmp)); - err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color, - DRM_MM_INSERT_EVICT); - if (err) { - KUNIT_FAIL(test, - "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n", - size, alignment, color, err); - show_scan(test, &scan); - show_holes(test, mm, 3); - return err; - } - - if (tmp.start < range_start || tmp.start + tmp.size > range_end) { - KUNIT_FAIL(test, - "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n", - tmp.start, tmp.size, range_start, range_end); - err = -EINVAL; - } - - if (colors_abutt(test, &tmp)) - err = -EINVAL; - - if (!assert_node(test, &tmp, mm, size, alignment, color)) { - KUNIT_FAIL(test, - "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n", - tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start); - err = -EINVAL; - } - - drm_mm_remove_node(&tmp); - if (err) - return err; - - list_for_each_entry(e, &evict_list, link) { - err = drm_mm_reserve_node(mm, &e->node); - if (err) { - KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n", - e->node.start); - return err; - } - } - - cond_resched(); - return 0; -} - -static void drm_test_mm_color_evict(struct kunit *test) -{ - DRM_RND_STATE(prng, random_seed); - const unsigned int total_size = min(8192u, max_iterations); - const struct insert_mode *mode; - unsigned long color = 0; - struct drm_mm mm; - struct evict_node *nodes; - struct drm_mm_node *node, *next; - unsigned int *order, n; - - /* Check that the drm_mm_scan also honours color adjustment when - * choosing its victims to create a hole. Our color_adjust does not - * allow two nodes to be placed together without an intervening hole - * enlarging the set of victims that must be evicted. 
- */ - - nodes = vzalloc(array_size(total_size, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - order = drm_random_order(total_size, &prng); - if (!order) - goto err_nodes; - - drm_mm_init(&mm, 0, 2 * total_size - 1); - mm.color_adjust = separate_adjacent_colors; - for (n = 0; n < total_size; n++) { - if (!expect_insert(test, &mm, &nodes[n].node, - 1, 0, color++, - &insert_modes[0])) { - KUNIT_FAIL(test, "insert failed, step %d\n", n); - goto out; - } - } - - for (mode = evict_modes; mode->name; mode++) { - for (n = 1; n <= total_size; n <<= 1) { - drm_random_reorder(order, total_size, &prng); - if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size, - n, 1, color++, mode)) { - KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n); - goto out; - } - } - - for (n = 1; n < total_size; n <<= 1) { - drm_random_reorder(order, total_size, &prng); - if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size, - total_size / 2, n, color++, mode)) { - KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n", - mode->name, total_size / 2, n); - goto out; - } - } - - for_each_prime_number_from(n, 1, min(total_size, max_prime)) { - unsigned int nsize = (total_size - n + 1) / 2; - - DRM_MM_BUG_ON(!nsize); - - drm_random_reorder(order, total_size, &prng); - if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size, - nsize, n, color++, mode)) { - KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n", - mode->name, nsize, n); - goto out; - } - } - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_nodes: - vfree(nodes); -} - -static void drm_test_mm_color_evict_range(struct kunit *test) -{ - DRM_RND_STATE(prng, random_seed); - const unsigned int total_size = 8192; - const unsigned int range_size = total_size / 2; - const unsigned int range_start = total_size / 4; - const unsigned int range_end = range_start + range_size; - const struct insert_mode *mode; - unsigned long color = 0; - struct drm_mm mm; - struct evict_node *nodes; - struct drm_mm_node *node, *next; - unsigned int *order, n; - - /* Like drm_test_mm_color_evict(), but limited to small portion of the full - * drm_mm range. 
- */ - - nodes = vzalloc(array_size(total_size, sizeof(*nodes))); - KUNIT_ASSERT_TRUE(test, nodes); - - order = drm_random_order(total_size, &prng); - if (!order) - goto err_nodes; - - drm_mm_init(&mm, 0, 2 * total_size - 1); - mm.color_adjust = separate_adjacent_colors; - for (n = 0; n < total_size; n++) { - if (!expect_insert(test, &mm, &nodes[n].node, - 1, 0, color++, - &insert_modes[0])) { - KUNIT_FAIL(test, "insert failed, step %d\n", n); - goto out; - } - } - - for (mode = evict_modes; mode->name; mode++) { - for (n = 1; n <= range_size; n <<= 1) { - drm_random_reorder(order, range_size, &prng); - if (evict_color(test, &mm, range_start, range_end, nodes, order, - total_size, n, 1, color++, mode)) { - KUNIT_FAIL(test, - "%s evict_color(size=%u) failed for range [%x, %x]\n", - mode->name, n, range_start, range_end); - goto out; - } - } - - for (n = 1; n < range_size; n <<= 1) { - drm_random_reorder(order, total_size, &prng); - if (evict_color(test, &mm, range_start, range_end, nodes, order, - total_size, range_size / 2, n, color++, mode)) { - KUNIT_FAIL(test, - "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", - mode->name, total_size / 2, n, range_start, range_end); - goto out; - } - } - - for_each_prime_number_from(n, 1, min(range_size, max_prime)) { - unsigned int nsize = (range_size - n + 1) / 2; - - DRM_MM_BUG_ON(!nsize); - - drm_random_reorder(order, total_size, &prng); - if (evict_color(test, &mm, range_start, range_end, nodes, order, - total_size, nsize, n, color++, mode)) { - KUNIT_FAIL(test, - "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", - mode->name, nsize, n, range_start, range_end); - goto out; - } - } - - cond_resched(); - } - -out: - drm_mm_for_each_node_safe(node, next, &mm) - drm_mm_remove_node(node); - drm_mm_takedown(&mm); - kfree(order); -err_nodes: - vfree(nodes); -} - -static int drm_mm_suite_init(struct kunit_suite *suite) -{ - while (!random_seed) - random_seed = get_random_u32(); - - kunit_info(suite, - "Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n", - random_seed, max_iterations, max_prime); - - return 0; -} - -module_param(random_seed, uint, 0400); -module_param(max_iterations, uint, 0400); -module_param(max_prime, uint, 0400); - static struct kunit_case drm_mm_tests[] = { KUNIT_CASE(drm_test_mm_init), KUNIT_CASE(drm_test_mm_debug), - KUNIT_CASE(drm_test_mm_reserve), - KUNIT_CASE(drm_test_mm_insert), - KUNIT_CASE(drm_test_mm_replace), - KUNIT_CASE(drm_test_mm_insert_range), - KUNIT_CASE(drm_test_mm_frag), - KUNIT_CASE(drm_test_mm_align), KUNIT_CASE(drm_test_mm_align32), KUNIT_CASE(drm_test_mm_align64), - KUNIT_CASE(drm_test_mm_evict), - KUNIT_CASE(drm_test_mm_evict_range), - KUNIT_CASE(drm_test_mm_topdown), - KUNIT_CASE(drm_test_mm_bottomup), KUNIT_CASE(drm_test_mm_lowest), KUNIT_CASE(drm_test_mm_highest), - KUNIT_CASE(drm_test_mm_color), - KUNIT_CASE(drm_test_mm_color_evict), - KUNIT_CASE(drm_test_mm_color_evict_range), {} }; static struct kunit_suite drm_mm_test_suite = { .name = "drm_mm", - .suite_init = drm_mm_suite_init, .test_cases = drm_mm_tests, }; From a0a0bd3effea22d9886ee6ff98c591acd9565787 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 25 Oct 2023 15:24:28 +0200 Subject: [PATCH 066/117] drm/todo: Add entry to clean up former seltests suites Most of those suites are undocumented and aren't really clear about what they are testing. Let's add a TODO entry as a future task to get started into KUnit and DRM. 
Acked-by: Daniel Vetter Link: https://lore.kernel.org/r/20231025132428.723672-2-mripard@kernel.org Signed-off-by: Maxime Ripard --- Documentation/gpu/todo.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 03fe5d1247be2..b62c7fa0c2bcc 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -621,6 +621,23 @@ Contact: Javier Martinez Canillas Level: Intermediate +Clean up and document former selftests suites +--------------------------------------------- + +Some KUnit test suites (drm_buddy, drm_cmdline_parser, drm_damage_helper, +drm_format, drm_framebuffer, drm_dp_mst_helper, drm_mm, drm_plane_helper and +drm_rect) are former selftests suites that have been converted over when KUnit +was first introduced. + +These suites were fairly undocumented, and with different goals than what unit +tests can be. Trying to identify what each test in these suites actually test +for, whether that makes sense for a unit test, and either remove it if it +doesn't or document it if it does would be of great help. + +Contact: Maxime Ripard + +Level: Intermediate + Enable trinity for DRM ---------------------- From c400eb4d6f5f603f4f3f6cc4b6fdacd416ff142e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 6 Nov 2023 10:41:20 -0300 Subject: [PATCH 067/117] MAINTAINERS: Add Maira to V3D maintainers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I've been contributing to V3D with improvements, reviews, testing and debugging. Therefore, add myself as a co-maintainer of the V3D driver. Signed-off-by: Maíra Canal Acked-by: Melissa Wen Link: https://patchwork.freedesktop.org/patch/msgid/20231106134201.725805-1-mcanal@igalia.com --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 4e1a63479cad4..ec79843f842aa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7105,6 +7105,7 @@ F: drivers/gpu/drm/omapdrm/ DRM DRIVERS FOR V3D M: Melissa Wen +M: Maíra Canal S: Supported T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml From 8eb80946ab0c18a853be5f90d6b6ccbe3fd42989 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 31 Oct 2023 12:16:38 +0200 Subject: [PATCH 068/117] drm/edid: split out drm_eld.h from drm_edid.h The drm_edid.[ch] files are starting to be a bit crowded, and with plans to add more ELD related functionality, it's perhaps cleanest to split the ELD code out to a header of its own. Include drm_eld.h from drm_edid.h for starters, and leave it to follow-up work to only include drm_eld.h where needed. Cc: Mitul Golani Reviewed-by: Mitul Golani Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/0c6d631fa1058036d72dd25d1cabc90a7c52490e.1698747331.git.jani.nikula@intel.com --- Documentation/gpu/drm-kms-helpers.rst | 3 + include/drm/drm_edid.h | 149 +----------------------- include/drm/drm_eld.h | 159 ++++++++++++++++++++++++++ 3 files changed, 163 insertions(+), 148 deletions(-) create mode 100644 include/drm/drm_eld.h diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index b748b8ae70b2a..cfa8e6c399b64 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -363,6 +363,9 @@ EDID Helper Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_edid.c :export: +.. 
kernel-doc:: include/drm/drm_eld.h + :internal: + SCDC Helper Functions Reference =============================== diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 882d2638708e5..1ff52f57ab9cc 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -25,6 +25,7 @@ #include #include +#include /* FIXME: remove this, include directly where needed */ #include struct drm_device; @@ -269,64 +270,6 @@ struct detailed_timing { #define DRM_EDID_DSC_MAX_SLICES 0xf #define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f -/* ELD Header Block */ -#define DRM_ELD_HEADER_BLOCK_SIZE 4 - -#define DRM_ELD_VER 0 -# define DRM_ELD_VER_SHIFT 3 -# define DRM_ELD_VER_MASK (0x1f << 3) -# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ -# define DRM_ELD_VER_CANNED (0x1f << 3) - -#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */ - -/* ELD Baseline Block for ELD_Ver == 2 */ -#define DRM_ELD_CEA_EDID_VER_MNL 4 -# define DRM_ELD_CEA_EDID_VER_SHIFT 5 -# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) -# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) -# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) -# define DRM_ELD_MNL_SHIFT 0 -# define DRM_ELD_MNL_MASK (0x1f << 0) - -#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 -# define DRM_ELD_SAD_COUNT_SHIFT 4 -# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) -# define DRM_ELD_CONN_TYPE_SHIFT 2 -# define DRM_ELD_CONN_TYPE_MASK (3 << 2) -# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) -# define DRM_ELD_CONN_TYPE_DP (1 << 2) -# define DRM_ELD_SUPPORTS_AI (1 << 1) -# define DRM_ELD_SUPPORTS_HDCP (1 << 0) - -#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ -# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ - -#define DRM_ELD_SPEAKER 7 -# define DRM_ELD_SPEAKER_MASK 0x7f -# define DRM_ELD_SPEAKER_RLRC (1 << 6) -# define DRM_ELD_SPEAKER_FLRC (1 << 5) -# define DRM_ELD_SPEAKER_RC (1 << 4) -# define DRM_ELD_SPEAKER_RLR (1 << 3) -# define DRM_ELD_SPEAKER_FC (1 << 2) -# define DRM_ELD_SPEAKER_LFE (1 << 1) -# define DRM_ELD_SPEAKER_FLR (1 << 0) - -#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ -# define DRM_ELD_PORT_ID_LEN 8 - -#define DRM_ELD_MANUFACTURER_NAME0 16 -#define DRM_ELD_MANUFACTURER_NAME1 17 - -#define DRM_ELD_PRODUCT_CODE0 18 -#define DRM_ELD_PRODUCT_CODE1 19 - -#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ - -#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) - struct edid { u8 header[8]; /* Vendor & product info */ @@ -409,96 +352,6 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode, enum hdmi_quantization_range rgb_quant_range); -/** - * drm_eld_mnl - Get ELD monitor name length in bytes. - * @eld: pointer to an eld memory structure with mnl set - */ -static inline int drm_eld_mnl(const uint8_t *eld) -{ - return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; -} - -/** - * drm_eld_sad - Get ELD SAD structures. - * @eld: pointer to an eld memory structure with sad_count set - */ -static inline const uint8_t *drm_eld_sad(const uint8_t *eld) -{ - unsigned int ver, mnl; - - ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; - if (ver != 2 && ver != 31) - return NULL; - - mnl = drm_eld_mnl(eld); - if (mnl > 16) - return NULL; - - return eld + DRM_ELD_CEA_SAD(mnl, 0); -} - -/** - * drm_eld_sad_count - Get ELD SAD count. 
- * @eld: pointer to an eld memory structure with sad_count set - */ -static inline int drm_eld_sad_count(const uint8_t *eld) -{ - return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> - DRM_ELD_SAD_COUNT_SHIFT; -} - -/** - * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes - * @eld: pointer to an eld memory structure with mnl and sad_count set - * - * This is a helper for determining the payload size of the baseline block, in - * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. - */ -static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) -{ - return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + - drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; -} - -/** - * drm_eld_size - Get ELD size in bytes - * @eld: pointer to a complete eld memory structure - * - * The returned value does not include the vendor block. It's vendor specific, - * and comprises of the remaining bytes in the ELD memory buffer after - * drm_eld_size() bytes of header and baseline block. - * - * The returned value is guaranteed to be a multiple of 4. - */ -static inline int drm_eld_size(const uint8_t *eld) -{ - return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; -} - -/** - * drm_eld_get_spk_alloc - Get speaker allocation - * @eld: pointer to an ELD memory structure - * - * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER - * field definitions to identify speakers. - */ -static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) -{ - return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; -} - -/** - * drm_eld_get_conn_type - Get device type hdmi/dp connected - * @eld: pointer to an ELD memory structure - * - * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to - * identify the display type connected. - */ -static inline u8 drm_eld_get_conn_type(const uint8_t *eld) -{ - return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; -} - /** * drm_edid_decode_mfg_id - Decode the manufacturer ID * @mfg_id: The manufacturer ID diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h new file mode 100644 index 0000000000000..9bde89bd96eaf --- /dev/null +++ b/include/drm/drm_eld.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __DRM_ELD_H__ +#define __DRM_ELD_H__ + +#include + +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_MASK 0x7f +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const uint8_t *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const uint8_t *drm_eld_sad(const uint8_t *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const uint8_t *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. 
+ */ +static inline int drm_eld_size(const uint8_t *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +/** + * drm_eld_get_spk_alloc - Get speaker allocation + * @eld: pointer to an ELD memory structure + * + * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER + * field definitions to identify speakers. + */ +static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +{ + return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; +} + +/** + * drm_eld_get_conn_type - Get device type hdmi/dp connected + * @eld: pointer to an ELD memory structure + * + * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to + * identify the display type connected. + */ +static inline u8 drm_eld_get_conn_type(const uint8_t *eld) +{ + return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; +} + +#endif /* __DRM_ELD_H__ */ From 533914536bf5cb5984755244f5aa13cf93cc84d3 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 31 Oct 2023 12:16:39 +0200 Subject: [PATCH 069/117] drm/eld: replace uint8_t with u8 Unify on kernel types. Cc: Mitul Golani Reviewed-by: Chaitanya Kumar Borah Reviewed-by: Mitul Golani Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/6e048fc4c8a3ebec638ce27b0b8b969a3d2fa8bc.1698747331.git.jani.nikula@intel.com --- include/drm/drm_eld.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h index 9bde89bd96eaf..7b674256b9aa0 100644 --- a/include/drm/drm_eld.h +++ b/include/drm/drm_eld.h @@ -70,7 +70,7 @@ * drm_eld_mnl - Get ELD monitor name length in bytes. * @eld: pointer to an eld memory structure with mnl set */ -static inline int drm_eld_mnl(const uint8_t *eld) +static inline int drm_eld_mnl(const u8 *eld) { return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; } @@ -79,7 +79,7 @@ static inline int drm_eld_mnl(const uint8_t *eld) * drm_eld_sad - Get ELD SAD structures. * @eld: pointer to an eld memory structure with sad_count set */ -static inline const uint8_t *drm_eld_sad(const uint8_t *eld) +static inline const u8 *drm_eld_sad(const u8 *eld) { unsigned int ver, mnl; @@ -98,7 +98,7 @@ static inline const uint8_t *drm_eld_sad(const uint8_t *eld) * drm_eld_sad_count - Get ELD SAD count. * @eld: pointer to an eld memory structure with sad_count set */ -static inline int drm_eld_sad_count(const uint8_t *eld) +static inline int drm_eld_sad_count(const u8 *eld) { return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> DRM_ELD_SAD_COUNT_SHIFT; @@ -111,7 +111,7 @@ static inline int drm_eld_sad_count(const uint8_t *eld) * This is a helper for determining the payload size of the baseline block, in * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. */ -static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) +static inline int drm_eld_calc_baseline_block_size(const u8 *eld) { return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; @@ -127,7 +127,7 @@ static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) * * The returned value is guaranteed to be a multiple of 4. */ -static inline int drm_eld_size(const uint8_t *eld) +static inline int drm_eld_size(const u8 *eld) { return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; } @@ -139,7 +139,7 @@ static inline int drm_eld_size(const uint8_t *eld) * The returned value is the speakers mask. 
User has to use %DRM_ELD_SPEAKER * field definitions to identify speakers. */ -static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) +static inline u8 drm_eld_get_spk_alloc(const u8 *eld) { return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK; } @@ -151,7 +151,7 @@ static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld) * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to * identify the display type connected. */ -static inline u8 drm_eld_get_conn_type(const uint8_t *eld) +static inline u8 drm_eld_get_conn_type(const u8 *eld) { return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK; } From 439590ace7755657523a1a0230c6099cb0a6e15f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 31 Oct 2023 12:16:40 +0200 Subject: [PATCH 070/117] drm/edid: include drm_eld.h only where required Reduce the dependencies on drm_eld.h. Some files might be able to drop the dependency on drm_edid.h too with the direct inclusion of drm_eld.h. Cc: Mitul Golani Reviewed-by: Mitul Golani Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/9f5963ce900d747f3279312c0cd1da599fd83f94.1698747331.git.jani.nikula@intel.com --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + drivers/gpu/drm/drm_edid.c | 1 + drivers/gpu/drm/i915/display/intel_audio.c | 1 + drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 1 + drivers/gpu/drm/i915/display/intel_sdvo.c | 1 + drivers/gpu/drm/nouveau/dispnv50/disp.c | 1 + drivers/gpu/drm/radeon/radeon_audio.c | 1 + drivers/gpu/drm/tegra/hdmi.c | 1 + drivers/gpu/drm/tegra/sor.c | 1 + include/drm/drm_edid.h | 1 - sound/core/pcm_drm_eld.c | 1 + sound/soc/codecs/hdac_hdmi.c | 1 + sound/soc/codecs/hdmi-codec.c | 1 + sound/x86/intel_hdmi_audio.c | 1 + 14 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c6fd34bab3585..42fa2e142712d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -87,6 +87,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ec1cb4890acb5..85f719f67b504 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 19605264a35c3..39f5b698e08a0 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -25,6 +25,7 @@ #include #include +#include #include #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 66fe880af8f3f..2d15e82c0b3d3 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -4,6 +4,7 @@ */ #include +#include #include "i915_drv.h" #include "intel_crtc_state_dump.h" diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 950ba0431f5f0..f967fe06f5496 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "i915_drv.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 
a0ac8c258d9ff..334ca7f919f29 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index d6ccaf24ee0c7..279bf130a18c4 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -26,6 +26,7 @@ #include #include +#include #include "dce6_afmt.h" #include "evergreen_hdmi.h" #include "radeon.h" diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 0ba3ca3ac509d..a1fcee665023b 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index d5a3d3f4fece4..83341576630d8 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 1ff52f57ab9cc..e98aa68187007 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -25,7 +25,6 @@ #include #include -#include /* FIXME: remove this, include directly where needed */ #include struct drm_device; diff --git a/sound/core/pcm_drm_eld.c b/sound/core/pcm_drm_eld.c index 07075071972dd..1cdca4d4fc9cd 100644 --- a/sound/core/pcm_drm_eld.c +++ b/sound/core/pcm_drm_eld.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 8b6b760296948..d1b53fc1efb66 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 13689e718d36f..9b4e9192cd79a 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -17,6 +17,7 @@ #include #include /* This is only to get MAX_ELD_BYTES */ +#include #define HDMI_CODEC_CHMAP_IDX_UNKNOWN -1 diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index ab95fb34a6358..02f5a7f9b7288 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include "intel_hdmi_audio.h" From e8d0b2c06fd779709baea71d5e8bfd99b2116518 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 31 Oct 2023 12:16:41 +0200 Subject: [PATCH 071/117] drm/edid: use a temp variable for sads to drop one level of dereferences Use a temporary variable struct cea_sad *, instead of using struct cea_sad ** directly with the double dereferences. It's arguably easier on the eyes, and drops a set of parenthesis too. 
Cc: Mitul Golani Reviewed-by: Mitul Golani Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/b6e2f295ae5491c2bb0f528508f0f5fca921dc77.1698747331.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 85f719f67b504..4f3e7e855f639 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5591,7 +5591,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, } static int _drm_edid_to_sad(const struct drm_edid *drm_edid, - struct cea_sad **sads) + struct cea_sad **psads) { const struct cea_db *db; struct cea_db_iter iter; @@ -5600,19 +5600,21 @@ static int _drm_edid_to_sad(const struct drm_edid *drm_edid, cea_db_iter_edid_begin(drm_edid, &iter); cea_db_iter_for_each(db, &iter) { if (cea_db_tag(db) == CTA_DB_AUDIO) { + struct cea_sad *sads; int j; count = cea_db_payload_len(db) / 3; /* SAD is 3B */ - *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL); - if (!*sads) + sads = kcalloc(count, sizeof(*sads), GFP_KERNEL); + *psads = sads; + if (!sads) return -ENOMEM; for (j = 0; j < count; j++) { const u8 *sad = &db->data[j * 3]; - (*sads)[j].format = (sad[0] & 0x78) >> 3; - (*sads)[j].channels = sad[0] & 0x7; - (*sads)[j].freq = sad[1] & 0x7F; - (*sads)[j].byte2 = sad[2]; + sads[j].format = (sad[0] & 0x78) >> 3; + sads[j].channels = sad[0] & 0x7; + sads[j].freq = sad[1] & 0x7F; + sads[j].byte2 = sad[2]; } break; } From 8af4681189e58a51be8a0fc9f0687e615cdb82c9 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 31 Oct 2023 12:16:42 +0200 Subject: [PATCH 072/117] drm/edid: add helpers to get/set struct cea_sad from/to 3-byte sad Add helpers to pack/unpack SADs. Both ways and non-static, as follow-up work needs them. v2: Add include to get the declarations Cc: Mitul Golani Reviewed-by: Mitul Golani Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/21d657ca854ce26423b461c0bb71e7a0727ba437.1698747331.git.jani.nikula@intel.com --- drivers/gpu/drm/drm_edid.c | 34 +++++++++++++++++++++++++--------- drivers/gpu/drm/drm_internal.h | 6 ++++++ 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4f3e7e855f639..9619f91c8331f 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -46,6 +46,7 @@ #include #include "drm_crtc_internal.h" +#include "drm_internal.h" static int oui(u8 first, u8 second, u8 third) { @@ -5507,6 +5508,27 @@ static void clear_eld(struct drm_connector *connector) connector->audio_latency[1] = 0; } +/* + * Get 3-byte SAD buffer from struct cea_sad. + */ +void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad) +{ + sad[0] = cta_sad->format << 3 | cta_sad->channels; + sad[1] = cta_sad->freq; + sad[2] = cta_sad->byte2; +} + +/* + * Set struct cea_sad from 3-byte SAD buffer. 
+ */ +void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad) +{ + cta_sad->format = (sad[0] & 0x78) >> 3; + cta_sad->channels = sad[0] & 0x07; + cta_sad->freq = sad[1] & 0x7f; + cta_sad->byte2 = sad[2]; +} + /* * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink @@ -5601,21 +5623,15 @@ static int _drm_edid_to_sad(const struct drm_edid *drm_edid, cea_db_iter_for_each(db, &iter) { if (cea_db_tag(db) == CTA_DB_AUDIO) { struct cea_sad *sads; - int j; + int i; count = cea_db_payload_len(db) / 3; /* SAD is 3B */ sads = kcalloc(count, sizeof(*sads), GFP_KERNEL); *psads = sads; if (!sads) return -ENOMEM; - for (j = 0; j < count; j++) { - const u8 *sad = &db->data[j * 3]; - - sads[j].format = (sad[0] & 0x78) >> 3; - sads[j].channels = sad[0] & 0x7; - sads[j].freq = sad[1] & 0x7F; - sads[j].byte2 = sad[2]; - } + for (i = 0; i < count; i++) + drm_edid_cta_sad_set(&sads[i], &db->data[i * 3]); break; } } diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 8462b657c3758..b12c463bc4605 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -22,6 +22,7 @@ */ #include +#include #include #include @@ -31,6 +32,7 @@ #define DRM_IF_VERSION(maj, min) (maj << 16 | min) +struct cea_sad; struct dentry; struct dma_buf; struct iosys_map; @@ -267,3 +269,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent, const struct drm_framebuffer *fb); void drm_framebuffer_debugfs_init(struct drm_device *dev); + +/* drm_edid.c */ +void drm_edid_cta_sad_get(const struct cea_sad *cta_sad, u8 *sad); +void drm_edid_cta_sad_set(struct cea_sad *cta_sad, const u8 *sad); From f415a6078f640ab15bae34d3c6a1d8e6071363de Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 31 Oct 2023 12:16:43 +0200 Subject: [PATCH 073/117] drm/eld: add helpers to modify the SADs of an ELD Occasionally it's necessary for drivers to modify the SADs of an ELD, but it's not so cool to have drivers poke at the ELD buffer directly. Using the helpers to translate between 3-byte SAD and struct cea_sad, add ELD helpers to get/set the SADs from/to an ELD. v2: s/i/sad_index/ (Mitul) Cc: Mitul Golani Signed-off-by: Jani Nikula Reviewed-by: Mitul Golani Link: https://patchwork.freedesktop.org/patch/msgid/8e9a05f2b1e0dd184132d636e1e778e8917ec25d.1698747331.git.jani.nikula@intel.com --- Documentation/gpu/drm-kms-helpers.rst | 3 ++ drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/drm_eld.c | 55 +++++++++++++++++++++++++++ include/drm/drm_eld.h | 5 +++ 4 files changed, 64 insertions(+) create mode 100644 drivers/gpu/drm/drm_eld.c diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index cfa8e6c399b64..59cfe8a7a8bac 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -366,6 +366,9 @@ EDID Helper Functions Reference .. kernel-doc:: include/drm/drm_eld.h :internal: +.. 
kernel-doc:: drivers/gpu/drm/drm_eld.c + :export: + SCDC Helper Functions Reference =============================== diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 8e1bde059170e..cdbe91ac0bfc8 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -22,6 +22,7 @@ drm-y := \ drm_drv.o \ drm_dumb_buffers.o \ drm_edid.o \ + drm_eld.o \ drm_encoder.o \ drm_file.o \ drm_fourcc.o \ diff --git a/drivers/gpu/drm/drm_eld.c b/drivers/gpu/drm/drm_eld.c new file mode 100644 index 0000000000000..5177991aa2726 --- /dev/null +++ b/drivers/gpu/drm/drm_eld.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include +#include + +#include "drm_internal.h" + +/** + * drm_eld_sad_get - get SAD from ELD to struct cea_sad + * @eld: ELD buffer + * @sad_index: SAD index + * @cta_sad: destination struct cea_sad + * + * @return: 0 on success, or negative on errors + */ +int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad) +{ + const u8 *sad; + + if (sad_index >= drm_eld_sad_count(eld)) + return -EINVAL; + + sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index); + + drm_edid_cta_sad_set(cta_sad, sad); + + return 0; +} +EXPORT_SYMBOL(drm_eld_sad_get); + +/** + * drm_eld_sad_set - set SAD to ELD from struct cea_sad + * @eld: ELD buffer + * @sad_index: SAD index + * @cta_sad: source struct cea_sad + * + * @return: 0 on success, or negative on errors + */ +int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad) +{ + u8 *sad; + + if (sad_index >= drm_eld_sad_count(eld)) + return -EINVAL; + + sad = eld + DRM_ELD_CEA_SAD(drm_eld_mnl(eld), sad_index); + + drm_edid_cta_sad_get(cta_sad, sad); + + return 0; +} +EXPORT_SYMBOL(drm_eld_sad_set); diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h index 7b674256b9aa0..0a88d10b28b03 100644 --- a/include/drm/drm_eld.h +++ b/include/drm/drm_eld.h @@ -8,6 +8,8 @@ #include +struct cea_sad; + /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 @@ -75,6 +77,9 @@ static inline int drm_eld_mnl(const u8 *eld) return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; } +int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad); +int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad); + /** * drm_eld_sad - Get ELD SAD structures. * @eld: pointer to an eld memory structure with sad_count set From f3123c2590005c5ff631653d31428e40cd10c618 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Thu, 9 Nov 2023 18:53:26 -0500 Subject: [PATCH 074/117] drm/sched: Qualify drm_sched_wakeup() by drm_sched_entity_is_ready() Don't "wake up" the GPU scheduler unless the entity is ready, as well as we can queue to the scheduler, i.e. there is no point in waking up the scheduler for the entity unless the entity is ready. 
Signed-off-by: Luben Tuikov Fixes: bc8d6a9df99038 ("drm/sched: Don't disturb the entity when in RR-mode scheduling") Reviewed-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231110000123.72565-2-ltuikov89@gmail.com --- drivers/gpu/drm/scheduler/sched_entity.c | 4 ++-- drivers/gpu/drm/scheduler/sched_main.c | 8 +++++--- include/drm/gpu_scheduler.h | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index f1db63cc81981..4d42b1e4daa67 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -370,7 +370,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, container_of(cb, struct drm_sched_entity, cb); drm_sched_entity_clear_dep(f, cb); - drm_sched_wakeup(entity->rq->sched); + drm_sched_wakeup(entity->rq->sched, entity); } /** @@ -602,7 +602,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job) if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) drm_sched_rq_update_fifo(entity, submit_ts); - drm_sched_wakeup(entity->rq->sched); + drm_sched_wakeup(entity->rq->sched, entity); } } EXPORT_SYMBOL(drm_sched_entity_push_job); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index cd0dc3f81d05f..8f5e466bd5823 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -925,10 +925,12 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) * * Wake up the scheduler if we can queue jobs. */ -void drm_sched_wakeup(struct drm_gpu_scheduler *sched) +void drm_sched_wakeup(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity) { - if (drm_sched_can_queue(sched)) - drm_sched_run_job_queue(sched); + if (drm_sched_entity_is_ready(entity)) + if (drm_sched_can_queue(sched)) + drm_sched_run_job_queue(sched); } /** diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 754fd2217334e..09916c84703f5 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -559,7 +559,7 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched); void drm_sched_job_cleanup(struct drm_sched_job *job); -void drm_sched_wakeup(struct drm_gpu_scheduler *sched); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity); bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched); void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched); void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched); From fc6e7679296530106ee0954e8ddef1aa58b2e0b5 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Tue, 7 Nov 2023 12:41:51 -0800 Subject: [PATCH 075/117] drm/panel-edp: drm/panel-edp: Fix AUO B116XAK01 name and timing Rename AUO 0x405c B116XAK01 to B116XAK01.0 and adjust the timing of auo_b116xak01: T3=200, T12=500, T7_max = 50 according to decoding edid and datasheet. 
Fixes: da458286a5e2 ("drm/panel: Add support for AUO B116XAK01 panel") Cc: stable@vger.kernel.org Signed-off-by: Hsin-Yi Wang Reviewed-by: Douglas Anderson Acked-by: Maxime Ripard Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231107204611.3082200-2-hsinyi@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 9dce4c7024142..2fba7c1f49ce0 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -973,6 +973,8 @@ static const struct panel_desc auo_b116xak01 = { }, .delay = { .hpd_absent = 200, + .unprepare = 500, + .enable = 50, }, }; @@ -1870,7 +1872,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), From 962845c090c4f85fa4f6872a5b6c89ee61f53cc0 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Tue, 7 Nov 2023 12:41:52 -0800 Subject: [PATCH 076/117] drm/panel-edp: drm/panel-edp: Fix AUO B116XTN02 name Rename AUO 0x235c B116XTN02 to B116XTN02.3 according to decoding edid. Fixes: 3db2420422a5 ("drm/panel-edp: Add AUO B116XTN02, BOE NT116WHM-N21,836X2, NV116WHM-N49 V8.0") Cc: stable@vger.kernel.org Signed-off-by: Hsin-Yi Wang Reviewed-by: Douglas Anderson Acked-by: Maxime Ripard Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231107204611.3082200-3-hsinyi@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 2fba7c1f49ce0..d41d205f7f5bf 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1871,7 +1871,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), - EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), From 4d53cf81479500d7af787fe6bc881c24ec31f005 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Tue, 7 Nov 2023 12:41:53 -0800 Subject: [PATCH 077/117] drm/panel-edp: drm/panel-edp: Add several generic edp panels Add a few generic edp panels used by mt8186 chromebooks. 
Signed-off-by: Hsin-Yi Wang Reviewed-by: Douglas Anderson Acked-by: Maxime Ripard Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20231107204611.3082200-4-hsinyi@chromium.org --- drivers/gpu/drm/panel/panel-edp.c | 51 +++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index d41d205f7f5bf..599a949d74d16 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1832,6 +1832,12 @@ static const struct panel_delay delay_200_500_e50 = { .enable = 50, }; +static const struct panel_delay delay_200_500_e80 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 80, +}; + static const struct panel_delay delay_200_500_e80_d50 = { .hpd_absent = 200, .unprepare = 500, @@ -1851,6 +1857,19 @@ static const struct panel_delay delay_200_500_e200 = { .enable = 200, }; +static const struct panel_delay delay_200_500_e200_d10 = { + .hpd_absent = 200, + .unprepare = 500, + .enable = 200, + .disable = 10, +}; + +static const struct panel_delay delay_200_150_e200 = { + .hpd_absent = 200, + .unprepare = 150, + .enable = 200, +}; + #define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \ { \ .name = _name, \ @@ -1871,37 +1890,69 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x639c, &delay_200_500_e50, "B140HAK02.7"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0715, &delay_200_150_e200, "NT116WHM-N21"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0731, &delay_200_500_e80, "NT116WHM-N42"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0741, &delay_200_500_e200, "NT116WHM-N44"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x07f6, &delay_200_500_e200, "NT140FHM-N44"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x08b2, &delay_200_500_e200, "NT140WHM-N49"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x09c3, &delay_200_500_e50, "NT116WHM-N21,836X2"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"), 
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x09ae, &delay_200_500_e200, "NT140FHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1138, &innolux_n116bca_ea1.delay, "N116BCA-EA1-RC4"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1145, &delay_200_500_e80_d50, "N116BCN-EB1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1157, &delay_200_500_e80_d50, "N116BGE-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"), + + EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5c, &delay_200_500_e200, "MB116AN01-2"), + EDP_PANEL_ENTRY('I', 'V', 'O', 0x048e, &delay_200_500_e200_d10, "M116NWR6 R5"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"), + EDP_PANEL_ENTRY('I', 'V', 'O', 0x8c4d, &delay_200_150_e200, "R140NWFM R1"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"), + EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), From 36245bd02e88e68ac5955c2958c968879d7b75a9 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Thu, 9 Nov 2023 19:11:46 -0500 Subject: [PATCH 078/117] drm/sched: Define pr_fmt() for DRM using pr_*() Define pr_fmt() as "[drm] " for DRM code using pr_*() facilities, especially when no devices are available. This makes it easier to browse kernel logs. 
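For illustration, a minimal userspace sketch of how such a prefix composes with pr_*()-style macros (stand-in macros only, not the kernel's printk implementation):

  #include <stdio.h>

  #define pr_fmt(fmt) "[drm] " fmt
  #define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

  int main(void)
  {
          /* Prints: [drm] *ERROR* fence timeout on ring 2 */
          pr_err("*ERROR* fence timeout on ring %d\n", 2);
          return 0;
  }

Because pr_fmt() expands before the adjacent string literals are concatenated by the compiler, every pr_err()/pr_warn()/pr_info() call in a file that picks up this definition gets the "[drm] " prefix without any per-call changes.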
Signed-off-by: Luben Tuikov Acked-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231110002659.113208-2-ltuikov89@gmail.com --- include/drm/drm_print.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index a93a387f8a1a1..e8fe60d0eb878 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -26,6 +26,20 @@ #ifndef DRM_PRINT_H_ #define DRM_PRINT_H_ +/* Define this before including linux/printk.h, so that the format + * string in pr_*() macros is correctly set for DRM. If a file wants + * to define this to something else, it should do so before including + * this header file. + * + * It is encouraged code using pr_err() to prefix their format with + * the string "*ERROR* ", to make it easier to scan kernel logs. For + * instance, + * pr_err("*ERROR* ", args). + */ +#ifndef pr_fmt +#define pr_fmt(fmt) "[drm] " fmt +#endif + #include #include #include From a78422e9dff366b3a46ae44caf6ec8ded9c9fc2f Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Fri, 10 Nov 2023 01:16:33 +0100 Subject: [PATCH 079/117] drm/sched: implement dynamic job-flow control Currently, job flow control is implemented simply by limiting the number of jobs in flight. Therefore, a scheduler is initialized with a credit limit that corresponds to the number of jobs which can be sent to the hardware. This implies that for each job, drivers need to account for the maximum job size possible in order to not overflow the ring buffer. However, there are drivers, such as Nouveau, where the job size has a rather large range. For such drivers it can easily happen that job submissions not even filling the ring by 1% can block subsequent submissions, which, in the worst case, can lead to the ring run dry. In order to overcome this issue, allow for tracking the actual job size instead of the number of jobs. Therefore, add a field to track a job's credit count, which represents the number of credits a job contributes to the scheduler's credit limit. Signed-off-by: Danilo Krummrich Reviewed-by: Luben Tuikov Link: https://patchwork.freedesktop.org/patch/msgid/20231110001638.71750-1-dakr@redhat.com --- Documentation/gpu/drm-mm.rst | 6 + drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 2 +- drivers/gpu/drm/lima/lima_device.c | 2 +- drivers/gpu/drm/lima/lima_sched.c | 2 +- drivers/gpu/drm/msm/msm_gem_submit.c | 2 +- drivers/gpu/drm/nouveau/nouveau_sched.c | 2 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 2 +- drivers/gpu/drm/panfrost/panfrost_job.c | 2 +- .../gpu/drm/scheduler/gpu_scheduler_trace.h | 2 +- drivers/gpu/drm/scheduler/sched_main.c | 170 ++++++++++++++---- drivers/gpu/drm/v3d/v3d_gem.c | 2 +- include/drm/gpu_scheduler.h | 28 ++- 14 files changed, 175 insertions(+), 51 deletions(-) diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index 602010cb6894c..acc5901ac8408 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -552,6 +552,12 @@ Overview .. kernel-doc:: drivers/gpu/drm/scheduler/sched_main.c :doc: Overview +Flow Control +------------ + +.. 
kernel-doc:: drivers/gpu/drm/scheduler/sched_main.c + :doc: Flow Control + Scheduler Function References ----------------------------- diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 1f357198533f3..62bb7fc7448ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -115,7 +115,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!entity) return 0; - return drm_sched_job_init(&(*job)->base, entity, owner); + return drm_sched_job_init(&(*job)->base, entity, 1, owner); } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 2416c526f9b06..3d0f8d182506e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -535,7 +535,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, ret = drm_sched_job_init(&submit->sched_job, &ctx->sched_entity[args->pipe], - submit->ctx); + 1, submit->ctx); if (ret) goto err_submit_put; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 9276756e1397d..5105d290e72e2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1917,7 +1917,7 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev) u32 idle, mask; /* If there are any jobs in the HW queue, we're not idle */ - if (atomic_read(&gpu->sched.hw_rq_count)) + if (atomic_read(&gpu->sched.credit_count)) return -EBUSY; /* Check whether the hardware (except FE and MC) is idle */ diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 02cef0cea6572..0bf7105c8748b 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -514,7 +514,7 @@ int lima_device_suspend(struct device *dev) /* check any task running */ for (i = 0; i < lima_pipe_num; i++) { - if (atomic_read(&ldev->pipe[i].base.hw_rq_count)) + if (atomic_read(&ldev->pipe[i].base.credit_count)) return -EBUSY; } diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index aa030e1f7cdae..c3bf8cda84982 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -123,7 +123,7 @@ int lima_sched_task_init(struct lima_sched_task *task, for (i = 0; i < num_bos; i++) drm_gem_object_get(&bos[i]->base.base); - err = drm_sched_job_init(&task->base, &context->base, vm); + err = drm_sched_job_init(&task->base, &context->base, 1, vm); if (err) { kfree(task->bos); return err; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 99744de6c05a1..c002cabe7b9c5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -48,7 +48,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return ERR_PTR(ret); } - ret = drm_sched_job_init(&submit->base, queue->entity, queue); + ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue); if (ret) { kfree(submit->hw_fence); kfree(submit); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index 7e64b5ef90fb2..1b2cc3f2e1c7e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -89,7 +89,7 @@ nouveau_job_init(struct nouveau_job *job, } - ret = drm_sched_job_init(&job->base, &entity->base, NULL); + ret = drm_sched_job_init(&job->base, 
&entity->base, 1, NULL); if (ret) goto err_free_chains; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index b834777b409b0..54d1c19bea84d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -274,7 +274,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, ret = drm_sched_job_init(&job->base, &file_priv->sched_entity[slot], - NULL); + 1, NULL); if (ret) goto out_put_job; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 6d89e24322dbf..f9446e197428d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -963,7 +963,7 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev) for (i = 0; i < NUM_JOB_SLOTS; i++) { /* If there are any jobs in the HW queue, we're not idle */ - if (atomic_read(&js->queue[i].sched.hw_rq_count)) + if (atomic_read(&js->queue[i].sched.credit_count)) return false; } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h index 3143ecaaff862..f8ed093b7356e 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h @@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(drm_sched_job, __assign_str(name, sched_job->sched->name); __entry->job_count = spsc_queue_count(&entity->job_queue); __entry->hw_job_count = atomic_read( - &sched_job->sched->hw_rq_count); + &sched_job->sched->credit_count); ), TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", __entry->entity, __entry->id, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 8f5e466bd5823..044a8c4875ba6 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -48,6 +48,30 @@ * through the jobs entity pointer. */ +/** + * DOC: Flow Control + * + * The DRM GPU scheduler provides a flow control mechanism to regulate the rate + * in which the jobs fetched from scheduler entities are executed. + * + * In this context the &drm_gpu_scheduler keeps track of a driver specified + * credit limit representing the capacity of this scheduler and a credit count; + * every &drm_sched_job carries a driver specified number of credits. + * + * Once a job is executed (but not yet finished), the job's credits contribute + * to the scheduler's credit count until the job is finished. If by executing + * one more job the scheduler's credit count would exceed the scheduler's + * credit limit, the job won't be executed. Instead, the scheduler will wait + * until the credit count has decreased enough to not overflow its credit limit. + * This implies waiting for previously executed jobs. + * + * Optionally, drivers may register a callback (update_job_credits) provided by + * struct drm_sched_backend_ops to update the job's credits dynamically. The + * scheduler executes this callback every time the scheduler considers a job for + * execution and subsequently checks whether the job fits the scheduler's credit + * limit. 
+ */ + #include #include #include @@ -75,6 +99,51 @@ int drm_sched_policy = DRM_SCHED_POLICY_FIFO; MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default)."); module_param_named(sched_policy, drm_sched_policy, int, 0444); +static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched) +{ + u32 credits; + + drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit, + atomic_read(&sched->credit_count), + &credits)); + + return credits; +} + +/** + * drm_sched_can_queue -- Can we queue more to the hardware? + * @sched: scheduler instance + * @entity: the scheduler entity + * + * Return true if we can push at least one more job from @entity, false + * otherwise. + */ +static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity) +{ + struct drm_sched_job *s_job; + + s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); + if (!s_job) + return false; + + if (sched->ops->update_job_credits) { + s_job->credits = sched->ops->update_job_credits(s_job); + + drm_WARN(sched, !s_job->credits, + "Jobs with zero credits bypass job-flow control.\n"); + } + + /* If a job exceeds the credit limit, truncate it to the credit limit + * itself to guarantee forward progress. + */ + if (drm_WARN(sched, s_job->credits > sched->credit_limit, + "Jobs may not exceed the credit limit, truncate.\n")) + s_job->credits = sched->credit_limit; + + return drm_sched_available_credits(sched) >= s_job->credits; +} + static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b) { @@ -186,12 +255,18 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, /** * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run * + * @sched: the gpu scheduler * @rq: scheduler run queue to check. * - * Try to find a ready entity, returns NULL if none found. + * Try to find the next ready entity. + * + * Return an entity if one is found; return an error-pointer (!NULL) if an + * entity was ready, but the scheduler had insufficient credits to accommodate + * its job; return NULL, if no ready entity was found. */ static struct drm_sched_entity * -drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) +drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, + struct drm_sched_rq *rq) { struct drm_sched_entity *entity; @@ -201,6 +276,14 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { if (drm_sched_entity_is_ready(entity)) { + /* If we can't queue yet, preserve the current + * entity in terms of fairness. + */ + if (!drm_sched_can_queue(sched, entity)) { + spin_unlock(&rq->lock); + return ERR_PTR(-ENOSPC); + } + rq->current_entity = entity; reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); @@ -210,8 +293,15 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) } list_for_each_entry(entity, &rq->entities, list) { - if (drm_sched_entity_is_ready(entity)) { + /* If we can't queue yet, preserve the current entity in + * terms of fairness. 
+ */ + if (!drm_sched_can_queue(sched, entity)) { + spin_unlock(&rq->lock); + return ERR_PTR(-ENOSPC); + } + rq->current_entity = entity; reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); @@ -230,12 +320,18 @@ drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq) /** * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run * + * @sched: the gpu scheduler * @rq: scheduler run queue to check. * - * Find oldest waiting ready entity, returns NULL if none found. + * Find oldest waiting ready entity. + * + * Return an entity if one is found; return an error-pointer (!NULL) if an + * entity was ready, but the scheduler had insufficient credits to accommodate + * its job; return NULL, if no ready entity was found. */ static struct drm_sched_entity * -drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) +drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched, + struct drm_sched_rq *rq) { struct rb_node *rb; @@ -245,6 +341,14 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq) entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node); if (drm_sched_entity_is_ready(entity)) { + /* If we can't queue yet, preserve the current entity in + * terms of fairness. + */ + if (!drm_sched_can_queue(sched, entity)) { + spin_unlock(&rq->lock); + return ERR_PTR(-ENOSPC); + } + rq->current_entity = entity; reinit_completion(&entity->entity_idle); break; @@ -302,7 +406,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result) struct drm_sched_fence *s_fence = s_job->s_fence; struct drm_gpu_scheduler *sched = s_fence->sched; - atomic_dec(&sched->hw_rq_count); + atomic_sub(s_job->credits, &sched->credit_count); atomic_dec(sched->score); trace_drm_sched_process_job(s_fence); @@ -525,7 +629,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) &s_job->cb)) { dma_fence_put(s_job->s_fence->parent); s_job->s_fence->parent = NULL; - atomic_dec(&sched->hw_rq_count); + atomic_sub(s_job->credits, &sched->credit_count); } else { /* * remove job from pending_list. @@ -586,7 +690,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { struct dma_fence *fence = s_job->s_fence->parent; - atomic_inc(&sched->hw_rq_count); + atomic_add(s_job->credits, &sched->credit_count); if (!full_recovery) continue; @@ -667,6 +771,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); * drm_sched_job_init - init a scheduler job * @job: scheduler job to init * @entity: scheduler entity to use + * @credits: the number of credits this job contributes to the schedulers + * credit limit * @owner: job owner for debugging * * Refer to drm_sched_entity_push_job() documentation @@ -684,7 +790,7 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs); */ int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, - void *owner) + u32 credits, void *owner) { if (!entity->rq) { /* This will most likely be followed by missing frames @@ -695,7 +801,13 @@ int drm_sched_job_init(struct drm_sched_job *job, return -ENOENT; } + if (unlikely(!credits)) { + pr_err("*ERROR* %s: credits cannot be 0!\n", __func__); + return -EINVAL; + } + job->entity = entity; + job->credits = credits; job->s_fence = drm_sched_fence_alloc(entity, owner); if (!job->s_fence) return -ENOMEM; @@ -907,21 +1019,10 @@ void drm_sched_job_cleanup(struct drm_sched_job *job) } EXPORT_SYMBOL(drm_sched_job_cleanup); -/** - * drm_sched_can_queue -- Can we queue more to the hardware? 
- * @sched: scheduler instance - * - * Return true if we can push more jobs to the hw, otherwise false. - */ -static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched) -{ - return atomic_read(&sched->hw_rq_count) < - sched->hw_submission_limit; -} - /** * drm_sched_wakeup - Wake up the scheduler if it is ready to queue * @sched: scheduler instance + * @entity: the scheduler entity * * Wake up the scheduler if we can queue jobs. */ @@ -929,7 +1030,7 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity) { if (drm_sched_entity_is_ready(entity)) - if (drm_sched_can_queue(sched)) + if (drm_sched_can_queue(sched, entity)) drm_sched_run_job_queue(sched); } @@ -938,7 +1039,11 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched, * * @sched: scheduler instance * - * Returns the entity to process or NULL if none are found. + * Return an entity to process or NULL if none are found. + * + * Note, that we break out of the for-loop when "entity" is non-null, which can + * also be an error-pointer--this assures we don't process lower priority + * run-queues. See comments in the respectively called functions. */ static struct drm_sched_entity * drm_sched_select_entity(struct drm_gpu_scheduler *sched) @@ -946,19 +1051,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) struct drm_sched_entity *entity; int i; - if (!drm_sched_can_queue(sched)) - return NULL; - /* Kernel run queue has higher priority than normal run queue*/ for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ? - drm_sched_rq_select_entity_fifo(sched->sched_rq[i]) : - drm_sched_rq_select_entity_rr(sched->sched_rq[i]); + drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) : + drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]); if (entity) break; } - return entity; + return IS_ERR(entity) ? NULL : entity; } /** @@ -1094,7 +1196,7 @@ static void drm_sched_run_job_work(struct work_struct *w) s_fence = sched_job->s_fence; - atomic_inc(&sched->hw_rq_count); + atomic_add(sched_job->credits, &sched->credit_count); drm_sched_job_begin(sched_job); trace_drm_run_job(sched_job, entity); @@ -1129,7 +1231,7 @@ static void drm_sched_run_job_work(struct work_struct *w) * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is * allocated and used * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT - * @hw_submission: number of hw submissions that can be in flight + * @credit_limit: the number of credits this scheduler can hold from all jobs * @hang_limit: number of times to allow a job to hang before dropping it * @timeout: timeout value in jiffies for the scheduler * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is @@ -1143,14 +1245,14 @@ static void drm_sched_run_job_work(struct work_struct *w) int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, struct workqueue_struct *submit_wq, - u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit, + u32 num_rqs, u32 credit_limit, unsigned int hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev) { int i, ret; sched->ops = ops; - sched->hw_submission_limit = hw_submission; + sched->credit_limit = credit_limit; sched->name = name; sched->timeout = timeout; sched->timeout_wq = timeout_wq ? 
: system_wq; @@ -1199,7 +1301,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, init_waitqueue_head(&sched->job_scheduled); INIT_LIST_HEAD(&sched->pending_list); spin_lock_init(&sched->job_list_lock); - atomic_set(&sched->hw_rq_count, 0); + atomic_set(&sched->credit_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); INIT_WORK(&sched->work_run_job, drm_sched_run_job_work); INIT_WORK(&sched->work_free_job, drm_sched_free_job_work); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 712675134c048..9d2ac23c29e33 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -418,7 +418,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, job->file = file_priv; ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], - v3d_priv); + 1, v3d_priv); if (ret) goto fail; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 09916c84703f5..1d60eab747de4 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -321,6 +321,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * @sched: the scheduler instance on which this job is scheduled. * @s_fence: contains the fences for the scheduling of job. * @finish_cb: the callback for the finished fence. + * @credits: the number of credits this job contributes to the scheduler * @work: Helper to reschdeule job kill to different context. * @id: a unique id assigned to each job scheduled on the scheduler. * @karma: increment on every hang caused by this job. If this exceeds the hang @@ -340,6 +341,8 @@ struct drm_sched_job { struct drm_gpu_scheduler *sched; struct drm_sched_fence *s_fence; + u32 credits; + /* * work is used only after finish_cb has been used and will not be * accessed anymore. @@ -463,13 +466,27 @@ struct drm_sched_backend_ops { * and it's time to clean it up. */ void (*free_job)(struct drm_sched_job *sched_job); + + /** + * @update_job_credits: Called when the scheduler is considering this + * job for execution. + * + * This callback returns the number of credits the job would take if + * pushed to the hardware. Drivers may use this to dynamically update + * the job's credit count. For instance, deduct the number of credits + * for already signalled native fences. + * + * This callback is optional. + */ + u32 (*update_job_credits)(struct drm_sched_job *sched_job); }; /** * struct drm_gpu_scheduler - scheduler instance-specific data * * @ops: backend operations provided by the driver. - * @hw_submission_limit: the max size of the hardware queue. + * @credit_limit: the credit limit of this scheduler + * @credit_count: the current credit count of this scheduler * @timeout: the time after which a job is removed from the scheduler. * @name: name of the ring for which this scheduler is being used. * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT, @@ -478,7 +495,6 @@ struct drm_sched_backend_ops { * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler * waits on this wait queue until all the scheduled jobs are * finished. - * @hw_rq_count: the number of jobs currently in the hardware queue. * @job_id_count: used to assign unique id to the each job. 
* @submit_wq: workqueue used to queue @work_run_job and @work_free_job * @timeout_wq: workqueue used to queue @work_tdr @@ -502,13 +518,13 @@ struct drm_sched_backend_ops { */ struct drm_gpu_scheduler { const struct drm_sched_backend_ops *ops; - uint32_t hw_submission_limit; + u32 credit_limit; + atomic_t credit_count; long timeout; const char *name; u32 num_rqs; struct drm_sched_rq **sched_rq; wait_queue_head_t job_scheduled; - atomic_t hw_rq_count; atomic64_t job_id_count; struct workqueue_struct *submit_wq; struct workqueue_struct *timeout_wq; @@ -530,14 +546,14 @@ struct drm_gpu_scheduler { int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, struct workqueue_struct *submit_wq, - u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit, + u32 num_rqs, u32 credit_limit, unsigned int hang_limit, long timeout, struct workqueue_struct *timeout_wq, atomic_t *score, const char *name, struct device *dev); void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, - void *owner); + u32 credits, void *owner); void drm_sched_job_arm(struct drm_sched_job *job); int drm_sched_job_add_dependency(struct drm_sched_job *job, struct dma_fence *fence); From 22aa1a209018dc2eca78745f7666db63637cd5dc Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 2 Nov 2023 15:15:07 +0100 Subject: [PATCH 080/117] drm/panfrost: Really power off GPU cores in panfrost_gpu_power_off() The layout of the registers {TILER,SHADER,L2}_PWROFF_LO, used to request powering off cores, is the same as the {TILER,SHADER,L2}_PWRON_LO ones: this means that in order to request poweroff of cores, we are supposed to write a bitmask of cores that should be powered off! This means that the panfrost_gpu_power_off() function has always been doing nothing. Fix powering off the GPU by writing a bitmask of the cores to poweroff to the relevant PWROFF_LO registers and then check that the transition (from ON to OFF) has finished by polling the relevant PWRTRANS_LO registers. While at it, in order to avoid code duplication, move the core mask logic from panfrost_gpu_power_on() to a new panfrost_get_core_mask() function, used in both poweron and poweroff. Fixes: f3ba91228e8e ("drm/panfrost: Add initial panfrost driver") Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231102141507.73481-1-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_gpu.c | 64 ++++++++++++++++++------- 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index f0be7e19b13ed..97f097ee5c1da 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -362,28 +362,38 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev) return ((u64)hi << 32) | lo; } +static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) +{ + u64 core_mask; + + if (pfdev->features.l2_present == 1) + return U64_MAX; + + /* + * Only support one core group now. + * ~(l2_present - 1) unsets all bits in l2_present except + * the bottom bit. (l2_present - 2) has all the bits in + * the first core group set. AND them together to generate + * a mask of cores in the first core group. 
+ */ + core_mask = ~(pfdev->features.l2_present - 1) & + (pfdev->features.l2_present - 2); + dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", + hweight64(core_mask), + hweight64(pfdev->features.shader_present)); + + return core_mask; +} + void panfrost_gpu_power_on(struct panfrost_device *pfdev) { int ret; u32 val; - u64 core_mask = U64_MAX; + u64 core_mask; panfrost_gpu_init_quirks(pfdev); + core_mask = panfrost_get_core_mask(pfdev); - if (pfdev->features.l2_present != 1) { - /* - * Only support one core group now. - * ~(l2_present - 1) unsets all bits in l2_present except - * the bottom bit. (l2_present - 2) has all the bits in - * the first core group set. AND them together to generate - * a mask of cores in the first core group. - */ - core_mask = ~(pfdev->features.l2_present - 1) & - (pfdev->features.l2_present - 2); - dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", - hweight64(core_mask), - hweight64(pfdev->features.shader_present)); - } gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask); ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, val, val == (pfdev->features.l2_present & core_mask), @@ -408,9 +418,27 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) void panfrost_gpu_power_off(struct panfrost_device *pfdev) { - gpu_write(pfdev, TILER_PWROFF_LO, 0); - gpu_write(pfdev, SHADER_PWROFF_LO, 0); - gpu_write(pfdev, L2_PWROFF_LO, 0); + u64 core_mask = panfrost_get_core_mask(pfdev); + int ret; + u32 val; + + gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present & core_mask); + ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, + val, !val, 1, 1000); + if (ret) + dev_err(pfdev->dev, "shader power transition timeout"); + + gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present); + ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO, + val, !val, 1, 1000); + if (ret) + dev_err(pfdev->dev, "tiler power transition timeout"); + + gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present & core_mask); + ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, + val, !val, 0, 1000); + if (ret) + dev_err(pfdev->dev, "l2 power transition timeout"); } int panfrost_gpu_init(struct panfrost_device *pfdev) From 57d4e26717b030fd794df3534e6b2e806eb761e4 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 9 Nov 2023 11:25:38 +0100 Subject: [PATCH 081/117] drm/panfrost: Perform hard reset to recover GPU if soft reset fails Even though soft reset should ideally never fail, during development of some power management features I managed to get some bits wrong: this resulted in GPU soft reset failures, where the GPU was never able to recover, not even after suspend/resume cycles, meaning that the only way to get functionality back was to reboot the machine. Perform a hard reset after a soft reset failure to be able to recover the GPU during runtime (so, without any machine reboot). 
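For illustration, the recovery flow added here boils down to the following condensed sketch (error messages and polling parameters as in the change below):

    gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
    ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
                                     val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
    if (ret) {
            /* Soft reset timed out: escalate to a hard reset and poll again. */
            gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
            ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
                                             val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
            if (ret)
                    dev_err(pfdev->dev, "gpu hard reset timed out\n");
    }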
Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231109102543.42971-2-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_gpu.c | 13 ++++++++++--- drivers/gpu/drm/panfrost/panfrost_regs.h | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 97f097ee5c1da..c9469bf7770a4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -60,14 +60,21 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) gpu_write(pfdev, GPU_INT_MASK, 0); gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED); - gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET); + gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET); ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000); if (ret) { - dev_err(pfdev->dev, "gpu soft reset timed out\n"); - return ret; + dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n"); + + gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET); + ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val, + val & GPU_IRQ_RESET_COMPLETED, 100, 10000); + if (ret) { + dev_err(pfdev->dev, "gpu hard reset timed out\n"); + return ret; + } } gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL); diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index 55ec807550b3f..c25743b05c551 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -44,6 +44,7 @@ GPU_IRQ_MULTIPLE_FAULT) #define GPU_CMD 0x30 #define GPU_CMD_SOFT_RESET 0x01 +#define GPU_CMD_HARD_RESET 0x02 #define GPU_CMD_PERFCNT_CLEAR 0x03 #define GPU_CMD_PERFCNT_SAMPLE 0x04 #define GPU_CMD_CYCLE_COUNT_START 0x05 From 4d74420ffcf4c99165908f058066dd242813dc75 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 9 Nov 2023 11:25:39 +0100 Subject: [PATCH 082/117] drm/panfrost: Tighten polling for soft reset and power on In many cases, soft reset takes more than 1 microsecond, but definitely less than 10; moreover in the poweron flow, tilers, shaders and l2 will become ready (each) in less than 10 microseconds as well. Even in the cases (at least on my platforms, rarely) in which those take more than 10 microseconds, it's very unlikely to see both soft reset and poweron to take more than 70 microseconds. Shorten the polling delay to 10 microseconds to consistently reduce the runtime resume time of the GPU. 
As an indicative example, measurements taken on a MediaTek MT8195 SoC Average runtime resume time in nanoseconds before this commit: GDM, user selection up/down: 88435ns GDM, Text Entry (typing user/password): 91489ns GNOME Desktop, idling, GKRELLM running: 73200ns After this commit: GDM: user selection up/down: 26690ns GDM: Text Entry (typing user/password): 27917ns GNOME Desktop, idling, GKRELLM running: 25304ns Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231109102543.42971-3-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_gpu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index c9469bf7770a4..09f5e1563ebd4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -63,7 +63,7 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET); ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, - val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000); + val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000); if (ret) { dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n"); @@ -404,7 +404,7 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask); ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, val, val == (pfdev->features.l2_present & core_mask), - 100, 20000); + 10, 20000); if (ret) dev_err(pfdev->dev, "error powering up gpu L2"); @@ -412,13 +412,13 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) pfdev->features.shader_present & core_mask); ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO, val, val == (pfdev->features.shader_present & core_mask), - 100, 20000); + 10, 20000); if (ret) dev_err(pfdev->dev, "error powering up gpu shader"); gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present); ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO, - val, val == pfdev->features.tiler_present, 100, 1000); + val, val == pfdev->features.tiler_present, 10, 1000); if (ret) dev_err(pfdev->dev, "error powering up gpu tiler"); } From 56e76c0179185568049913257c18069293f8bde9 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 9 Nov 2023 11:25:40 +0100 Subject: [PATCH 083/117] drm/panfrost: Implement ability to turn on/off GPU clocks in suspend Currently, the GPU is being internally powered off for runtime suspend and turned back on for runtime resume through commands sent to it, but note that the GPU doesn't need to be clocked during the poweroff state, hence it is possible to save some power on selected platforms. Add suspend and resume handlers for full system sleep and then add a new panfrost_gpu_pm enumeration and a pm_features variable in the panfrost_compatible structure: BIT(GPU_PM_CLK_DIS) will be used to enable this power saving technique only on SoCs that are able to safely use it. Note that this was implemented only for the system sleep case and not for runtime PM because testing on one of my MediaTek platforms showed issues when turning on and off clocks aggressively (in PM runtime) resulting in a full system lockup. Doing this only for full system sleep never showed issues during my testing by suspending and resuming the system continuously for more than 100 cycles. 
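For reference, the system-sleep suspend path introduced here reduces to the following sketch (the resume path mirrors it; full error handling is in the change below):

    static int panfrost_device_suspend(struct device *dev)
    {
            struct panfrost_device *pfdev = dev_get_drvdata(dev);
            int ret;

            /* Power the GPU down through the existing runtime PM path first. */
            ret = pm_runtime_force_suspend(dev);
            if (ret)
                    return ret;

            /* Only SoCs that opted in via GPU_PM_CLK_DIS gate their clocks. */
            if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) {
                    if (pfdev->bus_clock)
                            clk_disable(pfdev->bus_clock);
                    clk_disable(pfdev->clock);
            }

            return 0;
    }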
Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231109102543.42971-4-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_device.c | 61 ++++++++++++++++++++-- drivers/gpu/drm/panfrost/panfrost_device.h | 11 ++++ 2 files changed, 68 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 28f7046e1b1a4..b4ddbc3b80698 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -403,7 +403,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev) panfrost_job_enable_interrupts(pfdev); } -static int panfrost_device_resume(struct device *dev) +static int panfrost_device_runtime_resume(struct device *dev) { struct panfrost_device *pfdev = dev_get_drvdata(dev); @@ -413,7 +413,7 @@ static int panfrost_device_resume(struct device *dev) return 0; } -static int panfrost_device_suspend(struct device *dev) +static int panfrost_device_runtime_suspend(struct device *dev) { struct panfrost_device *pfdev = dev_get_drvdata(dev); @@ -426,5 +426,58 @@ static int panfrost_device_suspend(struct device *dev) return 0; } -EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend, - panfrost_device_resume, NULL); +static int panfrost_device_resume(struct device *dev) +{ + struct panfrost_device *pfdev = dev_get_drvdata(dev); + int ret; + + if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) { + ret = clk_enable(pfdev->clock); + if (ret) + return ret; + + if (pfdev->bus_clock) { + ret = clk_enable(pfdev->bus_clock); + if (ret) + goto err_bus_clk; + } + } + + ret = pm_runtime_force_resume(dev); + if (ret) + goto err_resume; + + return 0; + +err_resume: + if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS) && pfdev->bus_clock) + clk_disable(pfdev->bus_clock); +err_bus_clk: + if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) + clk_disable(pfdev->clock); + return ret; +} + +static int panfrost_device_suspend(struct device *dev) +{ + struct panfrost_device *pfdev = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + + if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) { + if (pfdev->bus_clock) + clk_disable(pfdev->bus_clock); + + clk_disable(pfdev->clock); + } + + return 0; +} + +EXPORT_GPL_DEV_PM_OPS(panfrost_pm_ops) = { + RUNTIME_PM_OPS(panfrost_device_runtime_suspend, panfrost_device_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(panfrost_device_suspend, panfrost_device_resume) +}; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 1e85656dc2f7f..32453b97c39cb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -25,6 +25,14 @@ struct panfrost_perfcnt; #define NUM_JOB_SLOTS 3 #define MAX_PM_DOMAINS 5 +/** + * enum panfrost_gpu_pm - Supported kernel power management features + * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend + */ +enum panfrost_gpu_pm { + GPU_PM_CLK_DIS, +}; + struct panfrost_features { u16 id; u16 revision; @@ -75,6 +83,9 @@ struct panfrost_compatible { /* Vendor implementation quirks callback */ void (*vendor_quirk)(struct panfrost_device *pfdev); + + /* Allowed PM features */ + u8 pm_features; }; struct panfrost_device { From 32f175d4261a2c940fe7913f509f9f41c105b2b6 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 9 Nov 2023 11:25:41 +0100 
Subject: [PATCH 084/117] drm/panfrost: Set clocks on/off during system sleep on MediaTek SoCs All of the MediaTek SoCs supported by Panfrost can switch the clocks off and on during system sleep to save some power without any user experience penalty. Measurements taken on multiple MediaTek SoCs (MT8183/8186/8192/8195) show that adding this will not prolong the time that is required to resume the system in any meaningful way. As an example, for MT8195 - a "before" with only runtime PM operations (so, without turning on/off GPU clocks), and an "after" executing both the system sleep .resume() handler and .runtime_resume() (so the time refers to T_Resume + T_Runtime_Resume): Average Panfrost-only system sleep resume time, before: ~28000ns Average Panfrost-only system sleep resume time, after: ~33500ns Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231109102543.42971-5-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 54d1c19bea84d..6d2897c03a8f6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -730,6 +730,7 @@ static const struct panfrost_compatible mediatek_mt8183_b_data = { .supply_names = mediatek_mt8183_b_supplies, .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), .pm_domain_names = mediatek_mt8183_pm_domains, + .pm_features = BIT(GPU_PM_CLK_DIS), }; static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" }; @@ -738,6 +739,7 @@ static const struct panfrost_compatible mediatek_mt8186_data = { .supply_names = mediatek_mt8183_b_supplies, .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains), .pm_domain_names = mediatek_mt8186_pm_domains, + .pm_features = BIT(GPU_PM_CLK_DIS), }; static const char * const mediatek_mt8192_supplies[] = { "mali", NULL }; @@ -748,6 +750,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = { .supply_names = mediatek_mt8192_supplies, .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains), .pm_domain_names = mediatek_mt8192_pm_domains, + .pm_features = BIT(GPU_PM_CLK_DIS), }; static const struct of_device_id dt_match[] = { From 889a2b06f823575f4e8def0fe60d294365b0e1f9 Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 9 Nov 2023 11:25:42 +0100 Subject: [PATCH 085/117] drm/panfrost: Implement ability to turn on/off regulators in suspend Some platforms/SoCs can power off the GPU entirely by completely cutting off power, greatly enhancing battery time during system suspend: add a new pm_feature GPU_PM_VREG_OFF to allow turning off the GPU regulators during full suspend only on selected platforms. 
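The regulator handling piggybacks on the OPP framework: suspend drops the OPP (dev_pm_opp_set_opp(dev, NULL)) so the regulators can be switched off, and resume re-applies one before the clocks are enabled. Condensed resume-side sketch (full error paths in the change below):

    if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) {
            unsigned long freq = pfdev->pfdevfreq.fast_rate;
            struct dev_pm_opp *opp;

            /* Re-apply an OPP so the GPU regulators are turned back on. */
            opp = dev_pm_opp_find_freq_ceil(dev, &freq);
            if (IS_ERR(opp))
                    return PTR_ERR(opp);
            dev_pm_opp_set_opp(dev, opp);
            dev_pm_opp_put(opp);
    }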
Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231109102543.42971-6-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_device.c | 19 ++++++++++++++++++- drivers/gpu/drm/panfrost/panfrost_device.h | 2 ++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index b4ddbc3b80698..c90ad5ee34e7a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -431,10 +431,21 @@ static int panfrost_device_resume(struct device *dev) struct panfrost_device *pfdev = dev_get_drvdata(dev); int ret; + if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) { + unsigned long freq = pfdev->pfdevfreq.fast_rate; + struct dev_pm_opp *opp; + + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) + return PTR_ERR(opp); + dev_pm_opp_set_opp(dev, opp); + dev_pm_opp_put(opp); + } + if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) { ret = clk_enable(pfdev->clock); if (ret) - return ret; + goto err_clk; if (pfdev->bus_clock) { ret = clk_enable(pfdev->bus_clock); @@ -455,6 +466,9 @@ static int panfrost_device_resume(struct device *dev) err_bus_clk: if (pfdev->comp->pm_features & BIT(GPU_PM_CLK_DIS)) clk_disable(pfdev->clock); +err_clk: + if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) + dev_pm_opp_set_opp(dev, NULL); return ret; } @@ -474,6 +488,9 @@ static int panfrost_device_suspend(struct device *dev) clk_disable(pfdev->clock); } + if (pfdev->comp->pm_features & BIT(GPU_PM_VREG_OFF)) + dev_pm_opp_set_opp(dev, NULL); + return 0; } diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 32453b97c39cb..7de77990a2217 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -28,9 +28,11 @@ struct panfrost_perfcnt; /** * enum panfrost_gpu_pm - Supported kernel power management features * @GPU_PM_CLK_DIS: Allow disabling clocks during system suspend + * @GPU_PM_VREG_OFF: Allow turning off regulators during system suspend */ enum panfrost_gpu_pm { GPU_PM_CLK_DIS, + GPU_PM_VREG_OFF, }; struct panfrost_features { From 540527b1385fb203cc4513ca838b4de60bbbc49a Mon Sep 17 00:00:00 2001 From: AngeloGioacchino Del Regno Date: Thu, 9 Nov 2023 11:25:43 +0100 Subject: [PATCH 086/117] drm/panfrost: Set regulators on/off during system sleep on MediaTek SoCs All of the MediaTek SoCs supported by Panfrost can completely cut power to the GPU during full system sleep without any user-noticeable delay in the resume operation, as shown by measurements taken on multiple MediaTek SoCs (MT8183/86/92/95). As an example, for MT8195 - a "before" with only runtime PM operations (so, without turning on/off regulators), and an "after" executing both the system sleep .resume() handler and .runtime_resume() (so the time refers to T_Resume + T_Runtime_Resume): Average Panfrost-only system sleep resume time, before: ~33500ns Average Panfrost-only system sleep resume time, after: ~336200ns Keep in mind that this additional ~308200 nanoseconds delay happens only in resume from a full system suspend, and not in runtime PM operations, hence it is acceptable. Measurements were also taken on MT8186, showing a delay of ~312000 ns. 
Testing of this happened on all of the aforementioned MediaTek SoCs, but: MT8183 got tested only by KernelCI with <=10 suspend/resume cycles MT8186, MT8192, MT8195 were tested manually with over 100 suspend/resume cycles with GNOME DE (Mutter + Wayland). Signed-off-by: AngeloGioacchino Del Regno Reviewed-by: Steven Price Signed-off-by: Steven Price Link: https://patchwork.freedesktop.org/patch/msgid/20231109102543.42971-7-angelogioacchino.delregno@collabora.com --- drivers/gpu/drm/panfrost/panfrost_drv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 6d2897c03a8f6..7a628e89d379a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -730,7 +730,7 @@ static const struct panfrost_compatible mediatek_mt8183_b_data = { .supply_names = mediatek_mt8183_b_supplies, .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), .pm_domain_names = mediatek_mt8183_pm_domains, - .pm_features = BIT(GPU_PM_CLK_DIS), + .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), }; static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" }; @@ -739,7 +739,7 @@ static const struct panfrost_compatible mediatek_mt8186_data = { .supply_names = mediatek_mt8183_b_supplies, .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains), .pm_domain_names = mediatek_mt8186_pm_domains, - .pm_features = BIT(GPU_PM_CLK_DIS), + .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), }; static const char * const mediatek_mt8192_supplies[] = { "mali", NULL }; @@ -750,7 +750,7 @@ static const struct panfrost_compatible mediatek_mt8192_data = { .supply_names = mediatek_mt8192_supplies, .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains), .pm_domain_names = mediatek_mt8192_pm_domains, - .pm_features = BIT(GPU_PM_CLK_DIS), + .pm_features = BIT(GPU_PM_CLK_DIS) | BIT(GPU_PM_VREG_OFF), }; static const struct of_device_id dt_match[] = { From e4178256094a76cc36d9b9aabe7482615959b26f Mon Sep 17 00:00:00 2001 From: Gurchetan Singh Date: Wed, 18 Oct 2023 11:17:26 -0700 Subject: [PATCH 087/117] drm/virtio: use uint64_t more in virtio_gpu_context_init_ioctl drm_virtgpu_context_set_param defines both param and value to be u64s. Signed-off-by: Gurchetan Singh Reviewed-by: Josh Simonot Reviewed-by: Dmitry Osipenko Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20231018181727.772-1-gurchetansingh@chromium.org --- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index b24b11f25197d..8d13b17c215b4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -565,8 +565,8 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { int ret = 0; - uint32_t num_params, i, param, value; - uint64_t valid_ring_mask; + uint32_t num_params, i; + uint64_t valid_ring_mask, param, value; size_t len; struct drm_virtgpu_context_set_param *ctx_set_params = NULL; struct virtio_gpu_device *vgdev = dev->dev_private; From 7add80126bcedddd157ddc09988b032c93ed56c7 Mon Sep 17 00:00:00 2001 From: Gurchetan Singh Date: Wed, 18 Oct 2023 11:17:27 -0700 Subject: [PATCH 088/117] drm/uapi: add explicit virtgpu context debug name There are two problems with the current method of determining the virtio-gpu debug name. 
1) TASK_COMM_LEN is defined to be 16 bytes only, and this is a Linux kernel idiom (see PR_SET_NAME + PR_GET_NAME). Though, Android/FreeBSD get around this via setprogname(..)/getprogname(..) in libc. On Android, names longer than 16 bytes are common. For example, one often encounters a program like "com.android.systemui". The virtio-gpu spec allows the debug name to be up to 64 bytes, so ideally userspace should be able to set debug names up to 64 bytes. 2) The current implementation determines the debug name using whatever task initiated virtgpu. This is could be a "RenderThread" of a larger program, when we actually want to propagate the debug name of the program. To fix these issues, add a new CONTEXT_INIT param that allows userspace to set the debug name when creating a context. It takes a null-terminated C-string as the param value. The length of the string (excluding the terminator) **should** be <= 64 bytes. Otherwise, the debug_name will be truncated to 64 bytes. Link to open-source userspace: https://android-review.googlesource.com/c/platform/hardware/google/gfxstream/+/2787176 Signed-off-by: Gurchetan Singh Reviewed-by: Josh Simonot Reviewed-by: Dmitry Osipenko Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20231018181727.772-2-gurchetansingh@chromium.org --- drivers/gpu/drm/virtio/virtgpu_drv.h | 5 ++++ drivers/gpu/drm/virtio/virtgpu_ioctl.c | 36 +++++++++++++++++++++----- include/uapi/drm/virtgpu_drm.h | 2 ++ 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 96365a772f774..bb7d86a0c6a1f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -58,6 +58,9 @@ #define MAX_CAPSET_ID 63 #define MAX_RINGS 64 +/* See virtio_gpu_ctx_create. One additional character for NULL terminator. */ +#define DEBUG_NAME_MAX_LEN 65 + struct virtio_gpu_object_params { unsigned long size; bool dumb; @@ -274,6 +277,8 @@ struct virtio_gpu_fpriv { uint64_t base_fence_ctx; uint64_t ring_idx_mask; struct mutex context_lock; + char debug_name[DEBUG_NAME_MAX_LEN]; + bool explicit_debug_name; }; /* virtgpu_ioctl.c */ diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 8d13b17c215b4..1e2042419f957 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -42,12 +42,19 @@ static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev, struct virtio_gpu_fpriv *vfpriv) { - char dbgname[TASK_COMM_LEN]; + if (vfpriv->explicit_debug_name) { + virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id, + vfpriv->context_init, + strlen(vfpriv->debug_name), + vfpriv->debug_name); + } else { + char dbgname[TASK_COMM_LEN]; - get_task_comm(dbgname, current); - virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id, - vfpriv->context_init, strlen(dbgname), - dbgname); + get_task_comm(dbgname, current); + virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id, + vfpriv->context_init, strlen(dbgname), + dbgname); + } vfpriv->context_created = true; } @@ -107,6 +114,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs: value = vgdev->capset_id_mask; break; + case VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME: + value = vgdev->has_context_init ? 
1 : 0; + break; default: return -EINVAL; } @@ -580,7 +590,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, return -EINVAL; /* Number of unique parameters supported at this time. */ - if (num_params > 3) + if (num_params > 4) return -EINVAL; ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params), @@ -642,6 +652,20 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, vfpriv->ring_idx_mask = value; break; + case VIRTGPU_CONTEXT_PARAM_DEBUG_NAME: + if (vfpriv->explicit_debug_name) { + ret = -EINVAL; + goto out_unlock; + } + + ret = strncpy_from_user(vfpriv->debug_name, + u64_to_user_ptr(value), + DEBUG_NAME_MAX_LEN - 1); + if (ret < 0) + goto out_unlock; + + vfpriv->explicit_debug_name = true; + break; default: ret = -EINVAL; goto out_unlock; diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index b1d0e56565bcb..c2ce71987e9bb 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -97,6 +97,7 @@ struct drm_virtgpu_execbuffer { #define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */ #define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */ #define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */ +#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */ struct drm_virtgpu_getparam { __u64 param; @@ -198,6 +199,7 @@ struct drm_virtgpu_resource_create_blob { #define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001 #define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002 #define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003 +#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004 struct drm_virtgpu_context_set_param { __u64 param; __u64 value; From 546ca4d35dccaca6613766ed36ccfb2b5bd63bfe Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:31 +0100 Subject: [PATCH 089/117] drm/gpuvm: convert WARN() to drm_WARN() variants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use drm_WARN() and drm_WARN_ON() variants to indicate drivers the context the failing VM resides in. 
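The practical difference is only in the log output: the warning is now prefixed with the DRM device the VM belongs to. As a sketch (mirroring the change below):

    /* Before: no indication which device/VM triggered the warning. */
    WARN(check_add_overflow(addr, range, &end),
         "GPUVA address limited to %zu bytes.\n", sizeof(end));

    /* After: the warning names the &drm_device this GPUVM resides in. */
    drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end),
             "GPUVA address limited to %zu bytes.\n", sizeof(end));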
Acked-by: Christian König Reviewed-by: Boris Brezillon Reviewed-by: Thomas Hellström Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-2-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 32 ++++++++++++++------------ drivers/gpu/drm/nouveau/nouveau_uvmm.c | 3 ++- include/drm/drm_gpuvm.h | 7 ++++++ 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 08c088319652e..d7367a202fee6 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -614,12 +614,12 @@ static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, static void __drm_gpuva_remove(struct drm_gpuva *va); static bool -drm_gpuvm_check_overflow(u64 addr, u64 range) +drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range) { u64 end; - return WARN(check_add_overflow(addr, range, &end), - "GPUVA address limited to %zu bytes.\n", sizeof(end)); + return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end), + "GPUVA address limited to %zu bytes.\n", sizeof(end)); } static bool @@ -647,7 +647,7 @@ static bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range) { - return !drm_gpuvm_check_overflow(addr, range) && + return !drm_gpuvm_check_overflow(gpuvm, addr, range) && drm_gpuvm_in_mm_range(gpuvm, addr, range) && !drm_gpuvm_in_kernel_node(gpuvm, addr, range); } @@ -656,6 +656,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, * drm_gpuvm_init() - initialize a &drm_gpuvm * @gpuvm: pointer to the &drm_gpuvm to initialize * @name: the name of the GPU VA space + * @drm: the &drm_device this VM resides in * @start_offset: the start offset of the GPU VA space * @range: the size of the GPU VA space * @reserve_offset: the start of the kernel reserved GPU VA area @@ -668,8 +669,8 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, * &name is expected to be managed by the surrounding driver structures. */ void -drm_gpuvm_init(struct drm_gpuvm *gpuvm, - const char *name, +drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + struct drm_device *drm, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops) @@ -677,20 +678,20 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, gpuvm->rb.tree = RB_ROOT_CACHED; INIT_LIST_HEAD(&gpuvm->rb.list); - drm_gpuvm_check_overflow(start_offset, range); - gpuvm->mm_start = start_offset; - gpuvm->mm_range = range; - gpuvm->name = name ? 
name : "unknown"; gpuvm->ops = ops; + gpuvm->drm = drm; - memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); + drm_gpuvm_check_overflow(gpuvm, start_offset, range); + gpuvm->mm_start = start_offset; + gpuvm->mm_range = range; + memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); if (reserve_range) { gpuvm->kernel_alloc_node.va.addr = reserve_offset; gpuvm->kernel_alloc_node.va.range = reserve_range; - if (likely(!drm_gpuvm_check_overflow(reserve_offset, + if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset, reserve_range))) __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node); } @@ -712,8 +713,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) if (gpuvm->kernel_alloc_node.va.range) __drm_gpuva_remove(&gpuvm->kernel_alloc_node); - WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), - "GPUVA tree is not empty, potentially leaking memory."); + drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), + "GPUVA tree is not empty, potentially leaking memory.\n"); } EXPORT_SYMBOL_GPL(drm_gpuvm_destroy); @@ -795,7 +796,8 @@ drm_gpuva_remove(struct drm_gpuva *va) struct drm_gpuvm *gpuvm = va->vm; if (unlikely(va == &gpuvm->kernel_alloc_node)) { - WARN(1, "Can't destroy kernel reserved node.\n"); + drm_WARN(gpuvm->drm, 1, + "Can't destroy kernel reserved node.\n"); return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 5cf892c50f43c..aaf5d28bd5872 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1808,6 +1808,7 @@ int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, u64 kernel_managed_addr, u64 kernel_managed_size) { + struct drm_device *drm = cli->drm->dev; int ret; u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size; @@ -1836,7 +1837,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, uvmm->kernel_managed_addr = kernel_managed_addr; uvmm->kernel_managed_size = kernel_managed_size; - drm_gpuvm_init(&uvmm->base, cli->name, + drm_gpuvm_init(&uvmm->base, cli->name, drm, NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_END, kernel_managed_addr, kernel_managed_size, diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index bdfafc4a7705e..687fd5893624f 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -29,6 +29,7 @@ #include #include +#include #include struct drm_gpuvm; @@ -201,6 +202,11 @@ struct drm_gpuvm { */ const char *name; + /** + * @drm: the &drm_device this VM lives in + */ + struct drm_device *drm; + /** * @mm_start: start of the VA space */ @@ -241,6 +247,7 @@ struct drm_gpuvm { }; void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + struct drm_device *drm, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops); From d1adea27d0c8a08031b075f1bf4c5ce6f135ad7c Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:32 +0100 Subject: [PATCH 090/117] drm/gpuvm: don't always WARN in drm_gpuvm_check_overflow() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't always WARN in drm_gpuvm_check_overflow() and separate it into a drm_gpuvm_check_overflow() and a dedicated drm_gpuvm_warn_check_overflow() variant. This avoids printing warnings due to invalid userspace requests. 
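Sketch of the resulting split (matching the change below):

    /* Plain predicate: used by drm_gpuvm_range_valid() for userspace-provided
     * ranges, where an overflow is a normal -EINVAL condition, not a bug.
     */
    if (drm_gpuvm_check_overflow(addr, range))
            return false;

    /* Warning variant: kept for drm_gpuvm_init(), where an overflowing range
     * indicates a driver bug and deserves a loud drm_WARN().
     */
    drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range);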
Acked-by: Christian König Reviewed-by: Thomas Hellström Reviewed-by: Boris Brezillon Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-3-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index d7367a202fee6..445767f8fbc4a 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -614,12 +614,18 @@ static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, static void __drm_gpuva_remove(struct drm_gpuva *va); static bool -drm_gpuvm_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range) +drm_gpuvm_check_overflow(u64 addr, u64 range) { u64 end; - return drm_WARN(gpuvm->drm, check_add_overflow(addr, range, &end), - "GPUVA address limited to %zu bytes.\n", sizeof(end)); + return check_add_overflow(addr, range, &end); +} + +static bool +drm_gpuvm_warn_check_overflow(struct drm_gpuvm *gpuvm, u64 addr, u64 range) +{ + return drm_WARN(gpuvm->drm, drm_gpuvm_check_overflow(addr, range), + "GPUVA address limited to %zu bytes.\n", sizeof(addr)); } static bool @@ -647,7 +653,7 @@ static bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range) { - return !drm_gpuvm_check_overflow(gpuvm, addr, range) && + return !drm_gpuvm_check_overflow(addr, range) && drm_gpuvm_in_mm_range(gpuvm, addr, range) && !drm_gpuvm_in_kernel_node(gpuvm, addr, range); } @@ -682,7 +688,7 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, gpuvm->ops = ops; gpuvm->drm = drm; - drm_gpuvm_check_overflow(gpuvm, start_offset, range); + drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range); gpuvm->mm_start = start_offset; gpuvm->mm_range = range; @@ -691,8 +697,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, gpuvm->kernel_alloc_node.va.addr = reserve_offset; gpuvm->kernel_alloc_node.va.range = reserve_range; - if (likely(!drm_gpuvm_check_overflow(gpuvm, reserve_offset, - reserve_range))) + if (likely(!drm_gpuvm_warn_check_overflow(gpuvm, reserve_offset, + reserve_range))) __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node); } } From 9297cfc9405bc6b60540b8b8aaf930b7e449e15a Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:33 +0100 Subject: [PATCH 091/117] drm/gpuvm: export drm_gpuvm_range_valid() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drivers may use this function to validate userspace requests in advance, hence export it. Acked-by: Christian König Reviewed-by: Thomas Hellström Reviewed-by: Boris Brezillon Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-4-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 14 +++++++++++++- include/drm/drm_gpuvm.h | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 445767f8fbc4a..2669f9bbc3770 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -649,7 +649,18 @@ drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range) return krange && addr < kend && kstart < end; } -static bool +/** + * drm_gpuvm_range_valid() - checks whether the given range is valid for the + * given &drm_gpuvm + * @gpuvm: the GPUVM to check the range for + * @addr: the base address + * @range: the range starting from the base address + * + * Checks whether the range is within the GPUVM's managed boundaries. 
+ * + * Returns: true for a valid range, false otherwise + */ +bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range) { @@ -657,6 +668,7 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, drm_gpuvm_in_mm_range(gpuvm, addr, range) && !drm_gpuvm_in_kernel_node(gpuvm, addr, range); } +EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid); /** * drm_gpuvm_init() - initialize a &drm_gpuvm diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 687fd5893624f..13eac6f700610 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -253,6 +253,7 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, const struct drm_gpuvm_ops *ops); void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm); +bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range); bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); static inline struct drm_gpuva * From b41e297abd2347075ec640daf0e5da576e3d7418 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:34 +0100 Subject: [PATCH 092/117] drm/nouveau: make use of drm_gpuvm_range_valid() Use drm_gpuvm_range_valid() in order to validate userspace requests. Reviewed-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-5-dakr@redhat.com --- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 17 +---------------- drivers/gpu/drm/nouveau/nouveau_uvmm.h | 3 --- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index aaf5d28bd5872..641a911528db4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -929,25 +929,13 @@ nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm, static int nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range) { - u64 end = addr + range; - u64 kernel_managed_end = uvmm->kernel_managed_addr + - uvmm->kernel_managed_size; - if (addr & ~PAGE_MASK) return -EINVAL; if (range & ~PAGE_MASK) return -EINVAL; - if (end <= addr) - return -EINVAL; - - if (addr < NOUVEAU_VA_SPACE_START || - end > NOUVEAU_VA_SPACE_END) - return -EINVAL; - - if (addr < kernel_managed_end && - end > uvmm->kernel_managed_addr) + if (!drm_gpuvm_range_valid(&uvmm->base, addr, range)) return -EINVAL; return 0; @@ -1834,9 +1822,6 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, goto out_unlock; } - uvmm->kernel_managed_addr = kernel_managed_addr; - uvmm->kernel_managed_size = kernel_managed_size; - drm_gpuvm_init(&uvmm->base, cli->name, drm, NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_END, diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index a308c59760a54..06a0c36de392f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -14,9 +14,6 @@ struct nouveau_uvmm { struct mutex mutex; struct dma_resv resv; - u64 kernel_managed_addr; - u64 kernel_managed_size; - bool disabled; }; From bbe8458037e74b9887ba2f0f0b8084a13ade3a90 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:35 +0100 Subject: [PATCH 093/117] drm/gpuvm: add common dma-resv per struct drm_gpuvm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide a common dma-resv for GEM objects not being used outside of this GPU-VM. This is used in a subsequent patch to generalize dma-resv, external and evicted object handling and GEM validation. 
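For drivers, the typical setup then looks like this sketch (mirroring the nouveau conversion below):

    /* Allocate a dummy GEM object to carry the VM-wide dma_resv ... */
    r_obj = drm_gpuvm_resv_object_alloc(drm);
    if (!r_obj)
            return -ENOMEM;

    /* ... hand it to the GPUVM, which takes its own reference ... */
    drm_gpuvm_init(gpuvm, name, drm, r_obj,
                   start_offset, range,
                   reserve_offset, reserve_range, ops);

    /* ... and drop the local reference; the GPUVM owns it from here on. */
    drm_gem_object_put(r_obj);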
Acked-by: Christian König Reviewed-by: Boris Brezillon Reviewed-by: Thomas Hellström Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-6-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 53 ++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_uvmm.c | 13 ++++++- include/drm/drm_gpuvm.h | 33 ++++++++++++++++ 3 files changed, 97 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 2669f9bbc3770..af5805e4d7c9f 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -61,6 +61,15 @@ * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva * entries from within dma-fence signalling critical sections it is enough to * pre-allocate the &drm_gpuva structures. + * + * &drm_gem_objects which are private to a single VM can share a common + * &dma_resv in order to improve locking efficiency (e.g. with &drm_exec). + * For this purpose drivers must pass a &drm_gem_object to drm_gpuvm_init(), in + * the following called 'resv object', which serves as the container of the + * GPUVM's shared &dma_resv. This resv object can be a driver specific + * &drm_gem_object, such as the &drm_gem_object containing the root page table, + * but it can also be a 'dummy' object, which can be allocated with + * drm_gpuvm_resv_object_alloc(). */ /** @@ -670,11 +679,49 @@ drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, } EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid); +static void +drm_gpuvm_gem_object_free(struct drm_gem_object *obj) +{ + drm_gem_object_release(obj); + kfree(obj); +} + +static const struct drm_gem_object_funcs drm_gpuvm_object_funcs = { + .free = drm_gpuvm_gem_object_free, +}; + +/** + * drm_gpuvm_resv_object_alloc() - allocate a dummy &drm_gem_object + * @drm: the drivers &drm_device + * + * Allocates a dummy &drm_gem_object which can be passed to drm_gpuvm_init() in + * order to serve as root GEM object providing the &drm_resv shared across + * &drm_gem_objects local to a single GPUVM. + * + * Returns: the &drm_gem_object on success, NULL on failure + */ +struct drm_gem_object * +drm_gpuvm_resv_object_alloc(struct drm_device *drm) +{ + struct drm_gem_object *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return NULL; + + obj->funcs = &drm_gpuvm_object_funcs; + drm_gem_private_object_init(drm, obj, 0); + + return obj; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc); + /** * drm_gpuvm_init() - initialize a &drm_gpuvm * @gpuvm: pointer to the &drm_gpuvm to initialize * @name: the name of the GPU VA space * @drm: the &drm_device this VM resides in + * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv * @start_offset: the start offset of the GPU VA space * @range: the size of the GPU VA space * @reserve_offset: the start of the kernel reserved GPU VA area @@ -689,6 +736,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_range_valid); void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, struct drm_device *drm, + struct drm_gem_object *r_obj, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops) @@ -699,6 +747,9 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, gpuvm->name = name ? 
name : "unknown"; gpuvm->ops = ops; gpuvm->drm = drm; + gpuvm->r_obj = r_obj; + + drm_gem_object_get(r_obj); drm_gpuvm_warn_check_overflow(gpuvm, start_offset, range); gpuvm->mm_start = start_offset; @@ -733,6 +784,8 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), "GPUVA tree is not empty, potentially leaking memory.\n"); + + drm_gem_object_put(gpuvm->r_obj); } EXPORT_SYMBOL_GPL(drm_gpuvm_destroy); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 641a911528db4..f74bf30bc6831 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1797,8 +1797,9 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, u64 kernel_managed_addr, u64 kernel_managed_size) { struct drm_device *drm = cli->drm->dev; - int ret; + struct drm_gem_object *r_obj; u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size; + int ret; mutex_init(&uvmm->mutex); dma_resv_init(&uvmm->resv); @@ -1822,11 +1823,19 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, goto out_unlock; } - drm_gpuvm_init(&uvmm->base, cli->name, drm, + r_obj = drm_gpuvm_resv_object_alloc(drm); + if (!r_obj) { + ret = -ENOMEM; + goto out_unlock; + } + + drm_gpuvm_init(&uvmm->base, cli->name, drm, r_obj, NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_END, kernel_managed_addr, kernel_managed_size, NULL); + /* GPUVM takes care from here on. */ + drm_gem_object_put(r_obj); ret = nvif_vmm_ctor(&cli->mmu, "uvmm", cli->vmm.vmm.object.oclass, RAW, diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 13eac6f700610..ff3377cbfe52a 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -244,10 +244,16 @@ struct drm_gpuvm { * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers */ const struct drm_gpuvm_ops *ops; + + /** + * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv. 
+ */ + struct drm_gem_object *r_obj; }; void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, struct drm_device *drm, + struct drm_gem_object *r_obj, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops); @@ -256,6 +262,33 @@ void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm); bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range); bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); +struct drm_gem_object * +drm_gpuvm_resv_object_alloc(struct drm_device *drm); + +/** + * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv + * @gpuvm__: the &drm_gpuvm + * + * Returns: a pointer to the &drm_gpuvm's shared &dma_resv + */ +#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv) + +/** + * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's + * &dma_resv + * @gpuvm__: the &drm_gpuvm + * + * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared + * &dma_resv + */ +#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj) + +#define drm_gpuvm_resv_held(gpuvm__) \ + dma_resv_held(drm_gpuvm_resv(gpuvm__)) + +#define drm_gpuvm_resv_assert_held(gpuvm__) \ + dma_resv_assert_held(drm_gpuvm_resv(gpuvm__)) + static inline struct drm_gpuva * __drm_gpuva_next(struct drm_gpuva *va) { From 6118411428a393fb0868bad9025d71875418058b Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:36 +0100 Subject: [PATCH 094/117] drm/nouveau: make use of the GPUVM's shared dma-resv DRM GEM objects private to a single GPUVM can use a shared dma-resv. Make use of the shared dma-resv of GPUVM rather than a driver specific one. The shared dma-resv originates from a "root" GEM object serving as container for the dma-resv to make it compatible with drm_exec. In order to make sure the object proving the shared dma-resv can't be freed up before the objects making use of it, let every such GEM object take a reference on it. Reviewed-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-7-dakr@redhat.com --- drivers/gpu/drm/nouveau/nouveau_bo.c | 11 +++++++++-- drivers/gpu/drm/nouveau/nouveau_bo.h | 5 +++++ drivers/gpu/drm/nouveau/nouveau_gem.c | 10 ++++++++-- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 7 ++----- drivers/gpu/drm/nouveau/nouveau_uvmm.h | 1 - 5 files changed, 24 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 0f3bd187ede67..7afad86da64b7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -148,10 +148,17 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) * If nouveau_bo_new() allocated this buffer, the GEM object was never * initialized, so don't attempt to release it. */ - if (bo->base.dev) + if (bo->base.dev) { + /* Gem objects not being shared with other VMs get their + * dma_resv from a root GEM object. 
+ */ + if (nvbo->no_share) + drm_gem_object_put(nvbo->r_obj); + drm_gem_object_release(&bo->base); - else + } else { dma_resv_fini(&bo->base._resv); + } kfree(nvbo); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index 07f671cf895e0..70c551921a9ee 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -26,6 +26,11 @@ struct nouveau_bo { struct list_head entry; int pbbo_index; bool validate_mapped; + + /* Root GEM object we derive the dma_resv of in case this BO is not + * shared between VMs. + */ + struct drm_gem_object *r_obj; bool no_share; /* GPU address space is independent of CPU word size */ diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index a0d303e5ce3d8..49c2bcbef1299 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -111,7 +111,8 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50) return 0; - if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv) + if (nvbo->no_share && uvmm && + drm_gpuvm_resv(&uvmm->base) != nvbo->bo.base.resv) return -EPERM; ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); @@ -245,7 +246,7 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, if (unlikely(!uvmm)) return -EINVAL; - resv = &uvmm->resv; + resv = drm_gpuvm_resv(&uvmm->base); } if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART))) @@ -288,6 +289,11 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) nvbo->valid_domains &= domain; + if (nvbo->no_share) { + nvbo->r_obj = drm_gpuvm_resv_obj(&uvmm->base); + drm_gem_object_get(nvbo->r_obj); + } + *pnvbo = nvbo; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index f74bf30bc6831..8977a518de96e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1802,7 +1802,6 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, int ret; mutex_init(&uvmm->mutex); - dma_resv_init(&uvmm->resv); mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); @@ -1842,14 +1841,14 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, kernel_managed_addr, kernel_managed_size, NULL, 0, &cli->uvmm.vmm.vmm); if (ret) - goto out_free_gpuva_mgr; + goto out_gpuvm_fini; cli->uvmm.vmm.cli = cli; mutex_unlock(&cli->mutex); return 0; -out_free_gpuva_mgr: +out_gpuvm_fini: drm_gpuvm_destroy(&uvmm->base); out_unlock: mutex_unlock(&cli->mutex); @@ -1907,6 +1906,4 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) nouveau_vmm_fini(&uvmm->vmm); drm_gpuvm_destroy(&uvmm->base); mutex_unlock(&cli->mutex); - - dma_resv_fini(&uvmm->resv); } diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index 06a0c36de392f..22607270fae0d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -12,7 +12,6 @@ struct nouveau_uvmm { struct nouveau_vmm vmm; struct maple_tree region_mt; struct mutex mutex; - struct dma_resv resv; bool disabled; }; From 809ef191ee600e8bcbe2f8a769e00d2d54c16094 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:37 +0100 Subject: [PATCH 095/117] drm/gpuvm: add drm_gpuvm_flags to drm_gpuvm 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce flags for struct drm_gpuvm, this required by subsequent commits. Acked-by: Christian König Reviewed-by: Boris Brezillon Reviewed-by: Thomas Hellström Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-8-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 3 +++ drivers/gpu/drm/nouveau/nouveau_uvmm.c | 2 +- include/drm/drm_gpuvm.h | 16 ++++++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index af5805e4d7c9f..53e2c406fb04e 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -720,6 +720,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc); * drm_gpuvm_init() - initialize a &drm_gpuvm * @gpuvm: pointer to the &drm_gpuvm to initialize * @name: the name of the GPU VA space + * @flags: the &drm_gpuvm_flags for this GPUVM * @drm: the &drm_device this VM resides in * @r_obj: the resv &drm_gem_object providing the GPUVM's common &dma_resv * @start_offset: the start offset of the GPU VA space @@ -735,6 +736,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_resv_object_alloc); */ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + enum drm_gpuvm_flags flags, struct drm_device *drm, struct drm_gem_object *r_obj, u64 start_offset, u64 range, @@ -745,6 +747,7 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, INIT_LIST_HEAD(&gpuvm->rb.list); gpuvm->name = name ? name : "unknown"; + gpuvm->flags = flags; gpuvm->ops = ops; gpuvm->drm = drm; gpuvm->r_obj = r_obj; diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 8977a518de96e..f765e38353066 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1828,7 +1828,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, goto out_unlock; } - drm_gpuvm_init(&uvmm->base, cli->name, drm, r_obj, + drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj, NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_END, kernel_managed_addr, kernel_managed_size, diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index ff3377cbfe52a..0c2e24155a93d 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -184,6 +184,16 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) return va->flags & DRM_GPUVA_INVALIDATED; } +/** + * enum drm_gpuvm_flags - flags for struct drm_gpuvm + */ +enum drm_gpuvm_flags { + /** + * @DRM_GPUVM_USERBITS: user defined bits + */ + DRM_GPUVM_USERBITS = BIT(0), +}; + /** * struct drm_gpuvm - DRM GPU VA Manager * @@ -202,6 +212,11 @@ struct drm_gpuvm { */ const char *name; + /** + * @flags: the &drm_gpuvm_flags of this GPUVM + */ + enum drm_gpuvm_flags flags; + /** * @drm: the &drm_device this VM lives in */ @@ -252,6 +267,7 @@ struct drm_gpuvm { }; void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, + enum drm_gpuvm_flags flags, struct drm_device *drm, struct drm_gem_object *r_obj, u64 start_offset, u64 range, From 266f7618e761c8a6aa89dbfe43cda1b69cdbbf14 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:38 +0100 Subject: [PATCH 096/117] drm/nouveau: separately allocate struct nouveau_uvmm Allocate struct nouveau_uvmm separately in preparation for subsequent commits introducing reference counting for struct drm_gpuvm. 
While at it, get rid of nouveau_uvmm_init() as indirection of nouveau_uvmm_ioctl_vm_init() and perform some minor cleanups. Reviewed-by: Dave Airlie Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-9-dakr@redhat.com --- drivers/gpu/drm/nouveau/nouveau_drm.c | 5 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 10 ++-- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 63 +++++++++++++------------- drivers/gpu/drm/nouveau/nouveau_uvmm.h | 4 -- 4 files changed, 40 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 50589f982d1a4..f603eaef15608 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -190,6 +190,8 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence, static void nouveau_cli_fini(struct nouveau_cli *cli) { + struct nouveau_uvmm *uvmm = nouveau_cli_uvmm_locked(cli); + /* All our channels are dead now, which means all the fences they * own are signalled, and all callback functions have been called. * @@ -199,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli) WARN_ON(!list_empty(&cli->worker)); usif_client_fini(cli); - nouveau_uvmm_fini(&cli->uvmm); + if (uvmm) + nouveau_uvmm_fini(uvmm); nouveau_sched_entity_fini(&cli->sched_entity); nouveau_vmm_fini(&cli->svm); nouveau_vmm_fini(&cli->vmm); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3666a7403e472..e514110bf391f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -93,7 +93,10 @@ struct nouveau_cli { struct nvif_mmu mmu; struct nouveau_vmm vmm; struct nouveau_vmm svm; - struct nouveau_uvmm uvmm; + struct { + struct nouveau_uvmm *ptr; + bool disabled; + } uvmm; struct nouveau_sched_entity sched_entity; @@ -121,10 +124,7 @@ struct nouveau_cli_work { static inline struct nouveau_uvmm * nouveau_cli_uvmm(struct nouveau_cli *cli) { - if (!cli || !cli->uvmm.vmm.cli) - return NULL; - - return &cli->uvmm; + return cli ? 
cli->uvmm.ptr : NULL; } static inline struct nouveau_uvmm * diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index f765e38353066..54be12c1272f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1636,18 +1636,6 @@ nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, return ret; } -int -nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, - void *data, - struct drm_file *file_priv) -{ - struct nouveau_cli *cli = nouveau_cli(file_priv); - struct drm_nouveau_vm_init *init = data; - - return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr, - init->kernel_managed_size); -} - static int nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args) { @@ -1793,17 +1781,25 @@ nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo) } int -nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, - u64 kernel_managed_addr, u64 kernel_managed_size) +nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, + void *data, + struct drm_file *file_priv) { + struct nouveau_uvmm *uvmm; + struct nouveau_cli *cli = nouveau_cli(file_priv); struct drm_device *drm = cli->drm->dev; struct drm_gem_object *r_obj; - u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size; + struct drm_nouveau_vm_init *init = data; + u64 kernel_managed_end; int ret; - mutex_init(&uvmm->mutex); - mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); - mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); + if (check_add_overflow(init->kernel_managed_addr, + init->kernel_managed_size, + &kernel_managed_end)) + return -EINVAL; + + if (kernel_managed_end > NOUVEAU_VA_SPACE_END) + return -EINVAL; mutex_lock(&cli->mutex); @@ -1812,44 +1808,49 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, goto out_unlock; } - if (kernel_managed_end <= kernel_managed_addr) { - ret = -EINVAL; - goto out_unlock; - } - - if (kernel_managed_end > NOUVEAU_VA_SPACE_END) { - ret = -EINVAL; + uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL); + if (!uvmm) { + ret = -ENOMEM; goto out_unlock; } r_obj = drm_gpuvm_resv_object_alloc(drm); if (!r_obj) { + kfree(uvmm); ret = -ENOMEM; goto out_unlock; } + mutex_init(&uvmm->mutex); + mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); + mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); + drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj, NOUVEAU_VA_SPACE_START, NOUVEAU_VA_SPACE_END, - kernel_managed_addr, kernel_managed_size, + init->kernel_managed_addr, + init->kernel_managed_size, NULL); /* GPUVM takes care from here on. 
*/ drm_gem_object_put(r_obj); ret = nvif_vmm_ctor(&cli->mmu, "uvmm", cli->vmm.vmm.object.oclass, RAW, - kernel_managed_addr, kernel_managed_size, - NULL, 0, &cli->uvmm.vmm.vmm); + init->kernel_managed_addr, + init->kernel_managed_size, + NULL, 0, &uvmm->vmm.vmm); if (ret) goto out_gpuvm_fini; - cli->uvmm.vmm.cli = cli; + uvmm->vmm.cli = cli; + cli->uvmm.ptr = uvmm; mutex_unlock(&cli->mutex); return 0; out_gpuvm_fini: drm_gpuvm_destroy(&uvmm->base); + kfree(uvmm); out_unlock: mutex_unlock(&cli->mutex); return ret; @@ -1864,9 +1865,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) struct nouveau_sched_entity *entity = &cli->sched_entity; struct drm_gpuva *va, *next; - if (!cli) - return; - rmb(); /* for list_empty to work without lock */ wait_event(entity->job.wq, list_empty(&entity->job.list.head)); @@ -1905,5 +1903,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) mutex_lock(&cli->mutex); nouveau_vmm_fini(&uvmm->vmm); drm_gpuvm_destroy(&uvmm->base); + kfree(uvmm); mutex_unlock(&cli->mutex); } diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h index 22607270fae0d..f0a6d98ace4fd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h @@ -12,8 +12,6 @@ struct nouveau_uvmm { struct nouveau_vmm vmm; struct maple_tree region_mt; struct mutex mutex; - - bool disabled; }; struct nouveau_uvma_region { @@ -78,8 +76,6 @@ struct nouveau_uvmm_bind_job_args { #define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base) -int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, - u64 kernel_managed_addr, u64 kernel_managed_size); void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm); void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem); From 8af72338dd81d1f8667e0240bd28f5fc98b3f20d Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:39 +0100 Subject: [PATCH 097/117] drm/gpuvm: reference count drm_gpuvm structures MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement reference counting for struct drm_gpuvm. Acked-by: Christian König Reviewed-by: Thomas Hellström Reviewed-by: Boris Brezillon Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-10-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 56 +++++++++++++++++++++----- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 20 ++++++--- include/drm/drm_gpuvm.h | 31 +++++++++++++- 3 files changed, 90 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 53e2c406fb04e..ef968eba6fe63 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -746,6 +746,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, gpuvm->rb.tree = RB_ROOT_CACHED; INIT_LIST_HEAD(&gpuvm->rb.list); + kref_init(&gpuvm->kref); + gpuvm->name = name ? name : "unknown"; gpuvm->flags = flags; gpuvm->ops = ops; @@ -770,15 +772,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, } EXPORT_SYMBOL_GPL(drm_gpuvm_init); -/** - * drm_gpuvm_destroy() - cleanup a &drm_gpuvm - * @gpuvm: pointer to the &drm_gpuvm to clean up - * - * Note that it is a bug to call this function on a manager that still - * holds GPU VA mappings. 
- */ -void -drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) +static void +drm_gpuvm_fini(struct drm_gpuvm *gpuvm) { gpuvm->name = NULL; @@ -790,7 +785,35 @@ drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) drm_gem_object_put(gpuvm->r_obj); } -EXPORT_SYMBOL_GPL(drm_gpuvm_destroy); + +static void +drm_gpuvm_free(struct kref *kref) +{ + struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref); + + drm_gpuvm_fini(gpuvm); + + if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free)) + return; + + gpuvm->ops->vm_free(gpuvm); +} + +/** + * drm_gpuvm_put() - drop a struct drm_gpuvm reference + * @gpuvm: the &drm_gpuvm to release the reference of + * + * This releases a reference to @gpuvm. + * + * This function may be called from atomic context. + */ +void +drm_gpuvm_put(struct drm_gpuvm *gpuvm) +{ + if (gpuvm) + kref_put(&gpuvm->kref, drm_gpuvm_free); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_put); static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, @@ -839,11 +862,21 @@ drm_gpuva_insert(struct drm_gpuvm *gpuvm, { u64 addr = va->va.addr; u64 range = va->va.range; + int ret; if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range))) return -EINVAL; - return __drm_gpuva_insert(gpuvm, va); + ret = __drm_gpuva_insert(gpuvm, va); + if (likely(!ret)) + /* Take a reference of the GPUVM for the successfully inserted + * drm_gpuva. We can't take the reference in + * __drm_gpuva_insert() itself, since we don't want to increse + * the reference count for the GPUVM's kernel_alloc_node. + */ + drm_gpuvm_get(gpuvm); + + return ret; } EXPORT_SYMBOL_GPL(drm_gpuva_insert); @@ -876,6 +909,7 @@ drm_gpuva_remove(struct drm_gpuva *va) } __drm_gpuva_remove(va); + drm_gpuvm_put(va->vm); } EXPORT_SYMBOL_GPL(drm_gpuva_remove); diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 54be12c1272f2..cb2f06565c462 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1780,6 +1780,18 @@ nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo) } } +static void +nouveau_uvmm_free(struct drm_gpuvm *gpuvm) +{ + struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm); + + kfree(uvmm); +} + +static const struct drm_gpuvm_ops gpuvm_ops = { + .vm_free = nouveau_uvmm_free, +}; + int nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, void *data, @@ -1830,7 +1842,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, NOUVEAU_VA_SPACE_END, init->kernel_managed_addr, init->kernel_managed_size, - NULL); + &gpuvm_ops); /* GPUVM takes care from here on. 
*/ drm_gem_object_put(r_obj); @@ -1849,8 +1861,7 @@ nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, return 0; out_gpuvm_fini: - drm_gpuvm_destroy(&uvmm->base); - kfree(uvmm); + drm_gpuvm_put(&uvmm->base); out_unlock: mutex_unlock(&cli->mutex); return ret; @@ -1902,7 +1913,6 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) mutex_lock(&cli->mutex); nouveau_vmm_fini(&uvmm->vmm); - drm_gpuvm_destroy(&uvmm->base); - kfree(uvmm); + drm_gpuvm_put(&uvmm->base); mutex_unlock(&cli->mutex); } diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 0c2e24155a93d..4e6e1fd3485ad 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -247,6 +247,11 @@ struct drm_gpuvm { struct list_head list; } rb; + /** + * @kref: reference count of this object + */ + struct kref kref; + /** * @kernel_alloc_node: * @@ -273,7 +278,23 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, u64 start_offset, u64 range, u64 reserve_offset, u64 reserve_range, const struct drm_gpuvm_ops *ops); -void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm); + +/** + * drm_gpuvm_get() - acquire a struct drm_gpuvm reference + * @gpuvm: the &drm_gpuvm to acquire the reference of + * + * This function acquires an additional reference to @gpuvm. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline struct drm_gpuvm * +drm_gpuvm_get(struct drm_gpuvm *gpuvm) +{ + kref_get(&gpuvm->kref); + + return gpuvm; +} + +void drm_gpuvm_put(struct drm_gpuvm *gpuvm); bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range); bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); @@ -673,6 +694,14 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va, * operations to drivers. */ struct drm_gpuvm_ops { + /** + * @vm_free: called when the last reference of a struct drm_gpuvm is + * dropped + * + * This callback is mandatory. + */ + void (*vm_free)(struct drm_gpuvm *gpuvm); + /** * @op_alloc: called when the &drm_gpuvm allocates * a struct drm_gpuva_op From 94bc2249f08e141fb4aa120bfdc392c7a5e78211 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:40 +0100 Subject: [PATCH 098/117] drm/gpuvm: add an abstraction for a VM / BO combination MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an abstraction layer between the drm_gpuva mappings of a particular drm_gem_object and this GEM object itself. The abstraction represents a combination of a drm_gem_object and drm_gpuvm. The drm_gem_object holds a list of drm_gpuvm_bo structures (the structure representing this abstraction), while each drm_gpuvm_bo contains list of mappings of this GEM object. This has multiple advantages: 1) We can use the drm_gpuvm_bo structure to attach it to various lists of the drm_gpuvm. This is useful for tracking external and evicted objects per VM, which is introduced in subsequent patches. 2) Finding mappings of a certain drm_gem_object mapped in a certain drm_gpuvm becomes much cheaper. 3) Drivers can derive and extend the structure to easily represent driver specific states of a BO for a certain GPUVM. The idea of this abstraction was taken from amdgpu, hence the credit for this idea goes to the developers of amdgpu. 
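As a rough usage sketch (illustration only, not part of this patch; gpuvm, obj and
va stand for a driver's VM, GEM object and mapping, and the default dma-resv based
gpuva locking is assumed), the expected lifecycle of such a combination is:

  struct drm_gpuvm_bo *vm_bo;

  dma_resv_lock(obj->resv, NULL);

  /* Look up or create the unique combination of (gpuvm, obj). */
  vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
  if (IS_ERR(vm_bo)) {
          dma_resv_unlock(obj->resv);
          return PTR_ERR(vm_bo);
  }

  /* Linking a newly inserted mapping takes another vm_bo reference... */
  drm_gpuva_link(va, vm_bo);

  /* ...so the reference from drm_gpuvm_bo_obtain() can be dropped again. */
  drm_gpuvm_bo_put(vm_bo);

  dma_resv_unlock(obj->resv);

  /* Later, unlinking the last mapping drops the final reference and
   * removes the vm_bo from the GEM's gpuva list.
   */
  dma_resv_lock(obj->resv, NULL);
  drm_gpuva_unlink(va);
  dma_resv_unlock(obj->resv);
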
Cc: Christian König Acked-by: Christian König Reviewed-by: Thomas Hellström Reviewed-by: Boris Brezillon Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-11-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 340 +++++++++++++++++++++---- drivers/gpu/drm/nouveau/nouveau_uvmm.c | 63 +++-- include/drm/drm_gem.h | 32 +-- include/drm/drm_gpuvm.h | 185 +++++++++++++- 4 files changed, 534 insertions(+), 86 deletions(-) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index ef968eba6fe63..7e193bca8bab0 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -70,6 +70,18 @@ * &drm_gem_object, such as the &drm_gem_object containing the root page table, * but it can also be a 'dummy' object, which can be allocated with * drm_gpuvm_resv_object_alloc(). + * + * In order to connect a struct drm_gpuva its backing &drm_gem_object each + * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each + * &drm_gpuvm_bo contains a list of &drm_gpuva structures. + * + * A &drm_gpuvm_bo is an abstraction that represents a combination of a + * &drm_gpuvm and a &drm_gem_object. Every such combination should be unique. + * This is ensured by the API through drm_gpuvm_bo_obtain() and + * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding + * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this + * particular combination. If not existent a new instance is created and linked + * to the &drm_gem_object. */ /** @@ -395,21 +407,28 @@ /** * DOC: Locking * - * Generally, the GPU VA manager does not take care of locking itself, it is - * the drivers responsibility to take care about locking. Drivers might want to - * protect the following operations: inserting, removing and iterating - * &drm_gpuva objects as well as generating all kinds of operations, such as - * split / merge or prefetch. - * - * The GPU VA manager also does not take care of the locking of the backing - * &drm_gem_object buffers GPU VA lists by itself; drivers are responsible to - * enforce mutual exclusion using either the GEMs dma_resv lock or alternatively - * a driver specific external lock. For the latter see also - * drm_gem_gpuva_set_lock(). - * - * However, the GPU VA manager contains lockdep checks to ensure callers of its - * API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is - * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink(). + * In terms of managing &drm_gpuva entries DRM GPUVM does not take care of + * locking itself, it is the drivers responsibility to take care about locking. + * Drivers might want to protect the following operations: inserting, removing + * and iterating &drm_gpuva objects as well as generating all kinds of + * operations, such as split / merge or prefetch. + * + * DRM GPUVM also does not take care of the locking of the backing + * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by + * itself; drivers are responsible to enforce mutual exclusion using either the + * GEMs dma_resv lock or alternatively a driver specific external lock. For the + * latter see also drm_gem_gpuva_set_lock(). + * + * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold + * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed + * by functions such as drm_gpuva_link() or drm_gpuva_unlink(), but also + * drm_gpuvm_bo_obtain() and drm_gpuvm_bo_put(). 
+ * + * The latter is required since on creation and destruction of a &drm_gpuvm_bo + * the &drm_gpuvm_bo is attached / removed from the &drm_gem_objects gpuva list. + * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and + * &drm_gem_object must be able to observe previous creations and destructions + * of &drm_gpuvm_bos in order to keep instances unique. */ /** @@ -439,6 +458,7 @@ * { * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op + * struct drm_gpuvm_bo *vm_bo; * * driver_lock_va_space(); * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, @@ -446,6 +466,10 @@ * if (IS_ERR(ops)) * return PTR_ERR(ops); * + * vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj); + * if (IS_ERR(vm_bo)) + * return PTR_ERR(vm_bo); + * * drm_gpuva_for_each_op(op, ops) { * struct drm_gpuva *va; * @@ -458,7 +482,7 @@ * * driver_vm_map(); * drm_gpuva_map(gpuvm, va, &op->map); - * drm_gpuva_link(va); + * drm_gpuva_link(va, vm_bo); * * break; * case DRM_GPUVA_OP_REMAP: { @@ -485,11 +509,11 @@ * driver_vm_remap(); * drm_gpuva_remap(prev, next, &op->remap); * - * drm_gpuva_unlink(va); * if (prev) - * drm_gpuva_link(prev); + * drm_gpuva_link(prev, va->vm_bo); * if (next) - * drm_gpuva_link(next); + * drm_gpuva_link(next, va->vm_bo); + * drm_gpuva_unlink(va); * * break; * } @@ -505,6 +529,7 @@ * break; * } * } + * drm_gpuvm_bo_put(vm_bo); * driver_unlock_va_space(); * * return 0; @@ -514,6 +539,7 @@ * * struct driver_context { * struct drm_gpuvm *gpuvm; + * struct drm_gpuvm_bo *vm_bo; * struct drm_gpuva *new_va; * struct drm_gpuva *prev_va; * struct drm_gpuva *next_va; @@ -534,6 +560,7 @@ * struct drm_gem_object *obj, u64 offset) * { * struct driver_context ctx; + * struct drm_gpuvm_bo *vm_bo; * struct drm_gpuva_ops *ops; * struct drm_gpuva_op *op; * int ret = 0; @@ -543,16 +570,23 @@ * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL); * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL); * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL); - * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) { + * ctx.vm_bo = drm_gpuvm_bo_create(gpuvm, obj); + * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va || !vm_bo) { * ret = -ENOMEM; * goto out; * } * + * // Typically protected with a driver specific GEM gpuva lock + * // used in the fence signaling path for drm_gpuva_link() and + * // drm_gpuva_unlink(), hence pre-allocate. 
+ * ctx.vm_bo = drm_gpuvm_bo_obtain_prealloc(ctx.vm_bo); + * * driver_lock_va_space(); * ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset); * driver_unlock_va_space(); * * out: + * drm_gpuvm_bo_put(ctx.vm_bo); * kfree(ctx.new_va); * kfree(ctx.prev_va); * kfree(ctx.next_va); @@ -565,7 +599,7 @@ * * drm_gpuva_map(ctx->vm, ctx->new_va, &op->map); * - * drm_gpuva_link(ctx->new_va); + * drm_gpuva_link(ctx->new_va, ctx->vm_bo); * * // prevent the new GPUVA from being freed in * // driver_mapping_create() @@ -577,22 +611,23 @@ * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx) * { * struct driver_context *ctx = __ctx; + * struct drm_gpuva *va = op->remap.unmap->va; * * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap); * - * drm_gpuva_unlink(op->remap.unmap->va); - * kfree(op->remap.unmap->va); - * * if (op->remap.prev) { - * drm_gpuva_link(ctx->prev_va); + * drm_gpuva_link(ctx->prev_va, va->vm_bo); * ctx->prev_va = NULL; * } * * if (op->remap.next) { - * drm_gpuva_link(ctx->next_va); + * drm_gpuva_link(ctx->next_va, va->vm_bo); * ctx->next_va = NULL; * } * + * drm_gpuva_unlink(va); + * kfree(va); + * * return 0; * } * @@ -815,6 +850,199 @@ drm_gpuvm_put(struct drm_gpuvm *gpuvm) } EXPORT_SYMBOL_GPL(drm_gpuvm_put); +/** + * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo + * @gpuvm: The &drm_gpuvm the @obj is mapped in. + * @obj: The &drm_gem_object being mapped in the @gpuvm. + * + * If provided by the driver, this function uses the &drm_gpuvm_ops + * vm_bo_alloc() callback to allocate. + * + * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo; + + if (ops && ops->vm_bo_alloc) + vm_bo = ops->vm_bo_alloc(); + else + vm_bo = kzalloc(sizeof(*vm_bo), GFP_KERNEL); + + if (unlikely(!vm_bo)) + return NULL; + + vm_bo->vm = drm_gpuvm_get(gpuvm); + vm_bo->obj = obj; + drm_gem_object_get(obj); + + kref_init(&vm_bo->kref); + INIT_LIST_HEAD(&vm_bo->list.gpuva); + INIT_LIST_HEAD(&vm_bo->list.entry.gem); + + return vm_bo; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create); + +static void +drm_gpuvm_bo_destroy(struct kref *kref) +{ + struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo, + kref); + struct drm_gpuvm *gpuvm = vm_bo->vm; + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gem_object *obj = vm_bo->obj; + bool lock = !drm_gpuvm_resv_protected(gpuvm); + + if (!lock) + drm_gpuvm_resv_assert_held(gpuvm); + + drm_gem_gpuva_assert_lock_held(obj); + list_del(&vm_bo->list.entry.gem); + + if (ops && ops->vm_bo_free) + ops->vm_bo_free(vm_bo); + else + kfree(vm_bo); + + drm_gpuvm_put(gpuvm); + drm_gem_object_put(obj); +} + +/** + * drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference + * @vm_bo: the &drm_gpuvm_bo to release the reference of + * + * This releases a reference to @vm_bo. + * + * If the reference count drops to zero, the &gpuvm_bo is destroyed, which + * includes removing it from the GEMs gpuva list. Hence, if a call to this + * function can potentially let the reference count drop to zero the caller must + * hold the dma-resv or driver specific GEM gpuva lock. + * + * This function may only be called from non-atomic context. 
+ */ +void +drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo) +{ + might_sleep(); + + if (vm_bo) + kref_put(&vm_bo->kref, drm_gpuvm_bo_destroy); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put); + +static struct drm_gpuvm_bo * +__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + struct drm_gpuvm_bo *vm_bo; + + drm_gem_gpuva_assert_lock_held(obj); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) + if (vm_bo->vm == gpuvm) + return vm_bo; + + return NULL; +} + +/** + * drm_gpuvm_bo_find() - find the &drm_gpuvm_bo for the given + * &drm_gpuvm and &drm_gem_object + * @gpuvm: The &drm_gpuvm the @obj is mapped in. + * @obj: The &drm_gem_object being mapped in the @gpuvm. + * + * Find the &drm_gpuvm_bo representing the combination of the given + * &drm_gpuvm and &drm_gem_object. If found, increases the reference + * count of the &drm_gpuvm_bo accordingly. + * + * Returns: a pointer to the &drm_gpuvm_bo on success, NULL on failure + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + struct drm_gpuvm_bo *vm_bo = __drm_gpuvm_bo_find(gpuvm, obj); + + return vm_bo ? drm_gpuvm_bo_get(vm_bo) : NULL; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find); + +/** + * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the + * given &drm_gpuvm and &drm_gem_object + * @gpuvm: The &drm_gpuvm the @obj is mapped in. + * @obj: The &drm_gem_object being mapped in the @gpuvm. + * + * Find the &drm_gpuvm_bo representing the combination of the given + * &drm_gpuvm and &drm_gem_object. If found, increases the reference + * count of the &drm_gpuvm_bo accordingly. If not found, allocates a new + * &drm_gpuvm_bo. + * + * A new &drm_gpuvm_bo is added to the GEMs gpuva list. + * + * Returns: a pointer to the &drm_gpuvm_bo on success, an ERR_PTR on failure + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + struct drm_gpuvm_bo *vm_bo; + + vm_bo = drm_gpuvm_bo_find(gpuvm, obj); + if (vm_bo) + return vm_bo; + + vm_bo = drm_gpuvm_bo_create(gpuvm, obj); + if (!vm_bo) + return ERR_PTR(-ENOMEM); + + drm_gem_gpuva_assert_lock_held(obj); + list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list); + + return vm_bo; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain); + +/** + * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo + * for the given &drm_gpuvm and &drm_gem_object + * @__vm_bo: A pre-allocated struct drm_gpuvm_bo. + * + * Find the &drm_gpuvm_bo representing the combination of the given + * &drm_gpuvm and &drm_gem_object. If found, increases the reference + * count of the found &drm_gpuvm_bo accordingly, while the @__vm_bo reference + * count is decreased. If not found @__vm_bo is returned without further + * increase of the reference count. + * + * A new &drm_gpuvm_bo is added to the GEMs gpuva list. 
+ * + * Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing + * &drm_gpuvm_bo was found + */ +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo) +{ + struct drm_gpuvm *gpuvm = __vm_bo->vm; + struct drm_gem_object *obj = __vm_bo->obj; + struct drm_gpuvm_bo *vm_bo; + + vm_bo = drm_gpuvm_bo_find(gpuvm, obj); + if (vm_bo) { + drm_gpuvm_bo_put(__vm_bo); + return vm_bo; + } + + drm_gem_gpuva_assert_lock_held(obj); + list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list); + + return __vm_bo; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc); + static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va) @@ -916,24 +1144,33 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove); /** * drm_gpuva_link() - link a &drm_gpuva * @va: the &drm_gpuva to link + * @vm_bo: the &drm_gpuvm_bo to add the &drm_gpuva to * - * This adds the given &va to the GPU VA list of the &drm_gem_object it is - * associated with. + * This adds the given &va to the GPU VA list of the &drm_gpuvm_bo and the + * &drm_gpuvm_bo to the &drm_gem_object it is associated with. + * + * For every &drm_gpuva entry added to the &drm_gpuvm_bo an additional + * reference of the latter is taken. * * This function expects the caller to protect the GEM's GPUVA list against - * concurrent access using the GEMs dma_resv lock. + * concurrent access using either the GEMs dma_resv lock or a driver specific + * lock set through drm_gem_gpuva_set_lock(). */ void -drm_gpuva_link(struct drm_gpuva *va) +drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) { struct drm_gem_object *obj = va->gem.obj; + struct drm_gpuvm *gpuvm = va->vm; if (unlikely(!obj)) return; - drm_gem_gpuva_assert_lock_held(obj); + drm_WARN_ON(gpuvm->drm, obj != vm_bo->obj); - list_add_tail(&va->gem.entry, &obj->gpuva.list); + va->vm_bo = drm_gpuvm_bo_get(vm_bo); + + drm_gem_gpuva_assert_lock_held(obj); + list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); } EXPORT_SYMBOL_GPL(drm_gpuva_link); @@ -944,20 +1181,31 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link); * This removes the given &va from the GPU VA list of the &drm_gem_object it is * associated with. * + * This removes the given &va from the GPU VA list of the &drm_gpuvm_bo and + * the &drm_gpuvm_bo from the &drm_gem_object it is associated with in case + * this call unlinks the last &drm_gpuva from the &drm_gpuvm_bo. + * + * For every &drm_gpuva entry removed from the &drm_gpuvm_bo a reference of + * the latter is dropped. + * * This function expects the caller to protect the GEM's GPUVA list against - * concurrent access using the GEMs dma_resv lock. + * concurrent access using either the GEMs dma_resv lock or a driver specific + * lock set through drm_gem_gpuva_set_lock(). 
*/ void drm_gpuva_unlink(struct drm_gpuva *va) { struct drm_gem_object *obj = va->gem.obj; + struct drm_gpuvm_bo *vm_bo = va->vm_bo; if (unlikely(!obj)) return; drm_gem_gpuva_assert_lock_held(obj); - list_del_init(&va->gem.entry); + + va->vm_bo = NULL; + drm_gpuvm_bo_put(vm_bo); } EXPORT_SYMBOL_GPL(drm_gpuva_unlink); @@ -1102,10 +1350,10 @@ drm_gpuva_remap(struct drm_gpuva *prev, struct drm_gpuva *next, struct drm_gpuva_op_remap *op) { - struct drm_gpuva *curr = op->unmap->va; - struct drm_gpuvm *gpuvm = curr->vm; + struct drm_gpuva *va = op->unmap->va; + struct drm_gpuvm *gpuvm = va->vm; - drm_gpuva_remove(curr); + drm_gpuva_remove(va); if (op->prev) { drm_gpuva_init_from_op(prev, op->prev); @@ -1747,9 +1995,8 @@ drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); /** - * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM - * @gpuvm: the &drm_gpuvm representing the GPU VA space - * @obj: the &drm_gem_object to unmap + * drm_gpuvm_bo_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM + * @vm_bo: the &drm_gpuvm_bo abstraction * * This function creates a list of operations to perform unmapping for every * GPUVA attached to a GEM. @@ -1766,15 +2013,14 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure */ struct drm_gpuva_ops * -drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, - struct drm_gem_object *obj) +drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo) { struct drm_gpuva_ops *ops; struct drm_gpuva_op *op; struct drm_gpuva *va; int ret; - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(vm_bo->obj); ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (!ops) @@ -1782,8 +2028,8 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, INIT_LIST_HEAD(&ops->list); - drm_gem_for_each_gpuva(va, obj) { - op = gpuva_op_alloc(gpuvm); + drm_gpuvm_bo_for_each_va(va, vm_bo) { + op = gpuva_op_alloc(vm_bo->vm); if (!op) { ret = -ENOMEM; goto err_free_ops; @@ -1797,10 +2043,10 @@ drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, return ops; err_free_ops: - drm_gpuva_ops_free(gpuvm, ops); + drm_gpuva_ops_free(vm_bo->vm, ops); return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create); +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_unmap_ops_create); /** * drm_gpuva_ops_free() - free the given &drm_gpuva_ops diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index cb2f06565c462..eda7bb8624f11 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -62,6 +62,8 @@ struct bind_job_op { enum vm_bind_op op; u32 flags; + struct drm_gpuvm_bo *vm_bo; + struct { u64 addr; u64 range; @@ -1101,22 +1103,28 @@ bind_validate_region(struct nouveau_job *job) } static void -bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new) +bind_link_gpuvas(struct bind_job_op *bop) { + struct nouveau_uvma_prealloc *new = &bop->new; + struct drm_gpuvm_bo *vm_bo = bop->vm_bo; + struct drm_gpuva_ops *ops = bop->ops; struct drm_gpuva_op *op; drm_gpuva_for_each_op(op, ops) { switch (op->op) { case DRM_GPUVA_OP_MAP: - drm_gpuva_link(&new->map->va); + drm_gpuva_link(&new->map->va, vm_bo); break; - case DRM_GPUVA_OP_REMAP: + case DRM_GPUVA_OP_REMAP: { + struct drm_gpuva *va = op->remap.unmap->va; + if (op->remap.prev) - drm_gpuva_link(&new->prev->va); + drm_gpuva_link(&new->prev->va, va->vm_bo); if (op->remap.next) - 
drm_gpuva_link(&new->next->va); - drm_gpuva_unlink(op->remap.unmap->va); + drm_gpuva_link(&new->next->va, va->vm_bo); + drm_gpuva_unlink(va); break; + } case DRM_GPUVA_OP_UNMAP: drm_gpuva_unlink(op->unmap.va); break; @@ -1138,10 +1146,17 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) list_for_each_op(op, &bind_job->ops) { if (op->op == OP_MAP) { - op->gem.obj = drm_gem_object_lookup(job->file_priv, - op->gem.handle); - if (!op->gem.obj) + struct drm_gem_object *obj = op->gem.obj = + drm_gem_object_lookup(job->file_priv, + op->gem.handle); + if (!obj) return -ENOENT; + + dma_resv_lock(obj->resv, NULL); + op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj); + dma_resv_unlock(obj->resv); + if (IS_ERR(op->vm_bo)) + return PTR_ERR(op->vm_bo); } ret = bind_validate_op(job, op); @@ -1352,7 +1367,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job) case OP_UNMAP_SPARSE: case OP_MAP: case OP_UNMAP: - bind_link_gpuvas(op->ops, &op->new); + bind_link_gpuvas(op); break; default: break; @@ -1499,6 +1514,12 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) if (!IS_ERR_OR_NULL(op->ops)) drm_gpuva_ops_free(&uvmm->base, op->ops); + if (!IS_ERR_OR_NULL(op->vm_bo)) { + dma_resv_lock(obj->resv, NULL); + drm_gpuvm_bo_put(op->vm_bo); + dma_resv_unlock(obj->resv); + } + if (obj) drm_gem_object_put(obj); } @@ -1752,15 +1773,18 @@ void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem) { struct drm_gem_object *obj = &nvbo->bo.base; + struct drm_gpuvm_bo *vm_bo; struct drm_gpuva *va; dma_resv_assert_held(obj->resv); - drm_gem_for_each_gpuva(va, obj) { - struct nouveau_uvma *uvma = uvma_from_va(va); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { + drm_gpuvm_bo_for_each_va(va, vm_bo) { + struct nouveau_uvma *uvma = uvma_from_va(va); - nouveau_uvma_map(uvma, mem); - drm_gpuva_invalidate(va, false); + nouveau_uvma_map(uvma, mem); + drm_gpuva_invalidate(va, false); + } } } @@ -1768,15 +1792,18 @@ void nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo) { struct drm_gem_object *obj = &nvbo->bo.base; + struct drm_gpuvm_bo *vm_bo; struct drm_gpuva *va; dma_resv_assert_held(obj->resv); - drm_gem_for_each_gpuva(va, obj) { - struct nouveau_uvma *uvma = uvma_from_va(va); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { + drm_gpuvm_bo_for_each_va(va, vm_bo) { + struct nouveau_uvma *uvma = uvma_from_va(va); - nouveau_uvma_unmap(uvma); - drm_gpuva_invalidate(va, true); + nouveau_uvma_unmap(uvma); + drm_gpuva_invalidate(va, true); + } } } diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 16364487fde9f..369505447acd8 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -580,7 +580,7 @@ int drm_gem_evict(struct drm_gem_object *obj); * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object * @obj: the &drm_gem_object * - * This initializes the &drm_gem_object's &drm_gpuva list. + * This initializes the &drm_gem_object's &drm_gpuvm_bo list. * * Calling this function is only necessary for drivers intending to support the * &drm_driver_feature DRIVER_GEM_GPUVA. 
@@ -593,28 +593,28 @@ static inline void drm_gem_gpuva_init(struct drm_gem_object *obj) } /** - * drm_gem_for_each_gpuva() - iternator to walk over a list of gpuvas - * @entry__: &drm_gpuva structure to assign to in each iteration step - * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with + * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo + * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step + * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with * - * This iterator walks over all &drm_gpuva structures associated with the - * &drm_gpuva_manager. + * This iterator walks over all &drm_gpuvm_bo structures associated with the + * &drm_gem_object. */ -#define drm_gem_for_each_gpuva(entry__, obj__) \ - list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry) +#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \ + list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem) /** - * drm_gem_for_each_gpuva_safe() - iternator to safely walk over a list of - * gpuvas - * @entry__: &drm_gpuva structure to assign to in each iteration step - * @next__: &next &drm_gpuva to store the next step - * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with + * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of + * &drm_gpuvm_bo + * @entry__: &drm_gpuvm_bostructure to assign to in each iteration step + * @next__: &next &drm_gpuvm_bo to store the next step + * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with * - * This iterator walks over all &drm_gpuva structures associated with the + * This iterator walks over all &drm_gpuvm_bo structures associated with the * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence * it is save against removal of elements. */ -#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \ - list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry) +#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \ + list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem) #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index 4e6e1fd3485ad..b12fb22b0e226 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -25,6 +25,7 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include #include #include #include @@ -33,6 +34,7 @@ #include struct drm_gpuvm; +struct drm_gpuvm_bo; struct drm_gpuvm_ops; /** @@ -73,6 +75,12 @@ struct drm_gpuva { */ struct drm_gpuvm *vm; + /** + * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped + * &drm_gem_object + */ + struct drm_gpuvm_bo *vm_bo; + /** * @flags: the &drm_gpuva_flags for this mapping */ @@ -108,7 +116,7 @@ struct drm_gpuva { struct drm_gem_object *obj; /** - * @entry: the &list_head to attach this object to a &drm_gem_object + * @entry: the &list_head to attach this object to a &drm_gpuvm_bo */ struct list_head entry; } gem; @@ -141,7 +149,7 @@ struct drm_gpuva { int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va); void drm_gpuva_remove(struct drm_gpuva *va); -void drm_gpuva_link(struct drm_gpuva *va); +void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo); void drm_gpuva_unlink(struct drm_gpuva *va); struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm, @@ -188,10 +196,16 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va) * enum drm_gpuvm_flags - flags for struct drm_gpuvm */ enum drm_gpuvm_flags { + /** + * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the + * GPUVM's &dma_resv lock + */ + DRM_GPUVM_RESV_PROTECTED = BIT(0), + /** * @DRM_GPUVM_USERBITS: user defined bits */ - DRM_GPUVM_USERBITS = BIT(0), + DRM_GPUVM_USERBITS = BIT(1), }; /** @@ -302,6 +316,19 @@ bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range); struct drm_gem_object * drm_gpuvm_resv_object_alloc(struct drm_device *drm); +/** + * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is + * set + * @gpuvm: the &drm_gpuvm + * + * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise. + */ +static inline bool +drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm) +{ + return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED; +} + /** * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv * @gpuvm__: the &drm_gpuvm @@ -320,6 +347,12 @@ drm_gpuvm_resv_object_alloc(struct drm_device *drm); */ #define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj) +#define drm_gpuvm_resv_held(gpuvm__) \ + dma_resv_held(drm_gpuvm_resv(gpuvm__)) + +#define drm_gpuvm_resv_assert_held(gpuvm__) \ + dma_resv_assert_held(drm_gpuvm_resv(gpuvm__)) + #define drm_gpuvm_resv_held(gpuvm__) \ dma_resv_held(drm_gpuvm_resv(gpuvm__)) @@ -404,6 +437,125 @@ __drm_gpuva_next(struct drm_gpuva *va) #define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry) +/** + * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and + * &drm_gem_object combination + * + * This structure is an abstraction representing a &drm_gpuvm and + * &drm_gem_object combination. It serves as an indirection to accelerate + * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same + * &drm_gem_object. + * + * Furthermore it is used cache evicted GEM objects for a certain GPU-VM to + * accelerate validation. + * + * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once + * a GEM object is mapped first in a GPU-VM and release the instance once the + * last mapping of the GEM object in this GPU-VM is unmapped. + */ +struct drm_gpuvm_bo { + /** + * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference + * counted pointer. + */ + struct drm_gpuvm *vm; + + /** + * @obj: The &drm_gem_object being mapped in @vm. This is a reference + * counted pointer. 
+ */ + struct drm_gem_object *obj; + + /** + * @kref: The reference count for this &drm_gpuvm_bo. + */ + struct kref kref; + + /** + * @list: Structure containing all &list_heads. + */ + struct { + /** + * @gpuva: The list of linked &drm_gpuvas. + * + * It is safe to access entries from this list as long as the + * GEM's gpuva lock is held. See also struct drm_gem_object. + */ + struct list_head gpuva; + + /** + * @entry: Structure containing all &list_heads serving as + * entry. + */ + struct { + /** + * @gem: List entry to attach to the &drm_gem_objects + * gpuva list. + */ + struct list_head gem; + } entry; + } list; +}; + +struct drm_gpuvm_bo * +drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); + +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); +struct drm_gpuvm_bo * +drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo); + +/** + * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference + * @vm_bo: the &drm_gpuvm_bo to acquire the reference of + * + * This function acquires an additional reference to @vm_bo. It is illegal to + * call this without already holding a reference. No locks required. + */ +static inline struct drm_gpuvm_bo * +drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo) +{ + kref_get(&vm_bo->kref); + return vm_bo; +} + +void drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo); + +struct drm_gpuvm_bo * +drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj); + +/** + * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva + * @va__: &drm_gpuva structure to assign to in each iteration step + * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with + * + * This iterator walks over all &drm_gpuva structures associated with the + * &drm_gpuvm_bo. + * + * The caller must hold the GEM's gpuva lock. + */ +#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \ + list_for_each_entry(va__, &(vm_bo)->list.gpuva, gem.entry) + +/** + * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of + * &drm_gpuva + * @va__: &drm_gpuva structure to assign to in each iteration step + * @next__: &next &drm_gpuva to store the next step + * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with + * + * This iterator walks over all &drm_gpuva structures associated with the + * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence + * it is save against removal of elements. + * + * The caller must hold the GEM's gpuva lock. + */ +#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \ + list_for_each_entry_safe(va__, next__, &(vm_bo)->list.gpuva, gem.entry) + /** * enum drm_gpuva_op_type - GPU VA operation type * @@ -673,8 +825,7 @@ drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, u64 addr, u64 range); struct drm_gpuva_ops * -drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, - struct drm_gem_object *obj); +drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo); void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, struct drm_gpuva_ops *ops); @@ -726,6 +877,30 @@ struct drm_gpuvm_ops { */ void (*op_free)(struct drm_gpuva_op *op); + /** + * @vm_bo_alloc: called when the &drm_gpuvm allocates + * a struct drm_gpuvm_bo + * + * Some drivers may want to embed struct drm_gpuvm_bo into driver + * specific structures. By implementing this callback drivers can + * allocate memory accordingly. + * + * This callback is optional. 
+ */ + struct drm_gpuvm_bo *(*vm_bo_alloc)(void); + + /** + * @vm_bo_free: called when the &drm_gpuvm frees a + * struct drm_gpuvm_bo + * + * Some drivers may want to embed struct drm_gpuvm_bo into driver + * specific structures. By implementing this callback drivers can + * free the previously allocated memory accordingly. + * + * This callback is optional. + */ + void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo); + /** * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the * mapping once all previous steps were completed From 50c1a36f594bb3dd33f3f9386c5d960cd12327d8 Mon Sep 17 00:00:00 2001 From: Danilo Krummrich Date: Wed, 8 Nov 2023 01:12:41 +0100 Subject: [PATCH 099/117] drm/gpuvm: track/lock/validate external/evicted objects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the DRM GPUVM offers common infrastructure to track GPU VA allocations and mappings, generically connect GPU VA mappings to their backing buffers and perform more complex mapping operations on the GPU VA space. However, there are more design patterns commonly used by drivers, which can potentially be generalized in order to make the DRM GPUVM represent a basis for GPU-VM implementations. In this context, this patch aims at generalizing the following elements. 1) Provide a common dma-resv for GEM objects not being used outside of this GPU-VM. 2) Provide tracking of external GEM objects (GEM objects which are shared with other GPU-VMs). 3) Provide functions to efficiently lock all GEM objects dma-resv the GPU-VM contains mappings of. 4) Provide tracking of evicted GEM objects the GPU-VM contains mappings of, such that validation of evicted GEM objects is accelerated. 5) Provide some convinience functions for common patterns. Big thanks to Boris Brezillon for his help to figure out locking for drivers updating the GPU VA space within the fence signalling path. Acked-by: Christian König Reviewed-by: Boris Brezillon Reviewed-by: Thomas Hellström Suggested-by: Matthew Brost Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20231108001259.15123-12-dakr@redhat.com --- drivers/gpu/drm/drm_gpuvm.c | 633 ++++++++++++++++++++++++++++++++++++ include/drm/drm_gpuvm.h | 250 ++++++++++++++ 2 files changed, 883 insertions(+) diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index 7e193bca8bab0..54f5e8851de5d 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -82,6 +82,21 @@ * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this * particular combination. If not existent a new instance is created and linked * to the &drm_gem_object. + * + * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used + * as entry for the &drm_gpuvm's lists of external and evicted objects. Those + * lists are maintained in order to accelerate locking of dma-resv locks and + * validation of evicted objects bound in a &drm_gpuvm. For instance, all + * &drm_gem_object's &dma_resv of a given &drm_gpuvm can be locked by calling + * drm_gpuvm_exec_lock(). Once locked drivers can call drm_gpuvm_validate() in + * order to validate all evicted &drm_gem_objects. It is also possible to lock + * additional &drm_gem_objects by providing the corresponding parameters to + * drm_gpuvm_exec_lock() as well as open code the &drm_exec loop while making + * use of helper functions such as drm_gpuvm_prepare_range() or + * drm_gpuvm_prepare_objects(). 
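/*
 * Illustrative sketch only, not part of the patch: one possible submit path
 * built from the helpers named above. Everything except the drm_gpuvm_*,
 * drm_exec_* and dma_resv_* symbols is made up, and the drm_exec flag and
 * resv usages are just plausible choices.
 */
static int my_vm_bind_job_submit(struct drm_gpuvm *gpuvm,
                                 struct dma_fence *job_done_fence)
{
        struct drm_gpuvm_exec vm_exec = {
                .vm = gpuvm,
                .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
                .num_fences = 1,
        };
        int ret;

        ret = drm_gpuvm_exec_lock(&vm_exec);
        if (ret)
                return ret;

        ret = drm_gpuvm_exec_validate(&vm_exec);
        if (ret)
                goto out_unlock;

        /* ... push the job to the hardware / scheduler here ... */

        drm_gpuvm_exec_resv_add_fence(&vm_exec, job_done_fence,
                                      DMA_RESV_USAGE_BOOKKEEP,
                                      DMA_RESV_USAGE_BOOKKEEP);
out_unlock:
        drm_gpuvm_exec_unlock(&vm_exec);
        return ret;
}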
+ * + * Every bound &drm_gem_object is treated as external object when its &dma_resv + * structure is different than the &drm_gpuvm's common &dma_resv structure. */ /** @@ -429,6 +444,20 @@ * Subsequent calls to drm_gpuvm_bo_obtain() for the same &drm_gpuvm and * &drm_gem_object must be able to observe previous creations and destructions * of &drm_gpuvm_bos in order to keep instances unique. + * + * The &drm_gpuvm's lists for keeping track of external and evicted objects are + * protected against concurrent insertion / removal and iteration internally. + * + * However, drivers still need ensure to protect concurrent calls to functions + * iterating those lists, namely drm_gpuvm_prepare_objects() and + * drm_gpuvm_validate(). + * + * Alternatively, drivers can set the &DRM_GPUVM_RESV_PROTECTED flag to indicate + * that the corresponding &dma_resv locks are held in order to protect the + * lists. If &DRM_GPUVM_RESV_PROTECTED is set, internal locking is disabled and + * the corresponding lockdep checks are enabled. This is an optimization for + * drivers which are capable of taking the corresponding &dma_resv locks and + * hence do not require internal locking. */ /** @@ -641,6 +670,201 @@ * } */ +/** + * get_next_vm_bo_from_list() - get the next vm_bo element + * @__gpuvm: the &drm_gpuvm + * @__list_name: the name of the list we're iterating on + * @__local_list: a pointer to the local list used to store already iterated items + * @__prev_vm_bo: the previous element we got from get_next_vm_bo_from_list() + * + * This helper is here to provide lockless list iteration. Lockless as in, the + * iterator releases the lock immediately after picking the first element from + * the list, so list insertion deletion can happen concurrently. + * + * Elements popped from the original list are kept in a local list, so removal + * and is_empty checks can still happen while we're iterating the list. + */ +#define get_next_vm_bo_from_list(__gpuvm, __list_name, __local_list, __prev_vm_bo) \ + ({ \ + struct drm_gpuvm_bo *__vm_bo = NULL; \ + \ + drm_gpuvm_bo_put(__prev_vm_bo); \ + \ + spin_lock(&(__gpuvm)->__list_name.lock); \ + if (!(__gpuvm)->__list_name.local_list) \ + (__gpuvm)->__list_name.local_list = __local_list; \ + else \ + drm_WARN_ON((__gpuvm)->drm, \ + (__gpuvm)->__list_name.local_list != __local_list); \ + \ + while (!list_empty(&(__gpuvm)->__list_name.list)) { \ + __vm_bo = list_first_entry(&(__gpuvm)->__list_name.list, \ + struct drm_gpuvm_bo, \ + list.entry.__list_name); \ + if (kref_get_unless_zero(&__vm_bo->kref)) { \ + list_move_tail(&(__vm_bo)->list.entry.__list_name, \ + __local_list); \ + break; \ + } else { \ + list_del_init(&(__vm_bo)->list.entry.__list_name); \ + __vm_bo = NULL; \ + } \ + } \ + spin_unlock(&(__gpuvm)->__list_name.lock); \ + \ + __vm_bo; \ + }) + +/** + * for_each_vm_bo_in_list() - internal vm_bo list iterator + * @__gpuvm: the &drm_gpuvm + * @__list_name: the name of the list we're iterating on + * @__local_list: a pointer to the local list used to store already iterated items + * @__vm_bo: the struct drm_gpuvm_bo to assign in each iteration step + * + * This helper is here to provide lockless list iteration. Lockless as in, the + * iterator releases the lock immediately after picking the first element from the + * list, hence list insertion and deletion can happen concurrently. + * + * It is not allowed to re-assign the vm_bo pointer from inside this loop. 
+ * + * Typical use: + * + * struct drm_gpuvm_bo *vm_bo; + * LIST_HEAD(my_local_list); + * + * ret = 0; + * for_each_vm_bo_in_list(gpuvm, , &my_local_list, vm_bo) { + * ret = do_something_with_vm_bo(..., vm_bo); + * if (ret) + * break; + * } + * // Drop ref in case we break out of the loop. + * drm_gpuvm_bo_put(vm_bo); + * restore_vm_bo_list(gpuvm, , &my_local_list); + * + * + * Only used for internal list iterations, not meant to be exposed to the outside + * world. + */ +#define for_each_vm_bo_in_list(__gpuvm, __list_name, __local_list, __vm_bo) \ + for (__vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \ + __local_list, NULL); \ + __vm_bo; \ + __vm_bo = get_next_vm_bo_from_list(__gpuvm, __list_name, \ + __local_list, __vm_bo)) + +static void +__restore_vm_bo_list(struct drm_gpuvm *gpuvm, spinlock_t *lock, + struct list_head *list, struct list_head **local_list) +{ + /* Merge back the two lists, moving local list elements to the + * head to preserve previous ordering, in case it matters. + */ + spin_lock(lock); + if (*local_list) { + list_splice(*local_list, list); + *local_list = NULL; + } + spin_unlock(lock); +} + +/** + * restore_vm_bo_list() - move vm_bo elements back to their original list + * @__gpuvm: the &drm_gpuvm + * @__list_name: the name of the list we're iterating on + * + * When we're done iterating a vm_bo list, we should call restore_vm_bo_list() + * to restore the original state and let new iterations take place. + */ +#define restore_vm_bo_list(__gpuvm, __list_name) \ + __restore_vm_bo_list((__gpuvm), &(__gpuvm)->__list_name.lock, \ + &(__gpuvm)->__list_name.list, \ + &(__gpuvm)->__list_name.local_list) + +static void +cond_spin_lock(spinlock_t *lock, bool cond) +{ + if (cond) + spin_lock(lock); +} + +static void +cond_spin_unlock(spinlock_t *lock, bool cond) +{ + if (cond) + spin_unlock(lock); +} + +static void +__drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock, + struct list_head *entry, struct list_head *list) +{ + cond_spin_lock(lock, !!lock); + if (list_empty(entry)) + list_add_tail(entry, list); + cond_spin_unlock(lock, !!lock); +} + +/** + * drm_gpuvm_bo_list_add() - insert a vm_bo into the given list + * @__vm_bo: the &drm_gpuvm_bo + * @__list_name: the name of the list to insert into + * @__lock: whether to lock with the internal spinlock + * + * Inserts the given @__vm_bo into the list specified by @__list_name. + */ +#define drm_gpuvm_bo_list_add(__vm_bo, __list_name, __lock) \ + __drm_gpuvm_bo_list_add((__vm_bo)->vm, \ + __lock ? &(__vm_bo)->vm->__list_name.lock : \ + NULL, \ + &(__vm_bo)->list.entry.__list_name, \ + &(__vm_bo)->vm->__list_name.list) + +static void +__drm_gpuvm_bo_list_del(struct drm_gpuvm *gpuvm, spinlock_t *lock, + struct list_head *entry, bool init) +{ + cond_spin_lock(lock, !!lock); + if (init) { + if (!list_empty(entry)) + list_del_init(entry); + } else { + list_del(entry); + } + cond_spin_unlock(lock, !!lock); +} + +/** + * drm_gpuvm_bo_list_del_init() - remove a vm_bo from the given list + * @__vm_bo: the &drm_gpuvm_bo + * @__list_name: the name of the list to insert into + * @__lock: whether to lock with the internal spinlock + * + * Removes the given @__vm_bo from the list specified by @__list_name. + */ +#define drm_gpuvm_bo_list_del_init(__vm_bo, __list_name, __lock) \ + __drm_gpuvm_bo_list_del((__vm_bo)->vm, \ + __lock ? 
&(__vm_bo)->vm->__list_name.lock : \ + NULL, \ + &(__vm_bo)->list.entry.__list_name, \ + true) + +/** + * drm_gpuvm_bo_list_del() - remove a vm_bo from the given list + * @__vm_bo: the &drm_gpuvm_bo + * @__list_name: the name of the list to insert into + * @__lock: whether to lock with the internal spinlock + * + * Removes the given @__vm_bo from the list specified by @__list_name. + */ +#define drm_gpuvm_bo_list_del(__vm_bo, __list_name, __lock) \ + __drm_gpuvm_bo_list_del((__vm_bo)->vm, \ + __lock ? &(__vm_bo)->vm->__list_name.lock : \ + NULL, \ + &(__vm_bo)->list.entry.__list_name, \ + false) + #define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node) #define GPUVA_START(node) ((node)->va.addr) @@ -781,6 +1005,12 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, gpuvm->rb.tree = RB_ROOT_CACHED; INIT_LIST_HEAD(&gpuvm->rb.list); + INIT_LIST_HEAD(&gpuvm->extobj.list); + spin_lock_init(&gpuvm->extobj.lock); + + INIT_LIST_HEAD(&gpuvm->evict.list); + spin_lock_init(&gpuvm->evict.lock); + kref_init(&gpuvm->kref); gpuvm->name = name ? name : "unknown"; @@ -818,6 +1048,11 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm) drm_WARN(gpuvm->drm, !RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root), "GPUVA tree is not empty, potentially leaking memory.\n"); + drm_WARN(gpuvm->drm, !list_empty(&gpuvm->extobj.list), + "Extobj list should be empty.\n"); + drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list), + "Evict list should be empty.\n"); + drm_gem_object_put(gpuvm->r_obj); } @@ -850,6 +1085,343 @@ drm_gpuvm_put(struct drm_gpuvm *gpuvm) } EXPORT_SYMBOL_GPL(drm_gpuvm_put); +static int +__drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + struct drm_gpuvm_bo *vm_bo; + LIST_HEAD(extobjs); + int ret = 0; + + for_each_vm_bo_in_list(gpuvm, extobj, &extobjs, vm_bo) { + ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences); + if (ret) + break; + } + /* Drop ref in case we break out of the loop. */ + drm_gpuvm_bo_put(vm_bo); + restore_vm_bo_list(gpuvm, extobj); + + return ret; +} + +static int +drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + struct drm_gpuvm_bo *vm_bo; + int ret = 0; + + drm_gpuvm_resv_assert_held(gpuvm); + list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) { + ret = drm_exec_prepare_obj(exec, vm_bo->obj, num_fences); + if (ret) + break; + + if (vm_bo->evicted) + drm_gpuvm_bo_list_add(vm_bo, evict, false); + } + + return ret; +} + +/** + * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec locking context + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for all &drm_gem_objects the given + * &drm_gpuvm contains mappings of. + * + * Using this function directly, it is the drivers responsibility to call + * drm_exec_init() and drm_exec_fini() accordingly. + * + * Note: This function is safe against concurrent insertion and removal of + * external objects, however it is not safe against concurrent usage itself. + * + * Drivers need to make sure to protect this case with either an outer VM lock + * or by calling drm_gpuvm_prepare_vm() before this function within the + * drm_exec_until_all_locked() loop, such that the GPUVM's dma-resv lock ensures + * mutual exclusion. + * + * Returns: 0 on success, negative error code on failure. 
+ */ +int +drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + if (drm_gpuvm_resv_protected(gpuvm)) + return drm_gpuvm_prepare_objects_locked(gpuvm, exec, + num_fences); + else + return __drm_gpuvm_prepare_objects(gpuvm, exec, num_fences); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_objects); + +/** + * drm_gpuvm_prepare_range() - prepare all BOs mapped within a given range + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec locking context + * @addr: the start address within the VA space + * @range: the range to iterate within the VA space + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for all &drm_gem_objects mapped between @addr + * and @addr + @range. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec, + u64 addr, u64 range, unsigned int num_fences) +{ + struct drm_gpuva *va; + u64 end = addr + range; + int ret; + + drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { + struct drm_gem_object *obj = va->gem.obj; + + ret = drm_exec_prepare_obj(exec, obj, num_fences); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range); + +/** + * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * Acquires all dma-resv locks of all &drm_gem_objects the given + * &drm_gpuvm contains mappings of. + * + * Addionally, when calling this function with struct drm_gpuvm_exec::extra + * being set the driver receives the given @fn callback to lock additional + * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers + * would call drm_exec_prepare_obj() from within this callback. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec) +{ + struct drm_gpuvm *gpuvm = vm_exec->vm; + struct drm_exec *exec = &vm_exec->exec; + unsigned int num_fences = vm_exec->num_fences; + int ret; + + drm_exec_init(exec, vm_exec->flags); + + drm_exec_until_all_locked(exec) { + ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + + ret = drm_gpuvm_prepare_objects(gpuvm, exec, num_fences); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + + if (vm_exec->extra.fn) { + ret = vm_exec->extra.fn(vm_exec); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + } + } + + return 0; + +err: + drm_exec_fini(exec); + return ret; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock); + +static int +fn_lock_array(struct drm_gpuvm_exec *vm_exec) +{ + struct { + struct drm_gem_object **objs; + unsigned int num_objs; + } *args = vm_exec->extra.priv; + + return drm_exec_prepare_array(&vm_exec->exec, args->objs, + args->num_objs, vm_exec->num_fences); +} + +/** + * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * @objs: additional &drm_gem_objects to lock + * @num_objs: the number of additional &drm_gem_objects to lock + * + * Acquires all dma-resv locks of all &drm_gem_objects the given &drm_gpuvm + * contains mappings of, plus the ones given through @objs. + * + * Returns: 0 on success, negative error code on failure. 
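/*
 * Illustrative sketch only, not part of the patch: locking all of the VM's
 * objects plus additional BOs referenced by a job, using the array variant
 * documented above. The "my_*" names are made up for the example.
 */
static int my_lock_vm_and_job_bos(struct drm_gpuvm *gpuvm,
                                  struct drm_gpuvm_exec *vm_exec,
                                  struct drm_gem_object **job_objs,
                                  unsigned int num_job_objs)
{
        vm_exec->vm = gpuvm;
        vm_exec->flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
        vm_exec->num_fences = 1;

        return drm_gpuvm_exec_lock_array(vm_exec, job_objs, num_job_objs);
}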
+ */ +int +drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec, + struct drm_gem_object **objs, + unsigned int num_objs) +{ + struct { + struct drm_gem_object **objs; + unsigned int num_objs; + } args; + + args.objs = objs; + args.num_objs = num_objs; + + vm_exec->extra.fn = fn_lock_array; + vm_exec->extra.priv = &args; + + return drm_gpuvm_exec_lock(vm_exec); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_array); + +/** + * drm_gpuvm_exec_lock_range() - prepare all BOs mapped within a given range + * @vm_exec: the &drm_gpuvm_exec wrapper + * @addr: the start address within the VA space + * @range: the range to iterate within the VA space + * + * Acquires all dma-resv locks of all &drm_gem_objects mapped between @addr and + * @addr + @range. + * + * Returns: 0 on success, negative error code on failure. + */ +int +drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec, + u64 addr, u64 range) +{ + struct drm_gpuvm *gpuvm = vm_exec->vm; + struct drm_exec *exec = &vm_exec->exec; + int ret; + + drm_exec_init(exec, vm_exec->flags); + + drm_exec_until_all_locked(exec) { + ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range, + vm_exec->num_fences); + drm_exec_retry_on_contention(exec); + if (ret) + goto err; + } + + return ret; + +err: + drm_exec_fini(exec); + return ret; +} +EXPORT_SYMBOL_GPL(drm_gpuvm_exec_lock_range); + +static int +__drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo; + LIST_HEAD(evict); + int ret = 0; + + for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) { + ret = ops->vm_bo_validate(vm_bo, exec); + if (ret) + break; + } + /* Drop ref in case we break out of the loop. */ + drm_gpuvm_bo_put(vm_bo); + restore_vm_bo_list(gpuvm, evict); + + return ret; +} + +static int +drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + struct drm_gpuvm_bo *vm_bo, *next; + int ret = 0; + + drm_gpuvm_resv_assert_held(gpuvm); + + list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list, + list.entry.evict) { + ret = ops->vm_bo_validate(vm_bo, exec); + if (ret) + break; + + dma_resv_assert_held(vm_bo->obj->resv); + if (!vm_bo->evicted) + drm_gpuvm_bo_list_del_init(vm_bo, evict, false); + } + + return ret; +} + +/** + * drm_gpuvm_validate() - validate all BOs marked as evicted + * @gpuvm: the &drm_gpuvm to validate evicted BOs + * @exec: the &drm_exec instance used for locking the GPUVM + * + * Calls the &drm_gpuvm_ops::vm_bo_validate callback for all evicted buffer + * objects being mapped in the given &drm_gpuvm. + * + * Returns: 0 on success, negative error code on failure. 
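/*
 * Illustrative sketch only, not part of the patch: a vm_bo_validate()
 * callback as invoked by drm_gpuvm_validate() below. my_bo_validate()
 * stands in for the driver's (typically TTM based) validation routine
 * and is made up for the example; the callback is hooked up through
 * drm_gpuvm_ops.vm_bo_validate.
 */
static int my_vm_bo_validate(struct drm_gpuvm_bo *vm_bo,
                             struct drm_exec *exec)
{
        /* The BO's dma-resv is already locked by the surrounding drm_exec loop. */
        return my_bo_validate(vm_bo->obj, exec);
}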
+ */ +int +drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec) +{ + const struct drm_gpuvm_ops *ops = gpuvm->ops; + + if (unlikely(!ops || !ops->vm_bo_validate)) + return -EOPNOTSUPP; + + if (drm_gpuvm_resv_protected(gpuvm)) + return drm_gpuvm_validate_locked(gpuvm, exec); + else + return __drm_gpuvm_validate(gpuvm, exec); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_validate); + +/** + * drm_gpuvm_resv_add_fence - add fence to private and all extobj + * dma-resv + * @gpuvm: the &drm_gpuvm to add a fence to + * @exec: the &drm_exec locking context + * @fence: fence to add + * @private_usage: private dma-resv usage + * @extobj_usage: extobj dma-resv usage + */ +void +drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage) +{ + struct drm_gem_object *obj; + unsigned long index; + + drm_exec_for_each_locked_object(exec, index, obj) { + dma_resv_assert_held(obj->resv); + dma_resv_add_fence(obj->resv, fence, + drm_gpuvm_is_extobj(gpuvm, obj) ? + extobj_usage : private_usage); + } +} +EXPORT_SYMBOL_GPL(drm_gpuvm_resv_add_fence); + /** * drm_gpuvm_bo_create() - create a new instance of struct drm_gpuvm_bo * @gpuvm: The &drm_gpuvm the @obj is mapped in. @@ -883,6 +1455,9 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm, INIT_LIST_HEAD(&vm_bo->list.gpuva); INIT_LIST_HEAD(&vm_bo->list.entry.gem); + INIT_LIST_HEAD(&vm_bo->list.entry.extobj); + INIT_LIST_HEAD(&vm_bo->list.entry.evict); + return vm_bo; } EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create); @@ -900,6 +1475,9 @@ drm_gpuvm_bo_destroy(struct kref *kref) if (!lock) drm_gpuvm_resv_assert_held(gpuvm); + drm_gpuvm_bo_list_del(vm_bo, extobj, lock); + drm_gpuvm_bo_list_del(vm_bo, evict, lock); + drm_gem_gpuva_assert_lock_held(obj); list_del(&vm_bo->list.entry.gem); @@ -1043,6 +1621,61 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo) } EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain_prealloc); +/** + * drm_gpuvm_bo_extobj_add() - adds the &drm_gpuvm_bo to its &drm_gpuvm's + * extobj list + * @vm_bo: The &drm_gpuvm_bo to add to its &drm_gpuvm's the extobj list. + * + * Adds the given @vm_bo to its &drm_gpuvm's extobj list if not on the list + * already and if the corresponding &drm_gem_object is an external object, + * actually. + */ +void +drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo) +{ + struct drm_gpuvm *gpuvm = vm_bo->vm; + bool lock = !drm_gpuvm_resv_protected(gpuvm); + + if (!lock) + drm_gpuvm_resv_assert_held(gpuvm); + + if (drm_gpuvm_is_extobj(gpuvm, vm_bo->obj)) + drm_gpuvm_bo_list_add(vm_bo, extobj, lock); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add); + +/** + * drm_gpuvm_bo_evict() - add / remove a &drm_gpuvm_bo to / from the &drm_gpuvms + * evicted list + * @vm_bo: the &drm_gpuvm_bo to add or remove + * @evict: indicates whether the object is evicted + * + * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list. + */ +void +drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict) +{ + struct drm_gpuvm *gpuvm = vm_bo->vm; + struct drm_gem_object *obj = vm_bo->obj; + bool lock = !drm_gpuvm_resv_protected(gpuvm); + + dma_resv_assert_held(obj->resv); + vm_bo->evicted = evict; + + /* Can't add external objects to the evicted list directly if not using + * internal spinlocks, since in this case the evicted list is protected + * with the VM's common dma-resv lock. 
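/*
 * Illustrative sketch only, not part of the patch: a driver's map step
 * combining drm_gpuvm_bo_obtain(), drm_gpuvm_bo_extobj_add() and
 * drm_gpuva_link(). The "my_*" and "va" names are made up; locking
 * requirements (GEM gpuva lock, VM resv when DRM_GPUVM_RESV_PROTECTED is
 * set) and most error handling are omitted. The final drm_gpuvm_bo_put()
 * assumes drm_gpuva_link() takes its own reference on the vm_bo, as it
 * does in this series.
 */
static int my_map_step(struct drm_gpuvm *gpuvm, struct drm_gpuva *va,
                       struct drm_gem_object *obj)
{
        struct drm_gpuvm_bo *vm_bo;
        int ret;

        vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
        if (IS_ERR(vm_bo))
                return PTR_ERR(vm_bo);

        /* Track the BO as external if it does not share the VM's dma-resv. */
        drm_gpuvm_bo_extobj_add(vm_bo);

        ret = drm_gpuva_insert(gpuvm, va);
        if (ret)
                goto err_put;

        drm_gpuva_link(va, vm_bo);

err_put:
        drm_gpuvm_bo_put(vm_bo);
        return ret;
}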
+ */ + if (drm_gpuvm_is_extobj(gpuvm, obj) && !lock) + return; + + if (evict) + drm_gpuvm_bo_list_add(vm_bo, evict, lock); + else + drm_gpuvm_bo_list_del_init(vm_bo, evict, lock); +} +EXPORT_SYMBOL_GPL(drm_gpuvm_bo_evict); + static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va) diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h index b12fb22b0e226..8ca10461d8ac8 100644 --- a/include/drm/drm_gpuvm.h +++ b/include/drm/drm_gpuvm.h @@ -32,6 +32,7 @@ #include #include +#include struct drm_gpuvm; struct drm_gpuvm_bo; @@ -283,6 +284,50 @@ struct drm_gpuvm { * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv. */ struct drm_gem_object *r_obj; + + /** + * @extobj: structure holding the extobj list + */ + struct { + /** + * @list: &list_head storing &drm_gpuvm_bos serving as + * external object + */ + struct list_head list; + + /** + * @local_list: pointer to the local list temporarily storing + * entries from the external object list + */ + struct list_head *local_list; + + /** + * @lock: spinlock to protect the extobj list + */ + spinlock_t lock; + } extobj; + + /** + * @evict: structure holding the evict list and evict list lock + */ + struct { + /** + * @list: &list_head storing &drm_gpuvm_bos currently being + * evicted + */ + struct list_head list; + + /** + * @local_list: pointer to the local list temporarily storing + * entries from the evicted object list + */ + struct list_head *local_list; + + /** + * @lock: spinlock to protect the evict list + */ + spinlock_t lock; + } evict; }; void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name, @@ -359,6 +404,22 @@ drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm) #define drm_gpuvm_resv_assert_held(gpuvm__) \ dma_resv_assert_held(drm_gpuvm_resv(gpuvm__)) +/** + * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an + * external object + * @gpuvm: the &drm_gpuvm to check + * @obj: the &drm_gem_object to check + * + * Returns: true if the &drm_gem_object &dma_resv differs from the + * &drm_gpuvms &dma_resv, false otherwise + */ +static inline bool +drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, + struct drm_gem_object *obj) +{ + return obj && obj->resv != drm_gpuvm_resv(gpuvm); +} + static inline struct drm_gpuva * __drm_gpuva_next(struct drm_gpuva *va) { @@ -437,6 +498,144 @@ __drm_gpuva_next(struct drm_gpuva *va) #define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry) +/** + * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec + * + * This structure should be created on the stack as &drm_exec should be. + * + * Optionally, @extra can be set in order to lock additional &drm_gem_objects. + */ +struct drm_gpuvm_exec { + /** + * @exec: the &drm_exec structure + */ + struct drm_exec exec; + + /** + * @flags: the flags for the struct drm_exec + */ + uint32_t flags; + + /** + * @vm: the &drm_gpuvm to lock its DMA reservations + */ + struct drm_gpuvm *vm; + + /** + * @num_fences: the number of fences to reserve for the &dma_resv of the + * locked &drm_gem_objects + */ + unsigned int num_fences; + + /** + * @extra: Callback and corresponding private data for the driver to + * lock arbitrary additional &drm_gem_objects. + */ + struct { + /** + * @fn: The driver callback to lock additional &drm_gem_objects. 
+ */ + int (*fn)(struct drm_gpuvm_exec *vm_exec); + + /** + * @priv: driver private data for the @fn callback + */ + void *priv; + } extra; +}; + +/** + * drm_gpuvm_prepare_vm() - prepare the GPUVMs common dma-resv + * @gpuvm: the &drm_gpuvm + * @exec: the &drm_exec context + * @num_fences: the amount of &dma_fences to reserve + * + * Calls drm_exec_prepare_obj() for the GPUVMs dummy &drm_gem_object. + * + * Using this function directly, it is the drivers responsibility to call + * drm_exec_init() and drm_exec_fini() accordingly. + * + * Returns: 0 on success, negative error code on failure. + */ +static inline int +drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences) +{ + return drm_exec_prepare_obj(exec, gpuvm->r_obj, num_fences); +} + +int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + unsigned int num_fences); + +int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + u64 addr, u64 range, + unsigned int num_fences); + +int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec); + +int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec, + struct drm_gem_object **objs, + unsigned int num_objs); + +int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec, + u64 addr, u64 range); + +/** + * drm_gpuvm_exec_unlock() - lock all dma-resv of all assoiciated BOs + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * Releases all dma-resv locks of all &drm_gem_objects previously acquired + * through drm_gpuvm_exec_lock() or its variants. + * + * Returns: 0 on success, negative error code on failure. + */ +static inline void +drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec) +{ + drm_exec_fini(&vm_exec->exec); +} + +int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec); +void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm, + struct drm_exec *exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage); + +/** + * drm_gpuvm_exec_resv_add_fence() + * @vm_exec: the &drm_gpuvm_exec wrapper + * @fence: fence to add + * @private_usage: private dma-resv usage + * @extobj_usage: extobj dma-resv usage + * + * See drm_gpuvm_resv_add_fence(). + */ +static inline void +drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec, + struct dma_fence *fence, + enum dma_resv_usage private_usage, + enum dma_resv_usage extobj_usage) +{ + drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence, + private_usage, extobj_usage); +} + +/** + * drm_gpuvm_exec_validate() + * @vm_exec: the &drm_gpuvm_exec wrapper + * + * See drm_gpuvm_validate(). + */ +static inline int +drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec) +{ + return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec); +} + /** * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and * &drm_gem_object combination @@ -466,6 +665,12 @@ struct drm_gpuvm_bo { */ struct drm_gem_object *obj; + /** + * @evicted: Indicates whether the &drm_gem_object is evicted; field + * protected by the &drm_gem_object's dma-resv lock. + */ + bool evicted; + /** * @kref: The reference count for this &drm_gpuvm_bo. */ @@ -493,6 +698,18 @@ struct drm_gpuvm_bo { * gpuva list. */ struct list_head gem; + + /** + * @evict: List entry to attach to the &drm_gpuvms + * extobj list. + */ + struct list_head extobj; + + /** + * @evict: List entry to attach to the &drm_gpuvms evict + * list. 
+ */ + struct list_head evict; } entry; } list; }; @@ -527,6 +744,27 @@ struct drm_gpuvm_bo * drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj); +void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict); + +/** + * drm_gpuvm_bo_gem_evict() + * @obj: the &drm_gem_object + * @evict: indicates whether @obj is evicted + * + * See drm_gpuvm_bo_evict(). + */ +static inline void +drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict) +{ + struct drm_gpuvm_bo *vm_bo; + + drm_gem_gpuva_assert_lock_held(obj); + drm_gem_for_each_gpuvm_bo(vm_bo, obj) + drm_gpuvm_bo_evict(vm_bo, evict); +} + +void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo); + /** * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva * @va__: &drm_gpuva structure to assign to in each iteration step @@ -901,6 +1139,18 @@ struct drm_gpuvm_ops { */ void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo); + /** + * @vm_bo_validate: called from drm_gpuvm_validate() + * + * Drivers receive this callback for every evicted &drm_gem_object being + * mapped in the corresponding &drm_gpuvm. + * + * Typically, drivers would call their driver specific variant of + * ttm_bo_validate() from within this callback. + */ + int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo, + struct drm_exec *exec); + /** * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the * mapping once all previous steps were completed From b0e396d68fef9c9c050dfbb590cc0066441f65c7 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Fri, 10 Nov 2023 20:01:57 -0500 Subject: [PATCH 100/117] Revert "drm/sched: Define pr_fmt() for DRM using pr_*()" From Jani: The drm_print.[ch] facilities use very few pr_*() calls directly. The users of pr_*() calls do not necessarily include at all, and really don't have to. Even the ones that do include it, usually have includes first, and includes next. Notably, includes . And, of course, defines pr_fmt() itself if not already defined. No, it's encouraged not to use pr_*() at all, and prefer drm device based logging, or device based logging. This reverts commit 36245bd02e88e68ac5955c2958c968879d7b75a9. Signed-off-by: Luben Tuikov Link: https://patchwork.freedesktop.org/patch/msgid/878r75wzm9.fsf@intel.com Acked-by: Javier Martinez Canillas Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20231111024130.11464-2-ltuikov89@gmail.com --- include/drm/drm_print.h | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index e8fe60d0eb878..a93a387f8a1a1 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -26,20 +26,6 @@ #ifndef DRM_PRINT_H_ #define DRM_PRINT_H_ -/* Define this before including linux/printk.h, so that the format - * string in pr_*() macros is correctly set for DRM. If a file wants - * to define this to something else, it should do so before including - * this header file. - * - * It is encouraged code using pr_err() to prefix their format with - * the string "*ERROR* ", to make it easier to scan kernel logs. For - * instance, - * pr_err("*ERROR* ", args). 
- */ -#ifndef pr_fmt -#define pr_fmt(fmt) "[drm] " fmt -#endif - #include #include #include From 38b2d9d385102f430eb023aee1ed0ed37d9173f5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 9 Oct 2023 16:06:30 +0200 Subject: [PATCH 101/117] drm/format-helper: Cache buffers with struct drm_format_conv_state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hold temporary memory for format conversion in an instance of struct drm_format_conv_state. Update internal helpers of DRM's format-conversion code accordingly. Drivers will later be able to maintain this cache by themselves. Besides caching, struct drm_format_conv_state will be useful to hold additional information for format conversion, such as palette data or foreground/background colors. This will enable conversion from indexed color formats to component-based formats. v5: * improve documentation (Javier, Noralf) v3: * rename struct drm_xfrm_buf to struct drm_format_conv_state (Javier) * remove managed cleanup * add drm_format_conv_state_copy() for shadow-plane support Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Acked-by: Noralf Trønnes Tested-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231009141018.11291-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_format_helper.c | 118 +++++++++++++++++++++++++--- include/drm/drm_format_helper.h | 51 ++++++++++++ 2 files changed, 158 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index f93a4efcee909..28d013d427788 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -20,6 +20,97 @@ #include #include +/** + * drm_format_conv_state_init - Initialize format-conversion state + * @state: The state to initialize + * + * Clears all fields in struct drm_format_conv_state. The state will + * be empty with no preallocated resources. + */ +void drm_format_conv_state_init(struct drm_format_conv_state *state) +{ + state->tmp.mem = NULL; + state->tmp.size = 0; + state->tmp.preallocated = false; +} +EXPORT_SYMBOL(drm_format_conv_state_init); + +/** + * drm_format_conv_state_copy - Copy format-conversion state + * @state: Destination state + * @old_state: Source state + * + * Copies format-conversion state from @old_state to @state; except for + * temporary storage. + */ +void drm_format_conv_state_copy(struct drm_format_conv_state *state, + const struct drm_format_conv_state *old_state) +{ + /* + * So far, there's only temporary storage here, which we don't + * duplicate. Just clear the fields. + */ + state->tmp.mem = NULL; + state->tmp.size = 0; + state->tmp.preallocated = false; +} +EXPORT_SYMBOL(drm_format_conv_state_copy); + +/** + * drm_format_conv_state_reserve - Allocates storage for format conversion + * @state: The format-conversion state + * @new_size: The minimum allocation size + * @flags: Flags for kmalloc() + * + * Allocates at least @new_size bytes and returns a pointer to the memory + * range. After calling this function, previously returned memory blocks + * are invalid. It's best to collect all memory requirements of a format + * conversion and call this function once to allocate the range. + * + * Returns: + * A pointer to the allocated memory range, or NULL otherwise. 
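/*
 * Illustrative sketch only, not part of the patch: using preallocated
 * storage for the conversion state, similar to what the format-helper
 * tests do with a static page. The buffer and function names are made up.
 */
static u8 my_fmtcnv_buf[PAGE_SIZE];

static void my_convert_line(void)
{
        struct drm_format_conv_state state =
                DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(my_fmtcnv_buf,
                                                        sizeof(my_fmtcnv_buf));
        void *tmp;

        /* Returns NULL if more than the preallocated size is requested. */
        tmp = drm_format_conv_state_reserve(&state, 64 * 4, GFP_KERNEL);
        if (!tmp)
                return;

        /* ... use tmp as a temporary line buffer for format conversion ... */

        drm_format_conv_state_release(&state); /* no-op for preallocated memory */
}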
+ */ +void *drm_format_conv_state_reserve(struct drm_format_conv_state *state, + size_t new_size, gfp_t flags) +{ + void *mem; + + if (new_size <= state->tmp.size) + goto out; + else if (state->tmp.preallocated) + return NULL; + + mem = krealloc(state->tmp.mem, new_size, flags); + if (!mem) + return NULL; + + state->tmp.mem = mem; + state->tmp.size = new_size; + +out: + return state->tmp.mem; +} +EXPORT_SYMBOL(drm_format_conv_state_reserve); + +/** + * drm_format_conv_state_release - Releases an format-conversion storage + * @state: The format-conversion state + * + * Releases the memory range references by the format-conversion state. + * After this call, all pointers to the memory are invalid. Prefer + * drm_format_conv_state_init() for cleaning up and unloading a driver. + */ +void drm_format_conv_state_release(struct drm_format_conv_state *state) +{ + if (state->tmp.preallocated) + return; + + kfree(state->tmp.mem); + state->tmp.mem = NULL; + state->tmp.size = 0; +} +EXPORT_SYMBOL(drm_format_conv_state_release); + static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp) { return clip->y1 * pitch + clip->x1 * cpp; @@ -45,6 +136,7 @@ EXPORT_SYMBOL(drm_fb_clip_offset); static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip, bool vaddr_cached_hint, + struct drm_format_conv_state *state, void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels)) { unsigned long linepixels = drm_rect_width(clip); @@ -60,7 +152,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p * one line at a time. */ if (!vaddr_cached_hint) { - stmp = kmalloc(sbuf_len, GFP_KERNEL); + stmp = drm_format_conv_state_reserve(state, sbuf_len, GFP_KERNEL); if (!stmp) return -ENOMEM; } @@ -79,8 +171,6 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p dst += dst_pitch; } - kfree(stmp); - return 0; } @@ -88,6 +178,7 @@ static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_p static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize, const void *vaddr, const struct drm_framebuffer *fb, const struct drm_rect *clip, bool vaddr_cached_hint, + struct drm_format_conv_state *state, void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels)) { unsigned long linepixels = drm_rect_width(clip); @@ -101,9 +192,9 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign void *dbuf; if (vaddr_cached_hint) { - dbuf = kmalloc(dbuf_len, GFP_KERNEL); + dbuf = drm_format_conv_state_reserve(state, dbuf_len, GFP_KERNEL); } else { - dbuf = kmalloc(stmp_off + sbuf_len, GFP_KERNEL); + dbuf = drm_format_conv_state_reserve(state, stmp_off + sbuf_len, GFP_KERNEL); stmp = dbuf + stmp_off; } if (!dbuf) @@ -124,8 +215,6 @@ static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsign dst += dst_pitch; } - kfree(dbuf); - return 0; } @@ -139,17 +228,24 @@ static int drm_fb_xfrm(struct iosys_map *dst, static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = { 0, 0, 0, 0 }; + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; + int ret; if (!dst_pitch) dst_pitch = default_dst_pitch; /* TODO: handle src in I/O memory here */ if (dst[0].is_iomem) - return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0], - src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line); + ret 
= __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0], + src[0].vaddr, fb, clip, vaddr_cached_hint, &fmtcnv_state, + xfrm_line); else - return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0], - src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line); + ret = __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0], + src[0].vaddr, fb, clip, vaddr_cached_hint, &fmtcnv_state, + xfrm_line); + drm_format_conv_state_release(&fmtcnv_state); + + return ret; } /** diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 291deb09475bb..724a9baf7315d 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -15,6 +15,57 @@ struct drm_rect; struct iosys_map; +/** + * struct drm_format_conv_state - Stores format-conversion state + * + * DRM helpers for format conversion store temporary state in + * struct drm_xfrm_buf. The buffer's resources can be reused + * among multiple conversion operations. + * + * All fields are considered private. + */ +struct drm_format_conv_state { + struct { + void *mem; + size_t size; + bool preallocated; + } tmp; +}; + +#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \ + .tmp = { \ + .mem = (_mem), \ + .size = (_size), \ + .preallocated = (_preallocated), \ + } \ + } + +/** + * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state + * + * Initializes an instance of struct drm_format_conv_state to default values. + */ +#define DRM_FORMAT_CONV_STATE_INIT \ + __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false) + +/** + * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state + * @_mem: The preallocated memory area + * @_size: The number of bytes in _mem + * + * Initializes an instance of struct drm_format_conv_state to preallocated + * storage. The caller is responsible for releasing the provided memory range. + */ +#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \ + __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true) + +void drm_format_conv_state_init(struct drm_format_conv_state *state); +void drm_format_conv_state_copy(struct drm_format_conv_state *state, + const struct drm_format_conv_state *old_state); +void *drm_format_conv_state_reserve(struct drm_format_conv_state *state, + size_t new_size, gfp_t flags); +void drm_format_conv_state_release(struct drm_format_conv_state *state); + unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, const struct drm_rect *clip); From 903674588a48df25bb79b1bedbfc48450f1d5d8f Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 9 Oct 2023 16:06:31 +0200 Subject: [PATCH 102/117] drm/atomic-helper: Add format-conversion state to shadow-plane state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Store an instance of struct drm_format_conv_state in the shadow-plane state struct drm_shadow_plane_state. Many drivers with shadow planes use DRM's format helpers to copy or convert the framebuffer data to backing storage in the scanout buffer. The shadow plane provides the necessary state and manages the conversion's intermediate buffer memory. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Acked-by: Noralf Trønnes Tested-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231009141018.11291-3-tzimmermann@suse.de --- drivers/gpu/drm/drm_gem_atomic_helper.c | 9 +++++++++ include/drm/drm_gem_atomic_helper.h | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index 5d4b9cd077f7a..e440f458b6633 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -218,7 +218,14 @@ void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane, struct drm_shadow_plane_state *new_shadow_plane_state) { + struct drm_plane_state *plane_state = plane->state; + struct drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(plane_state); + __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base); + + drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state, + &new_shadow_plane_state->fmtcnv_state); } EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state); @@ -266,6 +273,7 @@ EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state); */ void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state) { + drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state); __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base); } EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state); @@ -302,6 +310,7 @@ void __drm_gem_reset_shadow_plane(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state) { __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); + drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); } EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 40b8b039518e0..3e01c619a25e0 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -5,6 +5,7 @@ #include +#include #include #include @@ -49,6 +50,15 @@ struct drm_shadow_plane_state { /** @base: plane state */ struct drm_plane_state base; + /** + * @fmtcnv_state: Format-conversion state + * + * Per-plane state for format conversion. + * Flags for copying shadow buffers into backend storage. Also holds + * temporary storage for format conversion. + */ + struct drm_format_conv_state fmtcnv_state; + /* Transitional state - do not export or duplicate */ /** From 4cd24d4b1a9548f42cdb7f449edc6f869a8ae730 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 9 Oct 2023 16:06:32 +0200 Subject: [PATCH 103/117] drm/format-helper: Pass format-conversion state to helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass an instance of struct drm_format_conv_state to DRM's format conversion helpers. Update all callers. Most drivers can use the format-conversion state from their shadow- plane state. The shadow plane's destroy function releases the allocated buffer. Drivers will later be able to allocate a buffer of appropriate size in their plane's atomic_check code. The gud driver uses a separate thread for committing updates. For now, the update worker contains its own format-conversion state. Images in the format-helper tests are small. The tests preallocate a static page for the temporary buffer. Unloading the module releases the memory. 
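As an illustration (not part of the patch; names other than the DRM helpers are made up, and damage clipping plus error handling are omitted), a shadow-plane driver's atomic_update could thread the per-plane conversion state into the helpers roughly like this:

static void my_plane_atomic_update(struct drm_plane *plane,
                                   struct drm_atomic_state *state)
{
        struct drm_plane_state *plane_state =
                drm_atomic_get_new_plane_state(state, plane);
        struct drm_shadow_plane_state *shadow_plane_state =
                to_drm_shadow_plane_state(plane_state);
        struct drm_framebuffer *fb = plane_state->fb;
        struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
        struct iosys_map dst = my_get_scanout_map(plane); /* made up */

        /* Temporary line buffers come from the plane's conversion state. */
        drm_fb_xrgb8888_to_rgb565(&dst, NULL, shadow_plane_state->data, fb,
                                  &clip, &shadow_plane_state->fmtcnv_state,
                                  false);
}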
v6: * update patch for ssd132x support v5: * avoid using unusupported shadow-plane state in repaper (Noralf) * fix documentation (Noralf, kernel test robot) v3: * store buffer in shadow-plane state (Javier, Maxime) * replace ARRAY_SIZE() with sizeof() (Jani) Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Tested-by: Javier Martinez Canillas # ssd130x Cc: Noralf Trønnes Cc: Javier Martinez Canillas Cc: Gerd Hoffmann Cc: David Lechner Link: https://patchwork.freedesktop.org/patch/msgid/20231009141018.11291-4-tzimmermann@suse.de --- drivers/gpu/drm/drm_format_helper.c | 123 ++++++++++-------- drivers/gpu/drm/drm_mipi_dbi.c | 19 ++- drivers/gpu/drm/gud/gud_pipe.c | 30 +++-- drivers/gpu/drm/solomon/ssd130x.c | 16 ++- .../gpu/drm/tests/drm_format_helper_test.c | 72 ++++++---- drivers/gpu/drm/tiny/cirrus.c | 3 +- drivers/gpu/drm/tiny/ili9225.c | 10 +- drivers/gpu/drm/tiny/ofdrm.c | 2 +- drivers/gpu/drm/tiny/repaper.c | 10 +- drivers/gpu/drm/tiny/simpledrm.c | 2 +- drivers/gpu/drm/tiny/st7586.c | 19 +-- include/drm/drm_format_helper.h | 30 +++-- include/drm/drm_mipi_dbi.h | 4 +- 13 files changed, 200 insertions(+), 140 deletions(-) diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 28d013d427788..b1be458ed4dda 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -223,29 +223,25 @@ static int drm_fb_xfrm(struct iosys_map *dst, const unsigned int *dst_pitch, const u8 *dst_pixsize, const struct iosys_map *src, const struct drm_framebuffer *fb, const struct drm_rect *clip, bool vaddr_cached_hint, + struct drm_format_conv_state *state, void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels)) { static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = { 0, 0, 0, 0 }; - struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; - int ret; if (!dst_pitch) dst_pitch = default_dst_pitch; /* TODO: handle src in I/O memory here */ if (dst[0].is_iomem) - ret = __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0], - src[0].vaddr, fb, clip, vaddr_cached_hint, &fmtcnv_state, - xfrm_line); + return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0], + src[0].vaddr, fb, clip, vaddr_cached_hint, state, + xfrm_line); else - ret = __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0], - src[0].vaddr, fb, clip, vaddr_cached_hint, &fmtcnv_state, - xfrm_line); - drm_format_conv_state_release(&fmtcnv_state); - - return ret; + return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0], + src[0].vaddr, fb, clip, vaddr_cached_hint, state, + xfrm_line); } /** @@ -331,6 +327,7 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * @cached: Source buffer is mapped cached (eg. not write-combined) + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and swaps per-pixel * bytes during the process. Destination and framebuffer formats must match. 
The @@ -345,7 +342,8 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels */ void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool cached) + const struct drm_rect *clip, bool cached, + struct drm_format_conv_state *state) { const struct drm_format_info *format = fb->format; u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8); @@ -364,7 +362,7 @@ void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, return; } - drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line); + drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, state, swab_line); } EXPORT_SYMBOL(drm_fb_swab); @@ -391,6 +389,7 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. The @@ -405,13 +404,13 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne */ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 1, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_rgb332_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332); @@ -460,6 +459,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * @swab: Swap bytes * * This function copies parts of a framebuffer to display memory and converts the @@ -475,7 +475,8 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, */ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool swab) + const struct drm_rect *clip, struct drm_format_conv_state *state, + bool swab) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, @@ -488,7 +489,7 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi else xfrm_line = drm_fb_xrgb8888_to_rgb565_line; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line); + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); @@ -517,6 +518,7 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. 
The parameters @dst, @dst_pitch and @@ -532,13 +534,13 @@ static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_xrgb1555_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555); @@ -569,6 +571,7 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. The parameters @dst, @dst_pitch and @@ -584,13 +587,13 @@ static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_argb1555_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555); @@ -621,6 +624,7 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. The parameters @dst, @dst_pitch and @@ -636,13 +640,13 @@ static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_rgba5551_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551); @@ -671,6 +675,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. 
The @@ -686,13 +691,13 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne */ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 3, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_rgb888_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); @@ -719,6 +724,7 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig * @src: Array of XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. The parameters @dst, @dst_pitch and @src refer @@ -734,13 +740,13 @@ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsig */ void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_argb8888_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888); @@ -765,13 +771,14 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_abgr8888_line); } @@ -795,13 +802,14 @@ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsig static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_xbgr8888_line); } @@ -831,6 +839,7 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. 
The @@ -846,13 +855,14 @@ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, un */ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_xrgb2101010_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010); @@ -884,6 +894,7 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts * the color format during the process. The parameters @dst, @dst_pitch and @@ -899,13 +910,14 @@ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, un */ void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, + struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 4, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_argb2101010_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010); @@ -935,6 +947,7 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. The @@ -954,13 +967,13 @@ static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned */ void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 1, }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, drm_fb_xrgb8888_to_gray8_line); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); @@ -974,6 +987,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); * @src: The framebuffer memory to copy from * @fb: The framebuffer to copy from * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory. 
If the * formats of the display and the framebuffer mismatch, the blit function @@ -992,7 +1006,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); */ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { uint32_t fb_format = fb->format->format; @@ -1000,44 +1014,44 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d drm_fb_memcpy(dst, dst_pitch, src, fb, clip); return 0; } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false); + drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); return 0; } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false); + drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); return 0; } else if (fb_format == DRM_FORMAT_XRGB8888) { if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false); + drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false); return 0; } else if (dst_format == DRM_FORMAT_XRGB1555) { - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ARGB1555) { - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_RGBA5551) { - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ARGB8888) { - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_XBGR8888) { - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ABGR8888) { - drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_XRGB2101010) { - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_ARGB2101010) { - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip); + drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_BGRX8888) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false); + drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); return 0; } } @@ -1074,6 +1088,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int * @src: Array of XRGB8888 source buffers * @fb: DRM framebuffer * @clip: Clip rectangle area to copy + * @state: Transform and conversion state * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. 
The @@ -1098,7 +1113,7 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int */ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = { 0, 0, 0, 0 @@ -1138,7 +1153,7 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc * Allocate a buffer to be used for both copying from the cma * memory and to store the intermediate grayscale line pixels. */ - src32 = kmalloc(len_src32 + linepixels, GFP_KERNEL); + src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL); if (!src32) return; @@ -1152,8 +1167,6 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc vaddr += fb->pitches[0]; mono += dst_pitch_0; } - - kfree(src32); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index e90f0bf895b33..daac649aabdbe 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -197,12 +197,14 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf); * @fb: The source framebuffer * @clip: Clipping rectangle of the area to be copied * @swap: When true, swap MSB/LSB of 16-bit values + * @fmtcnv_state: Format-conversion state * * Returns: * Zero on success, negative error code on failure. */ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap) + struct drm_rect *clip, bool swap, + struct drm_format_conv_state *fmtcnv_state) { struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst); @@ -215,12 +217,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer * switch (fb->format->format) { case DRM_FORMAT_RGB565: if (swap) - drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach); + drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach, + fmtcnv_state); else drm_fb_memcpy(&dst_map, NULL, src, fb, clip); break; case DRM_FORMAT_XRGB8888: - drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap); + drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap); break; default: drm_err_once(fb->dev, "Format is not supported: %p4cc\n", @@ -252,7 +255,7 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev, } static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *rect) + struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; @@ -270,7 +273,7 @@ static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, if (!dbi->dc || !full || swap || fb->format->format == DRM_FORMAT_XRGB8888) { tr = dbidev->tx_buf; - ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap); + ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state); if (ret) goto err_msg; } else { @@ -332,7 +335,8 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); drm_dev_exit(idx); } @@ -368,7 +372,8 @@ void 
mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev, if (!drm_dev_enter(&dbidev->drm, &idx)) return; - mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); backlight_enable(dbidev->backlight); drm_dev_exit(idx); diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index d2f199ea3c111..28171de84718e 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -51,7 +51,8 @@ static bool gud_is_big_endian(void) static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format, void *src, struct drm_framebuffer *fb, - struct drm_rect *rect) + struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state) { unsigned int block_width = drm_format_info_block_width(format, 0); unsigned int bits_per_pixel = 8 / block_width; @@ -75,7 +76,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format iosys_map_set_vaddr(&dst_map, buf); iosys_map_set_vaddr(&vmap, src); - drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect); + drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect, fmtcnv_state); pix8 = buf; for (y = 0; y < height; y++) { @@ -152,7 +153,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, const struct drm_format_info *format, struct drm_rect *rect, - struct gud_set_buffer_req *req) + struct gud_set_buffer_req *req, + struct drm_format_conv_state *fmtcnv_state) { u8 compression = gdrm->compression; struct iosys_map dst; @@ -178,23 +180,23 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, */ if (format != fb->format) { if (format->format == GUD_DRM_FORMAT_R1) { - len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect); + len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect, fmtcnv_state); if (!len) return -ENOMEM; } else if (format->format == DRM_FORMAT_R8) { - drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect); + drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect, fmtcnv_state); } else if (format->format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect); + drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state); } else if (format->format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, + drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state, gud_is_big_endian()); } else if (format->format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect); + drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state); } else { len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect); } } else if (gud_is_big_endian() && format->cpp[0] > 1) { - drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads); + drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads, fmtcnv_state); } else if (compression && cached_reads && pitch == fb->pitches[0]) { /* can compress directly from the framebuffer */ buf = vaddr + rect->y1 * pitch; @@ -266,7 +268,8 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len) static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, - const struct drm_format_info *format, struct drm_rect *rect) + const struct drm_format_info *format, struct drm_rect *rect, + struct drm_format_conv_state *fmtcnv_state) { struct 
gud_set_buffer_req req; size_t len, trlen; @@ -274,7 +277,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); - ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req); + ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req, fmtcnv_state); if (ret) return ret; @@ -318,6 +321,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb const struct iosys_map *src, bool cached_reads, struct drm_rect *damage) { + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; const struct drm_format_info *format; unsigned int i, lines; size_t pitch; @@ -340,7 +344,7 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb rect.y1 += i * lines; rect.y2 = min_t(u32, rect.y1 + lines, damage->y2); - ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect); + ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect, &fmtcnv_state); if (ret) { if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN && ret != -EPROTO) @@ -350,6 +354,8 @@ static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb break; } } + + drm_format_conv_state_release(&fmtcnv_state); } void gud_flush_work(struct work_struct *work) diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index e0174f82e3537..ee0a06c506926 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -808,7 +808,8 @@ static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array) static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *vmap, struct drm_rect *rect, - u8 *buf, u8 *data_array) + u8 *buf, u8 *data_array, + struct drm_format_conv_state *fmtcnv_state) { struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev); struct iosys_map dst; @@ -826,7 +827,7 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, return ret; iosys_map_set_vaddr(&dst, buf); - drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect); + drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); @@ -838,7 +839,8 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *vmap, struct drm_rect *rect, u8 *buf, - u8 *data_array) + u8 *data_array, + struct drm_format_conv_state *fmtcnv_state) { struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev); unsigned int dst_pitch = drm_rect_width(rect); @@ -855,7 +857,7 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb, return ret; iosys_map_set_vaddr(&dst, buf); - drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect); + drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); @@ -968,7 +970,8 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane, ssd130x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip, ssd130x_plane_state->buffer, - ssd130x_crtc_state->data_array); + ssd130x_crtc_state->data_array, + &shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); @@ -1002,7 +1005,8 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane, ssd132x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip, ssd130x_plane_state->buffer, - ssd130x_crtc_state->data_array); + ssd130x_crtc_state->data_array, + 
&shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index f6408e56f7861..08992636ec05f 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -20,6 +20,10 @@ #define TEST_USE_DEFAULT_PITCH 0 +static unsigned char fmtcnv_state_mem[PAGE_SIZE]; +static struct drm_format_conv_state fmtcnv_state = + DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(fmtcnv_state_mem, sizeof(fmtcnv_state_mem)); + struct convert_to_gray8_result { unsigned int dst_pitch; const u8 expected[TEST_BUF_SIZE]; @@ -630,8 +634,7 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_gray8(&dst, dst_pitch, &src, &fb, ¶ms->clip); - + drm_fb_xrgb8888_to_gray8(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } @@ -664,7 +667,7 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_rgb332(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_rgb332(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } @@ -697,12 +700,14 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, ¶ms->clip, false); + drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, ¶ms->clip, + &fmtcnv_state, false); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); buf = dst.vaddr; /* restore original value of buf */ - drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, true); + drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, + &fmtcnv_state, true); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size); @@ -711,7 +716,8 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); @@ -748,7 +754,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? 
NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -757,7 +763,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); @@ -794,7 +801,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -803,7 +810,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); @@ -840,7 +848,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -849,7 +857,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); @@ -890,7 +899,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); buf = dst.vaddr; /* restore original value of buf */ @@ -898,7 +907,8 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, ¶ms->clip, + &fmtcnv_state); KUNIT_EXPECT_FALSE(test, blit_result); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -933,7 +943,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? 
NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -942,7 +952,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); @@ -979,7 +990,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -989,7 +1000,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) int blit_result = 0; blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb, - ¶ms->clip); + ¶ms->clip, &fmtcnv_state); KUNIT_EXPECT_FALSE(test, blit_result); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -1024,7 +1035,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -1034,7 +1045,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test) int blit_result = 0; blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb, - ¶ms->clip); + ¶ms->clip, &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); @@ -1071,7 +1082,7 @@ static void drm_test_fb_xrgb8888_to_mono(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? NULL : &result->dst_pitch; - drm_fb_xrgb8888_to_mono(&dst, dst_pitch, &src, &fb, ¶ms->clip); + drm_fb_xrgb8888_to_mono(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } @@ -1104,7 +1115,7 @@ static void drm_test_fb_swab(struct kunit *test) const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ? 
NULL : &result->dst_pitch; - drm_fb_swab(&dst, dst_pitch, &src, &fb, ¶ms->clip, false); + drm_fb_swab(&dst, dst_pitch, &src, &fb, ¶ms->clip, false, &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); @@ -1114,7 +1125,7 @@ static void drm_test_fb_swab(struct kunit *test) int blit_result; blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN, - &src, &fb, ¶ms->clip); + &src, &fb, ¶ms->clip, &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_FALSE(test, blit_result); @@ -1123,7 +1134,8 @@ static void drm_test_fb_swab(struct kunit *test) buf = dst.vaddr; memset(buf, 0, dst_size); - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_FALSE(test, blit_result); @@ -1137,7 +1149,8 @@ static void drm_test_fb_swab(struct kunit *test) mock_format.format |= DRM_FORMAT_BIG_ENDIAN; fb.format = &mock_format; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_FALSE(test, blit_result); @@ -1175,7 +1188,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); @@ -1214,7 +1228,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test) int blit_result = 0; - blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, ¶ms->clip, + &fmtcnv_state); buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); @@ -1817,7 +1832,8 @@ static void drm_test_fb_memcpy(struct kunit *test) int blit_result; - blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, ¶ms->clip); + blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, ¶ms->clip, + &fmtcnv_state); KUNIT_EXPECT_FALSE(test, blit_result); for (size_t i = 0; i < fb.format->num_planes; i++) { diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index c5c34cd2edc1f..4e3a152f897ac 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -411,7 +411,8 @@ static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane, unsigned int offset = drm_fb_clip_offset(pitch, format, &damage); struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset); - drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, &damage); + drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, + &damage, &shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index 4ceb68ffac4be..dd8b0a181be94 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -78,7 +78,7 @@ static inline 
int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data) } static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *rect) + struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; @@ -98,7 +98,7 @@ static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, if (!dbi->dc || !full || swap || fb->format->format == DRM_FORMAT_XRGB8888) { tr = dbidev->tx_buf; - ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap); + ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap, fmtcnv_state); if (ret) goto err_msg; } else { @@ -171,7 +171,8 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); drm_dev_exit(idx); } @@ -281,7 +282,8 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe, ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017); - ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); out_exit: drm_dev_exit(idx); diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 2d999a0facdee..404c83032cecc 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -817,7 +817,7 @@ static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane, iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip)); drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb, - &damage); + &damage, &shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 73dd4f4289c20..8fd6758f5725f 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -509,7 +509,8 @@ static void repaper_get_temperature(struct repaper_epd *epd) epd->factored_stage_time = epd->stage_time * factor10x / 10; } -static int repaper_fb_dirty(struct drm_framebuffer *fb) +static int repaper_fb_dirty(struct drm_framebuffer *fb, + struct drm_format_conv_state *fmtcnv_state) { struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0); struct repaper_epd *epd = drm_to_epd(fb->dev); @@ -545,7 +546,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) iosys_map_set_vaddr(&dst, buf); iosys_map_set_vaddr(&vmap, dma_obj->vaddr); - drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip); + drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); @@ -830,13 +831,16 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state) { struct drm_plane_state *state = pipe->plane.state; + struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT; struct drm_rect rect; if (!pipe->crtc.state->active) return; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - repaper_fb_dirty(state->fb); + repaper_fb_dirty(state->fb, &fmtcnv_state); + + drm_format_conv_state_release(&fmtcnv_state); } static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 8bdaf66044fca..4916e9b0d46a3 100644 --- 
a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -609,7 +609,7 @@ static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip)); drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, - fb, &damage); + fb, &damage, &shadow_plane_state->fmtcnv_state); } drm_dev_exit(idx); diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index 3cf4eec16a813..7336fa1ddaed1 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -64,7 +64,8 @@ static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 }; static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) + struct drm_rect *clip, + struct drm_format_conv_state *fmtcnv_state) { size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1); unsigned int x, y; @@ -77,7 +78,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, iosys_map_set_vaddr(&dst_map, buf); iosys_map_set_vaddr(&vmap, vaddr); - drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip); + drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip, fmtcnv_state); src = buf; for (y = clip->y1; y < clip->y2; y++) { @@ -93,7 +94,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, } static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip) + struct drm_rect *clip, struct drm_format_conv_state *fmtcnv_state) { int ret; @@ -101,7 +102,7 @@ static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuf if (ret) return ret; - st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip); + st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip, fmtcnv_state); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); @@ -109,7 +110,7 @@ static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuf } static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *rect) + struct drm_rect *rect, struct drm_format_conv_state *fmtcnv_state) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); struct mipi_dbi *dbi = &dbidev->dbi; @@ -121,7 +122,7 @@ static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb, DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); - ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect); + ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect, fmtcnv_state); if (ret) goto err_msg; @@ -160,7 +161,8 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_state, state, &rect)) - st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); drm_dev_exit(idx); } @@ -238,7 +240,8 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe, msleep(100); - st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect); + st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect, + &shadow_plane_state->fmtcnv_state); mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); out_exit: diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index 724a9baf7315d..f13b34e0b752b 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -74,45 +74,49 @@ void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch, const struct drm_rect *clip); void 
drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool cached); + const struct drm_rect *clip, bool cached, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, bool swab); + const struct drm_rect *clip, struct drm_format_conv_state *state, + bool swab); void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, + struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *rect); + const struct drm_rect *clip, struct drm_format_conv_state *state); void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip); + const struct drm_rect *clip, struct drm_format_conv_state *state); size_t drm_fb_build_fourcc_list(struct drm_device *dev, const u32 *native_fourccs, size_t native_nfourccs, diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 816f196b3d4c5..e8e0f8d39f3a6 100644 --- a/include/drm/drm_mipi_dbi.h +++ 
b/include/drm/drm_mipi_dbi.h @@ -12,6 +12,7 @@ #include #include +struct drm_format_conv_state; struct drm_rect; struct gpio_desc; struct iosys_map; @@ -192,7 +193,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len); int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data, size_t len); int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swap); + struct drm_rect *clip, bool swap, + struct drm_format_conv_state *fmtcnv_state); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) From 58b184dcb3f4c52c15b6ff4fa2fa0d69d1e1313f Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 9 Oct 2023 16:06:33 +0200 Subject: [PATCH 104/117] drm/ofdrm: Preallocate format-conversion buffer in atomic_check Preallocate the format-conversion state's storage in the plane's atomic_check function if a format conversion is necessary. Allows the update to fail if no memory is available. Avoids the same allocation within atomic_update, which may not fail. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231009141018.11291-5-tzimmermann@suse.de --- drivers/gpu/drm/tiny/ofdrm.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 404c83032cecc..05a72473cfc65 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -758,7 +758,11 @@ static const uint64_t ofdrm_primary_plane_format_modifiers[] = { static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *new_state) { + struct drm_device *dev = plane->dev; + struct ofdrm_device *odev = ofdrm_device_of_dev(dev); struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane); + struct drm_shadow_plane_state *new_shadow_plane_state = + to_drm_shadow_plane_state(new_plane_state); struct drm_framebuffer *new_fb = new_plane_state->fb; struct drm_crtc *new_crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state = NULL; @@ -777,6 +781,16 @@ static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane, else if (!new_plane_state->visible) return 0; + if (new_fb->format != odev->format) { + void *buf; + + /* format conversion necessary; reserve buffer */ + buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state, + odev->pitch, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc); new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state); From e7c814d305e110d6db3f440d14490a8d0d9477d9 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 9 Oct 2023 16:06:34 +0200 Subject: [PATCH 105/117] drm/simpledrm: Preallocate format-conversion buffer in atomic_check Preallocate the format-conversion state's storage in the plane's atomic_check function if a format conversion is necessary. Allows the update to fail if no memory is available. Avoids the same allocation within atomic_update, which may not fail. Also inline drm_plane_helper_atomic_check() into the driver and thus return early for invisible planes. Avoids memory allocation entirely in this case. 
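In pattern form, the added check boils down to the sketch below. This is illustrative only, not the literal hunk: sdev_format and sdev_pitch stand for the device's native scanout format and pitch, and the fragment is assumed to sit inside the plane's atomic_check after drm_atomic_helper_check_plane_state() succeeded and the plane is known to be visible.

	struct drm_shadow_plane_state *new_shadow_plane_state =
		to_drm_shadow_plane_state(new_plane_state);

	if (new_plane_state->fb->format != sdev_format) {
		void *buf;

		/* format conversion necessary; reserve the buffer here, where failing is allowed */
		buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state,
						    sdev_pitch, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}

	return 0;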
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231009141018.11291-6-tzimmermann@suse.de --- drivers/gpu/drm/tiny/simpledrm.c | 41 +++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 4916e9b0d46a3..22a827102dfd0 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -579,6 +580,44 @@ static const uint64_t simpledrm_primary_plane_format_modifiers[] = { DRM_FORMAT_MOD_INVALID }; +static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_shadow_plane_state *new_shadow_plane_state = + to_drm_shadow_plane_state(new_plane_state); + struct drm_framebuffer *new_fb = new_plane_state->fb; + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + struct drm_device *dev = plane->dev; + struct simpledrm_device *sdev = simpledrm_device_of_dev(dev); + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; + + if (new_fb->format != sdev->format) { + void *buf; + + /* format conversion necessary; reserve buffer */ + buf = drm_format_conv_state_reserve(&new_shadow_plane_state->fmtcnv_state, + sdev->pitch, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + return 0; +} + static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -635,7 +674,7 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = drm_plane_helper_atomic_check, + .atomic_check = simpledrm_primary_plane_helper_atomic_check, .atomic_update = simpledrm_primary_plane_helper_atomic_update, .atomic_disable = simpledrm_primary_plane_helper_atomic_disable, }; From c669875041d038e91fa99766a07ec2d8bd6dcf6a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 9 Oct 2023 16:06:36 +0200 Subject: [PATCH 106/117] drm/ssd130x: Preallocate format-conversion buffer in atomic_check Preallocate the format-conversion state's storage in the plane's atomic_check function if a format conversion is necessary. Allows the update to fail if no memory is available. Avoids the same allocation within atomic_update, which may not fail. 
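The update side then hands the same state down to the format helpers, which can reuse the storage reserved in atomic_check instead of allocating. Roughly, for the SSD130x path (illustrative sketch; dst, dst_pitch, dst_clip and fb are the usual destination mapping, pitch, damage clip and framebuffer, and the SSD132x path calls drm_fb_xrgb8888_to_gray8() the same way):

	drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &shadow_plane_state->data[0],
				fb, &dst_clip, &shadow_plane_state->fmtcnv_state);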
v6: * update patch for ssd132x support Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Tested-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231009141018.11291-8-tzimmermann@suse.de --- drivers/gpu/drm/solomon/ssd130x.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index ee0a06c506926..bef293922b98f 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -873,6 +873,7 @@ static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane, struct ssd130x_device *ssd130x = drm_to_ssd130x(drm); struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state); + struct drm_shadow_plane_state *shadow_plane_state = &ssd130x_state->base; struct drm_crtc *crtc = plane_state->crtc; struct drm_crtc_state *crtc_state = NULL; const struct drm_format_info *fi; @@ -897,6 +898,16 @@ static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane, pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width); + if (plane_state->fb->format != fi) { + void *buf; + + /* format conversion necessary; reserve buffer */ + buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state, + pitch, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL); if (!ssd130x_state->buffer) return -ENOMEM; @@ -911,6 +922,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane, struct ssd130x_device *ssd130x = drm_to_ssd130x(drm); struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state); + struct drm_shadow_plane_state *shadow_plane_state = &ssd130x_state->base; struct drm_crtc *crtc = plane_state->crtc; struct drm_crtc_state *crtc_state = NULL; const struct drm_format_info *fi; @@ -935,6 +947,16 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane, pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width); + if (plane_state->fb->format != fi) { + void *buf; + + /* format conversion necessary; reserve buffer */ + buf = drm_format_conv_state_reserve(&shadow_plane_state->fmtcnv_state, + pitch, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + ssd130x_state->buffer = kcalloc(pitch, ssd130x->height, GFP_KERNEL); if (!ssd130x_state->buffer) return -ENOMEM; From 78dfe8a0ef779159a6ff51231d71b3a65c55ccf5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 1 Nov 2023 11:35:42 +0100 Subject: [PATCH 107/117] drm: Remove struct drm_flip_task from DRM interfaces Contain struct drm_flip_task and its helper functions drm_flip_work_allocate_task() and drm_flip_work_queue_task() within drm_flip_work.c There are no callers outside of the flip-work code. 
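What remains public is the work-level API only. A minimal usage sketch, assuming the existing drm_flip_work_init() setup helper; struct example_priv, the callback and the function names are illustrative:

	struct example_priv {
		struct drm_flip_work unref_work;
	};

	/* drm_flip_func_t callback; val is the pointer passed to drm_flip_work_queue() */
	static void example_unref_fb(struct drm_flip_work *work, void *val)
	{
		drm_framebuffer_put(val);
	}

	static void example_init(struct example_priv *priv)
	{
		drm_flip_work_init(&priv->unref_work, "fb_unref", example_unref_fb);
	}

	static void example_flip(struct example_priv *priv, struct drm_framebuffer *old_fb)
	{
		/* may be called from atomic context, e.g. while programming the flip */
		drm_flip_work_queue(&priv->unref_work, old_fb);
	}

	static void example_flip_done(struct example_priv *priv)
	{
		/* also atomic-safe; runs the queued tasks on the given workqueue */
		drm_flip_work_commit(&priv->unref_work, system_unbound_wq);
	}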
Signed-off-by: Thomas Zimmermann Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231101103618.23806-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_flip_work.c | 27 +++++++-------------------- include/drm/drm_flip_work.h | 18 ++---------------- 2 files changed, 9 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c index 060b753881a27..8c6090a90d564 100644 --- a/drivers/gpu/drm/drm_flip_work.c +++ b/drivers/gpu/drm/drm_flip_work.c @@ -27,14 +27,12 @@ #include #include -/** - * drm_flip_work_allocate_task - allocate a flip-work task - * @data: data associated to the task - * @flags: allocator flags - * - * Allocate a drm_flip_task object and attach private data to it. - */ -struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) +struct drm_flip_task { + struct list_head node; + void *data; +}; + +static struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) { struct drm_flip_task *task; @@ -44,18 +42,8 @@ struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags) return task; } -EXPORT_SYMBOL(drm_flip_work_allocate_task); -/** - * drm_flip_work_queue_task - queue a specific task - * @work: the flip-work - * @task: the task to handle - * - * Queues task, that will later be run (passed back to drm_flip_func_t - * func) on a work queue after drm_flip_work_commit() is called. - */ -void drm_flip_work_queue_task(struct drm_flip_work *work, - struct drm_flip_task *task) +static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task) { unsigned long flags; @@ -63,7 +51,6 @@ void drm_flip_work_queue_task(struct drm_flip_work *work, list_add_tail(&task->node, &work->queued); spin_unlock_irqrestore(&work->lock, flags); } -EXPORT_SYMBOL(drm_flip_work_queue_task); /** * drm_flip_work_queue - queue work diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index 21c3d512d25c4..6be4ba6f35143 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -33,9 +33,8 @@ * * Util to queue up work to run from work-queue context after flip/vblank. * Typically this can be used to defer unref of framebuffer's, cursor - * bo's, etc until after vblank. The APIs are all thread-safe. - * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called - * in atomic context. + * bo's, etc until after vblank. The APIs are all thread-safe. Moreover, + * drm_flip_work_queue can be called in atomic context. 
*/ struct drm_flip_work; @@ -51,16 +50,6 @@ struct drm_flip_work; */ typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); -/** - * struct drm_flip_task - flip work task - * @node: list entry element - * @data: data to pass to &drm_flip_work.func - */ -struct drm_flip_task { - struct list_head node; - void *data; -}; - /** * struct drm_flip_work - flip work queue * @name: debug name @@ -79,9 +68,6 @@ struct drm_flip_work { spinlock_t lock; }; -struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); -void drm_flip_work_queue_task(struct drm_flip_work *work, - struct drm_flip_task *task); void drm_flip_work_queue(struct drm_flip_work *work, void *val); void drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq); From ce64630dca7026ed9dc880dcd005977f662c99fe Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 1 Nov 2023 11:35:43 +0100 Subject: [PATCH 108/117] drm: Fix flip-task docs Say that drm_flip_work_commit() is safe to call in atomic context. Turn the name into a hyperlink. Suggested-by: Daniel Vetter Signed-off-by: Thomas Zimmermann Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20231101103618.23806-3-tzimmermann@suse.de --- include/drm/drm_flip_work.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index 6be4ba6f35143..1eef3283a109c 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -31,10 +31,10 @@ /** * DOC: flip utils * - * Util to queue up work to run from work-queue context after flip/vblank. + * Utility to queue up work to run from work-queue context after flip/vblank. * Typically this can be used to defer unref of framebuffer's, cursor - * bo's, etc until after vblank. The APIs are all thread-safe. Moreover, - * drm_flip_work_queue can be called in atomic context. + * bo's, etc until after vblank. The APIs are all thread-safe. Moreover, + * drm_flip_work_commit() can be called in atomic context. */ struct drm_flip_work; From 2e122362d25e1b977aa5c5c88c03956be4228e02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Winiarski?= Date: Tue, 24 Oct 2023 13:07:10 +0200 Subject: [PATCH 109/117] iosys-map: Rename locals used inside macros MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Widely used variable names can be used by macro users, potentially leading to name collisions. Suffix locals used inside the macros with an underscore, to reduce the collision potential. Suggested-by: Lucas De Marchi Signed-off-by: Michał Winiarski Reviewed-by: Lucas De Marchi Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20231024110710.3039807-1-michal.winiarski@intel.com --- include/linux/iosys-map.h | 44 +++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index 1b06d074ade03..e3649a6563dd8 100644 --- a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -168,9 +168,9 @@ struct iosys_map { * about the use of uninitialized variable. */ #define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \ - struct iosys_map copy = *map_; \ - iosys_map_incr(©, offset_); \ - copy; \ + struct iosys_map copy_ = *map_; \ + iosys_map_incr(©_, offset_); \ + copy_; \ }) /** @@ -391,14 +391,14 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * Returns: * The value read from the mapping. 
*/ -#define iosys_map_rd(map__, offset__, type__) ({ \ - type__ val; \ - if ((map__)->is_iomem) { \ - __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\ - } else { \ - __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \ - } \ - val; \ +#define iosys_map_rd(map__, offset__, type__) ({ \ + type__ val_; \ + if ((map__)->is_iomem) { \ + __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \ + } else { \ + __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \ + } \ + val_; \ }) /** @@ -413,13 +413,13 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * or if pointer may be unaligned (and problematic for the architecture * supported), use iosys_map_memcpy_to() */ -#define iosys_map_wr(map__, offset__, type__, val__) ({ \ - type__ val = (val__); \ - if ((map__)->is_iomem) { \ - __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\ - } else { \ - __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \ - } \ +#define iosys_map_wr(map__, offset__, type__, val__) ({ \ + type__ val_ = (val__); \ + if ((map__)->is_iomem) { \ + __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \ + } else { \ + __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \ + } \ }) /** @@ -485,9 +485,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * The value read from the mapping. */ #define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \ - struct_type__ *s; \ + struct_type__ *s_; \ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \ - typeof(s->field__)); \ + typeof(s_->field__)); \ }) /** @@ -508,9 +508,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * usage and memory layout. */ #define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ - struct_type__ *s; \ + struct_type__ *s_; \ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \ - typeof(s->field__), val__); \ + typeof(s_->field__), val__); \ }) #endif /* __IOSYS_MAP_H__ */ From 34b98a5f7a185c19715cc98c57d7e27b4785dfdf Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 12 Nov 2023 01:42:36 +0300 Subject: [PATCH 110/117] drm/virtio: Fix return value for VIRTGPU_CONTEXT_PARAM_DEBUG_NAME The strncpy_from_user() returns number of copied bytes and not zero on success. The non-zero return value of ioctl is treated as error. Return zero on success instead of the number of copied bytes. 
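The same pitfall applies to any ioctl path that forwards the strncpy_from_user() return value directly to userspace. The corrected pattern is roughly the following sketch; example_ctx, debug_name (a fixed-size char array) and the function name are illustrative:

	static int example_set_debug_name(struct example_ctx *ctx,
					  const char __user *uname)
	{
		long ret;

		ret = strncpy_from_user(ctx->debug_name, uname, sizeof(ctx->debug_name));
		if (ret < 0)
			return ret;	/* e.g. -EFAULT */

		/*
		 * A non-negative value is the number of copied bytes, not a
		 * success code, so fold it to 0 before it reaches userspace.
		 */
		return 0;
	}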
Fixes: 7add80126bce ("drm/uapi: add explicit virtgpu context debug name") Signed-off-by: Dmitry Osipenko Reviewed-by: Gurchetan Singh Link: https://patchwork.freedesktop.org/patch/msgid/20231111224236.890431-1-dmitry.osipenko@collabora.com --- drivers/gpu/drm/virtio/virtgpu_ioctl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 1e2042419f957..e4f76f3155504 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -665,6 +665,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, goto out_unlock; vfpriv->explicit_debug_name = true; + ret = 0; break; default: ret = -EINVAL; From f740f031cce7703a966ad0279d0f15973d61df16 Mon Sep 17 00:00:00 2001 From: Marco Pagani Date: Wed, 15 Nov 2023 11:35:36 +0100 Subject: [PATCH 111/117] drm/test: rearrange test entries in Kconfig and Makefile Rearrange entries in Kconfig and Makefile alphabetically to make room for additional KUnit test suites. Signed-off-by: Marco Pagani Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20231115103537.220760-1-marpagan@redhat.com --- drivers/gpu/drm/Kconfig | 10 +++++----- drivers/gpu/drm/tests/Makefile | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 3eee8636f847a..cdbc56e076498 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -75,15 +75,15 @@ config DRM_KUNIT_TEST_HELPERS config DRM_KUNIT_TEST tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS depends on DRM && KUNIT - select PRIME_NUMBERS + select DRM_BUDDY select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HELPER - select DRM_LIB_RANDOM - select DRM_KMS_HELPER - select DRM_BUDDY + select DRM_EXEC select DRM_EXPORT_FOR_TESTS if m + select DRM_KMS_HELPER select DRM_KUNIT_TEST_HELPERS - select DRM_EXEC + select DRM_LIB_RANDOM + select PRIME_NUMBERS default KUNIT_ALL_TESTS help This builds unit tests for DRM. This option is not useful for diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index ba7baa6226751..2645af241ff0b 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_connector_test.o \ drm_damage_helper_test.o \ drm_dp_mst_helper_test.o \ + drm_exec_test.o \ drm_format_helper_test.o \ drm_format_test.o \ drm_framebuffer_test.o \ @@ -17,7 +18,6 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_modes_test.o \ drm_plane_helper_test.o \ drm_probe_helper_test.o \ - drm_rect_test.o \ - drm_exec_test.o + drm_rect_test.o CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN) From 312292a4ee19dddcbc7cf58349596b6a7e39fcd0 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Thu, 2 Nov 2023 14:08:06 +0100 Subject: [PATCH 112/117] drm/client: Do not acquire module reference Do not acquire a reference on the module that provides a client's callback functions in drm_client_init(). The additional reference prevents the user from unloading the callback functions' module and thus creating dangling pointers. This is only necessary if there is no direct dependency between the caller of drm_client_init() and the provider of the callbacks in struct drm_client_funcs. If this case ever existed, it has been removed from the DRM code. Callers of drm_client_init() also provide the callback implementation. The lifetime of the clients is tied to the dependency chain's outer-most module, which is the hardware's DRM driver. 
Before client helpers could be unloaded, the driver module would have to be unloaded, which also unregisters all clients. Driver modules that set up DRM clients can now be unloaded. Signed-off-by: Thomas Zimmermann Acked-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20231102131056.7256-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_client.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index c3027115d0552..9403b3f576f7b 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -84,16 +83,13 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create) return -EOPNOTSUPP; - if (funcs && !try_module_get(funcs->owner)) - return -ENODEV; - client->dev = dev; client->name = name; client->funcs = funcs; ret = drm_client_modeset_create(client); if (ret) - goto err_put_module; + return ret; ret = drm_client_open(client); if (ret) @@ -105,10 +101,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, err_free: drm_client_modeset_free(client); -err_put_module: - if (funcs) - module_put(funcs->owner); - return ret; } EXPORT_SYMBOL(drm_client_init); @@ -177,8 +169,6 @@ void drm_client_release(struct drm_client_dev *client) drm_client_modeset_free(client); drm_client_close(client); drm_dev_put(dev); - if (client->funcs) - module_put(client->funcs->owner); } EXPORT_SYMBOL(drm_client_release); From 043a2d5d71d87553985816065da41cff72f5f2df Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 13 Nov 2023 18:02:48 +0100 Subject: [PATCH 113/117] accel/ivpu: Rename cons->rx_msg_lock Now the cons->rx_msg_lock also protects 'abort' field so rename the lock. 
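A minimal sketch of the rule the renamed lock documents, simplified from the receive and abort paths in this series and assuming the ivpu_ipc.h definitions from these patches: every access to rx_msg_list and to the aborted flag happens under cons->rx_lock, so a reader samples both consistently and cannot miss an abort that races with incoming messages.

/* reader side: list and flag are sampled under the same lock */
static bool example_consumer_has_work(struct ivpu_ipc_consumer *cons)
{
	bool ready;

	spin_lock_irq(&cons->rx_lock);
	ready = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ready;
}

/* writer side: mark the consumer aborted and wake any sleeping receiver */
static void example_consumer_abort(struct ivpu_ipc_consumer *cons)
{
	spin_lock_irq(&cons->rx_lock);
	cons->aborted = true;
	spin_unlock_irq(&cons->rx_lock);
	wake_up(&cons->rx_msg_wq);
}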
Signed-off-by: Stanislaw Gruszka Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231113170252.758137-2-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/ivpu_ipc.c | 27 +++++++++++++-------------- drivers/accel/ivpu/ivpu_ipc.h | 2 +- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 88453762c9d53..31ae0e71a8a36 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -150,7 +150,7 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, cons->tx_vpu_addr = 0; cons->request_id = 0; cons->aborted = false; - spin_lock_init(&cons->rx_msg_lock); + spin_lock_init(&cons->rx_lock); INIT_LIST_HEAD(&cons->rx_msg_list); init_waitqueue_head(&cons->rx_msg_wq); @@ -168,7 +168,7 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c list_del(&cons->link); spin_unlock_irq(&ipc->cons_list_lock); - spin_lock_irq(&cons->rx_msg_lock); + spin_lock_irq(&cons->rx_lock); list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) { list_del(&rx_msg->link); if (!cons->aborted) @@ -176,7 +176,7 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c atomic_dec(&ipc->rx_msg_count); kfree(rx_msg); } - spin_unlock_irq(&cons->rx_msg_lock); + spin_unlock_irq(&cons->rx_lock); ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr); } @@ -212,9 +212,9 @@ static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons) if (IS_KTHREAD()) ret |= (kthread_should_stop() || kthread_should_park()); - spin_lock_irq(&cons->rx_msg_lock); + spin_lock_irq(&cons->rx_lock); ret |= !list_empty(&cons->rx_msg_list) || cons->aborted; - spin_unlock_irq(&cons->rx_msg_lock); + spin_unlock_irq(&cons->rx_lock); return ret; } @@ -237,20 +237,19 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, if (wait_ret == 0) return -ETIMEDOUT; - spin_lock_irq(&cons->rx_msg_lock); + spin_lock_irq(&cons->rx_lock); rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link); if (!rx_msg) { - spin_unlock_irq(&cons->rx_msg_lock); + spin_unlock_irq(&cons->rx_lock); return -EAGAIN; } list_del(&rx_msg->link); if (cons->aborted) { - spin_unlock_irq(&cons->rx_msg_lock); + spin_unlock_irq(&cons->rx_lock); ret = -ECANCELED; goto out; } - - spin_unlock_irq(&cons->rx_msg_lock); + spin_unlock_irq(&cons->rx_lock); if (ipc_buf) memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf)); @@ -383,9 +382,9 @@ ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, rx_msg->ipc_hdr = ipc_hdr; rx_msg->jsm_msg = jsm_msg; - spin_lock_irqsave(&cons->rx_msg_lock, flags); + spin_lock_irqsave(&cons->rx_lock, flags); list_add_tail(&rx_msg->link, &cons->rx_msg_list); - spin_unlock_irqrestore(&cons->rx_msg_lock, flags); + spin_unlock_irqrestore(&cons->rx_lock, flags); wake_up(&cons->rx_msg_wq); } @@ -529,9 +528,9 @@ void ivpu_ipc_disable(struct ivpu_device *vdev) spin_lock_irqsave(&ipc->cons_list_lock, flags); list_for_each_entry_safe(cons, c, &ipc->cons_list, link) { - spin_lock(&cons->rx_msg_lock); + spin_lock(&cons->rx_lock); cons->aborted = true; - spin_unlock(&cons->rx_msg_lock); + spin_unlock(&cons->rx_lock); wake_up(&cons->rx_msg_wq); } spin_unlock_irqrestore(&ipc->cons_list_lock, flags); diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h index a380787f7222a..71b2e648dffd7 100644 --- a/drivers/accel/ivpu/ivpu_ipc.h +++ 
b/drivers/accel/ivpu/ivpu_ipc.h @@ -49,7 +49,7 @@ struct ivpu_ipc_consumer { u32 request_id; bool aborted; - spinlock_t rx_msg_lock; /* Protects rx_msg_list and aborted */ + spinlock_t rx_lock; /* Protects rx_msg_list and aborted */ struct list_head rx_msg_list; wait_queue_head_t rx_msg_wq; }; From 12fbf8ac39b08f810b767e2e3dc32258907ce890 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 13 Nov 2023 18:02:49 +0100 Subject: [PATCH 114/117] accel/ivpu: Do not use irqsave in ivpu_ipc_dispatch ivpu_ipc_dispatch is always called with irqs disabled. Add lockdep assertion and remove unneeded _irqsave/_irqrestore. Signed-off-by: Stanislaw Gruszka Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231113170252.758137-3-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/ivpu_ipc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 31ae0e71a8a36..781c7e40505a6 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -367,9 +367,9 @@ ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, { struct ivpu_ipc_info *ipc = vdev->ipc; struct ivpu_ipc_rx_msg *rx_msg; - unsigned long flags; lockdep_assert_held(&ipc->cons_list_lock); + lockdep_assert_irqs_disabled(); rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC); if (!rx_msg) { @@ -382,9 +382,9 @@ ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, rx_msg->ipc_hdr = ipc_hdr; rx_msg->jsm_msg = jsm_msg; - spin_lock_irqsave(&cons->rx_lock, flags); + spin_lock(&cons->rx_lock); list_add_tail(&rx_msg->link, &cons->rx_msg_list); - spin_unlock_irqrestore(&cons->rx_lock, flags); + spin_unlock(&cons->rx_lock); wake_up(&cons->rx_msg_wq); } From b3c10b71a61c3a0412b7c390831c88405be3d24f Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 13 Nov 2023 18:02:50 +0100 Subject: [PATCH 115/117] accel/ivpu: Do not use cons->aborted for job_done_thread This allow to simplify ivpu_ipc_receive() as now we do not have to process all messages in aborted state - they will be freed in ivpu_ipc_consumer_del(). 
Signed-off-by: Stanislaw Gruszka Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231113170252.758137-4-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/ivpu_ipc.c | 18 +++++++++--------- drivers/accel/ivpu/ivpu_job.c | 1 - 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 781c7e40505a6..1dd4413dc88fa 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -238,17 +238,16 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, return -ETIMEDOUT; spin_lock_irq(&cons->rx_lock); + if (cons->aborted) { + spin_unlock_irq(&cons->rx_lock); + return -ECANCELED; + } rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link); if (!rx_msg) { spin_unlock_irq(&cons->rx_lock); return -EAGAIN; } list_del(&rx_msg->link); - if (cons->aborted) { - spin_unlock_irq(&cons->rx_lock); - ret = -ECANCELED; - goto out; - } spin_unlock_irq(&cons->rx_lock); if (ipc_buf) @@ -266,7 +265,6 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, } ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); -out: atomic_dec(&ipc->rx_msg_count); kfree(rx_msg); @@ -528,9 +526,11 @@ void ivpu_ipc_disable(struct ivpu_device *vdev) spin_lock_irqsave(&ipc->cons_list_lock, flags); list_for_each_entry_safe(cons, c, &ipc->cons_list, link) { - spin_lock(&cons->rx_lock); - cons->aborted = true; - spin_unlock(&cons->rx_lock); + if (cons->channel != VPU_IPC_CHAN_JOB_RET) { + spin_lock(&cons->rx_lock); + cons->aborted = true; + spin_unlock(&cons->rx_lock); + } wake_up(&cons->rx_msg_wq); } spin_unlock_irqrestore(&ipc->cons_list_lock, flags); diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 02acd8dba02aa..77b1b8abadd6e 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -578,7 +578,6 @@ static int ivpu_job_done_thread(void *arg) ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET); while (!kthread_should_stop()) { - cons.aborted = false; timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr; jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa); ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout); From 58cde80f45a2b1683ea3c24a9a9a4b0e1005336b Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Mon, 13 Nov 2023 18:02:51 +0100 Subject: [PATCH 116/117] accel/ivpu: Use dedicated work for job timeout detection Change to use work for timeout detection. Needed for thread_irq conversion. 
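A self-contained sketch of the delayed-work watchdog pattern this patch switches to; the names are illustrative, not the driver's. The work is armed when a job is submitted, cancelled when the job completes, and treated as a hang if it ever fires:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct example_dev {
	struct delayed_work timeout_work;
	unsigned long timeout_ms;
};

static void example_timeout_fn(struct work_struct *work)
{
	struct example_dev *edev =
		container_of(work, struct example_dev, timeout_work.work);

	/* no completion arrived in time: report the hang and recover */
	pr_err("job timeout after %lu ms\n", edev->timeout_ms);
}

static void example_watchdog_init(struct example_dev *edev)
{
	INIT_DELAYED_WORK(&edev->timeout_work, example_timeout_fn);
}

static void example_watchdog_arm(struct example_dev *edev)
{
	/* a no-op if the work is already queued, as in the patch */
	queue_delayed_work(system_wq, &edev->timeout_work,
			   msecs_to_jiffies(edev->timeout_ms));
}

static void example_watchdog_disarm(struct example_dev *edev)
{
	cancel_delayed_work_sync(&edev->timeout_work);
}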
Signed-off-by: Stanislaw Gruszka Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231113170252.758137-5-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/ivpu_job.c | 24 +++++++++--------------- drivers/accel/ivpu/ivpu_pm.c | 31 +++++++++++++++++++++++++++++++ drivers/accel/ivpu/ivpu_pm.h | 3 +++ 3 files changed, 43 insertions(+), 15 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 77b1b8abadd6e..796366d679846 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -24,10 +24,6 @@ #define JOB_ID_CONTEXT_MASK GENMASK(31, 8) #define JOB_MAX_BUFFER_COUNT 65535 -static unsigned int ivpu_tdr_timeout_ms; -module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644); -MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default"); - static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) { ivpu_hw_reg_db_set(vdev, cmdq->db_id); @@ -342,6 +338,8 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status) ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n", job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status); + ivpu_stop_job_timeout_detection(vdev); + job_put(job); return 0; } @@ -357,6 +355,9 @@ static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg) ret = ivpu_job_done(vdev, payload->job_id, payload->job_status); if (ret) ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret); + + if (!ret && !xa_empty(&vdev->submitted_jobs_xa)) + ivpu_start_job_timeout_detection(vdev); } void ivpu_jobs_abort_all(struct ivpu_device *vdev) @@ -400,6 +401,8 @@ static int ivpu_direct_job_submission(struct ivpu_job *job) if (ret) goto err_xa_erase; + ivpu_start_job_timeout_detection(vdev); + ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n", job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id, job->engine_idx, cmdq->jobq->header.tail); @@ -569,7 +572,6 @@ static int ivpu_job_done_thread(void *arg) struct ivpu_device *vdev = (struct ivpu_device *)arg; struct ivpu_ipc_consumer cons; struct vpu_jsm_msg jsm_msg; - bool jobs_submitted; unsigned int timeout; int ret; @@ -578,18 +580,10 @@ static int ivpu_job_done_thread(void *arg) ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET); while (!kthread_should_stop()) { - timeout = ivpu_tdr_timeout_ms ? 
ivpu_tdr_timeout_ms : vdev->timeout.tdr; - jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa); ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout); - if (!ret) { + if (!ret) ivpu_job_done_message(vdev, &jsm_msg); - } else if (ret == -ETIMEDOUT) { - if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) { - ivpu_err(vdev, "TDR detected, timeout %d ms", timeout); - ivpu_hw_diagnose_failure(vdev); - ivpu_pm_schedule_recovery(vdev); - } - } + if (kthread_should_park()) { ivpu_dbg(vdev, JOB, "Parked %s\n", __func__); kthread_parkme(); diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index d915f3c2991f4..0af8864cb3b55 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -23,6 +23,10 @@ static bool ivpu_disable_recovery; module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644); MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected"); +static unsigned long ivpu_tdr_timeout_ms; +module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default"); + #define PM_RESCHEDULE_LIMIT 5 static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) @@ -141,6 +145,31 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev) } } +static void ivpu_job_timeout_work(struct work_struct *work) +{ + struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work); + struct ivpu_device *vdev = pm->vdev; + unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr; + + ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms); + ivpu_hw_diagnose_failure(vdev); + + ivpu_pm_schedule_recovery(vdev); +} + +void ivpu_start_job_timeout_detection(struct ivpu_device *vdev) +{ + unsigned long timeout_ms = ivpu_tdr_timeout_ms ? 
ivpu_tdr_timeout_ms : vdev->timeout.tdr; + + /* No-op if already queued */ + queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms)); +} + +void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev) +{ + cancel_delayed_work_sync(&vdev->pm->job_timeout_work); +} + int ivpu_pm_suspend_cb(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); @@ -317,6 +346,7 @@ void ivpu_pm_init(struct ivpu_device *vdev) atomic_set(&pm->in_reset, 0); INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work); + INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work); if (ivpu_disable_recovery) delay = -1; @@ -331,6 +361,7 @@ void ivpu_pm_init(struct ivpu_device *vdev) void ivpu_pm_cancel_recovery(struct ivpu_device *vdev) { + drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work)); cancel_work_sync(&vdev->pm->recovery_work); } diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h index 044db150be078..97c6e0b0aa42d 100644 --- a/drivers/accel/ivpu/ivpu_pm.h +++ b/drivers/accel/ivpu/ivpu_pm.h @@ -12,6 +12,7 @@ struct ivpu_device; struct ivpu_pm_info { struct ivpu_device *vdev; + struct delayed_work job_timeout_work; struct work_struct recovery_work; atomic_t in_reset; atomic_t reset_counter; @@ -37,5 +38,7 @@ int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev); void ivpu_rpm_put(struct ivpu_device *vdev); void ivpu_pm_schedule_recovery(struct ivpu_device *vdev); +void ivpu_start_job_timeout_detection(struct ivpu_device *vdev); +void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev); #endif /* __IVPU_PM_H__ */ From 3b434a3445fff3149128db0169da864d67057325 Mon Sep 17 00:00:00 2001 From: Jacek Lawrynowicz Date: Mon, 13 Nov 2023 18:02:52 +0100 Subject: [PATCH 117/117] accel/ivpu: Use threaded IRQ to handle JOB done messages Remove job_done thread and replace it with generic callback based mechanism. 
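A minimal sketch of the hard/threaded IRQ split introduced here, with illustrative names rather than the driver's exact code: the hard handler only acks the hardware and decides whether the thread must run, returning IRQ_WAKE_THREAD in that case; the threaded handler then does the heavier processing in process context, where sleeping is allowed.

#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/printk.h>

struct example_dev {
	atomic_t pending;	/* set by the hard handler, drained by the thread */
};

static irqreturn_t example_hard_handler(int irq, void *data)
{
	struct example_dev *edev = data;
	bool wake_thread;

	/* ack/clear hardware status here (device specific, omitted) */
	atomic_inc(&edev->pending);
	wake_thread = true;	/* only when the thread actually has work */

	return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t example_thread_handler(int irq, void *data)
{
	struct example_dev *edev = data;
	int n;

	/* runs in process context: callbacks that may sleep go here */
	n = atomic_xchg(&edev->pending, 0);
	pr_debug("processed %d queued events\n", n);

	return IRQ_HANDLED;
}

static int example_request_irq(struct device *dev, int irq,
			       struct example_dev *edev)
{
	/* same registration style as the patch: hard + thread handler pair */
	return devm_request_threaded_irq(dev, irq,
					 example_hard_handler,
					 example_thread_handler,
					 IRQF_NO_AUTOEN, "example", edev);
}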
Signed-off-by: Jacek Lawrynowicz Reviewed-by: Jeffrey Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20231113170252.758137-6-jacek.lawrynowicz@linux.intel.com --- drivers/accel/ivpu/ivpu_drv.c | 30 +++-- drivers/accel/ivpu/ivpu_drv.h | 3 +- drivers/accel/ivpu/ivpu_hw_37xx.c | 29 +++-- drivers/accel/ivpu/ivpu_hw_40xx.c | 30 ++--- drivers/accel/ivpu/ivpu_ipc.c | 196 +++++++++++++++++------------- drivers/accel/ivpu/ivpu_ipc.h | 22 +++- drivers/accel/ivpu/ivpu_job.c | 84 +++---------- drivers/accel/ivpu/ivpu_job.h | 6 +- 8 files changed, 199 insertions(+), 201 deletions(-) diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index bc15b06e17440..64927682161b2 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -318,13 +318,11 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev) if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST) return 0; - ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG); + ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL); timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot); while (1) { - ret = ivpu_ipc_irq_handler(vdev); - if (ret) - break; + ivpu_ipc_irq_handler(vdev, NULL); ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0); if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout)) break; @@ -378,7 +376,6 @@ int ivpu_boot(struct ivpu_device *vdev) enable_irq(vdev->irq); ivpu_hw_irq_enable(vdev); ivpu_ipc_enable(vdev); - ivpu_job_done_thread_enable(vdev); return 0; } @@ -388,7 +385,6 @@ void ivpu_prepare_for_reset(struct ivpu_device *vdev) disable_irq(vdev->irq); ivpu_ipc_disable(vdev); ivpu_mmu_disable(vdev); - ivpu_job_done_thread_disable(vdev); } int ivpu_shutdown(struct ivpu_device *vdev) @@ -429,6 +425,13 @@ static const struct drm_driver driver = { .minor = DRM_IVPU_DRIVER_MINOR, }; +static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg) +{ + struct ivpu_device *vdev = arg; + + return ivpu_ipc_irq_thread_handler(vdev); +} + static int ivpu_irq_init(struct ivpu_device *vdev) { struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); @@ -442,8 +445,8 @@ static int ivpu_irq_init(struct ivpu_device *vdev) vdev->irq = pci_irq_vector(pdev, 0); - ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler, - IRQF_NO_AUTOEN, DRIVER_NAME, vdev); + ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler, + ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev); if (ret) ivpu_err(vdev, "Failed to request an IRQ %d\n", ret); @@ -581,20 +584,15 @@ static int ivpu_dev_init(struct ivpu_device *vdev) ivpu_pm_init(vdev); - ret = ivpu_job_done_thread_init(vdev); - if (ret) - goto err_ipc_fini; - ret = ivpu_boot(vdev); if (ret) - goto err_job_done_thread_fini; + goto err_ipc_fini; + ivpu_job_done_consumer_init(vdev); ivpu_pm_enable(vdev); return 0; -err_job_done_thread_fini: - ivpu_job_done_thread_fini(vdev); err_ipc_fini: ivpu_ipc_fini(vdev); err_fw_fini: @@ -619,7 +617,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev) ivpu_shutdown(vdev); if (IVPU_WA(d3hot_after_power_off)) pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); - ivpu_job_done_thread_fini(vdev); + ivpu_job_done_consumer_fini(vdev); ivpu_pm_cancel_recovery(vdev); ivpu_ipc_fini(vdev); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index f1e7072449f1e..ebc4b84f27b20 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -17,6 +17,7 @@ #include #include "ivpu_mmu_context.h" +#include "ivpu_ipc.h" #define 
DRIVER_NAME "intel_vpu" #define DRIVER_DESC "Driver for Intel NPU (Neural Processing Unit)" @@ -120,7 +121,7 @@ struct ivpu_device { struct list_head bo_list; struct xarray submitted_jobs_xa; - struct task_struct *job_done_thread; + struct ivpu_ipc_consumer job_done_consumer; atomic64_t unique_id_counter; diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index a172cfb1c31f8..4ab1f14cf3604 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -891,17 +891,20 @@ static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev) } /* Handler for IRQs from VPU core (irqV) */ -static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq) +static bool ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread) { u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; + if (!status) + return false; + REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status); if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status)) ivpu_mmu_irq_evtq_handler(vdev); if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) - ivpu_ipc_irq_handler(vdev); + ivpu_ipc_irq_handler(vdev, wake_thread); if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); @@ -918,17 +921,17 @@ static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq) if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) ivpu_hw_37xx_irq_noc_firewall_handler(vdev); - return status; + return true; } /* Handler for IRQs from Buttress core (irqB) */ -static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) +static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) { u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; bool schedule_recovery = false; - if (status == 0) - return 0; + if (!status) + return false; if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", @@ -964,23 +967,27 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) if (schedule_recovery) ivpu_pm_schedule_recovery(vdev); - return status; + return true; } static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr) { struct ivpu_device *vdev = ptr; - u32 ret_irqv, ret_irqb; + bool irqv_handled, irqb_handled, wake_thread = false; REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); - ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq); - ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq); + irqv_handled = ivpu_hw_37xx_irqv_handler(vdev, irq, &wake_thread); + irqb_handled = ivpu_hw_37xx_irqb_handler(vdev, irq); /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); - return IRQ_RETVAL(ret_irqb | ret_irqv); + if (wake_thread) + return IRQ_WAKE_THREAD; + if (irqv_handled || irqb_handled) + return IRQ_HANDLED; + return IRQ_NONE; } static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c index f7e1f5c0667bd..eba2fdef2ace1 100644 --- a/drivers/accel/ivpu/ivpu_hw_40xx.c +++ b/drivers/accel/ivpu/ivpu_hw_40xx.c @@ -1047,13 +1047,12 @@ static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev) } /* Handler for IRQs from VPU core (irqV) */ -static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq) +static bool 
ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq, bool *wake_thread) { u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK; - irqreturn_t ret = IRQ_NONE; if (!status) - return IRQ_NONE; + return false; REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status); @@ -1061,7 +1060,7 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq) ivpu_mmu_irq_evtq_handler(vdev); if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status)) - ret |= ivpu_ipc_irq_handler(vdev); + ivpu_ipc_irq_handler(vdev, wake_thread); if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status)) ivpu_dbg(vdev, IRQ, "MMU sync complete\n"); @@ -1078,17 +1077,17 @@ static irqreturn_t ivpu_hw_40xx_irqv_handler(struct ivpu_device *vdev, int irq) if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status)) ivpu_hw_40xx_irq_noc_firewall_handler(vdev); - return ret; + return true; } /* Handler for IRQs from Buttress core (irqB) */ -static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) +static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) { bool schedule_recovery = false; u32 status = REGB_RD32(VPU_40XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK; - if (status == 0) - return IRQ_NONE; + if (!status) + return false; if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) ivpu_dbg(vdev, IRQ, "FREQ_CHANGE"); @@ -1140,26 +1139,27 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq) if (schedule_recovery) ivpu_pm_schedule_recovery(vdev); - return IRQ_HANDLED; + return true; } static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr) { + bool irqv_handled, irqb_handled, wake_thread = false; struct ivpu_device *vdev = ptr; - irqreturn_t ret = IRQ_NONE; REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); - ret |= ivpu_hw_40xx_irqv_handler(vdev, irq); - ret |= ivpu_hw_40xx_irqb_handler(vdev, irq); + irqv_handled = ivpu_hw_40xx_irqv_handler(vdev, irq, &wake_thread); + irqb_handled = ivpu_hw_40xx_irqb_handler(vdev, irq); /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); - if (ret & IRQ_WAKE_THREAD) + if (wake_thread) return IRQ_WAKE_THREAD; - - return ret; + if (irqv_handled || irqb_handled) + return IRQ_HANDLED; + return IRQ_NONE; } static void ivpu_hw_40xx_diagnose_failure(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 1dd4413dc88fa..e86621f16f85a 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -5,7 +5,6 @@ #include #include -#include #include #include @@ -18,19 +17,12 @@ #include "ivpu_pm.h" #define IPC_MAX_RX_MSG 128 -#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD) struct ivpu_ipc_tx_buf { struct ivpu_ipc_hdr ipc; struct vpu_jsm_msg jsm; }; -struct ivpu_ipc_rx_msg { - struct list_head link; - struct ivpu_ipc_hdr *ipc_hdr; - struct vpu_jsm_msg *jsm_msg; -}; - static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c, struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr) { @@ -140,8 +132,49 @@ static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr) ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr); } -void -ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel) +static void +ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg) +{ + struct ivpu_ipc_info *ipc = 
vdev->ipc; + struct ivpu_ipc_rx_msg *rx_msg; + + lockdep_assert_held(&ipc->cons_lock); + lockdep_assert_irqs_disabled(); + + rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC); + if (!rx_msg) { + ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); + return; + } + + atomic_inc(&ipc->rx_msg_count); + + rx_msg->ipc_hdr = ipc_hdr; + rx_msg->jsm_msg = jsm_msg; + rx_msg->callback = cons->rx_callback; + + if (rx_msg->callback) { + list_add_tail(&rx_msg->link, &ipc->cb_msg_list); + } else { + spin_lock(&cons->rx_lock); + list_add_tail(&rx_msg->link, &cons->rx_msg_list); + spin_unlock(&cons->rx_lock); + wake_up(&cons->rx_msg_wq); + } +} + +static void +ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg) +{ + list_del(&rx_msg->link); + ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); + atomic_dec(&vdev->ipc->rx_msg_count); + kfree(rx_msg); +} + +void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, + u32 channel, ivpu_ipc_rx_callback_t rx_callback) { struct ivpu_ipc_info *ipc = vdev->ipc; @@ -150,13 +183,14 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, cons->tx_vpu_addr = 0; cons->request_id = 0; cons->aborted = false; + cons->rx_callback = rx_callback; spin_lock_init(&cons->rx_lock); INIT_LIST_HEAD(&cons->rx_msg_list); init_waitqueue_head(&cons->rx_msg_wq); - spin_lock_irq(&ipc->cons_list_lock); + spin_lock_irq(&ipc->cons_lock); list_add_tail(&cons->link, &ipc->cons_list); - spin_unlock_irq(&ipc->cons_list_lock); + spin_unlock_irq(&ipc->cons_lock); } void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons) @@ -164,18 +198,13 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c struct ivpu_ipc_info *ipc = vdev->ipc; struct ivpu_ipc_rx_msg *rx_msg, *r; - spin_lock_irq(&ipc->cons_list_lock); + spin_lock_irq(&ipc->cons_lock); list_del(&cons->link); - spin_unlock_irq(&ipc->cons_list_lock); + spin_unlock_irq(&ipc->cons_lock); spin_lock_irq(&cons->rx_lock); - list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) { - list_del(&rx_msg->link); - if (!cons->aborted) - ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); - atomic_dec(&ipc->rx_msg_count); - kfree(rx_msg); - } + list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) + ivpu_ipc_rx_msg_del(vdev, rx_msg); spin_unlock_irq(&cons->rx_lock); ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr); @@ -205,15 +234,12 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v return ret; } -static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons) +static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons) { - int ret = 0; - - if (IS_KTHREAD()) - ret |= (kthread_should_stop() || kthread_should_park()); + bool ret; spin_lock_irq(&cons->rx_lock); - ret |= !list_empty(&cons->rx_msg_list) || cons->aborted; + ret = !list_empty(&cons->rx_msg_list) || cons->aborted; spin_unlock_irq(&cons->rx_lock); return ret; @@ -221,19 +247,18 @@ static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons) int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct ivpu_ipc_hdr *ipc_buf, - struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms) + struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms) { - struct ivpu_ipc_info *ipc = vdev->ipc; struct ivpu_ipc_rx_msg *rx_msg; int wait_ret, ret = 0; + if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n")) + return -EINVAL; + wait_ret = 
wait_event_timeout(cons->rx_msg_wq, ivpu_ipc_rx_need_wakeup(cons), msecs_to_jiffies(timeout_ms)); - if (IS_KTHREAD() && kthread_should_stop()) - return -EINTR; - if (wait_ret == 0) return -ETIMEDOUT; @@ -247,27 +272,23 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, spin_unlock_irq(&cons->rx_lock); return -EAGAIN; } - list_del(&rx_msg->link); - spin_unlock_irq(&cons->rx_lock); if (ipc_buf) memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf)); if (rx_msg->jsm_msg) { - u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload)); + u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg)); if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) { ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result); ret = -EBADMSG; } - if (ipc_payload) - memcpy(ipc_payload, rx_msg->jsm_msg, size); + if (jsm_msg) + memcpy(jsm_msg, rx_msg->jsm_msg, size); } - ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); - atomic_dec(&ipc->rx_msg_count); - kfree(rx_msg); - + ivpu_ipc_rx_msg_del(vdev, rx_msg); + spin_unlock_irq(&cons->rx_lock); return ret; } @@ -280,7 +301,7 @@ ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req struct ivpu_ipc_consumer cons; int ret; - ivpu_ipc_consumer_add(vdev, &cons, channel); + ivpu_ipc_consumer_add(vdev, &cons, channel, NULL); ret = ivpu_ipc_send(vdev, &cons, req); if (ret) { @@ -359,35 +380,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons return false; } -static void -ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, - struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg) -{ - struct ivpu_ipc_info *ipc = vdev->ipc; - struct ivpu_ipc_rx_msg *rx_msg; - - lockdep_assert_held(&ipc->cons_list_lock); - lockdep_assert_irqs_disabled(); - - rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC); - if (!rx_msg) { - ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg); - return; - } - - atomic_inc(&ipc->rx_msg_count); - - rx_msg->ipc_hdr = ipc_hdr; - rx_msg->jsm_msg = jsm_msg; - - spin_lock(&cons->rx_lock); - list_add_tail(&rx_msg->link, &cons->rx_msg_list); - spin_unlock(&cons->rx_lock); - - wake_up(&cons->rx_msg_wq); -} - -int ivpu_ipc_irq_handler(struct ivpu_device *vdev) +void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread) { struct ivpu_ipc_info *ipc = vdev->ipc; struct ivpu_ipc_consumer *cons; @@ -405,7 +398,7 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev) vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev); if (vpu_addr == REG_IO_ERROR) { ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n"); - return -EIO; + return; } ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr); @@ -435,15 +428,15 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev) } dispatched = false; - spin_lock_irqsave(&ipc->cons_list_lock, flags); + spin_lock_irqsave(&ipc->cons_lock, flags); list_for_each_entry(cons, &ipc->cons_list, link) { if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) { - ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg); + ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg); dispatched = true; break; } } - spin_unlock_irqrestore(&ipc->cons_list_lock, flags); + spin_unlock_irqrestore(&ipc->cons_lock, flags); if (!dispatched) { ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr); @@ -451,7 +444,28 @@ int ivpu_ipc_irq_handler(struct ivpu_device *vdev) } } - return 0; + if (wake_thread) + *wake_thread = !list_empty(&ipc->cb_msg_list); +} + +irqreturn_t 
ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev) +{ + struct ivpu_ipc_info *ipc = vdev->ipc; + struct ivpu_ipc_rx_msg *rx_msg, *r; + struct list_head cb_msg_list; + + INIT_LIST_HEAD(&cb_msg_list); + + spin_lock_irq(&ipc->cons_lock); + list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list); + spin_unlock_irq(&ipc->cons_lock); + + list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) { + rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg); + ivpu_ipc_rx_msg_del(vdev, rx_msg); + } + + return IRQ_HANDLED; } int ivpu_ipc_init(struct ivpu_device *vdev) @@ -486,10 +500,10 @@ int ivpu_ipc_init(struct ivpu_device *vdev) goto err_free_rx; } + spin_lock_init(&ipc->cons_lock); INIT_LIST_HEAD(&ipc->cons_list); - spin_lock_init(&ipc->cons_list_lock); + INIT_LIST_HEAD(&ipc->cb_msg_list); drmm_mutex_init(&vdev->drm, &ipc->lock); - ivpu_ipc_reset(vdev); return 0; @@ -502,6 +516,13 @@ int ivpu_ipc_init(struct ivpu_device *vdev) void ivpu_ipc_fini(struct ivpu_device *vdev) { + struct ivpu_ipc_info *ipc = vdev->ipc; + + drm_WARN_ON(&vdev->drm, ipc->on); + drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list)); + drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list)); + drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0); + ivpu_ipc_mem_fini(vdev); } @@ -518,22 +539,27 @@ void ivpu_ipc_disable(struct ivpu_device *vdev) { struct ivpu_ipc_info *ipc = vdev->ipc; struct ivpu_ipc_consumer *cons, *c; - unsigned long flags; + struct ivpu_ipc_rx_msg *rx_msg, *r; + + drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list)); mutex_lock(&ipc->lock); ipc->on = false; mutex_unlock(&ipc->lock); - spin_lock_irqsave(&ipc->cons_list_lock, flags); + spin_lock_irq(&ipc->cons_lock); list_for_each_entry_safe(cons, c, &ipc->cons_list, link) { - if (cons->channel != VPU_IPC_CHAN_JOB_RET) { - spin_lock(&cons->rx_lock); + spin_lock(&cons->rx_lock); + if (!cons->rx_callback) cons->aborted = true; - spin_unlock(&cons->rx_lock); - } + list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) + ivpu_ipc_rx_msg_del(vdev, rx_msg); + spin_unlock(&cons->rx_lock); wake_up(&cons->rx_msg_wq); } - spin_unlock_irqrestore(&ipc->cons_list_lock, flags); + spin_unlock_irq(&ipc->cons_lock); + + drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0); } void ivpu_ipc_reset(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h index 71b2e648dffd7..40ca3cc4e61f7 100644 --- a/drivers/accel/ivpu/ivpu_ipc.h +++ b/drivers/accel/ivpu/ivpu_ipc.h @@ -42,12 +42,24 @@ struct ivpu_ipc_hdr { u8 status; } __packed __aligned(IVPU_IPC_ALIGNMENT); +typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev, + struct ivpu_ipc_hdr *ipc_hdr, + struct vpu_jsm_msg *jsm_msg); + +struct ivpu_ipc_rx_msg { + struct list_head link; + struct ivpu_ipc_hdr *ipc_hdr; + struct vpu_jsm_msg *jsm_msg; + ivpu_ipc_rx_callback_t callback; +}; + struct ivpu_ipc_consumer { struct list_head link; u32 channel; u32 tx_vpu_addr; u32 request_id; bool aborted; + ivpu_ipc_rx_callback_t rx_callback; spinlock_t rx_lock; /* Protects rx_msg_list and aborted */ struct list_head rx_msg_list; @@ -61,8 +73,9 @@ struct ivpu_ipc_info { atomic_t rx_msg_count; - spinlock_t cons_list_lock; /* Protects cons_list */ + spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */ struct list_head cons_list; + struct list_head cb_msg_list; atomic_t request_id; struct mutex lock; /* Lock on status */ @@ -76,14 +89,15 @@ void ivpu_ipc_enable(struct ivpu_device *vdev); void ivpu_ipc_disable(struct ivpu_device *vdev); void 
ivpu_ipc_reset(struct ivpu_device *vdev); -int ivpu_ipc_irq_handler(struct ivpu_device *vdev); +void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread); +irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev); void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, - u32 channel); + u32 channel, ivpu_ipc_rx_callback_t callback); void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons); int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, - struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload, + struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms); int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req, diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 796366d679846..7206cf9cdb4a4 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -7,7 +7,6 @@ #include #include -#include #include #include #include @@ -344,22 +343,6 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status) return 0; } -static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg) -{ - struct vpu_ipc_msg_payload_job_done *payload; - struct vpu_jsm_msg *job_ret_msg = msg; - int ret; - - payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload; - - ret = ivpu_job_done(vdev, payload->job_id, payload->job_status); - if (ret) - ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret); - - if (!ret && !xa_empty(&vdev->submitted_jobs_xa)) - ivpu_start_job_timeout_detection(vdev); -} - void ivpu_jobs_abort_all(struct ivpu_device *vdev) { struct ivpu_job *job; @@ -567,65 +550,36 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return ret; } -static int ivpu_job_done_thread(void *arg) +static void +ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, + struct vpu_jsm_msg *jsm_msg) { - struct ivpu_device *vdev = (struct ivpu_device *)arg; - struct ivpu_ipc_consumer cons; - struct vpu_jsm_msg jsm_msg; - unsigned int timeout; + struct vpu_ipc_msg_payload_job_done *payload; int ret; - ivpu_dbg(vdev, JOB, "Started %s\n", __func__); - - ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET); - - while (!kthread_should_stop()) { - ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout); - if (!ret) - ivpu_job_done_message(vdev, &jsm_msg); - - if (kthread_should_park()) { - ivpu_dbg(vdev, JOB, "Parked %s\n", __func__); - kthread_parkme(); - ivpu_dbg(vdev, JOB, "Unparked %s\n", __func__); - } + if (!jsm_msg) { + ivpu_err(vdev, "IPC message has no JSM payload\n"); + return; } - ivpu_ipc_consumer_del(vdev, &cons); - - ivpu_jobs_abort_all(vdev); - - ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__); - return 0; -} - -int ivpu_job_done_thread_init(struct ivpu_device *vdev) -{ - struct task_struct *thread; - - thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread"); - if (IS_ERR(thread)) { - ivpu_err(vdev, "Failed to start job completion thread\n"); - return -EIO; + if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) { + ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result); + return; } - vdev->job_done_thread = thread; - - return 0; -} - -void ivpu_job_done_thread_fini(struct ivpu_device *vdev) -{ - kthread_unpark(vdev->job_done_thread); - kthread_stop(vdev->job_done_thread); + payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload; + ret = 
ivpu_job_done(vdev, payload->job_id, payload->job_status); + if (!ret && !xa_empty(&vdev->submitted_jobs_xa)) + ivpu_start_job_timeout_detection(vdev); } -void ivpu_job_done_thread_disable(struct ivpu_device *vdev) +void ivpu_job_done_consumer_init(struct ivpu_device *vdev) { - kthread_park(vdev->job_done_thread); + ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer, + VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback); } -void ivpu_job_done_thread_enable(struct ivpu_device *vdev) +void ivpu_job_done_consumer_fini(struct ivpu_device *vdev) { - kthread_unpark(vdev->job_done_thread); + ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer); } diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 4b604c90041ea..45a2f2ec82e5b 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -59,10 +59,8 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv); void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev); -int ivpu_job_done_thread_init(struct ivpu_device *vdev); -void ivpu_job_done_thread_fini(struct ivpu_device *vdev); -void ivpu_job_done_thread_disable(struct ivpu_device *vdev); -void ivpu_job_done_thread_enable(struct ivpu_device *vdev); +void ivpu_job_done_consumer_init(struct ivpu_device *vdev); +void ivpu_job_done_consumer_fini(struct ivpu_device *vdev); void ivpu_jobs_abort_all(struct ivpu_device *vdev);
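For reference, a usage sketch of the callback-style consumer API as it stands after this patch. The registration call and the callback signature are the ones added above; the callback body and the my_* names are hypothetical:

static void my_done_callback(struct ivpu_device *vdev,
			     struct ivpu_ipc_hdr *ipc_hdr,
			     struct vpu_jsm_msg *jsm_msg)
{
	/* invoked from ivpu_ipc_irq_thread_handler() for each message
	 * received on the registered channel */
}

static void my_consumer_init(struct ivpu_device *vdev,
			     struct ivpu_ipc_consumer *cons)
{
	/* passing a callback makes this an async consumer: messages are
	 * handled in the IRQ thread instead of waking ivpu_ipc_receive() */
	ivpu_ipc_consumer_add(vdev, cons, VPU_IPC_CHAN_JOB_RET, my_done_callback);
}

static void my_consumer_fini(struct ivpu_device *vdev,
			     struct ivpu_ipc_consumer *cons)
{
	ivpu_ipc_consumer_del(vdev, cons);
}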