--- /dev/null
+digraph T {
+ /* Make sure our payloads are always drawn below the driver node */
+ subgraph cluster_driver {
+ fillcolor = grey;
+ style = filled;
+ driver -> {payload1, payload2} [dir=none];
+ }
+
+ /* Driver malloc references */
+ edge [style=dashed];
+ driver -> port1;
+ driver -> port2;
+ driver -> port3:e;
+ driver -> port4;
+
+ payload1:s -> port1:e;
+ payload2:s -> port3:e;
+ edge [style=""];
+
+ subgraph cluster_topology {
+ label="Topology Manager";
+ labelloc=bottom;
+
+ /* Topology references */
+ mstb1 -> {port1, port2};
+ port1 -> mstb2;
+ port2 -> mstb3 -> {port3, port4};
+ port3 -> mstb4;
+
+ /* Malloc references */
+ edge [style=dashed;dir=back];
+ mstb1 -> {port1, port2};
+ port1 -> mstb2;
+ port2 -> mstb3 -> {port3, port4};
+ port3 -> mstb4;
+ }
+
+ driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
+
+ payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
+ payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue];
+
+ mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen;shape=oval];
+ mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen;shape=oval];
+ mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;shape=oval];
+ mstb4 [label="MSTB #4";style=filled;fillcolor=palegreen;shape=oval];
+
+ port1 [label="Port #1";shape=oval];
+ port2 [label="Port #2";shape=oval];
+ port3 [label="Port #3";shape=oval];
+ port4 [label="Port #4";shape=oval];
+}
--- /dev/null
+digraph T {
+ /* Make sure our payloads are always drawn below the driver node */
+ subgraph cluster_driver {
+ fillcolor = grey;
+ style = filled;
+ driver -> {payload1, payload2} [dir=none];
+ }
+
+ /* Driver malloc references */
+ edge [style=dashed];
+ driver -> port1;
+ driver -> port2;
+ driver -> port3:e;
+ driver -> port4 [color=red];
+
+ payload1:s -> port1:e;
+ payload2:s -> port3:e;
+ edge [style=""];
+
+ subgraph cluster_topology {
+ label="Topology Manager";
+ labelloc=bottom;
+
+ /* Topology references */
+ mstb1 -> {port1, port2};
+ port1 -> mstb2;
+ edge [color=red];
+ port2 -> mstb3 -> {port3, port4};
+ port3 -> mstb4;
+ edge [color=""];
+
+ /* Malloc references */
+ edge [style=dashed;dir=back];
+ mstb1 -> {port1, port2};
+ port1 -> mstb2;
+ port2 -> mstb3 -> port3;
+ edge [color=red];
+ mstb3 -> port4;
+ port3 -> mstb4;
+ }
+
+ mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen];
+ mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen];
+ mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen];
+ mstb4 [label="MSTB #4";style=filled;fillcolor=grey];
+
+ port1 [label="Port #1"];
+ port2 [label="Port #2"];
+ port3 [label="Port #3"];
+ port4 [label="Port #4";style=filled;fillcolor=grey];
+
+ driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
+
+ payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
+ payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue];
+}
--- /dev/null
+digraph T {
+ /* Make sure our payloads are always drawn below the driver node */
+ subgraph cluster_driver {
+ fillcolor = grey;
+ style = filled;
+ edge [dir=none];
+ driver -> payload1;
+ driver -> payload2 [penwidth=3];
+ edge [dir=""];
+ }
+
+ /* Driver malloc references */
+ edge [style=dashed];
+ driver -> port1;
+ driver -> port2;
+ driver -> port3:e;
+ driver -> port4 [color=grey];
+ payload1:s -> port1:e;
+ payload2:s -> port3:e [penwidth=3];
+ edge [style=""];
+
+ subgraph cluster_topology {
+ label="Topology Manager";
+ labelloc=bottom;
+
+ /* Topology references */
+ mstb1 -> {port1, port2};
+ port1 -> mstb2;
+ edge [color=grey];
+ port2 -> mstb3 -> {port3, port4};
+ port3 -> mstb4;
+ edge [color=""];
+
+ /* Malloc references */
+ edge [style=dashed;dir=back];
+ mstb1 -> {port1, port2};
+ port1 -> mstb2;
+ port2 -> mstb3 [penwidth=3];
+ mstb3 -> port3 [penwidth=3];
+ edge [color=grey];
+ mstb3 -> port4;
+ port3 -> mstb4;
+ }
+
+ mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen];
+ mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen];
+ mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;penwidth=3];
+ mstb4 [label="MSTB #4";style=filled;fillcolor=grey];
+
+ port1 [label="Port #1"];
+ port2 [label="Port #2";penwidth=5];
+ port3 [label="Port #3";penwidth=3];
+ port4 [label="Port #4";style=filled;fillcolor=grey];
+
+ driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
+
+ payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
+ payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue;penwidth=3];
+}
.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
:export:
-Display Port MST Helper Functions Reference
-===========================================
+Display Port MST Helpers
+========================
+
+Overview
+--------
+
.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
:doc: dp mst helper
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+ :doc: Branch device and port refcounting
+
+Functions Reference
+-------------------
+
.. kernel-doc:: include/drm/drm_dp_mst_helper.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
:export:
+
+Topology Lifetime Internals
+---------------------------
+
+These functions aren't exported to drivers, but are documented here to help make
+the MST topology helpers easier to understand.
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+ :functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb
+ drm_dp_mst_topology_put_mstb
+ drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port
+ drm_dp_mst_topology_put_port
+ drm_dp_mst_get_mstb_malloc drm_dp_mst_put_mstb_malloc
+
MIPI DSI Helper Functions Reference
===================================
Contact: Daniel Vetter
+Generic fbdev defio support
+---------------------------
+
+The defio support code in the fbdev core has some very specific requirements,
+which means drivers need to have a special framebuffer for fbdev, which
+prevents us from using the generic fbdev emulation code everywhere. The main
+issue is that defio uses some fields in struct page itself, which breaks shmem
+gem objects (and other things).
+
+A possible solution would be to write our own defio mmap code in the drm fbdev
+emulation. It would need to fully wrap the existing mmap ops, forwarding
+everything after it has done the write-protect/mkwrite trickery:
+
+- In the drm_fbdev_fb_mmap helper, if we need defio, change the
+ default page prots to write-protected with something like this::
+
+ vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot);
+
+- Set the mkwrite and fsync callbacks with similar implementations to the core
+  fbdev defio stuff, as sketched below. These should all work on plain ptes;
+  they don't actually require a struct page.
+
+- Track the dirty pages in a separate structure (bitfield with one bit per page
+ should work) to avoid clobbering struct page.
+
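+As a starting point, the mkwrite half could look roughly like the sketch
+below. Everything in it is hypothetical: drm_fbdev_defio_mkwrite, the
+defio_dirty_pages bitfield and the defio_work flush worker on struct
+drm_fb_helper don't exist yet and only illustrate the idea::
+
+	static vm_fault_t drm_fbdev_defio_mkwrite(struct vm_fault *vmf)
+	{
+		struct drm_fb_helper *helper = vmf->vma->vm_private_data;
+
+		/* Track the dirty page in our own bitfield, not struct page */
+		set_bit(vmf->pgoff, helper->defio_dirty_pages);
+
+		/* Kick the deferred worker that flushes dirty pages out */
+		schedule_delayed_work(&helper->defio_work, HZ / 30);
+
+		/* Returning 0 lets the core make the pte writable again */
+		return 0;
+	}
+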
+Might be good to also have some igt testcases for this.
+
+Contact: Daniel Vetter, Noralf Tronnes
+
Put a reservation_object into drm_gem_object
--------------------------------------------
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
dce_v11_0_audio_write_sad_regs(encoder);
dce_v11_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct hdmi_avi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
uint8_t *payload = buffer + 3;
ssize_t err;
u32 tmp;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
dce_v8_0_audio_write_sad_regs(encoder);
dce_v8_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
+ drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
kfree(amdgpu_dm_connector);
}
amdgpu_dm_connector_funcs_reset(connector);
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ drm_dp_mst_get_port_malloc(port);
DRM_DEBUG_KMS(":%d\n", connector->base.id);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
+ aconnector, connector->base.id, aconnector->mst_port);
- aconnector->port = NULL;
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
- dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+ dc_link_remove_remote_sink(aconnector->dc_link,
+ aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
}
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_vblank.h>
#include <drm/drm_plane_helper.h>
#include <linux/clk.h>
#include <linux/platform_data/simplefb.h>
#include <linux/clk.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include "arcpgu.h"
#include "arcpgu_regs.h"
mutex_lock(&anx78xx->lock);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode,
- false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &anx78xx->connector,
+ adjusted_mode);
if (err) {
DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
goto unlock;
if (ret)
return;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &sii902x->connector, adj);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
- mode,
- true);
+ NULL, mode);
if (ctx->use_packed_pixel)
frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
* Copyright (c) 2017 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
#include <drm/bridge/dw_hdmi.h>
#include <sound/hdmi-codec.h>
u8 val;
/* Initialise info frame from DRM mode */
- drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &hdmi->connector, mode);
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
}
EXPORT_SYMBOL(drm_crtc_from_index);
-/**
- * drm_crtc_force_disable - Forcibly turn off a CRTC
- * @crtc: CRTC to turn off
- *
- * Note: This should only be used by non-atomic legacy drivers.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
int drm_crtc_force_disable(struct drm_crtc *crtc)
{
struct drm_mode_set set = {
return drm_mode_set_config_internal(&set);
}
-EXPORT_SYMBOL(drm_crtc_force_disable);
/**
* drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_ATOMIC))
- DRM_ERROR("Called for atomic driver, this is not what you want.\n");
+ WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
struct drm_encoder *encoder;
bool ret = true;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
drm_warn_on_modeset_not_all_locked(dev);
saved_enabled = crtc->enabled;
crtc_funcs = set->crtc->helper_private;
+ dev = set->crtc->dev;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
if (!set->mode)
set->fb = NULL;
return 0;
}
- dev = set->crtc->dev;
-
drm_warn_on_modeset_not_all_locked(dev);
/*
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
+ WARN_ON(drm_drv_uses_atomic_modeset(connector->dev));
+
if (mode == connector->dpms)
return 0;
int encoder_dpms;
bool ret;
+ WARN_ON(drm_drv_uses_atomic_modeset(dev));
+
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
const struct drm_framebuffer *fb);
int drm_crtc_register_all(struct drm_device *dev);
void drm_crtc_unregister_all(struct drm_device *dev);
+int drm_crtc_force_disable(struct drm_crtc *crtc);
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);
char *buf);
static int test_calc_pbn_mode(void);
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id,
if (lct > 1)
memcpy(mstb->rad, rad, lct / 2);
INIT_LIST_HEAD(&mstb->ports);
- kref_init(&mstb->kref);
+ kref_init(&mstb->topology_kref);
+ kref_init(&mstb->malloc_kref);
return mstb;
}
-static void drm_dp_free_mst_port(struct kref *kref);
-
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
- struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
- if (mstb->port_parent) {
- if (list_empty(&mstb->port_parent->next))
- kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
- }
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, malloc_kref);
+
+ if (mstb->port_parent)
+ drm_dp_mst_put_port_malloc(mstb->port_parent);
+
kfree(mstb);
}
+/**
+ * DOC: Branch device and port refcounting
+ *
+ * Topology refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The refcounting schemes for &struct drm_dp_mst_branch and &struct
+ * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
+ * two different kinds of refcounts: topology refcounts, and malloc refcounts.
+ *
+ * Topology refcounts are not exposed to drivers, and are handled internally
+ * by the DP MST helpers. The helpers use them in order to prevent the
+ * in-memory topology state from being changed in the middle of critical
+ * operations like changing the internal state of payload allocations. This
+ * means each branch and port will be considered to be connected to the rest
+ * of the topology until its topology refcount reaches zero. Additionally,
+ * for ports this means that their associated &struct drm_connector will stay
+ * registered with userspace until the port's refcount reaches 0.
+ *
+ * Malloc refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
+ * drm_dp_mst_branch allocated even after all of its topology references have
+ * been dropped, so that the driver or MST helpers can safely access each
+ * branch's last known state before it was disconnected from the topology.
+ * When the malloc refcount of a port or branch reaches 0, the memory
+ * allocation containing the &struct drm_dp_mst_branch or &struct
+ * drm_dp_mst_port respectively will be freed.
+ *
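+ * For example, the pattern this series adds to amdgpu is to grab a malloc
+ * reference when the driver creates a connector for a port, and to drop it
+ * again when that connector is destroyed. A condensed sketch of such driver
+ * callbacks (the driver_* names are placeholders; allocation and error
+ * handling are elided)::
+ *
+ *	static struct drm_connector *
+ *	driver_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ *				 struct drm_dp_mst_port *port, const char *path)
+ *	{
+ *		// ...create and register the driver's connector...
+ *		drm_dp_mst_get_port_malloc(port);
+ *		return connector;
+ *	}
+ *
+ *	static void driver_mst_connector_destroy(struct drm_connector *connector)
+ *	{
+ *		// ...tear down the driver's connector...
+ *		drm_dp_mst_put_port_malloc(driver_connector->port);
+ *		kfree(driver_connector);
+ *	}
+ *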
+ * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
+ * to drivers. As of writing this documentation, there are no drivers that
+ * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
+ * helpers. Exposing this API to drivers in a race-free manner would take more
+ * tweaking of the refcounting scheme; however, patches are welcome provided
+ * there is a legitimate driver usecase for this.
+ *
+ * Refcount relationships in a topology
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Let's take a look at why the relationship between topology and malloc
+ * refcounts is designed the way it is.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-1.dot
+ *
+ * An example of topology and malloc refs in a DP MST topology with two
+ * active payloads. Topology refcount increments are indicated by solid
+ * lines, and malloc refcount increments are indicated by dashed lines.
+ * Each starts from the branch which incremented the refcount, and ends at
+ * the branch to which the refcount belongs, i.e. the arrow points the
+ * same way as the C pointers used to reference a structure.
+ *
+ * As you can see in the above figure, every branch increments the topology
+ * refcount of its children, and increments the malloc refcount of its
+ * parent. Additionally, every payload increments the malloc refcount of its
+ * assigned port by 1.
+ *
+ * So, what would happen if MSTB #3 from the above figure were unplugged from
+ * the system, but the driver hadn't yet removed payload #2 from port #3? The
+ * topology would start to look like the figure below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-2.dot
+ *
+ * Ports and branch devices which have been released from memory are
+ * colored grey, and references which have been removed are colored red.
+ *
+ * Whenever a port or branch device's topology refcount reaches zero, it will
+ * decrement the topology refcounts of all its children, the malloc refcount
+ * of its parent, and finally its own malloc refcount. For MSTB #4 and port
+ * #4, this means they both have been disconnected from the topology and freed
+ * from memory. But, because payload #2 is still holding a reference to port
+ * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
+ * is still accessible from memory. This also means port #3 has not yet
+ * decremented the malloc refcount of MSTB #3, so its &struct
+ * drm_dp_mst_branch will also stay allocated in memory until port #3's
+ * malloc refcount reaches 0.
+ *
+ * This relationship is necessary because in order to release payload #2, we
+ * need to be able to figure out the last relative of port #3 that's still
+ * connected to the topology. In this case, we would travel up the topology as
+ * shown below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-3.dot
+ *
+ * And finally, remove payload #2 by communicating with port #2 through
+ * sideband transactions.
+ */
+
+/**
+ * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_put_mstb_malloc()
+ */
+static void
+drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+ kref_get(&mstb->malloc_kref);
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
+}
+
+/**
+ * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_get_mstb_malloc()
+ */
+static void
+drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
+ kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
+}
+
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port =
+ container_of(kref, struct drm_dp_mst_port, malloc_kref);
+
+ drm_dp_mst_put_mstb_malloc(port->parent);
+ kfree(port);
+}
+
+/**
+ * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * Because @port could potentially be freed at any time by the DP MST helpers
+ * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
+ * function, drivers that wish to make use of &struct drm_dp_mst_port should
+ * ensure that they grab at least one main malloc reference to their MST ports
+ * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
+ * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
+ *
+ * See also: drm_dp_mst_put_port_malloc()
+ */
+void
+drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
+{
+ kref_get(&port->malloc_kref);
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
+}
+EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
+
+/**
+ * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * See also: drm_dp_mst_get_port_malloc()
+ */
+void
+drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
+ kref_put(&port->malloc_kref, drm_dp_free_mst_port);
+}
+EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
- struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
- /*
- * init kref again to be used by ports to remove mst branch when it is
- * not needed anymore
- */
- kref_init(kref);
-
- if (mstb->port_parent && list_empty(&mstb->port_parent->next))
- kref_get(&mstb->port_parent->kref);
-
- /*
- * destroy all ports - don't need lock
- * as there are no more references to the mst branch
- * device at this point.
- */
+ mutex_lock(&mgr->lock);
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
list_del(&port->next);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
+ mutex_unlock(&mgr->lock);
/* drop any tx slots msg */
mutex_lock(&mstb->mgr->qlock);
if (wake_tx)
wake_up_all(&mstb->mgr->tx_waitq);
- kref_put(kref, drm_dp_free_mst_branch_device);
+ drm_dp_mst_put_mstb_malloc(mstb);
}
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+/**
+ * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
+ * branch device unless it's zero
+ * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @mstb, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
+ * reached 0). Holding a topology reference implies that a malloc reference
+ * will be held to @mstb as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @mstb. If you already have a topology reference to @mstb, you
+ * should use drm_dp_mst_topology_get_mstb() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @mstb is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+ int ret = kref_get_unless_zero(&mstb->topology_kref);
+
+ if (ret)
+ DRM_DEBUG("mstb %p (%d)\n", mstb,
+ kref_read(&mstb->topology_kref));
+
+ return ret;
}
+/**
+ * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
+ * branch device
+ * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_branch.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ */
+static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
+{
+ WARN_ON(kref_read(&mstb->topology_kref) == 0);
+ kref_get(&mstb->topology_kref);
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
+ *
+ * Releases a topology reference from @mstb by decrementing
+ * &drm_dp_mst_branch.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_get_mstb()
+ */
+static void
+drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
+{
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref) - 1);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
+}
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
case DP_PEER_DEVICE_MST_BRANCHING:
mstb = port->mstb;
port->mstb = NULL;
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
break;
}
}
static void drm_dp_destroy_port(struct kref *kref)
{
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ struct drm_dp_mst_port *port =
+ container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
- port->vcpi.num_slots = 0;
-
kfree(port->cached_edid);
/*
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
- kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
}
- kfree(port);
+ drm_dp_mst_put_port_malloc(port);
}
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
+ * port unless it's zero
+ * @port: &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @port, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
+ * 0). Holding a topology reference implies that a malloc reference will be
+ * held to @port as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @port. If you already have a topology reference to @port, you
+ * should use drm_dp_mst_topology_get_port() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_port()
+ * drm_dp_mst_topology_put_port()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @port is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- kref_put(&port->kref, drm_dp_destroy_port);
+ int ret = kref_get_unless_zero(&port->topology_kref);
+
+ if (ret)
+ DRM_DEBUG("port %p (%d)\n", port,
+ kref_read(&port->topology_kref));
+
+ return ret;
}
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+/**
+ * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
+ * @port: The &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_port.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_put_port()
+ */
+static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
+{
+ WARN_ON(kref_read(&port->topology_kref) == 0);
+ kref_get(&port->topology_kref);
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_port() - release a topology reference to a port
+ * @port: The &struct drm_dp_mst_port to release the topology reference from
+ *
+ * Releases a topology reference from @port by decrementing
+ * &drm_dp_mst_port.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_get_port()
+ */
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref) - 1);
+ kref_put(&port->topology_kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_branch *to_find)
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *rmstb;
- if (to_find == mstb) {
- kref_get(&mstb->kref);
+
+ if (to_find == mstb)
return mstb;
- }
+
list_for_each_entry(port, &mstb->ports, next) {
if (port->mstb) {
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+ port->mstb, to_find);
if (rmstb)
return rmstb;
}
return NULL;
}
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_branch *rmstb = NULL;
+
mutex_lock(&mgr->lock);
- if (mgr->mst_primary)
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+ if (mgr->mst_primary) {
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+ mgr->mst_primary, mstb);
+
+ if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
+ rmstb = NULL;
+ }
mutex_unlock(&mgr->lock);
return rmstb;
}
-static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *to_find)
{
struct drm_dp_mst_port *port, *mport;
list_for_each_entry(port, &mstb->ports, next) {
- if (port == to_find) {
- kref_get(&port->kref);
+ if (port == to_find)
return port;
- }
+
if (port->mstb) {
- mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+ mport = drm_dp_mst_topology_get_port_validated_locked(
+ port->mstb, to_find);
if (mport)
return mport;
}
return NULL;
}
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *rport = NULL;
+
mutex_lock(&mgr->lock);
- if (mgr->mst_primary)
- rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+ if (mgr->mst_primary) {
+ rport = drm_dp_mst_topology_get_port_validated_locked(
+ mgr->mst_primary, port);
+
+ if (rport && !drm_dp_mst_topology_try_get_port(rport))
+ rport = NULL;
+ }
mutex_unlock(&mgr->lock);
return rport;
}
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
struct drm_dp_mst_port *port;
+ int ret;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
- kref_get(&port->kref);
- return port;
+ ret = drm_dp_mst_topology_try_get_port(port);
+ return ret ? port : NULL;
}
}
if (port->mstb) {
port->mstb->mgr = port->mgr;
port->mstb->port_parent = port;
+ /*
+ * Make sure this port's memory allocation stays
+		 * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
send_link = true;
}
bool created = false;
int old_pdt = 0;
int old_ddps = 0;
+
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return;
- kref_init(&port->kref);
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
port->parent = mstb;
port->port_num = port_msg->port_number;
port->mgr = mstb->mgr;
port->aux.name = "DPMST";
port->aux.dev = dev->dev;
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
created = true;
} else {
old_pdt = port->pdt;
for this list */
if (created) {
mutex_lock(&mstb->mgr->lock);
- kref_get(&port->kref);
+ drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
mutex_unlock(&mstb->mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
- if (!port->input)
- drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ if (!port->input) {
+ drm_dp_send_enum_path_resources(mstb->mgr,
+ mstb, port);
+ }
} else {
port->available_pbn = 0;
- }
+ }
}
if (old_pdt != port->pdt && !port->input) {
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ build_mst_prop_path(mstb, port->port_num, proppath,
+ sizeof(proppath));
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
+ port,
+ proppath);
if (!port->connector) {
/* remove it from the port list */
mutex_lock(&mstb->mgr->lock);
list_del(&port->next);
mutex_unlock(&mstb->mgr->lock);
/* drop port list reference */
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
goto out;
}
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
drm_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
out:
/* put reference to this port */
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
dowork = true;
}
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
{
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_port *port;
- int i;
+ int i, ret;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
}
}
}
- kref_get(&mstb->kref);
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
+ if (!ret)
+ mstb = NULL;
out:
mutex_unlock(&mgr->lock);
return mstb;
return NULL;
}
-static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
- struct drm_dp_mst_topology_mgr *mgr,
- uint8_t *guid)
+static struct drm_dp_mst_branch *
+drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
+ uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
+ int ret;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
-
- if (mstb)
- kref_get(&mstb->kref);
+ if (mstb) {
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
+ if (!ret)
+ mstb = NULL;
+ }
mutex_unlock(&mgr->lock);
return mstb;
drm_dp_send_enum_path_resources(mgr, mstb, port);
if (port->mstb) {
- mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+ mstb_child = drm_dp_mst_topology_get_mstb_validated(
+ mgr, port->mstb);
if (mstb_child) {
drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_put_mst_branch_device(mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
}
}
}
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
struct drm_dp_mst_branch *mstb;
+ int ret;
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
- kref_get(&mstb->kref);
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
+ if (!ret)
+ mstb = NULL;
}
mutex_unlock(&mgr->lock);
if (mstb) {
drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
}
}
return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
-static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb,
- int *port_num)
+/*
+ * Searches upwards in the topology starting from mstb to try to find the
+ * closest available parent of mstb that's still connected to the rest of the
+ * topology. This can be used in order to perform operations like releasing
+ * payloads, where the branch device which owned the payload may no longer be
+ * around and thus would require that the payload on the last living relative
+ * be freed instead.
+ */
+static struct drm_dp_mst_branch *
+drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int *port_num)
{
struct drm_dp_mst_branch *rmstb = NULL;
struct drm_dp_mst_port *found_port;
+
mutex_lock(&mgr->lock);
- if (mgr->mst_primary) {
+ if (!mgr->mst_primary)
+ goto out;
+
+ do {
found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+ if (!found_port)
+ break;
- if (found_port) {
+ if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
rmstb = found_port->parent;
- kref_get(&rmstb->kref);
*port_num = found_port->port_num;
+ } else {
+ /* Search again, starting from this parent */
+ mstb = found_port->parent;
}
- }
+ } while (!rmstb);
+out:
mutex_unlock(&mgr->lock);
return rmstb;
}
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
- return -EINVAL;
-
port_num = port->port_num;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb) {
- mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
+ port->parent,
+ &port_num);
- if (!mstb) {
- drm_dp_put_port(port);
+ if (!mstb)
return -EINVAL;
- }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
drm_dp_queue_down_tx(mgr, txmsg);
+ /*
+ * FIXME: there is a small chance that between getting the last
+ * connected mstb and sending the payload message, the last connected
+ * mstb could also be removed from the topology. In the future, this
+ * needs to be fixed by restarting the
+ * drm_dp_get_last_connected_port_and_mstb() search in the event of a
+ * timeout if the topology is still connected to the system.
+ */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
- if (txmsg->reply.reply_type == 1) {
+ if (txmsg->reply.reply_type == 1)
ret = -EINVAL;
- } else
+ else
ret = 0;
}
kfree(txmsg);
fail_put:
- drm_dp_put_mst_branch_device(mstb);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
struct drm_dp_sideband_msg_tx *txmsg;
int len, ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return -ENOMEM;
}
ret = 0;
}
kfree(txmsg);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
*/
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
- int i, j;
- int cur_slots = 1;
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
+ int i, j;
+ int cur_slots = 1;
mutex_lock(&mgr->payload_lock);
for (i = 0; i < mgr->max_payloads; i++) {
struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
struct drm_dp_payload *payload = &mgr->payloads[i];
+ bool put_port = false;
/* solve the current payloads - compare to the hw ones
- update the hw view */
if (vcpi) {
port = container_of(vcpi, struct drm_dp_mst_port,
vcpi);
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port) {
- mutex_unlock(&mgr->payload_lock);
- return -EINVAL;
+
+ /* Validated ports don't matter if we're releasing
+ * VCPI
+ */
+ if (vcpi->num_slots) {
+ port = drm_dp_mst_topology_get_port_validated(
+ mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
+ put_port = true;
}
+
req_payload.num_slots = vcpi->num_slots;
req_payload.vcpi = vcpi->vcpi;
} else {
}
cur_slots += req_payload.num_slots;
- if (port)
- drm_dp_put_port(port);
+ if (put_port)
+ drm_dp_mst_topology_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EINVAL;
}
kfree(txmsg);
fail_put:
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
/* give this the main reference */
mgr->mst_primary = mstb;
- kref_get(&mgr->mst_primary->kref);
+ drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
out_unlock:
mutex_unlock(&mgr->lock);
if (mstb)
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
mgr->down_rep_recv.initial_hdr.lct,
mgr->down_rep_recv.initial_hdr.rad[0],
mgr->down_rep_recv.msg[0]);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
}
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
}
if (mstb)
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
enum drm_connector_status status = connector_status_disconnected;
/* we need to search for the port in the mgr in case its gone */
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
break;
}
out:
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
{
bool ret = false;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return ret;
ret = port->has_audio;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
struct edid *edid = NULL;
/* we need to search for the port in the mgr in case its gone */
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return NULL;
drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
}
/**
- * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
* @state: global atomic state
* @mgr: MST topology manager for the port
* @port: port to find vcpi slots for
* @pbn: bandwidth required for the mode in PBN
*
- * RETURNS:
- * Total slots in the atomic state assigned for this port or error
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
+ * may have had. Any atomic drivers which support MST must call this function
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
+ * current VCPI allocation for the new state, but only when
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
+ * to ensure compatibility with userspace applications that still use the
+ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+ *
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ *
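+ * A typical &drm_encoder_helper_funcs.atomic_check implementation could look
+ * something like this condensed sketch, where the driver_* names and the
+ * mst_mgr/mst_port lookups are placeholders::
+ *
+ *	static int driver_encoder_atomic_check(struct drm_encoder *encoder,
+ *					       struct drm_crtc_state *crtc_state,
+ *					       struct drm_connector_state *conn_state)
+ *	{
+ *		struct driver_connector *conn =
+ *			to_driver_connector(conn_state->connector);
+ *		int pbn, slots;
+ *
+ *		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ *			return 0;
+ *
+ *		pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);
+ *		slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
+ *						      conn->mst_mgr,
+ *						      conn->mst_port, pbn);
+ *		if (slots < 0)
+ *			return slots;
+ *
+ *		return 0;
+ *	}
+ *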
+ * See also:
+ * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * Total slots in the atomic state assigned for this port, or a negative error
+ * code if the port no longer exists
*/
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn)
{
struct drm_dp_mst_topology_state *topology_state;
- int req_slots;
+ struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
+ int prev_slots, req_slots, ret;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (port == NULL)
return -EINVAL;
- req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
- DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
- req_slots, topology_state->avail_slots);
- if (req_slots > topology_state->avail_slots) {
- drm_dp_put_port(port);
- return -ENOSPC;
+ /* Find the current allocation for this port, if any */
+ list_for_each_entry(pos, &topology_state->vcpis, next) {
+ if (pos->port == port) {
+ vcpi = pos;
+ prev_slots = vcpi->vcpi;
+
+ /*
+ * This should never happen, unless the driver tries
+ * releasing and allocating the same VCPI allocation,
+ * which is an error
+ */
+			if (WARN_ON(!prev_slots)) {
+				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+					  port);
+				ret = -EINVAL;
+				goto out;
+			}
+
+ break;
+ }
}
+ if (!vcpi)
+ prev_slots = 0;
+
+ req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
- topology_state->avail_slots -= req_slots;
- DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_slots, req_slots);
+
+ /* Add the new allocation to the state */
+ if (!vcpi) {
+ vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
+ if (!vcpi) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drm_dp_mst_get_port_malloc(port);
+ vcpi->port = port;
+ list_add(&vcpi->next, &topology_state->vcpis);
+ }
+ vcpi->vcpi = req_slots;
- drm_dp_put_port(port);
- return req_slots;
+ ret = req_slots;
+out:
+ drm_dp_mst_topology_put_port(port);
+ return ret;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
* drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
* @state: global atomic state
* @mgr: MST topology manager for the port
- * @slots: number of vcpi slots to release
+ * @port: The port to release the VCPI slots from
*
- * RETURNS:
- * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
- * negative error code
+ * Releases any VCPI slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function in
+ * their &drm_connector_helper_funcs.atomic_check() callback when the
+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
+ * removed) when it had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * phase.
+ *
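+ * For instance, a driver's &drm_connector_helper_funcs.atomic_check could
+ * release the allocation when the connector loses its CRTC (a minimal
+ * sketch; the mst_mgr/mst_port lookups are placeholders)::
+ *
+ *	if (old_conn_state->crtc && !new_conn_state->crtc) {
+ *		ret = drm_dp_atomic_release_vcpi_slots(state,
+ *						       conn->mst_mgr,
+ *						       conn->mst_port);
+ *		if (ret)
+ *			return ret;
+ *	}
+ *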
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * 0 if all the VCPI slots for this port were released in the atomic state,
+ * or a negative error code
*/
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- int slots)
+ struct drm_dp_mst_port *port)
{
struct drm_dp_mst_topology_state *topology_state;
+ struct drm_dp_vcpi_allocation *pos;
+ bool found = false;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- /* We cannot rely on port->vcpi.num_slots to update
- * topology_state->avail_slots as the port may not exist if the parent
- * branch device was unplugged. This should be fixed by tracking
- * per-port slot allocation in drm_dp_mst_topology_state instead of
- * depending on the caller to tell us how many slots to release.
- */
- topology_state->avail_slots += slots;
- DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
- slots, topology_state->avail_slots);
+ list_for_each_entry(pos, &topology_state->vcpis, next) {
+ if (pos->port == port) {
+ found = true;
+ break;
+ }
+ }
+ if (WARN_ON(!found)) {
+ DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
+ port, &topology_state->base);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+ if (pos->vcpi) {
+ drm_dp_mst_put_port_malloc(port);
+ pos->vcpi = 0;
+ }
return 0;
}
{
int ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return false;
return false;
if (port->vcpi.vcpi > 0) {
- DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+ port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return true;
}
}
ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
if (ret) {
DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
- DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
goto out;
}
DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
- pbn, port->vcpi.num_slots);
+ pbn, port->vcpi.num_slots);
- drm_dp_put_port(port);
+	/* Keep port allocated until its payload has been removed */
+ drm_dp_mst_get_port_malloc(port);
+ drm_dp_mst_topology_put_port(port);
return true;
out:
return false;
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
int slots = 0;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return slots;
slots = port->vcpi.num_slots;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
*/
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
- return;
+ /*
+	 * A port with VCPI will remain allocated until its VCPI is
+	 * released; no verified ref needed
+ */
+
port->vcpi.num_slots = 0;
- drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
* @mgr: manager for this port
* @port: unverified port to deallocate vcpi for
*/
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
- return;
+ /*
+	 * A port with VCPI will remain allocated until its VCPI is
+	 * released; no verified ref needed
+ */
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
port->vcpi.num_slots = 0;
port->vcpi.pbn = 0;
port->vcpi.aligned_pbn = 0;
port->vcpi.vcpi = 0;
- drm_dp_put_port(port);
+ drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_free_mst_port(struct kref *kref)
-{
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
- kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
- kfree(port);
-}
-
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- kref_init(&port->kref);
INIT_LIST_HEAD(&port->next);
mgr->cbs->destroy_connector(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
- if (!port->input && port->vcpi.vcpi > 0) {
- drm_dp_mst_reset_vcpi_slots(mgr, port);
- drm_dp_update_payload_part1(mgr);
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- }
-
- kref_put(&port->kref, drm_dp_free_mst_port);
+ drm_dp_mst_put_port_malloc(port);
send_hotplug = true;
}
if (send_hotplug)
static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
- struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_topology_state *state, *old_state =
+ to_dp_mst_topology_state(obj->state);
+ struct drm_dp_vcpi_allocation *pos, *vcpi;
- state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+ INIT_LIST_HEAD(&state->vcpis);
+
+ list_for_each_entry(pos, &old_state->vcpis, next) {
+ /* Prune leftover freed VCPI allocations */
+ if (!pos->vcpi)
+ continue;
+
+ vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
+ if (!vcpi)
+ goto fail;
+
+ drm_dp_mst_get_port_malloc(vcpi->port);
+ list_add(&vcpi->next, &state->vcpis);
+ }
+
return &state->base;
+
+fail:
+ list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
+ drm_dp_mst_put_port_malloc(pos->port);
+ kfree(pos);
+ }
+ kfree(state);
+
+ return NULL;
}
static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
+ struct drm_dp_vcpi_allocation *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
+ /* We only keep references to ports with non-zero VCPIs */
+ if (pos->vcpi)
+ drm_dp_mst_put_port_malloc(pos->port);
+ kfree(pos);
+ }
kfree(mst_state);
}
-static const struct drm_private_state_funcs mst_state_funcs = {
+static inline int
+drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
+{
+ struct drm_dp_vcpi_allocation *vcpi;
+ int avail_slots = 63, payload_count = 0;
+
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+		/* Releasing VCPI is always OK, even if the port is gone */
+ if (!vcpi->vcpi) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
+ vcpi->port);
+ continue;
+ }
+
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
+ vcpi->port, vcpi->vcpi);
+
+ avail_slots -= vcpi->vcpi;
+ if (avail_slots < 0) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
+ vcpi->port, mst_state,
+ avail_slots + vcpi->vcpi);
+ return -ENOSPC;
+ }
+
+ if (++payload_count > mgr->max_payloads) {
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
+ mgr, mst_state, mgr->max_payloads);
+ return -EINVAL;
+ }
+ }
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
+ mgr, mst_state, avail_slots,
+ 63 - avail_slots);
+
+ return 0;
+}
+
+/**
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
+ * atomic update is valid
+ * @state: Pointer to the new &struct drm_dp_mst_topology_state
+ *
+ * Checks the given topology state for an atomic update to ensure that it's
+ * valid. This includes checking whether there's enough bandwidth to support
+ * the new VCPI allocations in the atomic update.
+ *
+ * Any atomic drivers supporting DP MST must make sure to call this after
+ * checking the rest of their state in their
+ * &drm_mode_config_funcs.atomic_check() callback.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_atomic_release_vcpi_slots()
+ *
+ * Returns:
+ *
+ * 0 if the new state is valid, negative error code otherwise.
+ */
+int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ int i, ret = 0;
+
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_check);
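A minimal driver-side sketch of how these entry points fit together; the example_* helpers and the surrounding callback are placeholders, only the drm_dp_* calls are from this series:

static int example_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	/* placeholders: mgr and port come from the driver's connector data */
	struct drm_dp_mst_topology_mgr *mgr = example_get_mgr(encoder);
	struct drm_dp_mst_port *port = example_get_port(conn_state->connector);
	int bpp = conn_state->connector->display_info.bpc * 3;
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	return 0;
}

/* then, from &drm_mode_config_funcs.atomic_check, after all other checks: */
	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;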
+
+const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
.atomic_duplicate_state = drm_dp_mst_duplicate_state,
.atomic_destroy_state = drm_dp_mst_destroy_state,
};
+EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
/**
* drm_atomic_get_mst_topology_state: get MST topology state
return -ENOMEM;
mst_state->mgr = mgr;
-
- /* max. time slots - one slot for MTP header */
- mst_state->avail_slots = 63;
+ INIT_LIST_HEAD(&mst_state->vcpis);
drm_atomic_private_obj_init(dev, &mgr->base,
&mst_state->base,
- &mst_state_funcs);
+ &drm_dp_mst_topology_state_funcs);
return 0;
}
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EREMOTEIO;
}
out:
kfree(txmsg);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
return oui == HDMI_FORUM_IEEE_OUI;
}
+static bool cea_db_is_vcdb(const u8 *db)
+{
+ if (cea_db_tag(db) != USE_EXTENDED_TAG)
+ return false;
+
+ if (cea_db_payload_len(db) != 2)
+ return false;
+
+ if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK)
+ return false;
+
+ return true;
+}
+
static bool cea_db_is_y420cmdb(const u8 *db)
{
if (cea_db_tag(db) != USE_EXTENDED_TAG)
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
-/**
- * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
- * @edid: EDID block to scan
- *
- * Check whether the monitor reports the RGB quantization range selection
- * as supported. The AVI infoframe can then be used to inform the monitor
- * which quantization range (full or limited) is used.
- *
- * Return: True if the RGB quantization range is selectable, false otherwise.
- */
-bool drm_rgb_quant_range_selectable(struct edid *edid)
-{
- u8 *edid_ext;
- int i, start, end;
-
- edid_ext = drm_find_cea_extension(edid);
- if (!edid_ext)
- return false;
-
- if (cea_db_offsets(edid_ext, &start, &end))
- return false;
-
- for_each_cea_db(edid_ext, i, start, end) {
- if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG &&
- cea_db_payload_len(&edid_ext[i]) == 2 &&
- cea_db_extended_tag(&edid_ext[i]) ==
- EXT_VIDEO_CAPABILITY_BLOCK) {
- DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
- return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
- }
- }
-
- return false;
-}
-EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
* drm_default_rgb_quant_range - default RGB quantization range
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
+static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]);
+
+ if (db[2] & EDID_CEA_VCDB_QS)
+ info->rgb_quant_range_selectable = true;
+}
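For reference, a sketch of the VCDB layout these two helpers assume (a CEA-861 extended data block; offsets relative to the block start):

/*
 * db[0]: tag = USE_EXTENDED_TAG, payload length = 2
 * db[1]: extended tag = EXT_VIDEO_CAPABILITY_BLOCK (0x00)
 * db[2]: capability byte; EDID_CEA_VCDB_QS (bit 6) is set when the
 *        sink lets the source select the RGB quantization range
 */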
+
static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
const u8 *db)
{
drm_parse_hdmi_forum_vsdb(connector, db);
if (cea_db_is_y420cmdb(db))
drm_parse_y420cmdb_bitmap(connector, db);
+ if (cea_db_is_vcdb(db))
+ drm_parse_vcdb(connector, db);
}
}
info->max_tmds_clock = 0;
info->dvi_dual = false;
info->has_hdmi_infoframe = false;
+ info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
}
EXPORT_SYMBOL(drm_set_preferred_mode);
+static bool is_hdmi2_sink(struct drm_connector *connector)
+{
+ /*
+ * FIXME: sil-sii8620 doesn't have a connector around when
+ * we need one, so we have to be prepared for a NULL connector.
+ */
+ if (!connector)
+ return true;
+
+ return connector->display_info.hdmi.scdc.supported ||
+ connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
+}
+
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
* @frame: HDMI AVI infoframe
+ * @connector: the connector
* @mode: DRM display mode
- * @is_hdmi2_sink: Sink is HDMI 2.0 compliant
*
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- const struct drm_display_mode *mode,
- bool is_hdmi2_sink)
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
enum hdmi_picture_aspect picture_aspect;
int err;
* HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
* have to make sure we don't break HDMI 1.4 sinks.
*/
- if (!is_hdmi2_sink && frame->video_code > 64)
+ if (!is_hdmi2_sink(connector) && frame->video_code > 64)
frame->video_code = 0;
/*
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
* quantization range information
* @frame: HDMI AVI infoframe
+ * @connector: the connector
* @mode: DRM display mode
* @rgb_quant_range: RGB quantization range (Q)
- * @rgb_quant_range_selectable: Sink support selectable RGB quantization range (QS)
- * @is_hdmi2_sink: HDMI 2.0 sink, which has different default recommendations
- *
- * Note that @is_hdmi2_sink can be derived by looking at the
- * &drm_scdc.supported flag stored in &drm_hdmi_info.scdc,
- * &drm_display_info.hdmi, which can be found in &drm_connector.display_info.
*/
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+ struct drm_connector *connector,
const struct drm_display_mode *mode,
- enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable,
- bool is_hdmi2_sink)
+ enum hdmi_quantization_range rgb_quant_range)
{
+ const struct drm_display_info *info = &connector->display_info;
+
/*
* CEA-861:
* "A Source shall not send a non-zero Q value that does not correspond
* HDMI 2.0 recommends sending non-zero Q when it does match the
* default RGB quantization range for the mode, even when QS=0.
*/
- if (rgb_quant_range_selectable ||
+ if (info->rgb_quant_range_selectable ||
rgb_quant_range == drm_default_rgb_quant_range(mode))
frame->quantization_range = rgb_quant_range;
else
* we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
* on CEA-861-F.
*/
- if (!is_hdmi2_sink ||
+ if (!is_hdmi2_sink(connector) ||
rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
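The call sites converted later in this series all reduce to the same shape; a condensed sketch, where connector, mode and limited_range stand in for driver state:

	union hdmi_infoframe frame;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, connector, mode);
	if (ret < 0)
		return;

	drm_hdmi_avi_infoframe_quant_range(&frame.avi, connector, mode,
					   limited_range ?
					   HDMI_QUANTIZATION_RANGE_LIMITED :
					   HDMI_QUANTIZATION_RANGE_FULL);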
int i;
struct drm_fb_helper_surface_size sizes;
int gamma_size = 0;
+ int best_depth = 0;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
sizes.surface_depth = 24;
sizes.fb_width = (u32)-1;
sizes.fb_height = (u32)-1;
- /* if driver picks 8 or 16 by default use that for both depth/bpp */
+ /*
+ * If driver picks 8 or 16 by default use that for both depth/bpp
+ * to begin with
+ */
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
}
+ /*
+ * If we run into a situation where, for example, the primary plane
+ * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
+ * 16) we need to scale down the depth of the sizes we request.
+ */
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
+ struct drm_crtc *crtc = mode_set->crtc;
+ struct drm_plane *plane = crtc->primary;
+ int j;
+
+ DRM_DEBUG("test CRTC %d primary plane\n", i);
+
+ for (j = 0; j < plane->format_count; j++) {
+ const struct drm_format_info *fmt;
+
+ fmt = drm_format_info(plane->format_types[j]);
+
+ /*
+ * Do not consider YUV or other complicated formats
+ * for framebuffers. This means only legacy formats
+ * are supported (fmt->depth is a legacy field), but
+ * the framebuffer emulation can only deal with such
+ * formats, specifically RGB/BGR formats.
+ */
+ if (fmt->depth == 0)
+ continue;
+
+ /* We found a perfect fit, great */
+ if (fmt->depth == sizes.surface_depth) {
+ best_depth = fmt->depth;
+ break;
+ }
+
+ /* Skip depths above what we're looking for */
+ if (fmt->depth > sizes.surface_depth)
+ continue;
+
+ /* Best depth found so far */
+ if (fmt->depth > best_depth)
+ best_depth = fmt->depth;
+ }
+ }
+ if (sizes.surface_depth != best_depth) {
+ DRM_INFO("requested bpp %d, scaled depth down to %d",
+ sizes.surface_bpp, best_depth);
+ sizes.surface_depth = best_depth;
+ }
+
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
return 0;
err_drm_fb_helper_fini:
- drm_fb_helper_fini(fb_helper);
+ drm_fb_helper_fbdev_teardown(dev);
return ret;
}
return 0;
}
-/*
- * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
- * unregister_framebuffer() or fb_release().
- */
-static void drm_fbdev_fb_destroy(struct fb_info *info)
+static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
- struct drm_fb_helper *fb_helper = info->par;
struct fb_info *fbi = fb_helper->fbdev;
struct fb_ops *fbops = NULL;
void *shadow = NULL;
- if (fbi->fbdefio) {
+ if (!fb_helper->dev)
+ return;
+
+ if (fbi && fbi->fbdefio) {
fb_deferred_io_cleanup(fbi);
shadow = fbi->screen_buffer;
fbops = fbi->fbops;
}
drm_client_framebuffer_delete(fb_helper->buffer);
+}
+
+static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
+{
+ drm_fbdev_cleanup(fb_helper);
+
/*
* FIXME:
* Remove conditional when all CMA drivers have been moved over to using
}
}
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+ drm_fbdev_release(info->par);
+}
+
static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
- int ret;
DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
fb = buffer->fb;
fbi = drm_fb_helper_alloc_fbi(fb_helper);
- if (IS_ERR(fbi)) {
- ret = PTR_ERR(fbi);
- goto err_free_buffer;
- }
+ if (IS_ERR(fbi))
+ return PTR_ERR(fbi);
fbi->par = fb_helper;
fbi->fbops = &drm_fbdev_fb_ops;
if (!fbops || !shadow) {
kfree(fbops);
vfree(shadow);
- ret = -ENOMEM;
- goto err_fb_info_destroy;
+ return -ENOMEM;
}
*fbops = *fbi->fbops;
}
return 0;
-
-err_fb_info_destroy:
- drm_fb_helper_fini(fb_helper);
-err_free_buffer:
- drm_client_framebuffer_delete(buffer);
-
- return ret;
}
EXPORT_SYMBOL(drm_fb_helper_generic_probe);
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (fb_helper->fbdev) {
- drm_fb_helper_unregister_fbi(fb_helper);
+ if (fb_helper->fbdev)
/* drm_fbdev_fb_destroy() takes care of cleanup */
- return;
- }
-
- /* Did drm_fb_helper_fbdev_setup() run? */
- if (fb_helper->dev)
- drm_fb_helper_fini(fb_helper);
-
- drm_client_release(client);
- kfree(fb_helper);
+ drm_fb_helper_unregister_fbi(fb_helper);
+ else
+ drm_fbdev_release(fb_helper);
}
static int drm_fbdev_client_restore(struct drm_client_dev *client)
struct drm_device *dev = client->dev;
int ret;
- /* If drm_fb_helper_fbdev_setup() failed, we only try once */
+ /* Setup is not retried if it has failed */
if (!fb_helper->dev && fb_helper->funcs)
return 0;
return 0;
}
- ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
- fb_helper->preferred_bpp, 0);
- if (ret) {
- fb_helper->dev = NULL;
- fb_helper->fbdev = NULL;
- return ret;
- }
+ drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
+
+ ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector);
+ if (ret)
+ goto err;
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret)
+ goto err_cleanup;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
+ if (ret)
+ goto err_cleanup;
return 0;
+
+err_cleanup:
+ drm_fbdev_cleanup(fb_helper);
+err:
+ fb_helper->dev = NULL;
+ fb_helper->fbdev = NULL;
+
+ DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
+
+ return ret;
}
static const struct drm_client_funcs drm_fbdev_client_funcs = {
drm_client_add(&fb_helper->client);
+ if (!preferred_bpp)
+ preferred_bpp = dev->mode_config.preferred_depth;
+ if (!preferred_bpp)
+ preferred_bpp = 32;
fb_helper->preferred_bpp = preferred_bpp;
ret = drm_fbdev_client_hotplug(&fb_helper->client);
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
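The helper is used further down with the usual pagevec batching idiom; condensed:

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		/* pagevec_add() returns the space left; 0 means the vec is full */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);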
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
{
struct address_space *mapping;
struct page *p, **pages;
+ struct pagevec pvec;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
if (pages == NULL)
return ERR_PTR(-ENOMEM);
+ mapping_set_unevictable(mapping);
+
for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
return pages;
fail:
- while (i--)
- put_page(pages[i]);
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec);
+ while (i--) {
+ if (!pagevec_add(&pvec, pages[i]))
+ drm_gem_check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ drm_gem_check_release_pagevec(&pvec);
kvfree(pages);
return ERR_CAST(p);
bool dirty, bool accessed)
{
int i, npages;
+ struct address_space *mapping;
+ struct pagevec pvec;
+
+ mapping = file_inode(obj->filp)->i_mapping;
+ mapping_clear_unevictable(mapping);
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
npages = obj->size >> PAGE_SHIFT;
+ pagevec_init(&pvec);
for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty(pages[i]);
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
- put_page(pages[i]);
+ if (!pagevec_add(&pvec, pages[i]))
+ drm_gem_check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ drm_gem_check_release_pagevec(&pvec);
kvfree(pages);
}
return;
}
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ &hdata->connector, m);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {
if (modes_changed) {
drm_helper_probe_single_connector_modes(connector, 0, 0);
- /* Disable the crtc to ensure a full modeset is
- * performed whenever it's turned on again. */
if (crtc)
- drm_crtc_force_disable(crtc);
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->primary->fb);
}
return 0;
{
union hdmi_infoframe frame;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ &priv->connector, mode);
frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
for (i = 0; i < tabs; i++)
seq_putc(m, '\t');
- seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
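DRM_MODE_FMT and DRM_MODE_ARG() come as a matched pair from drm_modes.h, folding the mode's id, name, refresh, clock, timings, type and flags into one format string and argument list, so the open-coded printouts below all collapse to the same one-liner:

	DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));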
static void intel_encoder_info(struct seq_file *m,
intel_panel_fini(&intel_connector->panel);
drm_connector_cleanup(connector);
+
+ if (intel_connector->port)
+ drm_dp_mst_put_port_malloc(intel_connector->port);
+
kfree(connector);
}
"[modeset]" : "[fastset]");
}
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ return ret;
+
if (any_ms) {
ret = intel_modeset_checks(state);
struct drm_connector *connector = conn_state->connector;
void *port = to_intel_connector(connector)->port;
struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc = pipe_config->base.crtc;
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
int bpp;
- int lane_count, slots = 0;
+ int lane_count, slots =
+ to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
return true;
}
-static int intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_conn_state)
+static int
+intel_dp_mst_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state)
{
struct drm_atomic_state *state = new_conn_state->state;
- struct drm_connector_state *old_conn_state;
- struct drm_crtc *old_crtc;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct intel_connector *intel_connector =
+ to_intel_connector(connector);
+ struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
- int slots, ret = 0;
+ struct drm_dp_mst_topology_mgr *mgr;
+ int ret = 0;
- old_conn_state = drm_atomic_get_old_connector_state(state, connector);
- old_crtc = old_conn_state->crtc;
- if (!old_crtc)
- return ret;
-
- crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
- slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
- if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
- struct drm_dp_mst_topology_mgr *mgr;
- struct drm_encoder *old_encoder;
+ if (!old_conn_state->crtc)
+ return 0;
- old_encoder = old_conn_state->best_encoder;
- mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
+ /* We only want to free VCPI if this state disables the CRTC on this
+ * connector
+ */
+ if (new_crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
- ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
- if (ret)
- DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
- else
- to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
+ if (!crtc_state ||
+ !drm_atomic_crtc_needs_modeset(crtc_state) ||
+ crtc_state->enable)
+ return 0;
}
+
+ mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
+ ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
+ intel_connector->port);
+
return ret;
}
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
+ drm_dp_mst_get_port_malloc(port);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
} dp_dual_mode;
bool has_hdmi_sink;
bool has_audio;
- bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
struct cec_notifier *cec_notifier;
};
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- struct drm_connector *connector = &intel_hdmi->attached_connector->base;
- bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
- connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- adjusted_mode,
- is_hdmi2_sink);
+ conn_state->connector,
+ adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable,
- is_hdmi2_sink);
+ HDMI_QUANTIZATION_RANGE_FULL);
drm_hdmi_avi_infoframe_content_type(&frame.avi,
conn_state);
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
- intel_hdmi->rgb_quant_range_selectable = false;
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
- intel_hdmi->rgb_quant_range_selectable =
- drm_rgb_quant_range_selectable(edid);
-
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
uint8_t buf[VIDEO_DIP_DATA_SIZE];
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct intel_dp *intel_dp = &dig_port->dp;
- struct drm_connector *connector = &intel_dp->attached_connector->base;
- const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
- bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
if (!lspcon->active) {
DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- mode, is_hdmi2_sink);
+ conn_state->connector,
+ adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
}
- drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL,
- false, is_hdmi2_sink);
+ HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
bool has_hdmi_monitor;
bool has_hdmi_audio;
- bool rgb_quant_range_selectable;
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- const struct intel_crtc_state *pipe_config)
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
int ret;
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &pipe_config->base.adjusted_mode,
- false);
+ conn_state->connector,
+ adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
- if (intel_sdvo->rgb_quant_range_selectable) {
- if (pipe_config->limited_color_range)
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_FULL;
- }
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ conn_state->connector,
+ adjusted_mode,
+ pipe_config->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
if (len < 0)
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
+ intel_sdvo_set_avi_infoframe(intel_sdvo,
+ crtc_state, conn_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
if (intel_sdvo_connector->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
- intel_sdvo->rgb_quant_range_selectable =
- drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
- intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
u8 buffer[17];
ssize_t err;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &hdmi->conn, mode);
if (err < 0) {
dev_err(hdmi->dev,
"Failed to get AVI infoframe from mode: %zd\n", err);
unsigned int wr_clk =
readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
- DRM_DEBUG_DRIVER("%d:\"%s\"\n", mode->base.id, mode->name);
+ DRM_DEBUG_DRIVER("\"%s\"\n", mode->name);
/* Enable clocks */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
int vic = drm_match_cea_mode(mode);
enum drm_mode_status status;
- DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
/* Check against non-VIC supported modes */
if (!vic) {
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
- DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n",
- mode->base.id, mode->name, vic);
+ DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
/* VENC + VENC-DVI Mode setup */
meson_venc_hdmi_mode_set(priv, vic, mode);
mode = &crtc->state->adjusted_mode;
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mdp4_crtc->name, mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("%s: set mode: " DRM_MODE_FMT,
+ mdp4_crtc->name, DRM_MODE_ARG(mode));
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
ctrl_pol = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
mdp4_dtv_encoder->pixclock = mode->clock * 1000;
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
mdp4_lcdc_encoder->pixclock = mode->clock * 1000;
{
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
pingpong_tearcheck_setup(encoder, mode);
mdp5_crtc_set_pipeline(encoder->crtc);
}
mode = &crtc->state->adjusted_mode;
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- crtc->name, mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));
mixer_width = mode->hdisplay;
if (r_mixer)
mode = adjusted_mode;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
ctrl_pol = 0;
struct mipi_dsi_host *host = msm_dsi->host;
bool is_dual_dsi = IS_DUAL_DSI();
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
struct msm_edp *edp = edp_bridge->edp;
- DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if ((connector->encoder != NULL) &&
u32 val;
int len;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ hdmi->connector, mode);
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
- /* Disable the crtc to ensure a full modeset is
- * performed whenever it's turned on again. */
if (crtc)
- drm_crtc_force_disable(crtc);
+ drm_crtc_helper_set_mode(crtc, &crtc->mode,
+ crtc->x, crtc->y,
+ crtc->primary->fb);
}
return 0;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
union hdmi_infoframe vendor_frame;
- bool scdc_supported, high_tmds_clock_ratio = false, scrambling = false;
+ bool high_tmds_clock_ratio = false, scrambling = false;
u8 config;
int ret;
int size;
return;
hdmi = &nv_connector->base.display_info.hdmi;
- scdc_supported = hdmi->scdc.supported;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
- scdc_supported);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
+ &nv_connector->base, mode);
if (!ret) {
/* We have an AVI InfoFrame, populate it to the display */
args.pwr.avi_infoframe_length
struct nv50_mstm *mstm = mstc->mstm;
int vcpi = mstc->port->vcpi.vcpi, i;
+ WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
+
NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
for (i = 0; i < mstm->mgr.max_payloads; i++) {
struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
+ if (!msto->disabled)
+ return;
+
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
- if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
- drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
- if (msto->disabled) {
- msto->mstc = NULL;
- msto->head = NULL;
- msto->disabled = false;
- }
+
+ drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
+
+ msto->mstc = NULL;
+ msto->head = NULL;
+ msto->disabled = false;
}
static void
(0x0100 << msto->head->base.index),
};
+ mutex_lock(&mstm->mgr.payload_lock);
+
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
- if (mstc->port && mstc->port->vcpi.vcpi > 0) {
+ if (mstc->port->vcpi.vcpi > 0) {
struct drm_dp_payload *payload = nv50_msto_payload(msto);
if (payload) {
args.vcpi.start_slot = payload->start_slot;
msto->encoder.name, msto->head->base.base.name,
args.vcpi.start_slot, args.vcpi.num_slots,
args.vcpi.pbn, args.vcpi.aligned_pbn);
+
nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
+ mutex_unlock(&mstm->mgr.payload_lock);
}
static int
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
struct nv50_mstm *mstm = mstc->mstm;
- int bpp = conn_state->connector->display_info.bpc * 3;
+ int bpp = connector->display_info.bpc * 3;
int slots;
- mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);
+ mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
+ bpp);
- slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
- if (slots < 0)
- return slots;
+ if (drm_atomic_crtc_needs_modeset(crtc_state) &&
+ !drm_connector_is_unregistered(connector)) {
+ slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
+ mstc->port, mstc->pbn);
+ if (slots < 0)
+ return slots;
+ }
return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
mstc->native);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
- if (mstc->port)
- drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
+ drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
return ret;
}
+static int
+nv50_mstc_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state)
+{
+ struct drm_atomic_state *state = new_conn_state->state;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *new_crtc = new_conn_state->crtc;
+
+ if (!old_conn_state->crtc)
+ return 0;
+
+ /* We only want to free VCPI if this state disables the CRTC on this
+ * connector
+ */
+ if (new_crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ if (!crtc_state ||
+ !drm_atomic_crtc_needs_modeset(crtc_state) ||
+ crtc_state->enable)
+ return 0;
+ }
+
+ return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
+}
+
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
.best_encoder = nv50_mstc_best_encoder,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
+ .atomic_check = nv50_mstc_atomic_check,
};
static enum drm_connector_status
enum drm_connector_status conn_status;
int ret;
- if (!mstc->port)
+ if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
nv50_mstc_destroy(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
+
drm_connector_cleanup(&mstc->connector);
+ drm_dp_mst_put_port_malloc(mstc->port);
+
kfree(mstc);
}
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
drm_connector_set_path_property(&mstc->connector, path);
+ drm_dp_mst_get_port_malloc(port);
return 0;
}
drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
- drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
- mstc->port = NULL;
- drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
-
drm_connector_put(&mstc->connector);
}
int ret;
ret = nv50_mstc_new(mstm, port, path, &mstc);
- if (ret) {
- if (mstc)
- mstc->connector.funcs->destroy(&mstc->connector);
+ if (ret)
return NULL;
- }
return &mstc->connector;
}
return ret;
}
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret)
+ return ret;
+
return 0;
}
drm_mode_destroy(dev, new_mode);
done:
- DBG("connector: mode %s: "
- "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ DBG("connector: mode %s: " DRM_MODE_FMT,
(ret == MODE_OK) ? "valid" : "invalid",
- mode->base.id, mode->name, mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start,
- mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start,
- mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+ DRM_MODE_ARG(mode));
return ret;
}
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
- omap_crtc->name, mode->base.id, mode->name,
- mode->vrefresh, mode->clock,
- mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
- mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
- mode->type, mode->flags);
+ DBG("%s: set mode: " DRM_MODE_FMT,
+ omap_crtc->name, DRM_MODE_ARG(mode));
drm_display_mode_to_videomode(mode, &omap_crtc->vm);
}
struct hdmi_avi_infoframe avi;
int r;
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
- false);
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ adjusted_mode);
if (r == 0)
dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
}
}
if (!qdev->client_monitors_config) {
qdev->client_monitors_config = kzalloc(
- sizeof(struct qxl_monitors_config) +
- sizeof(struct qxl_head) * count, GFP_KERNEL);
+ struct_size(qdev->client_monitors_config,
+ heads, count), GFP_KERNEL);
if (!qdev->client_monitors_config)
return -ENOMEM;
}
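struct_size() (from linux/overflow.h) computes the size of a structure with a trailing flexible array and saturates to SIZE_MAX on overflow instead of silently wrapping; it is roughly equivalent, minus the overflow check, to:

	sizeof(*qdev->client_monitors_config) +
		count * sizeof(qdev->client_monitors_config->heads[0])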
if (!connector)
return -EINVAL;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
return err;
}
if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
- if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
- if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
- } else {
- frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
- }
+ drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode,
+ radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
union hdmi_infoframe frame;
int rc;
- rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ &hdmi->connector,
+ mode);
if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
return ERR_PTR(ret);
}
-static void
-rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_encoder *encoder;
- u32 encoder_mask = 0;
- int i;
-
- for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
- encoder_mask |= crtc_state->encoder_mask;
- encoder_mask |= crtc->state->encoder_mask;
- }
-
- drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
- rockchip_drm_psr_inhibit_get(encoder);
-}
-
-static void
-rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_encoder *encoder;
- u32 encoder_mask = 0;
- int i;
-
- for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
- encoder_mask |= crtc_state->encoder_mask;
- encoder_mask |= crtc->state->encoder_mask;
- }
-
- drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
- rockchip_drm_psr_inhibit_put(encoder);
-}
-
static void
rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
{
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include "rockchip_drm_drv.h"
}
EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
+void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_encoder *encoder;
+ u32 encoder_mask = 0;
+ int i;
+
+ for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+ encoder_mask |= crtc_state->encoder_mask;
+ encoder_mask |= crtc->state->encoder_mask;
+ }
+
+ drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+ rockchip_drm_psr_inhibit_get(encoder);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get_state);
+
+void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_encoder *encoder;
+ u32 encoder_mask = 0;
+ int i;
+
+ for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+ encoder_mask |= crtc_state->encoder_mask;
+ encoder_mask |= crtc->state->encoder_mask;
+ }
+
+ drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
+ rockchip_drm_psr_inhibit_put(encoder);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put_state);
+
/**
* rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
* @encoder: encoder to obtain the PSR encoder
int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
+void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state);
+void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state);
+
int rockchip_drm_psr_register(struct drm_encoder *encoder,
int (*psr_set)(struct drm_encoder *, bool enable));
void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
-#define VOP_WIN_SET(x, win, name, v) \
+#define VOP_WIN_SET(vop, win, name, v) \
vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
-#define VOP_SCL_SET(x, win, name, v) \
+#define VOP_SCL_SET(vop, win, name, v) \
vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
-#define VOP_SCL_SET_EXT(x, win, name, v) \
+#define VOP_SCL_SET_EXT(vop, win, name, v) \
vop_reg_set(vop, &win->phy->scl->ext->name, \
win->base, ~0, v, #name)
+#define VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, name, v) \
+ do { \
+ if (win_yuv2yuv && win_yuv2yuv->name.mask) \
+ vop_reg_set(vop, &win_yuv2yuv->name, 0, ~0, v, #name); \
+ } while (0)
+
+#define VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop, win_yuv2yuv, name, v) \
+ do { \
+ if (win_yuv2yuv && win_yuv2yuv->phy->name.mask) \
+ vop_reg_set(vop, &win_yuv2yuv->phy->name, win_yuv2yuv->base, ~0, v, #name); \
+ } while (0)
+
#define VOP_INTR_SET_MASK(vop, name, mask, v) \
vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)
#define VOP_INTR_GET_TYPE(vop, name, type) \
vop_get_intr_type(vop, &vop->data->intr->name, type)
-#define VOP_WIN_GET(x, win, name) \
- vop_read_reg(x, win->offset, win->phy->name)
+#define VOP_WIN_GET(vop, win, name) \
+ vop_read_reg(vop, win->offset, win->phy->name)
+
+#define VOP_WIN_HAS_REG(win, name) \
+ (!!(win->phy->name.mask))
#define VOP_WIN_GET_YRGBADDR(vop, win) \
vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
+/*
+ * The coefficients of the following matrix are all fixed-point values.
+ * The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
+ * They are all represented in two's complement.
+ */
+static const uint32_t bt601_yuv2rgb[] = {
+ 0x4A8, 0x0, 0x662,
+ 0x4A8, 0x1E6F, 0x1CBF,
+ 0x4A8, 0x812, 0x0,
+ 0x321168, 0x0877CF, 0x2EB127
+};
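Decoding a couple of entries shows the fixed-point layout (a worked example; S2.10 values sign-extend from 13 bits, S9.12 offsets from 22 bits):

/*
 * 0x4A8    ->  1192/1024                 ~=  1.164 (the BT.601 255/219 luma scale)
 * 0x1E6F   -> -(0x2000 - 0x1E6F)/1024    =  -401/1024  ~= -0.392 (G from Cb)
 * 0x321168 -> -(0x400000 - 0x321168)/4096 ~= -222.7 (R offset)
 */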
+
enum vop_pending {
VOP_PENDING_FB_UNREF,
};
struct vop_win {
struct drm_plane base;
const struct vop_win_data *data;
+ const struct vop_win_yuv2yuv_data *yuv2yuv_data;
struct vop *vop;
};
return -EINVAL;
}
+ if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) {
+ DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
+ return -EINVAL;
+ }
+
return 0;
}
struct drm_crtc *crtc = state->crtc;
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
+ const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data;
struct vop *vop = to_vop(state->crtc);
struct drm_framebuffer *fb = state->fb;
unsigned int actual_w, actual_h;
bool rb_swap;
int win_index = VOP_WIN_TO_INDEX(vop_win);
int format;
+ int is_yuv = fb->format->is_yuv;
+ int i;
/*
* can't update plane when vop is disabled.
offset += (src->y1 >> 16) * fb->pitches[0];
dma_addr = rk_obj->dma_addr + offset + fb->offsets[0];
+ /*
+ * For y-mirroring we need to move address
+ * to the beginning of the last line.
+ */
+ if (state->rotation & DRM_MODE_REFLECT_Y)
+ dma_addr += (actual_h - 1) * fb->pitches[0];
+
format = vop_convert_format(fb->format->format);
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
- if (fb->format->is_yuv) {
+ VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv);
+ VOP_WIN_SET(vop, win, y_mir_en,
+ (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0);
+ VOP_WIN_SET(vop, win, x_mir_en,
+ (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);
+
+ if (is_yuv) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
int bpp = fb->format->cpp[1];
dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
VOP_WIN_SET(vop, win, uv_mst, dma_addr);
+
+ for (i = 0; i < NUM_YUV2YUV_COEFFICIENTS; i++) {
+ VOP_WIN_YUV2YUV_COEFFICIENT_SET(vop,
+ win_yuv2yuv,
+ y2r_coefficients[i],
+ bt601_yuv2rgb[i]);
+ }
}
if (win->phy->scl)
spin_unlock(&vop->reg_lock);
}
+static int vop_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
+ DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
+ DRM_PLANE_HELPER_NO_SCALING;
+ struct drm_crtc_state *crtc_state;
+
+ if (plane != state->crtc->cursor)
+ return -EINVAL;
+
+ if (!plane->state)
+ return -EINVAL;
+
+ if (!plane->state->fb)
+ return -EINVAL;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ else /* Special case for asynchronous cursor updates. */
+ crtc_state = plane->crtc->state;
+
+ return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
+ min_scale, max_scale,
+ true, true);
+}
+
+static void vop_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct vop *vop = to_vop(plane->state->crtc);
+ struct drm_plane_state *plane_state;
+
+ plane_state = plane->funcs->atomic_duplicate_state(plane);
+ plane_state->crtc_x = new_state->crtc_x;
+ plane_state->crtc_y = new_state->crtc_y;
+ plane_state->crtc_h = new_state->crtc_h;
+ plane_state->crtc_w = new_state->crtc_w;
+ plane_state->src_x = new_state->src_x;
+ plane_state->src_y = new_state->src_y;
+ plane_state->src_h = new_state->src_h;
+ plane_state->src_w = new_state->src_w;
+
+ if (plane_state->fb != new_state->fb)
+ drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
+
+ swap(plane_state, plane->state);
+
+ if (plane->state->fb && plane->state->fb != new_state->fb) {
+ drm_framebuffer_get(plane->state->fb);
+ WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
+ drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
+ set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+ }
+
+ if (vop->is_enabled) {
+ rockchip_drm_psr_inhibit_get_state(new_state->state);
+ vop_plane_atomic_update(plane, plane->state);
+ spin_lock(&vop->reg_lock);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+ rockchip_drm_psr_inhibit_put_state(new_state->state);
+ }
+
+ plane->funcs->atomic_destroy_state(plane, plane_state);
+}
+
static const struct drm_plane_helper_funcs plane_helper_funcs = {
.atomic_check = vop_plane_atomic_check,
.atomic_update = vop_plane_atomic_update,
.atomic_disable = vop_plane_atomic_disable,
+ .atomic_async_check = vop_plane_atomic_async_check,
+ .atomic_async_update = vop_plane_atomic_async_update,
.prepare_fb = drm_gem_fb_prepare_fb,
};
return ret;
}
+static void vop_plane_add_properties(struct drm_plane *plane,
+ const struct vop_win_data *win_data)
+{
+ unsigned int flags = 0;
+
+ flags |= VOP_WIN_HAS_REG(win_data, x_mir_en) ? DRM_MODE_REFLECT_X : 0;
+ flags |= VOP_WIN_HAS_REG(win_data, y_mir_en) ? DRM_MODE_REFLECT_Y : 0;
+ if (flags)
+ drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
+ DRM_MODE_ROTATE_0 | flags);
+}
+
static int vop_create_crtc(struct vop *vop)
{
const struct vop_data *vop_data = vop->data;
plane = &vop_win->base;
drm_plane_helper_add(plane, &plane_helper_funcs);
+ vop_plane_add_properties(plane, win_data);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
primary = plane;
else if (plane->type == DRM_PLANE_TYPE_CURSOR)
goto err_cleanup_crtc;
}
drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
+ vop_plane_add_properties(&vop_win->base, win_data);
}
port = of_get_child_by_name(dev->of_node, "port");
vop_win->data = win_data;
vop_win->vop = vop;
+ vop_win->yuv2yuv_data = &vop_data->win_yuv2yuv[i];
}
}
#define VOP_MAJOR(version) ((version) >> 8)
#define VOP_MINOR(version) ((version) & 0xff)
+#define NUM_YUV2YUV_COEFFICIENTS 12
+
enum vop_data_format {
VOP_FMT_ARGB8888 = 0,
VOP_FMT_RGB888,
struct vop_reg scale_cbcr_y;
};
+struct vop_yuv2yuv_phy {
+ struct vop_reg y2r_coefficients[NUM_YUV2YUV_COEFFICIENTS];
+};
+
struct vop_win_phy {
const struct vop_scl_regs *scl;
const uint32_t *data_formats;
struct vop_reg uv_mst;
struct vop_reg yrgb_vir;
struct vop_reg uv_vir;
+ struct vop_reg y_mir_en;
+ struct vop_reg x_mir_en;
struct vop_reg dst_alpha_ctl;
struct vop_reg src_alpha_ctl;
struct vop_reg channel;
};
+struct vop_win_yuv2yuv_data {
+ uint32_t base;
+ const struct vop_yuv2yuv_phy *phy;
+ struct vop_reg y2r_en;
+};
+
struct vop_win_data {
uint32_t base;
const struct vop_win_phy *phy;
const struct vop_misc *misc;
const struct vop_modeset *modeset;
const struct vop_output *output;
+ const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
.clear = VOP_REG_MASK_SYNC(RK3368_INTR_CLEAR, 0x3fff, 0),
};
+static const struct vop_win_phy rk3368_win01_data = {
+ .scl = &rk3288_win_full_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
+ .x_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 21),
+ .y_mir_en = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3368_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3368_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3368_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3368_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3368_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3368_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3368_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3368_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3368_WIN0_CTRL2, 0xff, 0),
+};
+
static const struct vop_win_phy rk3368_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
.rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
+ .y_mir_en = VOP_REG(RK3368_WIN2_CTRL1, 0x1, 15),
.dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
.dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
};
static const struct vop_win_data rk3368_vop_win_data[] = {
- { .base = 0x00, .phy = &rk3288_win01_data,
+ { .base = 0x00, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &rk3288_win01_data,
+ { .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
{ .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
};
+static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = {
+ .y2r_coefficients = {
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 16),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 4, 0xffff, 16),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 8, 0xffff, 16),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 12, 0xffff, 16),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 16, 0xffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 20, 0xffffffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 24, 0xffffffff, 0),
+ VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 28, 0xffffffff, 0),
+ },
+};
+
+static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win23_data = { };
+
+static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
+ { .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
+ .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1) },
+ { .base = 0x60, .phy = &rk3399_yuv2yuv_win01_data,
+ .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
+ { .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
+ { .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+};
+
static const struct vop_data rk3399_vop_big = {
.version = VOP_VERSION(3, 5),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.misc = &rk3368_misc,
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
+ .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
};
static const struct vop_win_data rk3399_vop_lit_win_data[] = {
- { .base = 0x00, .phy = &rk3288_win01_data,
+ { .base = 0x00, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
{ .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR},
};
+static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = {
+ { .base = 0x00, .phy = &rk3399_yuv2yuv_win01_data,
+ .y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 1)},
+ { .base = 0x60, .phy = &rk3399_yuv2yuv_win23_data },
+};
+
static const struct vop_data rk3399_vop_lit = {
.version = VOP_VERSION(3, 6),
.intr = &rk3366_vop_intr,
.misc = &rk3368_misc,
.win = rk3399_vop_lit_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
+ .win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
};
static const struct vop_win_data rk3228_vop_win_data[] = {
};
static const struct vop_win_data rk3328_vop_win_data[] = {
- { .base = 0xd0, .phy = &rk3288_win01_data,
+ { .base = 0xd0, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x1d0, .phy = &rk3288_win01_data,
+ { .base = 0x1d0, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x2d0, .phy = &rk3288_win01_data,
+ { .base = 0x2d0, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
struct clk *compo_clk, *pix_clk;
int rate = mode->clock * 1000;
- DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- mode->base.id, mode->name);
-
- DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
- mode->vrefresh, mode->clock,
- mode->hdisplay,
- mode->hsync_start, mode->hsync_end,
- mode->htotal,
- mode->vdisplay,
- mode->vsync_start, mode->vsync_end,
- mode->vtotal, mode->type, mode->flags);
+ DRM_DEBUG_KMS("CRTC:%d (%s) mode: (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer), mode->name);
+
+ DRM_DEBUG_KMS(DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
if (mixer->id == STI_MIXER_MAIN) {
compo_clk = compo->clk_compo_main;
DRM_DEBUG_DRIVER("\n");
- ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe,
+ hdmi->drm_connector, mode);
if (ret < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
return ret;
u8 buffer[17];
int i, ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &hdmi->connector, mode);
if (ret < 0) {
DRM_ERROR("Failed to get infoframes from mode\n");
return ret;
u8 buffer[17];
ssize_t err;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &hdmi->output.connector, mode);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
return;
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
+ &sor->output.connector, mode);
if (err < 0) {
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
return err;
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/tinydrm/tinydrm.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/module.h>
/**
* DOC: overview
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include <drm/tinydrm/tinydrm.h>
struct tinydrm_connector {
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
*/
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
struct vc4_encoder base;
bool hdmi_monitor;
bool limited_rgb_range;
- bool rgb_range_selectable;
};
static inline struct vc4_hdmi_encoder *
vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid);
- if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
- vc4_encoder->rgb_range_selectable =
- drm_rgb_quant_range_selectable(edid);
- }
-
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
union hdmi_infoframe frame;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ hdmi->connector, mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
- drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ hdmi->connector, mode,
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
- HDMI_QUANTIZATION_RANGE_FULL,
- vc4_encoder->rgb_range_selectable,
- false);
+ HDMI_QUANTIZATION_RANGE_FULL);
frame.avi.right_bar = cstate->tv.margins.right;
frame.avi.left_bar = cstate->tv.margins.left;
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o
static void virtio_gpu_conn_destroy(struct drm_connector *connector)
{
- struct virtio_gpu_output *virtio_gpu_output =
- drm_connector_to_virtio_gpu_output(connector);
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- kfree(virtio_gpu_output);
}
static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
+void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i;
vgdev_output_init(vgdev, i);
drm_mode_config_reset(vgdev->ddev);
- return 0;
}
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
+++ /dev/null
-/*
- * Copyright (C) 2015 Red Hat, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <drm/drm_fb_helper.h>
-
-#include "virtgpu_drv.h"
-
-int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
-{
- struct drm_device *dev;
- int ret;
-
- dev = drm_dev_alloc(driver, &vdev->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- vdev->priv = dev;
-
- if (strcmp(vdev->dev.parent->bus->name, "pci") == 0) {
- struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
- const char *pname = dev_name(&pdev->dev);
- bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
- char unique[20];
-
- DRM_INFO("pci: %s detected at %s\n",
- vga ? "virtio-vga" : "virtio-gpu-pci",
- pname);
- dev->pdev = pdev;
- if (vga)
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- 0,
- "virtiodrmfb");
-
- /*
- * Normally the drm_dev_set_unique() call is done by core DRM.
- * The following comment covers, why virtio cannot rely on it.
- *
- * Unlike the other virtual GPU drivers, virtio abstracts the
- * underlying bus type by using struct virtio_device.
- *
- * Hence the dev_is_pci() check, used in core DRM, will fail
- * and the unique returned will be the virtio_device "virtio0",
- * while a "pci:..." one is required.
- *
- * A few other ideas were considered:
- * - Extend the dev_is_pci() check [in drm_set_busid] to
- * consider virtio.
- * Seems like a bigger hack than what we have already.
- *
- * - Point drm_device::dev to the parent of the virtio_device
- * Semantic changes:
- * * Using the wrong device for i2c, framebuffer_alloc and
- * prime import.
- * Visual changes:
- * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
- * will print the wrong information.
- *
- * We could address the latter issues, by introducing
- * drm_device::bus_dev, ... which would be used solely for this.
- *
- * So for the moment keep things as-is, with a bulky comment
- * for the next person who feels like removing this
- * drm_dev_set_unique() quirk.
- */
- snprintf(unique, sizeof(unique), "pci:%s", pname);
- ret = drm_dev_set_unique(dev, unique);
- if (ret)
- goto err_free;
-
- }
-
- ret = drm_dev_register(dev, 0);
- if (ret)
- goto err_free;
-
- return 0;
-
-err_free:
- drm_dev_put(dev);
- return ret;
-}
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
+static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+{
+ struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ const char *pname = dev_name(&pdev->dev);
+ bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
+ char unique[20];
+
+ DRM_INFO("pci: %s detected at %s\n",
+ vga ? "virtio-vga" : "virtio-gpu-pci",
+ pname);
+ dev->pdev = pdev;
+ if (vga)
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ 0,
+ "virtiodrmfb");
+
+ /*
+ * Normally the drm_dev_set_unique() call is done by core DRM.
+ * The following comment covers why virtio cannot rely on it.
+ *
+ * Unlike the other virtual GPU drivers, virtio abstracts the
+ * underlying bus type by using struct virtio_device.
+ *
+ * Hence the dev_is_pci() check, used in core DRM, will fail
+ * and the unique returned will be the virtio_device "virtio0",
+ * while a "pci:..." one is required.
+ *
+ * A few other ideas were considered:
+ * - Extend the dev_is_pci() check [in drm_set_busid] to
+ * consider virtio.
+ * Seems like a bigger hack than what we have already.
+ *
+ * - Point drm_device::dev to the parent of the virtio_device
+ * Semantic changes:
+ * * Using the wrong device for i2c, framebuffer_alloc and
+ * prime import.
+ * Visual changes:
+ * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
+ * will print the wrong information.
+ *
+ * We could address the latter issues by introducing
+ * drm_device::bus_dev, ... which would be used solely for this.
+ *
+ * So for the moment keep things as-is, with a bulky comment
+ * for the next person who feels like removing this
+ * drm_dev_set_unique() quirk.
+ */
+ snprintf(unique, sizeof(unique), "pci:%s", pname);
+ return drm_dev_set_unique(dev, unique);
+}
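For illustration: with this quirk applied, a virtio-gpu device behind PCI ends up with a busid of the form "pci:0000:00:02.0" (address hypothetical), where core DRM on its own would have derived the bare "virtio0" name from the virtio_device.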
+
static int virtio_gpu_probe(struct virtio_device *vdev)
{
+ struct drm_device *dev;
int ret;
if (vgacon_text_force() && virtio_gpu_modeset == -1)
if (virtio_gpu_modeset == 0)
return -EINVAL;
- ret = drm_virtio_init(&driver, vdev);
+ dev = drm_dev_alloc(&driver, &vdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ vdev->priv = dev;
+
+ if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
+ ret = virtio_gpu_pci_quirk(dev, vdev);
+ if (ret)
+ goto err_free;
+ }
+
+ ret = virtio_gpu_init(dev);
if (ret)
- return ret;
+ goto err_free;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_free;
drm_fbdev_generic_setup(vdev->priv, 32);
return 0;
+
+err_free:
+ drm_dev_put(dev);
+ return ret;
}
static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
+ drm_dev_unregister(dev);
+ virtio_gpu_deinit(dev);
drm_put_dev(dev);
}
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .load = virtio_gpu_driver_load,
- .unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
-/* virtgpu_drm_bus.c */
-int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
-
struct virtio_gpu_object {
struct drm_gem_object gem_base;
uint32_t hw_res_handle;
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_kms.c */
-int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
-void virtio_gpu_driver_unload(struct drm_device *dev);
+int virtio_gpu_init(struct drm_device *dev);
+void virtio_gpu_deinit(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
struct virtio_gpu_framebuffer *vgfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
-int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
+void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtio_gpu_plane.c */
/* virtio_gpu_fence.c */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
-void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
return fence;
}
-void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
-{
- if (!fence)
- return;
-
- dma_fence_put(&fence->f);
-}
-
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence)
virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
ret = virtio_gpu_object_attach(vgdev, qobj, fence);
if (ret) {
- virtio_gpu_fence_cleanup(fence);
+ dma_fence_put(&fence->f);
goto fail_backoff;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
vgdev->num_capsets = num_capsets;
}
-int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
+int virtio_gpu_init(struct drm_device *dev)
{
static vq_callback_t *callbacks[] = {
virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
num_capsets, &num_capsets);
DRM_INFO("number of cap sets: %d\n", num_capsets);
- ret = virtio_gpu_modeset_init(vgdev);
- if (ret)
- goto err_modeset;
+ virtio_gpu_modeset_init(vgdev);
virtio_device_ready(vgdev->vdev);
vgdev->vqs_ready = true;
5 * HZ);
return 0;
-err_modeset:
err_scanouts:
virtio_gpu_ttm_fini(vgdev);
err_ttm:
}
}
-void virtio_gpu_driver_unload(struct drm_device *dev)
+void virtio_gpu_deinit(struct drm_device *dev)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
+ vgdev->vdev->config->reset(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
virtio_gpu_modeset_fini(vgdev);
plane->state->src_h >> 16,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
- virtio_gpu_cmd_resource_flush(vgdev, handle,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16);
+ if (handle)
+ virtio_gpu_cmd_resource_flush(vgdev, handle,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
return;
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
- if (vgfb->fence)
- virtio_gpu_fence_cleanup(vgfb->fence);
+ if (vgfb->fence) {
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
+ }
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
- if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
- DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+ if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
+ if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ struct virtio_gpu_ctrl_hdr *cmd;
+
+ cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
+ DRM_ERROR("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
+ } else {
+ DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+ }
+ }
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
u64 f = le64_to_cpu(resp->fence_id);
union hdmi_infoframe frame;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ &hdmi->connector,
+ mode);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
ret);
#ifndef __DW_HDMI__
#define __DW_HDMI__
-#include <drm/drmP.h>
-
+struct drm_connector;
+struct drm_display_mode;
+struct drm_encoder;
struct dw_hdmi;
+struct platform_device;
/**
* DOC: Supported input formats and encodings
struct pci_dev;
struct pci_controller;
-#define DRM_SWITCH_POWER_ON 0
-#define DRM_SWITCH_POWER_OFF 1
-#define DRM_SWITCH_POWER_CHANGING 2
-#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-
/* returns true if currently okay to sleep */
static inline bool drm_can_sleep(void)
{
/**
* @abort_completion:
*
- * A flag that's set after drm_atomic_helper_setup_commit takes a second
- * reference for the completion of $drm_crtc_state.event. It's used by
- * the free code to remove the second reference if commit fails.
+ * A flag that's set after drm_atomic_helper_setup_commit() takes a
+ * second reference for the completion of $drm_crtc_state.event. It's
+ * used by the free code to remove the second reference if commit fails.
*/
bool abort_completion;
};
*/
bool has_hdmi_infoframe;
+ /**
+ * @rgb_quant_range_selectable: Does the sink support selecting
+ * the RGB quantization range?
+ */
+ bool rgb_quant_range_selectable;
+
/**
* @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
* more stuff redundant with @bus_formats.
return 1 << drm_crtc_index(crtc);
}
-int drm_crtc_force_disable(struct drm_crtc *crtc);
int drm_crtc_force_disable_all(struct drm_device *dev);
int drm_mode_set_config_internal(struct drm_mode_set *set);
struct pci_dev;
struct pci_controller;
+
+/**
+ * enum switch_power_state - power state of drm device
+ */
+enum switch_power_state {
+ /** @DRM_SWITCH_POWER_ON: Power state is ON */
+ DRM_SWITCH_POWER_ON = 0,
+
+ /** @DRM_SWITCH_POWER_OFF: Power state is OFF */
+ DRM_SWITCH_POWER_OFF = 1,
+
+ /** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
+ DRM_SWITCH_POWER_CHANGING = 2,
+
+ /** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
+ DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
+};
+
/**
- * DRM device structure. This structure represent a complete card that
+ * struct drm_device - DRM device structure
+ *
+ * This structure represents a complete card that
* may contain multiple heads.
*/
struct drm_device {
- struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
- int if_version; /**< Highest interface version set */
-
- /** \name Lifetime Management */
- /*@{ */
- struct kref ref; /**< Object ref-count */
- struct device *dev; /**< Device structure of bus-device */
- struct drm_driver *driver; /**< DRM driver managing the device */
- void *dev_private; /**< DRM driver private data */
- struct drm_minor *primary; /**< Primary node */
- struct drm_minor *render; /**< Render node */
+ /**
+ * @legacy_dev_list:
+ *
+ * List of devices per driver for stealth attach cleanup
+ */
+ struct list_head legacy_dev_list;
+
+ /** @if_version: Highest interface version set */
+ int if_version;
+
+ /** @ref: Object ref-count */
+ struct kref ref;
+
+ /** @dev: Device structure of bus-device */
+ struct device *dev;
+
+ /** @driver: DRM driver managing the device */
+ struct drm_driver *driver;
+
+ /** @dev_private: DRM driver private data */
+ void *dev_private;
+
+ /** @primary: Primary node */
+ struct drm_minor *primary;
+
+ /** @render: Render node */
+ struct drm_minor *render;
+
bool registered;
- /* currently active master for this device. Protected by master_mutex */
+ /**
+ * @master:
+ *
+ * Currently active master for this device.
+ * Protected by &master_mutex
+ */
struct drm_master *master;
/**
*/
bool unplugged;
- struct inode *anon_inode; /**< inode for private address-space */
- char *unique; /**< unique name of the device */
- /*@} */
+ /** @anon_inode: inode for private address-space */
+ struct inode *anon_inode;
+
+ /** @unique: Unique name of the device */
+ char *unique;
+
+ /**
+ * @struct_mutex:
+ *
+ * Lock for others (not &drm_minor.master and &drm_file.is_master)
+ */
+ struct mutex struct_mutex;
+
+ /**
+ * @master_mutex:
+ *
+ * Lock for &drm_minor.master and &drm_file.is_master
+ */
+ struct mutex master_mutex;
+
+ /**
+ * @open_count:
+ *
+ * Usage counter for outstanding files open,
+ * protected by drm_global_mutex
+ */
+ int open_count;
+
+ /** @buf_lock: Lock for &buf_use and a few other things. */
+ spinlock_t buf_lock;
- /** \name Locks */
- /*@{ */
- struct mutex struct_mutex; /**< For others */
- struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
- /*@} */
+ /** @buf_use: Usage counter for buffers in use -- cannot alloc */
+ int buf_use;
- /** \name Usage Counters */
- /*@{ */
- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
- spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
- int buf_use; /**< Buffers in use -- cannot alloc */
- atomic_t buf_alloc; /**< Buffer allocation in progress */
- /*@} */
+ /** @buf_alloc: Buffer allocation in progress */
+ atomic_t buf_alloc;
struct mutex filelist_mutex;
struct list_head filelist;
/**
* @filelist_internal:
*
- * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
+ * List of open DRM files for in-kernel clients.
+ * Protected by &filelist_mutex.
*/
struct list_head filelist_internal;
/**
* @clientlist_mutex:
*
- * Protects @clientlist access.
+ * Protects &clientlist access.
*/
struct mutex clientlist_mutex;
/**
* @clientlist:
*
- * List of in-kernel clients. Protected by @clientlist_mutex.
+ * List of in-kernel clients. Protected by &clientlist_mutex.
*/
struct list_head clientlist;
- /** \name Memory management */
- /*@{ */
- struct list_head maplist; /**< Linked list of regions */
- struct drm_open_hash map_hash; /**< User token hash table for maps */
+ /** @maplist: Memory management - linked list of regions */
+ struct list_head maplist;
- /** \name Context handle management */
- /*@{ */
- struct list_head ctxlist; /**< Linked list of context handles */
- struct mutex ctxlist_mutex; /**< For ctxlist */
+ /** @map_hash: Memory management - user token hash table for maps */
+ struct drm_open_hash map_hash;
- struct idr ctx_idr;
+ /**
+ * @ctxlist:
+ * Context handle management - linked list of context handles
+ */
+ struct list_head ctxlist;
- struct list_head vmalist; /**< List of vmas (for debugging) */
+ /**
+ * @ctxlist_mutex:
+ *
+ * Context handle management - mutex for &ctxlist
+ */
+ struct mutex ctxlist_mutex;
- /*@} */
+ /**
+ * @ctx_idr:
+ * Context handle management
+ */
+ struct idr ctx_idr;
- /** \name DMA support */
- /*@{ */
- struct drm_device_dma *dma; /**< Optional pointer for DMA support */
- /*@} */
+ /**
+ * @vmalist:
+ * Context handle management - list of vmas (for debugging)
+ */
+ struct list_head vmalist;
+
+ /** @dma: Optional pointer for DMA support */
+ struct drm_device_dma *dma;
- /** \name Context support */
- /*@{ */
+ /** @context_flag: Context swapping flag */
+ __volatile__ long context_flag;
- __volatile__ long context_flag; /**< Context swapping flag */
- int last_context; /**< Last current context */
- /*@} */
+ /** @last_context: Last current context */
+ int last_context;
/**
* @irq_enabled:
*/
struct drm_vblank_crtc *vblank;
- spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
+ /**
+ * @vblank_time_lock:
+ *
+ * Protects vblank count and time updates during vblank enable/disable
+ */
+ spinlock_t vblank_time_lock;
spinlock_t vbl_lock;
/**
*
* If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
*/
- u32 max_vblank_count; /**< size of vblank counter register */
- /**
- * List of events
- */
+ /** @max_vblank_count: Size of vblank counter register */
+ u32 max_vblank_count;
+
+ /** @vblank_event_list: List of vblank events */
struct list_head vblank_event_list;
spinlock_t event_lock;
- /*@} */
+ /** @agp: AGP data */
+ struct drm_agp_head *agp;
- struct drm_agp_head *agp; /**< AGP data */
+ /** @pdev: PCI device structure */
+ struct pci_dev *pdev;
- struct pci_dev *pdev; /**< PCI device structure */
#ifdef __alpha__
struct pci_controller *hose;
#endif
- struct drm_sg_mem *sg; /**< Scatter gather memory */
- unsigned int num_crtcs; /**< Number of CRTCs on this device */
+ /** @sg: Scatter gather memory */
+ struct drm_sg_mem *sg;
+
+ /** @num_crtcs: Number of CRTCs on this device */
+ unsigned int num_crtcs;
struct {
int context;
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
- struct drm_mode_config mode_config; /**< Current mode config */
+ /** @mode_config: Current mode config */
+ struct drm_mode_config mode_config;
- /** \name GEM information */
- /*@{ */
+ /** @object_name_lock: GEM information */
struct mutex object_name_lock;
+
+ /** @object_name_idr: GEM information */
struct idr object_name_idr;
+
+ /** @vma_offset_manager: GEM information */
struct drm_vma_offset_manager *vma_offset_manager;
- /*@} */
- int switch_power_state;
+
+ /**
+ * @switch_power_state:
+ *
+ * Power state of the client.
+ * Used by drivers supporting the switcheroo driver.
+ * The state is maintained in the
+ * &vga_switcheroo_client_ops.set_gpu_state callback.
+ */
+ enum switch_power_state switch_power_state;
/**
* @fb_helper:
/**
* struct drm_dp_mst_port - MST port
- * @kref: reference count for this port.
* @port_num: port number
* @input: if this port is an input port.
* @mcs: message capability status - DP 1.2 spec.
* in the MST topology.
*/
struct drm_dp_mst_port {
- struct kref kref;
+ /**
+ * @topology_kref: refcount for this port's lifetime in the topology,
+ * only the DP MST helpers should need to touch this
+ */
+ struct kref topology_kref;
+
+ /**
+ * @malloc_kref: refcount for the memory allocation containing this
+ * structure. See drm_dp_mst_get_port_malloc() and
+ * drm_dp_mst_put_port_malloc().
+ */
+ struct kref malloc_kref;
u8 port_num;
bool input;
/**
* struct drm_dp_mst_branch - MST branch device.
- * @kref: reference count for this port.
* @rad: Relative Address to talk to this branch device.
* @lct: Link count total to talk to this branch device.
* @num_ports: number of ports on the branch.
* to downstream port of parent branches.
*/
struct drm_dp_mst_branch {
- struct kref kref;
+ /**
+ * @topology_kref: refcount for this branch device's lifetime in the
+ * topology, only the DP MST helpers should need to touch this
+ */
+ struct kref topology_kref;
+
+ /**
+ * @malloc_kref: refcount for the memory allocation containing this
+ * structure. See drm_dp_mst_get_mstb_malloc() and
+ * drm_dp_mst_put_mstb_malloc().
+ */
+ struct kref malloc_kref;
+
u8 rad[8];
u8 lct;
int num_ports;
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+struct drm_dp_vcpi_allocation {
+ struct drm_dp_mst_port *port;
+ int vcpi;
+ struct list_head next;
+};
+
struct drm_dp_mst_topology_state {
struct drm_private_state base;
- int avail_slots;
+ struct list_head vcpis;
struct drm_dp_mst_topology_mgr *mgr;
};
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
-int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn);
-int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr,
- int slots);
+int __must_check
+drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn);
+int __must_check
+drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up);
+int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
+
+void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
+void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+
+extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
+
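As a quick illustration of the malloc-reference rules above: a driver that caches a port pointer in its own state pins the allocation with drm_dp_mst_get_port_malloc() and releases it with drm_dp_mst_put_port_malloc(). A minimal sketch, assuming a hypothetical foo_mst_encoder structure (everything foo_* is made up for illustration):

#include <drm/drm_dp_mst_helper.h>

/* Hypothetical driver state that caches an MST port pointer. */
struct foo_mst_encoder {
	struct drm_dp_mst_port *port;
};

static void foo_mst_encoder_init(struct foo_mst_encoder *enc,
				 struct drm_dp_mst_port *port)
{
	/*
	 * Take a malloc reference so enc->port stays valid even after
	 * the port is removed from the topology; only the topology
	 * reference goes away at that point, not the memory.
	 */
	drm_dp_mst_get_port_malloc(port);
	enc->port = port;
}

static void foo_mst_encoder_fini(struct foo_mst_encoder *enc)
{
	/* Drop the malloc reference; the port may be freed here. */
	drm_dp_mst_put_port_malloc(enc->port);
	enc->port = NULL;
}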
+/**
+ * __drm_dp_mst_state_iter_get - private atomic state iterator function for
+ * macro-internal use
+ * @state: &struct drm_atomic_state pointer
+ * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
+ * iteration cursor
+ * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
+ * iteration cursor
+ * @i: int iteration cursor, for macro-internal use
+ *
+ * Used by for_each_oldnew_mst_mgr_in_state(),
+ * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
+ * call this directly.
+ *
+ * Returns:
+ * True if the current &struct drm_private_obj is a &struct
+ * drm_dp_mst_topology_mgr, false otherwise.
+ */
+static inline bool
+__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr **mgr,
+ struct drm_dp_mst_topology_state **old_state,
+ struct drm_dp_mst_topology_state **new_state,
+ int i)
+{
+ struct __drm_private_objs_state *objs_state = &state->private_objs[i];
+
+ if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
+ return false;
+
+ *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
+ if (old_state)
+ *old_state = to_dp_mst_topology_state(objs_state->old_state);
+ if (new_state)
+ *new_state = to_dp_mst_topology_state(objs_state->new_state);
+
+ return true;
+}
+
+/**
+ * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
+ * managers in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
+ * state
+ * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking both old and new state. This is useful in places where the state
+ * delta needs to be considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
+ for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+ for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
+
+/**
+ * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
+ * in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking only the old state. This is useful in disable functions, where we
+ * need the old state the hardware is still in.
+ */
+#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
+ for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+ for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
+
+/**
+ * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
+ * in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
+ * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
+ * state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all DRM DP MST topology managers in an atomic update,
+ * tracking only the new state. This is useful in enable functions, where we
+ * need the new state the hardware should be in when the atomic commit
+ * operation has completed.
+ */
+#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
+ for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
+ for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
#endif
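A minimal sketch of how these iterators and the reworked VCPI state are meant to compose in a driver's atomic check path; foo_atomic_check() and the driver around it are hypothetical, while the macros, struct drm_dp_vcpi_allocation and drm_dp_mst_atomic_check() are the interfaces added above:

static int foo_atomic_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *new_mst_state;
	struct drm_dp_vcpi_allocation *pos;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Walk every MST manager with state in this update. */
	for_each_new_mst_mgr_in_state(state, mgr, new_mst_state, i) {
		list_for_each_entry(pos, &new_mst_state->vcpis, next)
			DRM_DEBUG_ATOMIC("port %p: %d VCPI slots\n",
					 pos->port, pos->vcpi);
	}

	/* Have the MST helpers validate the new VCPI allocations. */
	return drm_dp_mst_atomic_check(state);
}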
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- const struct drm_display_mode *mode,
- bool is_hdmi2_sink);
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode);
int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
struct drm_connector *connector,
const struct drm_display_mode *mode);
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
+ struct drm_connector *connector,
const struct drm_display_mode *mode,
- enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable,
- bool is_hdmi2_sink);
+ enum hdmi_quantization_range rgb_quant_range);
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
bool drm_detect_hdmi_monitor(struct edid *edid);
bool drm_detect_monitor_audio(struct edid *edid);
-bool drm_rgb_quant_range_selectable(struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
#ifndef __DRM_ENCODER_SLAVE_H__
#define __DRM_ENCODER_SLAVE_H__
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#ifndef __DRM_FRAMEBUFFER_H__
#define __DRM_FRAMEBUFFER_H__
-#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+
#include <drm/drm_mode_object.h>
-struct drm_framebuffer;
-struct drm_file;
+struct drm_clip_rect;
struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
/**
* struct drm_framebuffer_funcs - framebuffer hooks
#ifndef __DRM_GEM_CMA_HELPER_H__
#define __DRM_GEM_CMA_HELPER_H__
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
+struct drm_mode_create_dumb;
+
/**
* struct drm_gem_cma_object - GEM object backed by CMA memory allocations
* @base: base GEM object