intel_ringbuffer.o \
intel_uncore.o
+# general-purpose microcontroller (GuC) support
+i915-y += intel_guc_loader.o \
+ i915_guc_submission.o
+
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
- i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+ i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o
# modesetting output/encoder code
i915-y += dvo_ch7017.o \
PINNED_LIST,
};
-static const char *yesno(int v)
-{
- return v ? "yes" : "no";
-}
-
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
static int
struct intel_framebuffer *fb;
struct drm_framebuffer *drm_fb;
- #ifdef CONFIG_DRM_I915_FBDEV
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
ifbdev = dev_priv->fbdev;
return;
}
- page = i915_gem_object_get_page(ctx_obj, 1);
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
if (!WARN_ON(page == NULL)) {
reg_state = kmap_atomic(page);
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- struct drm_file *file;
int i;
if (INTEL_INFO(dev)->gen == 6)
ppgtt->debug_dump(ppgtt, m);
}
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
-
- seq_printf(m, "proc: %s\n",
- get_pid_task(file->pid, PIDTYPE_PID)->comm);
- idr_for_each(&file_priv->context_idr, per_file_ctx, m);
- }
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_file *file;
int ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
else if (INTEL_INFO(dev)->gen >= 6)
gen6_ppgtt_info(m, dev);
+ list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ seq_printf(m, "\nproc: %s\n",
+ get_pid_task(file->pid, PIDTYPE_PID)->comm);
+ idr_for_each(&file_priv->context_idr, per_file_ctx,
+ (void *)(unsigned long)m);
+ }
+
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return 0;
}
+static int i915_guc_load_status_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+ struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ u32 tmp, i;
+
+ if (!HAS_GUC_UCODE(dev_priv->dev))
+ return 0;
+
+ seq_printf(m, "GuC firmware status:\n");
+ seq_printf(m, "\tpath: %s\n",
+ guc_fw->guc_fw_path);
+ seq_printf(m, "\tfetch: %s\n",
+ intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+ seq_printf(m, "\tload: %s\n",
+ intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ seq_printf(m, "\tversion wanted: %d.%d\n",
+ guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ seq_printf(m, "\tversion found: %d.%d\n",
+ guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+
+ tmp = I915_READ(GUC_STATUS);
+
+ seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+ seq_printf(m, "\tBootrom status = 0x%x\n",
+ (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ seq_printf(m, "\tuKernel status = 0x%x\n",
+ (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ seq_printf(m, "\tMIA Core status = 0x%x\n",
+ (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ seq_puts(m, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++)
+ seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+
+ return 0;
+}
+
+static void i915_guc_client_info(struct seq_file *m,
+ struct drm_i915_private *dev_priv,
+ struct i915_guc_client *client)
+{
+ struct intel_engine_cs *ring;
+ uint64_t tot = 0;
+ uint32_t i;
+
+ seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
+ client->priority, client->ctx_index, client->proc_desc_offset);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+ client->doorbell_id, client->doorbell_offset, client->cookie);
+ seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
+ client->wq_size, client->wq_offset, client->wq_tail);
+
+ seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
+ seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
+ seq_printf(m, "\tLast submission result: %d\n", client->retcode);
+
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "\tSubmissions: %llu %s\n",
+ client->submissions[i],
+ ring->name);
+ tot += client->submissions[i];
+ }
+ seq_printf(m, "\tTotal: %llu\n", tot);
+}
+
+static int i915_guc_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_guc guc;
+ struct i915_guc_client client = {};
+ struct intel_engine_cs *ring;
+ enum intel_ring_id i;
+ u64 total = 0;
+
+ if (!HAS_GUC_SCHED(dev_priv->dev))
+ return 0;
+
+ /* Take a local copy of the GuC data, so we can dump it at leisure */
+ spin_lock(&dev_priv->guc.host2guc_lock);
+ guc = dev_priv->guc;
+ if (guc.execbuf_client) {
+ spin_lock(&guc.execbuf_client->wq_lock);
+ client = *guc.execbuf_client;
+ spin_unlock(&guc.execbuf_client->wq_lock);
+ }
+ spin_unlock(&dev_priv->guc.host2guc_lock);
+
+ seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
+ seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
+ seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
+ seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
+ seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+
+ seq_printf(m, "\nGuC submissions:\n");
+ for_each_ring(ring, dev_priv, i) {
+ seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
+ ring->name, guc.submissions[i],
+ guc.last_seqno[i], guc.last_seqno[i]);
+ total += guc.submissions[i];
+ }
+ seq_printf(m, "\t%s: %llu\n", "Total", total);
+
+ seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
+ i915_guc_client_info(m, dev_priv, &client);
+
+ /* Add more as required ... */
+
+ return 0;
+}
+
+static int i915_guc_log_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
+ u32 *log;
+ int i = 0, pg;
+
+ if (!log_obj)
+ return 0;
+
+ for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
+ log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
+
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
+ seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(log + i), *(log + i + 1),
+ *(log + i + 2), *(log + i + 3));
+
+ kunmap_atomic(log);
+ }
+
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
return "PORT_DDI_D_2_LANES";
case POWER_DOMAIN_PORT_DDI_D_4_LANES:
return "PORT_DDI_D_4_LANES";
+ case POWER_DOMAIN_PORT_DDI_E_2_LANES:
+ return "PORT_DDI_E_2_LANES";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
struct sseu_dev_status *stat)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- const int ss_max = 2;
+ int ss_max = 2;
int ss;
u32 sig1[ss_max], sig2[ss_max];
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
+ {"i915_guc_info", i915_guc_info, 0},
+ {"i915_guc_load_status", i915_guc_load_status_info, 0},
+ {"i915_guc_log_dump", i915_guc_log_dump, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.num_pipes = 3,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
pci_disable_device(drm_dev->pdev);
/*
- * During hibernation on some GEN4 platforms the BIOS may try to access
+ * During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
* leave the device in D0 on those platforms and hope the BIOS will
- * power down the device properly. Platforms where this was seen:
- * Lenovo Thinkpad X301, X61s
+ * power down the device properly. The issue was seen on multiple old
+ * GENs with different BIOS vendors, so having an explicit blacklist
+ * is impractical; apply the workaround on everything pre-GEN6. The
+ * platforms where the issue was seen:
+ * Lenovo Thinkpad X301, X61s, X60, T60, X41
+ * Fujitsu FSC S7110
+ * Acer Aspire 1830T
*/
- if (!(hibernation &&
- drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
- INTEL_INFO(dev_priv)->gen == 4))
+ if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
return 0;
}
-int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
+int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
return ret;
}
-int i915_resume_legacy(struct drm_device *dev)
+int i915_resume_switcheroo(struct drm_device *dev)
{
int ret;
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER,
+ DRIVER_RENDER | DRIVER_MODESET,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
- /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
- .suspend = i915_suspend_legacy,
- .resume = i915_resume_legacy,
-
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
* either by the i915.modeset parameter or by the
* vga_text_mode_force boot option.
*/
- driver.driver_features |= DRIVER_MODESET;
if (i915.modeset == 0)
driver.driver_features &= ~DRIVER_MODESET;
#endif
if (!(driver.driver_features & DRIVER_MODESET)) {
- driver.get_vblank_timestamp = NULL;
/* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
return 0;
}
- /*
- * FIXME: Note that we're lying to the DRM core here so that we can get access
- * to the atomic ioctl and the atomic properties. Only plane operations on
- * a single CRTC will actually work.
- */
- if (driver.driver_features & DRIVER_MODESET)
+ if (i915.nuclear_pageflip)
driver.driver_features |= DRIVER_ATOMIC;
return drm_pci_init(&driver, &i915_pci_driver);
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include "intel_guc.h"
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150731"
+#define DRIVER_DATE "20150828"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
BUILD_BUG_ON(__i915_warn_cond); \
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
-#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
#endif
#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
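+/* passing the stringified condition as a "%s" argument keeps any literal
+ * '%' inside it out of the printk format string */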
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
(unsigned long)(x), __func__);
unlikely(__ret_warn_on); \
})
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
+
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
+ POWER_DOMAIN_PORT_DDI_E_2_LANES,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
HPD_PORT_B,
HPD_PORT_C,
HPD_PORT_D,
+ HPD_PORT_E,
HPD_NUM_PINS
};
struct drm_i915_error_object {
int page_count;
- u32 gtt_offset;
+ u64 gtt_offset;
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
u32 size;
u32 name;
u32 rseqno[I915_NUM_RINGS], wseqno;
- u32 gtt_offset;
+ u64 gtt_offset;
u32 read_domains;
u32 write_domain;
s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30
+ #define DDC_PIN_B 0x05
+ #define DDC_PIN_C 0x04
+ #define DDC_PIN_D 0x06
+
struct ddi_vbt_port_info {
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
uint8_t supports_dp:1;
uint8_t alternate_aux_channel;
+ uint8_t alternate_ddc_pin;
uint8_t dp_boost_level;
uint8_t hdmi_boost_level;
struct drm_file *file;
uint32_t dispatch_flags;
uint32_t args_batch_start_offset;
- uint32_t batch_obj_vm_offset;
+ uint64_t batch_obj_vm_offset;
struct intel_engine_cs *ring;
struct drm_i915_gem_object *batch_obj;
struct intel_context *ctx;
struct i915_virtual_gpu vgpu;
+ struct intel_guc guc;
+
struct intel_csr csr;
/* Display CSR-related protection */
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq;
+ unsigned int max_dotclk_freq;
unsigned int hpll_freq;
/**
struct drm_i915_gem_object *vlv_pctx;
- #ifdef CONFIG_DRM_I915_FBDEV
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
return to_i915(dev_get_drvdata(dev));
}
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+ return container_of(guc, struct drm_i915_private, guc);
+}
+
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
#define USES_PPGTT(dev) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
+#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3)
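+/* i915.enable_ppgtt: 0 = disabled, 1 = aliasing, 2 = full, 3 = full 48-bit */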
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
#define HAS_CSR(dev) (IS_SKYLAKE(dev))
+#define HAS_GUC_UCODE(dev) (IS_GEN9(dev))
+#define HAS_GUC_SCHED(dev) (IS_GEN9(dev))
+
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
-extern int i915_resume_legacy(struct drm_device *dev);
+extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+extern int i915_resume_switcheroo(struct drm_device *dev);
/* i915_params.c */
struct i915_params {
int use_mmio_flip;
int mmio_debug;
bool verbose_state_checks;
+ bool nuclear_pageflip;
int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-unsigned long
-i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view);
-unsigned long
-i915_gem_obj_offset(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
-static inline unsigned long
+u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+ const struct i915_ggtt_view *view);
+u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+static inline u64
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};
+ static const u32 hpd_spt[HPD_NUM_PINS] = {
+ [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
+ [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
+ [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+ };
+
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_EN,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
return val & PORTC_HOTPLUG_LONG_DETECT;
case PORT_D:
return val & PORTD_HOTPLUG_LONG_DETECT;
+ case PORT_E:
+ return val & PORTE_HOTPLUG_LONG_DETECT;
default:
return false;
}
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+ u32 hotplug_trigger;
+
+ if (HAS_PCH_SPT(dev))
+ hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
+ else
+ hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
if (hotplug_trigger) {
u32 dig_hotplug_reg, pin_mask, long_mask;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
- dig_hotplug_reg, hpd_cpt,
- pch_port_hotplug_long_detect);
+ if (HAS_PCH_SPT(dev)) {
+ intel_get_hpd_pins(&pin_mask, &long_mask,
+ hotplug_trigger,
+ dig_hotplug_reg, hpd_spt,
+ pch_port_hotplug_long_detect);
+
+ /* detect PORTE HP event */
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
+ if (pch_port_hotplug_long_detect(PORT_E,
+ dig_hotplug_reg))
+ long_mask |= 1 << HPD_PORT_E;
+ } else
+ intel_get_hpd_pins(&pin_mask, &long_mask,
+ hotplug_trigger,
+ dig_hotplug_reg, hpd_cpt,
+ pch_port_hotplug_long_detect);
+
intel_hpd_irq_handler(dev, pin_mask, long_mask);
}
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
+ } else if (HAS_PCH_SPT(dev)) {
+ hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+ for_each_intel_encoder(dev, intel_encoder)
+ if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
+ enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
for_each_intel_encoder(dev, intel_encoder)
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+
+ /* enable SPT PORTE hot plug */
+ if (HAS_PCH_SPT(dev)) {
+ hotplug = I915_READ(PCH_PORT_HOTPLUG2);
+ hotplug |= PORTE_HOTPLUG_ENABLE;
+ I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
+ }
}
static void bxt_hpd_irq_setup(struct drm_device *dev)
u32 hotplug_port = 0;
u32 hotplug_ctrl;
- /* Now, enable HPD */
for_each_intel_encoder(dev, intel_encoder) {
if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
== HPD_ENABLED)
hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
}
- /* Mask all HPD control bits */
hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
- /* Enable requested port in hotplug control */
- /* TODO: implement (short) HPD support on port A */
- WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
+ if (hotplug_port & BXT_DE_PORT_HP_DDIA)
+ hotplug_ctrl |= BXT_DDIA_HPD_ENABLE;
if (hotplug_port & BXT_DE_PORT_HP_DDIB)
hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
if (hotplug_port & BXT_DE_PORT_HP_DDIC)
hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
- /* Unmask DDI hotplug in IMR */
hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
- /* Enable DDI hotplug in IER */
hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
POSTING_READ(GEN8_DE_PORT_IER);
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
#define MI_LRI_FORCE_POSTED (1<<12)
-#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
-#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
+#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
+#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
-#define MI_LOAD_REGISTER_MEM(x) MI_INSTR(0x29, 2*(x)-1)
-#define MI_LOAD_REGISTER_MEM_GEN8(x) MI_INSTR(0x29, 3*(x)-1)
+#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1)
+#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2)
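+/* the _GEN8 forms are one dword longer, carrying a 64-bit address */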
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */
#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1)
+#define _CHV_CMN_DW0_CH0 0x8100
+#define DPIO_ALLDL_POWERDOWN_SHIFT_CH0 19
+#define DPIO_ANYDL_POWERDOWN_SHIFT_CH0 18
+#define DPIO_ALLDL_POWERDOWN (1 << 1)
+#define DPIO_ANYDL_POWERDOWN (1 << 0)
+
#define _CHV_CMN_DW5_CH0 0x8114
#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
#define _CHV_CMN_DW19_CH0 0x814c
#define _CHV_CMN_DW6_CH1 0x8098
+#define DPIO_ALLDL_POWERDOWN_SHIFT_CH1 30 /* CL2 DW6 only */
+#define DPIO_ANYDL_POWERDOWN_SHIFT_CH1 29 /* CL2 DW6 only */
+#define DPIO_DYNPWRDOWNEN_CH1 (1 << 28) /* CL2 DW6 only */
#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
+
#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+#define CHV_CMN_DW28 0x8170
+#define DPIO_CL1POWERDOWNEN (1 << 23)
+#define DPIO_DYNPWRDOWNEN_CH0 (1 << 22)
+#define DPIO_SUS_CLK_CONFIG_ON (0 << 0)
+#define DPIO_SUS_CLK_CONFIG_CLKREQ (1 << 0)
+#define DPIO_SUS_CLK_CONFIG_GATE (2 << 0)
+#define DPIO_SUS_CLK_CONFIG_GATE_CLKREQ (3 << 0)
+
#define CHV_CMN_DW30 0x8178
+#define DPIO_CL2_LDOFUSE_PWRENB (1 << 6)
#define DPIO_LRC_BYPASS (1 << 3)
#define _TXLANE(ch, lane, offset) ((ch ? 0x2400 : 0) + \
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_INTERRUPT_STEERING (1<<14)
#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
#define GFX_REPLAY_MODE (1<<11)
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
+#define GEN8_GFX_PPGTT_48B (1<<7)
+
+#define GFX_FORWARD_VBLANK_MASK (3<<5)
+#define GFX_FORWARD_VBLANK_NEVER (0<<5)
+#define GFX_FORWARD_VBLANK_ALWAYS (1<<5)
+#define GFX_FORWARD_VBLANK_COND (2<<5)
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
+#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27))
#define PHY_LDO_DELAY_0NS 0x0
#define PHY_LDO_DELAY_200NS 0x1
#define PHY_LDO_DELAY_600NS 0x2
#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
+#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11))
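+/*
+ * e.g. PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) lands on bit 27 for (0, 0) up to
+ * bit 30 for (1, 1), while PHY_CH_POWER_DOWN_OVRD with a 4-bit mask covers
+ * bits 11-14 for (0, 0), 15-18 for (0, 1) and 19-22 for (1, 0).
+ */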
#define PHY_CH_SU_PSR 0x1
#define PHY_CH_DEEP_PSR 0x7
#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
+#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch))))
+#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline))))
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
/* How many wires to use. I guess 3 was too hard */
#define DP_PORT_WIDTH(width) (((width) - 1) << 19)
#define DP_PORT_WIDTH_MASK (7 << 19)
+#define DP_PORT_WIDTH_SHIFT 19
/* Mystic DPCD version 1.1 special mode */
#define DP_ENHANCED_FRAMING (1 << 18)
#define GEN8_GT_IIR(which) (0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) (0x4430c + (0x10 * (which)))
-#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_RCS_IRQ_SHIFT 0
-#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS1_IRQ_SHIFT 0
+#define GEN8_VCS2_IRQ_SHIFT 16
#define GEN8_VECS_IRQ_SHIFT 0
+#define GEN8_WD_IRQ_SHIFT 16
#define GEN8_DE_PIPE_ISR(pipe) (0x44400 + (0x10 * (pipe)))
#define GEN8_DE_PIPE_IMR(pipe) (0x44404 + (0x10 * (pipe)))
#define SDE_AUXC_CPT (1 << 26)
#define SDE_AUXB_CPT (1 << 25)
#define SDE_AUX_MASK_CPT (7 << 25)
+ #define SDE_PORTE_HOTPLUG_SPT (1 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
+ #define SDE_HOTPLUG_MASK_SPT (SDE_PORTE_HOTPLUG_SPT | \
+ SDE_PORTD_HOTPLUG_CPT | \
+ SDE_PORTC_HOTPLUG_CPT | \
+ SDE_PORTB_HOTPLUG_CPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_ERROR_CPT (1 << 16)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+ #define PCH_PORT_HOTPLUG2 0xc403C /* SHOTPLUG_CTL2 */
+ #define PORTE_HOTPLUG_ENABLE (1 << 4)
+ #define PORTE_HOTPLUG_STATUS_MASK (0x3 << 0)
+ #define PORTE_HOTPLUG_NO_DETECT (0 << 0)
+ #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0)
+ #define PORTE_HOTPLUG_LONG_DETECT (2 << 0)
+
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
#define PCH_GPIOC 0xc5018
#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14)
#define GEN7_MISCCPCTL (0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
+#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
#define GEN8_GARBCNTL 0xB004
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
+#define DDI_PORT_WIDTH_MASK (7 << 1)
+#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
/* DDI Buffer Translations */
{ 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */
};
- /* Skylake H, S, and Skylake Y with 0.95V VccIO */
+ /* Skylake H and S */
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 },
/* Skylake U */
static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
- { 0x00002016, 0x000000A2, 0x0 },
+ { 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
- { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */
- { 0x00002016, 0x0000009D, 0x0 },
+ { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */
+ { 0x0000201B, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
{ 0x00002016, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
};
- /* Skylake Y with 0.85V VccIO */
- static const struct ddi_buf_trans skl_y_085v_ddi_translations_dp[] = {
+ /* Skylake Y */
+ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 },
- { 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost */
+ { 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 },
};
/*
- * Skylake H and S, and Skylake Y with 0.95V VccIO
+ * Skylake H and S
* eDP 1.4 low vswing translation parameters
*/
static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
};
/*
- * Skylake Y with 0.95V VccIO
+ * Skylake Y
* eDP 1.4 low vswing translation parameters
*/
- static const struct ddi_buf_trans skl_y_085v_ddi_translations_edp[] = {
+ static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = {
{ 0x00000018, 0x000000A8, 0x0 },
{ 0x00004013, 0x000000AB, 0x0 },
{ 0x00007011, 0x000000A4, 0x0 },
{ 0x00000018, 0x0000008A, 0x0 },
};
- /* Skylake H, S and U, and Skylake Y with 0.95V VccIO */
+ /* Skylake U, H and S */
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000AC, 0x0 },
{ 0x00005012, 0x0000009D, 0x0 },
{ 0x00000018, 0x000000C7, 0x0 },
};
- /* Skylake Y with 0.85V VccIO */
- static const struct ddi_buf_trans skl_y_085v_ddi_translations_hdmi[] = {
+ /* Skylake Y */
+ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
{ 0x00000018, 0x000000A1, 0x0 },
{ 0x00005012, 0x000000DF, 0x0 },
{ 0x00007011, 0x00000084, 0x0 },
{ 0x00006013, 0x000000C7, 0x0 },
{ 0x00000018, 0x0000008A, 0x0 },
{ 0x00003015, 0x000000C7, 0x0 }, /* Default */
- { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost */
+ { 0x80003015, 0x000000C7, 0x7 }, /* Uses I_boost level 0x7 */
{ 0x00000018, 0x000000C7, 0x0 },
};
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
int *n_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
- static int is_095v = -1;
-
- if (is_095v == -1) {
- u32 spr1 = I915_READ(UAIMI_SPR1);
-
- is_095v = spr1 & SKL_VCCIO_MASK;
- }
- if (IS_SKL_ULX(dev) && !is_095v) {
- ddi_translations = skl_y_085v_ddi_translations_dp;
- *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+ if (IS_SKL_ULX(dev)) {
+ ddi_translations = skl_y_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
} else if (IS_SKL_ULT(dev)) {
ddi_translations = skl_u_ddi_translations_dp;
*n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
- static int is_095v = -1;
-
- if (is_095v == -1) {
- u32 spr1 = I915_READ(UAIMI_SPR1);
- is_095v = spr1 & SKL_VCCIO_MASK;
- }
-
- if (IS_SKL_ULX(dev) && !is_095v) {
+ if (IS_SKL_ULX(dev)) {
if (dev_priv->edp_low_vswing) {
- ddi_translations = skl_y_085v_ddi_translations_edp;
- *n_entries =
- ARRAY_SIZE(skl_y_085v_ddi_translations_edp);
+ ddi_translations = skl_y_ddi_translations_edp;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
} else {
- ddi_translations = skl_y_085v_ddi_translations_dp;
- *n_entries =
- ARRAY_SIZE(skl_y_085v_ddi_translations_dp);
+ ddi_translations = skl_y_ddi_translations_dp;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
}
} else if (IS_SKL_ULT(dev)) {
if (dev_priv->edp_low_vswing) {
skl_get_buf_trans_hdmi(struct drm_device *dev,
int *n_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const struct ddi_buf_trans *ddi_translations;
- static int is_095v = -1;
-
- if (is_095v == -1) {
- u32 spr1 = I915_READ(UAIMI_SPR1);
-
- is_095v = spr1 & SKL_VCCIO_MASK;
- }
- if (IS_SKL_ULX(dev) && !is_095v) {
- ddi_translations = skl_y_085v_ddi_translations_hdmi;
- *n_entries = ARRAY_SIZE(skl_y_085v_ddi_translations_hdmi);
+ if (IS_SKL_ULX(dev)) {
+ ddi_translations = skl_y_ddi_translations_hdmi;
+ *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
} else {
ddi_translations = skl_ddi_translations_hdmi;
*n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
-
}
static struct intel_encoder *
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- struct intel_encoder *intel_encoder,
- int clock)
+ struct intel_encoder *intel_encoder)
{
+ int clock = crtc_state->port_clock;
+
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct intel_shared_dpll *pll;
uint32_t val;
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- struct intel_encoder *intel_encoder,
- int clock)
+ struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
uint32_t ctrl1, cfgcr1, cfgcr2;
+ int clock = crtc_state->port_clock;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- struct intel_encoder *intel_encoder,
- int clock)
+ struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
struct bxt_clk_div clk_div = {0};
int vco = 0;
uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
uint32_t lanestagger;
+ int clock = crtc_state->port_clock;
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
intel_clock_t best_clock;
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- int clock = crtc_state->port_clock;
if (IS_SKYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state,
- intel_encoder, clock);
+ intel_encoder);
else if (IS_BROXTON(dev))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
- intel_encoder, clock);
+ intel_encoder);
else
return hsw_ddi_pll_select(intel_crtc, crtc_state,
- intel_encoder, clock);
+ intel_encoder);
}
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
- temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
} else
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
- temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_dp_set_link_params(intel_dp, crtc->config);
+
intel_ddi_init_dp_buf_reg(intel_encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->has_dp_encoder = true;
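+ /* DDI_PORT_WIDTH stores the lane count minus one, hence the +1 */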
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
break;
default:
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
+ /*
+ * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+ * interrupts to check the external panel connection.
+ */
+ if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0)
+ && port == PORT_B)
+ dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
+ else
+ dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
+/* hrawclock is 1/4 the FSB frequency */
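+/* returns MHz, e.g. the CLKCFG_FSB_1067 strap yields 1067/4 ~= 266 */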
+int intel_hrawclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t clkcfg;
+
+ /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+ if (IS_VALLEYVIEW(dev))
+ return 200;
+
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100;
+ case CLKCFG_FSB_533:
+ return 133;
+ case CLKCFG_FSB_667:
+ return 166;
+ case CLKCFG_FSB_800:
+ return 200;
+ case CLKCFG_FSB_1067:
+ return 266;
+ case CLKCFG_FSB_1333:
+ return 333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400;
+ default:
+ return 133;
+ }
+}
+
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
}
}
-/*
- * ibx_digital_port_connected - is the specified port connected?
- * @dev_priv: i915 private structure
- * @port: the port to test
- *
- * Returns true if @port is connected, false otherwise.
- */
-bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
-{
- u32 bit;
-
- if (HAS_PCH_IBX(dev_priv->dev)) {
- switch (port->port) {
- case PORT_B:
- bit = SDE_PORTB_HOTPLUG;
- break;
- case PORT_C:
- bit = SDE_PORTC_HOTPLUG;
- break;
- case PORT_D:
- bit = SDE_PORTD_HOTPLUG;
- break;
- default:
- return true;
- }
- } else {
- switch (port->port) {
- case PORT_B:
- bit = SDE_PORTB_HOTPLUG_CPT;
- break;
- case PORT_C:
- bit = SDE_PORTC_HOTPLUG_CPT;
- break;
- case PORT_D:
- bit = SDE_PORTD_HOTPLUG_CPT;
- break;
- case PORT_E:
- bit = SDE_PORTE_HOTPLUG_SPT;
- break;
- default:
- return true;
- }
- }
-
- return I915_READ(SDEISR) & bit;
-}
-
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
-static void intel_init_dpio(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_VALLEYVIEW(dev))
- return;
-
- /*
- * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
- * CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(dev)) {
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
- DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
- } else {
- DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
- }
-}
-
static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
mutex_unlock(&dev_priv->sb_lock);
}
I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
- DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->pipe, id);
}
/*
{
switch (port) {
case PORT_A:
- case PORT_E:
return POWER_DOMAIN_PORT_DDI_A_4_LANES;
case PORT_B:
return POWER_DOMAIN_PORT_DDI_B_4_LANES;
return POWER_DOMAIN_PORT_DDI_C_4_LANES;
case PORT_D:
return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ case PORT_E:
+ return POWER_DOMAIN_PORT_DDI_E_2_LANES;
default:
WARN_ON_ONCE(1);
return POWER_DOMAIN_PORT_OTHER;
modeset_put_power_domains(dev_priv, put_domains[i]);
}
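+/*
+ * Rough guardband, e.g. for a 400 MHz cdclk: 400 MHz dotclock on
+ * HSW/BDW/gen9+, 380 MHz (95%) on CHV, 360 MHz (90%) on other gen4+
+ * parts, and 720 MHz on pre-gen4, which presumably doubles the limit
+ * via double-wide pipe mode.
+ */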
+static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+{
+ int max_cdclk_freq = dev_priv->max_cdclk_freq;
+
+ if (INTEL_INFO(dev_priv)->gen >= 9 ||
+ IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ return max_cdclk_freq;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return max_cdclk_freq*95/100;
+ else if (INTEL_INFO(dev_priv)->gen < 4)
+ return 2*max_cdclk_freq*90/100;
+ else
+ return max_cdclk_freq*90/100;
+}
+
static void intel_update_max_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
}
+ dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+
DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
dev_priv->max_cdclk_freq);
+
+ DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
+ dev_priv->max_dotclk_freq);
}
static void intel_update_cdclk(struct drm_device *dev)
/* enable PG1 and Misc I/O */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
- /* DPLL0 already enabed !? */
- if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
- DRM_DEBUG_DRIVER("DPLL0 already running\n");
- return;
+ /* DPLL0 not enabled (happens on early BIOS versions) */
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
+ /* enable DPLL0 */
+ required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+ skl_dpll0_enable(dev_priv, required_vco);
}
- /* enable DPLL0 */
- required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
- skl_dpll0_enable(dev_priv, required_vco);
-
/* set CDCLK to the frequency the BIOS chose */
skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
- if (!is_dsi) {
- if (IS_CHERRYVIEW(dev))
- chv_prepare_pll(intel_crtc, intel_crtc->config);
- else
- vlv_prepare_pll(intel_crtc, intel_crtc->config);
- }
-
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
encoder->pre_pll_enable(encoder);
if (!is_dsi) {
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev)) {
+ chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
- else
+ } else {
+ vlv_prepare_pll(intel_crtc, intel_crtc->config);
vlv_enable_pll(intel_crtc, intel_crtc->config);
+ }
}
for_each_encoder_on_crtc(dev, crtc, encoder)
i9xx_disable_pll(intel_crtc);
}
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_pll_disable)
+ encoder->post_pll_disable(encoder);
+
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- if (bestm2_frac)
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
else
i9xx_crtc_clock_get(crtc, pipe_config);
+ /*
+ * Normally the dotclock is filled in by the encoder .get_config()
+ * but in case the pipe is enabled w/o any ports we need a sane
+ * default.
+ */
+ pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->port_clock / pipe_config->pixel_multiplier;
+
return true;
}
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
- #ifdef CONFIG_DRM_I915_FBDEV
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+ intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, DERRMR);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- u32 start_vbl_count;
intel_mark_page_flip_active(intel_crtc);
- intel_pipe_update_start(intel_crtc, &start_vbl_count);
+ intel_pipe_update_start(intel_crtc);
if (INTEL_INFO(dev)->gen >= 9)
skl_do_mmio_flip(intel_crtc);
/* use_mmio_flip() restricts MMIO flips to ilk+ */
ilk_do_mmio_flip(intel_crtc);
- intel_pipe_update_end(intel_crtc, start_vbl_count);
+ intel_pipe_update_end(intel_crtc);
}
static void intel_mmio_flip_work_func(struct work_struct *work)
if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
return true;
+ if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+ return false;
+
if (!work->enable_stall_check)
return false;
intel_crtc->atomic.update_wm_pre = true;
}
- if (visible)
+ if (visible || was_visible)
intel_crtc->atomic.fb_bits |=
to_intel_plane(plane)->frontbuffer_bit;
pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
pipe_config->has_dp_encoder,
+ pipe_config->lane_count,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
- DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
+ DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
pipe_config->has_dp_encoder,
+ pipe_config->lane_count,
pipe_config->dp_m2_n2.gmch_m,
pipe_config->dp_m2_n2.gmch_n,
pipe_config->dp_m2_n2.link_m,
PIPE_CONF_CHECK_M_N(fdi_m_n);
PIPE_CONF_CHECK_I(has_dp_encoder);
+ PIPE_CONF_CHECK_I(lane_count);
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
/* Perform vblank evasion around commit operation */
if (crtc->state->active)
- intel_pipe_update_start(intel_crtc, &intel_crtc->start_vbl_count);
+ intel_pipe_update_start(intel_crtc);
if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(intel_crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (crtc->state->active)
- intel_pipe_update_end(intel_crtc, intel_crtc->start_vbl_count);
+ intel_pipe_update_end(intel_crtc);
}
/**
struct intel_plane *primary;
struct intel_plane_state *state;
const uint32_t *intel_primary_formats;
- int num_formats;
+ unsigned int num_formats;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
if (primary == NULL)
intel_ddi_init(dev, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
intel_ddi_init(dev, PORT_D);
+ /*
+ * On SKL we don't have a way to detect DDI-E so we rely on VBT.
+ */
+ if (IS_SKYLAKE(dev) &&
+ (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
+ dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
+ dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+ intel_ddi_init(dev, PORT_E);
+
} else if (HAS_PCH_SPLIT(dev)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
return intel_framebuffer_create(dev, mode_cmd, obj);
}
- #ifndef CONFIG_DRM_I915_FBDEV
+ #ifndef CONFIG_DRM_FBDEV_EMULATION
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
if (INTEL_INFO(dev)->num_pipes == 0)
return;
+ /*
+ * There may be no VBT; and if the BIOS enabled SSC we can
+ * just keep using it to avoid unnecessary flicker. Whereas if the
+ * BIOS isn't using it, don't assume it will work even if the VBT
+ * indicates as much.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+ bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+ DREF_SSC1_ENABLE);
+
+ if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
+ bios_lvds_use_ssc ? "en" : "dis",
+ dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
+ dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ }
+ }
+
intel_init_display(dev);
intel_init_audio(dev);
}
}
- intel_init_dpio(dev);
-
intel_shared_dpll_init(dev);
/* Just disable it once at startup */
return true;
}
+static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder;
+
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder)
+ return true;
+
+ return false;
+}
+
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
u32 reg;
- bool enable;
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->config->cpu_transcoder);
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- enable = false;
- for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
- enable = true;
- break;
- }
-
- if (!enable)
+ if (!intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
if (crtc->active != crtc->base.state->active) {
+ struct intel_encoder *encoder;
/* This can happen either due to bugs in the get_hw_state
* functions or because of calls to intel_crtc_disable_noatomic,
void intel_modeset_gem_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
- /*
- * There may be no VBT; and if the BIOS enabled SSC we can
- * just keep using it to avoid unnecessary flicker. Whereas if the
- * BIOS isn't using it, don't assume it will work even if the VBT
- * indicates as much.
- */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
- DREF_SSC1_ENABLE);
-
intel_modeset_init_hw(dev);
intel_setup_overlay(dev);
324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
- static const int chv_rates[] = { 162000, 202500, 210000, 216000,
- 243000, 270000, 324000, 405000,
- 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
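+/*
+ * 4-bit mask of the PHY lanes the link leaves unused,
+ * e.g. lane_count 1 -> 0xe, 2 -> 0xc, 4 -> 0x0.
+ */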
+static unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
+
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
dst[i] = src >> ((3-i) * 8);
}
-/* hrawclock is 1/4 the FSB frequency */
-static int
-intel_hrawclk(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t clkcfg;
-
- /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
- if (IS_VALLEYVIEW(dev))
- return 200;
-
- clkcfg = I915_READ(CLKCFG);
- switch (clkcfg & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_400:
- return 100;
- case CLKCFG_FSB_533:
- return 133;
- case CLKCFG_FSB_667:
- return 166;
- case CLKCFG_FSB_800:
- return 200;
- case CLKCFG_FSB_1067:
- return 266;
- case CLKCFG_FSB_1333:
- return 333;
- /* these two are just a guess; one of them might be right */
- case CLKCFG_FSB_1600:
- case CLKCFG_FSB_1600_ALT:
- return 400;
- default:
- return 133;
- }
-}
-
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_dp->pps_pipe;
- bool pll_enabled;
+ bool pll_enabled, release_cl_override = false;
+ enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_channel ch = vlv_pipe_to_channel(pipe);
uint32_t DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
* The DPLL for the pipe must be enabled for this to work.
* So enable it temporarily if it's not already enabled.
*/
- if (!pll_enabled)
+ if (!pll_enabled) {
+ release_cl_override = IS_CHERRYVIEW(dev) &&
+ !chv_phy_powergate_ch(dev_priv, phy, ch, true);
+
vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+ }
/*
* Similar magic as in intel_dp_enable_port().
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
- if (!pll_enabled)
+ if (!pll_enabled) {
vlv_force_pll_off(dev, pipe);
+
+ if (release_cl_override)
+ chv_phy_powergate_ch(dev_priv, phy, ch, false);
+ }
}
static enum pipe
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
- static void
+ void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
memset(&pipe_config->dpll_hw_state, 0,
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
+ static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+ {
+ /* WaDisableHBR2:skl */
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ return false;
+
+ if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+ (INTEL_INFO(dev)->gen >= 9))
+ return true;
+ else
+ return false;
+ }
+
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
+ int size;
+
if (IS_BROXTON(dev)) {
*source_rates = bxt_rates;
- return ARRAY_SIZE(bxt_rates);
+ size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
- return ARRAY_SIZE(skl_rates);
- } else if (IS_CHERRYVIEW(dev)) {
- *source_rates = chv_rates;
- return ARRAY_SIZE(chv_rates);
+ size = ARRAY_SIZE(skl_rates);
+ } else {
+ *source_rates = default_rates;
+ size = ARRAY_SIZE(default_rates);
}
- *source_rates = default_rates;
+ /* This depends on the fact that 5.4 is the last value in the array */
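+ /* (e.g. default_rates[]: trimming 540000 leaves { 162000, 270000 }) */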
+ if (!intel_dp_source_supports_hbr2(dev))
+ size--;
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- return (DP_LINK_BW_2_7 >> 3) + 1;
- else if (INTEL_INFO(dev)->gen >= 8 ||
- (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
- return (DP_LINK_BW_5_4 >> 3) + 1;
- else
- return (DP_LINK_BW_2_7 >> 3) + 1;
+ return size;
}
static void
return rate_to_index(rate, intel_dp->sink_rates);
}
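+/*
+ * Sinks exposing an eDP 1.4 link-rate table are programmed with a
+ * rate-select index and link_bw stays zero; legacy sinks take a link BW
+ * code instead (0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps).
+ */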
+static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ uint8_t *link_bw, uint8_t *rate_select)
+{
+ if (intel_dp->num_sink_rates) {
+ *link_bw = 0;
+ *rate_select =
+ intel_dp_rate_select(intel_dp, port_clock);
+ } else {
+ *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
+ *rate_select = 0;
+ }
+}
+
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
int link_avail, link_clock;
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int common_len;
+ uint8_t link_bw, rate_select;
common_len = intel_dp_common_rates(intel_dp, common_rates);
* CEA-861-E - 5.1 Default Encoding Parameters
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
- if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
- intel_dp->color_range = DP_COLOR_RANGE_16_235;
- else
- intel_dp->color_range = 0;
- }
-
- if (intel_dp->color_range)
- pipe_config->limited_color_range = true;
-
- intel_dp->lane_count = lane_count;
-
- if (intel_dp->num_sink_rates) {
- intel_dp->link_bw = 0;
- intel_dp->rate_select =
- intel_dp_rate_select(intel_dp, common_rates[clock]);
+ pipe_config->limited_color_range =
+ bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
} else {
- intel_dp->link_bw =
- drm_dp_link_rate_to_bw_code(common_rates[clock]);
- intel_dp->rate_select = 0;
+ pipe_config->limited_color_range =
+ intel_dp->limited_color_range;
}
+ pipe_config->lane_count = lane_count;
+
pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = common_rates[clock];
- DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
+ intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
+ &link_bw, &rate_select);
+
+ DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
+ link_bw, rate_select, pipe_config->lane_count,
pipe_config->port_clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
udelay(500);
}
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ intel_dp->link_rate = pipe_config->port_clock;
+ intel_dp->lane_count = pipe_config->lane_count;
+}
+
static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ intel_dp_set_link_params(intel_dp, crtc->config);
+
/*
* There are four kinds of DP registers:
*
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
- intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
+ intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
if (crtc->config->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
trans_dp &= ~TRANS_DP_ENH_FRAMING;
I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
- if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
- intel_dp->DP |= intel_dp->color_range;
+ if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+ crtc->config->limited_color_range)
+ intel_dp->DP |= DP_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev) && port != PORT_A) {
- tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
- if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+
+ if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
- if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+ if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->has_dp_encoder = true;
+ pipe_config->lane_count =
+ ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
+
intel_dp_get_m_n(crtc, pipe_config);
if (port == PORT_A) {
intel_dp_link_down(intel_dp);
}
-static void chv_post_disable_dp(struct intel_encoder *encoder)
+static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
- intel_dp_link_down(intel_dp);
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
- mutex_lock(&dev_priv->sb_lock);
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ }
- /* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ }
+}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+static void chv_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ intel_dp_link_down(intel_dp);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
mutex_unlock(&dev_priv->sb_lock);
}
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- unsigned int lane_mask = 0x0;
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
pps_unlock(intel_dp);
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev)) {
+ unsigned int lane_mask = 0x0;
+
+ if (IS_CHERRYVIEW(dev))
+ lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
+
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
lane_mask);
+ }
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- /* Deassert soft data lane reset*/
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
/* Program Tx lane latency optimal setting*/
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
/* Set the upar bit */
- data = (i == 1) ? 0x0 : 0x1;
+ if (intel_crtc->config->lane_count == 1)
+ data = 0x0;
+ else
+ data = (i == 1) ? 0x0 : 0x1;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
val |= DPIO_TX2_STAGGER_MASK(0x1f);
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_TX1_STAGGER_MULT(6) |
DPIO_TX2_STAGGER_MULT(0));
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
+ if (intel_crtc->config->lane_count > 2) {
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
+ }
+
+ /* Deassert data lane reset */
+ chv_data_lane_soft_reset(encoder, false);
mutex_unlock(&dev_priv->sb_lock);
intel_enable_dp(encoder);
+
+ /* Second common lane will stay alive on its own now */
+ if (dport->release_cl2_override) {
+ chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ dport->release_cl2_override = false;
+ }
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
enum pipe pipe = intel_crtc->pipe;
+ unsigned int lane_mask =
+ intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
u32 val;
intel_dp_prepare(encoder);
+ /*
+ * Must trick the second common lane into life.
+ * Otherwise we can't even access the PLL.
+ */
+ if (ch == DPIO_CH0 && pipe == PIPE_B)
+ dport->release_cl2_override =
+ !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+ chv_phy_powergate_lanes(encoder, true, lane_mask);
+
mutex_lock(&dev_priv->sb_lock);
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
+
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
val |= CHV_PCS_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ }
/*
* This is a bit weird since generally CL
mutex_unlock(&dev_priv->sb_lock);
}
+static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /*
+ * Leave the power down bit cleared for at least one
+ * lane so that chv_phy_powergate_ch() will power
+ * on something when the channel is otherwise unused.
+ * When the port is off and the override is removed
+ * the lanes power down anyway, so otherwise it doesn't
+ * really matter what the state of power down bits is
+ * after this.
+ */
+ chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
return 0;
}
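+/*
+ * The unique transition scale is needed only for the 1200mV-0dB case,
+ * i.e. maximum voltage swing (level 3) with zero pre-emphasis (level 0).
+ */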
+static bool chv_need_uniq_trans_scale(uint8_t train_set)
+{
+ return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
+ (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+}
+
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ }
/* Program swing deemph */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
}
/* Program swing margin */
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+ /*
+ * Supposedly this value shouldn't matter when unique transition
+ * scale is disabled, but in fact it does matter. Let's just
+ * always program the same value and hope it's OK.
+ */
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
- /* Disable unique transition scale */
- for (i = 0; i < 4; i++) {
+ /*
+ * The document said it needs to set bit 27 for ch0 and bit 26
+ * for ch1. Might be a typo in the doc.
+ * For now, for this unique transition scale selection, set bit
+ * 27 for ch0 and ch1.
+ */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
- == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
- ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
- == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ if (chv_need_uniq_trans_scale(train_set))
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
+ else
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_dig_port->base.base.dev);
int ret;
intel_get_adjust_train(intel_dp, link_status);
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
uint8_t link_config[2];
+ uint8_t link_bw, rate_select;
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
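+ /* Translate the link rate back into either a legacy link_bw code or
+ * a rate_select index into the sink's DP_SUPPORTED_LINK_RATES table.
+ */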
+ intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
+ &link_bw, &rate_select);
+
/* Write the link configuration data */
- link_config[0] = intel_dp->link_bw;
+ link_config[0] = link_bw;
link_config[1] = intel_dp->lane_count;
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
if (intel_dp->num_sink_rates)
drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
- &intel_dp->rate_select, 1);
+ &rate_select, 1);
link_config[0] = 0;
link_config[1] = DP_SET_ANSI_8B10B;
uint32_t DP = intel_dp->DP;
uint32_t training_pattern = DP_TRAINING_PATTERN_2;
- /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
- if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
+ /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
+ if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
training_pattern = DP_TRAINING_PATTERN_3;
/* channel equalization */
}
/* Make sure clock is still ok */
- if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ if (!drm_dp_clock_recovery_ok(link_status,
+ intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
continue;
}
- if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ if (drm_dp_channel_eq_ok(link_status,
+ intel_dp->lane_count)) {
channel_eq = true;
break;
}
}
}
- /* Training Pattern 3 support, both source and sink */
+ /* Training Pattern 3 support: only Intel platforms that support HBR2
+ * also support TP3, hence that check is used along with the dpcd check
+ * to ensure TP3 can be enabled.
+ * SKL < B0 is the only exception where, due to WaDisableHBR2, TP3 is
+ * supported but still not enabled.
+ */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
- intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
+ if (drm_dp_tps3_supported(intel_dp->dpcd) &&
- (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+ intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
return intel_dp->is_mst;
}
-static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
+static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
+ int ret = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
- return;
+ ret = -EIO;
+ goto out;
}
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0)
+ buf & ~DP_TEST_SINK_START) < 0) {
DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
+ ret = -EIO;
+ goto out;
+ }
+ intel_dp->sink_crc.started = false;
+ out:
hsw_enable_ips(intel_crtc);
+ return ret;
}
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
+ int ret;
+
+ if (intel_dp->sink_crc.started) {
+ ret = intel_dp_sink_crc_stop(intel_dp);
+ if (ret)
+ return ret;
+ }
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
+ intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
+
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
return -EIO;
}
+ intel_dp->sink_crc.started = true;
return 0;
}
struct drm_device *dev = dig_port->base.base.dev;
struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
u8 buf;
- int test_crc_count;
+ int count, ret;
int attempts = 6;
- int ret;
+ bool old_equal_new;
ret = intel_dp_sink_crc_start(intel_dp);
if (ret)
return ret;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
- ret = -EIO;
- goto stop;
- }
-
- test_crc_count = buf & DP_TEST_COUNT_MASK;
-
do {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0) {
ret = -EIO;
goto stop;
}
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
+ count = buf & DP_TEST_COUNT_MASK;
+
+ /*
+ * Count might be reset during the loop. In this case
+ * last known count needs to be reset as well.
+ */
+ if (count == 0)
+ intel_dp->sink_crc.last_count = 0;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+ ret = -EIO;
+ goto stop;
+ }
+
+ old_equal_new = (count == intel_dp->sink_crc.last_count &&
+ !memcmp(intel_dp->sink_crc.last_crc, crc,
+ 6 * sizeof(u8)));
+
+ } while (--attempts && (count == 0 || old_equal_new));
+
+ intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
+ memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
if (attempts == 0) {
- DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
- ret = -ETIMEDOUT;
- goto stop;
+ if (old_equal_new) {
+ DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
+ } else {
+ DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
+ ret = -ETIMEDOUT;
+ goto stop;
+ }
}
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
- ret = -EIO;
stop:
intel_dp_sink_crc_stop(intel_dp);
return ret;
if (bret == true) {
/* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ if (intel_dp->active_mst_links &&
+ !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
return status;
}
+static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_A:
+ return true;
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG;
+ break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
+static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_A:
+ return true;
+ case PORT_B:
+ bit = SDE_PORTB_HOTPLUG_CPT;
+ break;
+ case PORT_C:
+ bit = SDE_PORTC_HOTPLUG_CPT;
+ break;
+ case PORT_D:
+ bit = SDE_PORTD_HOTPLUG_CPT;
+ break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
+static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_B:
+ bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_C:
+ bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case PORT_D:
+ bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
+static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_A:
+ bit = BXT_DE_PORT_HP_DDIA;
+ break;
+ case PORT_B:
+ bit = BXT_DE_PORT_HP_DDIB;
+ break;
+ case PORT_C:
+ bit = BXT_DE_PORT_HP_DDIC;
+ break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(GEN8_DE_PORT_ISR) & bit;
+}
+
+/*
+ * intel_digital_port_connected - is the specified port connected?
+ * @dev_priv: i915 private structure
+ * @port: the port to test
+ *
+ * Return %true if @port is connected, %false otherwise.
+ */
+static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (HAS_PCH_IBX(dev_priv))
+ return ibx_digital_port_connected(dev_priv, port);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ return cpt_digital_port_connected(dev_priv, port);
+ else if (IS_BROXTON(dev_priv))
+ return bxt_digital_port_connected(dev_priv, port);
+ else if (IS_VALLEYVIEW(dev_priv))
+ return vlv_digital_port_connected(dev_priv, port);
+ else
+ return g4x_digital_port_connected(dev_priv, port);
+}
+
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ if (!intel_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
}
-static int g4x_digital_port_connected(struct drm_device *dev,
- struct intel_digital_port *intel_dig_port)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit;
-
- if (IS_VALLEYVIEW(dev)) {
- switch (intel_dig_port->port) {
- case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
- break;
- case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
- break;
- case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
- break;
- default:
- return -EINVAL;
- }
- } else {
- switch (intel_dig_port->port) {
- case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
- break;
- case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
- break;
- case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
- break;
- default:
- return -EINVAL;
- }
- }
-
- if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
- return 0;
- return 1;
-}
-
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- int ret;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
return status;
}
- ret = g4x_digital_port_connected(dev, intel_dig_port);
- if (ret == -EINVAL)
- return connector_status_unknown;
- else if (ret == 0)
+ if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_dp->color_range_auto;
- uint32_t old_range = intel_dp->color_range;
+ bool old_range = intel_dp->limited_color_range;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
break;
case INTEL_BROADCAST_RGB_FULL:
intel_dp->color_range_auto = false;
- intel_dp->color_range = 0;
+ intel_dp->limited_color_range = false;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_dp->color_range_auto = false;
- intel_dp->color_range = DP_COLOR_RANGE_16_235;
+ intel_dp->limited_color_range = true;
break;
default:
return -EINVAL;
}
if (old_auto == intel_dp->color_range_auto &&
- old_range == intel_dp->color_range)
+ old_range == intel_dp->limited_color_range)
return 0;
goto done;
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;
- if (HAS_PCH_SPLIT(dev)) {
- if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
- goto mst_fail;
- } else {
- if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
- goto mst_fail;
- }
+ if (!intel_digital_port_connected(dev_priv, intel_dig_port))
+ goto mst_fail;
if (!intel_dp_get_dpcd(intel_dp)) {
goto mst_fail;
intel_dp_probe_oui(intel_dp);
- if (!intel_dp_probe_mst(intel_dp))
+ if (!intel_dp_probe_mst(intel_dp)) {
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
goto mst_fail;
-
+ }
} else {
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
}
if (!intel_dp->is_mst) {
- /*
- * we'll check the link status via the normal hot plug path later -
- * but for short hpds we should check it now
- */
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
return -1;
}
- /* check the VBT to see whether the eDP is on DP-D port */
+ /* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
union child_device_config *p_child;
int i;
static const short port_mapping[] = {
- [PORT_B] = PORT_IDPB,
- [PORT_C] = PORT_IDPC,
- [PORT_D] = PORT_IDPD,
+ [PORT_B] = DVO_PORT_DPB,
+ [PORT_C] = DVO_PORT_DPC,
+ [PORT_D] = DVO_PORT_DPD,
+ [PORT_E] = DVO_PORT_DPE,
};
if (port == PORT_A)
break;
case PORT_B:
intel_encoder->hpd_pin = HPD_PORT_B;
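+ /*
+ * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+ * interrupts to check the external panel connection.
+ */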
+ if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+ intel_encoder->hpd_pin = HPD_PORT_A;
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
case PORT_D:
intel_encoder->hpd_pin = HPD_PORT_D;
break;
+ case PORT_E:
+ intel_encoder->hpd_pin = HPD_PORT_E;
+ break;
default:
BUG();
}
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
+ intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
intel_encoder->pre_enable = vlv_pre_enable_dp;
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_atomic_state *state;
int bpp, i;
- int lane_count, slots, rate;
+ int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_connector *drm_connector;
struct intel_connector *connector, *found = NULL;
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
- rate = intel_dp_max_link_rate(intel_dp);
- if (intel_dp->num_sink_rates) {
- intel_dp->link_bw = 0;
- intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
- } else {
- intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
- intel_dp->rate_select = 0;
- }
-
- intel_dp->lane_count = lane_count;
+ pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
- pipe_config->port_clock = rate;
+ pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
state = pipe_config->base.state;
&pipe_config->dp_m_n);
pipe_config->dp_m_n.tu = slots;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hsw_dp_set_ddi_pll_sel(pipe_config);
+
return true;
}
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
+ intel_dp_set_link_params(intel_dp, intel_crtc->config);
+
/* FIXME: add support for SKL */
if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port),
break;
}
pipe_config->base.adjusted_mode.flags |= flags;
+
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+
intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
- #ifdef CONFIG_DRM_I915_FBDEV
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
- #ifdef CONFIG_DRM_I915_FBDEV
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
void (*mode_set)(struct intel_encoder *intel_encoder);
void (*disable)(struct intel_encoder *);
void (*post_disable)(struct intel_encoder *);
+ void (*post_pll_disable)(struct intel_encoder *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
+ uint8_t lane_count;
+
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
int scanline_offset;
unsigned start_vbl_count;
+ ktime_t start_vbl_time;
+
struct intel_crtc_atomic_commit atomic;
/* scalers available on this crtc */
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
- uint32_t color_range;
+ bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
M2_N2
};
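+/*
+ * State carried between sink CRC reads: whether the DP_TEST_SINK CRC
+ * test is running, plus the last CRC value and test count seen, so that
+ * stale or repeated results can be detected.
+ */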
+struct sink_crc {
+ bool started;
+ u8 last_crc[6];
+ int last_count;
+};
+
struct intel_dp {
uint32_t output_reg;
uint32_t aux_ch_ctl_reg;
uint32_t DP;
+ int link_rate;
+ uint8_t lane_count;
bool has_audio;
enum hdmi_force_audio force_audio;
- uint32_t color_range;
+ bool limited_color_range;
bool color_range_auto;
- uint8_t link_bw;
- uint8_t rate_select;
- uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ struct sink_crc sink_crc;
struct drm_dp_aux aux;
uint8_t train_set[4];
int panel_power_up_delay;
struct intel_dp dp;
struct intel_hdmi hdmi;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+ bool release_cl2_override;
};
struct intel_dp_mst_encoder {
void *port; /* store this opaque as its illegal to dereference it */
};
-static inline int
+static inline enum dpio_channel
vlv_dport_to_channel(struct intel_digital_port *dport)
{
switch (dport->port) {
}
}
-static inline int
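+/* Map a digital port to its DPIO PHY: ports B/C live on PHY0, port D on
+ * PHY1.
+ */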
+static inline enum dpio_phy
+vlv_dport_to_phy(struct intel_digital_port *dport)
+{
+ switch (dport->port) {
+ case PORT_B:
+ case PORT_C:
+ return DPIO_PHY0;
+ case PORT_D:
+ return DPIO_PHY1;
+ default:
+ BUG();
+ }
+}
+
+static inline enum dpio_channel
vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
extern const struct drm_plane_funcs intel_plane_funcs;
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
+int intel_hrawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
bool intel_connector_get_hw_state(struct intel_connector *connector);
-bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
/* legacy fbdev emulation in intel_fbdev.c */
- #ifdef CONFIG_DRM_I915_FBDEV
+ #ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_pipe_update_start(struct intel_crtc *crtc,
- uint32_t *start_vbl_count);
-void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
+void intel_pipe_update_start(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
ret = drm_fb_helper_set_par(info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
ret = drm_fb_helper_blank(blank, info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
ret = drm_fb_helper_pan_display(var, info);
if (ret == 0) {
- /*
- * FIXME: fbdev presumes that all callbacks also work from
- * atomic contexts and relies on that for emergency oops
- * printing. KMS totally doesn't do that and the locking here is
- * by far not the only place this goes wrong. Ignore this for
- * now until we solve this for real.
- */
mutex_lock(&fb_helper->dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
mutex_unlock(&fb_helper->dev->struct_mutex);
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = intel_fbdev_set_par,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
obj = intel_fb->obj;
size = obj->base.size;
- info = framebuffer_alloc(0, &dev->pdev->dev);
- if (!info) {
- ret = -ENOMEM;
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
goto out_unpin;
}
fb = &ifbdev->fb->base;
ifbdev->helper.fb = fb;
- ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
- ret = fb_alloc_cmap(&info->cmap, 256, 0);
- if (ret) {
- ret = -ENOMEM;
- goto out_unpin;
- }
/* setup aperture base/size for vesafb takeover */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unpin;
- }
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
size);
if (!info->screen_base) {
ret = -ENOSPC;
- goto out_unpin;
+ goto out_destroy_fbi;
}
info->screen_size = size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n",
fb->width, fb->height,
i915_gem_obj_ggtt_offset(obj), obj);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
+ out_destroy_fbi:
+ drm_fb_helper_release_fbi(helper);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
- if (ifbdev->helper.fbdev) {
- struct fb_info *info = ifbdev->helper.fbdev;
- unregister_framebuffer(info);
- iounmap(info->screen_base);
- if (info->cmap.len)
- fb_dealloc_cmap(&info->cmap);
-
- framebuffer_release(info);
- }
+ drm_fb_helper_unregister_fbi(&ifbdev->helper);
+ drm_fb_helper_release_fbi(&ifbdev->helper);
drm_fb_helper_fini(&ifbdev->helper);
if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
memset_io(info->screen_base, 0, info->screen_size);
- fb_set_suspend(info, state);
+ drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
}
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev))
- hdmi_val |= intel_hdmi->color_range;
+ if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
+ hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
- if (pipe_config->has_hdmi_sink &&
- drm_match_cea_mode(adjusted_mode) > 1)
- intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
- else
- intel_hdmi->color_range = 0;
+ pipe_config->limited_color_range =
+ pipe_config->has_hdmi_sink &&
+ drm_match_cea_mode(adjusted_mode) > 1;
+ } else {
+ pipe_config->limited_color_range =
+ intel_hdmi->limited_color_range;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
clock_12bpc *= 2;
}
- if (intel_hdmi->color_range)
- pipe_config->limited_color_range = true;
-
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev))
pipe_config->has_pch_encoder = true;
if (property == dev_priv->broadcast_rgb_property) {
bool old_auto = intel_hdmi->color_range_auto;
- uint32_t old_range = intel_hdmi->color_range;
+ bool old_range = intel_hdmi->limited_color_range;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
break;
case INTEL_BROADCAST_RGB_FULL:
intel_hdmi->color_range_auto = false;
- intel_hdmi->color_range = 0;
+ intel_hdmi->limited_color_range = false;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
- intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235;
+ intel_hdmi->limited_color_range = true;
break;
default:
return -EINVAL;
}
if (old_auto == intel_hdmi->color_range_auto &&
- old_range == intel_hdmi->color_range)
+ old_range == intel_hdmi->limited_color_range)
return 0;
goto done;
mutex_unlock(&dev_priv->sb_lock);
}
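+/*
+ * Assert or deassert the soft reset on the data lanes in use: lanes 0/1
+ * (PCS01) always, lanes 2/3 (PCS23) only when more than two lanes are
+ * enabled. The reset is also propagated to the clock soft reset.
+ */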
+static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ }
+}
+
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
intel_hdmi_prepare(encoder);
+ /*
+ * Must trick the second common lane into life.
+ * Otherwise we can't even access the PLL.
+ */
+ if (ch == DPIO_CH0 && pipe == PIPE_B)
+ dport->release_cl2_override =
+ !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+ chv_phy_powergate_lanes(encoder, true, 0x0);
+
mutex_lock(&dev_priv->sb_lock);
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
+
/* program left/right clock distribution */
if (pipe != PIPE_B) {
val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
mutex_unlock(&dev_priv->sb_lock);
}
+static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /*
+ * Leave the power down bit cleared for at least one
+ * lane so that chv_phy_powergate_ch() will power
+ * on something when the channel is otherwise unused.
+ * When the port is off and the override is removed
+ * the lanes power down anyway, so otherwise it doesn't
+ * really matter what the state of power down bits is
+ * after this.
+ */
+ chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
mutex_lock(&dev_priv->sb_lock);
- /* Propagate soft reset to data lane reset */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
mutex_unlock(&dev_priv->sb_lock);
}
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- /* Deassert soft data lane reset*/
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
-
/* Program Tx latency optimal setting */
for (i = 0; i < 4; i++) {
/* Set the upar bit */
DPIO_TX1_STAGGER_MULT(7) |
DPIO_TX2_STAGGER_MULT(5));
+ /* Deassert data lane reset */
+ chv_data_lane_soft_reset(encoder, false);
+
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
+
+ /*
+ * Supposedly this value shouldn't matter when unique transition
+ * scale is disabled, but in fact it does matter. Let's just
+ * always program the same value and hope it's OK.
+ */
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
- /* Disable unique transition scale */
+ /*
+ * The document said it needs to set bit 27 for ch0 and bit 26
+ * for ch1. Might be a typo in the doc.
+ * For now, for this unique transition scale selection, set bit
+ * 27 for ch0 and ch1.
+ */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
}
- /* Additional steps for 1200mV-0dB */
-#if 0
- val = vlv_dpio_read(dev_priv, pipe, VLV_TX_DW3(ch));
- if (ch)
- val |= DPIO_TX_UNIQ_TRANS_SCALE_CH1;
- else
- val |= DPIO_TX_UNIQ_TRANS_SCALE_CH0;
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(ch), val);
-
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(ch),
- vlv_dpio_read(dev_priv, pipe, VLV_TX_DW2(ch)) |
- (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT));
-#endif
/* Start swing calculation */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
g4x_enable_hdmi(encoder);
vlv_wait_port_ready(dev_priv, dport, 0x0);
+
+ /* Second common lane will stay alive on its own now */
+ if (dport->release_cl2_override) {
+ chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ dport->release_cl2_override = false;
+ }
}
static void intel_hdmi_destroy(struct drm_connector *connector)
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
+ uint8_t alternate_ddc_pin;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
else
intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
- intel_encoder->hpd_pin = HPD_PORT_B;
+ /*
+ * On BXT A0/A1, sw needs to activate DDIA HPD logic and
+ * interrupts to check the external panel connection.
+ */
+ if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
+ intel_encoder->hpd_pin = HPD_PORT_A;
+ else
+ intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
if (IS_BROXTON(dev_priv))
intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
+ case PORT_E:
+ /* On SKL PORT E doesn't have seperate GMBUS pin
+ * We rely on VBT to set a proper alternate GMBUS pin. */
+ alternate_ddc_pin =
+ dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
+ switch (alternate_ddc_pin) {
+ case DDC_PIN_B:
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
+ break;
+ case DDC_PIN_C:
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
+ break;
+ case DDC_PIN_D:
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
+ break;
+ default:
+ MISSING_CASE(alternate_ddc_pin);
+ }
+ intel_encoder->hpd_pin = HPD_PORT_E;
+ break;
case PORT_A:
intel_encoder->hpd_pin = HPD_PORT_A;
/* Internal port only for eDP. */
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
+ intel_encoder->post_pll_disable = chv_hdmi_post_pll_disable;
} else if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = vlv_hdmi_pre_enable;
reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
}
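+/* In 48-bit mode only PDP0 is used: it carries the PML4 base address and
+ * the remaining PDP descriptors are ignored.
+ */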
+#define ASSIGN_CTX_PML4(ppgtt, reg_state) { \
+ reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
+ reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
+}
+
enum {
ADVANCED_CONTEXT = 0,
- LEGACY_CONTEXT,
+ LEGACY_32B_CONTEXT,
ADVANCED_AD_CONTEXT,
LEGACY_64B_CONTEXT
};
-#define GEN8_CTX_MODE_SHIFT 3
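+/* Pick the context addressing mode for the descriptor: legacy 64-bit
+ * (48b PPGTT) when full 48-bit PPGTT is in use, legacy 32-bit otherwise.
+ */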
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+ LEGACY_64B_CONTEXT :\
+ LEGACY_32B_CONTEXT)
enum {
FAULT_AND_HANG = 0,
FAULT_AND_HALT, /* Debug only */
{
WARN_ON(i915.enable_ppgtt == -1);
+ /* On platforms with execlists available, vGPU only supports
+ * execlist mode, not ring buffer mode.
+ */
+ if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+ return 1;
+
if (INTEL_INFO(dev)->gen >= 9)
return 1;
*/
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
- u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
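+ /* The LRCA points at the per-process HWSP page of the context
+ * image; the register state follows on the next page.
+ */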
+ u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+ LRC_PPHWSP_PN * PAGE_SIZE;
/* LRCA is required to be 4K aligned so the more significant 20 bits
* are globally unique */
return lrca >> 12;
}
-static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_request *rq)
+uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+ struct intel_engine_cs *ring)
{
- struct intel_engine_cs *ring = rq->ring;
struct drm_device *dev = ring->dev;
- struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
uint64_t desc;
- uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+ uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
+ LRC_PPHWSP_PN * PAGE_SIZE;
WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
desc = GEN8_CTX_VALID;
- desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+ desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(ctx_obj->base.dev))
desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
uint64_t desc[2];
if (rq1) {
- desc[1] = execlists_ctx_descriptor(rq1);
+ desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
rq1->elsp_submitted++;
} else {
desc[1] = 0;
}
- desc[0] = execlists_ctx_descriptor(rq0);
+ desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
rq0->elsp_submitted++;
/* You must always write both descriptors in the order below. */
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
- page = i915_gem_object_get_page(ctx_obj, 1);
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
reg_state[CTX_RING_TAIL+1] = rq->tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
- /* True PPGTT with dynamic page allocation: update PDP registers and
- * point the unallocated PDPs to the scratch page
- */
- if (ppgtt) {
+ if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+ /* True 32b PPGTT with dynamic page allocation: update PDP
+ * registers and point the unallocated PDPs to scratch page.
+ * PML4 is allocated during ppgtt init, so this is not needed
+ * in 48-bit mode.
+ */
ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
i915_gem_request_reference(request);
- request->tail = request->ringbuf->tail;
-
spin_lock_irq(&ring->execlist_lock);
list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *ring = request->ring;
+ struct drm_i915_private *dev_priv = request->i915;
intel_logical_ring_advance(request->ringbuf);
+ request->tail = request->ringbuf->tail;
+
if (intel_ring_stopped(ring))
return;
- execlists_context_queue(request);
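+ /* Route the request through the GuC if a submission client was
+ * created at load time, otherwise queue it for direct execlist
+ * submission.
+ */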
+ if (dev_priv->guc.execbuf_client)
+ i915_guc_submit(dev_priv->guc.execbuf_client, request);
+ else
+ execlists_context_queue(request);
}
static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
+ struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (rq->ctx->engine[ring->id].pin_count++ == 0) {
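+ /* Bias the pin above GUC_WOPCM_TOP, as GGTT offsets below the
+ * WOPCM region are not usable by the GuC.
+ */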
- ret = i915_gem_obj_ggtt_pin(ctx_obj,
- GEN8_LR_CONTEXT_ALIGN, 0);
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
goto reset_pin_count;
if (ret)
goto unpin_ctx_obj;
+ ctx_obj->dirty = true;
+
+ /* Invalidate GuC TLB. */
+ if (i915.enable_guc_submission)
+ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
return ret;
if (IS_SKYLAKE(ring->dev) && INTEL_REVID(ring->dev) <= SKL_REVID_E0)
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
- wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8(1) |
+ wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8(1) |
+ wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit(batch, index, GEN8_L3SQCREG4);
wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
* Ideally, we should set Force PD Restore in ctx descriptor,
* but we can't. Force Restore would be a second option, but
* it is unsafe in case of lite-restore (because the ctx is
- * not idle). */
+ * not idle). PML4 is allocated during ppgtt init so this is
+ * not needed in 48-bit. */
if (req->ctx->ppgtt &&
(intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
- ret = intel_logical_ring_emit_pdps(req);
- if (ret)
- return ret;
+ if (!USES_FULL_48BIT_PPGTT(req->i915) &&
+ !intel_vgpu_active(req->i915->dev)) {
+ ret = intel_logical_ring_emit_pdps(req);
+ if (ret)
+ return ret;
+ }
req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
}
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
+static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+{
+ /*
+ * On BXT A steppings there is a HW coherency issue whereby the
+ * MI_STORE_DATA_IMM storing the completed request's seqno
+ * occasionally doesn't invalidate the CPU cache. Work around this by
+ * clflushing the corresponding cacheline whenever the caller wants
+ * the coherency to be guaranteed. Note that this cacheline is known
+ * to be clean at this point, since we only write it in
+ * bxt_a_set_seqno(), where we also do a clflush after the write. So
+ * this clflush in practice becomes an invalidate operation.
+ */
+
+ if (!lazy_coherency)
+ intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+{
+ intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+
+ /* See bxt_a_get_seqno() explaining the reason for the clflush. */
+ intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
ring->init_hw = gen8_init_render_ring;
ring->init_context = gen8_init_rcs_context;
ring->cleanup = intel_fini_pipe_control;
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
+ if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ ring->get_seqno = bxt_a_get_seqno;
+ ring->set_seqno = bxt_a_set_seqno;
+ } else {
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ }
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
ring->irq_get = gen8_logical_ring_get_irq;
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
+ if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ ring->get_seqno = bxt_a_get_seqno;
+ ring->set_seqno = bxt_a_set_seqno;
+ } else {
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ }
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
+ if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ ring->get_seqno = bxt_a_get_seqno;
+ ring->set_seqno = bxt_a_set_seqno;
+ } else {
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ }
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->init_hw = gen8_init_common_ring;
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
+ if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) {
+ ring->get_seqno = bxt_a_get_seqno;
+ ring->set_seqno = bxt_a_set_seqno;
+ } else {
+ ring->get_seqno = gen8_get_seqno;
+ ring->set_seqno = gen8_set_seqno;
+ }
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- page = i915_gem_object_get_page(ctx_obj, 1);
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- /* With dynamic page allocation, PDPs may not be allocated at this point,
- * Point the unallocated PDPs to the scratch page
- */
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+ if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+ /* 64b PPGTT (48bit canonical)
+ * PDP0_DESCRIPTOR contains the base address to PML4 and
+ * other PDP Descriptors are ignored.
+ */
+ ASSIGN_CTX_PML4(ppgtt, reg_state);
+ } else {
+ /* 32b PPGTT
+ * PDP*_DESCRIPTOR contains the base address of the space supported.
+ * With dynamic page allocation, PDPs may not be allocated at
+ * this point. Point the unallocated PDPs to the scratch page.
+ */
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+ }
+
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
struct drm_i915_gem_object *default_ctx_obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct page *page;
- /* The status page is offset 0 from the default context object
- * in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
- ring->status_page.page_addr =
- kmap(sg_page(default_ctx_obj->pages->sgl));
+ /* The HWSP is part of the default context object in LRC mode. */
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
+ + LRC_PPHWSP_PN * PAGE_SIZE;
+ page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+ ring->status_page.page_addr = kmap(page);
ring->status_page.obj = default_ctx_obj;
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
{
const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
context_size = round_up(get_lr_context_size(ring), 4096);
+ /* One extra page is reserved as shared data between the driver and the GuC */
+ context_size += PAGE_SIZE * LRC_PPHWSP_PN;
+
ctx_obj = i915_gem_alloc_object(dev, context_size);
if (!ctx_obj) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
}
if (is_global_default_ctx) {
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
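+ /* Pin above the GuC's WOPCM region (see intel_lr_context_pin). */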
+ ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret) {
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
ret);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
+
+ /* Invalidate GuC TLB. */
+ if (i915.enable_guc_submission)
+ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
ringbuf->ring = ring;
- ringbuf->size = 32 * PAGE_SIZE;
+ ringbuf->size = 4 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
WARN(1, "Failed get_pages for context obj\n");
continue;
}
- page = i915_gem_object_get_page(ctx_obj, 1);
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
reg_state[CTX_RING_HEAD+1] = 0;
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
+ enum pipe pipe;
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection. Supposedly DSI also
+ * needs the ref clock up and running.
+ *
+ * CHV DPLL B/C have some issues if VGA mode is enabled.
+ */
+ for_each_pipe(dev_priv->dev, pipe) {
+ u32 val = I915_READ(DPLL(pipe));
+
+ val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ I915_WRITE(DPLL(pipe), val);
+ }
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ /* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
vlv_set_power_well(dev_priv, power_well, false);
}
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
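+ /* Find the power well with the given id, or NULL if there is none. */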
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ int power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
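+ /*
+ * Reconstruct the expected DISPLAY_PHY_STATUS value from the shadowed
+ * PHY_CONTROL and the common lane power well state, then warn if the
+ * hardware disagrees.
+ */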
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+ u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_status = 0;
+ u32 tmp;
+
+ if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+ /* CL1 is on whenever anything is on in either channel */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+ /*
+ * The DPLLB check accounts for the pipe B + port A usage
+ * with CL2 powered up but all the lanes in the second channel
+ * powered down.
+ */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+ (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+ }
+
+ if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+ }
+
+ /*
+ * The PHY may be busy with some initial calibration and whatnot,
+ * so the power state can take a while to actually change.
+ */
+ if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10))
+ WARN(phy_status != tmp,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ tmp, phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum dpio_phy phy;
+ enum pipe pipe;
+ uint32_t tmp;
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ pipe = PIPE_A;
phy = DPIO_PHY0;
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
- DPLL_REF_CLK_ENABLE_VLV);
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) | DPLL_VGA_MODE_DIS |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
} else {
+ pipe = PIPE_C;
phy = DPIO_PHY1;
- I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) | DPLL_VGA_MODE_DIS |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
+
+ /* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Enable dynamic power down */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+ DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+ if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
+ tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp |= DPIO_DYNPWRDOWNEN_CH1;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ } else {
+ /*
+ * Force the non-existing CL2 off. BXT does this
+ * too, so maybe it saves some power even though
+ * CL2 doesn't exist?
+ */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
+
+ DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+}
+
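+ /*
+ * Verify that the DPIO lane power down status matches what we expect
+ * from the requested override/mask combination.
+ */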
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override, unsigned int mask)
+{
+ enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+ u32 reg, val, expected, actual;
+
+ if (ch == DPIO_CH0)
+ reg = _CHV_CMN_DW0_CH0;
+ else
+ reg = _CHV_CMN_DW6_CH1;
+
+ mutex_lock(&dev_priv->sb_lock);
+ val = vlv_dpio_read(dev_priv, pipe, reg);
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /*
+ * This assumes !override is only used when the port is disabled.
+ * All lanes should power down even without the override when
+ * the port is disabled.
+ */
+ if (!override || mask == 0xf) {
+ expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ /*
+ * If CH1 common lane is not active anymore
+ * (eg. for pipe B DPLL) the entire channel will
+ * shut down, which causes the common lane registers
+ * to read as 0. That means we can't actually check
+ * the lane power down status bits, but as the entire
+ * register reads as 0 it's a good indication that the
+ * channel is indeed entirely powered down.
+ */
+ if (ch == DPIO_CH1 && val == 0)
+ expected = 0;
+ } else if (mask != 0x0) {
+ expected = DPIO_ANYDL_POWERDOWN;
+ } else {
+ expected = 0;
+ }
+
+ if (ch == DPIO_CH0)
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ else
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+ actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+ WARN(actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
+}
+
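+ /*
+ * Set or clear the power down override for a whole PHY channel,
+ * returning the previous override state so the caller can restore it.
+ */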
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool was_override;
+
+ mutex_lock(&power_domains->lock);
+
+ was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ if (override == was_override)
+ goto out;
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+out:
+ mutex_unlock(&power_domains->lock);
+
+ return was_override;
+}
+
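+ /*
+ * Select which lanes of the encoder's PHY channel may power down
+ * (mask) and whether the override is enabled, then sanity check the
+ * resulting PHY state.
+ */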
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+
+ mutex_lock(&power_domains->lock);
+
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+ assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+ mutex_unlock(&power_domains->lock);
}
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
}
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
-
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
},
};
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- int power_well_id)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- struct i915_power_well *power_well;
- int i;
-
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
- if (power_well->data == power_well_id)
- return power_well;
- }
-
- return NULL;
-}
-
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id)
{
* DISPLAY_PHY_CONTROL can get corrupted if read. As a
* workaround never ever read DISPLAY_PHY_CONTROL, and
* instead maintain a shadow copy ourselves. Use the actual
- * power well state to reconstruct the expected initial
- * value.
+ * power well state and lane status to reconstruct the
+ * expected initial value.
*/
dev_priv->chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
- PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
- PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
- if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
+ PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
+
+ /*
+ * If all lanes are disabled we leave the override disabled
+ * with all power down bits cleared to match the state we
+ * would use after disabling the port. Otherwise enable the
+ * override and set the lane powerdown bits according to the
+ * current lane status.
+ */
+ if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
+ uint32_t status = I915_READ(DPLL(PIPE_A));
+ unsigned int mask;
+
+ mask = status & DPLL_PORTB_READY_MASK;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
+
+ mask = (status & DPLL_PORTC_READY_MASK) >> 4;
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
+
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
+ }
+
+ if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
+ uint32_t status = I915_READ(DPIO_PHY_STATUS);
+ unsigned int mask;
+
+ mask = status & DPLL_PORTD_READY_MASK;
+
+ if (mask == 0xf)
+ mask = 0x0;
+ else
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
+
+ dev_priv->chv_phy_control |=
+ PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
+
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ }
+
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+
+ DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->chv_phy_control);
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
power_domains->initializing = true;
if (IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
+ mutex_unlock(&power_domains->lock);
} else if (IS_VALLEYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+ #define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}
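+/* Training pattern 3 needs DPCD r1.2 or later plus the sink's TPS3 capability bit. */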
+static inline bool
+drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x12 &&
+ dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
+}
+
/*
* DisplayPort AUX channel
*/