-rw-r--r--  drivers/gpu/drm/drm_edid.c                  |  19
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c         |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c             |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c             |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             |  96
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     |   3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c         |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c             |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h             |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c            |  13
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c            |  45
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        | 355
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h            |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c           |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c          |  21
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c             |  22
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     | 126
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |  30
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c           |  31
-rw-r--r--  include/drm/drm_crtc.h                      |   1
24 files changed, 591 insertions(+), 280 deletions(-)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 484c36a4b7a5..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2079,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
return num_modes;
}
EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+ uint8_t i;
+
+ for (i = 0; i < drm_num_cea_modes; i++)
+ if (drm_mode_equal(mode, &edid_cea_modes[i]))
+ return i + 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
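
A minimal usage sketch for the new export, assuming a hypothetical HDMI encoder filling its AVI infoframe (the real consumer in this series is the two-line intel_hdmi.c hunk in the diffstat; the struct and function names below are illustrative):

	/* Hypothetical caller: derive the AVI infoframe VIC from the
	 * adjusted mode. A return of 0 doubles as the "no VIC available"
	 * value CEA-861 specifies for non-CEA modes, so it can be
	 * programmed unconditionally. */
	static void example_set_avi_vic(struct example_avi_infoframe *frame,
					const struct drm_display_mode *adjusted_mode)
	{
		frame->vic = drm_mode_cea_vic(adjusted_mode);
	}
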
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4568e7d8a060..e6a11ca85eaf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
- if (!work->pending) {
+ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "Stall check enabled, ");
else
seq_printf(m, "Stall check waiting for page flip ioctl, ");
- seq_printf(m, "%d prepares\n", work->pending);
+ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- seq_printf(m, " SYNC_0: 0x%08x\n",
- error->semaphore_mboxes[ring][0]);
- seq_printf(m, " SYNC_1: 0x%08x\n",
- error->semaphore_mboxes[ring][1]);
+ seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
}
seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
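
The atomic_read()/INTEL_FLIP_COMPLETE tests above lean on a small state machine for intel_unpin_work.pending; the values are defined in intel_drv.h and are not part of this hunk, so the progression below is inferred from the comparisons used here:

	/* Assumed lifecycle of work->pending (definitions not in this diff):
	 *   INTEL_FLIP_INACTIVE  - pageflip ioctl queued the work
	 *   INTEL_FLIP_PENDING   - flip emitted to the ring
	 *   INTEL_FLIP_COMPLETE  - flip-done interrupt observed
	 * Anything below INTEL_FLIP_COMPLETE means "still in flight", which
	 * is the test both i915_gem_pageflip_info() here and
	 * i915_pageflip_stall_check() in i915_irq.c perform via atomic_read().
	 */
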
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a48e4910ea2c..8f63cd5de4b4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -141,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
@@ -592,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
static int i915_quiescent(struct drm_device *dev)
{
- struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
i915_kernel_lost_context(dev);
- return intel_wait_ring_idle(ring);
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1045,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_ring_buffer *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1065,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+ ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
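
The head/tail arithmetic in the first hunk wraps modulo the ring size; a worked example, assuming a 4 KiB ring and taking I915_RING_FREE_SPACE as the small reserve that keeps the tail from ever fully catching the head:

	/* Assume head = 512, tail = 3072, size = 4096, I915_RING_FREE_SPACE = 64:
	 *   space = 512 - (3072 + 64) = -2624    negative: tail sits past head
	 *   space += 4096                        -> 1472 bytes writable
	 * The reserve guarantees head != tail on a full ring, so "full" and
	 * "empty" remain distinguishable.
	 */
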
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6745c7f976db..530db83ef320 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -554,8 +554,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ironlake_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
@@ -564,7 +563,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
intel_modeset_init_hw(dev);
- intel_modeset_setup_hw_state(dev);
+ intel_modeset_setup_hw_state(dev, false);
drm_irq_install(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 87c06f97fa89..557843dd4b2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -197,6 +197,7 @@ struct drm_i915_error_state {
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
@@ -381,6 +382,11 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
};
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -909,6 +915,8 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
+ bool fdi_rx_polarity_reversed;
+
struct i915_suspend_saved_registers regfile;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
@@ -1417,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno);
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1436,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1652,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b0016bb65631..c1f691958f89 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1696,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
+ list_del(&obj->gtt_list);
+
ops->put_pages(obj);
obj->pages = NULL;
- list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1857,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
obj->ring = ring;
@@ -1922,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
+ /* The hardware uses various monotonic 32-bit counters; if we
+ * detect that they will wraparound we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(dev);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
- return seqno;
+ return 0;
}
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->next_seqno = 1;
+ }
- return ring->outstanding_lazy_request;
+ *seqno = dev_priv->next_seqno++;
+ return 0;
}
int
@@ -1952,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
- u32 seqno;
int was_empty;
int ret;
@@ -1971,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
if (request == NULL)
return -ENOMEM;
- seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -1980,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
- ret = ring->add_request(ring, &seqno);
+ ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
- trace_i915_gem_request_add(ring, seqno);
-
- request->seqno = seqno;
+ request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = jiffies;
@@ -2006,6 +2034,7 @@ i915_add_request(struct intel_ring_buffer *ring,
spin_unlock(&file_priv->mm.lock);
}
+ trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
if (!dev_priv->mm.suspended) {
@@ -2022,7 +2051,7 @@ i915_add_request(struct intel_ring_buffer *ring,
}
if (out_seqno)
- *out_seqno = seqno;
+ *out_seqno = request->seqno;
return 0;
}
@@ -2120,7 +2149,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
- int i;
if (list_empty(&ring->request_list))
return;
@@ -2129,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
- if (seqno >= ring->sync_seqno[i])
- ring->sync_seqno[i] = 0;
-
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2377,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
ret = to->sync_to(to, from, seqno);
if (!ret)
- from->sync_seqno[idx] = seqno;
+ /* We use last_read_seqno because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->sync_seqno[idx] = obj->last_read_seqno;
return ret;
}
@@ -2460,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
- if (list_empty(&ring->active_list))
- return 0;
-
- return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2480,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret)
return ret;
- ret = i915_ring_idle(ring);
+ ret = intel_ring_idle(ring);
if (ret)
return ret;
}
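
The wrap handling above exists because seqnos are order-compared, not equality-tested. For CPU-side checks, i915_seqno_passed() (see the i915_drv.h hunk earlier) survives the wrap via a signed difference; the hardware semaphore mailboxes compare raw values and do not, hence the forced idle. A worked example of the trick:

	/* i915_seqno_passed(seq1, seq2) == ((int32_t)(seq1 - seq2) >= 0)
	 * With seq1 = 0x00000002 (just wrapped) and seq2 = 0xfffffffe:
	 *   seq1 - seq2 = 0x00000004 as u32, i.e. +4 as int32_t -> "passed",
	 * even though seq1 < seq2 numerically. The semaphore registers have
	 * no such trick, so i915_gem_handle_seqno_wrap() idles the GPU and
	 * zeroes every ring->sync_seqno[] before next_seqno restarts at 1.
	 */
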
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 0e510df80d73..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
- u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from_obj, ring, seqno);
+ i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 48e4317e72dc..ee8f97f0539e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -713,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -726,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring, seqno);
+ i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
- obj->last_write_seqno = seqno;
+ obj->last_write_seqno = intel_ring_get_seqno(ring);
if (obj->pin_count) /* check for potential scanout */
intel_mark_fb_busy(obj);
}
@@ -789,7 +788,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
- u32 seqno;
u32 mask;
u32 flags;
int ret, mode, i;
@@ -994,22 +992,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- seqno = i915_gem_next_request_seqno(ring);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
- if (seqno < ring->sync_seqno[i]) {
- /* The GPU can not handle its semaphore value wrapping,
- * so every billion or so execbuffers, we need to stall
- * the GPU in order to reset the counters.
- */
- ret = i915_gpu_idle(dev);
- if (ret)
- goto err;
- i915_gem_retire_requests(dev);
-
- BUG_ON(ring->sync_seqno[i]);
- }
- }
-
ret = i915_switch_context(ring, file, ctx_id);
if (ret)
goto err;
@@ -1035,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- trace_i915_gem_ring_dispatch(ring, seqno, flags);
-
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1060,7 +1040,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f7ac61ee1504..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -639,6 +639,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+#ifdef CONFIG_INTEL_IOMMU
+ dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2604867e6b7d..a4dc97f8b9f0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
- if (work == NULL || work->pending || !work->enable_stall_check) {
+ if (work == NULL ||
+ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+ !work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 97fbd9d1823b..3f75cfaf1c3f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3843,7 +3843,9 @@
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
-#define DPLS_EDP_PPS_FIX_DIS (1<<0)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
+#define DPLS_EDP_PPS_FIX_DIS (1<<0)
#define _FDI_RXA_CHICKEN 0xc200c
#define _FDI_RXB_CHICKEN 0xc2010
@@ -3915,6 +3917,7 @@
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
@@ -4534,6 +4537,10 @@
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4551,10 +4558,12 @@
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
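
How the new SBI fields compose, per the reworked accessors later in this diff (intel_sbi_read/intel_sbi_write in intel_display.c): bit 16 selects the destination, bits 15:8 the opcode, and the CRRD/CRWR opcodes pair with the iCLK while IORD/IOWR pair with the MPHY:

	/* Example SBI_CTL_STAT values as the new accessors build them:
	 *   iCLK config read: SBI_BUSY | SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD
	 *   MPHY io write:    SBI_BUSY | SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR
	 * The caller then polls SBI_CTL_STAT until SBI_BUSY clears and
	 * checks SBI_RESPONSE_FAIL, exactly as intel_sbi_read/write do.
	 */
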
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3bf51d58319d..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
- if (!IS_IVYBRIDGE(dev))
+ if (!HAS_L3_GPU_CACHE(dev))
return -EPERM;
if (offset % 4 != 0)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 331af3bc6894..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
+ /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+ if (HAS_PCH_LPT(dev) &&
+ (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
@@ -793,4 +798,12 @@ void intel_crt_init(struct drm_device *dev)
crt->force_hotplug_required = 0;
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+ /*
+ * TODO: find a proper way to discover whether we need to set the
+ * polarity reversal bit or not, instead of relying on the BIOS.
+ */
+ if (HAS_PCH_LPT(dev))
+ dev_priv->fdi_rx_polarity_reversed =
+ !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
}
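
The 2-lane LPT rejection above is bandwidth arithmetic; ignoring the exact rounding and spread-spectrum margin inside ironlake_get_lanes_required(), a rough worked example with the values passed above (270000 kHz link, 24 bpp):

	/* For mode->clock = 200000 kHz at 24 bpp:
	 *   payload  = 200000 * 24 = 4,800,000 kbit/s
	 *   per lane = 270000 * 8  = 2,160,000 kbit/s
	 *   lanes    = 4800000 / 2160000 = ~2.2, rounded up to 3
	 * LPT's FDI receiver only has two lanes, so such a mode is
	 * rejected with MODE_CLOCK_HIGH.
	 */
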
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 852012b6fc5b..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -138,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_800MV_3_5DB_HSW
};
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ uint32_t reg = DDI_BUF_CTL(port);
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ udelay(1);
+ if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ return;
+ }
+ DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
@@ -167,6 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
((intel_crtc->fdi_lanes - 1) << 19);
+ if (dev_priv->fdi_rx_polarity_reversed)
+ rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
@@ -231,18 +246,30 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
return;
}
+ temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+ POSTING_READ(DDI_BUF_CTL(PORT_E));
+
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- I915_WRITE(DP_TP_CTL(PORT_E),
- I915_READ(DP_TP_CTL(PORT_E)) & ~DP_TP_CTL_ENABLE);
+ temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+ temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+ I915_WRITE(DP_TP_CTL(PORT_E), temp);
+ POSTING_READ(DP_TP_CTL(PORT_E));
+
+ intel_wait_ddi_buf_idle(dev_priv, PORT_E);
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+ POSTING_READ(_FDI_RXA_CTL);
/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, temp);
+ POSTING_READ(_FDI_RXA_MISC);
}
DRM_ERROR("FDI link training failed!\n");
@@ -1222,20 +1249,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
}
}
-static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
- enum port port)
-{
- uint32_t reg = DDI_BUF_CTL(port);
- int i;
-
- for (i = 0; i < 8; i++) {
- udelay(1);
- if (I915_READ(reg) & DDI_BUF_IS_IDLE)
- return;
- }
- DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
-}
-
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
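
The reworked retry path above encodes a specific teardown order before the next training attempt; summarizing the sequence the hunks implement:

	/* FDI retrain teardown, per the hunks above:
	 *   1. clear DDI_BUF_CTL_ENABLE on PORT_E       (stop the buffer)
	 *   2. DP_TP_CTL: disable, force training pattern 1
	 *   3. intel_wait_ddi_buf_idle()                 (bounded ~8 us poll)
	 *   4. clear FDI_RX_ENABLE, reset FDI_RX_MISC pwrdn lanes
	 * with a POSTING_READ() after each write so the values land before
	 * the subsequent delays.
	 */
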
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index de51489de23c..5d127e068950 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1506,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* SBI access */
static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
+ u32 tmp;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_DATA,
- value);
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRWR);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, value);
+
+ if (destination == SBI_ICLK)
+ tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+ else
+ tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+ I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -1536,23 +1538,25 @@ out_unlock:
}
static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+ enum intel_sbi_destination destination)
{
unsigned long flags;
u32 value = 0;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
- I915_WRITE(SBI_ADDR,
- (reg << 16));
- I915_WRITE(SBI_CTL_STAT,
- SBI_BUSY |
- SBI_CTL_OP_CRRD);
+ I915_WRITE(SBI_ADDR, (reg << 16));
+
+ if (destination == SBI_ICLK)
+ value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
@@ -2424,18 +2428,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags |= FDI_PHASE_SYNC_OVR(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
- flags |= FDI_PHASE_SYNC_EN(pipe);
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
- POSTING_READ(SOUTH_CHICKEN1);
-}
-
static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2610,8 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2744,8 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
- cpt_phase_pointer_enable(dev, pipe);
-
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2884,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
udelay(100);
}
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 flags = I915_READ(SOUTH_CHICKEN1);
-
- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
- POSTING_READ(SOUTH_CHICKEN1);
-}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2921,8 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- } else if (HAS_PCH_CPT(dev)) {
- cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -3024,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
/* Disable SSCCTL */