Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris
  Wilson
- some leftover kill-agp on gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- Stolen mem support from Chris Wilson, one of the many steps to get to
  real fastboot support.
- Some DDI code cleanups from Paulo.
- Some refactorings around lvds and dp code.
- some random little bits&pieces

* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
  drm/i915: Return the real error code from intel_set_mode()
  drm/i915: Make GSM void
  drm/i915: Move GSM mapping into dev_priv
  drm/i915: Move even more gtt code to i915_gem_gtt
  drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
  drm/i915: Introduce i915_gem_set_seqno()
  drm/i915: Always clear semaphore mboxes on seqno wrap
  drm/i915: Initialize hardware semaphore state on ring init
  drm/i915: Introduce ring set_seqno
  drm/i915: Missed conversion to gtt_pte_t
  drm/i915: Bug on unsupported swizzled platforms
  drm/i915: BUG() if fences are used on unsupported platform
  drm/i915: fixup overlay stolen memory leak
  drm/i915: clean up PIPECONF bpc #defines
  drm/i915: add intel_dp_set_signal_levels
  drm/i915: remove leftover display.update_wm assignment
  drm/i915: check for the PCH when setting pch_transcoder
  drm/i915: Clear the stolen fb before enabling
  drm/i915: Access to snooped system memory through the GTT is incoherent
  drm/i915: Remove stale comment about intel_dp_detect()
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
Merged by Dave Airlie on 2013-01-17 20:34:08 +10:00 in commit b5cc6c0387.
32 changed files with 1576 additions and 1115 deletions.

--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -602,7 +602,6 @@ static int intel_gtt_init(void)
 		iounmap(intel_private.registers);
 		return -ENOMEM;
 	}
 
-	intel_private.base.gtt = intel_private.gtt;
 
 	global_cache_flush();	/* FIXME: ? */

--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
-{
-	return hole_node->start + hole_node->size;
-}
-
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
-{
-	struct drm_mm_node *next_node =
-		list_entry(hole_node->node_list.next, struct drm_mm_node,
-			   node_list);
-
-	return next_node->start;
-}
-
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	unsigned long adj_start = hole_start;
 	unsigned long adj_end = hole_end;
 
-	BUG_ON(!hole_node->hole_follows || node->allocated);
+	BUG_ON(node->allocated);
 
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
-	if (node->start + node->size < hole_end) {
+	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
 }
 
+struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+					unsigned long start,
+					unsigned long size,
+					bool atomic)
+{
+	struct drm_mm_node *hole, *node;
+	unsigned long end = start + size;
+	unsigned long hole_start;
+	unsigned long hole_end;
+
+	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+		if (hole_start > start || hole_end < end)
+			continue;
+
+		node = drm_mm_kmalloc(mm, atomic);
+		if (unlikely(node == NULL))
+			return NULL;
+
+		node->start = start;
+		node->size = size;
+		node->mm = mm;
+		node->allocated = 1;
+
+		INIT_LIST_HEAD(&node->hole_stack);
+		list_add(&node->node_list, &hole->node_list);
+
+		if (start == hole_start) {
+			hole->hole_follows = 0;
+			list_del_init(&hole->hole_stack);
+		}
+
+		node->hole_follows = 0;
+		if (end != hole_end) {
+			list_add(&node->hole_stack, &mm->hole_stack);
+			node->hole_follows = 1;
+		}
+
+		return node;
+	}
+
+	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
+	return NULL;
+}
+EXPORT_SYMBOL(drm_mm_create_block);
+
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
@@ -251,7 +282,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	BUG_ON(node->start + node->size > end);
 
 	node->hole_follows = 0;
-	if (node->start + node->size < hole_end) {
+	if (__drm_mm_hole_node_start(node) < hole_end) {
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
@@ -325,12 +356,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
 	if (node->hole_follows) {
-		BUG_ON(drm_mm_hole_node_start(node)
-				== drm_mm_hole_node_end(node));
+		BUG_ON(__drm_mm_hole_node_start(node) ==
+		       __drm_mm_hole_node_end(node));
 		list_del(&node->hole_stack);
 	} else
-		BUG_ON(drm_mm_hole_node_start(node)
-				!= drm_mm_hole_node_end(node));
+		BUG_ON(__drm_mm_hole_node_start(node) !=
+		       __drm_mm_hole_node_end(node));
+
 
 	if (!prev_node->hole_follows) {
 		prev_node->hole_follows = 1;
@@ -388,6 +420,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
 	unsigned long best_size;
 
 	BUG_ON(mm->scanned_blocks);
@@ -395,17 +429,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		unsigned long adj_start = drm_mm_hole_node_start(entry);
-		unsigned long adj_end = drm_mm_hole_node_end(entry);
-
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
 				continue;
 		}
 
-		BUG_ON(!entry->hole_follows);
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -432,6 +462,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 {
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
+	unsigned long adj_start;
+	unsigned long adj_end;
 	unsigned long best_size;
 
 	BUG_ON(mm->scanned_blocks);
@@ -439,13 +471,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
-		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
-			start : drm_mm_hole_node_start(entry);
-		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
-			end : drm_mm_hole_node_end(entry);
-
-		BUG_ON(!entry->hole_follows);
+	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+		if (adj_start < start)
+			adj_start = start;
+		if (adj_end > end)
+			adj_end = end;
 
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
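
Aside: the drm_mm changes above promote the hole bookkeeping into shared infrastructure (__drm_mm_hole_node_start()/__drm_mm_hole_node_end() and the drm_mm_for_each_hole() iterator) and add drm_mm_create_block() so a driver can claim a range whose placement was decided elsewhere, e.g. by the BIOS. A minimal usage sketch, not code from this series; the names mm, offset and size are hypothetical:

	/* Reserve a firmware-placed range inside an initialized drm_mm. */
	struct drm_mm_node *node;

	node = drm_mm_create_block(&mm, offset, size, false);
	if (node == NULL)
		return -ENOSPC;	/* range overlaps an existing allocation */

	/* ... and hand it back to the allocator when done: */
	drm_mm_put_block(node);

i915 uses exactly this pattern to mark objects preallocated out of stolen memory as occupied when the global GTT is set up (see the i915_gem_gtt.c hunks below).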

--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -102,7 +102,7 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -124,6 +124,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->gtt_space != NULL)
 		seq_printf(m, " (gtt offset: %08x, size: %08x)",
 			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->stolen)
+		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
 	if (obj->pin_mappable || obj->fault_mappable) {
 		char s[3], *t = s;
 		if (obj->pin_mappable)
@@ -387,7 +389,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
 				 struct intel_ring_buffer *ring)
 {
 	if (ring->get_seqno) {
-		seq_printf(m, "Current sequence (%s): %d\n",
+		seq_printf(m, "Current sequence (%s): %u\n",
 			   ring->name, ring->get_seqno(ring, false));
 	}
 }
@@ -544,11 +546,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
-	const volatile u32 __iomem *hws;
+	const u32 *hws;
 	int i;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
-	hws = (volatile u32 __iomem *)ring->status_page.page_addr;
+	hws = ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -608,7 +610,7 @@ static void print_error_buffers(struct seq_file *m,
 	seq_printf(m, "%s [%d]:\n", name, count);
 
 	while (count--) {
-		seq_printf(m, "  %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
+		seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
 			   err->gtt_offset,
 			   err->size,
 			   err->read_domains,
@@ -841,6 +843,77 @@ static const struct file_operations i915_error_state_fops = {
 	.release = i915_error_state_release,
 };
 
+static ssize_t
+i915_next_seqno_read(struct file *filp,
+		     char __user *ubuf,
+		     size_t max,
+		     loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	len = snprintf(buf, sizeof(buf),
+		       "next_seqno : 0x%x\n",
+		       dev_priv->next_seqno);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_next_seqno_write(struct file *filp,
+		      const char __user *ubuf,
+		      size_t cnt,
+		      loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	char buf[20];
+	u32 val = 1;
+	int ret;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		ret = kstrtouint(buf, 0, &val);
+		if (ret < 0)
+			return ret;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_set_seqno(dev, val);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret ?: cnt;
+}
+
+static const struct file_operations i915_next_seqno_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_next_seqno_read,
+	.write = i915_next_seqno_write,
+	.llseek = default_llseek,
+};
+
 static int i915_rstdby_delays(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1551,7 +1624,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 		return 0;
 	}
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
 	if (ret)
 		return ret;
 
@@ -1580,7 +1653,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
 		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
 
-	mutex_unlock(&dev->mode_config.mutex);
+	mutex_unlock(&dev_priv->dpio_lock);
 
 	return 0;
 }
@@ -2105,6 +2178,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 	if (ret)
 		return ret;
 
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_next_seqno",
+				  &i915_next_seqno_fops);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
@@ -2128,6 +2207,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
 				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
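
Aside: the new i915_next_seqno debugfs file makes the wrap handling testable; reading reports the next seqno and writing goes through i915_gem_set_seqno(), so a wrap can be provoked on demand instead of waiting for four billion requests. A userspace sketch, with the assumption that debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/dri/0/i915_next_seqno";
		const char *val = "0xfffffff0";	/* kstrtouint(buf, 0, ...) accepts hex */
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0)
			perror(path);
		if (fd >= 0)
			close(fd);
		return 0;
	}

Note that writing 0 is rejected with -EINVAL, since seqno 0 is reserved for "no seqno".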

--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1297,19 +1297,21 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_vga_switcheroo;
 
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_gem_stolen;
+
+	/* Important: The output setup functions called by modeset_init need
+	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
 
 	ret = i915_gem_init(dev);
 	if (ret)
-		goto cleanup_gem_stolen;
-
-	intel_modeset_gem_init(dev);
+		goto cleanup_irq;
 
 	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
-	ret = drm_irq_install(dev);
-	if (ret)
-		goto cleanup_gem;
+	intel_modeset_gem_init(dev);
 
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1317,7 +1319,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	ret = intel_fbdev_init(dev);
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_gem;
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	intel_hpd_init(dev);
+
+	/*
+	 * Some ports require correctly set-up hpd registers for detection to
+	 * work properly (leading to ghost connected connector status), e.g. VGA
+	 * on gm45. Hence we can only set up the initial fbdev config after hpd
+	 * irqs are fully enabled. Now we should scan for the initial config
+	 * only once hotplug handling is enabled, but due to screwed-up locking
+	 * around kms/fbdev init we can't protect the fdbev initial config
+	 * scanning against hotplug events. Hence do this first and ignore the
+	 * tiny window where we will loose hotplug notifactions.
+	 */
+	intel_fbdev_initial_config(dev);
+
+	/* Only enable hotplug handling once the fbdev is fully set up. */
+	dev_priv->enable_hotplug_processing = true;
 
 	drm_kms_helper_poll_init(dev);
 
@@ -1326,13 +1346,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	return 0;
 
-cleanup_irq:
-	drm_irq_uninstall(dev);
 cleanup_gem:
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_cleanup_ringbuffer(dev);
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
+cleanup_irq:
+	drm_irq_uninstall(dev);
 cleanup_gem_stolen:
 	i915_gem_cleanup_stolen(dev);
 cleanup_vga_switcheroo:
@@ -1582,7 +1602,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	spin_lock_init(&dev_priv->rps.lock);
-	spin_lock_init(&dev_priv->dpio_lock);
+	mutex_init(&dev_priv->dpio_lock);
 
 	mutex_init(&dev_priv->rps.hw_lock);
 
@@ -1614,9 +1634,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_opregion_init(dev);
 	acpi_video_register();
 
-	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
-		    (unsigned long) dev);
-
 	if (IS_GEN5(dev))
 		intel_gpu_ips_init(dev_priv);
 
@@ -1723,9 +1740,6 @@ int i915_driver_unload(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 	i915_gem_cleanup_aliasing_ppgtt(dev);
 	i915_gem_cleanup_stolen(dev);
-	drm_mm_takedown(&dev_priv->mm.stolen);
-
-	intel_cleanup_overlay(dev);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		i915_free_hws(dev);
@@ -1738,6 +1752,10 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_teardown_mchbar(dev);
 
 	destroy_workqueue(dev_priv->wq);
+	pm_qos_remove_request(&dev_priv->pm_qos);
+
+	if (dev_priv->slab)
+		kmem_cache_destroy(dev_priv->slab);
 
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);

--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -565,6 +565,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		intel_modeset_init_hw(dev);
 		intel_modeset_setup_hw_state(dev, false);
 		drm_irq_install(dev);
+		intel_hpd_init(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -870,6 +871,7 @@ int i915_reset(struct drm_device *dev)
 
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
+		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
 	}

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
 
+#include <uapi/drm/i915_drm.h>
+
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
 #include <linux/backlight.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/pm_qos.h>
 
 /* General customization:
  */
@@ -83,7 +86,12 @@ enum port {
 };
 #define port_name(p) ((p) + 'A')
 
-#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+	(I915_GEM_DOMAIN_RENDER | \
+	 I915_GEM_DOMAIN_SAMPLER | \
+	 I915_GEM_DOMAIN_COMMAND | \
+	 I915_GEM_DOMAIN_INSTRUCTION | \
+	 I915_GEM_DOMAIN_VERTEX)
 
 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
@@ -101,6 +109,19 @@ struct intel_pch_pll {
 };
 #define I915_NUM_PLLS 2
 
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+	uint32_t	tu;
+	uint32_t	gmch_m;
+	uint32_t	gmch_n;
+	uint32_t	link_m;
+	uint32_t	link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+			    int pixel_clock, int link_clock,
+			    struct intel_link_m_n *m_n);
+
 struct intel_ddi_plls {
 	int spll_refcount;
 	int wrpll1_refcount;
@@ -276,6 +297,7 @@ struct drm_i915_display_funcs {
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
+	void (*hpd_irq_setup)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -577,6 +599,9 @@ struct intel_gen6_power_mgmt {
 	struct mutex hw_lock;
 };
 
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;
+
 struct intel_ilk_power_mgmt {
 	u8 cur_delay;
 	u8 min_delay;
@@ -619,6 +644,7 @@ struct intel_l3_parity {
 
 typedef struct drm_i915_private {
 	struct drm_device *dev;
+	struct kmem_cache *slab;
 
 	const struct intel_device_info *info;
 
@@ -633,10 +659,11 @@ typedef struct drm_i915_private {
 	/** forcewake_count is protected by gt_lock */
 	unsigned forcewake_count;
 	/** gt_lock is also taken in irq contexts. */
-	struct spinlock gt_lock;
+	spinlock_t gt_lock;
 
 	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
 	 * controller on different i2c buses. */
 	struct mutex gmbus_mutex;
@@ -646,9 +673,11 @@ typedef struct drm_i915_private {
 	 */
 	uint32_t gpio_mmio_base;
 
+	wait_queue_head_t gmbus_wait_queue;
+
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer ring[I915_NUM_RINGS];
-	uint32_t next_seqno;
+	uint32_t last_seqno, next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
@@ -658,8 +687,11 @@ typedef struct drm_i915_private {
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+	struct pm_qos_request pm_qos;
+
 	/* DPIO indirect register protection */
-	spinlock_t dpio_lock;
+	struct mutex dpio_lock;
 
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 pipestat[2];
@@ -669,6 +701,7 @@ typedef struct drm_i915_private {
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
+	bool enable_hotplug_processing;
 
 	int num_pipe;
 	int num_pch_pll;
@@ -710,7 +743,6 @@ typedef struct drm_i915_private {
 	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-	unsigned int lvds_val; /* used for checking LVDS channel mode */
 	struct {
 		int rate;
 		int lanes;
@@ -771,6 +803,10 @@ typedef struct drm_i915_private {
 		unsigned long gtt_start;
 		unsigned long gtt_mappable_end;
 		unsigned long gtt_end;
+		unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+		/** "Graphics Stolen Memory" holds the global PTEs */
+		void __iomem *gsm;
 
 		struct io_mapping *gtt_mapping;
 		phys_addr_t gtt_base_addr;
@@ -943,6 +979,8 @@ enum i915_cache_level {
 	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+
 struct drm_i915_gem_object_ops {
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
@@ -968,6 +1006,8 @@ struct drm_i915_gem_object {
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
+	/** Stolen memory for this object, instead of being backed by shmem. */
+	struct drm_mm_node *stolen;
 	struct list_head gtt_list;
 
 	/** This object's place on the active/inactive lists */
@@ -1138,7 +1178,7 @@ struct drm_i915_gem_request {
 struct drm_i915_file_private {
 	struct {
-		struct spinlock lock;
+		spinlock_t lock;
 		struct list_head request_list;
 	} mm;
 	struct idr context_idr;
@@ -1224,6 +1264,8 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define HAS_DDI(dev)		(IS_HASWELL(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -1320,6 +1362,7 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_reset(struct drm_device *dev);
 
@@ -1388,12 +1431,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
 				     bool map_and_fenceable,
@@ -1451,8 +1497,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
@@ -1559,10 +1605,9 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
-			      unsigned long start,
-			      unsigned long mappable_end,
-			      unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+			       unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
 void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
@@ -1582,9 +1627,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
 /* i915_gem_tiling.c */
+inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj->tiling_mode != I915_TILING_NONE;
+}
+
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
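
Aside: I915_GEM_GPU_DOMAINS is now spelled out as the union of the five GPU domains rather than "everything except CPU and GTT", which stays correct if new non-GPU domains are ever added. A trivial sketch of how such a mask is typically used; the helper name is hypothetical, not part of this series:

	/* Hypothetical: does this object have a pending GPU write domain? */
	static inline bool obj_has_gpu_write(struct drm_i915_gem_object *obj)
	{
		return obj->base.write_domain & I915_GEM_GPU_DOMAINS;
	}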

--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -163,8 +163,8 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
 		return -ENODEV;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_init_global_gtt(dev, args->gtt_start,
-				 args->gtt_end, args->gtt_end);
+	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+				  args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -192,6 +192,18 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	kmem_cache_free(dev_priv->slab, obj);
+}
+
 static int
 i915_gem_create(struct drm_file *file,
 		struct drm_device *dev,
@@ -215,7 +227,7 @@ i915_gem_create(struct drm_file *file,
 	if (ret) {
 		drm_gem_object_release(&obj->base);
 		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-		kfree(obj);
+		i915_gem_object_free(obj);
 		return ret;
 	}
 
@@ -259,14 +271,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 			       args->size, &args->handle);
 }
 
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj->tiling_mode != I915_TILING_NONE;
-}
-
 static inline int
 __copy_to_user_swizzled(char __user *cpu_vaddr,
 			const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +411,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
 	struct scatterlist *sg;
@@ -469,7 +472,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if (ret == 0)
 			goto next_page;
 
-		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
@@ -502,12 +504,6 @@ next_page:
 out:
 	i915_gem_object_unpin_pages(obj);
 
-	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-	}
-
 	return ret;
 }
 
@@ -838,12 +834,13 @@ out:
 	i915_gem_object_unpin_pages(obj);
 
 	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-		/* and flush dirty cachelines in case the object isn't in the cpu write
-		 * domain anymore. */
-		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/*
+		 * Fixup: Flush cpu caches in case we didn't flush the dirty
+		 * cachelines in-line while writing and the object moved
+		 * out of the cpu write domain while we've dropped the lock.
+		 */
+		if (!needs_clflush_after &&
+		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
 			i915_gem_chipset_flush(dev);
 		}
@@ -1344,6 +1341,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
+	/* Access to snoopable pages through the GTT is incoherent. */
+	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	/* Now bind it into the GTT if needed */
 	ret = i915_gem_object_pin(obj, 0, true, false);
 	if (ret)
@@ -1933,30 +1936,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 }
 
 static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	int ret, i, j;
 
-	/* The hardware uses various monotonic 32-bit counters, if we
-	 * detect that they will wraparound we need to idle the GPU
-	 * and reset those counters.
-	 */
-	ret = 0;
+	/* Carefully retire all requests without writing to the rings */
 	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-			ret |= ring->sync_seqno[j] != 0;
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
 	}
-	if (ret == 0)
-		return ret;
-
-	ret = i915_gpu_idle(dev);
-	if (ret)
-		return ret;
-
 	i915_gem_retire_requests(dev);
+
+	/* Finally reset hw state */
 	for_each_ring(ring, dev_priv, i) {
+		intel_ring_init_seqno(ring, seqno);
+
 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
 			ring->sync_seqno[j] = 0;
 	}
@@ -1964,6 +1961,32 @@
 	return 0;
 }
 
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* HWS page needs to be set less than what we
+	 * will inject to ring
+	 */
+	ret = i915_gem_init_seqno(dev, seqno - 1);
+	if (ret)
+		return ret;
+
+	/* Carefully set the last_seqno value so that wrap
+	 * detection still works
+	 */
+	dev_priv->next_seqno = seqno;
+	dev_priv->last_seqno = seqno - 1;
+	if (dev_priv->last_seqno == 0)
+		dev_priv->last_seqno--;
+
+	return 0;
+}
+
 int
 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
@@ -1971,14 +1994,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
 	/* reserve 0 for non-seqno */
 	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_handle_seqno_wrap(dev);
+		int ret = i915_gem_init_seqno(dev, 0);
 		if (ret)
 			return ret;
 
 		dev_priv->next_seqno = 1;
 	}
 
-	*seqno = dev_priv->next_seqno++;
+	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
 	return 0;
 }
 
@@ -2648,7 +2671,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 	case 4: i965_write_fence_reg(dev, reg, obj); break;
 	case 3: i915_write_fence_reg(dev, reg, obj); break;
 	case 2: i830_write_fence_reg(dev, reg, obj); break;
-	default: break;
+	default: BUG();
 	}
 }
 
@@ -2823,7 +2846,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
 
 	/* On non-LLC machines we have to be careful when putting differing
 	 * types of snoopable memory together to avoid the prefetcher
-	 * crossing memory domains and dieing.
+	 * crossing memory domains and dying.
 	 */
 	if (HAS_LLC(dev))
 		return true;
@@ -3698,14 +3721,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 {
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
-	u32 mask;
+	gfp_t mask;
 
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return NULL;
 
 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-		kfree(obj);
+		i915_gem_object_free(obj);
 		return NULL;
 	}
 
@@ -3777,6 +3800,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	obj->pages_pin_count = 0;
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
+	i915_gem_object_release_stolen(obj);
 
 	BUG_ON(obj->pages);
 
@@ -3787,7 +3811,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
 	kfree(obj->bit_17);
-	kfree(obj);
+	i915_gem_object_free(obj);
 }
 
 int
@@ -3883,8 +3907,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
 	if (IS_GEN6(dev))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else
+	else if (IS_GEN7(dev))
 		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+	else
+		BUG();
 }
 
 static bool
@@ -3919,6 +3945,8 @@ i915_gem_init_hw(struct drm_device *dev)
 
 	i915_gem_init_swizzling(dev);
 
+	dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;
+
 	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
 		return ret;
@@ -3935,8 +3963,6 @@ i915_gem_init_hw(struct drm_device *dev)
 			goto cleanup_bsd_ring;
 	}
 
-	dev_priv->next_seqno = 1;
-
 	/*
 	 * XXX: There was some w/a described somewhere suggesting loading
 	 * contexts before PPGTT.
@@ -3953,58 +3979,13 @@ cleanup_render_ring:
 	return ret;
 }
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-	if (i915_enable_ppgtt >= 0)
-		return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-		return false;
-#endif
-
-	return true;
-}
-
 int i915_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
 	int ret;
 
-	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
 	mutex_lock(&dev->struct_mutex);
-	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
-		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-		i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
-
-		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	} else {
-		/* Let GEM Manage all of the aperture.
-		 *
-		 * However, leave one page at the end still bound to the scratch
-		 * page.  There are a number of places where the hardware
-		 * apparently prefetches past the end of the object, and we've
-		 * seen multiple hangs with the GPU head pointer stuck in a
-		 * batchbuffer bound at the last page of the aperture.  One page
-		 * should be enough to keep any prefetching inside of the
-		 * aperture.
-		 */
-		i915_gem_init_global_gtt(dev, 0, mappable_size,
-					 gtt_size);
-	}
-
+	i915_gem_init_global_gtt(dev);
 	ret = i915_gem_init_hw(dev);
 	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
@@ -4105,8 +4086,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
 void
 i915_gem_load(struct drm_device *dev)
 {
-	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->slab =
+		kmem_cache_create("i915_gem_object",
+				  sizeof(struct drm_i915_gem_object), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
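
Aside: two details above make the wrap handling robust. i915_seqno_passed() compares seqnos with a signed 32-bit subtraction, which stays correct across the 32-bit wrap as long as the two values are less than 2^31 apart, and i915_gem_init_hw() now starts next_seqno at (u32)~0 - 0x1000 so every boot exercises a wrap after roughly 4096 requests. A standalone sketch of the comparison trick (plain userspace C mirroring the kernel inline, not code from this series):

	#include <assert.h>
	#include <stdint.h>

	static int seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		uint32_t before = 0xfffffff0;	/* shortly after the new init value */
		uint32_t after  = 0x00000010;	/* same counter, post-wrap */

		/* unsigned 0x10 - 0xfffffff0 == 0x20: a small positive delta */
		assert(seqno_passed(after, before));
		assert(!seqno_passed(before, after));
		return 0;
	}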

--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -281,8 +281,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	if (IS_ERR(attach))
 		return ERR_CAST(attach);
 
-
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL) {
 		ret = -ENOMEM;
 		goto fail_detach;
@@ -290,7 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
 	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	if (ret) {
-		kfree(obj);
+		i915_gem_object_free(obj);
 		goto fail_detach;
 	}
 

--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -150,17 +150,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 			  reloc->write_domain);
 		return ret;
 	}
-	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
-		     reloc->write_domain != target_obj->pending_write_domain)) {
-		DRM_DEBUG("Write domain conflict: "
-			  "obj %p target %d offset %d "
-			  "new %08x old %08x\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->write_domain,
-			  target_obj->pending_write_domain);
-		return ret;
-	}
 
 	target_obj->pending_read_domains |= reloc->read_domains;
 	target_obj->pending_write_domain |= reloc->write_domain;
@@ -601,45 +590,12 @@ err:
 	return ret;
 }
 
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
-{
-	u32 plane, flip_mask;
-	int ret;
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-
-	for (plane = 0; flips >> plane; plane++) {
-		if (((flips >> plane) & 1) == 0)
-			continue;
-
-		if (plane)
-			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-		else
-			flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-		ret = intel_ring_begin(ring, 2);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
-	}
-
-	return 0;
-}
-
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				struct list_head *objects)
 {
 	struct drm_i915_gem_object *obj;
 	uint32_t flush_domains = 0;
-	uint32_t flips = 0;
 	int ret;
 
 	list_for_each_entry(obj, objects, exec_list) {
@@ -650,18 +606,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
 			i915_gem_clflush_object(obj);
 
-		if (obj->base.pending_write_domain)
-			flips |= atomic_read(&obj->pending_flip);
-
 		flush_domains |= obj->base.write_domain;
 	}
 
-	if (flips) {
-		ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
-		if (ret)
-			return ret;
-	}
-
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		i915_gem_chipset_flush(ring->dev);
 

--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -282,7 +282,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 	uint32_t pd_offset;
 	struct intel_ring_buffer *ring;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	uint32_t __iomem *pd_addr;
+	gtt_pte_t __iomem *pd_addr;
 	uint32_t pd_entry;
 	int i;
 
@@ -290,7 +290,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 		return;
 
-	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 
@@ -367,7 +367,7 @@ static void i915_ggtt_clear_range(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	gtt_pte_t scratch_pte;
-	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry;
 	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
 	int i;
 
@@ -432,7 +432,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
 	struct scatterlist *sg = st->sgl;
 	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
 	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	gtt_pte_t __iomem *gtt_entries =
+		(gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry;
 	int unused, i = 0;
 	unsigned int len, m = 0;
 	dma_addr_t addr;
@@ -525,26 +526,103 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 	}
 }
 
-void i915_gem_init_global_gtt(struct drm_device *dev,
-			      unsigned long start,
-			      unsigned long mappable_end,
-			      unsigned long end)
+void i915_gem_setup_global_gtt(struct drm_device *dev,
+			       unsigned long start,
+			       unsigned long mappable_end,
+			       unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_mm_node *entry;
+	struct drm_i915_gem_object *obj;
+	unsigned long hole_start, hole_end;
 
-	/* Substract the guard page ... */
+	/* Subtract the guard page ... */
 	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
 	if (!HAS_LLC(dev))
 		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
+	/* Mark any preallocated objects as occupied */
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
+			      obj->gtt_offset, obj->base.size);
+
+		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
+		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
+						     obj->gtt_offset,
+						     obj->base.size,
+						     false);
+		obj->has_global_gtt_mapping = 1;
+	}
+
 	dev_priv->mm.gtt_start = start;
 	dev_priv->mm.gtt_mappable_end = mappable_end;
 	dev_priv->mm.gtt_end = end;
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
-	/* ... but ensure that we clear the entire range. */
-	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	/* Clear any non-preallocated blocks */
+	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+			     hole_start, hole_end) {
+		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
+			      hole_start, hole_end);
+		i915_ggtt_clear_range(dev,
+				      hole_start / PAGE_SIZE,
+				      (hole_end-hole_start) / PAGE_SIZE);
+	}
+
+	/* And finally clear the reserved guard page */
+	i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+}
+
+static bool
+intel_enable_ppgtt(struct drm_device *dev)
+{
+	if (i915_enable_ppgtt >= 0)
+		return i915_enable_ppgtt;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+		return false;
+#endif
+
+	return true;
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long gtt_size, mappable_size;
+	int ret;
+
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
+		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
+		 * aperture accordingly when using aliasing ppgtt. */
+		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+
+		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+
+		ret = i915_gem_init_aliasing_ppgtt(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return;
+		}
+	} else {
+		/* Let GEM Manage all of the aperture.
+		 *
+		 * However, leave one page at the end still bound to the scratch
+		 * page.  There are a number of places where the hardware
+		 * apparently prefetches past the end of the object, and we've
+		 * seen multiple hangs with the GPU head pointer stuck in a
+		 * batchbuffer bound at the last page of the aperture.  One page
+		 * should be enough to keep any prefetching inside of the
+		 * aperture.
+		 */
+		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+	}
 }
 
 static int setup_scratch_page(struct drm_device *dev)
@@ -674,9 +752,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 		goto err_out;
 	}
 
-	dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
-					   dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
-	if (!dev_priv->mm.gtt->gtt) {
+	dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr,
+				      dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+	if (!dev_priv->mm.gsm) {
 		DRM_ERROR("Failed to map the gtt page table\n");
 		teardown_scratch_page(dev);
 		ret = -ENOMEM;
@@ -700,7 +778,7 @@ err_out:
 void i915_gem_gtt_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	iounmap(dev_priv->mm.gtt->gtt);
+	iounmap(dev_priv->mm.gsm);
 	teardown_scratch_page(dev);
 	if (INTEL_INFO(dev)->gen < 6)
 		intel_gmch_remove();
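
Aside: with the GTT page table now reachable through dev_priv->mm.gsm, a PTE slot is plain array indexing off the GSM mapping, as the gen6_ggtt_bind_object() and i915_ggtt_clear_range() hunks above show. A hypothetical helper (not part of this series) spelling out the arithmetic:

	/* Sketch: PTE slot for a GTT offset, given the ioremapped GSM. */
	static gtt_pte_t __iomem *gtt_pte_slot(struct drm_i915_private *dev_priv,
					       unsigned long gtt_offset)
	{
		return (gtt_pte_t __iomem *)dev_priv->mm.gsm +
			(gtt_offset >> PAGE_SHIFT);
	}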

View file

@ -42,85 +42,73 @@
* for is a boon. * for is a boon.
*/ */
#define PTE_ADDRESS_MASK 0xfffff000 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
/**
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
* a physical one
* @dev: drm device
* @offset: address to translate
*
* Some chip functions require allocations from stolen space and need the
* physical address of the memory in question.
*/
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev; struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base; u32 base;
#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory /* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so compute the base by subtracting the stolen memory * is unreliable, so on those compute the base by subtracting the
* from the Top of Low Usable DRAM which is where the BIOS places * stolen memory from the Top of Low Usable DRAM which is where the
* the graphics stolen memory. * BIOS places the graphics stolen memory.
*
* On gen2, the layout is slightly different with the Graphics Segment
* immediately following Top of Memory (or Top of Usable DRAM). Note
* it appears that TOUD is only reported by 865g, so we just use the
* top of memory as determined by the e820 probe.
*
* XXX gen2 requires an unavailable symbol and 945gm fails with
* its value of TOLUD.
*/ */
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { base = 0;
/* top 32bits are reserved = 0 */ if (INTEL_INFO(dev)->gen >= 6) {
/* Read Base Data of Stolen Memory Register (BDSM) directly.
* Note that there is also a MCHBAR miror at 0x1080c0 or
* we could use device 2:0x5c instead.
*/
pci_read_config_dword(pdev, 0xB0, &base);
base &= ~4095; /* lower bits used for locking register */
} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base); pci_read_config_dword(pdev, 0xA4, &base);
} else { #if 0
/* XXX presume 8xx is the same as i915 */ } else if (IS_GEN3(dev)) {
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
}
#else
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
u16 val;
pci_read_config_word(pdev, 0xb0, &val);
base = val >> 4 << 20;
} else {
u8 val; u8 val;
/* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val); pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27; base = val >> 3 << 27;
} base -= dev_priv->mm.gtt->stolen_size;
base -= dev_priv->mm.gtt->stolen_size; } else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif #endif
}
return base + offset; return base;
} }
static void i915_warn_stolen(struct drm_device *dev) static int i915_setup_compression(struct drm_device *dev, int size)
{
DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
/* Just in case the BIOS is doing something questionable. */ /* Try to over-allocate to reduce reallocations and fragmentation */
intel_disable_fbc(dev); compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size <<= 1, 4096, 0);
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); if (!compressed_fb)
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size >>= 1, 4096, 0);
if (compressed_fb) if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb) if (!compressed_fb)
goto err; goto err;
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); if (HAS_PCH_SPLIT(dev))
if (!cfb_base) I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
goto err_fb; else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { } else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0); 4096, 4096, 0);
if (compressed_llb) if (compressed_llb)
@@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
if (!compressed_llb) if (!compressed_llb)
goto err_fb; goto err_fb;
ll_base = i915_stolen_to_phys(dev, compressed_llb->start); dev_priv->compressed_llb = compressed_llb;
if (!ll_base)
goto err_llb;
}
dev_priv->cfb_size = size; I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
dev_priv->compressed_fb = compressed_fb; dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev)) dev_priv->cfb_size = size;
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
cfb_base, ll_base, size >> 20); size);
return;
return 0;
err_llb:
drm_mm_put_block(compressed_llb);
err_fb: err_fb:
drm_mm_put_block(compressed_fb); drm_mm_put_block(compressed_fb);
err: err:
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; return -ENOSPC;
i915_warn_stolen(dev);
} }
static void i915_cleanup_compression(struct drm_device *dev) int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
drm_mm_put_block(dev_priv->compressed_fb); if (dev_priv->mm.stolen_base == 0)
return -ENODEV;
if (size < dev_priv->cfb_size)
return 0;
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
return i915_setup_compression(dev, size);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->cfb_size == 0)
return;
if (dev_priv->compressed_fb)
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->compressed_llb) if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb); drm_mm_put_block(dev_priv->compressed_llb);
dev_priv->cfb_size = 0;
} }
void i915_gem_cleanup_stolen(struct drm_device *dev) void i915_gem_cleanup_stolen(struct drm_device *dev)
{ {
if (I915_HAS_FBC(dev) && i915_powersave) struct drm_i915_private *dev_priv = dev->dev_private;
i915_cleanup_compression(dev);
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
} }
int i915_gem_init_stolen(struct drm_device *dev) int i915_gem_init_stolen(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */ /* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size);
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
/* Leave 1M for line length buffer & misc. */
/* Try to get a 32M buffer... */
if (prealloc_size > (36*1024*1024))
cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
return 0; return 0;
} }
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st;
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
* dma mapping in a single scatterlist.
*/
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return NULL;
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
return NULL;
}
sg = st->sgl;
sg->offset = offset;
sg->length = size;
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
sg_dma_len(sg) = size;
return st;
}
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
BUG();
return -EINVAL;
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
/* Should only be called during free */
sg_free_table(obj->pages);
kfree(obj->pages);
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
};
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return NULL;
if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
goto cleanup;
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size);
if (obj->pages == NULL)
goto cleanup;
obj->has_dma_mapping = true;
obj->pages_pin_count = 1;
obj->stolen = stolen;
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->cache_level = I915_CACHE_NONE;
return obj;
cleanup:
i915_gem_object_free(obj);
return NULL;
}
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
if (dev_priv->mm.stolen_base == 0)
return NULL;
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (stolen)
stolen = drm_mm_get_block(stolen, size, 4096);
if (stolen == NULL)
return NULL;
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
drm_mm_put_block(stolen);
return NULL;
}
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_put_block(obj->stolen);
obj->stolen = NULL;
}
}
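For context, a hypothetical caller of this new API could prefer stolen memory and fall back to an ordinary shmem-backed object. The flow below is illustrative only and not taken from this series (i915_gem_alloc_object() is the driver's usual allocation path):

struct drm_i915_gem_object *obj;

obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
	obj = i915_gem_alloc_object(dev, size);	/* regular shmem backing */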


@@ -396,6 +396,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* we have to maintain this existing ABI... */ /* we have to maintain this existing ABI... */
args->stride = obj->stride; args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode; args->tiling_mode = obj->tiling_mode;
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
sizeof(long), GFP_KERNEL);
}
} else {
kfree(obj->bit_17);
obj->bit_17 = NULL;
}
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
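The kmalloc in the new block above sizes the swizzle bitmap at one bit per page, rounded up to whole longs. A standalone check of the arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12
#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long size = 4UL * 1024 * 1024;		/* a 4MiB object */
	unsigned long pages = size >> PAGE_SHIFT;	/* 1024 pages */

	printf("%lu pages -> %lu longs (%lu bytes)\n", pages,
	       (unsigned long)BITS_TO_LONGS(pages),
	       (unsigned long)(BITS_TO_LONGS(pages) * sizeof(long)));
	return 0;
}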


@@ -287,6 +287,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config; struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder; struct intel_encoder *encoder;
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
return;
mutex_lock(&mode_config->mutex); mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n"); DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -300,9 +304,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev); drm_helper_hpd_irq_event(dev);
} }
/* defined intel_pm.c */
extern spinlock_t mchdev_lock;
static void ironlake_handle_rps_change(struct drm_device *dev) static void ironlake_handle_rps_change(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
@@ -524,6 +525,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work); queue_work(dev_priv->wq, &dev_priv->rps.work);
} }
static void gmbus_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
wake_up_all(&dev_priv->gmbus_wait_queue);
}
static void dp_aux_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
wake_up_all(&dev_priv->gmbus_wait_queue);
}
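Both new helpers deliberately share one waitqueue: the irq side only wakes sleepers, and each sleeper re-checks its own condition (see intel_dp_aux_wait_done() further down). The bare pattern, sketched for kernel context with invented names:

#include <linux/types.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(xfer_wq);
static u32 busy;			/* stands in for a status register */

static void xfer_irq(void)		/* irq handler: just wake everyone */
{
	wake_up_all(&xfer_wq);
}

static bool xfer_wait(void)		/* sleeper: condition re-checked on wake */
{
	return wait_event_timeout(xfer_wq, busy == 0, 10) != 0;
}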
static irqreturn_t valleyview_irq_handler(int irq, void *arg) static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
@@ -533,7 +548,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
unsigned long irqflags; unsigned long irqflags;
int pipe; int pipe;
u32 pipe_stats[I915_MAX_PIPES]; u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
@@ -590,8 +604,8 @@
I915_READ(PORT_HOTPLUG_STAT); I915_READ(PORT_HOTPLUG_STAT);
} }
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
blc_event = true; gmbus_irq_handler(dev);
if (pm_iir & GEN6_PM_DEFERRED_EVENTS) if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir); gen6_queue_rps_work(dev_priv, pm_iir);
@@ -618,8 +632,11 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
(pch_iir & SDE_AUDIO_POWER_MASK) >> (pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT); SDE_AUDIO_POWER_SHIFT);
if (pch_iir & SDE_AUX_MASK)
dp_aux_irq_handler(dev);
if (pch_iir & SDE_GMBUS) if (pch_iir & SDE_GMBUS)
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_HDCP_MASK) if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -662,10 +679,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
SDE_AUDIO_POWER_SHIFT_CPT); SDE_AUDIO_POWER_SHIFT_CPT);
if (pch_iir & SDE_AUX_MASK_CPT) if (pch_iir & SDE_AUX_MASK_CPT)
DRM_DEBUG_DRIVER("AUX channel interrupt\n"); dp_aux_irq_handler(dev);
if (pch_iir & SDE_GMBUS_CPT) if (pch_iir & SDE_GMBUS_CPT)
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); gmbus_irq_handler(dev);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT) if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -703,6 +720,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR); de_iir = I915_READ(DEIIR);
if (de_iir) { if (de_iir) {
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev);
if (de_iir & DE_GSE_IVB) if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev); intel_opregion_gse_intr(dev);
@@ -758,7 +778,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE; int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 de_iir, gt_iir, de_ier, pm_iir;
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
@@ -769,11 +789,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
de_iir = I915_READ(DEIIR); de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR); gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR); pm_iir = I915_READ(GEN6_PMIIR);
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
(!IS_GEN6(dev) || pm_iir == 0))
goto done; goto done;
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
@@ -783,6 +801,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
else else
snb_gt_irq_handler(dev, dev_priv, gt_iir); snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
if (de_iir & DE_GSE) if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev); intel_opregion_gse_intr(dev);
@@ -804,10 +825,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
/* check event from PCH */ /* check event from PCH */
if (de_iir & DE_PCH_EVENT) { if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = I915_READ(SDEIIR);
if (HAS_PCH_CPT(dev)) if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir); cpt_irq_handler(dev, pch_iir);
else else
ibx_irq_handler(dev, pch_iir); ibx_irq_handler(dev, pch_iir);
/* should clear PCH hotplug event before clearing CPU irq */
I915_WRITE(SDEIIR, pch_iir);
} }
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@@ -816,8 +842,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir); gen6_queue_rps_work(dev_priv, pm_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir); I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir); I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -928,6 +952,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
reloc_offset); reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE); memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s); io_mapping_unmap_atomic(s);
} else if (src->stolen) {
unsigned long offset;
offset = dev_priv->mm.stolen_base;
offset += src->stolen->start;
offset += i << PAGE_SHIFT;
memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else { } else {
struct page *page; struct page *page;
void *s; void *s;
@@ -1074,6 +1106,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break; break;
default:
BUG();
} }
} }
@@ -1854,7 +1888,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */ /* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A;
u32 render_irqs; u32 render_irqs;
u32 hotplug_mask; u32 hotplug_mask;
@@ -1888,12 +1923,15 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
hotplug_mask = (SDE_CRT_HOTPLUG_CPT | hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT); SDE_PORTD_HOTPLUG_CPT |
SDE_GMBUS_CPT |
SDE_AUX_MASK_CPT);
} else { } else {
hotplug_mask = (SDE_CRT_HOTPLUG | hotplug_mask = (SDE_CRT_HOTPLUG |
SDE_PORTB_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTC_HOTPLUG |
SDE_PORTD_HOTPLUG | SDE_PORTD_HOTPLUG |
SDE_GMBUS |
SDE_AUX_MASK); SDE_AUX_MASK);
} }
@@ -1924,7 +1962,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB | DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB; DE_PLANEA_FLIP_DONE_IVB |
DE_AUX_CHANNEL_A_IVB;
u32 render_irqs; u32 render_irqs;
u32 hotplug_mask; u32 hotplug_mask;
@@ -1953,7 +1992,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
hotplug_mask = (SDE_CRT_HOTPLUG_CPT | hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT); SDE_PORTD_HOTPLUG_CPT |
SDE_GMBUS_CPT |
SDE_AUX_MASK_CPT);
dev_priv->pch_irq_mask = ~hotplug_mask; dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIIR, I915_READ(SDEIIR));
@@ -1970,7 +2011,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask; u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs; u32 render_irqs;
u16 msid; u16 msid;
@@ -1999,6 +2039,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
msid |= (1<<14); msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
I915_WRITE(VLV_IMR, dev_priv->irq_mask); I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask); I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2007,6 +2050,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
POSTING_READ(VLV_IER); POSTING_READ(VLV_IER);
i915_enable_pipestat(dev_priv, 0, pipestat_enable); i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable); i915_enable_pipestat(dev_priv, 1, pipestat_enable);
I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2027,6 +2071,15 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif #endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
return 0;
}
static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
/* Note HDMI and DP share bits */ /* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN; hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2044,8 +2097,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
} }
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
return 0;
} }
static void valleyview_irq_uninstall(struct drm_device *dev) static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -2275,6 +2326,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT; I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) { if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
/* Enable in IER... */ /* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */ /* and unmask in IMR */
@@ -2285,8 +2339,18 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask); I915_WRITE(IER, enable_mask);
POSTING_READ(IER); POSTING_READ(IER);
intel_opregion_enable_asle(dev);
return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
if (I915_HAS_HOTPLUG(dev)) { if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); hotplug_en = I915_READ(PORT_HOTPLUG_EN);
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN; hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2307,10 +2371,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
} }
intel_opregion_enable_asle(dev);
return 0;
} }
static irqreturn_t i915_irq_handler(int irq, void *arg) static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2470,7 +2530,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev) static int i965_irq_postinstall(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
u32 enable_mask; u32 enable_mask;
u32 error_mask; u32 error_mask;
@@ -2491,6 +2550,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0; dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0; dev_priv->pipestat[1] = 0;
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
/* /*
* Enable some error detection, note the instruction error mask * Enable some error detection, note the instruction error mask
@@ -2511,6 +2571,19 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask); I915_WRITE(IER, enable_mask);
POSTING_READ(IER); POSTING_READ(IER);
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
intel_opregion_enable_asle(dev);
return 0;
}
static void i965_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
/* Note HDMI and DP share hotplug bits */ /* Note HDMI and DP share hotplug bits */
hotplug_en = 0; hotplug_en = 0;
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
@@ -2545,10 +2618,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
/* Ignore TV since it's buggy */ /* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
intel_opregion_enable_asle(dev);
return 0;
} }
static irqreturn_t i965_irq_handler(int irq, void *arg) static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2644,6 +2713,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (blc_event || (iir & I915_ASLE_INTERRUPT)) if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev); intel_opregion_asle_intr(dev);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
/* With MSI, interrupts are only generated when iir /* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got * transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then * set while we were handling the existing iir bits, then
@@ -2699,6 +2771,11 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
@@ -2719,7 +2796,8 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = valleyview_irq_uninstall; dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank;
} else if (IS_IVYBRIDGE(dev)) { dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */ /* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2727,14 +2805,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_uninstall = ironlake_irq_uninstall; dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank; dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank; dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (IS_HASWELL(dev)) {
/* Share interrupts handling with IVB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) { } else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2753,13 +2823,23 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall; dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler; dev->driver->irq_handler = i915_irq_handler;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else { } else {
dev->driver->irq_preinstall = i965_irq_preinstall; dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall; dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall; dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler; dev->driver->irq_handler = i965_irq_handler;
dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
} }
dev->driver->enable_vblank = i915_enable_vblank; dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank; dev->driver->disable_vblank = i915_disable_vblank;
} }
} }
void intel_hpd_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
}
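The new hook follows the usual i915 display-vtable shape: pick an implementation once in intel_irq_init(), guard against NULL, dispatch through the pointer. A self-contained illustration of the dispatch (not driver code):

#include <stdio.h>

struct display_funcs {
	void (*hpd_irq_setup)(void);	/* left NULL on gens without hotplug */
};

static void i965_hpd(void) { puts("i965 hotplug enabled"); }

int main(void)
{
	struct display_funcs display = { .hpd_irq_setup = i965_hpd };

	if (display.hpd_irq_setup)
		display.hpd_irq_setup();
	return 0;
}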


@@ -142,6 +142,7 @@
#define VGA_MSR_CGA_MODE (1<<0) #define VGA_MSR_CGA_MODE (1<<0)
#define VGA_SR_INDEX 0x3c4 #define VGA_SR_INDEX 0x3c4
#define SR01 1
#define VGA_SR_DATA 0x3c5 #define VGA_SR_DATA 0x3c5
#define VGA_AR_INDEX 0x3c0 #define VGA_AR_INDEX 0x3c0
@@ -940,23 +941,6 @@
#define DPLL_LOCK_VLV (1<<15) #define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) #define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
#define SR01_SCREEN_OFF (1<<5)
#define PPCR 0x61204
#define PPCR_ON (1<<0)
#define DVOB 0x61140
#define DVOB_ON (1<<31)
#define DVOC 0x61160
#define DVOC_ON (1<<31)
#define LVDS 0x61180
#define LVDS_ON (1<<31)
/* Scratch pad debug 0 reg:
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/* /*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -1893,8 +1877,6 @@
#define PFIT_SCALING_PILLAR (2 << 26) #define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26) #define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234 #define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
/* Pre-965 */ /* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20 #define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000 #define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -2668,11 +2650,11 @@
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16) #define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0) #define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_BPP_8 (0<<5) #define PIPECONF_8BPC (0<<5)
#define PIPECONF_BPP_10 (1<<5) #define PIPECONF_10BPC (1<<5)
#define PIPECONF_BPP_6 (2<<5) #define PIPECONF_6BPC (2<<5)
#define PIPECONF_BPP_12 (3<<5) #define PIPECONF_12BPC (3<<5)
#define PIPECONF_DITHER_EN (1<<4) #define PIPECONF_DITHER_EN (1<<4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) #define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
#define PIPECONF_DITHER_TYPE_SP (0<<2) #define PIPECONF_DITHER_TYPE_SP (0<<2)
@@ -2716,11 +2698,6 @@
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
#define PIPE_8BPC (0 << 5)
#define PIPE_10BPC (1 << 5)
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@@ -3578,27 +3555,30 @@
#define PORTD_PULSE_DURATION_6ms (2 << 18) #define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18) #define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18) #define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0) #define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) #define PORTD_HOTPLUG_NO_DETECT (0 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17) #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12) #define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0) #define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) #define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10) #define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10) #define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10) #define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0) #define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) #define PORTC_HOTPLUG_NO_DETECT (0 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9) #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4) #define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0) #define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) #define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2) #define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2) #define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2) #define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0) #define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1) #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
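With the STATUS_MASK additions, ports B, C and D carry the same 2-bit detect field at shifts 0, 8 and 16, so decoding becomes uniform. A standalone sketch (the sample register value is invented):

#include <stdio.h>
#include <stdint.h>

static const char *decode(uint32_t stat, int shift)
{
	switch ((stat >> shift) & 0x3) {	/* PORTx_HOTPLUG_STATUS_MASK */
	case 1:  return "short pulse";		/* PORTx_HOTPLUG_SHORT_DETECT */
	case 2:  return "long pulse";		/* PORTx_HOTPLUG_LONG_DETECT */
	default: return "no detect";
	}
}

int main(void)
{
	uint32_t stat = (2u << 16) | (1u << 8);	/* D long, C short, B none */

	printf("port B: %s\n", decode(stat, 0));
	printf("port C: %s\n", decode(stat, 8));
	printf("port D: %s\n", decode(stat, 16));
	return 0;
}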
#define PCH_GPIOA 0xc5010 #define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014 #define PCH_GPIOB 0xc5014
@@ -3817,8 +3797,6 @@
#define TRANS_FSYNC_DELAY_HB2 (1<<27) #define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27) #define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27) #define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21) #define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21) #define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21) #define TRANS_INTERLACED (3<<21)


@@ -776,7 +776,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt; crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt; crt->base.enable = intel_enable_crt;
if (IS_HASWELL(dev)) if (HAS_DDI(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state; crt->base.get_hw_state = intel_ddi_get_hw_state;
else else
crt->base.get_hw_state = intel_crt_get_hw_state; crt->base.get_hw_state = intel_crt_get_hw_state;


@@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both * in either FDI or DP modes only, as HDMI connections will work with both
* of those * of those
*/ */
void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode) static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool use_fdi_mode)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg; u32 reg;
@@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
{ {
int port; int port;
if (IS_HASWELL(dev)) { if (!HAS_DDI(dev))
for (port = PORT_A; port < PORT_E; port++) return;
intel_prepare_ddi_buffers(dev, port, false);
/* DDI E is the suggested one to work in FDI mode, so program is as such by for (port = PORT_A; port < PORT_E; port++)
* default. It will have to be re-programmed in case a digital DP output intel_prepare_ddi_buffers(dev, port, false);
* will be detected on it
*/ /* DDI E is the suggested one to work in FDI mode, so program it as such
intel_prepare_ddi_buffers(dev, PORT_E, true); * by default. It will have to be re-programmed in case a digital DP
} * output is detected on it
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
} }
static const long hsw_ddi_buf_ctl_values[] = { static const long hsw_ddi_buf_ctl_values[] = {
@@ -1069,7 +1071,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
if (port == PORT_A) if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP; cpu_transcoder = TRANSCODER_EDP;
else else
cpu_transcoder = pipe; cpu_transcoder = (enum transcoder) pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));

File diff suppressed because it is too large


@@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
return max_link_bw; return max_link_bw;
} }
static int
intel_dp_link_clock(uint8_t link_bw)
{
if (link_bw == DP_LINK_BW_2_7)
return 270000;
else
return 162000;
}
/* /*
* The units on the numbers in the next two are... bizarre. Examples will * The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec. * make it clearer; this one parallels an example in the eDP spec.
@@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode, struct drm_display_mode *mode,
bool adjust_mode) bool adjust_mode)
{ {
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_link_clock =
drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate; int max_rate, mode_rate;
@@ -330,6 +322,48 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
} }
} }
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->output_reg + 0x10;
uint32_t status;
bool done;
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
ch_ctl = DPA_AUX_CH_CTL;
break;
case PORT_B:
ch_ctl = PCH_DPB_AUX_CH_CTL;
break;
case PORT_C:
ch_ctl = PCH_DPC_AUX_CH_CTL;
break;
case PORT_D:
ch_ctl = PCH_DPD_AUX_CH_CTL;
break;
default:
BUG();
}
}
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
has_aux_irq);
#undef C
return status;
}
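The '#define C' above is a deliberate trick: the wait condition both tests the busy bit and leaves the last-sampled value in status, so the function can return it afterwards. The idiom in isolation, as a runnable userspace model with an invented register:

#include <stdio.h>

static unsigned reads;

static unsigned read_reg(void)		/* busy for the first two reads */
{
	return ++reads < 3 ? 0x80000000u : 0;
}

int main(void)
{
	unsigned status;
#define C (((status = read_reg()) & 0x80000000u) == 0)
	while (!C)
		;			/* poll until the busy bit clears */
#undef C
	printf("done after %u reads, status=0x%08x\n", reads, status);
	return 0;
}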
static int static int
intel_dp_aux_ch(struct intel_dp *intel_dp, intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes, uint8_t *send, int send_bytes,
@@ -341,11 +375,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10; uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4; uint32_t ch_data = ch_ctl + 4;
int i; int i, ret, recv_bytes;
int recv_bytes;
uint32_t status; uint32_t status;
uint32_t aux_clock_divider; uint32_t aux_clock_divider;
int try, precharge; int try, precharge;
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
pm_qos_update_request(&dev_priv->pm_qos, 0);
if (IS_HASWELL(dev)) { if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) { switch (intel_dig_port->port) {
@@ -379,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider. * clock divider.
*/ */
if (is_cpu_edp(intel_dp)) { if (is_cpu_edp(intel_dp)) {
if (IS_HASWELL(dev)) if (HAS_DDI(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev)) else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100; aux_clock_divider = 100;
@@ -399,7 +439,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */ /* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) { for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl); status = I915_READ_NOTRACE(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break; break;
msleep(1); msleep(1);
@@ -408,7 +448,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if (try == 3) { if (try == 3) {
WARN(1, "dp_aux_ch not started status 0x%08x\n", WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl)); I915_READ(ch_ctl));
return -EBUSY; ret = -EBUSY;
goto out;
} }
/* Must try at least 3 times according to DP spec */ /* Must try at least 3 times according to DP spec */
@@ -421,6 +462,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
/* Send the command and wait for it to complete */ /* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_SEND_BUSY |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_400us | DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -428,12 +470,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_DONE | DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR); DP_AUX_CH_CTL_RECEIVE_ERROR);
for (;;) {
status = I915_READ(ch_ctl); status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
udelay(100);
}
/* Clear done status and any errors */ /* Clear done status and any errors */
I915_WRITE(ch_ctl, I915_WRITE(ch_ctl,
@@ -451,7 +489,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
if ((status & DP_AUX_CH_CTL_DONE) == 0) { if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
return -EBUSY; ret = -EBUSY;
goto out;
} }
/* Check for timeout or receive error. /* Check for timeout or receive error.
@@ -459,14 +498,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
*/ */
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
return -EIO; ret = -EIO;
goto out;
} }
/* Timeouts occur when the device isn't connected, so they're /* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */ * "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT; ret = -ETIMEDOUT;
goto out;
} }
/* Unload any bytes sent back from the other side */ /* Unload any bytes sent back from the other side */
@@ -479,7 +520,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
unpack_aux(I915_READ(ch_data + i), unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i); recv + i, recv_bytes - i);
return recv_bytes; ret = recv_bytes;
out:
pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
return ret;
} }
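The pm_qos bracketing added above is the stock low-latency-section pattern: one request object added at init, tightened to 0 around the transfer, relaxed afterwards. Condensed sketch, kernel context assumed and the request name invented:

#include <linux/pm_qos.h>

static struct pm_qos_request aux_qos;

static void aux_init_once(void)
{
	pm_qos_add_request(&aux_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void aux_transfer(void)
{
	pm_qos_update_request(&aux_qos, 0);	/* forbid deep C-states */
	/* ... latency-sensitive AUX I/O ... */
	pm_qos_update_request(&aux_qos, PM_QOS_DEFAULT_VALUE);
}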
/* Write data to the aux channel in native mode */ /* Write data to the aux channel in native mode */
@@ -722,12 +767,15 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
for (clock = 0; clock <= max_clock; clock++) { for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); int link_bw_clock =
drm_dp_bw_code_to_link_rate(bws[clock]);
int link_avail = intel_dp_max_data_rate(link_bw_clock,
lane_count);
if (mode_rate <= link_avail) { if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock]; intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count; intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); adjusted_mode->clock = link_bw_clock;
DRM_DEBUG_KMS("DP link bw %02x lane " DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n", "count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count, intel_dp->link_bw, intel_dp->lane_count,
@@ -742,39 +790,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false; return false;
} }
struct intel_dp_m_n {
uint32_t tu;
uint32_t gmch_m;
uint32_t gmch_n;
uint32_t link_m;
uint32_t link_n;
};
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
*den >>= 1;
}
}
static void
intel_dp_compute_m_n(int bpp,
int nlanes,
int pixel_clock,
int link_clock,
struct intel_dp_m_n *m_n)
{
m_n->tu = 64;
m_n->gmch_m = (pixel_clock * bpp) >> 3;
m_n->gmch_n = link_clock * nlanes;
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
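The helper removed here survives as the shared intel_link_compute_m_n(); its heart is the ratio reduction above, which shifts numerator and denominator together until both fit the hardware's 24-bit M/N fields. A standalone check (values inflated artificially so the loop actually runs):

#include <stdio.h>
#include <stdint.h>

static void reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

int main(void)
{
	uint32_t m = 445500000;		/* pixel_clock * bpp / 8, inflated */
	uint32_t n = 1080000000;	/* link_clock * nlanes, inflated */

	reduce_ratio(&m, &n);
	printf("m/n = %u/%u\n", (unsigned)m, (unsigned)n);	/* both fit in 24 bits now */
	return 0;
}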
void void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
@@ -785,7 +800,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4; int lane_count = 4;
struct intel_dp_m_n m_n; struct intel_link_m_n m_n;
int pipe = intel_crtc->pipe; int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
@@ -808,8 +823,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always * the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total. * set up for 8-bits of R/G/B, or 3 bytes total.
*/ */
intel_dp_compute_m_n(intel_crtc->bpp, lane_count, intel_link_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n); mode->clock, adjusted_mode->clock, &m_n);
if (IS_HASWELL(dev)) { if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -851,6 +866,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
} }
} }
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
if (clock < 200000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
dpa_ctl |= DP_PLL_FREQ_160MHZ;
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
}
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
udelay(500);
}
static void static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode) struct drm_display_mode *adjusted_mode)
@@ -950,6 +991,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
} else { } else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
} }
if (is_cpu_edp(intel_dp))
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
} }
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1543,7 +1587,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
} }
static uint32_t static uint32_t
intel_dp_signal_levels(uint8_t train_set) intel_gen4_signal_levels(uint8_t train_set)
{ {
uint32_t signal_levels = 0; uint32_t signal_levels = 0;
@@ -1641,7 +1685,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set) intel_hsw_signal_levels(uint8_t train_set)
{ {
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK); DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -1673,6 +1717,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
} }
} }
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
if (IS_HASWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
signal_levels = intel_gen4_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
*DP = (*DP & ~mask) | signal_levels;
}
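The final statement is a routine read-modify-write: only the per-platform voltage/pre-emphasis field changes and every other bit of DP survives. In isolation, with an invented field layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t DP = 0x80041234;	/* pretend port register value */
	uint32_t mask = 0x3u << 24;	/* hypothetical signal-level field */
	uint32_t levels = 0x2u << 24;	/* new value for that field */

	DP = (DP & ~mask) | levels;
	printf("DP = 0x%08x\n", (unsigned)DP);	/* other bits untouched */
	return 0;
}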
static bool static bool
intel_dp_set_link_train(struct intel_dp *intel_dp, intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value, uint32_t dp_reg_value,
@@ -1791,7 +1863,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
int voltage_tries, loop_tries; int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP; uint32_t DP = intel_dp->DP;
if (IS_HASWELL(dev)) if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder); intel_ddi_prepare_link_retrain(encoder);
/* Write the link configuration data */ /* Write the link configuration data */
@@ -1809,24 +1881,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (;;) { for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE]; uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
if (IS_HASWELL(dev)) { intel_dp_set_signal_levels(intel_dp, &DP);
signal_levels = intel_dp_signal_levels_hsw(
intel_dp->train_set[0]);
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
signal_levels);
/* Set training pattern 1 */ /* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP, if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1882,7 +1938,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
void void
intel_dp_complete_link_train(struct intel_dp *intel_dp) intel_dp_complete_link_train(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false; bool channel_eq = false;
int tries, cr_tries; int tries, cr_tries;
uint32_t DP = intel_dp->DP; uint32_t DP = intel_dp->DP;
@@ -1892,8 +1947,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
cr_tries = 0; cr_tries = 0;
channel_eq = false; channel_eq = false;
for (;;) { for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE]; uint8_t link_status[DP_LINK_STATUS_SIZE];
if (cr_tries > 5) { if (cr_tries > 5) {
@@ -1902,19 +1955,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break; break;
} }
if (IS_HASWELL(dev)) { intel_dp_set_signal_levels(intel_dp, &DP);
signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
/* channel eq pattern */ /* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, DP, if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1964,6 +2005,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP; uint32_t DP = intel_dp->DP;
/* /*
@@ -1981,7 +2024,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
* intel_ddi_prepare_link_retrain will take care of redoing the link * intel_ddi_prepare_link_retrain will take care of redoing the link
* train. * train.
*/ */
if (IS_HASWELL(dev)) if (HAS_DDI(dev))
return; return;
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -1998,7 +2041,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
} }
POSTING_READ(intel_dp->output_reg); POSTING_READ(intel_dp->output_reg);
msleep(17); /* We don't really know why we're doing this */
intel_wait_for_vblank(dev, intel_crtc->pipe);
if (HAS_PCH_IBX(dev) && if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -2018,19 +2062,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank /* Changes to enable or select take place the vblank
* after being written. * after being written.
*/ */
if (crtc == NULL) { if (WARN_ON(crtc == NULL)) {
/* We can arrive here never having been attached /* We should never try to disable a port without a crtc
* to a CRTC, for instance, due to inheriting * attached. For paranoia keep the code around for a
* random state from the BIOS. * bit. */
*
* If the pipe is not running, play safe and
* wait for the clocks to stabilise before
* continuing.
*/
POSTING_READ(intel_dp->output_reg); POSTING_READ(intel_dp->output_reg);
msleep(50); msleep(50);
} else } else
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); intel_wait_for_vblank(dev, intel_crtc->pipe);
} }
DP &= ~DP_AUDIO_OUTPUT_ENABLE; DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -2042,10 +2081,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static bool static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_dp_get_dpcd(struct intel_dp *intel_dp)
{ {
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0) sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */ return false; /* aux transfer failed */
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
if (intel_dp->dpcd[DP_DPCD_REV] == 0) if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */ return false; /* DPCD not present */
@@ -2206,6 +2251,8 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp) ironlake_dp_detect(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status; enum drm_connector_status status;
/* Can't disconnect eDP, but you can close the lid... */ /* Can't disconnect eDP, but you can close the lid... */
@ -2216,6 +2263,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status; return status;
} }
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp); return intel_dp_detect_dpcd(intel_dp);
} }
@ -2290,13 +2340,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
return intel_ddc_get_modes(connector, adapter); return intel_ddc_get_modes(connector, adapter);
} }
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
* \return false if DP port is disconnected.
*/
static enum drm_connector_status static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force) intel_dp_detect(struct drm_connector *connector, bool force)
{ {
@ -2306,7 +2349,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
enum drm_connector_status status; enum drm_connector_status status;
struct edid *edid = NULL; struct edid *edid = NULL;
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
intel_dp->has_audio = false; intel_dp->has_audio = false;
@ -2315,10 +2357,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
else else
status = g4x_dp_detect(intel_dp); status = g4x_dp_detect(intel_dp);
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
if (status != connector_status_connected) if (status != connector_status_connected)
return status; return status;
@ -2445,11 +2483,8 @@ intel_dp_set_property(struct drm_connector *connector,
return -EINVAL; return -EINVAL;
done: done:
if (intel_encoder->base.crtc) { if (intel_encoder->base.crtc)
struct drm_crtc *crtc = intel_encoder->base.crtc; intel_crtc_restore_mode(intel_encoder->base.crtc);
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
return 0; return 0;
} }
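
The repeated open-coded modeset restore in the property setters (visible in the removed lines above, and again in the intel_hdmi, intel_lvds, intel_sdvo and intel_tv hunks below) is folded into one helper. A minimal sketch of what intel_crtc_restore_mode() plausibly wraps, assuming it simply re-applies the crtc's current state; its real definition lands in intel_display.c, which is not part of this excerpt:

/* Sketch only: re-run a full modeset with the crtc's existing mode,
 * exactly the sequence the removed call sites used to open-code. */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
}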
@@ -2742,7 +2777,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	drm_sysfs_connector_add(connector);
 
-	if (IS_HASWELL(dev))
+	if (HAS_DDI(dev))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
 	else
 		intel_connector->get_hw_state = intel_connector_get_hw_state;

View file

@@ -153,6 +153,7 @@ struct intel_encoder {
 	bool cloneable;
 	bool connectors_active;
 	void (*hot_plug)(struct intel_encoder *);
+	void (*pre_pll_enable)(struct intel_encoder *);
 	void (*pre_enable)(struct intel_encoder *);
 	void (*enable)(struct intel_encoder *);
 	void (*disable)(struct intel_encoder *);
@@ -443,6 +444,7 @@ extern void intel_mark_idle(struct drm_device *dev);
 extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
 extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
+extern bool intel_is_dual_link_lvds(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int output_reg,
 			  enum port port);
 extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -502,9 +504,10 @@ struct intel_set_config {
 	bool mode_changed;
 };
 
-extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
-			   int x, int y, struct drm_framebuffer *old_fb);
+extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			  int x, int y, struct drm_framebuffer *old_fb);
 extern void intel_modeset_disable(struct drm_device *dev);
+extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
 extern void intel_encoder_noop(struct drm_encoder *encoder);
@@ -546,6 +549,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
 	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
+				struct intel_digital_port *port);
+
 extern void intel_connector_attach_encoder(struct intel_connector *connector,
 					   struct intel_encoder *encoder);
 extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -589,6 +595,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
 				  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
 extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
 extern void intel_prepare_page_flip(struct drm_device *dev, int plane);

View file

@@ -83,7 +83,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	size = mode_cmd.pitches[0] * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
 
-	obj = i915_gem_alloc_object(dev, size);
+	obj = i915_gem_object_create_stolen(dev, size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, size);
 	if (!obj) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = -ENOMEM;
@@ -153,6 +155,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
+	/* If the object is shmemfs backed, it will have given us zeroed pages.
+	 * If the object is stolen however, it will be full of whatever
+	 * garbage was left in there.
+	 */
+	if (ifbdev->ifb.obj->stolen)
+		memset_io(info->screen_base, 0, info->screen_size);
+
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
@@ -241,10 +250,18 @@ int intel_fbdev_init(struct drm_device *dev)
 	}
 
 	drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-	drm_fb_helper_initial_config(&ifbdev->helper, 32);
 
 	return 0;
 }
 
+void intel_fbdev_initial_config(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Due to peculiar init order wrt to hpd handling this is separate. */
+	drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
+}
+
 void intel_fbdev_fini(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
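
Splitting the fbdev bring-up in two lets the driver register connectors early but defer the initial output probe until hotplug handling is live. A sketch of the presumed load order; the real call sites live in the driver load path outside this diff, and intel_setup_interrupts() is an illustrative stand-in, not a kernel function:

intel_fbdev_init(dev);            /* allocate helper, add connectors */
intel_setup_interrupts(dev);      /* stand-in: hpd interrupts become live */
intel_fbdev_initial_config(dev);  /* only now probe outputs, set up fbcon */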

View file

@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t enabled_bits;
 
-	enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+	enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
 	WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
 	     "HDMI port enabled, expecting disabled\n");
@@ -793,16 +793,21 @@ static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
+	struct drm_device *dev = connector->dev;
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct intel_digital_port *intel_dig_port =
 		hdmi_to_dig_port(intel_hdmi);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct edid *edid;
 	enum drm_connector_status status = connector_status_disconnected;
 
-	if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
+	if (IS_G4X(dev) && !g4x_hdmi_connected(intel_hdmi))
 		return status;
+	else if (HAS_PCH_SPLIT(dev) &&
+		 !ibx_digital_port_connected(dev_priv, intel_dig_port))
+		return status;
 
 	intel_hdmi->has_hdmi_sink = false;
 	intel_hdmi->has_audio = false;
@@ -912,11 +917,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
 	return -EINVAL;
 
 done:
-	if (intel_dig_port->base.base.crtc) {
-		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
-		intel_set_mode(crtc, &crtc->mode,
-			       crtc->x, crtc->y, crtc->fb);
-	}
+	if (intel_dig_port->base.base.crtc)
+		intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
 
 	return 0;
 }
@@ -1013,7 +1015,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 		intel_hdmi->set_infoframes = cpt_set_infoframes;
 	}
 
-	if (IS_HASWELL(dev))
+	if (HAS_DDI(dev))
 		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
 	else
 		intel_connector->get_hw_state = intel_connector_get_hw_state;

View file

@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
+	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }
 
 static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,68 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
 	algo->data = bus;
 }
 
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
+static int
+gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
+		     u32 gmbus2_status,
+		     u32 gmbus4_irq_en)
+{
+	int i;
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u32 gmbus2 = 0;
+	DEFINE_WAIT(wait);
+
+	/* Important: The hw handles only the first bit, so set only one! Since
+	 * we also need to check for NAKs besides the hw ready/idle signal, we
+	 * need to wake up periodically and check that ourselves. */
+	I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
+
+	for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
+		prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+
+		gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
+		if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
+			break;
+
+		schedule_timeout(1);
+	}
+	finish_wait(&dev_priv->gmbus_wait_queue, &wait);
+
+	I915_WRITE(GMBUS4 + reg_offset, 0);
+
+	if (gmbus2 & GMBUS_SATOER)
+		return -ENXIO;
+	if (gmbus2 & gmbus2_status)
+		return 0;
+	return -ETIMEDOUT;
+}
+
+static int
+gmbus_wait_idle(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	int reg_offset = dev_priv->gpio_mmio_base;
+
+#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
+
+	if (!HAS_GMBUS_IRQ(dev_priv->dev))
+		return wait_for(C, 10);
+
+	/* Important: The hw handles only the first bit, so set only one! */
+	I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
+
+	ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+
+	I915_WRITE(GMBUS4 + reg_offset, 0);
+
+	if (ret)
+		return 0;
+	else
+		return -ETIMEDOUT;
+#undef C
+}
+
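The NAK bit (GMBUS_SATOER) never raises an interrupt on its own, which is why gmbus_wait_hw_status() sleeps in one-jiffy slices instead of using a plain wait_event_timeout(). From a caller's point of view the helper folds the old wait_for()-plus-SATOER dance into a single call; a sketch of the intended usage, mirroring the converted call sites below:

/* Sketch of a caller (mirrors gmbus_xfer_read below): wait until the
 * hw can accept/deliver the next dword, or the slave NAKed. */
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret == -ENXIO)	/* GMBUS_SATOER was set: slave NAK */
	return ret;
if (ret == -ETIMEDOUT)	/* hw never signalled ready within ~50ms */
	return ret;
/* ret == 0: GMBUS_HW_RDY observed, continue transferring data */
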
 static int
 gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 		u32 gmbus1_index)
@@ -219,15 +282,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 	while (len) {
 		int ret;
 		u32 val, loop = 0;
-		u32 gmbus2;
 
-		ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
-			       (GMBUS_SATOER | GMBUS_HW_RDY),
-			       50);
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+					   GMBUS_HW_RDY_EN);
 		if (ret)
-			return -ETIMEDOUT;
-		if (gmbus2 & GMBUS_SATOER)
-			return -ENXIO;
+			return ret;
 
 		val = I915_READ(GMBUS3 + reg_offset);
 		do {
@@ -261,7 +320,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
 		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
 	while (len) {
 		int ret;
-		u32 gmbus2;
 
 		val = loop = 0;
 		do {
@@ -270,13 +328,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
 
 		I915_WRITE(GMBUS3 + reg_offset, val);
 
-		ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
-			       (GMBUS_SATOER | GMBUS_HW_RDY),
-			       50);
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
+					   GMBUS_HW_RDY_EN);
 		if (ret)
-			return -ETIMEDOUT;
-		if (gmbus2 & GMBUS_SATOER)
-			return -ENXIO;
+			return ret;
 	}
 	return 0;
 }
@@ -345,8 +400,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
 	for (i = 0; i < num; i++) {
-		u32 gmbus2;
-
 		if (gmbus_is_index_read(msgs, i, num)) {
 			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
 			i += 1;  /* set i to the index of the read xfer */
@@ -361,13 +414,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
 		if (ret == -ENXIO)
 			goto clear_err;
 
-		ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
-			       (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
-			       50);
+		ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
+					   GMBUS_HW_WAIT_EN);
+		if (ret == -ENXIO)
+			goto clear_err;
 		if (ret)
 			goto timeout;
-		if (gmbus2 & GMBUS_SATOER)
-			goto clear_err;
 	}
 
 	/* Generate a STOP condition on the bus. Note that gmbus can't generata
@@ -380,8 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	 * We will re-enable it at the start of the next xfer,
 	 * till then let it sleep.
 	 */
-	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-		     10)) {
+	if (gmbus_wait_idle(dev_priv)) {
 		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
 			      adapter->name);
 		ret = -ETIMEDOUT;
@@ -405,8 +456,7 @@ clear_err:
 	 * it's slow responding and only answers on the 2nd retry.
 	 */
 	ret = -ENXIO;
-	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
-		     10)) {
+	if (gmbus_wait_idle(dev_priv)) {
 		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
 			      adapter->name);
 		ret = -ETIMEDOUT;
@@ -469,6 +519,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 		dev_priv->gpio_mmio_base = 0;
 
 	mutex_init(&dev_priv->gmbus_mutex);
+	init_waitqueue_head(&dev_priv->gmbus_wait_queue);
 
 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
 		struct intel_gmbus *bus = &dev_priv->gmbus[i];

View file

@@ -52,6 +52,8 @@ struct intel_lvds_encoder {
 	u32 pfit_control;
 	u32 pfit_pgm_ratios;
 	bool pfit_dirty;
+	bool is_dual_link;
+	u32 reg;
 
 	struct intel_lvds_connector *attached_connector;
 };
@@ -71,15 +73,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 lvds_reg, tmp;
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	u32 tmp;
 
-	if (HAS_PCH_SPLIT(dev)) {
-		lvds_reg = PCH_LVDS;
-	} else {
-		lvds_reg = LVDS;
-	}
-
-	tmp = I915_READ(lvds_reg);
+	tmp = I915_READ(lvds_encoder->reg);
 
 	if (!(tmp & LVDS_PORT_EN))
 		return false;
@@ -92,6 +89,68 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 	return true;
 }
 
+/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+{
+	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *fixed_mode =
+		lvds_encoder->attached_connector->base.panel.fixed_mode;
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	temp = I915_READ(lvds_encoder->reg);
+	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~PORT_TRANS_SEL_MASK;
+		temp |= PORT_TRANS_SEL_CPT(pipe);
+	} else {
+		if (pipe == 1) {
+			temp |= LVDS_PIPEB_SELECT;
+		} else {
+			temp &= ~LVDS_PIPEB_SELECT;
+		}
+	}
+
+	/* set the corresponsding LVDS_BORDER bit */
+	temp |= dev_priv->lvds_border_bits;
+	/* Set the B0-B3 data pairs corresponding to whether we're going to
+	 * set the DPLLs for dual-channel mode or not.
+	 */
+	if (lvds_encoder->is_dual_link)
+		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+	else
+		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+	 * appropriately here, but we need to look more thoroughly into how
+	 * panels behave in the two modes.
+	 */
+
+	/* Set the dithering flag on LVDS as needed, note that there is no
+	 * special lvds dither control bit on pch-split platforms, dithering is
+	 * only controlled through the PIPECONF reg. */
+	if (INTEL_INFO(dev)->gen == 4) {
+		if (dev_priv->lvds_dither)
+			temp |= LVDS_ENABLE_DITHER;
+		else
+			temp &= ~LVDS_ENABLE_DITHER;
+	}
+	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+	if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
+		temp |= LVDS_HSYNC_POLARITY;
+	if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
+		temp |= LVDS_VSYNC_POLARITY;
+
+	I915_WRITE(lvds_encoder->reg, temp);
+}
+
 /**
  * Sets the power state for the panel.
  */
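
The new pre_pll_enable hook exists because the LVDS port register must be programmed before the DPLL that drives it is switched on. A sketch of where the hook presumably slots into the crtc enable sequence; the actual ordering lives in the crtc enable paths in intel_display.c, outside this diff, and enable_pll() here is an illustrative stand-in:

/* Sketch of the presumed enable ordering; enable_pll() is a stand-in. */
static void crtc_enable_sketch(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);	/* LVDS pins on */

	enable_pll(crtc);	/* stand-in: the clocks start here */

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	/* ...enable pipe and planes, then encoder->enable()... */
}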
@@ -101,19 +160,17 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, lvds_reg, stat_reg;
+	u32 ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
-		lvds_reg = PCH_LVDS;
 		stat_reg = PCH_PP_STATUS;
 	} else {
 		ctl_reg = PP_CONTROL;
-		lvds_reg = LVDS;
 		stat_reg = PP_STATUS;
 	}
 
-	I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
 
 	if (lvds_encoder->pfit_dirty) {
 		/*
@@ -132,7 +189,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	}
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
-	POSTING_READ(lvds_reg);
+	POSTING_READ(lvds_encoder->reg);
 	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
@@ -144,15 +201,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, lvds_reg, stat_reg;
+	u32 ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
-		lvds_reg = PCH_LVDS;
 		stat_reg = PCH_PP_STATUS;
 	} else {
 		ctl_reg = PP_CONTROL;
-		lvds_reg = LVDS;
 		stat_reg = PP_STATUS;
 	}
 
@@ -167,8 +222,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 		lvds_encoder->pfit_dirty = true;
 	}
 
-	I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
-	POSTING_READ(lvds_reg);
+	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
+	POSTING_READ(lvds_encoder->reg);
 }
 
 static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -591,8 +646,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 			 * If the CRTC is enabled, the display will be changed
 			 * according to the new panel fitting mode.
 			 */
-			intel_set_mode(crtc, &crtc->mode,
-				       crtc->x, crtc->y, crtc->fb);
+			intel_crtc_restore_mode(crtc);
 		}
 	}
 
@@ -903,6 +957,66 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
 	return false;
 }
 
+static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
+{
+	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
+	return 1;
+}
+
+static const struct dmi_system_id intel_dual_link_lvds[] = {
+	{
+		.callback = intel_dual_link_lvds_callback,
+		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
+		},
+	},
+	{ }	/* terminating entry */
+};
+
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_lvds_encoder *lvds_encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		if (encoder->type == INTEL_OUTPUT_LVDS) {
+			lvds_encoder = to_lvds_encoder(&encoder->base);
+
+			return lvds_encoder->is_dual_link;
+		}
+	}
+
+	return false;
+}
+
+static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
+{
+	struct drm_device *dev = lvds_encoder->base.base.dev;
+	unsigned int val;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* use the module option value if specified */
+	if (i915_lvds_channel_mode > 0)
+		return i915_lvds_channel_mode == 2;
+
+	if (dmi_check_system(intel_dual_link_lvds))
+		return true;
+
+	/* BIOS should set the proper LVDS register value at boot, but
+	 * in reality, it doesn't set the value when the lid is closed;
+	 * we need to check "the value to be set" in VBT when LVDS
+	 * register is uninitialized.
+	 */
+	val = I915_READ(lvds_encoder->reg);
+	if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+		val = dev_priv->bios_lvds_val;
+
+	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
+}
+
 static bool intel_lvds_supported(struct drm_device *dev)
 {
 	/* With the introduction of the PCH we gained a dedicated
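
compute_is_dual_link_lvds() resolves the channel mode with a clear precedence: module option, then DMI quirk, then the hardware (or VBT) state. A standalone restatement of that decision order, with stand-in parameters instead of the driver state; only the LVDS_CLKB_POWER_* bits come from the hunk above:

/* Standalone restatement of the precedence above; parameter names are
 * illustrative stand-ins, not kernel API. */
static int is_dual_link(int channel_mode_param,	/* i915.lvds_channel_mode */
			int dmi_forces_dual,	/* quirk table matched */
			unsigned int lvds_val)	/* register (or VBT) value */
{
	if (channel_mode_param > 0)		/* 1: explicit user override */
		return channel_mode_param == 2;
	if (dmi_forces_dual)			/* 2: known dual-link machines */
		return 1;
	/* 3: trust the hardware/VBT: dual link iff the second channel's
	 * clock pair is powered up. */
	return (lvds_val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}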
@@ -988,6 +1102,7 @@ bool intel_lvds_init(struct drm_device *dev)
 			 DRM_MODE_ENCODER_LVDS);
 
 	intel_encoder->enable = intel_enable_lvds;
+	intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
 	intel_encoder->disable = intel_disable_lvds;
 	intel_encoder->get_hw_state = intel_lvds_get_hw_state;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1009,6 +1124,12 @@ bool intel_lvds_init(struct drm_device *dev)
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
+	if (HAS_PCH_SPLIT(dev)) {
+		lvds_encoder->reg = PCH_LVDS;
+	} else {
+		lvds_encoder->reg = LVDS;
+	}
+
 	/* create the scaling mode property */
 	drm_mode_create_scaling_mode_property(dev);
 	drm_object_attach_property(&connector->base,
@@ -1109,6 +1230,10 @@ bool intel_lvds_init(struct drm_device *dev)
 		goto failed;
 
 out:
+	lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
+	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
+		      lvds_encoder->is_dual_link ? "dual" : "single");
+
 	/*
 	 * Unlock registers and just
 	 * leave them unlocked

View file

@@ -28,7 +28,6 @@
 #include <linux/fb.h>
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
-#include <drm/drm_edid.h>
 #include "intel_drv.h"
 #include "i915_drv.h"

View file

@@ -1333,8 +1333,10 @@ void intel_setup_overlay(struct drm_device *dev)
 
 	overlay->dev = dev;
 
-	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
-	if (!reg_bo)
+	reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+	if (reg_bo == NULL)
+		reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+	if (reg_bo == NULL)
 		goto out_free;
 	overlay->reg_bo = reg_bo;

View file

@@ -440,12 +440,6 @@ void intel_update_fbc(struct drm_device *dev)
 		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
 		goto out_disable;
 	}
-	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
-		DRM_DEBUG_KMS("framebuffer too large, disabling "
-			      "compression\n");
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		goto out_disable;
-	}
 	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
 	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
 		DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -479,6 +473,14 @@ void intel_update_fbc(struct drm_device *dev)
 	if (in_dbg_master())
 		goto out_disable;
 
+	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+		DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
+		DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
+		DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
+		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+		goto out_disable;
+	}
+
 	/* If the scanout has not changed, don't modify the FBC settings.
 	 * Note that we make the fundamental assumption that the fb->obj
 	 * cannot be unpinned (and have its GTT offset and fence revoked)
@@ -526,6 +528,7 @@ out_disable:
 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 		intel_disable_fbc(dev);
 	}
+	i915_gem_stolen_cleanup_compression(dev);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
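
Instead of validating the framebuffer against a compressed-buffer size fixed at boot (dev_priv->cfb_size), the compressed buffer is now carved out of stolen memory on demand each time FBC is (re)enabled, and handed back whenever FBC ends up disabled. A condensed sketch of the resulting control flow; fb_size is a stand-in for intel_fb->obj->base.size:

/* Condensed shape of intel_update_fbc() after this hunk (sketch) */
if (i915_gem_stolen_setup_compression(dev, fb_size)) {
	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
	goto out_disable;			/* no CFB, no compression */
}
/* ...enable or keep FBC on the current scanout... */
return;

out_disable:
	intel_disable_fbc(dev);
	i915_gem_stolen_cleanup_compression(dev);	/* return stolen mem */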

View file

@@ -601,6 +601,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
 	return 0;
 }
 
+static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+					      u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->last_seqno < seqno;
+}
+
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -631,11 +638,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter,
-			dw1 | signaller->semaphore_register[waiter->id]);
-	intel_ring_emit(waiter, seqno);
-	intel_ring_emit(waiter, 0);
-	intel_ring_emit(waiter, MI_NOOP);
+	/* If seqno wrap happened, omit the wait with no-ops */
+	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+		intel_ring_emit(waiter,
+				dw1 |
+				signaller->semaphore_register[waiter->id]);
+		intel_ring_emit(waiter, seqno);
+		intel_ring_emit(waiter, 0);
+		intel_ring_emit(waiter, MI_NOOP);
+	} else {
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+		intel_ring_emit(waiter, MI_NOOP);
+	}
 	intel_ring_advance(waiter);
 
 	return 0;
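
The hardware semaphore stalls until the signaller's mailbox reaches the target seqno, so a wait on a seqno handed out before a wrap would never complete once the counter restarts low; hence the waiter simply skips the wait. Note both branches emit exactly four dwords, keeping the space reserved by the surrounding intel_ring_begin() balanced. The wrap test in isolation:

/* Sketch: a wait target above the last allocated seqno can only be a
 * leftover from before the wrap, so it must not be waited upon. */
static inline int wait_target_predates_wrap(u32 last_allocated, u32 target)
{
	return last_allocated < target;
}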
@@ -716,6 +732,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
+static void
+ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+}
+
 static u32
 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 {
@@ -723,6 +745,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
 	return pc->cpu_page[0];
 }
 
+static void
+pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct pipe_control *pc = ring->private;
+	pc->cpu_page[0] = seqno;
+}
+
 static bool
 gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
@@ -1152,7 +1181,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			return ret;
 	}
 
-	obj = i915_gem_alloc_object(dev, ring->size);
+	obj = NULL;
+	if (!HAS_LLC(dev))
+		obj = i915_gem_object_create_stolen(dev, ring->size);
+	if (obj == NULL)
+		obj = i915_gem_alloc_object(dev, ring->size);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
 		ret = -ENOMEM;
@@ -1190,6 +1223,8 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
+	intel_ring_init_seqno(ring, dev_priv->last_seqno);
+
 	return 0;
 
 err_unmap:
@@ -1398,11 +1433,31 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
 }
 
+static int __intel_ring_begin(struct intel_ring_buffer *ring,
+			      int bytes)
+{
+	int ret;
+
+	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < bytes)) {
+		ret = ring_wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	ring->space -= bytes;
+	return 0;
+}
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
 		     int num_dwords)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	int n = 4*num_dwords;
 	int ret;
 
 	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
@@ -1414,20 +1469,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	if (unlikely(ring->tail + n > ring->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
-	}
+	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+}
 
-	if (unlikely(ring->space < n)) {
-		ret = ring_wait_for_space(ring, n);
-		if (unlikely(ret))
-			return ret;
-	}
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-	ring->space -= n;
-	return 0;
+	BUG_ON(ring->outstanding_lazy_request);
+
+	if (INTEL_INFO(ring->dev)->gen >= 6) {
+		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
+		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+	}
+
+	ring->set_seqno(ring, seqno);
 }
 
 void intel_ring_advance(struct intel_ring_buffer *ring)
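
intel_ring_init_seqno() gives every ring a known starting seqno and, on gen6+, clears the semaphore mailbox registers so stale pre-wrap values cannot satisfy a later semaphore wait. Callers of intel_ring_begin() still reserve space in dwords while the new helper accounts in bytes; the conversion happens at the seam, as in a typical emit sequence:

/* Sketch: a typical 4-dword emit, as used throughout this file. The
 * begin call reserves 4 * sizeof(uint32_t) == 16 bytes internally. */
ret = intel_ring_begin(ring, 4);
if (ret)
	return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);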
@@ -1592,6 +1648,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_put = gen6_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->sync_to = gen6_ring_sync;
 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1602,6 +1659,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
+		ring->set_seqno = pc_render_set_seqno;
 		ring->irq_get = gen5_ring_get_irq;
 		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1612,6 +1670,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			ring->flush = gen4_render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN2(dev)) {
 			ring->irq_get = i8xx_ring_get_irq;
 			ring->irq_put = i8xx_ring_put_irq;
@@ -1683,6 +1742,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	else
 		ring->flush = gen4_render_ring_flush;
 	ring->get_seqno = ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	if (IS_GEN2(dev)) {
 		ring->irq_get = i8xx_ring_get_irq;
 		ring->irq_put = i8xx_ring_put_irq;
@@ -1743,6 +1803,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = gen6_ring_flush;
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
 		ring->irq_get = gen6_ring_get_irq;
 		ring->irq_put = gen6_ring_put_irq;
@@ -1758,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = i9xx_add_request;
 		ring->get_seqno = ring_get_seqno;
+		ring->set_seqno = ring_set_seqno;
 		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 			ring->irq_get = gen5_ring_get_irq;
@@ -1787,6 +1849,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->flush = blt_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
 	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
 	ring->irq_get = gen6_ring_get_irq;
 	ring->irq_put = gen6_ring_put_irq;

View file

@@ -90,6 +90,8 @@ struct intel_ring_buffer {
 	 */
 	u32		(*get_seqno)(struct intel_ring_buffer *ring,
 				     bool lazy_coherency);
+	void		(*set_seqno)(struct intel_ring_buffer *ring,
+				     u32 seqno);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length,
 					       unsigned flags);
@@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 	return ring->status_page.page_addr[reg];
 }
 
+static inline void
+intel_write_status_page(struct intel_ring_buffer *ring,
+			int reg, u32 value)
+{
+	ring->status_page.page_addr[reg] = value;
+}
+
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 }
 void intel_ring_advance(struct intel_ring_buffer *ring);
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
-
+void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

View file

@@ -1997,11 +1997,8 @@ set_value:
 
 done:
-	if (intel_sdvo->base.base.crtc) {
-		struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
-		intel_set_mode(crtc, &crtc->mode,
-			       crtc->x, crtc->y, crtc->fb);
-	}
+	if (intel_sdvo->base.base.crtc)
+		intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
 
 	return 0;
 #undef CHECK_PROPERTY

View file

@@ -1479,8 +1479,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 	}
 
 	if (changed && crtc)
-		intel_set_mode(crtc, &crtc->mode,
-			       crtc->x, crtc->y, crtc->fb);
+		intel_crtc_restore_mode(crtc);
 out:
 	return ret;
 }

View file

@@ -89,6 +89,29 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
 	return mm->hole_stack.next;
 }
+
+static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	BUG_ON(!hole_node->hole_follows);
+	return __drm_mm_hole_node_start(hole_node);
+}
+
+static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return list_entry(hole_node->node_list.next,
+			  struct drm_mm_node, node_list)->start;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	return __drm_mm_hole_node_end(hole_node);
+}
+
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 						&(mm)->head_node.node_list, \
 						node_list)
@@ -99,9 +122,26 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 	     entry != NULL; entry = next, \
 		next = entry ? list_entry(entry->node_list.next, \
 			struct drm_mm_node, node_list) : NULL) \
+/* Note that we need to unroll list_for_each_entry in order to inline
+ * setting hole_start and hole_end on each iteration and keep the
+ * macro sane.
+ */
+#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
+	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+	     &entry->hole_stack != &(mm)->hole_stack ? \
+	     hole_start = drm_mm_hole_node_start(entry), \
+	     hole_end = drm_mm_hole_node_end(entry), \
+	     1 : 0; \
+	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
 /*
  * Basic range manager support (drm_mm.c)
  */
+extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
+					       unsigned long start,
+					       unsigned long size,
+					       bool atomic);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 						    unsigned long size,
 						    unsigned alignment,
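
drm_mm_for_each_hole yields the free spans between allocated nodes, handing each hole's [start, end) bounds to the loop body; the comma operators in the loop condition are what let the macro update both bounds on every iteration without a helper struct. A hypothetical walk over an already initialized allocator:

/* Hypothetical usage: dump every hole in an initialized drm_mm.
 * "mm" is assumed to have been set up elsewhere (e.g. drm_mm_init()). */
struct drm_mm_node *entry;
unsigned long hole_start, hole_end;

drm_mm_for_each_hole(entry, &mm, hole_start, hole_end)
	printk(KERN_DEBUG "hole [%lx, %lx): %lu bytes free\n",
	       hole_start, hole_end, hole_end - hole_start);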

View file

@@ -18,8 +18,6 @@ struct intel_gtt {
 	/* Share the scratch page dma with ppgtts. */
 	dma_addr_t scratch_page_dma;
 	struct page *scratch_page;
-	/* for ppgtt PDE access */
-	u32 __iomem *gtt;
 	/* needed for ioremap in drm/i915 */
 	phys_addr_t gma_bus_addr;
 } *intel_gtt_get(void);