Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Some fixes for 3.8:
- Watermark fixups from Chris Wilson (4 pieces).
- 2 snb workarounds, which seem to have been added to our internal DB
  only recently.
- Workaround for the infamous i830/i845 hang, which now finally seems
  solid! Based on Chris' fix for SNA, now also for UXA/mesa & old SNA.
- Some more fixlets for the shrinker-pulls-the-rug issues (Chris & me).
- Fix dma-buf flags when exporting (you).
- Disable the VGA plane if it's enabled on lid open - similar in spirit
  to the fix I sent you last week; BIOSes really like to mess with the
  display when closing the lid (awesome debug work from Krzysztof
  Mazur).

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel:
  drm/i915: disable shrinker lock stealing for create_mmap_offset
  drm/i915: optionally disable shrinker lock stealing
  drm/i915: fix flags in dma buf exporting
  i915: ensure that VGA plane is disabled
  drm/i915: Preallocate the drm_mm_node prior to manipulating the GTT drm_mm manager
  drm: Export routines for inserting preallocated nodes into the mm manager
  drm/i915: don't disable disconnected outputs
  drm/i915: Implement workaround for broken CS tlb on i830/845
  drm/i915: Implement WaSetupGtModeTdRowDispatch
  drm/i915: Implement WaDisableHiZPlanesWhenMSAAEnabled
  drm/i915: Prefer CRTC 'active' rather than 'enabled' during WM computations
  drm/i915: Clear self-refresh watermarks when disabled
  drm/i915: Double the cursor self-refresh latency on Valleyview
  drm/i915: Fixup cursor latency used for IVB lp3 watermarks
airlied committed Dec 30, 2012
2 parents b1d778b + da494d7 commit 8be0e5c
Showing 14 changed files with 356 additions and 88 deletions.
41 changes: 29 additions & 12 deletions drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
  * -ENOSPC if no suitable free area is available. The preallocated memory node
  * must be cleared.
  */
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
-		       unsigned long size, unsigned alignment)
+int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
+			       unsigned long size, unsigned alignment,
+			       unsigned long color)
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free(mm, size, alignment, false);
+	hole_node = drm_mm_search_free_generic(mm, size, alignment,
+					       color, 0);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment, 0);
-
+	drm_mm_insert_helper(hole_node, node, size, alignment, color);
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_generic);
+
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
+}
 EXPORT_SYMBOL(drm_mm_insert_node);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -275,22 +283,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  * -ENOSPC if no suitable free area is available. This is for range
  * restricted allocations. The preallocated memory node must be cleared.
  */
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
-				unsigned long size, unsigned alignment,
-				unsigned long start, unsigned long end)
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
+					unsigned long size, unsigned alignment, unsigned long color,
+					unsigned long start, unsigned long end)
 {
 	struct drm_mm_node *hole_node;
 
-	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
-						start, end, false);
+	hole_node = drm_mm_search_free_in_range_generic(mm,
+							size, alignment, color,
+							start, end, 0);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
+	drm_mm_insert_helper_range(hole_node, node,
+				   size, alignment, color,
 				   start, end);
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
+
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
+{
+	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
+}
 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 
 /**
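
With the _generic variants exported, a driver preallocates the node itself
and lets the manager fill it in, instead of the old search-then-get-block
dance. A minimal sketch of the calling pattern (kernel C, mirroring the
i915 call site later in this diff; error handling trimmed, and mm, size,
alignment and color are assumed to be supplied by the caller):

	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* must start zeroed */
	if (!node)
		return -ENOMEM;

	ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);
	if (ret) {		/* -ENOSPC: evict something and retry, or bail */
		kfree(node);
		return ret;
	}
	/* node->start now holds the offset picked by the allocator */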
3 changes: 3 additions & 0 deletions drivers/gpu/drm/i915/i915_dma.c
@@ -989,6 +989,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
 		break;
+	case I915_PARAM_HAS_PINNED_BATCHES:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
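
Userspace can probe the new parameter before relying on pinned batches.
A hedged sketch using libdrm (the helper name is illustrative; the ioctl
and struct come from i915_drm.h):

	#include <xf86drm.h>
	#include <i915_drm.h>

	static int has_pinned_batches(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_PINNED_BATCHES,
			.value = &value,
		};

		/* Older kernels reject unknown parameters, so treat any
		 * ioctl error as "not supported". */
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;
		return value;
	}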
8 changes: 8 additions & 0 deletions drivers/gpu/drm/i915/i915_drv.h
@@ -780,6 +780,7 @@ typedef struct drm_i915_private {
 	struct i915_hw_ppgtt *aliasing_ppgtt;
 
 	struct shrinker inactive_shrinker;
+	bool shrinker_no_lock_stealing;
 
 	/**
 	 * List of objects currently involved in rendering.
@@ -1100,6 +1101,7 @@ struct drm_i915_gem_object {
 	 */
 	atomic_t pending_flip;
 };
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
@@ -1166,6 +1168,9 @@ struct drm_i915_file_private {
 #define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
 				 (dev)->pci_device == 0x0152 || \
 				 (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
+				 (dev)->pci_device == 0x0106 || \
+				 (dev)->pci_device == 0x010A)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
@@ -1196,6 +1201,9 @@ struct drm_i915_file_private {
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
 
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev)	(IS_I830(dev) || IS_845G(dev))
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
77 changes: 37 additions & 40 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	if (obj->base.map_list.map)
 		return 0;
 
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	/* Badly fragmented mmap space? The only way we can recover
 	 * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	i915_gem_shrink_all(dev_priv);
-	return drm_gem_create_mmap_offset(&obj->base);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2890,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *free_space;
+	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2936,66 +2942,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
 search_free:
 	if (map_and_fenceable)
-		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
+		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end);
 	else
-		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-						      size, alignment, obj->cache_level,
-						      false);
-
-	if (free_space != NULL) {
-		if (map_and_fenceable)
-			free_space =
-				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
-		else
-			free_space =
-				drm_mm_get_block_generic(free_space,
-							 size, alignment, obj->cache_level,
-							 false);
-	}
-	if (free_space == NULL) {
+		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+						 size, alignment, obj->cache_level);
+	if (ret) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret == 0)
+			goto search_free;
 
-		goto search_free;
+		i915_gem_object_unpin_pages(obj);
+		kfree(node);
+		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      free_space,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return ret;
 	}
 
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_space = free_space;
-	obj->gtt_offset = free_space->start;
+	obj->gtt_space = node;
+	obj->gtt_offset = node->start;
 
 	fenceable =
-		free_space->size == fence_size &&
-		(free_space->start & (fence_alignment - 1)) == 0;
+		node->size == fence_size &&
+		(node->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4392,6 +4386,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 			return 0;
 
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
 		unlock = false;
 	}
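
The guard exists because create_mmap_offset allocates memory while already
holding struct_mutex: direct reclaim from inside that allocation can
re-enter the shrinker, which would otherwise "steal" the lock and pull
pages out from under the caller. The bracket pattern, reduced to a sketch:

	dev_priv->mm.shrinker_no_lock_stealing = true;

	/* Any allocation in here may recurse into i915_gem_inactive_shrink
	 * via direct reclaim; the flag makes the shrinker back off instead
	 * of stealing struct_mutex from us. */
	ret = drm_gem_create_mmap_offset(&obj->base);

	dev_priv->mm.shrinker_no_lock_stealing = false;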
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
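
Previously the export path hardcoded a 0600 file mode and dropped whatever
flags userspace passed in; with the fix, flags such as close-on-exec
survive the round trip. A hedged userspace sketch via libdrm (fd is an
open DRM device, handle an existing GEM handle; the helper name is
illustrative):

	#include <stdint.h>
	#include <xf86drm.h>

	static int export_cloexec(int fd, uint32_t handle)
	{
		int prime_fd = -1;

		/* DRM_CLOEXEC is now honoured by the i915 export path. */
		if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd))
			return -1;
		return prime_fd;
	}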
2 changes: 2 additions & 0 deletions drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -808,6 +808,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 		flags |= I915_DISPATCH_SECURE;
 	}
+	if (args->flags & I915_EXEC_IS_PINNED)
+		flags |= I915_DISPATCH_PINNED;
 
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
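
On the submission side, gen2-aware userspace sets the new flag so the
kernel dispatches from the pinned copy of the batch. A hedged sketch,
with buffer setup omitted; exec_objects, count and batch_len are assumed
to be prepared as usual:

	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr = (uintptr_t)exec_objects,
		.buffer_count = count,
		.batch_len = batch_len,
		.flags = I915_EXEC_RENDER | I915_EXEC_IS_PINNED,
	};

	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);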
12 changes: 12 additions & 0 deletions drivers/gpu/drm/i915/i915_irq.c
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	if (!ring->get_seqno)
 		return NULL;
 
+	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+		u32 acthd = I915_READ(ACTHD);
+
+		if (WARN_ON(ring->id != RCS))
+			return NULL;
+
+		obj = ring->private;
+		if (acthd >= obj->gtt_offset &&
+		    acthd < obj->gtt_offset + obj->base.size)
+			return i915_error_object_create(dev_priv, obj);
+	}
+
 	seqno = ring->get_seqno(ring, false);
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		if (obj->ring != ring)
4 changes: 3 additions & 1 deletion drivers/gpu/drm/i915/i915_reg.h
@@ -517,6 +517,7 @@
  * the enables for writing to the corresponding low bit.
  */
 #define _3D_CHICKEN	0x02084
+#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB	(1 << 10)
 #define _3D_CHICKEN2	0x0208c
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
@@ -532,7 +533,8 @@
 # define MI_FLUSH_ENABLE				(1 << 12)
 
 #define GEN6_GT_MODE	0x20d0
-#define   GEN6_GT_MODE_HI				(1 << 9)
+#define   GEN6_GT_MODE_HI			(1 << 9)
+#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE	(1 << 5)
 
 #define GFX_MODE	0x02520
 #define GFX_MODE_GEN7	0x0229c
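
The code that actually sets these two chicken bits lands in one of the
files this page did not load (presumably gen6_init_clock_gating in
intel_pm.c). A hedged sketch of how such workarounds are typically
latched at init, using the masked-bit convention these registers require:

	/* WaDisableHiZPlanesWhenMSAAEnabled */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch: snb GT1 only */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));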
23 changes: 19 additions & 4 deletions drivers/gpu/drm/i915/intel_display.c
@@ -8144,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 			DRM_DEBUG_KMS("encoder changed, full mode switch\n");
 			config->mode_changed = true;
 		}
-
-		/* Disable all disconnected encoders. */
-		if (connector->base.status == connector_status_disconnected)
-			connector->new_encoder = NULL;
 	}
 	/* connector->new_encoder is now updated for all connectors. */
 
@@ -9167,6 +9163,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 	 * the crtc fixup. */
 }
 
+static void i915_redisable_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 vga_reg;
+
+	if (HAS_PCH_SPLIT(dev))
+		vga_reg = CPU_VGACNTRL;
+	else
+		vga_reg = VGACNTRL;
+
+	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+		I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+		POSTING_READ(vga_reg);
+	}
+}
+
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
 void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -9275,6 +9288,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 			intel_set_mode(&crtc->base, &crtc->base.mode,
 				       crtc->base.x, crtc->base.y, crtc->base.fb);
 		}
+
+		i915_redisable_vga(dev);
 	} else {
 		intel_modeset_update_staged_output_state(dev);
 	}
[Diff for the remaining 5 of the 14 changed files did not load on this page.]
