Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  245
1 file changed, 24 insertions, 221 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index d70ee0df5c13..1d38a8b2f2ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -34,51 +34,6 @@
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
-/**
- * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
- * @res: The resource
- */
-void vmw_resource_mob_attach(struct vmw_resource *res)
-{
-        struct vmw_buffer_object *backup = res->backup;
-        struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
-
-        lockdep_assert_held(&backup->base.resv->lock.base);
-        res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
-                res->func->prio;
-
-        while (*new) {
-                struct vmw_resource *this =
-                        container_of(*new, struct vmw_resource, mob_node);
-
-                parent = *new;
-                new = (res->backup_offset < this->backup_offset) ?
-                        &((*new)->rb_left) : &((*new)->rb_right);
-        }
-
-        rb_link_node(&res->mob_node, parent, new);
-        rb_insert_color(&res->mob_node, &backup->res_tree);
-
-        vmw_bo_prio_add(backup, res->used_prio);
-}
-
-/**
- * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
- * @res: The resource
- */
-void vmw_resource_mob_detach(struct vmw_resource *res)
-{
-        struct vmw_buffer_object *backup = res->backup;
-
-        lockdep_assert_held(&backup->base.resv->lock.base);
-        if (vmw_resource_mob_attached(res)) {
-                rb_erase(&res->mob_node, &backup->res_tree);
-                RB_CLEAR_NODE(&res->mob_node);
-                vmw_bo_prio_del(backup, res->used_prio);
-        }
-}
-
-
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
         kref_get(&res->kref);
@@ -125,7 +80,7 @@ static void vmw_resource_release(struct kref *kref)
                 struct ttm_buffer_object *bo = &res->backup->base;
 
                 ttm_bo_reserve(bo, false, false, NULL);
-                if (vmw_resource_mob_attached(res) &&
+                if (!list_empty(&res->mob_head) &&
                     res->func->unbind != NULL) {
                         struct ttm_validate_buffer val_buf;
 
@@ -134,11 +89,7 @@ static void vmw_resource_release(struct kref *kref)
                         res->func->unbind(res, false, &val_buf);
                 }
                 res->backup_dirty = false;
-                vmw_resource_mob_detach(res);
-                if (res->dirty)
-                        res->func->dirty_free(res);
-                if (res->coherent)
-                        vmw_bo_dirty_release(res->backup);
+                list_del_init(&res->mob_head);
                 ttm_bo_unreserve(bo);
                 vmw_bo_unreference(&res->backup);
         }
@@ -220,17 +171,14 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
         res->res_free = res_free;
         res->dev_priv = dev_priv;
         res->func = func;
-        RB_CLEAR_NODE(&res->mob_node);
         INIT_LIST_HEAD(&res->lru_head);
+        INIT_LIST_HEAD(&res->mob_head);
         INIT_LIST_HEAD(&res->binding_head);
         res->id = -1;
         res->backup = NULL;
         res->backup_offset = 0;
         res->backup_dirty = false;
         res->res_dirty = false;
-        res->coherent = false;
-        res->used_prio = 3;
-        res->dirty = NULL;
         if (delay_id)
                 return 0;
         else
@@ -395,8 +343,7 @@ out_no_bo:
  * should be retried once resources have been freed up.
  */
 static int vmw_resource_do_validate(struct vmw_resource *res,
-                                    struct ttm_validate_buffer *val_buf,
-                                    bool dirtying)
+                                    struct ttm_validate_buffer *val_buf)
 {
         int ret = 0;
         const struct vmw_res_func *func = res->func;
@@ -408,47 +355,14 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
         }
 
         if (func->bind &&
-            ((func->needs_backup && !vmw_resource_mob_attached(res) &&
+            ((func->needs_backup && list_empty(&res->mob_head) &&
               val_buf->bo != NULL) ||
              (!func->needs_backup && val_buf->bo != NULL))) {
                 ret = func->bind(res, val_buf);
                 if (unlikely(ret != 0))
                         goto out_bind_failed;
                 if (func->needs_backup)
-                        vmw_resource_mob_attach(res);
-        }
-
-        /*
-         * Handle the case where the backup mob is marked coherent but
-         * the resource isn't.
-         */
-        if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
-            !res->coherent) {
-                if (res->backup->dirty && !res->dirty) {
-                        ret = func->dirty_alloc(res);
-                        if (ret)
-                                return ret;
-                } else if (!res->backup->dirty && res->dirty) {
-                        func->dirty_free(res);
-                }
-        }
-
-        /*
-         * Transfer the dirty regions to the resource and update
-         * the resource.
-         */
-        if (res->dirty) {
-                if (dirtying && !res->res_dirty) {
-                        pgoff_t start = res->backup_offset >> PAGE_SHIFT;
-                        pgoff_t end = __KERNEL_DIV_ROUND_UP
-                                (res->backup_offset + res->backup_size,
-                                 PAGE_SIZE);
-
-                        vmw_bo_dirty_unmap(res->backup, start, end);
-                }
-
-                vmw_bo_dirty_transfer_to_res(res);
-                return func->dirty_sync(res);
+                        list_add_tail(&res->mob_head, &res->backup->res_list);
         }
 
         return 0;
@@ -488,29 +402,19 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 
         if (switch_backup && new_backup != res->backup) {
                 if (res->backup) {
-                        vmw_resource_mob_detach(res);
-                        if (res->coherent)
-                                vmw_bo_dirty_release(res->backup);
+                        lockdep_assert_held(&res->backup->base.resv->lock.base);
+                        list_del_init(&res->mob_head);
                         vmw_bo_unreference(&res->backup);
                 }
 
                 if (new_backup) {
                         res->backup = vmw_bo_reference(new_backup);
-
-                        /*
-                         * The validation code should already have added a
-                         * dirty tracker here.
-                         */
-                        WARN_ON(res->coherent && !new_backup->dirty);
-
-                        vmw_resource_mob_attach(res);
+                        lockdep_assert_held(&new_backup->base.resv->lock.base);
+                        list_add_tail(&res->mob_head, &new_backup->res_list);
                 } else {
                         res->backup = NULL;
                 }
-        } else if (switch_backup && res->coherent) {
-                vmw_bo_dirty_release(res->backup);
         }
-
         if (switch_backup)
                 res->backup_offset = new_backup_offset;
 
@@ -565,7 +469,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
         if (unlikely(ret != 0))
                 goto out_no_reserve;
 
-        if (res->func->needs_backup && !vmw_resource_mob_attached(res))
+        if (res->func->needs_backup && list_empty(&res->mob_head))
                 return 0;
 
         backup_dirty = res->backup_dirty;
@@ -670,11 +574,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                 return ret;
 
         if (unlikely(func->unbind != NULL &&
-                     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
+                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                 ret = func->unbind(res, res->res_dirty, &val_buf);
                 if (unlikely(ret != 0))
                         goto out_no_unbind;
-                vmw_resource_mob_detach(res);
+                list_del_init(&res->mob_head);
         }
         ret = func->destroy(res);
         res->backup_dirty = true;
@@ -691,7 +595,6 @@ out_no_unbind:
  * to the device.
  * @res: The resource to make visible to the device.
  * @intr: Perform waits interruptible if possible.
- * @dirtying: Pending GPU operation will dirty the resource
  *
 * On succesful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
@@ -701,8 +604,7 @@ out_no_unbind:
  * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
  * on failure.
  */
-int vmw_resource_validate(struct vmw_resource *res, bool intr,
-                          bool dirtying)
+int vmw_resource_validate(struct vmw_resource *res, bool intr)
 {
         int ret;
         struct vmw_resource *evict_res;
@@ -719,7 +621,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
         if (res->backup)
                 val_buf.bo = &res->backup->base;
         do {
-                ret = vmw_resource_do_validate(res, &val_buf, dirtying);
+                ret = vmw_resource_do_validate(res, &val_buf);
                 if (likely(ret != -EBUSY))
                         break;
 
@@ -758,7 +660,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
         if (unlikely(ret != 0))
                 goto out_no_validate;
         else if (!res->func->needs_backup && res->backup) {
-                WARN_ON_ONCE(vmw_resource_mob_attached(res));
+                list_del_init(&res->mob_head);
                 vmw_bo_unreference(&res->backup);
         }
 
@@ -782,23 +684,22 @@ out_no_validate:
  */
 void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 {
+
+        struct vmw_resource *res, *next;
         struct ttm_validate_buffer val_buf = {
                 .bo = &vbo->base,
                 .num_shared = 0
         };
 
         lockdep_assert_held(&vbo->base.resv->lock.base);
-        while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
-                struct rb_node *node = vbo->res_tree.rb_node;
-                struct vmw_resource *res =
-                        container_of(node, struct vmw_resource, mob_node);
-
-                if (!WARN_ON_ONCE(!res->func->unbind))
-                        (void) res->func->unbind(res, res->res_dirty, &val_buf);
+        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+                if (!res->func->unbind)
+                        continue;
+                (void) res->func->unbind(res, res->res_dirty, &val_buf);
                 res->backup_dirty = true;
                 res->res_dirty = false;
-                vmw_resource_mob_detach(res);
+                list_del_init(&res->mob_head);
         }
 
         (void) ttm_bo_wait(&vbo->base, false, false);
@@ -1019,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                         /* Do we really need to pin the MOB as well? */
                         vmw_bo_pin_reserved(vbo, true);
                 }
-                ret = vmw_resource_validate(res, interruptible, true);
+                ret = vmw_resource_validate(res, interruptible);
                 if (vbo)
                         ttm_bo_unreserve(&vbo->base);
                 if (ret)
@@ -1079,101 +980,3 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
 {
         return res->func->res_type;
 }
-
-/**
- * vmw_resource_update_dirty - Update a resource's dirty tracker with a
- * sequential range of touched backing store memory.
- * @res: The resource.
- * @start: The first page touched.
- * @end: The last page touched + 1.
- */
-void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
-                               pgoff_t end)
-{
-        if (res->dirty)
-                res->func->dirty_range_add(res, start << PAGE_SHIFT,
-                                           end << PAGE_SHIFT);
-}
-
-/**
- * vmw_resources_clean - Clean resources intersecting a mob range
- * @vbo: The mob buffer object
- * @start: The mob page offset starting the range
- * @end: The mob page offset ending the range
- * @num_prefault: Returns how many pages including the first have been
- * cleaned and are ok to prefault
- */
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
-                        pgoff_t end, pgoff_t *num_prefault)
-{
-        struct rb_node *cur = vbo->res_tree.rb_node;
-        struct vmw_resource *found = NULL;
-        unsigned long res_start = start << PAGE_SHIFT;
-        unsigned long res_end = end << PAGE_SHIFT;
-        unsigned long last_cleaned = 0;
-
-        /*
-         * Find the resource with lowest backup_offset that intersects the
-         * range.
-         */
-        while (cur) {
-                struct vmw_resource *cur_res =
-                        container_of(cur, struct vmw_resource, mob_node);
-
-                if (cur_res->backup_offset >= res_end) {
-                        cur = cur->rb_left;
-                } else if (cur_res->backup_offset + cur_res->backup_size <=
-                           res_start) {
-                        cur = cur->rb_right;
-                } else {
-                        found = cur_res;
-                        cur = cur->rb_left;
-                        /* Continue to look for resources with lower offsets */
-                }
-        }
-
-        /*
-         * In order of increasing backup_offset, clean dirty resorces
-         * intersecting the range.
-         */
-        while (found) {
-                if (found->res_dirty) {
-                        int ret;
-
-                        if (!found->func->clean)
-                                return -EINVAL;
-
-                        ret = found->func->clean(found);
-                        if (ret)
-                                return ret;
-
-                        found->res_dirty = false;
-                }
-                last_cleaned = found->backup_offset + found->backup_size;
-                cur = rb_next(&found->mob_node);
-                if (!cur)
-                        break;
-
-                found = container_of(cur, struct vmw_resource, mob_node);
-                if (found->backup_offset >= res_end)
-                        break;
-        }
-
-        /*
-         * Set number of pages allowed prefaulting and fence the buffer object
-         */
-        *num_prefault = 1;
-        if (last_cleaned > res_start) {
-                struct ttm_buffer_object *bo = &vbo->base;
-
-                *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
-                                                      PAGE_SIZE);
-                vmw_bo_fence_single(bo, NULL);
-                if (bo->moving)
-                        dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get
-                        (reservation_object_get_excl(bo->resv));
-        }
-
-        return 0;
-}
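Net effect of the change, for readers skimming the diff: resource-to-MOB attachment stops being an rb-tree keyed by backup_offset (mob_node in the buffer object's res_tree, which the deleted vmw_resources_clean() walked in offset order) and goes back to plain list membership. A resource is attached by list_add_tail() of its mob_head onto the buffer object's res_list, detached by list_del_init(), and list_empty(&res->mob_head) serves as the "is attached" test; the dirty-tracking state (res->dirty, res->coherent, used_prio) and the dirtying parameter of vmw_resource_validate() are removed along with it. The sketch below is only an illustration of that list-membership pattern in self-contained userspace C: the list helpers are re-implementations of the kernel ones, and struct mob_object / struct res_object are hypothetical stand-ins, not the driver's vmw_buffer_object / vmw_resource.

#include <assert.h>
#include <stdio.h>

/* Minimal circular doubly-linked list, shaped like the kernel's list_head. */
struct list_head {
        struct list_head *prev, *next;
};

static void list_init(struct list_head *h)
{
        h->prev = h->next = h;
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        list_init(n);               /* re-init so list_empty(n) is true again */
}

/* Hypothetical stand-ins for vmw_buffer_object and vmw_resource. */
struct mob_object {
        struct list_head res_list;  /* resources attached to this MOB */
};

struct res_object {
        struct list_head mob_head;  /* this resource's link into res_list */
};

int main(void)
{
        struct mob_object mob;
        struct res_object res;

        list_init(&mob.res_list);
        list_init(&res.mob_head);

        /* "Attach": the resource becomes a member of the MOB's list. */
        list_add_tail(&res.mob_head, &mob.res_list);
        assert(!list_empty(&res.mob_head));     /* attached */

        /* "Detach": unlink and re-initialize the node. */
        list_del_init(&res.mob_head);
        assert(list_empty(&res.mob_head));      /* detached */

        printf("attach/detach tracked purely by list membership\n");
        return 0;
}

The detach side uses list_del_init() rather than list_del() so the node is re-initialized to point at itself, which is what lets list_empty() on the resource's own mob_head keep working as the attachment test that the restored code relies on in vmw_resource_do_validate(), vmw_resource_check_buffer() and vmw_resource_do_evict().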