Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c       44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  34
2 files changed, 36 insertions, 42 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cd266a067773..326a3d13a829 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1087,47 +1087,35 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
/**
- * ttm_bo_wait - wait for buffer idle.
+ * ttm_bo_wait_ctx - wait for buffer idle.
*
* @bo: The buffer object.
- * @interruptible: Use interruptible wait.
- * @no_wait: Return immediately if buffer is busy.
+ * @ctx: defines how to wait
*
- * This function must be called with the bo::mutex held, and makes
- * sure any previous rendering to the buffer is completed.
- * Note: It might be necessary to block validations before the
- * wait by reserving the buffer.
- * Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTARTSYS if interrupted by a signal.
+ * Waits for the buffer to be idle. Used timeout depends on the context.
+ * Returns -EBUSY if wait timed out, -ERESTARTSYS if interrupted by a signal or
+ * zero on success.
*/
-int ttm_bo_wait(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait)
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
- long timeout = 15 * HZ;
+ long ret;
- if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
+ if (ctx->no_wait_gpu) {
+ if (dma_resv_test_signaled(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible, timeout);
- if (timeout < 0)
- return timeout;
-
- if (timeout == 0)
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ ctx->interruptible, 15 * HZ);
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret == 0))
return -EBUSY;
-
return 0;
}
-EXPORT_SYMBOL(ttm_bo_wait);
-
-int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
-{
- return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
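
With the wrapper gone, callers select the wait behaviour through struct ttm_operation_ctx rather than the old interruptible/no_wait flag pair. A minimal sketch of a driver-side caller, assuming the usual <drm/ttm/ttm_bo.h> declarations (the helper below is hypothetical, not part of this patch):

	/* Hypothetical caller: bo is assumed to already be reserved. */
	static int example_wait_idle(struct ttm_buffer_object *bo, bool nonblock)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,		/* allow -ERESTARTSYS on signals */
			.no_wait_gpu = nonblock,	/* return -EBUSY instead of sleeping */
		};

		return ttm_bo_wait_ctx(bo, &ctx);	/* 0 once the bookkeep fences signal */
	}
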
@@ -1135,7 +1123,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
{
struct ttm_place place;
bool locked;
- int ret;
+ long ret;
/*
* While the bo may already reside in SYSTEM placement, set
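
Note that ret in ttm_bo_swapout() widens from int to long, matching the return type of dma_resv_wait_timeout(): a long holding the remaining timeout in jiffies on success, 0 if the wait timed out, or a negative error code. A short sketch of how that value is usually folded back into an errno-style result, assuming the same 15 second bound used elsewhere in this patch:

	static int example_wait_idle_bounded(struct ttm_buffer_object *bo)
	{
		long lret;

		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
					     false, 15 * HZ);
		if (lret == 0)
			return -EBUSY;	/* timed out, fences still pending */
		if (lret < 0)
			return lret;	/* negative errno from the wait */
		return 0;		/* all bookkeep fences signalled, buffer idle */
	}
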
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fee7c20775c0..fca8b0e0e30a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -29,6 +29,8 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+#include <linux/vmalloc.h>
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
@@ -125,9 +127,8 @@ EXPORT_SYMBOL(ttm_move_memcpy);
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
+ * @ctx: operation context
+ * @dst_mem: struct ttm_resource indicating where to move.
*
* Fallback move function for a mappable buffer object in mappable memory.
* The function will, if successful,
@@ -279,8 +280,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
/**
* ttm_io_prot
*
- * bo: ttm buffer object
- * res: ttm resource object
+ * @bo: ttm buffer object
+ * @res: ttm resource object
* @tmp: Page protection flag for a normal, cached mapping.
*
* Utility function that returns the pgprot_t that should be used for
@@ -547,9 +548,13 @@ EXPORT_SYMBOL(ttm_bo_vunmap);
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
bool dst_use_tt)
{
- int ret;
- ret = ttm_bo_wait(bo, false, false);
- if (ret)
+ long ret;
+
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, 15 * HZ);
+ if (ret == 0)
+ return -EBUSY;
+ if (ret < 0)
return ret;
if (!dst_use_tt)
@@ -619,7 +624,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
}
/**
- * ttm_bo_move_accel_cleanup.
+ * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
*
* @bo: A pointer to a struct ttm_buffer_object.
* @fence: A fence object that signals when moving is complete.
@@ -663,7 +668,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
/**
- * ttm_bo_move_sync_cleanup.
+ * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
*
* @bo: A pointer to a struct ttm_buffer_object.
* @new_mem: struct ttm_resource indicating where to move.
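
The move helpers documented above are normally driven from a driver's ttm_device_funcs.move callback: a hardware copy that produced a dma_fence is finished with ttm_bo_move_accel_cleanup(), while a driver without a usable copy engine can fall back to the CPU path. A much-simplified, hypothetical callback (example_hw_copy() and the surrounding policy are assumptions; only the TTM helper calls are taken from the API documented here):

	static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
				   struct ttm_operation_ctx *ctx,
				   struct ttm_resource *new_mem,
				   struct ttm_place *hop)
	{
		struct dma_fence *fence;
		int ret;

		fence = example_hw_copy(bo, new_mem);	/* driver-specific blit job */
		if (fence) {
			ret = ttm_bo_move_accel_cleanup(bo, fence, evict,
							true /* pipeline */, new_mem);
			dma_fence_put(fence);
			return ret;
		}

		/* No copy engine available: plain CPU memcpy move. */
		return ttm_bo_move_memcpy(bo, ctx, new_mem);
	}
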
@@ -710,8 +715,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
return ret;
/* If already idle, no need for ghost object dance. */
- ret = ttm_bo_wait(bo, false, true);
- if (ret != -EBUSY) {
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
if (!bo->ttm) {
/* See comment below about clearing. */
ret = ttm_tt_create(bo, true);
@@ -748,8 +752,10 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
- if (ret)
- ttm_bo_wait(bo, false, false);
+ if (ret) {
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ }
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
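
For reference, the three wait variants used throughout this patch correspond to the old ttm_bo_wait() flag combinations; a rough sketch of the mapping onto the dma_resv interface (not code from the patch itself):

	/* old ttm_bo_wait(bo, false, true): poll only, never sleep */
	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
		return -EBUSY;

	/* old ttm_bo_wait(bo, false, false): bounded, uninterruptible 15s wait */
	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);

	/* OOM last resort above: wait without a bound rather than fail */
	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);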