From 647371a6609ddf8700fe151af72e32daebb9baa7 Mon Sep 17 00:00:00 2001
From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Date: Tue, 17 Jan 2023 10:27:19 +0100
Subject: accel/ivpu: Add GEM buffer object management

Adds three types of GEM-based BOs for the VPU:
  - shmem
  - internal
  - prime

All types are implemented as struct ivpu_bo, based on
struct drm_gem_object. A VPU address is allocated when the buffer is
created, except for imported prime buffers, which allocate it in the
BO_INFO IOCTL because the gem_prime_import callback lacks a file_priv
argument. Internal buffers are pinned on creation; the remaining buffer
types can be pinned on demand (in the SUBMIT IOCTL). The buffer's VPU
address, allocated pages and mappings are released when the buffer is
destroyed. An eviction mechanism is planned for future versions.

Add two new IOCTLs: BO_CREATE, BO_INFO

Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20230117092723.60441-4-jacek.lawrynowicz@linux.intel.com
---
 drivers/accel/ivpu/ivpu_gem.h | 126 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 126 insertions(+)
 create mode 100644 drivers/accel/ivpu/ivpu_gem.h

(limited to 'drivers/accel/ivpu/ivpu_gem.h')

diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
new file mode 100644
index 000000000000..1891c90702c2
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+#ifndef __IVPU_GEM_H__
+#define __IVPU_GEM_H__
+
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+
+struct dma_buf;
+struct ivpu_bo_ops;
+struct ivpu_file_priv;
+
+struct ivpu_bo {
+	struct drm_gem_object base;
+	const struct ivpu_bo_ops *ops;
+
+	struct ivpu_mmu_context *ctx;
+	struct list_head ctx_node;
+	struct drm_mm_node mm_node;
+
+	struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
+	struct sg_table *sgt;
+	struct page **pages;
+	bool mmu_mapped;
+
+	void *kvaddr;
+	u64 vpu_addr;
+	u32 handle;
+	u32 flags;
+	uintptr_t user_ptr;
+};
+
+enum ivpu_bo_type {
+	IVPU_BO_TYPE_SHMEM = 1,
+	IVPU_BO_TYPE_INTERNAL,
+	IVPU_BO_TYPE_PRIME,
+};
+
+struct ivpu_bo_ops {
+	enum ivpu_bo_type type;
+	const char *name;
+	int (*alloc_pages)(struct ivpu_bo *bo);
+	void (*free_pages)(struct ivpu_bo *bo);
+	int (*map_pages)(struct ivpu_bo *bo);
+	void (*unmap_pages)(struct ivpu_bo *bo);
+};
+
+int ivpu_bo_pin(struct ivpu_bo *bo);
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
+void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
+
+int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
+{
+	return container_of(obj, struct ivpu_bo, base);
+}
+
+static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
+{
+	if (offset > bo->base.size || !bo->pages)
+		return NULL;
+
+	return bo->pages[offset / PAGE_SIZE];
+}
+
+static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
+{
+	return bo->flags & DRM_IVPU_BO_CACHE_MASK;
+}
+
+static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+{
+	return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+}
+
+static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
+{
+	if (bo->flags & DRM_IVPU_BO_WC)
+		return pgprot_writecombine(prot);
+
+	if (bo->flags & DRM_IVPU_BO_UNCACHED)
+		return pgprot_noncached(prot);
+
+	return prot;
+}
+
+static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+{
+	return to_ivpu_device(bo->base.dev);
+}
+
+static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
+{
+	if (vpu_addr < bo->vpu_addr)
+		return NULL;
+
+	if (vpu_addr >= (bo->vpu_addr + bo->base.size))
+		return NULL;
+
+	return bo->kvaddr + (vpu_addr - bo->vpu_addr);
+}
+
+static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
+{
+	if (cpu_addr < bo->kvaddr)
+		return 0;
+
+	if (cpu_addr >= (bo->kvaddr + bo->base.size))
+		return 0;
+
+	return bo->vpu_addr + (cpu_addr - bo->kvaddr);
+}
+
+#endif /* __IVPU_GEM_H__ */
-- cgit
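[Editor's note] The ivpu_bo_ops table and the "Protects: pages, sgt, mmu_mapped" lock comment in the header above suggest the shape of the on-demand pin path mentioned in the commit message. The following is a minimal, hypothetical sketch of that flow; the real ivpu_bo_pin() lives in ivpu_gem.c, and ivpu_mmu_context_map_sgt() is an assumed name for the MMU-context mapping helper, so details may differ:

/*
 * Hypothetical sketch of the on-demand pin path, inferred from the
 * ivpu_bo_ops table and the bo->lock comment. Not the actual
 * ivpu_bo_pin(); ivpu_mmu_context_map_sgt() is an assumed helper.
 */
static int ivpu_bo_pin_sketch(struct ivpu_bo *bo)
{
	int ret = 0;

	mutex_lock(&bo->lock);

	if (!bo->pages) {
		/* Back the BO with physical pages (shmem, internal, prime). */
		ret = bo->ops->alloc_pages(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->sgt) {
		/* Build the sg_table and DMA-map the pages. */
		ret = bo->ops->map_pages(bo);
		if (ret)
			goto unlock;
	}

	if (!bo->mmu_mapped) {
		/* Map the pages at bo->vpu_addr in the buffer's MMU context. */
		ret = ivpu_mmu_context_map_sgt(ivpu_bo_to_vdev(bo), bo->ctx,
					       bo->vpu_addr, bo->sgt);
		if (ret)
			goto unlock;
		bo->mmu_mapped = true;
	}

unlock:
	mutex_unlock(&bo->lock);
	return ret;
}

Guarding pages, sgt and mmu_mapped with bo->lock makes the pin idempotent, so repeated SUBMITs of the same buffer skip the already-completed steps instead of allocating twice.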
From cd7272215c44676dba236491941c6c406701cc5e Mon Sep 17 00:00:00 2001
From: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Date: Tue, 17 Jan 2023 10:27:22 +0100
Subject: accel/ivpu: Add command buffer submission logic

Each user context has two command queues: one for the compute engine
and one for the copy engine. Command queues are allocated and
registered in the device when the first job (command buffer) is
submitted from user space to the VPU device.

Userspace provides a list of GEM buffer object handles to submit to
the VPU. The driver resolves the buffer handles, pins physical memory
if needed, increments the ref count for each buffer and stores
pointers to the buffer objects in the ivpu_job objects that track jobs
submitted to the device. The VPU signals job completion with an
asynchronous message that contains the job id passed to the firmware
when the job was submitted.

Currently, the driver implements simple scheduling logic where jobs
submitted from user space are immediately pushed to the VPU device
command queues. In the future, it will be extended to use
hardware-based scheduling and/or drm_sched.

Co-developed-by: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
Signed-off-by: Andrzej Kacprowski <andrzej.kacprowski@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20230117092723.60441-7-jacek.lawrynowicz@linux.intel.com
---
 drivers/accel/ivpu/ivpu_gem.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/accel/ivpu/ivpu_gem.h')

diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index 1891c90702c2..6b0ceda5f253 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -30,6 +30,7 @@ struct ivpu_bo {
 	u32 handle;
 	u32 flags;
 	uintptr_t user_ptr;
+	u32 job_status;
 };
 
 enum ivpu_bo_type {
-- cgit
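[Editor's note] As a rough illustration of the handle-resolution step described in this commit message, here is a hypothetical sketch built only from declarations in ivpu_gem.h plus the core GEM helper drm_gem_object_lookup(). The bos array and bo_count field on struct ivpu_job are assumed for illustration; the real submission code differs and performs full error unwinding:

/*
 * Hypothetical sketch of resolving userspace BO handles at submit
 * time, per the commit message: look up each handle (taking a GEM
 * reference), pin the buffer, and record it in the job. The ivpu_job
 * layout shown is assumed; reference cleanup on the error paths is
 * omitted for brevity.
 */
static int ivpu_job_resolve_bos_sketch(struct drm_file *file,
				       struct ivpu_job *job,
				       const u32 *handles, u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		/* Lookup takes a reference on the GEM object. */
		struct drm_gem_object *obj = drm_gem_object_lookup(file, handles[i]);
		int ret;

		if (!obj)
			return -ENOENT;

		job->bos[i] = to_ivpu_bo(obj);

		/* Pin on demand so the VPU can access the pages. */
		ret = ivpu_bo_pin(job->bos[i]);
		if (ret)
			return ret;
	}
	job->bo_count = count;

	return 0;
}

The held references are what let the job outlive a userspace GEM_CLOSE: the pages stay mapped until job completion drops the last reference.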