| author | Ingo Molnar <mingo@kernel.org> | 2022-11-21 22:54:36 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2022-11-21 23:01:51 +0100 |
| commit | 0ce096db719ebaf46d4faf93e1ed1341c1853919 (patch) | |
| tree | 41c8826034eb5b430adf97d43fb0f2dce78325ba /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |
| parent | 2d08a893b87cf9b2f9dbb3afaff60ca4530d55a2 (diff) | |
| parent | eb7081409f94a9a8608593d0fb63a1aa3d6f95d8 (diff) | |
| download | linux-0ce096db719ebaf46d4faf93e1ed1341c1853919.tar.gz | |
Merge tag 'v6.1-rc6' into x86/core, to resolve conflicts
Resolve conflicts between these commits in arch/x86/kernel/asm-offsets.c:
# upstream:
debc5a1ec0d1 ("KVM: x86: use a separate asm-offsets.c file")
# retbleed work in x86/core:
5d8213864ade ("x86/retbleed: Add SKL return thunk")
... and these commits in include/linux/bpf.h:
# upstream:
18acb7fac22f ("bpf: Revert ("Fix dispatcher patchable function entry to 5 bytes nop")")
# x86/core commits:
931ab63664f0 ("x86/ibt: Implement FineIBT")
bea75b33895f ("x86/Kconfig: Introduce function padding")
The latter two modify BPF_DISPATCHER_ATTRIBUTES(), which was removed upstream.
Conflicts:
arch/x86/kernel/asm-offsets.c
include/linux/bpf.h
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 41 |
1 file changed, 41 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 83acb7bd80fe..6546e786bf00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
  */
 static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
 {
+	unsigned long flags;
+	spinlock_t *lock;
+
+	/*
+	 * Workaround to stop racing between the fence signaling and handling
+	 * the cb. The lock is static after initially setting it up, just make
+	 * sure that the dma_fence structure isn't freed up.
+	 */
+	rcu_read_lock();
+	lock = vm->last_tlb_flush->lock;
+	rcu_read_unlock();
+
+	spin_lock_irqsave(lock, flags);
+	spin_unlock_irqrestore(lock, flags);
+
 	return atomic64_read(&vm->tlb_seq);
 }
 
+/*
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+	mutex_lock(&vm->eviction_lock);
+	vm->saved_flags = memalloc_noreclaim_save();
+}
+
+static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+	if (mutex_trylock(&vm->eviction_lock)) {
+		vm->saved_flags = memalloc_noreclaim_save();
+		return true;
+	}
+	return false;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+	memalloc_noreclaim_restore(vm->saved_flags);
+	mutex_unlock(&vm->eviction_lock);
+}
+
 #endif
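
The eviction-lock helpers added at the end of the hunk pair vm->eviction_lock with memalloc_noreclaim_save()/restore(): while the mutex is held the task runs with PF_MEMALLOC set, so allocations cannot enter direct reclaim and therefore cannot recurse into an MMU notifier that would try to take the same lock. A minimal caller sketch follows; the function name is illustrative and not part of this patch:

	/*
	 * Hypothetical example: do eviction-related work without risking
	 * reclaim-FS recursion into an MMU notifier.
	 */
	static void example_do_eviction_work(struct amdgpu_vm *vm)
	{
		amdgpu_vm_eviction_lock(vm);
		/*
		 * Any allocation made here runs with PF_MEMALLOC set and
		 * will not enter direct reclaim.
		 */
		amdgpu_vm_eviction_unlock(vm);
	}

In contexts where blocking on the mutex is undesirable, amdgpu_vm_eviction_trylock() can be used instead; amdgpu_vm_eviction_unlock() restores the caller's previous reclaim flags either way.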