diff options
author | Dongyan Qian <qiandongyan@loongson.cn> | 2023-04-27 20:57:12 +0800 |
---|---|---|
committer | mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> | 2023-05-05 13:10:09 +0000 |
commit | b65c0eed6bc028388d790fe4e30a76770ebb46c4 (patch) | |
tree | 009c86844cc2bba1d449dddbe5c0ae627d156479 | |
parent | 757f502a3b350436877102c3043744e021537c19 (diff) | |
download | edk2-b65c0eed6bc028388d790fe4e30a76770ebb46c4.tar.gz |
BaseSynchronizationLib: Fix LoongArch64 synchronization functions
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4432
There is a return value bug:
The sc.w/sc.d instruction clobbers register t0, so use
register t1 instead to avoid corrupting the value held in t0.
Adjust the alignment check so that the pointer is verified against UINT16 alignment.
Optimize the SyncIncrement and SyncDecrement functions.
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>
Cc: Chao Li <lichao@loongson.cn>
Signed-off-by: Dongyan Qian <qiandongyan@loongson.cn>
Reviewed-by: Chao Li <lichao@loongson.cn>
-rw-r--r-- | MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S | 30 | ||||
-rw-r--r-- | MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c | 2 |
2 files changed, 13 insertions, 19 deletions
diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S index fdd50c54b5..03865bf2c9 100644 --- a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S +++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S @@ -53,9 +53,9 @@ ASM_PFX(AsmInternalSyncCompareExchange32): 1:
ll.w $t0, $a0, 0x0
bne $t0, $a1, 2f
- move $t0, $a2
- sc.w $t0, $a0, 0x0
- beqz $t0, 1b
+ move $t1, $a2
+ sc.w $t1, $a0, 0x0
+ beqz $t1, 1b
b 3f
2:
dbar 0
@@ -76,9 +76,9 @@ ASM_PFX(AsmInternalSyncCompareExchange64): 1:
ll.d $t0, $a0, 0x0
bne $t0, $a1, 2f
- move $t0, $a2
- sc.d $t0, $a0, 0x0
- beqz $t0, 1b
+ move $t1, $a2
+ sc.d $t1, $a0, 0x0
+ beqz $t1, 1b
b 3f
2:
dbar 0
@@ -94,13 +94,10 @@ AsmInternalSyncIncrement ( )
**/
ASM_PFX(AsmInternalSyncIncrement):
- move $t0, $a0
- dbar 0
- ld.w $t1, $t0, 0x0
- li.w $t2, 1
- amadd.w $t1, $t2, $t0
+ li.w $t0, 1
+ amadd.w $zero, $t0, $a0
- ld.w $a0, $t0, 0x0
+ ld.w $a0, $a0, 0
jirl $zero, $ra, 0
/**
@@ -111,12 +108,9 @@ AsmInternalSyncDecrement ( )
**/
ASM_PFX(AsmInternalSyncDecrement):
- move $t0, $a0
- dbar 0
- ld.w $t1, $t0, 0x0
- li.w $t2, -1
- amadd.w $t1, $t2, $t0
+ li.w $t0, -1
+ amadd.w $zero, $t0, $a0
- ld.w $a0, $t0, 0x0
+ ld.w $a0, $a0, 0
jirl $zero, $ra, 0
.end
diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c index d696c8ce10..6baf841c9b 100644 --- a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c +++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c @@ -81,7 +81,7 @@ InternalSyncCompareExchange16 ( volatile UINT32 *Ptr32;
/* Check that ptr is naturally aligned */
- ASSERT (!((UINT64)Value & (sizeof (Value) - 1)));
+ ASSERT (!((UINT64)Value & (sizeof (UINT16) - 1)));
/* Mask inputs to the correct size. */
Mask = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));
|