Mirror of https://github.com/RPCS3/llvm-mirror.git, synced 2025-01-31 20:51:52 +01:00, at commit 7118b8a8dd.
[Commit message; title truncated — ends in "…optimizations".] As discussed in the thread http://lists.llvm.org/pipermail/llvm-dev/2020-May/141838.html, some bit-field access widths can be reduced by ReduceLoadOpStoreWidth and some cannot. If two accesses are very close together and the first access's width is reduced while the second's is not, then the wide load of the second access will be stalled for a long time. This patch adds command-line options to guard ReduceLoadOpStoreWidth and ShrinkLoadReplaceStoreWithStore, so users can disable these store-width-reduction optimizations. Differential Revision: https://reviews.llvm.org/D80745
31 lines · 863 B · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; The -combiner-reduce-load-op-store-width=false flag disables the
; DAGCombiner's ReduceLoadOpStoreWidth transform, so load/op/store
; sequences below must keep their original access width (see D80745).
; RUN: llc < %s -mtriple=x86_64-- -combiner-reduce-load-op-store-width=false | FileCheck %s
|
|
|
|
; Single-i32 container; both test functions bitcast a pointer to it
; and perform a read-modify-write on the low bit.
%struct.bit_fields = type { i32 }
|
|
|
|
; clear_b1: clears bit 0 of a 32-bit field. With store-width reduction
; disabled, the CHECK lines require a full 32-bit RMW (andl on a dword)
; rather than a narrowed byte access (andb).
define void @clear_b1(%struct.bit_fields* %ptr) {
|
|
; CHECK-LABEL: clear_b1:
|
|
; CHECK: # %bb.0: # %entry
|
|
; CHECK-NEXT: andl $-2, (%rdi)
|
|
; CHECK-NEXT: retq
|
|
entry:
|
|
  %0 = bitcast %struct.bit_fields* %ptr to i32*
|
|
  %bf.load = load i32, i32* %0
|
|
  ; Mask off bit 0 (-2 == 0xFFFFFFFE), then store the full width back.
  %bf.clear = and i32 %bf.load, -2
|
|
  store i32 %bf.clear, i32* %0
|
|
  ret void
|
|
}
|
|
|
|
; clear16: same pattern as clear_b1 but at 16-bit width. The CHECK lines
; require the 16-bit RMW (andw on a word) to be preserved — i.e. no
; narrowing of the load/and/store to a byte access.
define void @clear16(%struct.bit_fields* %ptr) {
|
|
; CHECK-LABEL: clear16:
|
|
; CHECK: # %bb.0: # %entry
|
|
; CHECK-NEXT: andw $-2, (%rdi)
|
|
; CHECK-NEXT: retq
|
|
entry:
|
|
  %0 = bitcast %struct.bit_fields* %ptr to i16*
|
|
  %bf.load = load i16, i16* %0
|
|
  ; Mask off bit 0 of the 16-bit value, then store the full width back.
  %bf.clear = and i16 %bf.load, -2
|
|
  store i16 %bf.clear, i16* %0
|
|
  ret void
|
|
}
|