mirror of https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-26 04:32:44 +01:00
0cfb2248a6
Commit 13f6c81c5d9a ("[BPF] simplify zero extension with MOV_32_64") tried to use MOV_32_64 instructions instead of lshift/rshift instructions for zero extension. This reduces the number of instructions and may help the verifier too. But the same commit also removed the old MOV_32_64 pruning, since that pruning was deemed unsafe: MOV_32_64 does have a side effect, zeroing out the top 32 bits of the destination register. This caused the following failure in the kernel selftest test_cls_redirect.o.

In the linux kernel, we have
  struct __sk_buff {
    __u32 data;
    __u32 data_end;
  };
The compiler will generate a 32bit load for __sk_buff->data and __sk_buff->data_end. But the kernel verifier will actually load an address (a 64bit address on a 64bit kernel) into the result register. In this particular example, the explicit zext was not optimized away, destroyed the top 32 bits of the address, and the verifier rejected the program:
  w2 = *(u32 *)(r1 + 76)
  ...
  r2 = w2   /* MOV_32_64: this will clear top 32bit */

Currently, if the load and the zext are next to each other, the instruction pattern match can capture this and avoid MOV_32_64; e.g., in BPFInstrInfo.td we have
  def : Pat<(i64 (zextloadi32 ADDRri:$src)),
            (SUBREG_TO_REG (i64 0), (LDW32 ADDRri:$src), sub_32)>;
However, if they are not next to each other, LDW32 and MOV_32_64 are generated, which may cause the above mentioned problem.

The BPF backend already tried to optimize away the pattern
  mov_32_64 + lshift + rshift
but commit 13f6c81c5d9a may generate a mov_32_64 that is not followed by shifts. This patch adds the optimization for a lone mov_32_64 as well.

Differential Revision: https://reviews.llvm.org/D81048
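For readers who want to see roughly how such a peephole works, here is a minimal C++ sketch of the kind of machine-IR rewrite the commit message describes: when a lone MOV_32_64 copies a 32-bit register whose value is really the low half of a 64-bit register, rebuild it as a SUBREG_TO_REG so no real move is emitted and the upper 32 bits (e.g. the address the verifier stored there) are preserved. This is an illustration only, not the code from the patch; the helper name isMovFrom32Def, its simplified check, and the surrounding boilerplate are assumptions (the in-tree pass, for instance, also looks through PHI nodes when deciding whether the source comes from a 32-bit definition).

#include "BPF.h"
#include "BPFInstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Hypothetical helper: true when the 32-bit source of the MOV_32_64 is
// defined as the low half of a 64-bit register (a COPY from sub_32), i.e.
// the upper 32 bits of that 64-bit value must not be clobbered.
static bool isMovFrom32Def(const MachineRegisterInfo &MRI, MachineInstr &Mov) {
  MachineInstr *Def = MRI.getVRegDef(Mov.getOperand(1).getReg());
  return Def && Def->isCopy() &&
         Def->getOperand(1).getSubReg() == BPF::sub_32;
}

// Sketch of the standalone MOV_32_64 elimination described above: rewrite
// "rA = wB" into a SUBREG_TO_REG, which produces no real 32->64 move and
// therefore leaves the upper 32 bits of rA intact.
static bool eliminateZExtSketch(MachineFunction &MF, const BPFInstrInfo *TII) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineInstr *ToErase = nullptr;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      // Erase the instruction marked in the previous iteration so the
      // iterator is never invalidated mid-loop.
      if (ToErase) {
        ToErase->eraseFromParent();
        ToErase = nullptr;
      }
      if (MI.getOpcode() != BPF::MOV_32_64 || !isMovFrom32Def(MRI, MI))
        continue;

      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(BPF::SUBREG_TO_REG),
              MI.getOperand(0).getReg())
          .addImm(0)
          .addReg(MI.getOperand(1).getReg())
          .addImm(BPF::sub_32);
      ToErase = &MI;
      Changed = true;
    }
  }
  if (ToErase)
    ToErase->eraseFromParent();
  return Changed;
}

With a rewrite along these lines, the compiled test below no longer contains an explicit rX = wX move, which is exactly what the CHECK-NOT line at the bottom of the file verifies.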
56 lines
1.4 KiB
LLVM
; RUN: llc < %s -march=bpf -mattr=+alu32 -verify-machineinstrs | FileCheck %s
;
; Source:
;   struct __sk_buff {
;     unsigned data;
;     unsigned data_end;
;   };
;
;   void * test(int flag, struct __sk_buff *skb)
;   {
;     void *p;
;
;     if (flag) {
;       p = (void *)(long)skb->data;
;       __asm__ __volatile__("": : :"memory");
;     } else {
;       p = (void *)(long)skb->data_end;
;       __asm__ __volatile__("": : :"memory");
;     }
;
;     return p;
;   }
; Compilation flag:
;   clang -target bpf -O2 -S -emit-llvm t.c

%struct.__sk_buff = type { i32, i32 }

define dso_local i8* @test(i32 %flag, %struct.__sk_buff* nocapture readonly %skb) local_unnamed_addr {
entry:
  %tobool = icmp eq i32 %flag, 0
  br i1 %tobool, label %if.else, label %if.then

if.then:
  %data = getelementptr inbounds %struct.__sk_buff, %struct.__sk_buff* %skb, i64 0, i32 0
  %0 = load i32, i32* %data, align 4
  tail call void asm sideeffect "", "~{memory}"()
  br label %if.end

if.else:
  %data_end = getelementptr inbounds %struct.__sk_buff, %struct.__sk_buff* %skb, i64 0, i32 1
  %1 = load i32, i32* %data_end, align 4
  tail call void asm sideeffect "", "~{memory}"()
  br label %if.end

if.end:
  %p.0.in.in = phi i32 [ %0, %if.then ], [ %1, %if.else ]
  %p.0.in = zext i32 %p.0.in.in to i64
  %p.0 = inttoptr i64 %p.0.in to i8*
  ret i8* %p.0
}

; CHECK: w0 = *(u32 *)(r2 + 0)
; CHECK: w0 = *(u32 *)(r2 + 4)
; CHECK-NOT: r[[#]] = w[[#]]
; CHECK: exit