2
2
3
3
#include <linux/bpf.h>
4
4
#include <bpf/bpf_helpers.h>
5
+ #include <linux/if_ether.h>
6
+ #include <linux/ip.h>
5
7
6
8
/* Dummy prog to test TC-BPF API: does no packet processing and always
 * returns 0, so it merely exercises program load/attach paths.
 * NOTE(review): reconstructed from a garbled diff view; the original
 * signature comes from the hunk-header context line — confirm against
 * the upstream file.
 */
int cls(struct __sk_buff *skb)
{
	return 0;
}
15
/* Prog to verify tc-bpf without cap_sys_admin and cap_perfmon is rejected as
 * required to prevent Spectre v1 using CPU multiplication port contention
 * side-channel. This is not a full exploit but rather a PoC for x86_64. With
 * extensions to the verifier's mitigations this may become obsolete.
 *
 * This should compile to the following bytecode if the kernel would allow
 * unprivileged packet pointer accesses:
 *

 0000000000000000 <pkt_ptr>:
        0:	b4 00 00 00 00 00 00 00	w0 = 0
        1:	61 12 50 00 00 00 00 00	r2 = *(u32 *)(r1 + 80)
        2:	61 11 4c 00 00 00 00 00	r1 = *(u32 *)(r1 + 76)
        3:	bf 13 00 00 00 00 00 00	r3 = r1
        4:	07 03 00 00 22 00 00 00	r3 += 34
        5:	bd 23 07 00 00 00 00 00	if r3 <= r2 goto +7 <LBB1_3>
        6:	71 10 0e 00 00 00 00 00	r0 = *(u8 *)(r1 + 14)
        7:	64 00 00 00 18 00 00 00	w0 <<= 24
        8:	c4 00 00 00 18 00 00 00	w0 s>>= 24
        9:	bc 01 00 00 00 00 00 00	w1 = w0
       10:	54 01 00 00 01 00 00 00	w1 &= 1
       11:	16 01 01 00 00 00 00 00	if w1 == 0 goto +1 <LBB1_3>
       12:	24 00 00 00 61 00 00 00	w0 *= 97

 0000000000000068 <LBB1_3>:
       13:	95 00 00 00 00 00 00 00	exit

 *
 * Which should in turn translate to this x86_64 assembly with !allow_ptr_leaks
 * and !bypass_spec_v1:
 *

 int pkt_ptr(struct __sk_buff * skb):
 bpf_prog_7c3834bad32f2b0f_pkt_ptr:
 ; int pkt_ptr(struct __sk_buff *skb)
    0:	endbr64
    4:	nopl	0x0(%rax,%rax,1)
    9:	xchg	%ax,%ax
    b:	push	%rbp
    c:	mov	%rsp,%rbp
    f:	endbr64
   13:	xor	%eax,%eax
 ; if ((long)(iph + 1) > (long)skb->data_end)
   15:	mov	0x50(%rdi),%rsi
 ; struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr);
   19:	mov	0xc8(%rdi),%rdi
 ; if ((long)(iph + 1) > (long)skb->data_end)
   20:	mov	%rdi,%rdx
   23:	add	$0x22,%rdx
 ; if ((long)(iph + 1) > (long)skb->data_end)
   27:	cmp	%rsi,%rdx
   2a:	ja	0x0000000000000043
 ; char secret = *((char *) iph);
   2c:	movzbq	0xe(%rdi),%rax
   31:	shl	$0x18,%eax
   34:	sar	$0x18,%eax
 ; if (secret & 1) {
   37:	mov	%eax,%edi
   39:	and	$0x1,%edi
 ; if (secret & 1) {
   3c:	test	%edi,%edi
   3e:	je	0x0000000000000043
   40:	imul	$0x61,%eax,%eax
 ; }
   43:	leaveq
   44:	retq

 *
 */
SEC("tcx/ingress")
int pkt_ptr(struct __sk_buff *skb)
{
	/* Points one Ethernet header past data; not yet bounds-checked. */
	struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr);

	/* Branch to be speculatively bypassed. */
	if ((long)(iph + 1) > (long)skb->data_end)
		return 0;

	/* Speculative access to be prevented. */
	char secret = *((char *) iph);

	/* Leak the first bit of the secret value that lies behind data_end to a
	 * SMP sibling thread that also executes imul instructions. If the bit
	 * is 1, the sibling will experience a slowdown. */
	long long x = secret;
	if (secret & 1) {
		x *= 97;
	}

	/* To prevent optimization. Truncation of x to the int return type is
	 * intentional and matches the bytecode listing above. */
	return x;
}