path: root/sandbox/linux/seccomp-bpf/verifier.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <string.h>

#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/verifier.h"

namespace playground2 {

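// Verify that the compiled BPF |program| returns the same result that the
// policy in |evaluators| specifies, for every system call number in the
// valid range as well as the values just outside of it.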
bool Verifier::verifyBPF(const std::vector<struct sock_filter>& program,
                         const Sandbox::Evaluators& evaluators,
                         const char **err) {
  *err = NULL;
  if (evaluators.size() != 1) {
    *err = "Not implemented";
    return false;
  }
  Sandbox::EvaluateSyscall evaluateSyscall = evaluators.begin()->first;
  for (int nr = MIN_SYSCALL-1; nr <= static_cast<int>(MAX_SYSCALL)+1; ++nr) {
    // We want to iterate over the full range of system call numbers, plus the
    // values just below and just above that range. This exercises every
    // result that the "evaluators" can produce.
    // On Intel systems this requires extra care: bit 30 of the system call
    // number selects the ABI (cleared for i386 and x86-64, set for x32).
    // Unless we set this bit to match the ABI we are compiled for, an early
    // check in our BPF program rejects the call with a misleading error code.
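    // For example, read() is system call 0 on x86-64, but an x32 process
    // invokes it as 0x40000000 (0 | 0x40000000).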
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
    int sysnum = nr |  0x40000000;
#else
    int sysnum = nr & ~0x40000000;
#endif
#else
    int sysnum = nr;
#endif

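    // Assemble the system call data that the kernel would pass to the BPF
    // program. Only the system call number and architecture are filled in;
    // the remaining fields of arch_seccomp_data stay zero-initialized.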
    struct arch_seccomp_data data = { sysnum, SECCOMP_ARCH };
    uint32_t expectedRet;
    Sandbox::ErrorCode code = evaluateSyscall(sysnum);
    switch (code) {
    case Sandbox::SB_TRAP:
      expectedRet = SECCOMP_RET_TRAP;
      break;
    case Sandbox::SB_ALLOWED:
      expectedRet = SECCOMP_RET_ALLOW;
      break;
    case Sandbox::SB_INSPECT_ARG_1...Sandbox::SB_INSPECT_ARG_6:
      *err = "Not implemented";
      return false;
    default:
      if (code >= 1 && code < 4096) {
        expectedRet = SECCOMP_RET_ERRNO + static_cast<int>(code);
      } else {
        *err = "Invalid errno value";
        return false;
      }
      break;
    }
    uint32_t computedRet = evaluateBPF(program, data, err);
    if (*err) {
      return false;
    } else if (computedRet != expectedRet) {
      *err = "Exit code from BPF program doesn't match";
      return false;
    }
  }
  return true;
}

uint32_t Verifier::evaluateBPF(const std::vector<struct sock_filter>& program,
                               const struct arch_seccomp_data& data,
                               const char **err) {
  *err = NULL;
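  // Interpret the program one instruction at a time until it executes a RET
  // or an error is detected. Only LD, JMP, and RET instructions are
  // accepted; anything else is reported as an error.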
  for (State state(program, data); !*err; ++state.ip) {
    if (state.ip >= program.size()) {
      *err = "Invalid instruction pointer in BPF program";
      break;
    }
    const struct sock_filter& insn = program[state.ip];
    switch (BPF_CLASS(insn.code)) {
    case BPF_LD:
      ld(&state, insn, err);
      break;
    case BPF_JMP:
      jmp(&state, insn, err);
      break;
    case BPF_RET:
      return ret(&state, insn, err);
    default:
      *err = "Unexpected instruction in BPF program";
      break;
    }
  }
  return 0;
}

void Verifier::ld(State *state, const struct sock_filter& insn,
                  const char **err) {
  if (BPF_SIZE(insn.code) != BPF_W ||
      BPF_MODE(insn.code) != BPF_ABS) {
    *err = "Invalid BPF_LD instruction";
    return;
  }
  if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
    // We only allow loading of properly aligned 32bit quantities.
    memcpy(&state->accumulator,
           reinterpret_cast<const char *>(&state->data) + insn.k,
           4);
  } else {
    *err = "Invalid operand in BPF_LD instruction";
    return;
  }
  state->accIsValid = true;
  return;
}

void Verifier::jmp(State *state, const struct sock_filter& insn,
                   const char **err) {
  if (BPF_OP(insn.code) == BPF_JA) {
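    // The target of an unconditional jump (ip + k + 1) must stay inside the
    // program; the second comparison catches arithmetic overflow of the jump
    // target, as BPF jumps can only move forward.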
    if (state->ip + insn.k + 1 >= state->program.size() ||
        state->ip + insn.k + 1 <= state->ip) {
    compilation_failure:
      *err = "Invalid BPF_JMP instruction";
      return;
    }
    state->ip += insn.k;
  } else {
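    // A conditional jump must compare the accumulator against a constant,
    // the accumulator must have been loaded, and both branch targets must
    // stay inside the program.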
    if (BPF_SRC(insn.code) != BPF_K ||
        !state->accIsValid ||
        state->ip + insn.jt + 1 >= state->program.size() ||
        state->ip + insn.jf + 1 >= state->program.size()) {
      goto compilation_failure;
    }
    switch (BPF_OP(insn.code)) {
    case BPF_JEQ:
      if (state->accumulator == insn.k) {
        state->ip += insn.jt;
      } else {
        state->ip += insn.jf;
      }
      break;
    case BPF_JGT:
      if (state->accumulator > insn.k) {
        state->ip += insn.jt;
      } else {
        state->ip += insn.jf;
      }
      break;
    case BPF_JGE:
      if (state->accumulator >= insn.k) {
        state->ip += insn.jt;
      } else {
        state->ip += insn.jf;
      }
      break;
    case BPF_JSET:
      if (state->accumulator & insn.k) {
        state->ip += insn.jt;
      } else {
        state->ip += insn.jf;
      }
      break;
    default:
      goto compilation_failure;
    }
  }
}

uint32_t Verifier::ret(State *, const struct sock_filter& insn,
                       const char **err) {
  if (BPF_SRC(insn.code) != BPF_K) {
    *err = "Invalid BPF_RET instruction";
    return 0;
  }
  return insn.k;
}

}  // namespace playground2