author    jln@chromium.org <jln@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-11-08 21:57:57 +0000
committer jln@chromium.org <jln@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-11-08 21:57:57 +0000
commit    0e28e5c10b908639309dccfece079358dfe2ad9d (patch)
tree      1db31aebbbc31ec817d18df3f050f709a70b59de /sandbox
parent    65a0bf350aa888843e5f84f848ae964d4e4a337e (diff)
Make sandbox/linux/seccomp-bpf/ follow the style guide.
This CL has mostly been generated with clang-format and should not introduce any code change other than reformatting.

BUG=316486
R=markus@chromium.org, rsesek@chromium.org

Review URL: https://codereview.chromium.org/66723007

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@234013 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'sandbox')
-rw-r--r--  sandbox/linux/seccomp-bpf/basicblock.cc               |   7
-rw-r--r--  sandbox/linux/seccomp-bpf/basicblock.h                |  18
-rw-r--r--  sandbox/linux/seccomp-bpf/bpf_tests.h                 |  34
-rw-r--r--  sandbox/linux/seccomp-bpf/codegen.cc                  | 339
-rw-r--r--  sandbox/linux/seccomp-bpf/codegen.h                   |  61
-rw-r--r--  sandbox/linux/seccomp-bpf/die.cc                      |  35
-rw-r--r--  sandbox/linux/seccomp-bpf/die.h                       |  27
-rw-r--r--  sandbox/linux/seccomp-bpf/errorcode.cc                |  46
-rw-r--r--  sandbox/linux/seccomp-bpf/errorcode.h                 |  51
-rw-r--r--  sandbox/linux/seccomp-bpf/instruction.h               |  13
-rw-r--r--  sandbox/linux/seccomp-bpf/port.h                      |  31
-rw-r--r--  sandbox/linux/seccomp-bpf/sandbox_bpf.cc              | 590
-rw-r--r--  sandbox/linux/seccomp-bpf/sandbox_bpf.h               |  42
-rw-r--r--  sandbox/linux/seccomp-bpf/sandbox_bpf_policy_forward.h |   7
-rw-r--r--  sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc     | 486
-rw-r--r--  sandbox/linux/seccomp-bpf/syscall.h                   |  97
-rw-r--r--  sandbox/linux/seccomp-bpf/syscall_iterator.cc         |  26
-rw-r--r--  sandbox/linux/seccomp-bpf/syscall_iterator.h          |   9
-rw-r--r--  sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc |   5
-rw-r--r--  sandbox/linux/seccomp-bpf/syscall_unittest.cc         | 100
-rw-r--r--  sandbox/linux/seccomp-bpf/trap.cc                     |  88
-rw-r--r--  sandbox/linux/seccomp-bpf/trap.h                      |  41
-rw-r--r--  sandbox/linux/seccomp-bpf/verifier.cc                 | 498
-rw-r--r--  sandbox/linux/seccomp-bpf/verifier.h                  |   7
-rw-r--r--  sandbox/linux/tests/main.cc                           |   2
-rw-r--r--  sandbox/linux/tests/unit_tests.cc                     |  53
-rw-r--r--  sandbox/linux/tests/unit_tests.h                      |  70
27 files changed, 1439 insertions(+), 1344 deletions(-)
diff --git a/sandbox/linux/seccomp-bpf/basicblock.cc b/sandbox/linux/seccomp-bpf/basicblock.cc
index bf27c58..58d27b2 100644
--- a/sandbox/linux/seccomp-bpf/basicblock.cc
+++ b/sandbox/linux/seccomp-bpf/basicblock.cc
@@ -4,13 +4,10 @@
#include "sandbox/linux/seccomp-bpf/basicblock.h"
-
namespace playground2 {
-BasicBlock::BasicBlock() {
-}
+BasicBlock::BasicBlock() {}
-BasicBlock::~BasicBlock() {
-}
+BasicBlock::~BasicBlock() {}
} // namespace
diff --git a/sandbox/linux/seccomp-bpf/basicblock.h b/sandbox/linux/seccomp-bpf/basicblock.h
index 1782a80..a116f41 100644
--- a/sandbox/linux/seccomp-bpf/basicblock.h
+++ b/sandbox/linux/seccomp-bpf/basicblock.h
@@ -9,7 +9,6 @@
#include "sandbox/linux/seccomp-bpf/instruction.h"
-
namespace playground2 {
struct BasicBlock {
@@ -20,25 +19,24 @@ struct BasicBlock {
// identify common sequences of basic blocks. This would normally be
// really easy to do, but STL requires us to wrap the comparator into
// a class. We begrudgingly add some code here that provides this wrapping.
- template<class T> class Less {
+ template <class T>
+ class Less {
public:
- Less(const T& data, int (*cmp)(const BasicBlock *, const BasicBlock *,
- const T& data))
- : data_(data),
- cmp_(cmp) {
- }
+ Less(const T& data,
+ int (*cmp)(const BasicBlock*, const BasicBlock*, const T& data))
+ : data_(data), cmp_(cmp) {}
- bool operator() (const BasicBlock *a, const BasicBlock *b) const {
+ bool operator()(const BasicBlock* a, const BasicBlock* b) const {
return cmp_(a, b, data_) < 0;
}
private:
const T& data_;
- int (*cmp_)(const BasicBlock *, const BasicBlock *, const T&);
+ int (*cmp_)(const BasicBlock*, const BasicBlock*, const T&);
};
// Basic blocks are essentially nothing more than a set of instructions.
- std::vector<Instruction *> instructions;
+ std::vector<Instruction*> instructions;
// In order to compute relative branch offsets we need to keep track of
// how far our block is away from the very last basic block. The "offset_"
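For reference, the comparator-wrapping pattern described in the comment above can be sketched in isolation as follows; the Item type, CompareByName() and ItemLess are illustrative stand-ins, not code from this CL.

#include <cstring>
#include <set>

struct Item {
  const char* name;
};

// A three-argument comparison that needs extra context ("data"), analogous
// to PointerCompare() in codegen.cc.
static int CompareByName(const Item* a, const Item* b, const int& /*data*/) {
  return strcmp(a->name, b->name);
}

// The wrapper captures the context and exposes the two-argument functor
// interface that std::set expects, just like BasicBlock::Less does.
template <class T>
class ItemLess {
 public:
  ItemLess(const T& data, int (*cmp)(const Item*, const Item*, const T&))
      : data_(data), cmp_(cmp) {}
  bool operator()(const Item* a, const Item* b) const {
    return cmp_(a, b, data_) < 0;
  }

 private:
  const T& data_;
  int (*cmp_)(const Item*, const Item*, const T&);
};

// Usage mirrors MergeTails() further down in this CL:
//   ItemLess<int> less(0, CompareByName);
//   std::set<Item*, ItemLess<int> > items(less);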
diff --git a/sandbox/linux/seccomp-bpf/bpf_tests.h b/sandbox/linux/seccomp-bpf/bpf_tests.h
index 92cc1ed..13ccf7d 100644
--- a/sandbox/linux/seccomp-bpf/bpf_tests.h
+++ b/sandbox/linux/seccomp-bpf/bpf_tests.h
@@ -20,14 +20,13 @@ namespace sandbox {
// macros from unit_tests.h to specify the expected error condition.
// A BPF_DEATH_TEST is always disabled under ThreadSanitizer, see
// crbug.com/243968.
-#define BPF_DEATH_TEST(test_case_name, test_name, death, policy, aux...) \
- void BPF_TEST_##test_name(sandbox::BpfTests<aux>::AuxType& BPF_AUX); \
- TEST(test_case_name, DISABLE_ON_TSAN(test_name)) { \
- sandbox::BpfTests<aux>::TestArgs arg(BPF_TEST_##test_name, policy); \
- sandbox::BpfTests<aux>::RunTestInProcess( \
- sandbox::BpfTests<aux>::TestWrapper, &arg, \
- death); \
- } \
+#define BPF_DEATH_TEST(test_case_name, test_name, death, policy, aux...) \
+ void BPF_TEST_##test_name(sandbox::BpfTests<aux>::AuxType& BPF_AUX); \
+ TEST(test_case_name, DISABLE_ON_TSAN(test_name)) { \
+ sandbox::BpfTests<aux>::TestArgs arg(BPF_TEST_##test_name, policy); \
+ sandbox::BpfTests<aux>::RunTestInProcess( \
+ sandbox::BpfTests<aux>::TestWrapper, &arg, death); \
+ } \
void BPF_TEST_##test_name(sandbox::BpfTests<aux>::AuxType& BPF_AUX)
// BPF_TEST() is a special version of SANDBOX_TEST(). It turns into a no-op,
@@ -40,18 +39,16 @@ namespace sandbox {
// variable will be passed as an argument to the "policy" function. Policies
// would typically use it as an argument to Sandbox::Trap(), if they want to
// communicate data between the BPF_TEST() and a Trap() function.
-#define BPF_TEST(test_case_name, test_name, policy, aux...) \
+#define BPF_TEST(test_case_name, test_name, policy, aux...) \
BPF_DEATH_TEST(test_case_name, test_name, DEATH_SUCCESS(), policy, aux)
-
// Assertions are handled exactly the same as with a normal SANDBOX_TEST()
#define BPF_ASSERT SANDBOX_ASSERT
-
// The "Aux" type is optional. We use an "empty" type by default, so that if
// the caller doesn't provide any type, all the BPF_AUX related data compiles
// to nothing.
-template<class Aux = int[0]>
+template <class Aux = int[0]>
class BpfTests : public UnitTests {
public:
typedef Aux AuxType;
@@ -59,10 +56,7 @@ class BpfTests : public UnitTests {
class TestArgs {
public:
TestArgs(void (*t)(AuxType&), playground2::Sandbox::EvaluateSyscall p)
- : test_(t),
- policy_(p),
- aux_() {
- }
+ : test_(t), policy_(p), aux_() {}
void (*test() const)(AuxType&) { return test_; }
playground2::Sandbox::EvaluateSyscall policy() const { return policy_; }
@@ -75,14 +69,14 @@ class BpfTests : public UnitTests {
AuxType aux_;
};
- static void TestWrapper(void *void_arg) {
- TestArgs *arg = reinterpret_cast<TestArgs *>(void_arg);
+ static void TestWrapper(void* void_arg) {
+ TestArgs* arg = reinterpret_cast<TestArgs*>(void_arg);
playground2::Die::EnableSimpleExit();
if (playground2::Sandbox::SupportsSeccompSandbox(-1) ==
playground2::Sandbox::STATUS_AVAILABLE) {
// Ensure the the sandbox is actually available at this time
int proc_fd;
- BPF_ASSERT((proc_fd = open("/proc", O_RDONLY|O_DIRECTORY)) >= 0);
+ BPF_ASSERT((proc_fd = open("/proc", O_RDONLY | O_DIRECTORY)) >= 0);
BPF_ASSERT(playground2::Sandbox::SupportsSeccompSandbox(proc_fd) ==
playground2::Sandbox::STATUS_AVAILABLE);
@@ -106,7 +100,7 @@ class BpfTests : public UnitTests {
// if we don't have kernel support.
playground2::Sandbox sandbox;
sandbox.SetSandboxPolicyDeprecated(arg->policy(), &arg->aux_);
- playground2::Sandbox::Program *program =
+ playground2::Sandbox::Program* program =
sandbox.AssembleFilter(true /* force_verification */);
delete program;
sandbox::UnitTests::IgnoreThisTest();
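Concretely, a hypothetical test built on these macros looks like the sketch below. The policy signature mirrors ProbeEvaluator() later in this CL; DenyUnamePolicy and the test names are made up for illustration.

#include <errno.h>
#include <sys/syscall.h>
#include <sys/utsname.h>

playground2::ErrorCode DenyUnamePolicy(playground2::Sandbox*,
                                       int sysnum,
                                       void*) {
  if (!playground2::Sandbox::IsValidSyscallNumber(sysnum)) {
    return playground2::ErrorCode(ENOSYS);
  }
  if (sysnum == __NR_uname) {
    // Deny uname() with EPERM so the test can observe the filter.
    return playground2::ErrorCode(EPERM);
  }
  return playground2::ErrorCode(playground2::ErrorCode::ERR_ALLOWED);
}

BPF_TEST(HypotheticalSandboxTest, UnameIsDenied, DenyUnamePolicy) {
  struct utsname buf;
  BPF_ASSERT(uname(&buf) == -1);
  BPF_ASSERT(errno == EPERM);
}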
diff --git a/sandbox/linux/seccomp-bpf/codegen.cc b/sandbox/linux/seccomp-bpf/codegen.cc
index 17b5d84..77df612 100644
--- a/sandbox/linux/seccomp-bpf/codegen.cc
+++ b/sandbox/linux/seccomp-bpf/codegen.cc
@@ -6,26 +6,25 @@
#include "sandbox/linux/seccomp-bpf/codegen.h"
-
namespace {
// Helper function for Traverse().
-void TraverseRecursively(std::set<playground2::Instruction *> *visited,
- playground2::Instruction *instruction) {
+void TraverseRecursively(std::set<playground2::Instruction*>* visited,
+ playground2::Instruction* instruction) {
if (visited->find(instruction) == visited->end()) {
visited->insert(instruction);
switch (BPF_CLASS(instruction->code)) {
- case BPF_JMP:
- if (BPF_OP(instruction->code) != BPF_JA) {
- TraverseRecursively(visited, instruction->jf_ptr);
- }
- TraverseRecursively(visited, instruction->jt_ptr);
- break;
- case BPF_RET:
- break;
- default:
- TraverseRecursively(visited, instruction->next);
- break;
+ case BPF_JMP:
+ if (BPF_OP(instruction->code) != BPF_JA) {
+ TraverseRecursively(visited, instruction->jf_ptr);
+ }
+ TraverseRecursively(visited, instruction->jt_ptr);
+ break;
+ case BPF_RET:
+ break;
+ default:
+ TraverseRecursively(visited, instruction->next);
+ break;
}
}
}
@@ -34,9 +33,7 @@ void TraverseRecursively(std::set<playground2::Instruction *> *visited,
namespace playground2 {
-CodeGen::CodeGen()
- : compiled_(false) {
-}
+CodeGen::CodeGen() : compiled_(false) {}
CodeGen::~CodeGen() {
for (Instructions::iterator iter = instructions_.begin();
@@ -58,108 +55,114 @@ void CodeGen::PrintProgram(const Sandbox::Program& program) {
int ip = (int)(iter - program.begin());
fprintf(stderr, "%3d) ", ip);
switch (BPF_CLASS(iter->code)) {
- case BPF_LD:
- if (iter->code == BPF_LD+BPF_W+BPF_ABS) {
- fprintf(stderr, "LOAD %d // ", (int)iter->k);
- if (iter->k == offsetof(struct arch_seccomp_data, nr)) {
- fprintf(stderr, "System call number\n");
- } else if (iter->k == offsetof(struct arch_seccomp_data, arch)) {
- fprintf(stderr, "Architecture\n");
- } else if (iter->k == offsetof(struct arch_seccomp_data,
- instruction_pointer)) {
- fprintf(stderr, "Instruction pointer (LSB)\n");
- } else if (iter->k == offsetof(struct arch_seccomp_data,
- instruction_pointer) + 4) {
- fprintf(stderr, "Instruction pointer (MSB)\n");
- } else if (iter->k >= offsetof(struct arch_seccomp_data, args) &&
- iter->k < offsetof(struct arch_seccomp_data, args)+48 &&
- (iter->k-offsetof(struct arch_seccomp_data, args))%4 == 0) {
- fprintf(stderr, "Argument %d (%cSB)\n",
- (int)(iter->k-offsetof(struct arch_seccomp_data, args))/8,
- (iter->k-offsetof(struct arch_seccomp_data,
- args))%8 ? 'M' : 'L');
+ case BPF_LD:
+ if (iter->code == BPF_LD + BPF_W + BPF_ABS) {
+ fprintf(stderr, "LOAD %d // ", (int)iter->k);
+ if (iter->k == offsetof(struct arch_seccomp_data, nr)) {
+ fprintf(stderr, "System call number\n");
+ } else if (iter->k == offsetof(struct arch_seccomp_data, arch)) {
+ fprintf(stderr, "Architecture\n");
+ } else if (iter->k ==
+ offsetof(struct arch_seccomp_data, instruction_pointer)) {
+ fprintf(stderr, "Instruction pointer (LSB)\n");
+ } else if (iter->k ==
+ offsetof(struct arch_seccomp_data, instruction_pointer) +
+ 4) {
+ fprintf(stderr, "Instruction pointer (MSB)\n");
+ } else if (iter->k >= offsetof(struct arch_seccomp_data, args) &&
+ iter->k < offsetof(struct arch_seccomp_data, args) + 48 &&
+ (iter->k - offsetof(struct arch_seccomp_data, args)) % 4 ==
+ 0) {
+ fprintf(
+ stderr,
+ "Argument %d (%cSB)\n",
+ (int)(iter->k - offsetof(struct arch_seccomp_data, args)) / 8,
+ (iter->k - offsetof(struct arch_seccomp_data, args)) % 8 ? 'M'
+ : 'L');
+ } else {
+ fprintf(stderr, "???\n");
+ }
+ } else {
+ fprintf(stderr, "LOAD ???\n");
+ }
+ break;
+ case BPF_JMP:
+ if (BPF_OP(iter->code) == BPF_JA) {
+ fprintf(stderr, "JMP %d\n", ip + iter->k + 1);
+ } else {
+ fprintf(stderr, "if A %s 0x%x; then JMP %d else JMP %d\n",
+ BPF_OP(iter->code) == BPF_JSET ? "&" :
+ BPF_OP(iter->code) == BPF_JEQ ? "==" :
+ BPF_OP(iter->code) == BPF_JGE ? ">=" :
+ BPF_OP(iter->code) == BPF_JGT ? ">" : "???",
+ (int)iter->k,
+ ip + iter->jt + 1, ip + iter->jf + 1);
+ }
+ break;
+ case BPF_RET:
+ fprintf(stderr, "RET 0x%x // ", iter->k);
+ if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
+ fprintf(stderr, "Trap #%d\n", iter->k & SECCOMP_RET_DATA);
+ } else if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
+ fprintf(stderr, "errno = %d\n", iter->k & SECCOMP_RET_DATA);
+ } else if (iter->k == SECCOMP_RET_ALLOW) {
+ fprintf(stderr, "Allowed\n");
} else {
fprintf(stderr, "???\n");
}
- } else {
- fprintf(stderr, "LOAD ???\n");
- }
- break;
- case BPF_JMP:
- if (BPF_OP(iter->code) == BPF_JA) {
- fprintf(stderr, "JMP %d\n", ip + iter->k + 1);
- } else {
- fprintf(stderr, "if A %s 0x%x; then JMP %d else JMP %d\n",
- BPF_OP(iter->code) == BPF_JSET ? "&" :
- BPF_OP(iter->code) == BPF_JEQ ? "==" :
- BPF_OP(iter->code) == BPF_JGE ? ">=" :
- BPF_OP(iter->code) == BPF_JGT ? ">" : "???",
- (int)iter->k,
- ip + iter->jt + 1, ip + iter->jf + 1);
- }
- break;
- case BPF_RET:
- fprintf(stderr, "RET 0x%x // ", iter->k);
- if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
- fprintf(stderr, "Trap #%d\n", iter->k & SECCOMP_RET_DATA);
- } else if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
- fprintf(stderr, "errno = %d\n", iter->k & SECCOMP_RET_DATA);
- } else if (iter->k == SECCOMP_RET_ALLOW) {
- fprintf(stderr, "Allowed\n");
- } else {
+ break;
+ case BPF_ALU:
+ fprintf(stderr, BPF_OP(iter->code) == BPF_NEG
+ ? "A := -A\n" : "A := A %s 0x%x\n",
+ BPF_OP(iter->code) == BPF_ADD ? "+" :
+ BPF_OP(iter->code) == BPF_SUB ? "-" :
+ BPF_OP(iter->code) == BPF_MUL ? "*" :
+ BPF_OP(iter->code) == BPF_DIV ? "/" :
+ BPF_OP(iter->code) == BPF_MOD ? "%" :
+ BPF_OP(iter->code) == BPF_OR ? "|" :
+ BPF_OP(iter->code) == BPF_XOR ? "^" :
+ BPF_OP(iter->code) == BPF_AND ? "&" :
+ BPF_OP(iter->code) == BPF_LSH ? "<<" :
+ BPF_OP(iter->code) == BPF_RSH ? ">>" : "???",
+ (int)iter->k);
+ break;
+ default:
fprintf(stderr, "???\n");
- }
- break;
- case BPF_ALU:
- fprintf(stderr, BPF_OP(iter->code) == BPF_NEG
- ? "A := -A\n" : "A := A %s 0x%x\n",
- BPF_OP(iter->code) == BPF_ADD ? "+" :
- BPF_OP(iter->code) == BPF_SUB ? "-" :
- BPF_OP(iter->code) == BPF_MUL ? "*" :
- BPF_OP(iter->code) == BPF_DIV ? "/" :
- BPF_OP(iter->code) == BPF_MOD ? "%" :
- BPF_OP(iter->code) == BPF_OR ? "|" :
- BPF_OP(iter->code) == BPF_XOR ? "^" :
- BPF_OP(iter->code) == BPF_AND ? "&" :
- BPF_OP(iter->code) == BPF_LSH ? "<<" :
- BPF_OP(iter->code) == BPF_RSH ? ">>" : "???",
- (int)iter->k);
- break;
- default:
- fprintf(stderr, "???\n");
- break;
+ break;
}
}
return;
}
-Instruction *CodeGen::MakeInstruction(uint16_t code, uint32_t k,
- Instruction *next) {
+Instruction* CodeGen::MakeInstruction(uint16_t code,
+ uint32_t k,
+ Instruction* next) {
// We can handle non-jumping instructions and "always" jumps. Both of
// them are followed by exactly one "next" instruction.
// We allow callers to defer specifying "next", but then they must call
// "joinInstructions" later.
if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) {
- SANDBOX_DIE("Must provide both \"true\" and \"false\" branch "
- "for a BPF_JMP");
+ SANDBOX_DIE(
+ "Must provide both \"true\" and \"false\" branch "
+ "for a BPF_JMP");
}
if (next && BPF_CLASS(code) == BPF_RET) {
SANDBOX_DIE("Cannot append instructions after a return statement");
}
if (BPF_CLASS(code) == BPF_JMP) {
// "Always" jumps use the "true" branch target, only.
- Instruction *insn = new Instruction(code, 0, next, NULL);
+ Instruction* insn = new Instruction(code, 0, next, NULL);
instructions_.push_back(insn);
return insn;
} else {
// Non-jumping instructions do not use any of the branch targets.
- Instruction *insn = new Instruction(code, k, next);
+ Instruction* insn = new Instruction(code, k, next);
instructions_.push_back(insn);
return insn;
}
}
-Instruction *CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) {
+Instruction* CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) {
if (BPF_CLASS(code) != BPF_RET) {
SANDBOX_DIE("ErrorCodes can only be used in return expressions");
}
@@ -170,8 +173,10 @@ Instruction *CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) {
return MakeInstruction(code, err.err_);
}
-Instruction *CodeGen::MakeInstruction(uint16_t code, uint32_t k,
- Instruction *jt, Instruction *jf) {
+Instruction* CodeGen::MakeInstruction(uint16_t code,
+ uint32_t k,
+ Instruction* jt,
+ Instruction* jf) {
// We can handle all conditional jumps. They are followed by both a
// "true" and a "false" branch.
if (BPF_CLASS(code) != BPF_JMP || BPF_OP(code) == BPF_JA) {
@@ -182,12 +187,12 @@ Instruction *CodeGen::MakeInstruction(uint16_t code, uint32_t k,
// targets. It must then be set later by calling "JoinInstructions".
SANDBOX_DIE("Branches must jump to a valid instruction");
}
- Instruction *insn = new Instruction(code, k, jt, jf);
+ Instruction* insn = new Instruction(code, k, jt, jf);
instructions_.push_back(insn);
return insn;
}
-void CodeGen::JoinInstructions(Instruction *head, Instruction *tail) {
+void CodeGen::JoinInstructions(Instruction* head, Instruction* tail) {
// Merge two instructions, or set the branch target for an "always" jump.
// This function should be called, if the caller didn't initially provide
// a value for "next" when creating the instruction.
@@ -216,11 +221,12 @@ void CodeGen::JoinInstructions(Instruction *head, Instruction *tail) {
return;
}
-void CodeGen::Traverse(Instruction *instruction,
- void (*fnc)(Instruction *, void *), void *aux) {
- std::set<Instruction *> visited;
+void CodeGen::Traverse(Instruction* instruction,
+ void (*fnc)(Instruction*, void*),
+ void* aux) {
+ std::set<Instruction*> visited;
TraverseRecursively(&visited, instruction);
- for (std::set<Instruction *>::const_iterator iter = visited.begin();
+ for (std::set<Instruction*>::const_iterator iter = visited.begin();
iter != visited.end();
++iter) {
fnc(*iter, aux);
@@ -228,15 +234,15 @@ void CodeGen::Traverse(Instruction *instruction,
}
void CodeGen::FindBranchTargets(const Instruction& instructions,
- BranchTargets *branch_targets) {
+ BranchTargets* branch_targets) {
// Follow all possible paths through the "instructions" graph and compute
// a list of branch targets. This will later be needed to compute the
// boundaries of basic blocks.
// We maintain a set of all instructions that we have previously seen. This
// set ultimately converges on all instructions in the program.
- std::set<const Instruction *> seen_instructions;
+ std::set<const Instruction*> seen_instructions;
Instructions stack;
- for (const Instruction *insn = &instructions; insn; ) {
+ for (const Instruction* insn = &instructions; insn;) {
seen_instructions.insert(insn);
if (BPF_CLASS(insn->code) == BPF_JMP) {
// Found a jump. Increase count of incoming edges for each of the jump
@@ -244,7 +250,7 @@ void CodeGen::FindBranchTargets(const Instruction& instructions,
++(*branch_targets)[insn->jt_ptr];
if (BPF_OP(insn->code) != BPF_JA) {
++(*branch_targets)[insn->jf_ptr];
- stack.push_back(const_cast<Instruction *>(insn));
+ stack.push_back(const_cast<Instruction*>(insn));
}
// Start a recursive decent for depth-first traversal.
if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
@@ -262,8 +268,9 @@ void CodeGen::FindBranchTargets(const Instruction& instructions,
// (if any). It's OK if "insn" becomes NULL when reaching a return
// instruction.
if (!insn->next != (BPF_CLASS(insn->code) == BPF_RET)) {
- SANDBOX_DIE("Internal compiler error; return instruction must be at "
- "the end of the BPF program");
+ SANDBOX_DIE(
+ "Internal compiler error; return instruction must be at "
+ "the end of the BPF program");
}
if (seen_instructions.find(insn->next) == seen_instructions.end()) {
insn = insn->next;
@@ -288,8 +295,9 @@ void CodeGen::FindBranchTargets(const Instruction& instructions,
// We have seen both the "true" and the "false" branch, continue
// up the stack.
if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
- SANDBOX_DIE("Internal compiler error; cannot find all "
- "branch targets");
+ SANDBOX_DIE(
+ "Internal compiler error; cannot find all "
+ "branch targets");
}
insn = NULL;
}
@@ -298,11 +306,10 @@ void CodeGen::FindBranchTargets(const Instruction& instructions,
return;
}
-BasicBlock *CodeGen::MakeBasicBlock(Instruction *head,
- Instruction *tail) {
+BasicBlock* CodeGen::MakeBasicBlock(Instruction* head, Instruction* tail) {
// Iterate over all the instructions between "head" and "tail" and
// insert them into a new basic block.
- BasicBlock *bb = new BasicBlock;
+ BasicBlock* bb = new BasicBlock;
for (;; head = head->next) {
bb->instructions.push_back(head);
if (head == tail) {
@@ -316,20 +323,21 @@ BasicBlock *CodeGen::MakeBasicBlock(Instruction *head,
return bb;
}
-void CodeGen::AddBasicBlock(Instruction *head,
- Instruction *tail,
+void CodeGen::AddBasicBlock(Instruction* head,
+ Instruction* tail,
const BranchTargets& branch_targets,
- TargetsToBlocks *basic_blocks,
- BasicBlock **firstBlock) {
+ TargetsToBlocks* basic_blocks,
+ BasicBlock** firstBlock) {
// Add a new basic block to "basic_blocks". Also set "firstBlock", if it
// has not been set before.
BranchTargets::const_iterator iter = branch_targets.find(head);
if ((iter == branch_targets.end()) != !*firstBlock ||
!*firstBlock != basic_blocks->empty()) {
- SANDBOX_DIE("Only the very first basic block should have no "
- "incoming jumps");
+ SANDBOX_DIE(
+ "Only the very first basic block should have no "
+ "incoming jumps");
}
- BasicBlock *bb = MakeBasicBlock(head, tail);
+ BasicBlock* bb = MakeBasicBlock(head, tail);
if (!*firstBlock) {
*firstBlock = bb;
}
@@ -337,19 +345,20 @@ void CodeGen::AddBasicBlock(Instruction *head,
return;
}
-BasicBlock *CodeGen::CutGraphIntoBasicBlocks(
- Instruction *instructions, const BranchTargets& branch_targets,
- TargetsToBlocks *basic_blocks) {
+BasicBlock* CodeGen::CutGraphIntoBasicBlocks(
+ Instruction* instructions,
+ const BranchTargets& branch_targets,
+ TargetsToBlocks* basic_blocks) {
// Textbook implementation of a basic block generator. All basic blocks
// start with a branch target and end with either a return statement or
// a jump (or are followed by an instruction that forms the beginning of a
// new block). Both conditional and "always" jumps are supported.
- BasicBlock *first_block = NULL;
- std::set<const Instruction *> seen_instructions;
+ BasicBlock* first_block = NULL;
+ std::set<const Instruction*> seen_instructions;
Instructions stack;
- Instruction *tail = NULL;
- Instruction *head = instructions;
- for (Instruction *insn = head; insn; ) {
+ Instruction* tail = NULL;
+ Instruction* head = instructions;
+ for (Instruction* insn = head; insn;) {
if (seen_instructions.find(insn) != seen_instructions.end()) {
// We somehow went in a circle. This should never be possible. Not even
// cyclic graphs are supposed to confuse us this much.
@@ -410,7 +419,8 @@ BasicBlock *CodeGen::CutGraphIntoBasicBlocks(
// used in a "less" comparator for the purpose of storing pointers to basic
// blocks in STL containers; this gives an easy option to use STL to find
// shared tail sequences of basic blocks.
-static int PointerCompare(const BasicBlock *block1, const BasicBlock *block2,
+static int PointerCompare(const BasicBlock* block1,
+ const BasicBlock* block2,
const TargetsToBlocks& blocks) {
// Return <0, 0, or >0 depending on the ordering of "block1" and "block2".
// If we are looking at the exact same block, this is trivial and we don't
@@ -486,7 +496,7 @@ static int PointerCompare(const BasicBlock *block1, const BasicBlock *block2,
}
}
-void CodeGen::MergeTails(TargetsToBlocks *blocks) {
+void CodeGen::MergeTails(TargetsToBlocks* blocks) {
// We enter all of our basic blocks into a set using the BasicBlock::Less()
// comparator. This naturally results in blocks with identical tails of
// instructions to map to the same entry in the set. Whenever we discover
@@ -500,12 +510,11 @@ void CodeGen::MergeTails(TargetsToBlocks *blocks) {
// the future, we might decide to revisit this decision and attempt to
// merge arbitrary sub-sequences of instructions.
BasicBlock::Less<TargetsToBlocks> less(*blocks, PointerCompare);
- typedef std::set<BasicBlock *, BasicBlock::Less<TargetsToBlocks> > Set;
+ typedef std::set<BasicBlock*, BasicBlock::Less<TargetsToBlocks> > Set;
Set seen_basic_blocks(less);
- for (TargetsToBlocks::iterator iter = blocks->begin();
- iter != blocks->end();
+ for (TargetsToBlocks::iterator iter = blocks->begin(); iter != blocks->end();
++iter) {
- BasicBlock *bb = iter->second;
+ BasicBlock* bb = iter->second;
Set::const_iterator entry = seen_basic_blocks.find(bb);
if (entry == seen_basic_blocks.end()) {
// This is the first time we see this particular sequence of
@@ -521,34 +530,36 @@ void CodeGen::MergeTails(TargetsToBlocks *blocks) {
}
}
-void CodeGen::ComputeIncomingBranches(BasicBlock *block,
+void CodeGen::ComputeIncomingBranches(BasicBlock* block,
const TargetsToBlocks& targets_to_blocks,
- IncomingBranches *incoming_branches) {
+ IncomingBranches* incoming_branches) {
// We increment the number of incoming branches each time we encounter a
// basic block. But we only traverse recursively the very first time we
// encounter a new block. This is necessary to make topological sorting
// work correctly.
if (++(*incoming_branches)[block] == 1) {
- Instruction *last_insn = block->instructions.back();
+ Instruction* last_insn = block->instructions.back();
if (BPF_CLASS(last_insn->code) == BPF_JMP) {
- ComputeIncomingBranches(
- targets_to_blocks.find(last_insn->jt_ptr)->second,
- targets_to_blocks, incoming_branches);
+ ComputeIncomingBranches(targets_to_blocks.find(last_insn->jt_ptr)->second,
+ targets_to_blocks,
+ incoming_branches);
if (BPF_OP(last_insn->code) != BPF_JA) {
ComputeIncomingBranches(
- targets_to_blocks.find(last_insn->jf_ptr)->second,
- targets_to_blocks, incoming_branches);
+ targets_to_blocks.find(last_insn->jf_ptr)->second,
+ targets_to_blocks,
+ incoming_branches);
}
} else if (BPF_CLASS(last_insn->code) != BPF_RET) {
ComputeIncomingBranches(targets_to_blocks.find(last_insn->next)->second,
- targets_to_blocks, incoming_branches);
+ targets_to_blocks,
+ incoming_branches);
}
}
}
-void CodeGen::TopoSortBasicBlocks(BasicBlock *first_block,
+void CodeGen::TopoSortBasicBlocks(BasicBlock* first_block,
const TargetsToBlocks& blocks,
- BasicBlocks *basic_blocks) {
+ BasicBlocks* basic_blocks) {
// Textbook implementation of a toposort. We keep looking for basic blocks
// that don't have any incoming branches (initially, this is just the
// "first_block") and add them to the topologically sorted list of
@@ -562,7 +573,7 @@ void CodeGen::TopoSortBasicBlocks(BasicBlock *first_block,
IncomingBranches unordered_blocks;
ComputeIncomingBranches(first_block, blocks, &unordered_blocks);
- std::set<BasicBlock *> heads;
+ std::set<BasicBlock*> heads;
for (;;) {
// Move block from "unordered_blocks" to "basic_blocks".
basic_blocks->push_back(first_block);
@@ -570,7 +581,7 @@ void CodeGen::TopoSortBasicBlocks(BasicBlock *first_block,
// Inspect last instruction in the basic block. This is typically either a
// jump or a return statement. But it could also be a "normal" instruction
// that is followed by a jump target.
- Instruction *last_insn = first_block->instructions.back();
+ Instruction* last_insn = first_block->instructions.back();
if (BPF_CLASS(last_insn->code) == BPF_JMP) {
// Remove outgoing branches. This might end up moving our descendants
// into set of "head" nodes that no longer have any incoming branches.
@@ -598,7 +609,7 @@ void CodeGen::TopoSortBasicBlocks(BasicBlock *first_block,
// Our basic block is supposed to be followed by "last_insn->next",
// but dependencies prevent this from happening. Insert a BPF_JA
// instruction to correct the code flow.
- Instruction *ja = MakeInstruction(BPF_JMP+BPF_JA, 0, last_insn->next);
+ Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, last_insn->next);
first_block->instructions.push_back(ja);
last_insn->next = ja;
}
@@ -616,7 +627,7 @@ void CodeGen::TopoSortBasicBlocks(BasicBlock *first_block,
}
}
-void CodeGen::ComputeRelativeJumps(BasicBlocks *basic_blocks,
+void CodeGen::ComputeRelativeJumps(BasicBlocks* basic_blocks,
const TargetsToBlocks& targets_to_blocks) {
// While we previously used pointers in jt_ptr and jf_ptr to link jump
// instructions to their targets, we now convert these jumps to relative
@@ -626,38 +637,37 @@ void CodeGen::ComputeRelativeJumps(BasicBlocks *basic_blocks,
// Since we just completed a toposort, all jump targets are guaranteed to
// go forward. This means, iterating over the basic blocks in reverse makes
// it trivial to compute the correct offsets.
- BasicBlock *bb = NULL;
- BasicBlock *last_bb = NULL;
+ BasicBlock* bb = NULL;
+ BasicBlock* last_bb = NULL;
for (BasicBlocks::reverse_iterator iter = basic_blocks->rbegin();
iter != basic_blocks->rend();
++iter) {
last_bb = bb;
bb = *iter;
- Instruction *insn = bb->instructions.back();
+ Instruction* insn = bb->instructions.back();
if (BPF_CLASS(insn->code) == BPF_JMP) {
// Basic block ended in a jump instruction. We can now compute the
// appropriate offsets.
if (BPF_OP(insn->code) == BPF_JA) {
// "Always" jumps use the 32bit "k" field for the offset, instead
// of the 8bit "jt" and "jf" fields.
- int jmp =
- offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
- insn->k = jmp;
+ int jmp = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
+ insn->k = jmp;
insn->jt = insn->jf = 0;
} else {
// The offset computations for conditional jumps are just the same
// as for "always" jumps.
- int jt = offset-targets_to_blocks.find(insn->jt_ptr)->second->offset;
- int jf = offset-targets_to_blocks.find(insn->jf_ptr)->second->offset;
+ int jt = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
+ int jf = offset - targets_to_blocks.find(insn->jf_ptr)->second->offset;
// There is an added complication, because conditional relative jumps
// can only jump at most 255 instructions forward. If we have to jump
// further, insert an extra "always" jump.
Instructions::size_type jmp = bb->instructions.size();
if (jt > 255 || (jt == 255 && jf > 255)) {
- Instruction *ja = MakeInstruction(BPF_JMP+BPF_JA, 0, insn->jt_ptr);
+ Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jt_ptr);
bb->instructions.push_back(ja);
- ja->k = jt;
+ ja->k = jt;
ja->jt = ja->jf = 0;
// The newly inserted "always" jump, of course, requires us to adjust
@@ -666,9 +676,9 @@ void CodeGen::ComputeRelativeJumps(BasicBlocks *basic_blocks,
++jf;
}
if (jf > 255) {
- Instruction *ja = MakeInstruction(BPF_JMP+BPF_JA, 0, insn->jf_ptr);
+ Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jf_ptr);
bb->instructions.insert(bb->instructions.begin() + jmp, ja);
- ja->k = jf;
+ ja->k = jf;
ja->jt = ja->jf = 0;
// Again, we have to adjust the jump targets in the original
@@ -696,7 +706,7 @@ void CodeGen::ComputeRelativeJumps(BasicBlocks *basic_blocks,
}
void CodeGen::ConcatenateBasicBlocks(const BasicBlocks& basic_blocks,
- Sandbox::Program *program) {
+ Sandbox::Program* program) {
// Our basic blocks have been sorted and relative jump offsets have been
// computed. The last remaining step is for all the instructions in our
// basic blocks to be concatenated into a BPF program.
@@ -710,24 +720,25 @@ void CodeGen::ConcatenateBasicBlocks(const BasicBlocks& basic_blocks,
++insn_iter) {
const Instruction& insn = **insn_iter;
program->push_back(
- (struct sock_filter) { insn.code, insn.jt, insn.jf, insn.k });
+ (struct sock_filter) {insn.code, insn.jt, insn.jf, insn.k});
}
}
return;
}
-void CodeGen::Compile(Instruction *instructions, Sandbox::Program *program) {
+void CodeGen::Compile(Instruction* instructions, Sandbox::Program* program) {
if (compiled_) {
- SANDBOX_DIE("Cannot call Compile() multiple times. Create a new code "
- "generator instead");
+ SANDBOX_DIE(
+ "Cannot call Compile() multiple times. Create a new code "
+ "generator instead");
}
compiled_ = true;
BranchTargets branch_targets;
FindBranchTargets(*instructions, &branch_targets);
TargetsToBlocks all_blocks;
- BasicBlock *first_block =
- CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks);
+ BasicBlock* first_block =
+ CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks);
MergeTails(&all_blocks);
BasicBlocks basic_blocks;
TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks);
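The output of this pipeline is an ordinary BPF program. As a point of reference, the hand-assembled filter below encodes the same relative-jump convention that PrintProgram() prints ("ip + jt + 1", so a jt/jf of 0 means "fall through"). It uses the kernel's struct seccomp_data and SECCOMP_RET_* constants rather than this CL's arch_seccomp_data, and the getpid rule is illustrative only.

#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>

#include <linux/filter.h>
#include <linux/seccomp.h>

static struct sock_filter demo_filter[] = {
    // 0) A := seccomp_data.nr  (a real filter would also check .arch)
    BPF_STMT(BPF_LD + BPF_W + BPF_ABS, offsetof(struct seccomp_data, nr)),
    // 1) if (A == __NR_getpid) JMP 2 (jt = 0); else JMP 3 (jf = 1)
    BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_getpid, 0, 1),
    // 2) RET errno = EPERM
    BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | EPERM),
    // 3) RET allow
    BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW),
};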
diff --git a/sandbox/linux/seccomp-bpf/codegen.h b/sandbox/linux/seccomp-bpf/codegen.h
index 88521c2..6ef7603 100644
--- a/sandbox/linux/seccomp-bpf/codegen.h
+++ b/sandbox/linux/seccomp-bpf/codegen.h
@@ -13,14 +13,13 @@
#include "sandbox/linux/seccomp-bpf/instruction.h"
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
-
namespace playground2 {
-typedef std::vector<Instruction *> Instructions;
-typedef std::vector<BasicBlock *> BasicBlocks;
-typedef std::map<const Instruction *, int> BranchTargets;
-typedef std::map<const Instruction *, BasicBlock *> TargetsToBlocks;
-typedef std::map<const BasicBlock *, int> IncomingBranches;
+typedef std::vector<Instruction*> Instructions;
+typedef std::vector<BasicBlock*> BasicBlocks;
+typedef std::map<const Instruction*, int> BranchTargets;
+typedef std::map<const Instruction*, BasicBlock*> TargetsToBlocks;
+typedef std::map<const BasicBlock*, int> IncomingBranches;
// The code generator instantiates a basic compiler that can convert a
// graph of BPF instructions into a well-formed stream of BPF instructions.
@@ -66,16 +65,19 @@ class CodeGen {
// are owned by the CodeGen object. They do not need to be explicitly
// deleted.
// For details on the possible parameters refer to <linux/filter.h>
- Instruction *MakeInstruction(uint16_t code, uint32_t k,
- Instruction *next = NULL);
- Instruction *MakeInstruction(uint16_t code, const ErrorCode& err);
- Instruction *MakeInstruction(uint16_t code, uint32_t k,
- Instruction *jt, Instruction *jf);
+ Instruction* MakeInstruction(uint16_t code,
+ uint32_t k,
+ Instruction* next = NULL);
+ Instruction* MakeInstruction(uint16_t code, const ErrorCode& err);
+ Instruction* MakeInstruction(uint16_t code,
+ uint32_t k,
+ Instruction* jt,
+ Instruction* jf);
// Join two (sequences of) instructions. This is useful, if the "next"
// parameter had not originally been given in the call to MakeInstruction(),
// or if a (conditional) jump still has an unsatisfied target.
- void JoinInstructions(Instruction *head, Instruction *tail);
+ void JoinInstructions(Instruction* head, Instruction* tail);
// Traverse the graph of instructions and visit each instruction once.
// Traversal order is implementation-defined. It is acceptable to make
@@ -83,68 +85,69 @@ class CodeGen {
// do not affect traversal.
// The "fnc" function gets called with both the instruction and the opaque
// "aux" pointer.
- void Traverse(Instruction *, void (*fnc)(Instruction *, void *aux),
- void *aux);
+ void Traverse(Instruction*, void (*fnc)(Instruction*, void* aux), void* aux);
// Compiles the graph of instructions into a BPF program that can be passed
// to the kernel. Please note that this function modifies the graph in place
// and must therefore only be called once per graph.
- void Compile(Instruction *instructions, Sandbox::Program *program);
+ void Compile(Instruction* instructions, Sandbox::Program* program);
private:
friend class CodeGenUnittestHelper;
// Find all the instructions that are the target of BPF_JMPs.
void FindBranchTargets(const Instruction& instructions,
- BranchTargets *branch_targets);
+ BranchTargets* branch_targets);
// Combine instructions between "head" and "tail" into a new basic block.
// Basic blocks are defined as sequences of instructions whose only branch
// target is the very first instruction; furthermore, any BPF_JMP or BPF_RET
// instruction must be at the very end of the basic block.
- BasicBlock *MakeBasicBlock(Instruction *head, Instruction *tail);
+ BasicBlock* MakeBasicBlock(Instruction* head, Instruction* tail);
// Creates a basic block and adds it to "basic_blocks"; sets "first_block"
// if it is still NULL.
- void AddBasicBlock(Instruction *head, Instruction *tail,
+ void AddBasicBlock(Instruction* head,
+ Instruction* tail,
const BranchTargets& branch_targets,
- TargetsToBlocks *basic_blocks, BasicBlock **first_block);
+ TargetsToBlocks* basic_blocks,
+ BasicBlock** first_block);
// Cuts the DAG of instructions into basic blocks.
- BasicBlock *CutGraphIntoBasicBlocks(Instruction *instructions,
+ BasicBlock* CutGraphIntoBasicBlocks(Instruction* instructions,
const BranchTargets& branch_targets,
- TargetsToBlocks *blocks);
+ TargetsToBlocks* blocks);
// Find common tail sequences of basic blocks and coalesce them.
- void MergeTails(TargetsToBlocks *blocks);
+ void MergeTails(TargetsToBlocks* blocks);
// For each basic block, compute the number of incoming branches.
- void ComputeIncomingBranches(BasicBlock *block,
+ void ComputeIncomingBranches(BasicBlock* block,
const TargetsToBlocks& targets_to_blocks,
- IncomingBranches *incoming_branches);
+ IncomingBranches* incoming_branches);
// Topologically sort the basic blocks so that all jumps are forward jumps.
// This is a requirement for any well-formed BPF program.
- void TopoSortBasicBlocks(BasicBlock *first_block,
+ void TopoSortBasicBlocks(BasicBlock* first_block,
const TargetsToBlocks& blocks,
- BasicBlocks *basic_blocks);
+ BasicBlocks* basic_blocks);
// Convert jt_ptr_ and jf_ptr_ fields in BPF_JMP instructions to valid
// jt_ and jf_ jump offsets. This can result in BPF_JA instructions being
// inserted, if we need to jump over more than 256 instructions.
- void ComputeRelativeJumps(BasicBlocks *basic_blocks,
+ void ComputeRelativeJumps(BasicBlocks* basic_blocks,
const TargetsToBlocks& targets_to_blocks);
// Concatenate instructions from all basic blocks into a BPF program that
// can be passed to the kernel.
- void ConcatenateBasicBlocks(const BasicBlocks&, Sandbox::Program *program);
+ void ConcatenateBasicBlocks(const BasicBlocks&, Sandbox::Program* program);
// We stick all instructions and basic blocks into pools that get destroyed
// when the CodeGen object is destroyed. This way, we neither need to worry
// about explicitly managing ownership, nor do we need to worry about using
// smart pointers in the presence of circular references.
Instructions instructions_;
- BasicBlocks basic_blocks_;
+ BasicBlocks basic_blocks_;
// Compile() must only ever be called once as it makes destructive changes
// to the DAG.
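Putting the interface together, a minimal hypothetical caller might build and compile a trivial filter as sketched below; it assumes the seccomp-bpf headers above provide arch_seccomp_data and the BPF_* constants, and BuildTinyFilter() is an illustrative name only.

#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>

#include "sandbox/linux/seccomp-bpf/codegen.h"

void BuildTinyFilter(playground2::Sandbox::Program* program) {
  using playground2::CodeGen;
  using playground2::ErrorCode;
  using playground2::Instruction;

  CodeGen gen;
  // Return instructions are created from ErrorCodes.
  Instruction* allow =
      gen.MakeInstruction(BPF_RET + BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED));
  Instruction* eperm = gen.MakeInstruction(BPF_RET + BPF_K, ErrorCode(EPERM));
  // Conditional jump: if (A == __NR_getpid) goto eperm; else goto allow.
  Instruction* check = gen.MakeInstruction(
      BPF_JMP + BPF_JEQ + BPF_K, __NR_getpid, eperm, allow);
  // Load the system call number, then fall through to the comparison.
  Instruction* head = gen.MakeInstruction(
      BPF_LD + BPF_W + BPF_ABS, offsetof(struct arch_seccomp_data, nr), check);
  // Compile() cuts the graph into basic blocks, toposorts them and emits
  // relative jumps into "program".
  gen.Compile(head, program);
}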
diff --git a/sandbox/linux/seccomp-bpf/die.cc b/sandbox/linux/seccomp-bpf/die.cc
index dfc59a5..53cebab 100644
--- a/sandbox/linux/seccomp-bpf/die.cc
+++ b/sandbox/linux/seccomp-bpf/die.cc
@@ -12,7 +12,6 @@
#include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"
#include "sandbox/linux/seccomp-bpf/syscall.h"
-
namespace playground2 {
void Die::ExitGroup() {
@@ -29,8 +28,9 @@ void Die::ExitGroup() {
// succeeded in doing so. Nonetheless, triggering a fatal signal could help
// us terminate.
signal(SIGSEGV, SIG_DFL);
- SandboxSyscall(__NR_prctl, PR_SET_DUMPABLE, (void *)0, (void *)0, (void *)0);
- if (*(volatile char *)0) { }
+ SandboxSyscall(__NR_prctl, PR_SET_DUMPABLE, (void*)0, (void*)0, (void*)0);
+ if (*(volatile char*)0) {
+ }
// If there is no way for us to ask for the program to exit, the next
// best thing we can do is to loop indefinitely. Maybe, somebody will notice
@@ -42,37 +42,37 @@ void Die::ExitGroup() {
}
}
-void Die::SandboxDie(const char *msg, const char *file, int line) {
+void Die::SandboxDie(const char* msg, const char* file, int line) {
if (simple_exit_) {
LogToStderr(msg, file, line);
} else {
- #if defined(SECCOMP_BPF_STANDALONE)
- Die::LogToStderr(msg, file, line);
- #else
- logging::LogMessage(file, line, logging::LOG_FATAL).stream() << msg;
- #endif
+#if defined(SECCOMP_BPF_STANDALONE)
+ Die::LogToStderr(msg, file, line);
+#else
+ logging::LogMessage(file, line, logging::LOG_FATAL).stream() << msg;
+#endif
}
ExitGroup();
}
-void Die::RawSandboxDie(const char *msg) {
+void Die::RawSandboxDie(const char* msg) {
if (!msg)
msg = "";
RAW_LOG(FATAL, msg);
ExitGroup();
}
-void Die::SandboxInfo(const char *msg, const char *file, int line) {
+void Die::SandboxInfo(const char* msg, const char* file, int line) {
if (!suppress_info_) {
- #if defined(SECCOMP_BPF_STANDALONE)
+#if defined(SECCOMP_BPF_STANDALONE)
Die::LogToStderr(msg, file, line);
- #else
+#else
logging::LogMessage(file, line, logging::LOG_INFO).stream() << msg;
- #endif
+#endif
}
}
-void Die::LogToStderr(const char *msg, const char *file, int line) {
+void Die::LogToStderr(const char* msg, const char* file, int line) {
if (msg) {
char buf[40];
snprintf(buf, sizeof(buf), "%d", line);
@@ -80,11 +80,12 @@ void Die::LogToStderr(const char *msg, const char *file, int line) {
// No need to loop. Short write()s are unlikely and if they happen we
// probably prefer them over a loop that blocks.
- if (HANDLE_EINTR(SandboxSyscall(__NR_write, 2, s.c_str(), s.length()))) { }
+ if (HANDLE_EINTR(SandboxSyscall(__NR_write, 2, s.c_str(), s.length()))) {
+ }
}
}
-bool Die::simple_exit_ = false;
+bool Die::simple_exit_ = false;
bool Die::suppress_info_ = false;
} // namespace
diff --git a/sandbox/linux/seccomp-bpf/die.h b/sandbox/linux/seccomp-bpf/die.h
index 7c95997..8dc0045 100644
--- a/sandbox/linux/seccomp-bpf/die.h
+++ b/sandbox/linux/seccomp-bpf/die.h
@@ -7,21 +7,20 @@
#include "sandbox/linux/seccomp-bpf/port.h"
-
namespace playground2 {
class Die {
public:
- // This is the main API for using this file. Prints a error message and
- // exits with a fatal error. This is not async-signal safe.
- #define SANDBOX_DIE(m) playground2::Die::SandboxDie(m, __FILE__, __LINE__)
+// This is the main API for using this file. Prints a error message and
+// exits with a fatal error. This is not async-signal safe.
+#define SANDBOX_DIE(m) playground2::Die::SandboxDie(m, __FILE__, __LINE__)
- // An async signal safe version of the same API. Won't print the filename
- // and line numbers.
- #define RAW_SANDBOX_DIE(m) playground2::Die::RawSandboxDie(m)
+// An async signal safe version of the same API. Won't print the filename
+// and line numbers.
+#define RAW_SANDBOX_DIE(m) playground2::Die::RawSandboxDie(m)
- // Adds an informational message to the log file or stderr as appropriate.
- #define SANDBOX_INFO(m) playground2::Die::SandboxInfo(m, __FILE__, __LINE__)
+// Adds an informational message to the log file or stderr as appropriate.
+#define SANDBOX_INFO(m) playground2::Die::SandboxInfo(m, __FILE__, __LINE__)
// Terminate the program, even if the current sandbox policy prevents some
// of the more commonly used functions used for exiting.
@@ -32,18 +31,18 @@ class Die {
// This method gets called by SANDBOX_DIE(). There is normally no reason
// to call it directly unless you are defining your own exiting macro.
- static void SandboxDie(const char *msg, const char *file, int line)
- __attribute__((noreturn));
+ static void SandboxDie(const char* msg, const char* file, int line)
+ __attribute__((noreturn));
- static void RawSandboxDie(const char *msg) __attribute__((noreturn));
+ static void RawSandboxDie(const char* msg) __attribute__((noreturn));
// This method gets called by SANDBOX_INFO(). There is normally no reason
// to call it directly unless you are defining your own logging macro.
- static void SandboxInfo(const char *msg, const char *file, int line);
+ static void SandboxInfo(const char* msg, const char* file, int line);
// Writes a message to stderr. Used as a fall-back choice, if we don't have
// any other way to report an error.
- static void LogToStderr(const char *msg, const char *file, int line);
+ static void LogToStderr(const char* msg, const char* file, int line);
// We generally want to run all exit handlers. This means, on SANDBOX_DIE()
// we should be calling LOG(FATAL). But there are some situations where
diff --git a/sandbox/linux/seccomp-bpf/errorcode.cc b/sandbox/linux/seccomp-bpf/errorcode.cc
index ab89d73..e517d38 100644
--- a/sandbox/linux/seccomp-bpf/errorcode.cc
+++ b/sandbox/linux/seccomp-bpf/errorcode.cc
@@ -5,35 +5,36 @@
#include "sandbox/linux/seccomp-bpf/die.h"
#include "sandbox/linux/seccomp-bpf/errorcode.h"
-
namespace playground2 {
ErrorCode::ErrorCode(int err) {
switch (err) {
- case ERR_ALLOWED:
- err_ = SECCOMP_RET_ALLOW;
- error_type_ = ET_SIMPLE;
- break;
- case ERR_MIN_ERRNO ... ERR_MAX_ERRNO:
- err_ = SECCOMP_RET_ERRNO + err;
- error_type_ = ET_SIMPLE;
- break;
- default:
- SANDBOX_DIE("Invalid use of ErrorCode object");
+ case ERR_ALLOWED:
+ err_ = SECCOMP_RET_ALLOW;
+ error_type_ = ET_SIMPLE;
+ break;
+ case ERR_MIN_ERRNO... ERR_MAX_ERRNO:
+ err_ = SECCOMP_RET_ERRNO + err;
+ error_type_ = ET_SIMPLE;
+ break;
+ default:
+ SANDBOX_DIE("Invalid use of ErrorCode object");
}
}
-ErrorCode::ErrorCode(Trap::TrapFnc fnc, const void *aux, bool safe,
- uint16_t id)
+ErrorCode::ErrorCode(Trap::TrapFnc fnc, const void* aux, bool safe, uint16_t id)
: error_type_(ET_TRAP),
fnc_(fnc),
- aux_(const_cast<void *>(aux)),
+ aux_(const_cast<void*>(aux)),
safe_(safe),
- err_(SECCOMP_RET_TRAP + id) {
-}
+ err_(SECCOMP_RET_TRAP + id) {}
-ErrorCode::ErrorCode(int argno, ArgType width, Operation op, uint64_t value,
- const ErrorCode *passed, const ErrorCode *failed)
+ErrorCode::ErrorCode(int argno,
+ ArgType width,
+ Operation op,
+ uint64_t value,
+ const ErrorCode* passed,
+ const ErrorCode* failed)
: error_type_(ET_COND),
value_(value),
argno_(argno),
@@ -57,12 +58,9 @@ bool ErrorCode::Equals(const ErrorCode& err) const {
if (error_type_ == ET_SIMPLE || error_type_ == ET_TRAP) {
return err_ == err.err_;
} else if (error_type_ == ET_COND) {
- return value_ == err.value_ &&
- argno_ == err.argno_ &&
- width_ == err.width_ &&
- op_ == err.op_ &&
- passed_->Equals(*err.passed_) &&
- failed_->Equals(*err.failed_);
+ return value_ == err.value_ && argno_ == err.argno_ &&
+ width_ == err.width_ && op_ == err.op_ &&
+ passed_->Equals(*err.passed_) && failed_->Equals(*err.failed_);
} else {
SANDBOX_DIE("Corrupted ErrorCode");
}
diff --git a/sandbox/linux/seccomp-bpf/errorcode.h b/sandbox/linux/seccomp-bpf/errorcode.h
index 61ec110..182fadb 100644
--- a/sandbox/linux/seccomp-bpf/errorcode.h
+++ b/sandbox/linux/seccomp-bpf/errorcode.h
@@ -27,7 +27,7 @@ class ErrorCode {
// completely arbitrary. But we want to pick it so that is is unlikely
// to be passed in accidentally, when the user intended to return an
// "errno" (see below) value instead.
- ERR_ALLOWED = 0x04000000,
+ ERR_ALLOWED = 0x04000000,
// Deny the system call with a particular "errno" value.
// N.B.: It is also possible to return "0" here. That would normally
@@ -85,21 +85,26 @@ class ErrorCode {
// need.
// TODO(markus): Check whether we should automatically emulate signed
// operations.
- OP_GREATER_UNSIGNED, OP_GREATER_EQUAL_UNSIGNED,
+ OP_GREATER_UNSIGNED,
+ OP_GREATER_EQUAL_UNSIGNED,
// Tests a system call argument against a bit mask.
// The "ALL_BITS" variant performs this test: "arg & mask == mask"
// This implies that a mask of zero always results in a passing test.
// The "ANY_BITS" variant performs this test: "arg & mask != 0"
// This implies that a mask of zero always results in a failing test.
- OP_HAS_ALL_BITS, OP_HAS_ANY_BITS,
+ OP_HAS_ALL_BITS,
+ OP_HAS_ANY_BITS,
// Total number of operations.
OP_NUM_OPS,
};
enum ErrorType {
- ET_INVALID, ET_SIMPLE, ET_TRAP, ET_COND,
+ ET_INVALID,
+ ET_SIMPLE,
+ ET_TRAP,
+ ET_COND,
};
// We allow the default constructor, as it makes the ErrorCode class
@@ -107,10 +112,7 @@ class ErrorCode {
// when compiling a BPF filter, we deliberately generate an invalid
// program that will get flagged both by our Verifier class and by
// the Linux kernel.
- ErrorCode() :
- error_type_(ET_INVALID),
- err_(SECCOMP_RET_INVALID) {
- }
+ ErrorCode() : error_type_(ET_INVALID), err_(SECCOMP_RET_INVALID) {}
explicit ErrorCode(int err);
// For all practical purposes, ErrorCodes are treated as if they were
@@ -121,7 +123,7 @@ class ErrorCode {
// callers handle life-cycle management for these objects.
// Destructor
- ~ErrorCode() { }
+ ~ErrorCode() {}
bool Equals(const ErrorCode& err) const;
bool LessThan(const ErrorCode& err) const;
@@ -135,8 +137,8 @@ class ErrorCode {
int argno() const { return argno_; }
ArgType width() const { return width_; }
Operation op() const { return op_; }
- const ErrorCode *passed() const { return passed_; }
- const ErrorCode *failed() const { return failed_; }
+ const ErrorCode* passed() const { return passed_; }
+ const ErrorCode* failed() const { return failed_; }
struct LessThan {
bool operator()(const ErrorCode& a, const ErrorCode& b) const {
@@ -152,31 +154,35 @@ class ErrorCode {
// If we are wrapping a callback, we must assign a unique id. This id is
// how the kernel tells us which one of our different SECCOMP_RET_TRAP
// cases has been triggered.
- ErrorCode(Trap::TrapFnc fnc, const void *aux, bool safe, uint16_t id);
+ ErrorCode(Trap::TrapFnc fnc, const void* aux, bool safe, uint16_t id);
// Some system calls require inspection of arguments. This constructor
// allows us to specify additional constraints.
- ErrorCode(int argno, ArgType width, Operation op, uint64_t value,
- const ErrorCode *passed, const ErrorCode *failed);
+ ErrorCode(int argno,
+ ArgType width,
+ Operation op,
+ uint64_t value,
+ const ErrorCode* passed,
+ const ErrorCode* failed);
ErrorType error_type_;
union {
// Fields needed for SECCOMP_RET_TRAP callbacks
struct {
- Trap::TrapFnc fnc_; // Callback function and arg, if trap was
- void *aux_; // triggered by the kernel's BPF filter.
- bool safe_; // Keep sandbox active while calling fnc_()
+ Trap::TrapFnc fnc_; // Callback function and arg, if trap was
+ void* aux_; // triggered by the kernel's BPF filter.
+ bool safe_; // Keep sandbox active while calling fnc_()
};
// Fields needed when inspecting additional arguments.
struct {
- uint64_t value_; // Value that we are comparing with.
- int argno_; // Syscall arg number that we are inspecting.
- ArgType width_; // Whether we are looking at a 32/64bit value.
+ uint64_t value_; // Value that we are comparing with.
+ int argno_; // Syscall arg number that we are inspecting.
+ ArgType width_; // Whether we are looking at a 32/64bit value.
Operation op_; // Comparison operation.
- const ErrorCode *passed_; // Value to be returned if comparison passed,
- const ErrorCode *failed_; // or if it failed.
+ const ErrorCode* passed_; // Value to be returned if comparison passed,
+ const ErrorCode* failed_; // or if it failed.
};
};
@@ -184,7 +190,6 @@ class ErrorCode {
// the value that uniquely identifies any ErrorCode and it (typically) can
// be emitted directly into a BPF filter program.
uint32_t err_;
-
};
} // namespace
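The ALL_BITS/ANY_BITS semantics spelled out above are easy to pin down with a few compile-time checks; the helper functions and values below are illustrative only (C++11 for static_assert).

#include <stdint.h>

constexpr bool HasAllBits(uint64_t arg, uint64_t mask) {
  return (arg & mask) == mask;  // OP_HAS_ALL_BITS: empty mask always passes.
}
constexpr bool HasAnyBits(uint64_t arg, uint64_t mask) {
  return (arg & mask) != 0;     // OP_HAS_ANY_BITS: empty mask always fails.
}

static_assert(HasAllBits(0x6, 0x6), "all requested bits set");
static_assert(!HasAllBits(0x4, 0x6), "one requested bit missing");
static_assert(HasAllBits(0x4, 0x0), "zero mask trivially passes");
static_assert(HasAnyBits(0x4, 0x6), "one overlapping bit suffices");
static_assert(!HasAnyBits(0x4, 0x0), "zero mask never passes");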
diff --git a/sandbox/linux/seccomp-bpf/instruction.h b/sandbox/linux/seccomp-bpf/instruction.h
index 0fc8123..8d35187 100644
--- a/sandbox/linux/seccomp-bpf/instruction.h
+++ b/sandbox/linux/seccomp-bpf/instruction.h
@@ -7,7 +7,6 @@
#include <stdint.h>
-
namespace playground2 {
// The fields in this structure have the same meaning as the corresponding
@@ -27,12 +26,12 @@ namespace playground2 {
struct Instruction {
// Constructor for an non-jumping instruction or for an unconditional
// "always" jump.
- Instruction(uint16_t c, uint32_t parm, Instruction *n) :
- code(c), next(n), k(parm) { }
+ Instruction(uint16_t c, uint32_t parm, Instruction* n)
+ : code(c), next(n), k(parm) {}
// Constructor for a conditional jump instruction.
- Instruction(uint16_t c, uint32_t parm, Instruction *jt, Instruction *jf) :
- code(c), jt_ptr(jt), jf_ptr(jf), k(parm) { }
+ Instruction(uint16_t c, uint32_t parm, Instruction* jt, Instruction* jf)
+ : code(c), jt_ptr(jt), jf_ptr(jf), k(parm) {}
uint16_t code;
union {
@@ -47,13 +46,13 @@ struct Instruction {
// keys in a TargetsToBlocks map and should no longer be dereferenced
// directly.
struct {
- Instruction *jt_ptr, *jf_ptr;
+ Instruction* jt_ptr, *jf_ptr;
};
// While assembling the BPF program, non-jumping instructions are linked
// by the "next_" pointer. This field is no longer needed when we have
// computed basic blocks.
- Instruction *next;
+ Instruction* next;
};
uint32_t k;
};
diff --git a/sandbox/linux/seccomp-bpf/port.h b/sandbox/linux/seccomp-bpf/port.h
index f10b148..e9cf6c7 100644
--- a/sandbox/linux/seccomp-bpf/port.h
+++ b/sandbox/linux/seccomp-bpf/port.h
@@ -9,28 +9,27 @@
#define SANDBOX_LINUX_SECCOMP_BPF_PORT_H__
#if !defined(SECCOMP_BPF_STANDALONE)
- #include "base/basictypes.h"
- #include "base/logging.h"
- #include "base/posix/eintr_wrapper.h"
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
#else
- #define arraysize(x) (sizeof(x)/sizeof(*(x)))
+#define arraysize(x) (sizeof(x) / sizeof(*(x)))
- #define HANDLE_EINTR TEMP_FAILURE_RETRY
+#define HANDLE_EINTR TEMP_FAILURE_RETRY
- #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
- #define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
- template <bool>
- struct CompileAssert {
- };
+template <bool>
+struct CompileAssert {};
- #define COMPILE_ASSERT(expr, msg) \
- typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
+#define COMPILE_ASSERT(expr, msg) \
+ typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
#endif
#endif // SANDBOX_LINUX_SECCOMP_BPF_PORT_H__
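For completeness, the standalone COMPILE_ASSERT fallback above is used like this; when the expression is false, the typedef declares an array of negative size and compilation fails (the assertion below is an illustrative example, not from this CL):

COMPILE_ASSERT(sizeof(long long) >= 8, long_long_is_at_least_64_bits);  // compiles
// COMPILE_ASSERT(sizeof(char) == 2, char_is_two_bytes);                // would fail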
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf.cc b/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
index 07de144..49fdd86 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf.cc
@@ -38,23 +38,28 @@ namespace {
const int kExpectedExitCode = 100;
-template<class T> int popcount(T x);
-template<> int popcount<unsigned int>(unsigned int x) {
+template <class T>
+int popcount(T x);
+template <>
+int popcount<unsigned int>(unsigned int x) {
return __builtin_popcount(x);
}
-template<> int popcount<unsigned long>(unsigned long x) {
+template <>
+int popcount<unsigned long>(unsigned long x) {
return __builtin_popcountl(x);
}
-template<> int popcount<unsigned long long>(unsigned long long x) {
+template <>
+int popcount<unsigned long long>(unsigned long long x) {
return __builtin_popcountll(x);
}
void WriteFailedStderrSetupMessage(int out_fd) {
const char* error_string = strerror(errno);
- static const char msg[] = "You have reproduced a puzzling issue.\n"
- "Please, report to crbug.com/152530!\n"
- "Failed to set up stderr: ";
- if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg)-1)) > 0 && error_string &&
+ static const char msg[] =
+ "You have reproduced a puzzling issue.\n"
+ "Please, report to crbug.com/152530!\n"
+ "Failed to set up stderr: ";
+ if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg) - 1)) > 0 && error_string &&
HANDLE_EINTR(write(out_fd, error_string, strlen(error_string))) > 0 &&
HANDLE_EINTR(write(out_fd, "\n", 1))) {
}
@@ -62,18 +67,18 @@ void WriteFailedStderrSetupMessage(int out_fd) {
// We define a really simple sandbox policy. It is just good enough for us
// to tell that the sandbox has actually been activated.
-ErrorCode ProbeEvaluator(Sandbox *, int sysnum, void *) __attribute__((const));
-ErrorCode ProbeEvaluator(Sandbox *, int sysnum, void *) {
+ErrorCode ProbeEvaluator(Sandbox*, int sysnum, void*) __attribute__((const));
+ErrorCode ProbeEvaluator(Sandbox*, int sysnum, void*) {
switch (sysnum) {
- case __NR_getpid:
- // Return EPERM so that we can check that the filter actually ran.
- return ErrorCode(EPERM);
- case __NR_exit_group:
- // Allow exit() with a non-default return code.
- return ErrorCode(ErrorCode::ERR_ALLOWED);
- default:
- // Make everything else fail in an easily recognizable way.
- return ErrorCode(EINVAL);
+ case __NR_getpid:
+ // Return EPERM so that we can check that the filter actually ran.
+ return ErrorCode(EPERM);
+ case __NR_exit_group:
+ // Allow exit() with a non-default return code.
+ return ErrorCode(ErrorCode::ERR_ALLOWED);
+ default:
+ // Make everything else fail in an easily recognizable way.
+ return ErrorCode(EINVAL);
}
}
@@ -83,7 +88,7 @@ void ProbeProcess(void) {
}
}
-ErrorCode AllowAllEvaluator(Sandbox *, int sysnum, void *) {
+ErrorCode AllowAllEvaluator(Sandbox*, int sysnum, void*) {
if (!Sandbox::IsValidSyscallNumber(sysnum)) {
return ErrorCode(ENOSYS);
}
@@ -109,12 +114,11 @@ bool IsSingleThreaded(int proc_fd) {
struct stat sb;
int task = -1;
- if ((task = openat(proc_fd, "self/task", O_RDONLY|O_DIRECTORY)) < 0 ||
- fstat(task, &sb) != 0 ||
- sb.st_nlink != 3 ||
- HANDLE_EINTR(close(task))) {
+ if ((task = openat(proc_fd, "self/task", O_RDONLY | O_DIRECTORY)) < 0 ||
+ fstat(task, &sb) != 0 || sb.st_nlink != 3 || HANDLE_EINTR(close(task))) {
if (task >= 0) {
- if (HANDLE_EINTR(close(task))) { }
+ if (HANDLE_EINTR(close(task))) {
+ }
}
return false;
}
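The fstat() check above relies on a property of procfs: a directory's link count is two ("." and "..") plus one per subdirectory, and /proc/self/task holds one subdirectory per thread, so st_nlink == 3 means exactly one thread. A standalone sketch of the same test, assuming /proc is mounted:

#include <sys/stat.h>

// Returns true if the calling process currently looks single-threaded.
bool LooksSingleThreaded() {
  struct stat sb;
  // "." and ".." contribute two links; each thread's task directory adds one,
  // so a single-threaded process observes st_nlink == 3.
  return stat("/proc/self/task", &sb) == 0 && sb.st_nlink == 3;
}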
@@ -130,14 +134,13 @@ bool IsDenied(const ErrorCode& code) {
// Function that can be passed as a callback function to CodeGen::Traverse().
// Checks whether the "insn" returns an UnsafeTrap() ErrorCode. If so, it
// sets the "bool" variable pointed to by "aux".
-void CheckForUnsafeErrorCodes(Instruction *insn, void *aux) {
- bool *is_unsafe = static_cast<bool *>(aux);
+void CheckForUnsafeErrorCodes(Instruction* insn, void* aux) {
+ bool* is_unsafe = static_cast<bool*>(aux);
if (!*is_unsafe) {
- if (BPF_CLASS(insn->code) == BPF_RET &&
- insn->k > SECCOMP_RET_TRAP &&
+ if (BPF_CLASS(insn->code) == BPF_RET && insn->k > SECCOMP_RET_TRAP &&
insn->k - SECCOMP_RET_TRAP <= SECCOMP_RET_DATA) {
const ErrorCode& err =
- Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA);
+ Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA);
if (err.error_type() != ErrorCode::ET_INVALID && !err.safe()) {
*is_unsafe = true;
}
@@ -147,7 +150,7 @@ void CheckForUnsafeErrorCodes(Instruction *insn, void *aux) {
// A Trap() handler that returns an "errno" value. The value is encoded
// in the "aux" parameter.
-intptr_t ReturnErrno(const struct arch_seccomp_data&, void *aux) {
+intptr_t ReturnErrno(const struct arch_seccomp_data&, void* aux) {
  // TrapFnc functions report errors by following the native kernel convention
// of returning an exit code in the range of -1..-4096. They do not try to
// set errno themselves. The glibc wrapper that triggered the SIGSYS will
@@ -160,7 +163,7 @@ intptr_t ReturnErrno(const struct arch_seccomp_data&, void *aux) {
// Checks whether the "insn" returns an errno value from a BPF filter. If so,
// it rewrites the instruction to instead call a Trap() handler that does
// the same thing. "aux" is ignored.
-void RedirectToUserspace(Instruction *insn, void *aux) {
+void RedirectToUserspace(Instruction* insn, void* aux) {
// When inside an UnsafeTrap() callback, we want to allow all system calls.
// This means, we must conditionally disable the sandbox -- and that's not
// something that kernel-side BPF filters can do, as they cannot inspect
@@ -170,11 +173,11 @@ void RedirectToUserspace(Instruction *insn, void *aux) {
// The performance penalty for this extra round-trip to user-space is not
// actually that bad, as we only ever pay it for denied system calls; and a
// typical program has very few of these.
- Sandbox *sandbox = static_cast<Sandbox *>(aux);
+ Sandbox* sandbox = static_cast<Sandbox*>(aux);
if (BPF_CLASS(insn->code) == BPF_RET &&
(insn->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
insn->k = sandbox->Trap(ReturnErrno,
- reinterpret_cast<void *>(insn->k & SECCOMP_RET_DATA)).err();
+ reinterpret_cast<void*>(insn->k & SECCOMP_RET_DATA)).err();
}
}
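The rewrite above only moves where the errno is produced: instead of the kernel acting on SECCOMP_RET_ERRNO, a Trap() handler reports the same value from user space. A rough model of the convention involved (names here are illustrative, not the CL's code):

#include <stdint.h>

// A TrapFnc reports failure by returning the negative errno value and never
// touches errno itself; the libc wrapper interrupted by the SIGSYS performs
// that translation, so callers still see syscall() == -1 with errno set.
intptr_t ReturnErrnoModel(int err) {
  return -static_cast<intptr_t>(err);
}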
@@ -195,8 +198,8 @@ class RedirectToUserSpacePolicyWrapper : public SandboxBpfPolicy {
ErrorCode err =
wrapped_policy_->EvaluateSyscall(sandbox_compiler, system_call_number);
if ((err.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
- return sandbox_compiler->Trap(ReturnErrno,
- reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA));
+ return sandbox_compiler->Trap(
+ ReturnErrno, reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA));
}
return err;
}
@@ -206,16 +209,17 @@ class RedirectToUserSpacePolicyWrapper : public SandboxBpfPolicy {
DISALLOW_COPY_AND_ASSIGN(RedirectToUserSpacePolicyWrapper);
};
-intptr_t BpfFailure(const struct arch_seccomp_data&, void *aux) {
- SANDBOX_DIE(static_cast<char *>(aux));
+intptr_t BpfFailure(const struct arch_seccomp_data&, void* aux) {
+ SANDBOX_DIE(static_cast<char*>(aux));
}
// This class allows compatibility with the old, deprecated SetSandboxPolicy.
class CompatibilityPolicy : public SandboxBpfPolicy {
public:
CompatibilityPolicy(Sandbox::EvaluateSyscall syscall_evaluator, void* aux)
- : syscall_evaluator_(syscall_evaluator),
- aux_(aux) { DCHECK(syscall_evaluator_); }
+ : syscall_evaluator_(syscall_evaluator), aux_(aux) {
+ DCHECK(syscall_evaluator_);
+ }
virtual ErrorCode EvaluateSyscall(Sandbox* sandbox_compiler,
int system_call_number) const OVERRIDE {
@@ -234,8 +238,7 @@ Sandbox::Sandbox()
: quiet_(false),
proc_fd_(-1),
conds_(new Conds),
- sandbox_has_started_(false) {
-}
+ sandbox_has_started_(false) {}
Sandbox::~Sandbox() {
// It is generally unsafe to call any memory allocator operations or to even
@@ -258,19 +261,17 @@ bool Sandbox::IsValidSyscallNumber(int sysnum) {
return SyscallIterator::IsValid(sysnum);
}
-
bool Sandbox::RunFunctionInPolicy(void (*code_in_sandbox)(),
Sandbox::EvaluateSyscall syscall_evaluator,
- void *aux) {
+ void* aux) {
// Block all signals before forking a child process. This prevents an
// attacker from manipulating our test by sending us an unexpected signal.
sigset_t old_mask, new_mask;
- if (sigfillset(&new_mask) ||
- sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) {
+ if (sigfillset(&new_mask) || sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) {
SANDBOX_DIE("sigprocmask() failed");
}
int fds[2];
- if (pipe2(fds, O_NONBLOCK|O_CLOEXEC)) {
+ if (pipe2(fds, O_NONBLOCK | O_CLOEXEC)) {
SANDBOX_DIE("pipe() failed");
}
@@ -360,7 +361,7 @@ bool Sandbox::RunFunctionInPolicy(void (*code_in_sandbox)(),
char buf[4096];
ssize_t len = HANDLE_EINTR(read(fds[0], buf, sizeof(buf) - 1));
if (len > 0) {
- while (len > 1 && buf[len-1] == '\n') {
+ while (len > 1 && buf[len - 1] == '\n') {
--len;
}
buf[len] = '\000';
@@ -375,9 +376,8 @@ bool Sandbox::RunFunctionInPolicy(void (*code_in_sandbox)(),
}
bool Sandbox::KernelSupportSeccompBPF() {
- return
- RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) &&
- RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0);
+ return RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) &&
+ RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0);
}
Sandbox::SandboxStatus Sandbox::SupportsSeccompSandbox(int proc_fd) {
@@ -421,8 +421,8 @@ Sandbox::SandboxStatus Sandbox::SupportsSeccompSandbox(int proc_fd) {
// failures (e.g. if the current kernel lacks support for BPF filters).
sandbox.quiet_ = true;
sandbox.set_proc_fd(proc_fd);
- status_ = sandbox.KernelSupportSeccompBPF()
- ? STATUS_AVAILABLE : STATUS_UNSUPPORTED;
+ status_ = sandbox.KernelSupportSeccompBPF() ? STATUS_AVAILABLE
+ : STATUS_UNSUPPORTED;
// As we are performing our tests from a child process, the run-time
// environment that is visible to the sandbox is always guaranteed to be
@@ -435,20 +435,20 @@ Sandbox::SandboxStatus Sandbox::SupportsSeccompSandbox(int proc_fd) {
return status_;
}
-void Sandbox::set_proc_fd(int proc_fd) {
- proc_fd_ = proc_fd;
-}
+void Sandbox::set_proc_fd(int proc_fd) { proc_fd_ = proc_fd; }
void Sandbox::StartSandbox() {
if (status_ == STATUS_UNSUPPORTED || status_ == STATUS_UNAVAILABLE) {
- SANDBOX_DIE("Trying to start sandbox, even though it is known to be "
- "unavailable");
+ SANDBOX_DIE(
+ "Trying to start sandbox, even though it is known to be "
+ "unavailable");
} else if (sandbox_has_started_ || !conds_) {
- SANDBOX_DIE("Cannot repeatedly start sandbox. Create a separate Sandbox "
- "object instead.");
+ SANDBOX_DIE(
+ "Cannot repeatedly start sandbox. Create a separate Sandbox "
+ "object instead.");
}
if (proc_fd_ < 0) {
- proc_fd_ = open("/proc", O_RDONLY|O_DIRECTORY);
+ proc_fd_ = open("/proc", O_RDONLY | O_DIRECTORY);
}
if (proc_fd_ < 0) {
// For now, continue in degraded mode, if we can't access /proc.
@@ -476,11 +476,12 @@ void Sandbox::StartSandbox() {
}
void Sandbox::PolicySanityChecks(SandboxBpfPolicy* policy) {
- for (SyscallIterator iter(true); !iter.Done(); ) {
+ for (SyscallIterator iter(true); !iter.Done();) {
uint32_t sysnum = iter.Next();
if (!IsDenied(policy->EvaluateSyscall(this, sysnum))) {
- SANDBOX_DIE("Policies should deny system calls that are outside the "
- "expected range (typically MIN_SYSCALL..MAX_SYSCALL)");
+ SANDBOX_DIE(
+ "Policies should deny system calls that are outside the "
+ "expected range (typically MIN_SYSCALL..MAX_SYSCALL)");
}
}
return;
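In practice a policy passes this sanity check by denying everything outside the valid range before doing any real work, which is the shape all of the test policies later in this CL follow. A minimal skeleton, assuming the playground2 headers used throughout this file:

// Deny out-of-range syscall numbers first; only then make real decisions.
ErrorCode ExamplePolicy(Sandbox*, int sysno, void*) {
  if (!Sandbox::IsValidSyscallNumber(sysno)) {
    return ErrorCode(ENOSYS);
  }
  // Per-syscall decisions would go here.
  return ErrorCode(ErrorCode::ERR_ALLOWED);
}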
@@ -517,11 +518,11 @@ void Sandbox::InstallFilter() {
// installed the BPF filter program in the kernel. Depending on the
// system memory allocator that is in effect, these operators can result
// in system calls to things like munmap() or brk().
- Program *program = AssembleFilter(false /* force_verification */);
+ Program* program = AssembleFilter(false /* force_verification */);
struct sock_filter bpf[program->size()];
- const struct sock_fprog prog = {
- static_cast<unsigned short>(program->size()), bpf };
+ const struct sock_fprog prog = {static_cast<unsigned short>(program->size()),
+ bpf};
memcpy(bpf, &(*program)[0], sizeof(bpf));
delete program;
@@ -546,7 +547,7 @@ void Sandbox::InstallFilter() {
return;
}
-Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
+Sandbox::Program* Sandbox::AssembleFilter(bool force_verification) {
#if !defined(NDEBUG)
force_verification = true;
#endif
@@ -555,21 +556,24 @@ Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
DCHECK(policy_);
// Assemble the BPF filter program.
- CodeGen *gen = new CodeGen();
+ CodeGen* gen = new CodeGen();
if (!gen) {
SANDBOX_DIE("Out of memory");
}
// If the architecture doesn't match SECCOMP_ARCH, disallow the
// system call.
- Instruction *tail;
- Instruction *head =
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_ARCH_IDX,
- tail =
- gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, SECCOMP_ARCH,
- NULL,
- gen->MakeInstruction(BPF_RET+BPF_K,
- Kill("Invalid audit architecture in BPF filter"))));
+ Instruction* tail;
+ Instruction* head = gen->MakeInstruction(
+ BPF_LD + BPF_W + BPF_ABS,
+ SECCOMP_ARCH_IDX,
+ tail = gen->MakeInstruction(
+ BPF_JMP + BPF_JEQ + BPF_K,
+ SECCOMP_ARCH,
+ NULL,
+ gen->MakeInstruction(
+ BPF_RET + BPF_K,
+ Kill("Invalid audit architecture in BPF filter"))));
bool has_unsafe_traps = false;
{
@@ -579,8 +583,8 @@ Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
FindRanges(&ranges);
// Compile the system call ranges to an optimized BPF jumptable
- Instruction *jumptable =
- AssembleJumpTable(gen, ranges.begin(), ranges.end());
+ Instruction* jumptable =
+ AssembleJumpTable(gen, ranges.begin(), ranges.end());
// If there is at least one UnsafeTrap() in our program, the entire sandbox
// is unsafe. We need to modify the program so that all non-
@@ -590,8 +594,8 @@ Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps);
// Grab the system call number, so that we can implement jump tables.
- Instruction *load_nr =
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_NR_IDX);
+ Instruction* load_nr =
+ gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX);
// If our BPF program has unsafe jumps, enable support for them. This
// test happens very early in the BPF filter program. Even before we
@@ -602,25 +606,29 @@ Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
// is actually requested by the sandbox policy.
if (has_unsafe_traps) {
if (SandboxSyscall(-1) == -1 && errno == ENOSYS) {
- SANDBOX_DIE("Support for UnsafeTrap() has not yet been ported to this "
- "architecture");
+ SANDBOX_DIE(
+ "Support for UnsafeTrap() has not yet been ported to this "
+ "architecture");
}
- if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask).
- Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) ||
- !policy_->EvaluateSyscall(this, __NR_rt_sigreturn).
- Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
+ if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask)
+ .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) ||
+ !policy_->EvaluateSyscall(this, __NR_rt_sigreturn)
+ .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#if defined(__NR_sigprocmask)
- || !policy_->EvaluateSyscall(this, __NR_sigprocmask).
- Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
+ ||
+ !policy_->EvaluateSyscall(this, __NR_sigprocmask)
+ .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
#if defined(__NR_sigreturn)
- || !policy_->EvaluateSyscall(this, __NR_sigreturn).
- Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
+ ||
+ !policy_->EvaluateSyscall(this, __NR_sigreturn)
+ .Equals(ErrorCode(ErrorCode::ERR_ALLOWED))
#endif
) {
- SANDBOX_DIE("Invalid seccomp policy; if using UnsafeTrap(), you must "
- "unconditionally allow sigreturn() and sigprocmask()");
+ SANDBOX_DIE(
+ "Invalid seccomp policy; if using UnsafeTrap(), you must "
+ "unconditionally allow sigreturn() and sigprocmask()");
}
if (!Trap::EnableUnsafeTrapsInSigSysHandler()) {
@@ -636,49 +644,58 @@ Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
// Allow system calls, if they originate from our magic return address
// (which we can query by calling SandboxSyscall(-1)).
uintptr_t syscall_entry_point =
- static_cast<uintptr_t>(SandboxSyscall(-1));
+ static_cast<uintptr_t>(SandboxSyscall(-1));
uint32_t low = static_cast<uint32_t>(syscall_entry_point);
#if __SIZEOF_POINTER__ > 4
- uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32);
+ uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32);
#endif
// BPF cannot do native 64bit comparisons. On 64bit architectures, we
// have to compare both 32bit halves of the instruction pointer. If they
// match what we expect, we return ERR_ALLOWED. If either or both don't
  // match, we continue evaluating the rest of the sandbox policy.
- Instruction *escape_hatch =
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_IP_LSB_IDX,
- gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, low,
+ Instruction* escape_hatch = gen->MakeInstruction(
+ BPF_LD + BPF_W + BPF_ABS,
+ SECCOMP_IP_LSB_IDX,
+ gen->MakeInstruction(
+ BPF_JMP + BPF_JEQ + BPF_K,
+ low,
#if __SIZEOF_POINTER__ > 4
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_IP_MSB_IDX,
- gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, hi,
+ gen->MakeInstruction(
+ BPF_LD + BPF_W + BPF_ABS,
+ SECCOMP_IP_MSB_IDX,
+ gen->MakeInstruction(
+ BPF_JMP + BPF_JEQ + BPF_K,
+ hi,
#endif
- gen->MakeInstruction(BPF_RET+BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED)),
+ gen->MakeInstruction(BPF_RET + BPF_K,
+ ErrorCode(ErrorCode::ERR_ALLOWED)),
#if __SIZEOF_POINTER__ > 4
- load_nr)),
+ load_nr)),
#endif
- load_nr));
+ load_nr));
gen->JoinInstructions(tail, escape_hatch);
} else {
gen->JoinInstructions(tail, load_nr);
}
tail = load_nr;
- // On Intel architectures, verify that system call numbers are in the
- // expected number range. The older i386 and x86-64 APIs clear bit 30
- // on all system calls. The newer x32 API always sets bit 30.
+// On Intel architectures, verify that system call numbers are in the
+// expected number range. The older i386 and x86-64 APIs clear bit 30
+// on all system calls. The newer x32 API always sets bit 30.
#if defined(__i386__) || defined(__x86_64__)
- Instruction *invalidX32 =
- gen->MakeInstruction(BPF_RET+BPF_K,
- Kill("Illegal mixing of system call ABIs").err_);
- Instruction *checkX32 =
+ Instruction* invalidX32 = gen->MakeInstruction(
+ BPF_RET + BPF_K, Kill("Illegal mixing of system call ABIs").err_);
+ Instruction* checkX32 =
#if defined(__x86_64__) && defined(__ILP32__)
- gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, 0x40000000, 0, invalidX32);
+ gen->MakeInstruction(
+ BPF_JMP + BPF_JSET + BPF_K, 0x40000000, 0, invalidX32);
#else
- gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, 0x40000000, invalidX32, 0);
+ gen->MakeInstruction(
+ BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, 0);
#endif
- gen->JoinInstructions(tail, checkX32);
- tail = checkX32;
+ gen->JoinInstructions(tail, checkX32);
+ tail = checkX32;
#endif
// Append jump table to our pre-amble
@@ -686,7 +703,7 @@ Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) {
}
// Turn the DAG into a vector of instructions.
- Program *program = new Program();
+ Program* program = new Program();
gen->Compile(head, program);
delete gen;
@@ -712,17 +729,16 @@ void Sandbox::VerifyProgram(const Program& program, bool has_unsafe_traps) {
new RedirectToUserSpacePolicyWrapper(policy_.get()));
const char* err = NULL;
- if (!Verifier::VerifyBPF(
- this,
- program,
- has_unsafe_traps ? *redirected_policy : *policy_,
- &err)) {
+ if (!Verifier::VerifyBPF(this,
+ program,
+ has_unsafe_traps ? *redirected_policy : *policy_,
+ &err)) {
CodeGen::PrintProgram(program);
SANDBOX_DIE(err);
}
}
-void Sandbox::FindRanges(Ranges *ranges) {
+void Sandbox::FindRanges(Ranges* ranges) {
// Please note that "struct seccomp_data" defines system calls as a signed
// int32_t, but BPF instructions always operate on unsigned quantities. We
// deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL,
@@ -732,9 +748,9 @@ void Sandbox::FindRanges(Ranges *ranges) {
ErrorCode old_err = policy_->EvaluateSyscall(this, old_sysnum);
ErrorCode invalid_err = policy_->EvaluateSyscall(this, MIN_SYSCALL - 1);
- for (SyscallIterator iter(false); !iter.Done(); ) {
+ for (SyscallIterator iter(false); !iter.Done();) {
uint32_t sysnum = iter.Next();
- ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum));
+ ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum));
if (!iter.IsValid(sysnum) && !invalid_err.Equals(err)) {
// A proper sandbox policy should always treat system calls outside of
// the range MIN_SYSCALL..MAX_SYSCALL (i.e. anything that returns
@@ -745,12 +761,12 @@ void Sandbox::FindRanges(Ranges *ranges) {
if (!err.Equals(old_err) || iter.Done()) {
ranges->push_back(Range(old_sysnum, sysnum - 1, old_err));
old_sysnum = sysnum;
- old_err = err;
+ old_err = err;
}
}
}
-Instruction *Sandbox::AssembleJumpTable(CodeGen *gen,
+Instruction* Sandbox::AssembleJumpTable(CodeGen* gen,
Ranges::const_iterator start,
Ranges::const_iterator stop) {
// We convert the list of system call ranges into jump table that performs
@@ -769,166 +785,170 @@ Instruction *Sandbox::AssembleJumpTable(CodeGen *gen,
// We compare our system call number against the lowest valid system call
// number in this range object. If our number is lower, it is outside of
// this range object. If it is greater or equal, it might be inside.
- Ranges::const_iterator mid = start + (stop - start)/2;
+ Ranges::const_iterator mid = start + (stop - start) / 2;
// Sub-divide the list of ranges and continue recursively.
- Instruction *jf = AssembleJumpTable(gen, start, mid);
- Instruction *jt = AssembleJumpTable(gen, mid, stop);
- return gen->MakeInstruction(BPF_JMP+BPF_JGE+BPF_K, mid->from, jt, jf);
+ Instruction* jf = AssembleJumpTable(gen, start, mid);
+ Instruction* jt = AssembleJumpTable(gen, mid, stop);
+ return gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, mid->from, jt, jf);
}
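FindRanges() and AssembleJumpTable() together turn the per-syscall policy into a balanced decision tree: consecutive syscall numbers with identical ErrorCodes are coalesced into ranges, and the ranges are then binary-searched with BPF_JGE. A toy, self-contained sketch of both steps (ToyRange and the int verdicts stand in for the real Range and ErrorCode types):

#include <stdint.h>
#include <vector>

struct ToyRange {
  uint32_t from, to;
  int verdict;  // Stand-in for ErrorCode.
};

// Coalesce consecutive syscall numbers that share a verdict into ranges.
std::vector<ToyRange> Coalesce(const std::vector<int>& verdicts) {
  std::vector<ToyRange> ranges;
  uint32_t start = 0;
  for (uint32_t i = 1; i <= verdicts.size(); ++i) {
    if (i == verdicts.size() || verdicts[i] != verdicts[start]) {
      ranges.push_back(ToyRange{start, i - 1, verdicts[start]});
      start = i;
    }
  }
  return ranges;
}

// Binary search over the (non-empty) ranges; mirrors the BPF_JGE jump table.
int Lookup(const std::vector<ToyRange>& ranges,
           size_t start,
           size_t stop,
           uint32_t sysnum) {
  if (stop - start <= 1) {
    return ranges[start].verdict;
  }
  size_t mid = start + (stop - start) / 2;
  return sysnum >= ranges[mid].from ? Lookup(ranges, mid, stop, sysnum)
                                    : Lookup(ranges, start, mid, sysnum);
}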
-Instruction *Sandbox::RetExpression(CodeGen *gen, const ErrorCode& err) {
+Instruction* Sandbox::RetExpression(CodeGen* gen, const ErrorCode& err) {
if (err.error_type_ == ErrorCode::ET_COND) {
return CondExpression(gen, err);
} else {
- return gen->MakeInstruction(BPF_RET+BPF_K, err);
+ return gen->MakeInstruction(BPF_RET + BPF_K, err);
}
}
-Instruction *Sandbox::CondExpression(CodeGen *gen, const ErrorCode& cond) {
+Instruction* Sandbox::CondExpression(CodeGen* gen, const ErrorCode& cond) {
// We can only inspect the six system call arguments that are passed in
// CPU registers.
if (cond.argno_ < 0 || cond.argno_ >= 6) {
- SANDBOX_DIE("Internal compiler error; invalid argument number "
- "encountered");
+ SANDBOX_DIE(
+ "Internal compiler error; invalid argument number "
+ "encountered");
}
  // BPF programs operate on 32bit entities. Load both halves of the 64bit
// system call argument and then generate suitable conditional statements.
- Instruction *msb_head =
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS,
- SECCOMP_ARG_MSB_IDX(cond.argno_));
- Instruction *msb_tail = msb_head;
- Instruction *lsb_head =
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS,
- SECCOMP_ARG_LSB_IDX(cond.argno_));
- Instruction *lsb_tail = lsb_head;
+ Instruction* msb_head = gen->MakeInstruction(
+ BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_MSB_IDX(cond.argno_));
+ Instruction* msb_tail = msb_head;
+ Instruction* lsb_head = gen->MakeInstruction(
+ BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_LSB_IDX(cond.argno_));
+ Instruction* lsb_tail = lsb_head;
// Emit a suitable comparison statement.
switch (cond.op_) {
- case ErrorCode::OP_EQUAL:
- // Compare the least significant bits for equality
- lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K,
- static_cast<uint32_t>(cond.value_),
- RetExpression(gen, *cond.passed_),
- RetExpression(gen, *cond.failed_));
- gen->JoinInstructions(lsb_head, lsb_tail);
-
- // If we are looking at a 64bit argument, we need to also compare the
- // most significant bits.
- if (cond.width_ == ErrorCode::TP_64BIT) {
- msb_tail = gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K,
- static_cast<uint32_t>(cond.value_ >> 32),
- lsb_head,
+ case ErrorCode::OP_EQUAL:
+ // Compare the least significant bits for equality
+ lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
+ static_cast<uint32_t>(cond.value_),
+ RetExpression(gen, *cond.passed_),
RetExpression(gen, *cond.failed_));
- gen->JoinInstructions(msb_head, msb_tail);
- }
- break;
- case ErrorCode::OP_HAS_ALL_BITS:
- // Check the bits in the LSB half of the system call argument. Our
- // OP_HAS_ALL_BITS operator passes, iff all of the bits are set. This is
- // different from the kernel's BPF_JSET operation which passes, if any of
- // the bits are set.
- // Of course, if there is only a single set bit (or none at all), then
- // things get easier.
- {
- uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
- int lsb_bit_count = popcount(lsb_bits);
- if (lsb_bit_count == 0) {
- // No bits are set in the LSB half. The test will always pass.
- lsb_head = RetExpression(gen, *cond.passed_);
- lsb_tail = NULL;
- } else if (lsb_bit_count == 1) {
- // Exactly one bit is set in the LSB half. We can use the BPF_JSET
- // operator.
- lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K,
- lsb_bits,
- RetExpression(gen, *cond.passed_),
- RetExpression(gen, *cond.failed_));
- gen->JoinInstructions(lsb_head, lsb_tail);
- } else {
- // More than one bit is set in the LSB half. We need to combine
- // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
- // set in the system call argument.
- gen->JoinInstructions(lsb_head,
- gen->MakeInstruction(BPF_ALU+BPF_AND+BPF_K,
- lsb_bits,
- lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K,
+ gen->JoinInstructions(lsb_head, lsb_tail);
+
+ // If we are looking at a 64bit argument, we need to also compare the
+ // most significant bits.
+ if (cond.width_ == ErrorCode::TP_64BIT) {
+ msb_tail =
+ gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
+ static_cast<uint32_t>(cond.value_ >> 32),
+ lsb_head,
+ RetExpression(gen, *cond.failed_));
+ gen->JoinInstructions(msb_head, msb_tail);
+ }
+ break;
+ case ErrorCode::OP_HAS_ALL_BITS:
+ // Check the bits in the LSB half of the system call argument. Our
+ // OP_HAS_ALL_BITS operator passes, iff all of the bits are set. This is
+ // different from the kernel's BPF_JSET operation which passes, if any of
+ // the bits are set.
+ // Of course, if there is only a single set bit (or none at all), then
+ // things get easier.
+ {
+ uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
+ int lsb_bit_count = popcount(lsb_bits);
+ if (lsb_bit_count == 0) {
+ // No bits are set in the LSB half. The test will always pass.
+ lsb_head = RetExpression(gen, *cond.passed_);
+ lsb_tail = NULL;
+ } else if (lsb_bit_count == 1) {
+ // Exactly one bit is set in the LSB half. We can use the BPF_JSET
+ // operator.
+ lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
lsb_bits,
RetExpression(gen, *cond.passed_),
- RetExpression(gen, *cond.failed_))));
+ RetExpression(gen, *cond.failed_));
+ gen->JoinInstructions(lsb_head, lsb_tail);
+ } else {
+ // More than one bit is set in the LSB half. We need to combine
+ // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
+ // set in the system call argument.
+ gen->JoinInstructions(
+ lsb_head,
+ gen->MakeInstruction(BPF_ALU + BPF_AND + BPF_K,
+ lsb_bits,
+ lsb_tail = gen->MakeInstruction(
+ BPF_JMP + BPF_JEQ + BPF_K,
+ lsb_bits,
+ RetExpression(gen, *cond.passed_),
+ RetExpression(gen, *cond.failed_))));
+ }
}
- }
- // If we are looking at a 64bit argument, we need to also check the bits
- // in the MSB half of the system call argument.
- if (cond.width_ == ErrorCode::TP_64BIT) {
- uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
- int msb_bit_count = popcount(msb_bits);
- if (msb_bit_count == 0) {
- // No bits are set in the MSB half. The test will always pass.
- msb_head = lsb_head;
- } else if (msb_bit_count == 1) {
- // Exactly one bit is set in the MSB half. We can use the BPF_JSET
- // operator.
- msb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K,
- msb_bits,
- lsb_head,
- RetExpression(gen, *cond.failed_));
- gen->JoinInstructions(msb_head, msb_tail);
- } else {
- // More than one bit is set in the MSB half. We need to combine
- // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
- // set in the system call argument.
- gen->JoinInstructions(msb_head,
- gen->MakeInstruction(BPF_ALU+BPF_AND+BPF_K,
- msb_bits,
- gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K,
- msb_bits,
- lsb_head,
- RetExpression(gen, *cond.failed_))));
+ // If we are looking at a 64bit argument, we need to also check the bits
+ // in the MSB half of the system call argument.
+ if (cond.width_ == ErrorCode::TP_64BIT) {
+ uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
+ int msb_bit_count = popcount(msb_bits);
+ if (msb_bit_count == 0) {
+ // No bits are set in the MSB half. The test will always pass.
+ msb_head = lsb_head;
+ } else if (msb_bit_count == 1) {
+ // Exactly one bit is set in the MSB half. We can use the BPF_JSET
+ // operator.
+ msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+ msb_bits,
+ lsb_head,
+ RetExpression(gen, *cond.failed_));
+ gen->JoinInstructions(msb_head, msb_tail);
+ } else {
+ // More than one bit is set in the MSB half. We need to combine
+ // BPF_AND and BPF_JEQ to test whether all of these bits are in fact
+ // set in the system call argument.
+ gen->JoinInstructions(
+ msb_head,
+ gen->MakeInstruction(
+ BPF_ALU + BPF_AND + BPF_K,
+ msb_bits,
+ gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K,
+ msb_bits,
+ lsb_head,
+ RetExpression(gen, *cond.failed_))));
+ }
}
- }
- break;
- case ErrorCode::OP_HAS_ANY_BITS:
- // Check the bits in the LSB half of the system call argument. Our
- // OP_HAS_ANY_BITS operator passes, iff any of the bits are set. This maps
- // nicely to the kernel's BPF_JSET operation.
- {
- uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
- if (!lsb_bits) {
- // No bits are set in the LSB half. The test will always fail.
- lsb_head = RetExpression(gen, *cond.failed_);
- lsb_tail = NULL;
- } else {
- lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K,
- lsb_bits,
- RetExpression(gen, *cond.passed_),
- RetExpression(gen, *cond.failed_));
- gen->JoinInstructions(lsb_head, lsb_tail);
+ break;
+ case ErrorCode::OP_HAS_ANY_BITS:
+ // Check the bits in the LSB half of the system call argument. Our
+ // OP_HAS_ANY_BITS operator passes, iff any of the bits are set. This maps
+ // nicely to the kernel's BPF_JSET operation.
+ {
+ uint32_t lsb_bits = static_cast<uint32_t>(cond.value_);
+ if (!lsb_bits) {
+ // No bits are set in the LSB half. The test will always fail.
+ lsb_head = RetExpression(gen, *cond.failed_);
+ lsb_tail = NULL;
+ } else {
+ lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+ lsb_bits,
+ RetExpression(gen, *cond.passed_),
+ RetExpression(gen, *cond.failed_));
+ gen->JoinInstructions(lsb_head, lsb_tail);
+ }
}
- }
- // If we are looking at a 64bit argument, we need to also check the bits
- // in the MSB half of the system call argument.
- if (cond.width_ == ErrorCode::TP_64BIT) {
- uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
- if (!msb_bits) {
- // No bits are set in the MSB half. The test will always fail.
- msb_head = lsb_head;
- } else {
- msb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K,
- msb_bits,
- RetExpression(gen, *cond.passed_),
- lsb_head);
- gen->JoinInstructions(msb_head, msb_tail);
+ // If we are looking at a 64bit argument, we need to also check the bits
+ // in the MSB half of the system call argument.
+ if (cond.width_ == ErrorCode::TP_64BIT) {
+ uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32);
+ if (!msb_bits) {
+ // No bits are set in the MSB half. The test will always fail.
+ msb_head = lsb_head;
+ } else {
+ msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K,
+ msb_bits,
+ RetExpression(gen, *cond.passed_),
+ lsb_head);
+ gen->JoinInstructions(msb_head, msb_tail);
+ }
}
- }
- break;
- default:
- // TODO(markus): Need to add support for OP_GREATER
- SANDBOX_DIE("Not implemented");
- break;
+ break;
+ default:
+ // TODO(markus): Need to add support for OP_GREATER
+ SANDBOX_DIE("Not implemented");
+ break;
}
// Ensure that we never pass a 64bit value, when we only expect a 32bit
@@ -937,26 +957,28 @@ Instruction *Sandbox::CondExpression(CodeGen *gen, const ErrorCode& cond) {
// LSB has been sign-extended into the MSB.
if (cond.width_ == ErrorCode::TP_32BIT) {
if (cond.value_ >> 32) {
- SANDBOX_DIE("Invalid comparison of a 32bit system call argument "
- "against a 64bit constant; this test is always false.");
+ SANDBOX_DIE(
+ "Invalid comparison of a 32bit system call argument "
+ "against a 64bit constant; this test is always false.");
}
- Instruction *invalid_64bit = RetExpression(gen, Unexpected64bitArgument());
- #if __SIZEOF_POINTER__ > 4
- invalid_64bit =
- gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, 0xFFFFFFFF,
- gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS,
- SECCOMP_ARG_LSB_IDX(cond.argno_),
- gen->MakeInstruction(BPF_JMP+BPF_JGE+BPF_K, 0x80000000,
- lsb_head,
- invalid_64bit)),
- invalid_64bit);
- #endif
+ Instruction* invalid_64bit = RetExpression(gen, Unexpected64bitArgument());
+#if __SIZEOF_POINTER__ > 4
+ invalid_64bit = gen->MakeInstruction(
+ BPF_JMP + BPF_JEQ + BPF_K,
+ 0xFFFFFFFF,
+ gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS,
+ SECCOMP_ARG_LSB_IDX(cond.argno_),
+ gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K,
+ 0x80000000,
+ lsb_head,
+ invalid_64bit)),
+ invalid_64bit);
+#endif
gen->JoinInstructions(
- msb_tail,
- gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, 0,
- lsb_head,
- invalid_64bit));
+ msb_tail,
+ gen->MakeInstruction(
+ BPF_JMP + BPF_JEQ + BPF_K, 0, lsb_head, invalid_64bit));
}
return msb_head;
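The long switch above compiles each comparison into 32-bit pieces because BPF has no 64-bit operations. A plain C++ model of the semantics being generated (not the generated code itself):

#include <stdint.h>

// OP_HAS_ALL_BITS: every bit of the mask must be set in the argument. A lone
// BPF_JSET only answers "is any bit set?", which is why the generator above
// needs BPF_AND + BPF_JEQ once more than one bit is requested per half.
bool HasAllBits(uint64_t arg, uint64_t mask) {
  const uint32_t lsb = static_cast<uint32_t>(arg);
  const uint32_t msb = static_cast<uint32_t>(arg >> 32);
  const uint32_t lsb_mask = static_cast<uint32_t>(mask);
  const uint32_t msb_mask = static_cast<uint32_t>(mask >> 32);
  return (lsb & lsb_mask) == lsb_mask && (msb & msb_mask) == msb_mask;
}

// OP_HAS_ANY_BITS: at least one masked bit set in either half; this maps
// directly onto one BPF_JSET per half.
bool HasAnyBits(uint64_t arg, uint64_t mask) {
  return (static_cast<uint32_t>(arg) & static_cast<uint32_t>(mask)) != 0 ||
         (static_cast<uint32_t>(arg >> 32) &
          static_cast<uint32_t>(mask >> 32)) != 0;
}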
@@ -966,11 +988,11 @@ ErrorCode Sandbox::Unexpected64bitArgument() {
return Kill("Unexpected 64bit argument detected");
}
-ErrorCode Sandbox::Trap(Trap::TrapFnc fnc, const void *aux) {
+ErrorCode Sandbox::Trap(Trap::TrapFnc fnc, const void* aux) {
return Trap::MakeTrap(fnc, aux, true /* Safe Trap */);
}
-ErrorCode Sandbox::UnsafeTrap(Trap::TrapFnc fnc, const void *aux) {
+ErrorCode Sandbox::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) {
return Trap::MakeTrap(fnc, aux, false /* Unsafe Trap */);
}
@@ -984,16 +1006,22 @@ intptr_t Sandbox::ForwardSyscall(const struct arch_seccomp_data& args) {
static_cast<intptr_t>(args.args[5]));
}
-ErrorCode Sandbox::Cond(int argno, ErrorCode::ArgType width,
- ErrorCode::Operation op, uint64_t value,
- const ErrorCode& passed, const ErrorCode& failed) {
- return ErrorCode(argno, width, op, value,
+ErrorCode Sandbox::Cond(int argno,
+ ErrorCode::ArgType width,
+ ErrorCode::Operation op,
+ uint64_t value,
+ const ErrorCode& passed,
+ const ErrorCode& failed) {
+ return ErrorCode(argno,
+ width,
+ op,
+ value,
&*conds_->insert(passed).first,
&*conds_->insert(failed).first);
}
-ErrorCode Sandbox::Kill(const char *msg) {
- return Trap(BpfFailure, const_cast<char *>(msg));
+ErrorCode Sandbox::Kill(const char* msg) {
+ return Trap(BpfFailure, const_cast<char*>(msg));
}
Sandbox::SandboxStatus Sandbox::status_ = STATUS_UNKNOWN;
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf.h b/sandbox/linux/seccomp-bpf/sandbox_bpf.h
index 72e17b2..ae412c6 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf.h
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf.h
@@ -26,15 +26,15 @@
namespace playground2 {
struct arch_seccomp_data {
- int nr;
+ int nr;
uint32_t arch;
uint64_t instruction_pointer;
uint64_t args[6];
};
struct arch_sigsys {
- void *ip;
- int nr;
+ void* ip;
+ int nr;
unsigned int arch;
};
@@ -61,7 +61,7 @@ class Sandbox {
// pointer. One common use case would be to pass the "aux" pointer as an
// argument to Trap() functions.
typedef BpfSandboxPolicy* EvaluateSyscall;
- typedef std::vector<std::pair<EvaluateSyscall, void *> >Evaluators;
+ typedef std::vector<std::pair<EvaluateSyscall, void*> > Evaluators;
// A vector of BPF instructions that need to be installed as a filter
// program in the kernel.
@@ -111,7 +111,7 @@ class Sandbox {
// valid for the entire time that Trap() handlers can be called; typically,
// this would be the lifetime of the program.
// DEPRECATED: use the policy interface below.
- void SetSandboxPolicyDeprecated(EvaluateSyscall syscallEvaluator, void *aux);
+ void SetSandboxPolicyDeprecated(EvaluateSyscall syscallEvaluator, void* aux);
// Set the BPF policy as |policy|. Ownership of |policy| is transfered here
// to the sandbox object.
@@ -123,7 +123,7 @@ class Sandbox {
// The "aux" field can carry a pointer to arbitrary data. See EvaluateSyscall
// for a description of how to pass data from SetSandboxPolicy() to a Trap()
// handler.
- ErrorCode Trap(Trap::TrapFnc fnc, const void *aux);
+ ErrorCode Trap(Trap::TrapFnc fnc, const void* aux);
// Calls a user-space trap handler and disables all sandboxing for system
// calls made from this trap handler.
@@ -135,7 +135,7 @@ class Sandbox {
// very useful to diagnose code that is incompatible with the sandbox.
// If even a single system call returns "UnsafeTrap", the security of
  // the entire sandbox should be considered compromised.
- ErrorCode UnsafeTrap(Trap::TrapFnc fnc, const void *aux);
+ ErrorCode UnsafeTrap(Trap::TrapFnc fnc, const void* aux);
// From within an UnsafeTrap() it is often useful to be able to execute
// the system call that triggered the trap. The ForwardSyscall() method
@@ -157,13 +157,15 @@ class Sandbox {
// If it is outside this range, the sandbox treats the system call just
// the same as any other ABI violation (i.e. it aborts with an error
// message).
- ErrorCode Cond(int argno, ErrorCode::ArgType is_32bit,
+ ErrorCode Cond(int argno,
+ ErrorCode::ArgType is_32bit,
ErrorCode::Operation op,
- uint64_t value, const ErrorCode& passed,
+ uint64_t value,
+ const ErrorCode& passed,
const ErrorCode& failed);
// Kill the program and print an error message.
- ErrorCode Kill(const char *msg);
+ ErrorCode Kill(const char* msg);
// This is the main public entry point. It finds all system calls that
// need rewriting, sets up the resources needed by the sandbox, and
@@ -186,7 +188,7 @@ class Sandbox {
// through the verifier, iff the program was built in debug mode.
// But by setting "force_verification", the caller can request that the
// verifier is run unconditionally. This is useful for unittests.
- Program *AssembleFilter(bool force_verification);
+ Program* AssembleFilter(bool force_verification);
// Returns the fatal ErrorCode that is used to indicate that somebody
// attempted to pass a 64bit value in a 32bit system call argument.
@@ -200,11 +202,8 @@ class Sandbox {
struct Range {
Range(uint32_t f, uint32_t t, const ErrorCode& e)
- : from(f),
- to(t),
- err(e) {
- }
- uint32_t from, to;
+ : from(f), to(t), err(e) {}
+ uint32_t from, to;
ErrorCode err;
};
typedef std::vector<Range> Ranges;
@@ -218,7 +217,8 @@ class Sandbox {
// policy. The caller has to make sure that "this" has not yet been
// initialized with any other policies.
bool RunFunctionInPolicy(void (*code_in_sandbox)(),
- EvaluateSyscall syscall_evaluator, void *aux);
+ EvaluateSyscall syscall_evaluator,
+ void* aux);
// Performs a couple of sanity checks to verify that the kernel supports the
// features that we need for successful sandboxing.
@@ -242,11 +242,11 @@ class Sandbox {
// sorted in ascending order of system call numbers. There are no gaps in the
// ranges. System calls with identical ErrorCodes are coalesced into a single
// range.
- void FindRanges(Ranges *ranges);
+ void FindRanges(Ranges* ranges);
// Returns a BPF program snippet that implements a jump table for the
// given range of system call numbers. This function runs recursively.
- Instruction *AssembleJumpTable(CodeGen *gen,
+ Instruction* AssembleJumpTable(CodeGen* gen,
Ranges::const_iterator start,
Ranges::const_iterator stop);
@@ -255,13 +255,13 @@ class Sandbox {
// conditional expression; if so, this function will recursively call
// CondExpression() and possibly RetExpression() to build a complex set of
// instructions.
- Instruction *RetExpression(CodeGen *gen, const ErrorCode& err);
+ Instruction* RetExpression(CodeGen* gen, const ErrorCode& err);
// Returns a BPF program that evaluates the conditional expression in
// "cond" and returns the appropriate value from the BPF filter program.
// This function recursively calls RetExpression(); it should only ever be
// called from RetExpression().
- Instruction *CondExpression(CodeGen *gen, const ErrorCode& cond);
+ Instruction* CondExpression(CodeGen* gen, const ErrorCode& cond);
static SandboxStatus status_;
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf_policy_forward.h b/sandbox/linux/seccomp-bpf/sandbox_bpf_policy_forward.h
index afc9d87..77d9b53 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf_policy_forward.h
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf_policy_forward.h
@@ -11,10 +11,9 @@ namespace playground2 {
class Sandbox;
class ErrorCode;
-typedef ErrorCode BpfSandboxPolicy(
- Sandbox* sandbox_compiler,
- int system_call_number,
- void* aux);
+typedef ErrorCode BpfSandboxPolicy(Sandbox* sandbox_compiler,
+ int system_call_number,
+ void* aux);
typedef base::Callback<BpfSandboxPolicy> BpfSandboxPolicyCallback;
diff --git a/sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc b/sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc
index d28b06c..9d67db8 100644
--- a/sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc
+++ b/sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc
@@ -33,7 +33,7 @@
// Workaround for Android's prctl.h file.
#ifndef PR_GET_ENDIAN
-#define PR_GET_ENDIAN 19
+#define PR_GET_ENDIAN 19
#endif
#ifndef PR_CAPBSET_READ
#define PR_CAPBSET_READ 23
@@ -45,7 +45,7 @@ using sandbox::BrokerProcess;
namespace {
-const int kExpectedReturnValue = 42;
+const int kExpectedReturnValue = 42;
const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
// This test should execute no matter whether we have kernel support. So,
@@ -60,8 +60,7 @@ TEST(SandboxBpf, CallSupports) {
RecordProperty("SeccompBPFSupported",
seccomp_bpf_supported ? "true." : "false.");
std::cout << "Seccomp BPF supported: "
- << (seccomp_bpf_supported ? "true." : "false.")
- << "\n";
+ << (seccomp_bpf_supported ? "true." : "false.") << "\n";
RecordProperty("PointerSize", sizeof(void*));
std::cout << "Pointer size: " << sizeof(void*) << "\n";
}
@@ -78,13 +77,13 @@ SANDBOX_TEST(SandboxBpf, CallSupportsTwice) {
// setting up the sandbox. But it wouldn't hurt to have at least one test
// that explicitly walks through all these steps.
-intptr_t FakeGetPid(const struct arch_seccomp_data& args, void *aux) {
+intptr_t FakeGetPid(const struct arch_seccomp_data& args, void* aux) {
BPF_ASSERT(aux);
- pid_t *pid_ptr = static_cast<pid_t *>(aux);
+ pid_t* pid_ptr = static_cast<pid_t*>(aux);
return (*pid_ptr)++;
}
-ErrorCode VerboseAPITestingPolicy(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode VerboseAPITestingPolicy(Sandbox* sandbox, int sysno, void* aux) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
return ErrorCode(ENOSYS);
} else if (sysno == __NR_getpid) {
@@ -116,7 +115,7 @@ SANDBOX_TEST(SandboxBpf, DISABLE_ON_TSAN(VerboseAPITesting)) {
// A simple blacklist test
-ErrorCode BlacklistNanosleepPolicy(Sandbox *, int sysno, void *) {
+ErrorCode BlacklistNanosleepPolicy(Sandbox*, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
@@ -140,7 +139,7 @@ BPF_TEST(SandboxBpf, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
// Now do a simple whitelist test
-ErrorCode WhitelistGetpidPolicy(Sandbox *, int sysno, void *) {
+ErrorCode WhitelistGetpidPolicy(Sandbox*, int sysno, void*) {
switch (sysno) {
case __NR_getpid:
case __NR_exit_group:
@@ -163,15 +162,16 @@ BPF_TEST(SandboxBpf, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
// A simple blacklist policy, with a SIGSYS handler
-intptr_t EnomemHandler(const struct arch_seccomp_data& args, void *aux) {
+intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
// We also check that the auxiliary data is correct
SANDBOX_ASSERT(aux);
*(static_cast<int*>(aux)) = kExpectedReturnValue;
return -ENOMEM;
}
-ErrorCode BlacklistNanosleepPolicySigsys(Sandbox *sandbox, int sysno,
- void *aux) {
+ErrorCode BlacklistNanosleepPolicySigsys(Sandbox* sandbox,
+ int sysno,
+ void* aux) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
@@ -185,8 +185,10 @@ ErrorCode BlacklistNanosleepPolicySigsys(Sandbox *sandbox, int sysno,
}
}
-BPF_TEST(SandboxBpf, BasicBlacklistWithSigsys,
- BlacklistNanosleepPolicySigsys, int /* BPF_AUX */) {
+BPF_TEST(SandboxBpf,
+ BasicBlacklistWithSigsys,
+ BlacklistNanosleepPolicySigsys,
+ int /* BPF_AUX */) {
// getpid() should work properly
errno = 0;
BPF_ASSERT(syscall(__NR_getpid) > 0);
@@ -204,33 +206,33 @@ BPF_TEST(SandboxBpf, BasicBlacklistWithSigsys,
// A simple test that verifies we can return arbitrary errno values.
-ErrorCode ErrnoTestPolicy(Sandbox *, int sysno, void *) {
+ErrorCode ErrnoTestPolicy(Sandbox*, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
}
switch (sysno) {
- case __NR_dup2:
- // Pretend that dup2() worked, but don't actually do anything.
- return ErrorCode(0);
- case __NR_setuid:
+ case __NR_dup2:
+ // Pretend that dup2() worked, but don't actually do anything.
+ return ErrorCode(0);
+ case __NR_setuid:
#if defined(__NR_setuid32)
- case __NR_setuid32:
+ case __NR_setuid32:
#endif
- // Return errno = 1.
- return ErrorCode(1);
- case __NR_setgid:
+ // Return errno = 1.
+ return ErrorCode(1);
+ case __NR_setgid:
#if defined(__NR_setgid32)
- case __NR_setgid32:
+ case __NR_setgid32:
#endif
- // Return maximum errno value (typically 4095).
- return ErrorCode(ErrorCode::ERR_MAX_ERRNO);
- case __NR_uname:
- // Return errno = 42;
- return ErrorCode(42);
- default:
- return ErrorCode(ErrorCode::ERR_ALLOWED);
+ // Return maximum errno value (typically 4095).
+ return ErrorCode(ErrorCode::ERR_MAX_ERRNO);
+ case __NR_uname:
+ // Return errno = 42;
+ return ErrorCode(42);
+ default:
+ return ErrorCode(ErrorCode::ERR_ALLOWED);
}
}
@@ -238,9 +240,9 @@ BPF_TEST(SandboxBpf, ErrnoTest, ErrnoTestPolicy) {
// Verify that dup2() returns success, but doesn't actually run.
int fds[4];
BPF_ASSERT(pipe(fds) == 0);
- BPF_ASSERT(pipe(fds+2) == 0);
+ BPF_ASSERT(pipe(fds + 2) == 0);
BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
- char buf[1] = { };
+ char buf[1] = {};
BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
BPF_ASSERT(read(fds[0], buf, 1) == 1);
@@ -276,14 +278,17 @@ BPF_TEST(SandboxBpf, ErrnoTest, ErrnoTestPolicy) {
// Testing the stacking of two sandboxes
-ErrorCode StackingPolicyPartOne(Sandbox *sandbox, int sysno, void *) {
+ErrorCode StackingPolicyPartOne(Sandbox* sandbox, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
return ErrorCode(ENOSYS);
}
switch (sysno) {
case __NR_getppid:
- return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
+ return sandbox->Cond(0,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ 0,
ErrorCode(ErrorCode::ERR_ALLOWED),
ErrorCode(EPERM));
default:
@@ -291,14 +296,17 @@ ErrorCode StackingPolicyPartOne(Sandbox *sandbox, int sysno, void *) {
}
}
-ErrorCode StackingPolicyPartTwo(Sandbox *sandbox, int sysno, void *) {
+ErrorCode StackingPolicyPartTwo(Sandbox* sandbox, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
return ErrorCode(ENOSYS);
}
switch (sysno) {
case __NR_getppid:
- return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
+ return sandbox->Cond(0,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ 0,
ErrorCode(EINVAL),
ErrorCode(ErrorCode::ERR_ALLOWED));
default:
@@ -343,7 +351,7 @@ int SysnoToRandomErrno(int sysno) {
return ((sysno & ~3) >> 2) % 29 + 1;
}
-ErrorCode SyntheticPolicy(Sandbox *, int sysno, void *) {
+ErrorCode SyntheticPolicy(Sandbox*, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
@@ -368,15 +376,13 @@ ErrorCode SyntheticPolicy(Sandbox *, int sysno, void *) {
BPF_TEST(SandboxBpf, SyntheticPolicy, SyntheticPolicy) {
  // Ensure that kExpectedReturnValue + syscall_number + 1 does not overflow
  // an int.
- BPF_ASSERT(
- std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
- static_cast<int>(MAX_PUBLIC_SYSCALL));
-
- for (int syscall_number = static_cast<int>(MIN_SYSCALL);
- syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
- ++syscall_number) {
- if (syscall_number == __NR_exit_group ||
- syscall_number == __NR_write) {
+ BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
+ static_cast<int>(MAX_PUBLIC_SYSCALL));
+
+ for (int syscall_number = static_cast<int>(MIN_SYSCALL);
+ syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
+ ++syscall_number) {
+ if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
// exit_group() is special
continue;
}
@@ -401,7 +407,7 @@ int ArmPrivateSysnoToErrno(int sysno) {
}
}
-ErrorCode ArmPrivatePolicy(Sandbox *, int sysno, void *) {
+ErrorCode ArmPrivatePolicy(Sandbox*, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy.
return ErrorCode(ENOSYS);
@@ -418,9 +424,9 @@ ErrorCode ArmPrivatePolicy(Sandbox *, int sysno, void *) {
}
BPF_TEST(SandboxBpf, ArmPrivatePolicy, ArmPrivatePolicy) {
- for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
- syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
- ++syscall_number) {
+ for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
+ syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
+ ++syscall_number) {
errno = 0;
BPF_ASSERT(syscall(syscall_number) == -1);
BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
@@ -428,9 +434,9 @@ BPF_TEST(SandboxBpf, ArmPrivatePolicy, ArmPrivatePolicy) {
}
#endif // defined(__arm__)
-intptr_t CountSyscalls(const struct arch_seccomp_data& args, void *aux) {
+intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
// Count all invocations of our callback function.
- ++*reinterpret_cast<int *>(aux);
+ ++*reinterpret_cast<int*>(aux);
// Verify that within the callback function all filtering is temporarily
// disabled.
@@ -441,7 +447,7 @@ intptr_t CountSyscalls(const struct arch_seccomp_data& args, void *aux) {
return Sandbox::ForwardSyscall(args);
}
-ErrorCode GreyListedPolicy(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode GreyListedPolicy(Sandbox* sandbox, int sysno, void* aux) {
// The use of UnsafeTrap() causes us to print a warning message. This is
// generally desirable, but it results in the unittest failing, as it doesn't
// expect any messages on "stderr". So, temporarily disable messages. The
@@ -452,13 +458,14 @@ ErrorCode GreyListedPolicy(Sandbox *sandbox, int sysno, void *aux) {
// Some system calls must always be allowed, if our policy wants to make
// use of UnsafeTrap()
- if (sysno == __NR_rt_sigprocmask ||
- sysno == __NR_rt_sigreturn
+ if (sysno == __NR_rt_sigprocmask || sysno == __NR_rt_sigreturn
#if defined(__NR_sigprocmask)
- || sysno == __NR_sigprocmask
+ ||
+ sysno == __NR_sigprocmask
#endif
#if defined(__NR_sigreturn)
- || sysno == __NR_sigreturn
+ ||
+ sysno == __NR_sigreturn
#endif
) {
return ErrorCode(ErrorCode::ERR_ALLOWED);
@@ -467,22 +474,25 @@ ErrorCode GreyListedPolicy(Sandbox *sandbox, int sysno, void *aux) {
return ErrorCode(EPERM);
} else if (Sandbox::IsValidSyscallNumber(sysno)) {
// Allow (and count) all other system calls.
- return sandbox->UnsafeTrap(CountSyscalls, aux);
+ return sandbox->UnsafeTrap(CountSyscalls, aux);
} else {
return ErrorCode(ENOSYS);
}
}
-BPF_TEST(SandboxBpf, GreyListedPolicy,
- GreyListedPolicy, int /* BPF_AUX */) {
+BPF_TEST(SandboxBpf, GreyListedPolicy, GreyListedPolicy, int /* BPF_AUX */) {
BPF_ASSERT(syscall(__NR_getpid) == -1);
BPF_ASSERT(errno == EPERM);
BPF_ASSERT(BPF_AUX == 0);
BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
BPF_ASSERT(BPF_AUX == 2);
- char name[17] = { };
- BPF_ASSERT(!syscall(__NR_prctl, PR_GET_NAME, name, (void *)NULL,
- (void *)NULL, (void *)NULL));
+ char name[17] = {};
+ BPF_ASSERT(!syscall(__NR_prctl,
+ PR_GET_NAME,
+ name,
+ (void*)NULL,
+ (void*)NULL,
+ (void*)NULL));
BPF_ASSERT(BPF_AUX == 3);
BPF_ASSERT(*name);
}
@@ -500,9 +510,8 @@ SANDBOX_TEST(SandboxBpf, EnableUnsafeTrapsInSigSysHandler) {
SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
}
-intptr_t PrctlHandler(const struct arch_seccomp_data& args, void *) {
- if (args.args[0] == PR_CAPBSET_DROP &&
- static_cast<int>(args.args[1]) == -1) {
+intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
+ if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
// prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
// return an error. But our handler allows this call.
return 0;
@@ -511,7 +520,7 @@ intptr_t PrctlHandler(const struct arch_seccomp_data& args, void *) {
}
}
-ErrorCode PrctlPolicy(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode PrctlPolicy(Sandbox* sandbox, int sysno, void* aux) {
setenv(kSandboxDebuggingEnv, "t", 0);
Die::SuppressInfoMessages(true);
@@ -529,43 +538,48 @@ ErrorCode PrctlPolicy(Sandbox *sandbox, int sysno, void *aux) {
BPF_TEST(SandboxBpf, ForwardSyscall, PrctlPolicy) {
// This call should never be allowed. But our policy will intercept it and
// let it pass successfully.
- BPF_ASSERT(!prctl(PR_CAPBSET_DROP, -1, (void *)NULL, (void *)NULL,
- (void *)NULL));
+ BPF_ASSERT(
+ !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));
// Verify that the call will fail, if it makes it all the way to the kernel.
- BPF_ASSERT(prctl(PR_CAPBSET_DROP, -2, (void *)NULL, (void *)NULL,
- (void *)NULL) == -1);
+ BPF_ASSERT(
+ prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);
// And verify that other uses of prctl() work just fine.
- char name[17] = { };
- BPF_ASSERT(!syscall(__NR_prctl, PR_GET_NAME, name, (void *)NULL,
- (void *)NULL, (void *)NULL));
+ char name[17] = {};
+ BPF_ASSERT(!syscall(__NR_prctl,
+ PR_GET_NAME,
+ name,
+ (void*)NULL,
+ (void*)NULL,
+ (void*)NULL));
BPF_ASSERT(*name);
// Finally, verify that system calls other than prctl() are completely
// unaffected by our policy.
- struct utsname uts = { };
+ struct utsname uts = {};
BPF_ASSERT(!uname(&uts));
BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
}
-intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void *) {
+intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
return Sandbox::ForwardSyscall(args);
}
-ErrorCode RedirectAllSyscallsPolicy(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode RedirectAllSyscallsPolicy(Sandbox* sandbox, int sysno, void* aux) {
setenv(kSandboxDebuggingEnv, "t", 0);
Die::SuppressInfoMessages(true);
// Some system calls must always be allowed, if our policy wants to make
// use of UnsafeTrap()
- if (sysno == __NR_rt_sigprocmask ||
- sysno == __NR_rt_sigreturn
+ if (sysno == __NR_rt_sigprocmask || sysno == __NR_rt_sigreturn
#if defined(__NR_sigprocmask)
- || sysno == __NR_sigprocmask
+ ||
+ sysno == __NR_sigprocmask
#endif
#if defined(__NR_sigreturn)
- || sysno == __NR_sigreturn
+ ||
+ sysno == __NR_sigreturn
#endif
) {
return ErrorCode(ErrorCode::ERR_ALLOWED);
@@ -578,7 +592,7 @@ ErrorCode RedirectAllSyscallsPolicy(Sandbox *sandbox, int sysno, void *aux) {
int bus_handler_fd_ = -1;
-void SigBusHandler(int, siginfo_t *info, void *void_context) {
+void SigBusHandler(int, siginfo_t* info, void* void_context) {
BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
}
@@ -593,7 +607,7 @@ BPF_TEST(SandboxBpf, SigBus, RedirectAllSyscallsPolicy) {
int fds[2];
BPF_ASSERT(pipe(fds) == 0);
bus_handler_fd_ = fds[1];
- struct sigaction sa = { };
+ struct sigaction sa = {};
sa.sa_sigaction = SigBusHandler;
sa.sa_flags = SA_SIGINFO;
BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
@@ -629,7 +643,7 @@ BPF_TEST(SandboxBpf, SigMask, RedirectAllSyscallsPolicy) {
sigaddset(&mask0, SIGUSR2);
BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
- BPF_ASSERT( sigismember(&mask2, SIGUSR2));
+ BPF_ASSERT(sigismember(&mask2, SIGUSR2));
}
BPF_TEST(SandboxBpf, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
@@ -650,8 +664,8 @@ BPF_TEST(SandboxBpf, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
// would make system calls, but it allows us to verify that we don't
// accidentally mess with errno, when we shouldn't.
errno = 0;
- struct arch_seccomp_data args = { };
- args.nr = __NR_close;
+ struct arch_seccomp_data args = {};
+ args.nr = __NR_close;
args.args[0] = -1;
BPF_ASSERT(Sandbox::ForwardSyscall(args) == -EBADF);
BPF_ASSERT(errno == 0);
@@ -666,9 +680,8 @@ class InitializedOpenBroker {
allowed_files.push_back("/proc/allowed");
allowed_files.push_back("/proc/cpuinfo");
- broker_process_.reset(new BrokerProcess(EPERM,
- allowed_files,
- std::vector<std::string>()));
+ broker_process_.reset(
+ new BrokerProcess(EPERM, allowed_files, std::vector<std::string>()));
BPF_ASSERT(broker_process() != NULL);
BPF_ASSERT(broker_process_->Init(NULL));
@@ -676,6 +689,7 @@ class InitializedOpenBroker {
}
bool initialized() { return initialized_; }
class BrokerProcess* broker_process() { return broker_process_.get(); }
+
private:
bool initialized_;
scoped_ptr<class BrokerProcess> broker_process_;
@@ -683,29 +697,29 @@ class InitializedOpenBroker {
};
intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args,
- void *aux) {
+ void* aux) {
BPF_ASSERT(aux);
BrokerProcess* broker_process = static_cast<BrokerProcess*>(aux);
- switch(args.nr) {
+ switch (args.nr) {
case __NR_access:
return broker_process->Access(reinterpret_cast<const char*>(args.args[0]),
- static_cast<int>(args.args[1]));
+ static_cast<int>(args.args[1]));
case __NR_open:
return broker_process->Open(reinterpret_cast<const char*>(args.args[0]),
- static_cast<int>(args.args[1]));
+ static_cast<int>(args.args[1]));
case __NR_openat:
// We only call open() so if we arrive here, it's because glibc uses
// the openat() system call.
BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
return broker_process->Open(reinterpret_cast<const char*>(args.args[1]),
- static_cast<int>(args.args[2]));
+ static_cast<int>(args.args[2]));
default:
BPF_ASSERT(false);
return -ENOSYS;
}
}
-ErrorCode DenyOpenPolicy(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode DenyOpenPolicy(Sandbox* sandbox, int sysno, void* aux) {
InitializedOpenBroker* iob = static_cast<InitializedOpenBroker*>(aux);
if (!Sandbox::IsValidSyscallNumber(sysno)) {
return ErrorCode(ENOSYS);
@@ -717,8 +731,8 @@ ErrorCode DenyOpenPolicy(Sandbox *sandbox, int sysno, void *aux) {
case __NR_openat:
      // We get an InitializedOpenBroker class, but our trap handler wants
// the BrokerProcess object.
- return ErrorCode(sandbox->Trap(BrokerOpenTrapHandler,
- iob->broker_process()));
+ return ErrorCode(
+ sandbox->Trap(BrokerOpenTrapHandler, iob->broker_process()));
default:
return ErrorCode(ErrorCode::ERR_ALLOWED);
}
@@ -726,10 +740,12 @@ ErrorCode DenyOpenPolicy(Sandbox *sandbox, int sysno, void *aux) {
// We use an InitializedOpenBroker class, so that we can run unsandboxed
// code in its constructor, which is the only way to do so in a BPF_TEST.
-BPF_TEST(SandboxBpf, UseOpenBroker, DenyOpenPolicy,
+BPF_TEST(SandboxBpf,
+ UseOpenBroker,
+ DenyOpenPolicy,
InitializedOpenBroker /* BPF_AUX */) {
BPF_ASSERT(BPF_AUX.initialized());
- BrokerProcess* broker_process = BPF_AUX.broker_process();
+ BrokerProcess* broker_process = BPF_AUX.broker_process();
BPF_ASSERT(broker_process != NULL);
// First, use the broker "manually"
@@ -771,7 +787,7 @@ BPF_TEST(SandboxBpf, UseOpenBroker, DenyOpenPolicy,
// Simple test demonstrating how to use Sandbox::Cond()
-ErrorCode SimpleCondTestPolicy(Sandbox *sandbox, int sysno, void *) {
+ErrorCode SimpleCondTestPolicy(Sandbox* sandbox, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
@@ -784,20 +800,26 @@ ErrorCode SimpleCondTestPolicy(Sandbox *sandbox, int sysno, void *) {
case __NR_open:
// Allow opening files for reading, but don't allow writing.
COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits);
- return sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
+ return sandbox->Cond(1,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_HAS_ANY_BITS,
O_ACCMODE /* 0x3 */,
ErrorCode(EROFS),
ErrorCode(ErrorCode::ERR_ALLOWED));
case __NR_prctl:
// Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
// disallow everything else.
- return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
+ return sandbox->Cond(0,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
PR_SET_DUMPABLE,
ErrorCode(ErrorCode::ERR_ALLOWED),
- sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
- PR_GET_DUMPABLE,
- ErrorCode(ErrorCode::ERR_ALLOWED),
- ErrorCode(ENOMEM)));
+ sandbox->Cond(0,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ PR_GET_DUMPABLE,
+ ErrorCode(ErrorCode::ERR_ALLOWED),
+ ErrorCode(ENOMEM)));
default:
return ErrorCode(ErrorCode::ERR_ALLOWED);
}
@@ -812,7 +834,7 @@ BPF_TEST(SandboxBpf, SimpleCondTest, SimpleCondTestPolicy) {
int ret;
BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
- BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1-ret) == 0);
+ BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
BPF_ASSERT(errno == ENOMEM);
}
@@ -832,8 +854,9 @@ class EqualityStressTest {
// We are actually constructing a graph of ArgValue objects. This
// graph will later be used to a) compute our sandbox policy, and
// b) drive the code that verifies the output from the BPF program.
- COMPILE_ASSERT(kNumTestCases < (int)(MAX_PUBLIC_SYSCALL-MIN_SYSCALL-10),
- num_test_cases_must_be_significantly_smaller_than_num_system_calls);
+ COMPILE_ASSERT(
+ kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
+ num_test_cases_must_be_significantly_smaller_than_num_system_calls);
for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
if (IsReservedSyscall(sysno)) {
// Skip reserved system calls. This ensures that our test frame
@@ -842,21 +865,21 @@ class EqualityStressTest {
++end;
arg_values_.push_back(NULL);
} else {
- arg_values_.push_back(RandomArgValue(rand() % kMaxArgs, 0,
- rand() % kMaxArgs));
+ arg_values_.push_back(
+ RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
}
}
}
~EqualityStressTest() {
- for (std::vector<ArgValue *>::iterator iter = arg_values_.begin();
+ for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
iter != arg_values_.end();
++iter) {
DeleteArgValue(*iter);
}
}
- ErrorCode Policy(Sandbox *sandbox, int sysno) {
+ ErrorCode Policy(Sandbox* sandbox, int sysno) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
@@ -888,22 +911,22 @@ class EqualityStressTest {
// We arbitrarily start by setting all six system call arguments to
      // zero. And we then recursively traverse our tree of ArgValues to
// determine the necessary combinations of parameters.
- intptr_t args[6] = { };
+ intptr_t args[6] = {};
Verify(sysno, args, *arg_values_[sysno]);
}
}
private:
struct ArgValue {
- int argno; // Argument number to inspect.
- int size; // Number of test cases (must be > 0).
+ int argno; // Argument number to inspect.
+ int size; // Number of test cases (must be > 0).
struct Tests {
uint32_t k_value; // Value to compare syscall arg against.
- int err; // If non-zero, errno value to return.
-      struct ArgValue *arg_value; // Otherwise, more args need inspecting.
- } *tests;
- int err; // If none of the tests passed, this is what
- struct ArgValue *arg_value; // we'll return (this is the "else" branch).
+ int err; // If non-zero, errno value to return.
+      struct ArgValue* arg_value;  // Otherwise, more args need inspecting.
+ }* tests;
+ int err; // If none of the tests passed, this is what
+ struct ArgValue* arg_value; // we'll return (this is the "else" branch).
};
bool IsReservedSyscall(int sysno) {
@@ -917,41 +940,38 @@ class EqualityStressTest {
// calls that will be made by this particular test. So, this small list is
// sufficient. But if anybody copy'n'pasted this code for other uses, they
    // would have to review that list.
- return sysno == __NR_read ||
- sysno == __NR_write ||
- sysno == __NR_exit ||
- sysno == __NR_exit_group ||
- sysno == __NR_restart_syscall;
+ return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
+ sysno == __NR_exit_group || sysno == __NR_restart_syscall;
}
- ArgValue *RandomArgValue(int argno, int args_mask, int remaining_args) {
+ ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
    // Create a new ArgValue and fill it with random data. We use a bit mask
// to keep track of the system call parameters that have previously been
// set; this ensures that we won't accidentally define a contradictory
// set of equality tests.
- struct ArgValue *arg_value = new ArgValue();
- args_mask |= 1 << argno;
- arg_value->argno = argno;
+ struct ArgValue* arg_value = new ArgValue();
+ args_mask |= 1 << argno;
+ arg_value->argno = argno;
// Apply some restrictions on just how complex our tests can be.
// Otherwise, we end up with a BPF program that is too complicated for
// the kernel to load.
- int fan_out = kMaxFanOut;
+ int fan_out = kMaxFanOut;
if (remaining_args > 3) {
- fan_out = 1;
+ fan_out = 1;
} else if (remaining_args > 2) {
- fan_out = 2;
+ fan_out = 2;
}
// Create a couple of different test cases with randomized values that
// we want to use when comparing system call parameter number "argno".
- arg_value->size = rand() % fan_out + 1;
- arg_value->tests = new ArgValue::Tests[arg_value->size];
+ arg_value->size = rand() % fan_out + 1;
+ arg_value->tests = new ArgValue::Tests[arg_value->size];
- uint32_t k_value = rand();
+ uint32_t k_value = rand();
for (int n = 0; n < arg_value->size; ++n) {
// Ensure that we have unique values
- k_value += rand() % (RAND_MAX/(kMaxFanOut+1)) + 1;
+ k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;
// There are two possible types of nodes. Either this is a leaf node;
// in that case, we have completed all the equality tests that we
@@ -967,7 +987,7 @@ class EqualityStressTest {
} else {
arg_value->tests[n].err = 0;
arg_value->tests[n].arg_value =
- RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
+ RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
}
}
// Finally, we have to define what we should return if none of the
@@ -979,7 +999,7 @@ class EqualityStressTest {
} else {
arg_value->err = 0;
arg_value->arg_value =
- RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
+ RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
}
// We have now built a new (sub-)tree of ArgValues defining a set of
// boolean expressions for testing random system call arguments against
@@ -1000,7 +1020,7 @@ class EqualityStressTest {
return argno;
}
- void DeleteArgValue(ArgValue *arg_value) {
+ void DeleteArgValue(ArgValue* arg_value) {
// Delete an ArgValue and all of its child nodes. This requires
// recursively descending into the tree.
if (arg_value) {
@@ -1019,7 +1039,7 @@ class EqualityStressTest {
}
}
- ErrorCode ToErrorCode(Sandbox *sandbox, ArgValue *arg_value) {
+ ErrorCode ToErrorCode(Sandbox* sandbox, ArgValue* arg_value) {
// Compute the ErrorCode that should be returned, if none of our
// tests succeed (i.e. the system call parameter doesn't match any
// of the values in arg_value->tests[].k_value).
@@ -1038,7 +1058,7 @@ class EqualityStressTest {
// Now, iterate over all the test cases that we want to compare against.
// This builds a chain of Sandbox::Cond() tests
// (aka "if ... elif ... elif ... elif ... fi")
- for (int n = arg_value->size; n-- > 0; ) {
+ for (int n = arg_value->size; n-- > 0;) {
ErrorCode matched;
// Again, we distinguish between leaf nodes and subtrees.
if (arg_value->tests[n].err) {
@@ -1049,19 +1069,22 @@ class EqualityStressTest {
// For now, all of our tests are limited to 32bit.
// We have separate tests that check the behavior of 32bit vs. 64bit
// conditional expressions.
- err = sandbox->Cond(arg_value->argno, ErrorCode::TP_32BIT,
- ErrorCode::OP_EQUAL, arg_value->tests[n].k_value,
- matched, err);
+ err = sandbox->Cond(arg_value->argno,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ arg_value->tests[n].k_value,
+ matched,
+ err);
}
return err;
}
- void Verify(int sysno, intptr_t *args, const ArgValue& arg_value) {
+ void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
uint32_t mismatched = 0;
// Iterate over all the k_values in arg_value.tests[] and verify that
// we see the expected return values from system calls, when we pass
// the k_value as a parameter in a system call.
- for (int n = arg_value.size; n-- > 0; ) {
+ for (int n = arg_value.size; n-- > 0;) {
mismatched += arg_value.tests[n].k_value;
args[arg_value.argno] = arg_value.tests[n].k_value;
if (arg_value.tests[n].err) {
@@ -1070,12 +1093,12 @@ class EqualityStressTest {
Verify(sysno, args, *arg_value.tests[n].arg_value);
}
}
- // Find a k_value that doesn't match any of the k_values in
- // arg_value.tests[]. In most cases, the current value of "mismatched"
- // would fit this requirement. But on the off-chance that it happens
- // to collide, we double-check.
+ // Find a k_value that doesn't match any of the k_values in
+ // arg_value.tests[]. In most cases, the current value of "mismatched"
+ // would fit this requirement. But on the off-chance that it happens
+ // to collide, we double-check.
try_again:
- for (int n = arg_value.size; n-- > 0; ) {
+ for (int n = arg_value.size; n-- > 0;) {
if (mismatched == arg_value.tests[n].k_value) {
++mismatched;
goto try_again;
@@ -1095,18 +1118,19 @@ class EqualityStressTest {
args[arg_value.argno] = 0;
}
- void VerifyErrno(int sysno, intptr_t *args, int err) {
+ void VerifyErrno(int sysno, intptr_t* args, int err) {
// We installed BPF filters that return different errno values
// based on the system call number and the parameters that we decided
// to pass in. Verify that this condition holds true.
- BPF_ASSERT(SandboxSyscall(sysno,
- args[0], args[1], args[2],
- args[3], args[4], args[5]) == -err);
+ BPF_ASSERT(
+ SandboxSyscall(
+ sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
+ -err);
}
// Vector of ArgValue trees. These trees define all the possible boolean
// expressions that we want to turn into a BPF filter program.
- std::vector<ArgValue *> arg_values_;
+ std::vector<ArgValue*> arg_values_;
// Don't increase these values. We are pushing the limits of the maximum
// BPF program that the kernel will allow us to load. If the values are
@@ -1116,33 +1140,47 @@ class EqualityStressTest {
static const int kMaxArgs = 6;
};
-ErrorCode EqualityStressTestPolicy(Sandbox *sandbox, int sysno, void *aux) {
- return reinterpret_cast<EqualityStressTest *>(aux)->Policy(sandbox, sysno);
+ErrorCode EqualityStressTestPolicy(Sandbox* sandbox, int sysno, void* aux) {
+ return reinterpret_cast<EqualityStressTest*>(aux)->Policy(sandbox, sysno);
}
-BPF_TEST(SandboxBpf, EqualityTests, EqualityStressTestPolicy,
+BPF_TEST(SandboxBpf,
+ EqualityTests,
+ EqualityStressTestPolicy,
EqualityStressTest /* BPF_AUX */) {
BPF_AUX.VerifyFilter();
}
-ErrorCode EqualityArgumentWidthPolicy(Sandbox *sandbox, int sysno, void *) {
+ErrorCode EqualityArgumentWidthPolicy(Sandbox* sandbox, int sysno, void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
} else if (sysno == __NR_uname) {
- return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
- sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
- 0x55555555, ErrorCode(1), ErrorCode(2)),
- // The BPF compiler and the BPF interpreter in the kernel are
- // (mostly) agnostic of the host platform's word size. The compiler
- // will happily generate code that tests a 64bit value, and the
- // interpreter will happily perform this test.
- // But unless there is a kernel bug, there is no way for us to pass
- // in a 64bit quantity on a 32bit platform. The upper 32bits should
- // always be zero. So, this test should always evaluate as false on
- // 32bit systems.
- sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_EQUAL,
- 0x55555555AAAAAAAAULL, ErrorCode(1), ErrorCode(2)));
+ return sandbox->Cond(
+ 0,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ 0,
+ sandbox->Cond(1,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ 0x55555555,
+ ErrorCode(1),
+ ErrorCode(2)),
+ // The BPF compiler and the BPF interpreter in the kernel are
+ // (mostly) agnostic of the host platform's word size. The compiler
+ // will happily generate code that tests a 64bit value, and the
+ // interpreter will happily perform this test.
+ // But unless there is a kernel bug, there is no way for us to pass
+ // in a 64bit quantity on a 32bit platform. The upper 32bits should
+ // always be zero. So, this test should always evaluate as false on
+ // 32bit systems.
+ sandbox->Cond(1,
+ ErrorCode::TP_64BIT,
+ ErrorCode::OP_EQUAL,
+ 0x55555555AAAAAAAAULL,
+ ErrorCode(1),
+ ErrorCode(2)));
} else {
return ErrorCode(ErrorCode::ERR_ALLOWED);
}
@@ -1168,27 +1206,34 @@ BPF_TEST(SandboxBpf, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
// On 32bit machines, there is no way to pass a 64bit argument through the
// syscall interface. So, we have to skip the part of the test that requires
// 64bit arguments.
-BPF_DEATH_TEST(SandboxBpf, EqualityArgumentUnallowed64bit,
+BPF_DEATH_TEST(SandboxBpf,
+ EqualityArgumentUnallowed64bit,
DEATH_MESSAGE("Unexpected 64bit argument detected"),
EqualityArgumentWidthPolicy) {
SandboxSyscall(__NR_uname, 0, 0x5555555555555555ULL);
}
#endif
-ErrorCode EqualityWithNegativeArgumentsPolicy(Sandbox *sandbox, int sysno,
- void *) {
+ErrorCode EqualityWithNegativeArgumentsPolicy(Sandbox* sandbox,
+ int sysno,
+ void*) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
// FIXME: we should really not have to do that in a trivial policy
return ErrorCode(ENOSYS);
} else if (sysno == __NR_uname) {
- return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
- 0xFFFFFFFF, ErrorCode(1), ErrorCode(2));
+ return sandbox->Cond(0,
+ ErrorCode::TP_32BIT,
+ ErrorCode::OP_EQUAL,
+ 0xFFFFFFFF,
+ ErrorCode(1),
+ ErrorCode(2));
} else {
return ErrorCode(ErrorCode::ERR_ALLOWED);
}
}
-BPF_TEST(SandboxBpf, EqualityWithNegativeArguments,
+BPF_TEST(SandboxBpf,
+ EqualityWithNegativeArguments,
EqualityWithNegativeArgumentsPolicy) {
BPF_ASSERT(SandboxSyscall(__NR_uname, 0xFFFFFFFF) == -1);
BPF_ASSERT(SandboxSyscall(__NR_uname, -1) == -1);
@@ -1196,7 +1241,8 @@ BPF_TEST(SandboxBpf, EqualityWithNegativeArguments,
}
#if __SIZEOF_POINTER__ > 4
-BPF_DEATH_TEST(SandboxBpf, EqualityWithNegative64bitArguments,
+BPF_DEATH_TEST(SandboxBpf,
+ EqualityWithNegative64bitArguments,
DEATH_MESSAGE("Unexpected 64bit argument detected"),
EqualityWithNegativeArgumentsPolicy) {
// When expecting a 32bit system call argument, we look at the MSB of the
@@ -1205,7 +1251,6 @@ BPF_DEATH_TEST(SandboxBpf, EqualityWithNegative64bitArguments,
BPF_ASSERT(SandboxSyscall(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
}
#endif
-
ErrorCode AllBitTestPolicy(Sandbox *sandbox, int sysno, void *) {
// Test the OP_HAS_ALL_BITS conditional test operator with a couple of
// different bitmasks. We try to find bitmasks that could conceivably
@@ -1284,13 +1329,13 @@ ErrorCode AllBitTestPolicy(Sandbox *sandbox, int sysno, void *) {
// Most notably, "op" and "mask" are unused by the macro. If you want
// to make changes to these values, you will have to edit the
// test policy instead.
-#define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
+#define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
BPF_ASSERT(SandboxSyscall(__NR_uname, (testcase), (arg)) == (expected_value))
// Our uname() system call returns ErrorCode(1) for success and
// ErrorCode(0) for failure. SandboxSyscall() turns this into an
// exit code of -1 or 0.
-#define EXPECT_FAILURE 0
+#define EXPECT_FAILURE 0
#define EXPECT_SUCCESS -1
// A couple of our tests behave differently on 32bit and 64bit systems, as
@@ -1298,9 +1343,7 @@ ErrorCode AllBitTestPolicy(Sandbox *sandbox, int sysno, void *) {
// argument "arg".
// We expect these tests to succeed on 64bit systems, but to fail on 32bit
// systems.
-#define EXPT64_SUCCESS \
- (sizeof(void *) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
-
+#define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
BPF_TEST(SandboxBpf, AllBitTests, AllBitTestPolicy) {
// 32bit test: all of 0x0 (should always be true)
BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
@@ -1404,7 +1447,7 @@ BPF_TEST(SandboxBpf, AllBitTests, AllBitTestPolicy) {
BITMASK_TEST(10, -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
}
-ErrorCode AnyBitTestPolicy(Sandbox *sandbox, int sysno, void *) {
+ErrorCode AnyBitTestPolicy(Sandbox* sandbox, int sysno, void*) {
// Test the OP_HAS_ANY_BITS conditional test operator with a couple of
// different bitmasks. We try to find bitmasks that could conceivably
// touch corner cases.
@@ -1581,31 +1624,34 @@ BPF_TEST(SandboxBpf, AnyBitTests, AnyBitTestPolicy) {
BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
}
-intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void *aux) {
- if (args.args[0] != (CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD)) {
+intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
+ if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
// We expect to get called for an attempt to fork(). No need to log that
// call. But if we ever get called for anything else, we want to verbosely
// print as much information as possible.
- const char *msg = (const char *)aux;
- printf("Clone() was called with unexpected arguments\n"
- " nr: %d\n"
- " 1: 0x%llX\n"
- " 2: 0x%llX\n"
- " 3: 0x%llX\n"
- " 4: 0x%llX\n"
- " 5: 0x%llX\n"
- " 6: 0x%llX\n"
- "%s\n",
- args.nr,
- (long long)args.args[0], (long long)args.args[1],
- (long long)args.args[2], (long long)args.args[3],
- (long long)args.args[4], (long long)args.args[5],
- msg);
+ const char* msg = (const char*)aux;
+ printf(
+ "Clone() was called with unexpected arguments\n"
+ " nr: %d\n"
+ " 1: 0x%llX\n"
+ " 2: 0x%llX\n"
+ " 3: 0x%llX\n"
+ " 4: 0x%llX\n"
+ " 5: 0x%llX\n"
+ " 6: 0x%llX\n"
+ "%s\n",
+ args.nr,
+ (long long)args.args[0],
+ (long long)args.args[1],
+ (long long)args.args[2],
+ (long long)args.args[3],
+ (long long)args.args[4],
+ (long long)args.args[5],
+ msg);
}
return -EPERM;
}
-
-ErrorCode PthreadPolicyEquality(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode PthreadPolicyEquality(Sandbox* sandbox, int sysno, void* aux) {
// This policy allows creating threads with pthread_create(). But it
// doesn't allow any other uses of clone(). Most notably, it does not
// allow callers to implement fork() or vfork() by passing suitable flags
@@ -1645,7 +1691,7 @@ ErrorCode PthreadPolicyEquality(Sandbox *sandbox, int sysno, void *aux) {
}
}
-ErrorCode PthreadPolicyBitMask(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode PthreadPolicyBitMask(Sandbox* sandbox, int sysno, void* aux) {
// This policy allows creating threads with pthread_create(). But it
// doesn't allow any other uses of clone(). Most notably, it does not
// allow callers to implement fork() or vfork() by passing suitable flags
@@ -1690,8 +1736,8 @@ ErrorCode PthreadPolicyBitMask(Sandbox *sandbox, int sysno, void *aux) {
}
}
-static void *ThreadFnc(void *arg) {
- ++*reinterpret_cast<int *>(arg);
+static void* ThreadFnc(void* arg) {
+ ++*reinterpret_cast<int*>(arg);
SandboxSyscall(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
return NULL;
}
@@ -1711,8 +1757,8 @@ static void PthreadTest() {
BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
BPF_ASSERT(!pthread_attr_destroy(&attr));
- while (SandboxSyscall(__NR_futex, &thread_ran, FUTEX_WAIT,
- 0, 0, 0, 0) == -EINTR) {
+ while (SandboxSyscall(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
+ -EINTR) {
}
BPF_ASSERT(thread_ran);
@@ -1723,16 +1769,14 @@ static void PthreadTest() {
// __NR_clone, and that would introduce a bogus test failure.
int pid;
BPF_ASSERT(SandboxSyscall(__NR_clone,
- CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD,
- 0, 0, &pid) == -EPERM);
+ CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
+ 0,
+ 0,
+ &pid) == -EPERM);
}
-BPF_TEST(SandboxBpf, PthreadEquality, PthreadPolicyEquality) {
- PthreadTest();
-}
+BPF_TEST(SandboxBpf, PthreadEquality, PthreadPolicyEquality) { PthreadTest(); }
-BPF_TEST(SandboxBpf, PthreadBitMask, PthreadPolicyBitMask) {
- PthreadTest();
-}
+BPF_TEST(SandboxBpf, PthreadBitMask, PthreadPolicyBitMask) { PthreadTest(); }
} // namespace
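
For context, here is a minimal sketch of the kind of conditional policy these tests exercise, written in the one-argument-per-line layout that clang-format now produces for long Sandbox::Cond() calls. It is not part of this CL; the policy name, the PR_SET_NAME constant and the EACCES result are illustrative, while the Sandbox/ErrorCode calls mirror the ones visible in the hunks above.

    #include <errno.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>

    #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h"

    using playground2::ErrorCode;
    using playground2::Sandbox;

    // Allow prctl(PR_SET_NAME, ...), report EACCES for any other prctl()
    // invocation, and allow every other system call.
    ErrorCode ExamplePrctlPolicy(Sandbox* sandbox, int sysno, void* /* aux */) {
      if (!Sandbox::IsValidSyscallNumber(sysno)) {
        return ErrorCode(ENOSYS);
      }
      if (sysno == __NR_prctl) {
        return sandbox->Cond(0,
                             ErrorCode::TP_32BIT,
                             ErrorCode::OP_EQUAL,
                             PR_SET_NAME,
                             ErrorCode(ErrorCode::ERR_ALLOWED),
                             ErrorCode(EACCES));
      }
      return ErrorCode(ErrorCode::ERR_ALLOWED);
    }
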
diff --git a/sandbox/linux/seccomp-bpf/syscall.h b/sandbox/linux/seccomp-bpf/syscall.h
index 39b1bca..f63516b 100644
--- a/sandbox/linux/seccomp-bpf/syscall.h
+++ b/sandbox/linux/seccomp-bpf/syscall.h
@@ -16,9 +16,12 @@ namespace playground2 {
// Passing "nr" as "-1" computes the "magic" return address. Passing any
// other value invokes the appropriate system call.
intptr_t SandboxSyscall(int nr,
- intptr_t p0, intptr_t p1, intptr_t p2,
- intptr_t p3, intptr_t p4, intptr_t p5);
-
+ intptr_t p0,
+ intptr_t p1,
+ intptr_t p2,
+ intptr_t p3,
+ intptr_t p4,
+ intptr_t p5);
// System calls can take up to six parameters. Traditionally, glibc
// implements this property by using variadic argument lists. This works, but
@@ -37,19 +40,30 @@ intptr_t SandboxSyscall(int nr,
// easier to read as it hides implementation details.
#if __cplusplus >= 201103 // C++11
-template<class T0 = intptr_t, class T1 = intptr_t, class T2 = intptr_t,
- class T3 = intptr_t, class T4 = intptr_t, class T5 = intptr_t>
-inline intptr_t SandboxSyscall(int nr,
- T0 p0 = 0, T1 p1 = 0, T2 p2 = 0,
- T3 p3 = 0, T4 p4 = 0, T5 p5 = 0)
- __attribute__((always_inline));
-
-template<class T0, class T1, class T2, class T3, class T4, class T5>
+template <class T0 = intptr_t,
+ class T1 = intptr_t,
+ class T2 = intptr_t,
+ class T3 = intptr_t,
+ class T4 = intptr_t,
+ class T5 = intptr_t>
inline intptr_t SandboxSyscall(int nr,
- T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
+ T0 p0 = 0,
+ T1 p1 = 0,
+ T2 p2 = 0,
+ T3 p3 = 0,
+ T4 p4 = 0,
+ T5 p5 = 0) __attribute__((always_inline));
+
+template <class T0, class T1, class T2, class T3, class T4, class T5>
+inline intptr_t
+SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
return SandboxSyscall(nr,
- (intptr_t)p0, (intptr_t)p1, (intptr_t)p2,
- (intptr_t)p3, (intptr_t)p4, (intptr_t)p5);
+ (intptr_t)p0,
+ (intptr_t)p1,
+ (intptr_t)p2,
+ (intptr_t)p3,
+ (intptr_t)p4,
+ (intptr_t)p5);
}
#else // Pre-C++11
@@ -58,60 +72,61 @@ inline intptr_t SandboxSyscall(int nr,
// expressing what we are doing here. Delete the fall-back code for older
// compilers as soon as we have fully switched to C++11
-template<class T0, class T1, class T2, class T3, class T4, class T5>
-inline intptr_t SandboxSyscall(int nr,
- T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5)
- __attribute__((always_inline));
-template<class T0, class T1, class T2, class T3, class T4, class T5>
-inline intptr_t SandboxSyscall(int nr,
- T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
+template <class T0, class T1, class T2, class T3, class T4, class T5>
+inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5)
+ __attribute__((always_inline));
+template <class T0, class T1, class T2, class T3, class T4, class T5>
+inline intptr_t
+SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4, T5 p5) {
return SandboxSyscall(nr,
- (intptr_t)p0, (intptr_t)p1, (intptr_t)p2,
- (intptr_t)p3, (intptr_t)p4, (intptr_t)p5);
+ (intptr_t)p0,
+ (intptr_t)p1,
+ (intptr_t)p2,
+ (intptr_t)p3,
+ (intptr_t)p4,
+ (intptr_t)p5);
}
-template<class T0, class T1, class T2, class T3, class T4>
+template <class T0, class T1, class T2, class T3, class T4>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4)
- __attribute__((always_inline));
-template<class T0, class T1, class T2, class T3, class T4>
+ __attribute__((always_inline));
+template <class T0, class T1, class T2, class T3, class T4>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3, T4 p4) {
return SandboxSyscall(nr, p0, p1, p2, p3, p4, 0);
}
-template<class T0, class T1, class T2, class T3>
+template <class T0, class T1, class T2, class T3>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3)
- __attribute__((always_inline));
-template<class T0, class T1, class T2, class T3>
+ __attribute__((always_inline));
+template <class T0, class T1, class T2, class T3>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2, T3 p3) {
return SandboxSyscall(nr, p0, p1, p2, p3, 0, 0);
}
-template<class T0, class T1, class T2>
+template <class T0, class T1, class T2>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2)
- __attribute__((always_inline));
-template<class T0, class T1, class T2>
+ __attribute__((always_inline));
+template <class T0, class T1, class T2>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1, T2 p2) {
return SandboxSyscall(nr, p0, p1, p2, 0, 0, 0);
}
-template<class T0, class T1>
+template <class T0, class T1>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1)
- __attribute__((always_inline));
-template<class T0, class T1>
+ __attribute__((always_inline));
+template <class T0, class T1>
inline intptr_t SandboxSyscall(int nr, T0 p0, T1 p1) {
return SandboxSyscall(nr, p0, p1, 0, 0, 0, 0);
}
-template<class T0>
-inline intptr_t SandboxSyscall(int nr, T0 p0)
- __attribute__((always_inline));
-template<class T0>
+template <class T0>
+inline intptr_t SandboxSyscall(int nr, T0 p0) __attribute__((always_inline));
+template <class T0>
inline intptr_t SandboxSyscall(int nr, T0 p0) {
return SandboxSyscall(nr, p0, 0, 0, 0, 0, 0);
}
-inline intptr_t SandboxSyscall(int nr)
- __attribute__((always_inline));
+inline intptr_t SandboxSyscall(int nr) __attribute__((always_inline));
inline intptr_t SandboxSyscall(int nr) {
return SandboxSyscall(nr, 0, 0, 0, 0, 0, 0);
}
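
The template machinery above is hard to read in diff form, so here is a self-contained toy (not the real SandboxSyscall) showing the same C++11 trick: defaulted template parameters plus defaulted arguments let a single wrapper accept zero to six parameters of mixed integer and pointer types and forward them, cast to intptr_t, to one fixed-arity entry point.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the real six-argument entry point.
    intptr_t RawCall(int nr, intptr_t p0, intptr_t p1, intptr_t p2,
                     intptr_t p3, intptr_t p4, intptr_t p5) {
      std::printf("nr=%d args=%ld %ld %ld %ld %ld %ld\n", nr, (long)p0, (long)p1,
                  (long)p2, (long)p3, (long)p4, (long)p5);
      return 0;
    }

    // Missing trailing parameters fall back to intptr_t(0); supplied parameters
    // have their types deduced and are cast when forwarding.
    template <class T0 = intptr_t, class T1 = intptr_t, class T2 = intptr_t,
              class T3 = intptr_t, class T4 = intptr_t, class T5 = intptr_t>
    inline intptr_t Call(int nr, T0 p0 = 0, T1 p1 = 0, T2 p2 = 0, T3 p3 = 0,
                         T4 p4 = 0, T5 p5 = 0) {
      return RawCall(nr, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3,
                     (intptr_t)p4, (intptr_t)p5);
    }

    int main() {
      Call(1);                      // no explicit parameters
      Call(2, "/dev/null", 0, 0L);  // mixed pointer and integer parameters
      return 0;
    }
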
diff --git a/sandbox/linux/seccomp-bpf/syscall_iterator.cc b/sandbox/linux/seccomp-bpf/syscall_iterator.cc
index 4ea979a..2799df4 100644
--- a/sandbox/linux/seccomp-bpf/syscall_iterator.cc
+++ b/sandbox/linux/seccomp-bpf/syscall_iterator.cc
@@ -17,8 +17,7 @@ uint32_t SyscallIterator::Next() {
do {
// |num_| has been initialized to 0, which we assume is also MIN_SYSCALL.
    // This is true for supported architectures (Intel and ARM EABI).
- COMPILE_ASSERT(MIN_SYSCALL == 0u,
- min_syscall_should_always_be_zero);
+ COMPILE_ASSERT(MIN_SYSCALL == 0u, min_syscall_should_always_be_zero);
val = num_;
// First we iterate up to MAX_PUBLIC_SYSCALL, which is equal to MAX_SYSCALL
@@ -30,9 +29,9 @@ uint32_t SyscallIterator::Next() {
++num_;
}
#if defined(__arm__)
- // ARM EABI includes "ARM private" system calls starting at
- // MIN_PRIVATE_SYSCALL, and a "ghost syscall private to the kernel" at
- // MIN_GHOST_SYSCALL.
+ // ARM EABI includes "ARM private" system calls starting at
+ // MIN_PRIVATE_SYSCALL, and a "ghost syscall private to the kernel" at
+ // MIN_GHOST_SYSCALL.
} else if (num_ < MIN_PRIVATE_SYSCALL - 1) {
num_ = MIN_PRIVATE_SYSCALL - 1;
} else if (num_ <= MAX_PRIVATE_SYSCALL) {
@@ -50,12 +49,12 @@ uint32_t SyscallIterator::Next() {
++num_;
}
#endif
- // BPF programs only ever operate on unsigned quantities. So, that's how
- // we iterate; we return values from 0..0xFFFFFFFFu. But there are places,
- // where the kernel might interpret system call numbers as signed
- // quantities, so the boundaries between signed and unsigned values are
- // potential problem cases. We want to explicitly return these values from
- // our iterator.
+ // BPF programs only ever operate on unsigned quantities. So, that's how
+ // we iterate; we return values from 0..0xFFFFFFFFu. But there are places,
+ // where the kernel might interpret system call numbers as signed
+ // quantities, so the boundaries between signed and unsigned values are
+ // potential problem cases. We want to explicitly return these values from
+ // our iterator.
} else if (num_ < 0x7FFFFFFFu) {
num_ = 0x7FFFFFFFu;
} else if (num_ < 0x80000000u) {
@@ -86,10 +85,7 @@ bool SyscallIterator::IsArmPrivate(uint32_t num) {
(num >= MIN_GHOST_SYSCALL && num <= MAX_SYSCALL);
}
#else
-bool SyscallIterator::IsArmPrivate(uint32_t) {
- return false;
-}
+bool SyscallIterator::IsArmPrivate(uint32_t) { return false; }
#endif
} // namespace
-
diff --git a/sandbox/linux/seccomp-bpf/syscall_iterator.h b/sandbox/linux/seccomp-bpf/syscall_iterator.h
index e17593d..9e12b8b 100644
--- a/sandbox/linux/seccomp-bpf/syscall_iterator.h
+++ b/sandbox/linux/seccomp-bpf/syscall_iterator.h
@@ -32,9 +32,7 @@ namespace playground2 {
class SyscallIterator {
public:
explicit SyscallIterator(bool invalid_only)
- : invalid_only_(invalid_only),
- done_(false),
- num_(0) {}
+ : invalid_only_(invalid_only), done_(false), num_(0) {}
bool Done() const { return done_; }
uint32_t Next();
@@ -43,8 +41,8 @@ class SyscallIterator {
private:
static bool IsArmPrivate(uint32_t num);
- bool invalid_only_;
- bool done_;
+ bool invalid_only_;
+ bool done_;
uint32_t num_;
DISALLOW_IMPLICIT_CONSTRUCTORS(SyscallIterator);
@@ -53,4 +51,3 @@ class SyscallIterator {
} // namespace playground2
#endif // SANDBOX_LINUX_SECCOMP_BPF_SYSCALL_ITERATOR_H__
-
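
A small usage sketch (not from this CL) based on the interface above: SyscallIterator walks every interesting system call number, including the signed/unsigned boundary values described in syscall_iterator.cc, and Done() becomes true once the last value has been produced.

    #include <stddef.h>
    #include <stdint.h>

    #include "sandbox/linux/seccomp-bpf/syscall_iterator.h"

    // Count the system call numbers the iterator yields. Passing true restricts
    // the sequence to invalid/boundary numbers, which is how a test would probe
    // a policy's default branch.
    size_t CountSyscallNumbers(bool invalid_only) {
      playground2::SyscallIterator iter(invalid_only);
      size_t count = 0;
      while (!iter.Done()) {
        uint32_t sysnum = iter.Next();
        (void)sysnum;  // A real test would evaluate its policy for |sysnum| here.
        ++count;
      }
      return count;
    }
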
diff --git a/sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc b/sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc
index 26f11ce..61e95d7 100644
--- a/sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc
+++ b/sandbox/linux/seccomp-bpf/syscall_iterator_unittest.cc
@@ -12,7 +12,7 @@ namespace {
SANDBOX_TEST(SyscallIterator, Monotonous) {
for (int i = 0; i < 2; ++i) {
- bool invalid_only = !i; // Testing both |invalid_only| cases.
+ bool invalid_only = !i; // Testing both |invalid_only| cases.
SyscallIterator iter(invalid_only);
uint32_t next = iter.Next();
@@ -79,7 +79,7 @@ SANDBOX_TEST(SyscallIterator, ARMHiddenSyscallRange) {
SANDBOX_TEST(SyscallIterator, Invalid) {
for (int i = 0; i < 2; ++i) {
- bool invalid_only = !i; // Testing both |invalid_only| cases.
+ bool invalid_only = !i; // Testing both |invalid_only| cases.
SyscallIterator iter(invalid_only);
uint32_t next = iter.Next();
@@ -132,4 +132,3 @@ SANDBOX_TEST(SyscallIterator, InvalidOnly) {
}
} // namespace
-
diff --git a/sandbox/linux/seccomp-bpf/syscall_unittest.cc b/sandbox/linux/seccomp-bpf/syscall_unittest.cc
index 136deb6..0472448 100644
--- a/sandbox/linux/seccomp-bpf/syscall_unittest.cc
+++ b/sandbox/linux/seccomp-bpf/syscall_unittest.cc
@@ -31,27 +31,27 @@ const int kMMapNr = __NR_mmap;
#endif
TEST(Syscall, WellKnownEntryPoint) {
- // Test that SandboxSyscall(-1) is handled specially. Don't do this on ARM,
- // where syscall(-1) crashes with SIGILL. Not running the test is fine, as we
- // are still testing ARM code in the next set of tests.
+// Test that SandboxSyscall(-1) is handled specially. Don't do this on ARM,
+// where syscall(-1) crashes with SIGILL. Not running the test is fine, as we
+// are still testing ARM code in the next set of tests.
#if !defined(__arm__)
EXPECT_NE(SandboxSyscall(-1), syscall(-1));
#endif
- // If possible, test that SandboxSyscall(-1) returns the address right after
- // a kernel entry point.
+// If possible, test that SandboxSyscall(-1) returns the address right after
+// a kernel entry point.
#if defined(__i386__)
- EXPECT_EQ(0x80CDu, ((uint16_t *)SandboxSyscall(-1))[-1]); // INT 0x80
+ EXPECT_EQ(0x80CDu, ((uint16_t*)SandboxSyscall(-1))[-1]); // INT 0x80
#elif defined(__x86_64__)
- EXPECT_EQ(0x050Fu, ((uint16_t *)SandboxSyscall(-1))[-1]); // SYSCALL
+ EXPECT_EQ(0x050Fu, ((uint16_t*)SandboxSyscall(-1))[-1]); // SYSCALL
#elif defined(__arm__)
#if defined(__thumb__)
- EXPECT_EQ(0xDF00u, ((uint16_t *)SandboxSyscall(-1))[-1]); // SWI 0
+ EXPECT_EQ(0xDF00u, ((uint16_t*)SandboxSyscall(-1))[-1]); // SWI 0
#else
- EXPECT_EQ(0xEF000000u, ((uint32_t *)SandboxSyscall(-1))[-1]); // SVC 0
+ EXPECT_EQ(0xEF000000u, ((uint32_t*)SandboxSyscall(-1))[-1]); // SVC 0
#endif
#else
- #warning Incomplete test case; need port for target platform
+#warning Incomplete test case; need port for target platform
#endif
}
@@ -69,7 +69,7 @@ TEST(Syscall, TrivialSyscallOneArg) {
}
// SIGSYS trap handler that will be called on __NR_uname.
-intptr_t CopySyscallArgsToAux(const struct arch_seccomp_data& args, void *aux) {
+intptr_t CopySyscallArgsToAux(const struct arch_seccomp_data& args, void* aux) {
// |aux| is a pointer to our BPF_AUX.
std::vector<uint64_t>* const seen_syscall_args =
static_cast<std::vector<uint64_t>*>(aux);
@@ -78,7 +78,7 @@ intptr_t CopySyscallArgsToAux(const struct arch_seccomp_data& args, void *aux) {
return -ENOMEM;
}
-ErrorCode CopyAllArgsOnUnamePolicy(Sandbox *sandbox, int sysno, void *aux) {
+ErrorCode CopyAllArgsOnUnamePolicy(Sandbox* sandbox, int sysno, void* aux) {
if (!Sandbox::IsValidSyscallNumber(sysno)) {
return ErrorCode(ENOSYS);
}
@@ -91,7 +91,9 @@ ErrorCode CopyAllArgsOnUnamePolicy(Sandbox *sandbox, int sysno, void *aux) {
// We are testing SandboxSyscall() by making use of a BPF filter that allows us
// to inspect the system call arguments that the kernel saw.
-BPF_TEST(Syscall, SyntheticSixArgs, CopyAllArgsOnUnamePolicy,
+BPF_TEST(Syscall,
+ SyntheticSixArgs,
+ CopyAllArgsOnUnamePolicy,
std::vector<uint64_t> /* BPF_AUX */) {
const int kExpectedValue = 42;
// In this test we only pass integers to the kernel. We might want to make
@@ -105,12 +107,13 @@ BPF_TEST(Syscall, SyntheticSixArgs, CopyAllArgsOnUnamePolicy,
// We could use pretty much any system call we don't need here. uname() is
// nice because it doesn't have any dangerous side effects.
- BPF_ASSERT(SandboxSyscall(__NR_uname, syscall_args[0],
- syscall_args[1],
- syscall_args[2],
- syscall_args[3],
- syscall_args[4],
- syscall_args[5]) == -ENOMEM);
+ BPF_ASSERT(SandboxSyscall(__NR_uname,
+ syscall_args[0],
+ syscall_args[1],
+ syscall_args[2],
+ syscall_args[3],
+ syscall_args[4],
+ syscall_args[5]) == -ENOMEM);
// We expect the trap handler to have copied the 6 arguments.
BPF_ASSERT(BPF_AUX.size() == 6);
@@ -131,20 +134,29 @@ TEST(Syscall, ComplexSyscallSixArgs) {
ASSERT_LE(0, fd = SandboxSyscall(__NR_open, "/dev/null", O_RDWR, 0L));
// Use mmap() to allocate some read-only memory
- char *addr0;
- ASSERT_NE((char *)NULL,
- addr0 = reinterpret_cast<char *>(
- SandboxSyscall(kMMapNr, (void *)NULL, 4096, PROT_READ,
- MAP_PRIVATE|MAP_ANONYMOUS, fd, 0L)));
+ char* addr0;
+ ASSERT_NE((char*)NULL,
+ addr0 = reinterpret_cast<char*>(
+ SandboxSyscall(kMMapNr,
+ (void*)NULL,
+ 4096,
+ PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ fd,
+ 0L)));
// Try to replace the existing mapping with a read-write mapping
- char *addr1;
+ char* addr1;
ASSERT_EQ(addr0,
- addr1 = reinterpret_cast<char *>(
- SandboxSyscall(kMMapNr, addr0, 4096L, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
- fd, 0L)));
- ++*addr1; // This should not seg fault
+ addr1 = reinterpret_cast<char*>(
+ SandboxSyscall(kMMapNr,
+ addr0,
+ 4096L,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ fd,
+ 0L)));
+ ++*addr1; // This should not seg fault
// Clean up
EXPECT_EQ(0, SandboxSyscall(__NR_munmap, addr1, 4096L));
@@ -153,21 +165,23 @@ TEST(Syscall, ComplexSyscallSixArgs) {
// Check that the offset argument (i.e. the sixth argument) is processed
// correctly.
ASSERT_GE(fd = SandboxSyscall(__NR_open, "/proc/self/exe", O_RDONLY, 0L), 0);
- char *addr2, *addr3;
- ASSERT_NE((char *)NULL,
- addr2 = reinterpret_cast<char *>(
- SandboxSyscall(kMMapNr, (void *)NULL, 8192L, PROT_READ,
- MAP_PRIVATE, fd, 0L)));
- ASSERT_NE((char *)NULL,
- addr3 = reinterpret_cast<char *>(
- SandboxSyscall(kMMapNr, (void *)NULL, 4096L, PROT_READ,
- MAP_PRIVATE, fd,
+ char* addr2, *addr3;
+ ASSERT_NE((char*)NULL,
+ addr2 = reinterpret_cast<char*>(SandboxSyscall(
+ kMMapNr, (void*)NULL, 8192L, PROT_READ, MAP_PRIVATE, fd, 0L)));
+ ASSERT_NE((char*)NULL,
+ addr3 = reinterpret_cast<char*>(SandboxSyscall(kMMapNr,
+ (void*)NULL,
+ 4096L,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
#if defined(__NR_mmap2)
- 1L
+ 1L
#else
- 4096L
+ 4096L
#endif
- )));
+ )));
EXPECT_EQ(0, memcmp(addr2 + 4096, addr3, 4096));
// Just to be absolutely on the safe side, also verify that the file
@@ -182,4 +196,4 @@ TEST(Syscall, ComplexSyscallSixArgs) {
EXPECT_EQ(0, HANDLE_EINTR(SandboxSyscall(__NR_close, fd)));
}
-} // namespace
+} // namespace
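
The assertions above compare SandboxSyscall() results directly against values such as -ENOMEM and -EBADF, i.e. the raw kernel convention rather than glibc's -1-plus-errno. A small illustrative helper (not part of this CL) that converts one convention into the other, assuming the -1..-4096 error range that trap.h documents:

    #include <errno.h>
    #include <stdint.h>

    // Map a raw kernel-style result onto the libc convention: a negative value
    // in the errno range sets errno and becomes -1; anything else is returned
    // unchanged.
    intptr_t ToLibcConvention(intptr_t raw) {
      if (raw < 0 && raw >= -4096) {
        errno = static_cast<int>(-raw);
        return -1;
      }
      return raw;
    }
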
diff --git a/sandbox/linux/seccomp-bpf/trap.cc b/sandbox/linux/seccomp-bpf/trap.cc
index 499c81b..ea95244 100644
--- a/sandbox/linux/seccomp-bpf/trap.cc
+++ b/sandbox/linux/seccomp-bpf/trap.cc
@@ -25,7 +25,6 @@
#include <limits>
-
namespace {
const int kCapacityIncrement = 20;
@@ -47,23 +46,21 @@ const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
// realtime signals. There are plenty of them. Unfortunately, there is no
// way to mark a signal as allocated. So, the potential for collision is
// possibly even worse.
-bool GetIsInSigHandler(const ucontext_t *ctx) {
+bool GetIsInSigHandler(const ucontext_t* ctx) {
// Note: on Android, sigismember does not take a pointer to const.
return sigismember(const_cast<sigset_t*>(&ctx->uc_sigmask), SIGBUS);
}
void SetIsInSigHandler() {
sigset_t mask;
- if (sigemptyset(&mask) ||
- sigaddset(&mask, SIGBUS) ||
+ if (sigemptyset(&mask) || sigaddset(&mask, SIGBUS) ||
sigprocmask(SIG_BLOCK, &mask, NULL)) {
SANDBOX_DIE("Failed to block SIGBUS");
}
}
bool IsDefaultSignalAction(const struct sigaction& sa) {
- if (sa.sa_flags & SA_SIGINFO ||
- sa.sa_handler != SIG_DFL) {
+ if (sa.sa_flags & SA_SIGINFO || sa.sa_handler != SIG_DFL) {
return false;
}
return true;
@@ -79,7 +76,7 @@ Trap::Trap()
trap_array_capacity_(0),
has_unsafe_traps_(false) {
// Set new SIGSYS handler
- struct sigaction sa = { };
+ struct sigaction sa = {};
sa.sa_sigaction = SigSysAction;
sa.sa_flags = SA_SIGINFO | SA_NODEFER;
struct sigaction old_sa;
@@ -94,14 +91,13 @@ Trap::Trap()
// Unmask SIGSYS
sigset_t mask;
- if (sigemptyset(&mask) ||
- sigaddset(&mask, SIGSYS) ||
+ if (sigemptyset(&mask) || sigaddset(&mask, SIGSYS) ||
sigprocmask(SIG_UNBLOCK, &mask, NULL)) {
SANDBOX_DIE("Failed to configure SIGSYS handler");
}
}
-Trap *Trap::GetInstance() {
+Trap* Trap::GetInstance() {
// Note: This class is not thread safe. It is the caller's responsibility
// to avoid race conditions. Normally, this is a non-issue as the sandbox
// can only be initialized if there are no other threads present.
@@ -116,15 +112,16 @@ Trap *Trap::GetInstance() {
return global_trap_;
}
-void Trap::SigSysAction(int nr, siginfo_t *info, void *void_context) {
+void Trap::SigSysAction(int nr, siginfo_t* info, void* void_context) {
if (!global_trap_) {
- RAW_SANDBOX_DIE("This can't happen. Found no global singleton instance "
- "for Trap() handling.");
+ RAW_SANDBOX_DIE(
+ "This can't happen. Found no global singleton instance "
+ "for Trap() handling.");
}
global_trap_->SigSys(nr, info, void_context);
}
-void Trap::SigSys(int nr, siginfo_t *info, void *void_context) {
+void Trap::SigSys(int nr, siginfo_t* info, void* void_context) {
// Signal handlers should always preserve "errno". Otherwise, we could
// trigger really subtle bugs.
const int old_errno = errno;
@@ -145,7 +142,7 @@ void Trap::SigSys(int nr, siginfo_t *info, void *void_context) {
// Obtain the signal context. This, most notably, gives us access to
// all CPU registers at the time of the signal.
- ucontext_t *ctx = reinterpret_cast<ucontext_t *>(void_context);
+ ucontext_t* ctx = reinterpret_cast<ucontext_t*>(void_context);
// Obtain the siginfo information that is specific to SIGSYS. Unfortunately,
// most versions of glibc don't include this information in siginfo_t. So,
@@ -154,7 +151,7 @@ void Trap::SigSys(int nr, siginfo_t *info, void *void_context) {
memcpy(&sigsys, &info->_sifields, sizeof(sigsys));
// Some more sanity checks.
- if (sigsys.ip != reinterpret_cast<void *>(SECCOMP_IP(ctx)) ||
+ if (sigsys.ip != reinterpret_cast<void*>(SECCOMP_IP(ctx)) ||
sigsys.nr != static_cast<int>(SECCOMP_SYSCALL(ctx)) ||
sigsys.arch != SECCOMP_ARCH) {
// TODO(markus):
@@ -172,9 +169,12 @@ void Trap::SigSys(int nr, siginfo_t *info, void *void_context) {
RAW_SANDBOX_DIE("Cannot call clone() from an UnsafeTrap() handler.");
}
rc = SandboxSyscall(sigsys.nr,
- SECCOMP_PARM1(ctx), SECCOMP_PARM2(ctx),
- SECCOMP_PARM3(ctx), SECCOMP_PARM4(ctx),
- SECCOMP_PARM5(ctx), SECCOMP_PARM6(ctx));
+ SECCOMP_PARM1(ctx),
+ SECCOMP_PARM2(ctx),
+ SECCOMP_PARM3(ctx),
+ SECCOMP_PARM4(ctx),
+ SECCOMP_PARM5(ctx),
+ SECCOMP_PARM6(ctx));
} else {
const ErrorCode& err = trap_array_[info->si_errno - 1];
if (!err.safe_) {
@@ -185,18 +185,13 @@ void Trap::SigSys(int nr, siginfo_t *info, void *void_context) {
// is what we are showing to TrapFnc callbacks that the system call
// evaluator registered with the sandbox.
struct arch_seccomp_data data = {
- sigsys.nr,
- SECCOMP_ARCH,
- reinterpret_cast<uint64_t>(sigsys.ip),
- {
- static_cast<uint64_t>(SECCOMP_PARM1(ctx)),
- static_cast<uint64_t>(SECCOMP_PARM2(ctx)),
- static_cast<uint64_t>(SECCOMP_PARM3(ctx)),
- static_cast<uint64_t>(SECCOMP_PARM4(ctx)),
- static_cast<uint64_t>(SECCOMP_PARM5(ctx)),
- static_cast<uint64_t>(SECCOMP_PARM6(ctx))
- }
- };
+ sigsys.nr, SECCOMP_ARCH, reinterpret_cast<uint64_t>(sigsys.ip),
+ {static_cast<uint64_t>(SECCOMP_PARM1(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM2(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM3(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM4(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM5(ctx)),
+ static_cast<uint64_t>(SECCOMP_PARM6(ctx))}};
// Now call the TrapFnc callback associated with this particular instance
// of SECCOMP_RET_TRAP.
@@ -207,7 +202,7 @@ void Trap::SigSys(int nr, siginfo_t *info, void *void_context) {
// that we just handled, and restore "errno" to the value that it had
// before entering the signal handler.
SECCOMP_RESULT(ctx) = static_cast<greg_t>(rc);
- errno = old_errno;
+ errno = old_errno;
return;
}
@@ -222,11 +217,11 @@ bool Trap::TrapKey::operator<(const TrapKey& o) const {
}
}
-ErrorCode Trap::MakeTrap(TrapFnc fnc, const void *aux, bool safe) {
+ErrorCode Trap::MakeTrap(TrapFnc fnc, const void* aux, bool safe) {
return GetInstance()->MakeTrapImpl(fnc, aux, safe);
}
-ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void *aux, bool safe) {
+ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void* aux, bool safe) {
if (!safe && !SandboxDebuggingAllowedByUser()) {
// Unless the user set the CHROME_SANDBOX_DEBUGGING environment variable,
// we never return an ErrorCode that is marked as "unsafe". This also
@@ -239,8 +234,9 @@ ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void *aux, bool safe) {
    // to understand. Removing the SANDBOX_DIE() allows callers to easily check
// whether unsafe traps are supported (by checking whether the returned
// ErrorCode is ET_INVALID).
- SANDBOX_DIE("Cannot use unsafe traps unless CHROME_SANDBOX_DEBUGGING "
- "is enabled");
+ SANDBOX_DIE(
+ "Cannot use unsafe traps unless CHROME_SANDBOX_DEBUGGING "
+ "is enabled");
return ErrorCode();
}
@@ -290,9 +286,9 @@ ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void *aux, bool safe) {
// against issues with the memory model or with completely asynchronous
// events.
if (trap_array_size_ >= trap_array_capacity_) {
- trap_array_capacity_ += kCapacityIncrement;
- ErrorCode *old_trap_array = trap_array_;
- ErrorCode *new_trap_array = new ErrorCode[trap_array_capacity_];
+ trap_array_capacity_ += kCapacityIncrement;
+ ErrorCode* old_trap_array = trap_array_;
+ ErrorCode* new_trap_array = new ErrorCode[trap_array_capacity_];
// Language specs are unclear on whether the compiler is allowed to move
// the "delete[]" above our preceding assignments and/or memory moves,
@@ -305,7 +301,7 @@ ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void *aux, bool safe) {
// legitimate worry; but they at least thought that the barrier is
// sufficient to prevent the (so far hypothetical) problem of re-ordering
// of instructions by the compiler.
- memcpy(new_trap_array, trap_array_, trap_array_size_*sizeof(ErrorCode));
+ memcpy(new_trap_array, trap_array_, trap_array_size_ * sizeof(ErrorCode));
asm volatile("" : "=r"(new_trap_array) : "0"(new_trap_array) : "memory");
trap_array_ = new_trap_array;
asm volatile("" : "=r"(trap_array_) : "0"(trap_array_) : "memory");
@@ -321,13 +317,12 @@ ErrorCode Trap::MakeTrapImpl(TrapFnc fnc, const void *aux, bool safe) {
}
bool Trap::SandboxDebuggingAllowedByUser() const {
- const char *debug_flag = getenv(kSandboxDebuggingEnv);
+ const char* debug_flag = getenv(kSandboxDebuggingEnv);
return debug_flag && *debug_flag;
}
-
bool Trap::EnableUnsafeTrapsInSigSysHandler() {
- Trap *trap = GetInstance();
+ Trap* trap = GetInstance();
if (!trap->has_unsafe_traps_) {
// Unsafe traps are a one-way fuse. Once enabled, they can never be turned
// off again.
@@ -340,8 +335,9 @@ bool Trap::EnableUnsafeTrapsInSigSysHandler() {
SANDBOX_INFO("WARNING! Disabling sandbox for debugging purposes");
trap->has_unsafe_traps_ = true;
} else {
- SANDBOX_INFO("Cannot disable sandbox and use unsafe traps unless "
- "CHROME_SANDBOX_DEBUGGING is turned on first");
+ SANDBOX_INFO(
+ "Cannot disable sandbox and use unsafe traps unless "
+ "CHROME_SANDBOX_DEBUGGING is turned on first");
}
}
// Returns the, possibly updated, value of has_unsafe_traps_.
@@ -356,6 +352,6 @@ ErrorCode Trap::ErrorCodeFromTrapId(uint16_t id) {
}
}
-Trap *Trap::global_trap_;
+Trap* Trap::global_trap_;
} // namespace playground2
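
A generic sketch (illustrative only, with the size/capacity bookkeeping stripped) of the grow-then-publish pattern that MakeTrapImpl() uses for trap_array_ above: fully populate the new array, separate the copy, the pointer publication and the delete[] with compiler barriers, and only then release the old array, so an asynchronously delivered SIGSYS handler never observes a half-copied table.

    #include <stddef.h>
    #include <string.h>

    struct Entry { int value; };

    Entry* g_table = 0;  // Read asynchronously from a signal handler.
    size_t g_size = 0;   // Number of valid entries in |g_table|.

    void GrowTable(size_t new_capacity) {
      Entry* old_table = g_table;
      Entry* new_table = new Entry[new_capacity];
      if (old_table) {
        memcpy(new_table, old_table, g_size * sizeof(Entry));
      }
      // Compiler barriers keep the copy, the pointer update and the delete[]
      // from being reordered relative to each other.
      asm volatile("" : "=r"(new_table) : "0"(new_table) : "memory");
      g_table = new_table;
      asm volatile("" : "=r"(g_table) : "0"(g_table) : "memory");
      delete[] old_table;
    }
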
diff --git a/sandbox/linux/seccomp-bpf/trap.h b/sandbox/linux/seccomp-bpf/trap.h
index 2a4c6ed..e839870 100644
--- a/sandbox/linux/seccomp-bpf/trap.h
+++ b/sandbox/linux/seccomp-bpf/trap.h
@@ -13,7 +13,6 @@
#include "sandbox/linux/seccomp-bpf/port.h"
-
namespace playground2 {
class ErrorCode;
@@ -41,13 +40,13 @@ class Trap {
// range -1..-4096. It should not set errno when reporting errors; on the
// other hand, accidentally modifying errno is harmless and the changes will
// be undone afterwards.
- typedef intptr_t (*TrapFnc)(const struct arch_seccomp_data& args, void *aux);
+ typedef intptr_t (*TrapFnc)(const struct arch_seccomp_data& args, void* aux);
// Registers a new trap handler and sets up the appropriate SIGSYS handler
// as needed.
// N.B.: This makes a permanent state change. Traps cannot be unregistered,
// as that would break existing BPF filters that are still active.
- static ErrorCode MakeTrap(TrapFnc fnc, const void *aux, bool safe);
+ static ErrorCode MakeTrap(TrapFnc fnc, const void* aux, bool safe);
// Enables support for unsafe traps in the SIGSYS signal handler. This is a
// one-way fuse. It works in conjunction with the BPF compiler emitting code
@@ -68,14 +67,10 @@ class Trap {
~Trap();
struct TrapKey {
- TrapKey(TrapFnc f, const void *a, bool s)
- : fnc(f),
- aux(a),
- safe(s) {
- }
- TrapFnc fnc;
- const void *aux;
- bool safe;
+ TrapKey(TrapFnc f, const void* a, bool s) : fnc(f), aux(a), safe(s) {}
+ TrapFnc fnc;
+ const void* aux;
+ bool safe;
bool operator<(const TrapKey&) const;
};
typedef std::map<TrapKey, uint16_t> TrapIds;
@@ -87,29 +82,27 @@ class Trap {
// It also gracefully deals with methods that should check for the singleton,
// but avoid instantiating it, if it doesn't exist yet
// (e.g. ErrorCodeFromTrapId()).
- static Trap *GetInstance();
- static void SigSysAction(int nr, siginfo_t *info, void *void_context);
+ static Trap* GetInstance();
+ static void SigSysAction(int nr, siginfo_t* info, void* void_context);
// Make sure that SigSys is not inlined in order to get slightly better crash
// dumps.
- void SigSys(int nr, siginfo_t *info, void *void_context)
- __attribute__ ((noinline));
- ErrorCode MakeTrapImpl(TrapFnc fnc, const void *aux, bool safe);
+ void SigSys(int nr, siginfo_t* info, void* void_context)
+ __attribute__((noinline));
+ ErrorCode MakeTrapImpl(TrapFnc fnc, const void* aux, bool safe);
bool SandboxDebuggingAllowedByUser() const;
-
-
// We have a global singleton that handles all of our SIGSYS traps. This
// variable must never be deallocated after it has been set up initially, as
// there is no way to reset in-kernel BPF filters that generate SIGSYS
// events.
- static Trap *global_trap_;
+ static Trap* global_trap_;
- TrapIds trap_ids_; // Maps from TrapKeys to numeric ids
- ErrorCode *trap_array_; // Array of ErrorCodes indexed by ids
- size_t trap_array_size_; // Currently used size of array
- size_t trap_array_capacity_; // Currently allocated capacity of array
- bool has_unsafe_traps_; // Whether unsafe traps have been enabled
+ TrapIds trap_ids_; // Maps from TrapKeys to numeric ids
+ ErrorCode* trap_array_; // Array of ErrorCodes indexed by ids
+ size_t trap_array_size_; // Currently used size of array
+ size_t trap_array_capacity_; // Currently allocated capacity of array
+ bool has_unsafe_traps_; // Whether unsafe traps have been enabled
// Our constructor is private. A shared global instance is created
// automatically as needed.
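
For orientation, a sketch (the handler name and the counter passed through |aux| are made up) of how a policy wires a handler with the TrapFnc signature shown above into the sandbox, following the same pattern as the broker and pthread policies in sandbox_bpf_unittest.cc; it assumes the same headers and using-declarations as that test file.

    // SIGSYS handler: |args| describes the intercepted system call, |aux| is
    // whatever pointer the policy registered. The return value is either a
    // syscall result or a negated errno value in the -1..-4096 range.
    intptr_t CountAndDenyHandler(const struct arch_seccomp_data& args, void* aux) {
      int* invocation_count = static_cast<int*>(aux);
      ++*invocation_count;
      return -EPERM;
    }

    // Policy fragment routing one system call to the handler via Sandbox::Trap().
    ErrorCode DenyGetpidPolicy(Sandbox* sandbox, int sysno, void* aux) {
      if (!Sandbox::IsValidSyscallNumber(sysno)) {
        return ErrorCode(ENOSYS);
      }
      if (sysno == __NR_getpid) {
        return ErrorCode(sandbox->Trap(CountAndDenyHandler, aux));
      }
      return ErrorCode(ErrorCode::ERR_ALLOWED);
    }
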
diff --git a/sandbox/linux/seccomp-bpf/verifier.cc b/sandbox/linux/seccomp-bpf/verifier.cc
index 798aced..1d6b26d 100644
--- a/sandbox/linux/seccomp-bpf/verifier.cc
+++ b/sandbox/linux/seccomp-bpf/verifier.cc
@@ -9,7 +9,6 @@
#include "sandbox/linux/seccomp-bpf/syscall_iterator.h"
#include "sandbox/linux/seccomp-bpf/verifier.h"
-
namespace {
using playground2::ErrorCode;
@@ -19,24 +18,20 @@ using playground2::arch_seccomp_data;
struct State {
State(const std::vector<struct sock_filter>& p,
- const struct arch_seccomp_data& d) :
- program(p),
- data(d),
- ip(0),
- accumulator(0),
- acc_is_valid(false) {
- }
+ const struct arch_seccomp_data& d)
+ : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
const std::vector<struct sock_filter>& program;
- const struct arch_seccomp_data& data;
- unsigned int ip;
- uint32_t accumulator;
- bool acc_is_valid;
+ const struct arch_seccomp_data& data;
+ unsigned int ip;
+ uint32_t accumulator;
+ bool acc_is_valid;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
-uint32_t EvaluateErrorCode(Sandbox *sandbox, const ErrorCode& code,
+uint32_t EvaluateErrorCode(Sandbox* sandbox,
+ const ErrorCode& code,
const struct arch_seccomp_data& data) {
if (code.error_type() == ErrorCode::ET_SIMPLE ||
code.error_type() == ErrorCode::ET_TRAP) {
@@ -45,49 +40,50 @@ uint32_t EvaluateErrorCode(Sandbox *sandbox, const ErrorCode& code,
if (code.width() == ErrorCode::TP_32BIT &&
(data.args[code.argno()] >> 32) &&
(data.args[code.argno()] & 0xFFFFFFFF80000000ull) !=
- 0xFFFFFFFF80000000ull) {
+ 0xFFFFFFFF80000000ull) {
return sandbox->Unexpected64bitArgument().err();
}
switch (code.op()) {
- case ErrorCode::OP_EQUAL:
- return EvaluateErrorCode(sandbox,
- (code.width() == ErrorCode::TP_32BIT
- ? uint32_t(data.args[code.argno()])
- : data.args[code.argno()]) == code.value()
- ? *code.passed()
- : *code.failed(),
- data);
- case ErrorCode::OP_HAS_ALL_BITS:
- return EvaluateErrorCode(sandbox,
- ((code.width() == ErrorCode::TP_32BIT
- ? uint32_t(data.args[code.argno()])
- : data.args[code.argno()]) & code.value())
- == code.value()
- ? *code.passed()
- : *code.failed(),
- data);
- case ErrorCode::OP_HAS_ANY_BITS:
- return EvaluateErrorCode(sandbox,
- (code.width() == ErrorCode::TP_32BIT
- ? uint32_t(data.args[code.argno()])
- : data.args[code.argno()]) & code.value()
- ? *code.passed()
- : *code.failed(),
- data);
- default:
- return SECCOMP_RET_INVALID;
+ case ErrorCode::OP_EQUAL:
+ return EvaluateErrorCode(sandbox,
+ (code.width() == ErrorCode::TP_32BIT
+ ? uint32_t(data.args[code.argno()])
+ : data.args[code.argno()]) == code.value()
+ ? *code.passed()
+ : *code.failed(),
+ data);
+ case ErrorCode::OP_HAS_ALL_BITS:
+ return EvaluateErrorCode(sandbox,
+ ((code.width() == ErrorCode::TP_32BIT
+ ? uint32_t(data.args[code.argno()])
+ : data.args[code.argno()]) &
+ code.value()) == code.value()
+ ? *code.passed()
+ : *code.failed(),
+ data);
+ case ErrorCode::OP_HAS_ANY_BITS:
+ return EvaluateErrorCode(sandbox,
+ (code.width() == ErrorCode::TP_32BIT
+ ? uint32_t(data.args[code.argno()])
+ : data.args[code.argno()]) &
+ code.value()
+ ? *code.passed()
+ : *code.failed(),
+ data);
+ default:
+ return SECCOMP_RET_INVALID;
}
} else {
return SECCOMP_RET_INVALID;
}
}
-bool VerifyErrorCode(Sandbox *sandbox,
+bool VerifyErrorCode(Sandbox* sandbox,
const std::vector<struct sock_filter>& program,
- struct arch_seccomp_data *data,
+ struct arch_seccomp_data* data,
const ErrorCode& root_code,
const ErrorCode& code,
- const char **err) {
+ const char** err) {
if (code.error_type() == ErrorCode::ET_SIMPLE ||
code.error_type() == ErrorCode::ET_TRAP) {
uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
@@ -110,102 +106,113 @@ bool VerifyErrorCode(Sandbox *sandbox,
return false;
}
switch (code.op()) {
- case ErrorCode::OP_EQUAL:
- // Verify that we can check a 32bit value (or the LSB of a 64bit value)
- // for equality.
- data->args[code.argno()] = code.value();
- if (!VerifyErrorCode(sandbox, program, data, root_code,
- *code.passed(), err)) {
- return false;
- }
-
- // Change the value to no longer match and verify that this is detected
- // as an inequality.
- data->args[code.argno()] = code.value() ^ 0x55AA55AA;
- if (!VerifyErrorCode(sandbox, program, data, root_code,
- *code.failed(), err)) {
- return false;
- }
-
- // BPF programs can only ever operate on 32bit values. So, we have
- // generated additional BPF instructions that inspect the MSB. Verify
- // that they behave as intended.
- if (code.width() == ErrorCode::TP_32BIT) {
- if (code.value() >> 32) {
- SANDBOX_DIE("Invalid comparison of a 32bit system call argument "
- "against a 64bit constant; this test is always false.");
- }
-
- // If the system call argument was intended to be a 32bit parameter,
- // verify that it is a fatal error if a 64bit value is ever passed
- // here.
- data->args[code.argno()] = 0x100000000ull;
- if (!VerifyErrorCode(sandbox, program, data, root_code,
- sandbox->Unexpected64bitArgument(),
- err)) {
+ case ErrorCode::OP_EQUAL:
+ // Verify that we can check a 32bit value (or the LSB of a 64bit value)
+ // for equality.
+ data->args[code.argno()] = code.value();
+ if (!VerifyErrorCode(
+ sandbox, program, data, root_code, *code.passed(), err)) {
return false;
}
- } else {
- // If the system call argument was intended to be a 64bit parameter,
- // verify that we can handle (in-)equality for the MSB. This is
- // essentially the same test that we did earlier for the LSB.
- // We only need to verify the behavior of the inequality test. We
- // know that the equality test already passed, as unlike the kernel
- // the Verifier does operate on 64bit quantities.
- data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull;
- if (!VerifyErrorCode(sandbox, program, data, root_code,
- *code.failed(), err)) {
+
+ // Change the value to no longer match and verify that this is detected
+ // as an inequality.
+ data->args[code.argno()] = code.value() ^ 0x55AA55AA;
+ if (!VerifyErrorCode(
+ sandbox, program, data, root_code, *code.failed(), err)) {
return false;
}
- }
- break;
- case ErrorCode::OP_HAS_ALL_BITS:
- case ErrorCode::OP_HAS_ANY_BITS:
- // A comprehensive test of bit values is difficult and potentially rather
- // time-expensive. We avoid doing so at run-time and instead rely on the
- // unittest for full testing. The test that we have here covers just the
- // common cases. We test against the bitmask itself, all zeros and all
- // ones.
- {
- // Testing "any" bits against a zero mask is always false. So, there
- // are some cases, where we expect tests to take the "failed()" branch
- // even though this is a test that normally should take "passed()".
- const ErrorCode& passed =
- (!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) ||
- // On a 32bit system, it is impossible to pass a 64bit value as a
- // system call argument. So, some additional tests always evaluate
- // as false.
- ((code.value() & ~uint64_t(uintptr_t(-1))) &&
- code.op() == ErrorCode::OP_HAS_ALL_BITS) ||
- (code.value() && !(code.value() & uintptr_t(-1)) &&
- code.op() == ErrorCode::OP_HAS_ANY_BITS)
+ // BPF programs can only ever operate on 32bit values. So, we have
+ // generated additional BPF instructions that inspect the MSB. Verify
+ // that they behave as intended.
+ if (code.width() == ErrorCode::TP_32BIT) {
+ if (code.value() >> 32) {
+ SANDBOX_DIE(
+ "Invalid comparison of a 32bit system call argument "
+ "against a 64bit constant; this test is always false.");
+ }
- ? *code.failed() : *code.passed();
+ // If the system call argument was intended to be a 32bit parameter,
+ // verify that it is a fatal error if a 64bit value is ever passed
+ // here.
+ data->args[code.argno()] = 0x100000000ull;
+ if (!VerifyErrorCode(sandbox,
+ program,
+ data,
+ root_code,
+ sandbox->Unexpected64bitArgument(),
+ err)) {
+ return false;
+ }
+ } else {
+ // If the system call argument was intended to be a 64bit parameter,
+ // verify that we can handle (in-)equality for the MSB. This is
+ // essentially the same test that we did earlier for the LSB.
+ // We only need to verify the behavior of the inequality test. We
+ // know that the equality test already passed, as unlike the kernel
+ // the Verifier does operate on 64bit quantities.
+ data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull;
+ if (!VerifyErrorCode(
+ sandbox, program, data, root_code, *code.failed(), err)) {
+ return false;
+ }
+ }
+ break;
+ case ErrorCode::OP_HAS_ALL_BITS:
+ case ErrorCode::OP_HAS_ANY_BITS:
+ // A comprehensive test of bit values is difficult and potentially
+ // rather
+ // time-expensive. We avoid doing so at run-time and instead rely on the
+ // unittest for full testing. The test that we have here covers just the
+ // common cases. We test against the bitmask itself, all zeros and all
+ // ones.
+ {
+ // Testing "any" bits against a zero mask is always false. So, there
+ // are some cases, where we expect tests to take the "failed()" branch
+ // even though this is a test that normally should take "passed()".
+ const ErrorCode& passed =
+ (!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) ||
- // Similary, testing for "all" bits in a zero mask is always true. So,
- // some cases pass despite them normally failing.
- const ErrorCode& failed =
- !code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS
- ? *code.passed() : *code.failed();
+ // On a 32bit system, it is impossible to pass a 64bit
+ // value as a
+ // system call argument. So, some additional tests always
+ // evaluate
+ // as false.
+ ((code.value() & ~uint64_t(uintptr_t(-1))) &&
+ code.op() == ErrorCode::OP_HAS_ALL_BITS) ||
+ (code.value() && !(code.value() & uintptr_t(-1)) &&
+ code.op() == ErrorCode::OP_HAS_ANY_BITS)
+ ? *code.failed()
+ : *code.passed();
- data->args[code.argno()] = code.value() & uintptr_t(-1);
- if (!VerifyErrorCode(sandbox, program, data, root_code, passed, err)) {
- return false;
- }
- data->args[code.argno()] = uintptr_t(-1);
- if (!VerifyErrorCode(sandbox, program, data, root_code, passed, err)) {
- return false;
- }
- data->args[code.argno()] = 0;
- if (!VerifyErrorCode(sandbox, program, data, root_code, failed, err)) {
- return false;
+ // Similary, testing for "all" bits in a zero mask is always true. So,
+ // some cases pass despite them normally failing.
+ const ErrorCode& failed =
+ !code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS
+ ? *code.passed()
+ : *code.failed();
+
+ data->args[code.argno()] = code.value() & uintptr_t(-1);
+ if (!VerifyErrorCode(
+ sandbox, program, data, root_code, passed, err)) {
+ return false;
+ }
+ data->args[code.argno()] = uintptr_t(-1);
+ if (!VerifyErrorCode(
+ sandbox, program, data, root_code, passed, err)) {
+ return false;
+ }
+ data->args[code.argno()] = 0;
+ if (!VerifyErrorCode(
+ sandbox, program, data, root_code, failed, err)) {
+ return false;
+ }
}
- }
- break;
- default: // TODO(markus): Need to add support for OP_GREATER
- *err = "Unsupported operation in conditional error code";
- return false;
+ break;
+ default: // TODO(markus): Need to add support for OP_GREATER
+ *err = "Unsupported operation in conditional error code";
+ return false;
}
} else {
*err = "Attempting to return invalid error code from BPF program";
@@ -214,16 +221,15 @@ bool VerifyErrorCode(Sandbox *sandbox,
return true;
}
-void Ld(State *state, const struct sock_filter& insn, const char **err) {
- if (BPF_SIZE(insn.code) != BPF_W ||
- BPF_MODE(insn.code) != BPF_ABS) {
+void Ld(State* state, const struct sock_filter& insn, const char** err) {
+ if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) {
*err = "Invalid BPF_LD instruction";
return;
}
if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) {
// We only allow loading of properly aligned 32bit quantities.
memcpy(&state->accumulator,
- reinterpret_cast<const char *>(&state->data) + insn.k,
+ reinterpret_cast<const char*>(&state->data) + insn.k,
4);
} else {
*err = "Invalid operand in BPF_LD instruction";
@@ -233,7 +239,7 @@ void Ld(State *state, const struct sock_filter& insn, const char **err) {
return;
}
-void Jmp(State *state, const struct sock_filter& insn, const char **err) {
+void Jmp(State* state, const struct sock_filter& insn, const char** err) {
if (BPF_OP(insn.code) == BPF_JA) {
if (state->ip + insn.k + 1 >= state->program.size() ||
state->ip + insn.k + 1 <= state->ip) {
@@ -243,48 +249,47 @@ void Jmp(State *state, const struct sock_filter& insn, const char **err) {
}
state->ip += insn.k;
} else {
- if (BPF_SRC(insn.code) != BPF_K ||
- !state->acc_is_valid ||
+ if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid ||
state->ip + insn.jt + 1 >= state->program.size() ||
state->ip + insn.jf + 1 >= state->program.size()) {
goto compilation_failure;
}
switch (BPF_OP(insn.code)) {
- case BPF_JEQ:
- if (state->accumulator == insn.k) {
- state->ip += insn.jt;
- } else {
- state->ip += insn.jf;
- }
- break;
- case BPF_JGT:
- if (state->accumulator > insn.k) {
- state->ip += insn.jt;
- } else {
- state->ip += insn.jf;
- }
- break;
- case BPF_JGE:
- if (state->accumulator >= insn.k) {
- state->ip += insn.jt;
- } else {
- state->ip += insn.jf;
- }
- break;
- case BPF_JSET:
- if (state->accumulator & insn.k) {
- state->ip += insn.jt;
- } else {
- state->ip += insn.jf;
- }
- break;
- default:
- goto compilation_failure;
+ case BPF_JEQ:
+ if (state->accumulator == insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ case BPF_JGT:
+ if (state->accumulator > insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ case BPF_JGE:
+ if (state->accumulator >= insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ case BPF_JSET:
+ if (state->accumulator & insn.k) {
+ state->ip += insn.jt;
+ } else {
+ state->ip += insn.jf;
+ }
+ break;
+ default:
+ goto compilation_failure;
}
}
}
-uint32_t Ret(State *, const struct sock_filter& insn, const char **err) {
+uint32_t Ret(State*, const struct sock_filter& insn, const char** err) {
if (BPF_SRC(insn.code) != BPF_K) {
*err = "Invalid BPF_RET instruction";
return 0;
@@ -292,7 +297,7 @@ uint32_t Ret(State *, const struct sock_filter& insn, const char **err) {
return insn.k;
}
-void Alu(State *state, const struct sock_filter& insn, const char **err) {
+void Alu(State* state, const struct sock_filter& insn, const char** err) {
if (BPF_OP(insn.code) == BPF_NEG) {
state->accumulator = -state->accumulator;
return;
@@ -302,55 +307,55 @@ void Alu(State *state, const struct sock_filter& insn, const char **err) {
return;
}
switch (BPF_OP(insn.code)) {
- case BPF_ADD:
- state->accumulator += insn.k;
- break;
- case BPF_SUB:
- state->accumulator -= insn.k;
- break;
- case BPF_MUL:
- state->accumulator *= insn.k;
- break;
- case BPF_DIV:
- if (!insn.k) {
- *err = "Illegal division by zero";
+ case BPF_ADD:
+ state->accumulator += insn.k;
break;
- }
- state->accumulator /= insn.k;
- break;
- case BPF_MOD:
- if (!insn.k) {
- *err = "Illegal division by zero";
+ case BPF_SUB:
+ state->accumulator -= insn.k;
break;
- }
- state->accumulator %= insn.k;
- break;
- case BPF_OR:
- state->accumulator |= insn.k;
- break;
- case BPF_XOR:
- state->accumulator ^= insn.k;
- break;
- case BPF_AND:
- state->accumulator &= insn.k;
- break;
- case BPF_LSH:
- if (insn.k > 32) {
- *err = "Illegal shift operation";
+ case BPF_MUL:
+ state->accumulator *= insn.k;
break;
- }
- state->accumulator <<= insn.k;
- break;
- case BPF_RSH:
- if (insn.k > 32) {
- *err = "Illegal shift operation";
+ case BPF_DIV:
+ if (!insn.k) {
+ *err = "Illegal division by zero";
+ break;
+ }
+ state->accumulator /= insn.k;
+ break;
+ case BPF_MOD:
+ if (!insn.k) {
+ *err = "Illegal division by zero";
+ break;
+ }
+ state->accumulator %= insn.k;
+ break;
+ case BPF_OR:
+ state->accumulator |= insn.k;
+ break;
+ case BPF_XOR:
+ state->accumulator ^= insn.k;
+ break;
+ case BPF_AND:
+ state->accumulator &= insn.k;
+ break;
+ case BPF_LSH:
+ if (insn.k > 32) {
+ *err = "Illegal shift operation";
+ break;
+ }
+ state->accumulator <<= insn.k;
+ break;
+ case BPF_RSH:
+ if (insn.k > 32) {
+ *err = "Illegal shift operation";
+ break;
+ }
+ state->accumulator >>= insn.k;
+ break;
+ default:
+ *err = "Invalid operator in arithmetic operation";
break;
- }
- state->accumulator >>= insn.k;
- break;
- default:
- *err = "Invalid operator in arithmetic operation";
- break;
}
}
}
@@ -359,12 +364,12 @@ void Alu(State *state, const struct sock_filter& insn, const char **err) {
namespace playground2 {
-bool Verifier::VerifyBPF(Sandbox *sandbox,
+bool Verifier::VerifyBPF(Sandbox* sandbox,
const std::vector<struct sock_filter>& program,
const SandboxBpfPolicy& policy,
- const char **err) {
+ const char** err) {
*err = NULL;
- for (SyscallIterator iter(false); !iter.Done(); ) {
+ for (SyscallIterator iter(false); !iter.Done();) {
uint32_t sysnum = iter.Next();
// We ideally want to iterate over the full system call range and values
// just above and just below this range. This gives us the full result set
@@ -373,8 +378,8 @@ bool Verifier::VerifyBPF(Sandbox *sandbox,
// indicates either i386 or x86-64; and a set bit 30 indicates x32. And
// unless we pay attention to setting this bit correctly, an early check in
// our BPF program will make us fail with a misleading error code.
- struct arch_seccomp_data data = { static_cast<int>(sysnum),
- static_cast<uint32_t>(SECCOMP_ARCH) };
+ struct arch_seccomp_data data = {static_cast<int>(sysnum),
+ static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
if (!(sysnum & 0x40000000u)) {
@@ -396,7 +401,7 @@ bool Verifier::VerifyBPF(Sandbox *sandbox,
uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
const struct arch_seccomp_data& data,
- const char **err) {
+ const char** err) {
*err = NULL;
if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) {
*err = "Invalid program length";
@@ -409,33 +414,34 @@ uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program,
}
const struct sock_filter& insn = program[state.ip];
switch (BPF_CLASS(insn.code)) {
- case BPF_LD:
- Ld(&state, insn, err);
- break;
- case BPF_JMP:
- Jmp(&state, insn, err);
- break;
- case BPF_RET: {
- uint32_t r = Ret(&state, insn, err);
- switch (r & SECCOMP_RET_ACTION) {
- case SECCOMP_RET_TRAP:
- case SECCOMP_RET_ERRNO:
- case SECCOMP_RET_ALLOW:
+ case BPF_LD:
+ Ld(&state, insn, err);
break;
- case SECCOMP_RET_KILL: // We don't ever generate this
- case SECCOMP_RET_TRACE: // We don't ever generate this
- case SECCOMP_RET_INVALID: // Should never show up in BPF program
- default:
- *err = "Unexpected return code found in BPF program";
- return 0;
+ case BPF_JMP:
+ Jmp(&state, insn, err);
+ break;
+ case BPF_RET: {
+ uint32_t r = Ret(&state, insn, err);
+ switch (r & SECCOMP_RET_ACTION) {
+ case SECCOMP_RET_TRAP:
+ case SECCOMP_RET_ERRNO:
+ case SECCOMP_RET_ALLOW:
+ break;
+ case SECCOMP_RET_KILL: // We don't ever generate this
+ case SECCOMP_RET_TRACE: // We don't ever generate this
+ case SECCOMP_RET_INVALID: // Should never show up in BPF program
+ default:
+ *err = "Unexpected return code found in BPF program";
+ return 0;
+ }
+ return r;
}
- return r; }
- case BPF_ALU:
- Alu(&state, insn, err);
- break;
- default:
- *err = "Unexpected instruction in BPF program";
- break;
+ case BPF_ALU:
+ Alu(&state, insn, err);
+ break;
+ default:
+ *err = "Unexpected instruction in BPF program";
+ break;
}
}
return 0;
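
For reference, the BPF_CLASS()/BPF_OP() decoding used by the interpreter above comes straight from the kernel's classic-BPF headers. A small self-contained sketch (illustrative only, assuming the Linux uapi headers are available):

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <cstdio>

int main() {
  // A single "return SECCOMP_RET_ALLOW" instruction, the kind of terminal
  // instruction EvaluateBPF() accepts (TRAP, ERRNO or ALLOW actions only).
  struct sock_filter insn = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);

  switch (BPF_CLASS(insn.code)) {
    case BPF_RET:
      printf("return action: 0x%x\n", insn.k & SECCOMP_RET_ACTION);
      break;
    default:
      printf("not a return instruction\n");
      break;
  }
  return 0;
}
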
diff --git a/sandbox/linux/seccomp-bpf/verifier.h b/sandbox/linux/seccomp-bpf/verifier.h
index 02a9d37..fff5b63 100644
--- a/sandbox/linux/seccomp-bpf/verifier.h
+++ b/sandbox/linux/seccomp-bpf/verifier.h
@@ -10,7 +10,6 @@
#include <utility>
#include <vector>
-
namespace playground2 {
class SandboxBpfPolicy;
@@ -24,10 +23,10 @@ class Verifier {
// set by the "evaluators".
// Upon success, "err" is set to NULL. Upon failure, it contains a static
// error message that does not need to be free()'d.
- static bool VerifyBPF(Sandbox *sandbox,
+ static bool VerifyBPF(Sandbox* sandbox,
const std::vector<struct sock_filter>& program,
const SandboxBpfPolicy& policy,
- const char **err);
+ const char** err);
// Evaluate a given BPF program for a particular set of system call
// parameters. If evaluation failed for any reason, "err" will be set to
@@ -39,7 +38,7 @@ class Verifier {
// BPF compiler, we might have to extend this BPF interpreter.
static uint32_t EvaluateBPF(const std::vector<struct sock_filter>& program,
const struct arch_seccomp_data& data,
- const char **err);
+ const char** err);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Verifier);
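
The declarations above fix the public surface of the verifier. A possible call site might look as follows; this is a sketch only, not code from this CL, and it assumes the sandbox's own headers already provide arch_seccomp_data and SECCOMP_ARCH:

#include <stdint.h>
#include <vector>

#include "sandbox/linux/seccomp-bpf/verifier.h"

// Sketch: evaluate a compiled filter for one system call number and report
// the raw seccomp action, or 0 if the interpreter flagged an error.
uint32_t ActionForSyscall(const std::vector<struct sock_filter>& program,
                          uint32_t sysnum) {
  const char* err = NULL;
  struct arch_seccomp_data data = {static_cast<int>(sysnum),
                                   static_cast<uint32_t>(SECCOMP_ARCH)};
  uint32_t action = playground2::Verifier::EvaluateBPF(program, data, &err);
  return err ? 0 : action;
}
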
diff --git a/sandbox/linux/tests/main.cc b/sandbox/linux/tests/main.cc
index 754b310..8fd85d9 100644
--- a/sandbox/linux/tests/main.cc
+++ b/sandbox/linux/tests/main.cc
@@ -5,7 +5,7 @@
#include "base/at_exit.h"
#include "testing/gtest/include/gtest/gtest.h"
-int main(int argc, char *argv[]) {
+int main(int argc, char* argv[]) {
// The use of Callbacks requires an AtExitManager.
base::AtExitManager exit_manager;
testing::InitGoogleTest(&argc, argv);
diff --git a/sandbox/linux/tests/unit_tests.cc b/sandbox/linux/tests/unit_tests.cc
index 02996b7..3dced7f 100644
--- a/sandbox/linux/tests/unit_tests.cc
+++ b/sandbox/linux/tests/unit_tests.cc
@@ -59,9 +59,7 @@ bool IsArchitectureArm() {
// TODO(jln): figure out why base/.../dynamic_annotations.h's
// RunningOnValgrind() cannot link.
-bool IsRunningOnValgrind() {
- return RUNNING_ON_VALGRIND;
-}
+bool IsRunningOnValgrind() { return RUNNING_ON_VALGRIND; }
static const int kExpectedValue = 42;
static const int kIgnoreThisTest = 43;
@@ -69,13 +67,13 @@ static const int kExitWithAssertionFailure = 1;
static const int kExitForTimeout = 2;
static void SigAlrmHandler(int) {
- const char failure_message[] = "Timeout reached!\n";
- // Make sure that we never block here.
- if (!fcntl(2, F_SETFL, O_NONBLOCK)) {
- if (write(2, failure_message, sizeof(failure_message) - 1) < 0) {
- }
+ const char failure_message[] = "Timeout reached!\n";
+ // Make sure that we never block here.
+ if (!fcntl(2, F_SETFL, O_NONBLOCK)) {
+ if (write(2, failure_message, sizeof(failure_message) - 1) < 0) {
}
- _exit(kExitForTimeout);
+ }
+ _exit(kExitForTimeout);
}
// Set a timeout with a handler that will automatically fail the
@@ -105,8 +103,10 @@ static void SetProcessTimeout(int time_in_seconds) {
// in the BPF sandbox, as it potentially makes global state changes and as
// it also tends to raise fatal errors, if the code has been used in an
// insecure manner.
-void UnitTests::RunTestInProcess(UnitTests::Test test, void *arg,
- DeathCheck death, const void *death_aux) {
+void UnitTests::RunTestInProcess(UnitTests::Test test,
+ void* arg,
+ DeathCheck death,
+ const void* death_aux) {
// We need to fork(), so we can't be multi-threaded, as threads could hold
// locks.
int num_threads = CountThreads();
@@ -144,7 +144,7 @@ void UnitTests::RunTestInProcess(UnitTests::Test test, void *arg,
// Disable core files. They are not very useful for our individual test
// cases.
- struct rlimit no_core = { 0 };
+ struct rlimit no_core = {0};
setrlimit(RLIMIT_CORE, &no_core);
test(arg);
@@ -157,9 +157,9 @@ void UnitTests::RunTestInProcess(UnitTests::Test test, void *arg,
// Make sure read() will never block as we'll use poll() to
// block with a timeout instead.
- const int fcntl_ret = fcntl(fds[0], F_SETFL, O_NONBLOCK);
+ const int fcntl_ret = fcntl(fds[0], F_SETFL, O_NONBLOCK);
ASSERT_EQ(fcntl_ret, 0);
- struct pollfd poll_fd = { fds[0], POLLIN | POLLRDHUP, 0 };
+ struct pollfd poll_fd = {fds[0], POLLIN | POLLRDHUP, 0};
int poll_ret;
// We prefer the SIGALRM timeout to trigger in the child than this timeout
@@ -198,8 +198,7 @@ void UnitTests::RunTestInProcess(UnitTests::Test test, void *arg,
}
}
-void UnitTests::DeathSuccess(int status, const std::string& msg,
- const void *) {
+void UnitTests::DeathSuccess(int status, const std::string& msg, const void*) {
std::string details(TestFailedMessage(msg));
bool subprocess_terminated_normally = WIFEXITED(status);
@@ -210,22 +209,24 @@ void UnitTests::DeathSuccess(int status, const std::string& msg,
EXPECT_FALSE(subprocess_exited_but_printed_messages) << details;
}
-void UnitTests::DeathMessage(int status, const std::string& msg,
- const void *aux) {
+void UnitTests::DeathMessage(int status,
+ const std::string& msg,
+ const void* aux) {
std::string details(TestFailedMessage(msg));
- const char *expected_msg = static_cast<const char *>(aux);
+ const char* expected_msg = static_cast<const char*>(aux);
bool subprocess_terminated_normally = WIFEXITED(status);
ASSERT_TRUE(subprocess_terminated_normally) << details;
int subprocess_exit_status = WEXITSTATUS(status);
ASSERT_EQ(kExitWithAssertionFailure, subprocess_exit_status) << details;
bool subprocess_exited_without_matching_message =
- msg.find(expected_msg) == std::string::npos;
+ msg.find(expected_msg) == std::string::npos;
EXPECT_FALSE(subprocess_exited_without_matching_message) << details;
}
-void UnitTests::DeathExitCode(int status, const std::string& msg,
- const void *aux) {
+void UnitTests::DeathExitCode(int status,
+ const std::string& msg,
+ const void* aux) {
int expected_exit_code = static_cast<int>(reinterpret_cast<intptr_t>(aux));
std::string details(TestFailedMessage(msg));
@@ -235,8 +236,9 @@ void UnitTests::DeathExitCode(int status, const std::string& msg,
ASSERT_EQ(subprocess_exit_status, expected_exit_code) << details;
}
-void UnitTests::DeathBySignal(int status, const std::string& msg,
- const void *aux) {
+void UnitTests::DeathBySignal(int status,
+ const std::string& msg,
+ const void* aux) {
int expected_signo = static_cast<int>(reinterpret_cast<intptr_t>(aux));
std::string details(TestFailedMessage(msg));
@@ -246,8 +248,7 @@ void UnitTests::DeathBySignal(int status, const std::string& msg,
ASSERT_EQ(subprocess_signal_number, expected_signo) << details;
}
-void UnitTests::AssertionFailure(const char *expr, const char *file,
- int line) {
+void UnitTests::AssertionFailure(const char* expr, const char* file, int line) {
fprintf(stderr, "%s:%d:%s", file, line, expr);
fflush(stderr);
_exit(kExitWithAssertionFailure);
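
The Death*() checks above all decode the child's wait status with the standard POSIX macros. A minimal standalone sketch of the fork-and-inspect pattern that RunTestInProcess() builds on (the real code additionally pipes stderr back, arms a SIGALRM timeout and disables core files):

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

int main() {
  pid_t pid = fork();
  if (pid == 0) {
    _exit(42);  // Child: stands in for the test body.
  }
  int status = 0;
  waitpid(pid, &status, 0);
  if (WIFEXITED(status)) {
    printf("exited with code %d\n", WEXITSTATUS(status));
  } else if (WIFSIGNALED(status)) {
    printf("killed by signal %d\n", WTERMSIG(status));
  }
  return 0;
}
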
diff --git a/sandbox/linux/tests/unit_tests.h b/sandbox/linux/tests/unit_tests.h
index c6d1b4d..5480b56 100644
--- a/sandbox/linux/tests/unit_tests.h
+++ b/sandbox/linux/tests/unit_tests.h
@@ -33,59 +33,62 @@ bool IsRunningOnValgrind();
// NOTE: If you do decide to write your own DeathCheck, make sure to use
// gtests's ASSERT_XXX() macros instead of SANDBOX_ASSERT(). See
// unit_tests.cc for examples.
-#define DEATH_SUCCESS() sandbox::UnitTests::DeathSuccess, NULL
-#define DEATH_MESSAGE(msg) sandbox::UnitTests::DeathMessage, \
- static_cast<const void *>( \
- static_cast<const char *>(msg))
-#define DEATH_EXIT_CODE(rc) sandbox::UnitTests::DeathExitCode, \
- reinterpret_cast<void *>(static_cast<intptr_t>(rc))
-#define DEATH_BY_SIGNAL(s) sandbox::UnitTests::DeathExitCode, \
- reinterpret_cast<void *>(static_cast<intptr_t>(s))
+#define DEATH_SUCCESS() sandbox::UnitTests::DeathSuccess, NULL
+#define DEATH_MESSAGE(msg) \
+ sandbox::UnitTests::DeathMessage, \
+ static_cast<const void*>(static_cast<const char*>(msg))
+#define DEATH_EXIT_CODE(rc) \
+ sandbox::UnitTests::DeathExitCode, \
+ reinterpret_cast<void*>(static_cast<intptr_t>(rc))
+#define DEATH_BY_SIGNAL(s) \
+ sandbox::UnitTests::DeathExitCode, \
+ reinterpret_cast<void*>(static_cast<intptr_t>(s))
// A SANDBOX_DEATH_TEST is just like a SANDBOX_TEST (see below), but it assumes
// that the test actually dies. The death test only passes if the death occurs
// in the expected fashion, as specified by "death" and "death_aux". These two
// parameters are typically set to one of the DEATH_XXX() macros.
-#define SANDBOX_DEATH_TEST(test_case_name, test_name, death) \
- void TEST_##test_name(void *); \
- TEST(test_case_name, test_name) { \
- sandbox::UnitTests::RunTestInProcess(TEST_##test_name, NULL, death); \
- } \
- void TEST_##test_name(void *)
+#define SANDBOX_DEATH_TEST(test_case_name, test_name, death) \
+ void TEST_##test_name(void*); \
+ TEST(test_case_name, test_name) { \
+ sandbox::UnitTests::RunTestInProcess(TEST_##test_name, NULL, death); \
+ } \
+ void TEST_##test_name(void*)
// Define a new test case that runs inside of a GTest death test. This is
// necessary, as most of our tests by definition make global and irreversible
// changes to the system (i.e. they install a sandbox). GTest provides death
// tests as a tool to isolate global changes from the rest of the tests.
-#define SANDBOX_TEST(test_case_name, test_name) \
+#define SANDBOX_TEST(test_case_name, test_name) \
SANDBOX_DEATH_TEST(test_case_name, test_name, DEATH_SUCCESS())
// Simple assertion macro that is compatible with running inside of a death
// test. We unfortunately cannot use any of the GTest macros.
#define SANDBOX_STR(x) #x
-#define SANDBOX_ASSERT(expr) \
- ((expr) \
- ? static_cast<void>(0) \
- : sandbox::UnitTests::AssertionFailure(SANDBOX_STR(expr), \
- __FILE__, __LINE__))
+#define SANDBOX_ASSERT(expr) \
+ ((expr) ? static_cast<void>(0) : sandbox::UnitTests::AssertionFailure( \
+ SANDBOX_STR(expr), __FILE__, __LINE__))
class UnitTests {
public:
- typedef void (*Test)(void *);
- typedef void (*DeathCheck)(int status, const std::string& msg,
- const void *aux);
+ typedef void (*Test)(void*);
+ typedef void (*DeathCheck)(int status,
+ const std::string& msg,
+ const void* aux);
// Runs a test inside a short-lived process. Do not call this function
// directly. It is automatically invoked by SANDBOX_TEST(). Most sandboxing
// functions make global irreversible changes to the execution environment
// and must therefore execute in their own isolated process.
- static void RunTestInProcess(Test test, void *arg, DeathCheck death,
- const void *death_aux);
+ static void RunTestInProcess(Test test,
+ void* arg,
+ DeathCheck death,
+ const void* death_aux);
// Report a useful error message and terminate the current SANDBOX_TEST().
// Calling this function from outside a SANDBOX_TEST() is unlikely to do
// anything useful.
- static void AssertionFailure(const char *expr, const char *file, int line);
+ static void AssertionFailure(const char* expr, const char* file, int line);
// Sometimes we determine at run-time that a test should be disabled.
// Call this method if we want to return from a test and completely
@@ -98,29 +101,30 @@ class UnitTests {
  // A DeathCheck method that verifies that the test completed successfully.
// This is the default test mode for SANDBOX_TEST(). The "aux" parameter
// of this DeathCheck is unused (and thus unnamed)
- static void DeathSuccess(int status, const std::string& msg, const void *);
+ static void DeathSuccess(int status, const std::string& msg, const void*);
// A DeathCheck method that verifies that the test completed with error
// code "1" and printed a message containing a particular substring. The
// "aux" pointer should point to a C-string containing the expected error
// message. This method is useful for checking assertion failures such as
// in SANDBOX_ASSERT() and/or SANDBOX_DIE().
- static void DeathMessage(int status, const std::string& msg,
- const void *aux);
+ static void DeathMessage(int status, const std::string& msg, const void* aux);
// A DeathCheck method that verifies that the test completed with a
// particular exit code. If the test output any messages to stderr, they are
// silently ignored. The expected exit code should be passed in by
  // casting its "int" value to a "void *", which is then used for "aux".
- static void DeathExitCode(int status, const std::string& msg,
- const void *aux);
+ static void DeathExitCode(int status,
+ const std::string& msg,
+ const void* aux);
// A DeathCheck method that verifies that the test was terminated by a
// particular signal. If the test output any messages to stderr, they are
  // silently ignored. The expected signal number should be passed in by
  // casting its "int" value to a "void *", which is then used for "aux".
- static void DeathBySignal(int status, const std::string& msg,
- const void *aux);
+ static void DeathBySignal(int status,
+ const std::string& msg,
+ const void* aux);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(UnitTests);
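
For context, the macros declared above are used roughly as follows; the test-case and test names here are made up for illustration, and each body runs in a forked child whose termination is judged by the selected DeathCheck:

#include <unistd.h>

#include "sandbox/linux/tests/unit_tests.h"

// Runs in its own process; a failing SANDBOX_ASSERT() calls
// UnitTests::AssertionFailure() and fails the test.
SANDBOX_TEST(UnitTestsExample, AssertInsideChild) {
  SANDBOX_ASSERT(1 + 1 == 2);
}

// Passes only if the child really exits with the given code.
SANDBOX_DEATH_TEST(UnitTestsExample,
                   ExitsWithExpectedCode,
                   DEATH_EXIT_CODE(1)) {
  _exit(1);
}
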