// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MOJO_NACL_MOJO_SYSCALL_INTERNAL_H_
#define MOJO_NACL_MOJO_SYSCALL_INTERNAL_H_

#include "native_client/src/trusted/service_runtime/nacl_copy.h"
#include "native_client/src/trusted/service_runtime/sel_ldr.h"

namespace {

class ScopedCopyLock {
 public:
  explicit ScopedCopyLock(struct NaClApp* nap) : nap_(nap) {
    NaClCopyTakeLock(nap_);
  }
  ~ScopedCopyLock() {
    NaClCopyDropLock(nap_);
  }
 private:
  struct NaClApp* nap_;
};
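
// Typical use (sketch): hold the copy lock across the whole validate-and-copy
// sequence, so the untrusted address space cannot change out from under the
// trusted pointers produced by the Convert* helpers below:
//
//   {
//     ScopedCopyLock copy_lock(nap);
//     // ...convert user pointers, then copy data in or out...
//   } // Lock dropped here.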

static inline uintptr_t NaClUserToSysAddrArray(
    struct NaClApp* nap,
    uint32_t uaddr,
    size_t count,
    size_t size) {
  // Reject counts whose total byte size would overflow size_t: a wrapped
  // range (e.g. count == SIZE_MAX / 2 + 1 with size == 2) would validate a
  // far smaller window than the caller intends to touch.
  if (size != 0 && count > SIZE_MAX / size) {
    return kNaClBadAddress;
  }
  size_t range = count * size;
  return NaClUserToSysAddrRange(nap, uaddr, range);
}

// We don't use plain memcpy because reads and writes to the untrusted
// address space from trusted code must be volatile.  Non-volatile memory
// operations are dangerous because the compiler is free to materialize a
// second load from the same address, or a load from an address that was just
// stored to, and to assume the materialized load returns the same value as
// the earlier load or store.  A data race in untrusted code could make the
// materialized load return a different value, however, which can lead to
// time-of-check vs. time-of-use bugs, or worse.  For this binding code in
// particular, where memcpy would be called with a constant size, it is
// entirely conceivable that the call would be inlined, unrolled, and
// optimized into exactly such a load sequence.
static inline void memcpy_volatile_out(
    void volatile* dst,
    const void* src,
    size_t n) {
  char volatile* c_dst = static_cast<char volatile*>(dst);
  const char* c_src = static_cast<const char*>(src);
  for (size_t i = 0; i < n; i++) {
    c_dst[i] = c_src[i];
  }
}
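
// Hypothetical companion helper (illustrative sketch): the inbound direction
// needs the same treatment, reading each untrusted byte exactly once through
// a volatile lvalue.  The binding code reads scalars via the volatile casts
// in the Convert* helpers below; a byte-wise inbound copy would look like
// this.
static inline void memcpy_volatile_in(
    void* dst,
    const void volatile* src,
    size_t n) {
  char* c_dst = static_cast<char*>(dst);
  const char volatile* c_src = static_cast<const char volatile*>(src);
  for (size_t i = 0; i < n; i++) {
    c_dst[i] = c_src[i];
  }
}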

template <typename T> bool ConvertScalarInput(
    struct NaClApp* nap,
    uint32_t user_ptr,
    T* value) {
  if (user_ptr) {
    uintptr_t temp = NaClUserToSysAddrRange(nap, user_ptr, sizeof(T));
    if (temp != kNaClBadAddress) {
      *value = *reinterpret_cast<T volatile*>(temp);
      return true;
    }
  }
  return false;
}

template <typename T> bool ConvertScalarOutput(
    struct NaClApp* nap,
    uint32_t user_ptr,
    bool optional,
    T volatile** sys_ptr) {
  if (user_ptr) {
    uintptr_t temp = NaClUserToSysAddrRange(nap, user_ptr, sizeof(T));
    if (temp != kNaClBadAddress) {
      *sys_ptr = reinterpret_cast<T volatile*>(temp);
      return true;
    }
  } else if (optional) {
    // Consistent with the other Convert* helpers: a null pointer is allowed
    // only when the output is optional; a non-null pointer that fails
    // validation is always an error.
    *sys_ptr = 0;
    return true;
  }
  *sys_ptr = 0; // Paranoia.
  return false;
}

template <typename T> bool ConvertScalarInOut(
    struct NaClApp* nap,
    uint32_t user_ptr,
    bool optional,
    T* value,
    T volatile** sys_ptr) {
  if (user_ptr) {
    uintptr_t temp = NaClUserToSysAddrRange(nap, user_ptr, sizeof(T));
    if (temp != kNaClBadAddress) {
      T volatile* converted = reinterpret_cast<T volatile*>(temp);
      *sys_ptr = converted;
      *value = *converted;
      return true;
    }
  } else if (optional) {
    *sys_ptr = 0;
    *value = static_cast<T>(0); // Paranoia.
    return true;
  }
  *sys_ptr = 0; // Paranoia.
  *value = static_cast<T>(0); // Paranoia.
  return false;
}

template <typename T> bool ConvertArray(
    struct NaClApp* nap,
    uint32_t user_ptr,
    uint32_t length,
    size_t element_size,
    bool optional,
    T** sys_ptr) {
  if (user_ptr) {
    uintptr_t temp = NaClUserToSysAddrArray(nap, user_ptr, length,
                                            element_size);
    if (temp != kNaClBadAddress) {
      *sys_ptr = reinterpret_cast<T*>(temp);
      return true;
    }
  } else if (optional) {
    *sys_ptr = 0;
    return true;
  }
  return false;
}

template <typename T> bool ConvertBytes(
    struct NaClApp* nap,
    uint32_t user_ptr,
    uint32_t length,
    bool optional,
    T** sys_ptr) {
  if (user_ptr) {
    uintptr_t temp = NaClUserToSysAddrRange(nap, user_ptr, length);
    if (temp != kNaClBadAddress) {
      *sys_ptr = reinterpret_cast<T*>(temp);
      return true;
    }
  } else if (optional) {
    *sys_ptr = 0;
    return true;
  }
  return false;
}

// TODO(ncbray): size validation and complete copy.
// TODO(ncbray): ensure non-null / missized structs are covered by a test case.
template <typename T> bool ConvertExtensibleStructInput(
    struct NaClApp* nap,
    uint32_t user_ptr,
    bool optional,
    T** sys_ptr) {
  if (user_ptr) {
    uintptr_t temp = NaClUserToSysAddrRange(nap, user_ptr, sizeof(T));
    if (temp != kNaClBadAddress) {
      *sys_ptr = reinterpret_cast<T*>(temp);
      return true;
    }
  } else if (optional) {
    *sys_ptr = 0;
    return true;
  }
  return false;
}
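
// Illustrative sketch of how a syscall trampoline might stitch these helpers
// together.  ExampleSyscallTrampoline, its parameter layout, and its error
// code are invented for illustration; the real trampolines are generated and
// call into the Mojo system API.  The essential pattern: take the copy lock,
// validate and convert every untrusted pointer up front, fail closed on any
// bad pointer, and write results back only through volatile pointers.
static inline int32_t ExampleSyscallTrampoline(struct NaClApp* nap,
                                               uint32_t input_user_ptr,
                                               uint32_t result_user_ptr) {
  uint32_t input;
  uint32_t volatile* result_ptr;
  ScopedCopyLock copy_lock(nap);
  if (!ConvertScalarInput(nap, input_user_ptr, &input)) {
    return -1; // Bad or null input pointer: fail closed.
  }
  if (!ConvertScalarOutput(nap, result_user_ptr, false, &result_ptr)) {
    return -1; // Bad or null (non-optional) output pointer: fail closed.
  }
  uint32_t result = input; // Stand-in for the real syscall's work.
  memcpy_volatile_out(result_ptr, &result, sizeof(result));
  return 0;
}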

} // namespace

#endif // MOJO_NACL_MOJO_SYSCALL_INTERNAL_H_