author     Tom Marshall <tdm@cyngn.com>    2014-12-16 14:06:02 -0800
committer  Tom Marshall <tdm@cyngn.com>    2015-11-25 15:34:35 -0800
commit     40b9e54a3c05111a64159645170f67a9153dbcd5 (patch)
tree       b0cd18a4effc43edb3d492586b9394336282637f
parent     423f1e94f58891347d06b7a881ce6b1e67ac8339 (diff)
recovery: Provide caching for sideload files
Create a cache of block data received via adb. The cache size is chosen so that at least 100 MB remains available to the installer.

When the cache is large enough to hold the entire file, each block is read via adb at most once. When the cache is not large enough to hold the entire file, it must be pruned. Because files tend to be read sequentially during install, the pruning algorithm attempts to discard blocks that are behind the current file position.

Change-Id: Id8fc7fa5b38f1d80461eb576b1a1b5d53453cfc1
-rw-r--r--    fuse_sideload.cpp    108
1 file changed, 107 insertions(+), 1 deletion(-)
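Before the diff itself, a standalone sketch of the pruning policy the commit message describes may help: a fixed-capacity cache with one slot per file block that, when full, frees the first cached block found by scanning backward from the block just behind the most recently used one. This is illustrative scaffolding only, not code from the patch; the class name BlockCache, the std::vector bookkeeping, and the assumption that the capacity is at least two blocks are invented for the example (the patch implements the same idea directly on struct fuse_data).

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <vector>

// Illustrative model of the sideload block cache (not the patch itself).
struct BlockCache {
    uint32_t file_blocks;         // number of blocks in the file
    uint32_t block_size;          // bytes per block
    uint32_t max_entries;         // capacity in blocks (assumed >= 2)
    uint32_t entries = 0;         // blocks currently held
    uint32_t curr_block = 0;      // most recently used block
    std::vector<uint8_t*> slots;  // one (possibly null) buffer per block

    BlockCache(uint32_t blocks, uint32_t bsize, uint32_t cap)
        : file_blocks(blocks), block_size(bsize), max_entries(cap),
          slots(blocks, nullptr) {}

    ~BlockCache() {
        for (uint8_t* p : slots) free(p);
    }

    // Copy a cached block into |out|; false means a miss (fetch via adb).
    bool fetch(uint32_t block, uint8_t* out) {
        if (slots[block] == nullptr) return false;
        memcpy(out, slots[block], block_size);
        curr_block = block;
        return true;
    }

    // Remember |data| for |block|, pruning an old block if the cache is full.
    void enter(uint32_t block, const uint8_t* data) {
        if (slots[block] != nullptr) return;  // already cached
        if (entries == max_entries) {
            // Installs read mostly forward, so blocks behind the current
            // position are the least likely to be needed again: scan
            // backward (wrapping around) and drop the first cached block.
            for (int n = (int)curr_block - 1; n != (int)curr_block; --n) {
                if (n < 0) n = (int)file_blocks - 1;
                if (slots[n]) {
                    free(slots[n]);
                    slots[n] = nullptr;
                    --entries;
                    break;
                }
            }
        }
        slots[block] = (uint8_t*)malloc(block_size);
        if (slots[block] == nullptr) return;  // out of memory: skip caching
        memcpy(slots[block], data, block_size);
        ++entries;
    }
};

With a capacity equal to file_blocks (the case the message singles out), enter() never has to evict, so every block crosses the adb link at most once.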
diff --git a/fuse_sideload.cpp b/fuse_sideload.cpp
index 39c7237..a53a756 100644
--- a/fuse_sideload.cpp
+++ b/fuse_sideload.cpp
@@ -68,6 +68,8 @@
#define NO_STATUS 1
+#define INSTALL_REQUIRED_MEMORY (100*1024*1024)
+
struct fuse_data {
    int ffd;   // file descriptor for the fuse socket
@@ -82,7 +84,7 @@ struct fuse_data {
    uid_t uid;
    gid_t gid;
-    uint32_t curr_block;   // cache the block most recently read from the host
+    uint32_t curr_block;   // cache the block most recently used
    uint8_t* block_data;
    uint8_t* extra_block;  // another block of storage for reads that
@@ -90,8 +92,80 @@ struct fuse_data {
    uint8_t* hashes;       // SHA-256 hash of each block (all zeros
                           // if block hasn't been read yet)
+
+    // Block cache
+    uint32_t block_cache_max_size;  // Max allowed block cache size
+    uint32_t block_cache_size;      // Current block cache size
+    uint8_t** block_cache;          // Block cache data
};
+static uint64_t free_memory() {
+    uint64_t mem = 0;
+    FILE* fp = fopen("/proc/meminfo", "r");
+    if (fp) {
+        // Sum the MemFree, Buffers and Cached fields; /proc/meminfo
+        // reports each value in kB.
+        char buf[256];
+        while (fgets(buf, sizeof(buf), fp) != NULL) {
+            char* key = buf;
+            char* val = strchr(buf, ':');
+            if (val == NULL) {
+                continue;
+            }
+            *val = '\0';
+            ++val;
+            if (strcmp(key, "MemFree") == 0 ||
+                strcmp(key, "Buffers") == 0 ||
+                strcmp(key, "Cached") == 0) {
+                mem += (uint64_t)strtoul(val, NULL, 0) * 1024;
+            }
+        }
+        fclose(fp);
+    }
+    return mem;
+}
+
+// Copy a cached block into fd->block_data; returns -1 on a cache miss.
+static int block_cache_fetch(struct fuse_data* fd, uint32_t block)
+{
+    if (fd->block_cache == NULL) {
+        return -1;
+    }
+    if (fd->block_cache[block] == NULL) {
+        return -1;
+    }
+    memcpy(fd->block_data, fd->block_cache[block], fd->block_size);
+    return 0;
+}
+
+// Insert a copy of fd->block_data into the cache, evicting an older
+// block first if the cache is already full.
+static void block_cache_enter(struct fuse_data* fd, uint32_t block)
+{
+    if (!fd->block_cache)
+        return;
+
+    if (fd->block_cache_size == fd->block_cache_max_size) {
+        // Evict a block from the cache.  Since the file is typically read
+        // sequentially, start looking from the block behind the current
+        // block and proceed backward.
+        int n;
+        for (n = fd->curr_block - 1; n != (int)fd->curr_block; --n) {
+            if (n < 0) {
+                n = fd->file_blocks - 1;
+            }
+            if (fd->block_cache[n]) {
+                free(fd->block_cache[n]);
+                fd->block_cache[n] = NULL;
+                fd->block_cache_size--;
+                break;
+            }
+        }
+    }
+
+    fd->block_cache[block] = (uint8_t*)malloc(fd->block_size);
+    if (fd->block_cache[block] == NULL) {
+        // Allocation failed; skip caching this block.
+        return;
+    }
+    memcpy(fd->block_cache[block], fd->block_data, fd->block_size);
+
+    fd->block_cache_size++;
+}
+
static void fuse_reply(struct fuse_data* fd, __u64 unique, const void *data, size_t len)
{
    struct fuse_out_header hdr;
@@ -236,6 +310,11 @@ static int fetch_block(struct fuse_data* fd, uint32_t block) {
        return 0;
    }
+    if (block_cache_fetch(fd, block) == 0) {
+        fd->curr_block = block;
+        return 0;
+    }
+
    size_t fetch_size = fd->block_size;
    if (block * fd->block_size + fetch_size > fd->file_size) {
        // If we're reading the last (partial) block of the file,
@@ -275,6 +354,7 @@ static int fetch_block(struct fuse_data* fd, uint32_t block) {
    }
    memcpy(blockhash, hash, SHA256_DIGEST_SIZE);
+    block_cache_enter(fd, block);
    return 0;
}
@@ -383,6 +463,9 @@ int run_fuse_sideload(struct provider_vtab* vtab, void* cookie,
    fd.block_size = block_size;
    fd.file_blocks = (file_size == 0) ? 0 : (((file_size-1) / block_size) + 1);
+    // Memory available for the block cache: whatever is free after reserving
+    // INSTALL_REQUIRED_MEMORY for the installer plus the per-block pointer
+    // array.  This underflows when not even that much is free; the
+    // "mem > avail" check below detects that case.
+    uint64_t mem = free_memory();
+    uint64_t avail = mem - (INSTALL_REQUIRED_MEMORY + fd.file_blocks * sizeof(uint8_t*));
+
    if (fd.file_blocks > (1<<18)) {
        fprintf(stderr, "file has too many blocks (%u)\n", fd.file_blocks);
        result = -1;
@@ -414,6 +497,22 @@ int run_fuse_sideload(struct provider_vtab* vtab, void* cookie,
        goto done;
    }
+    fd.block_cache_max_size = 0;
+    fd.block_cache_size = 0;
+    fd.block_cache = NULL;
+    // Only build a cache when some memory is left over after the reservation
+    // above (i.e. "avail" did not underflow).
+    if (mem > avail) {
+        uint32_t max_size = avail / fd.block_size;
+        if (max_size > fd.file_blocks) {
+            max_size = fd.file_blocks;
+        }
+        // The cache must be at least 1% of the file size or two blocks,
+        // whichever is larger.
+        if (max_size >= fd.file_blocks/100 && max_size >= 2) {
+            fd.block_cache_max_size = max_size;
+            fd.block_cache = (uint8_t**)calloc(fd.file_blocks, sizeof(uint8_t*));
+        }
+    }
+
    signal(SIGTERM, sig_term);
    fd.ffd = open("/dev/fuse", O_RDWR);
@@ -519,6 +618,13 @@ int run_fuse_sideload(struct provider_vtab* vtab, void* cookie,
    }
    if (fd.ffd) close(fd.ffd);
+    if (fd.block_cache) {
+        uint32_t n;
+        for (n = 0; n < fd.file_blocks; ++n) {
+            free(fd.block_cache[n]);
+        }
+        free(fd.block_cache);
+    }
    free(fd.hashes);
    free(fd.block_data);
    free(fd.extra_block);
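To make the sizing arithmetic above concrete, the following standalone snippet recomputes fd.block_cache_max_size the same way run_fuse_sideload does, under assumed numbers: 1 GiB free, 64 KiB sideload blocks, and a 700 MiB package. All of these figures, and the helper main() itself, are hypothetical and only illustrate the rule; they are not part of the patch.

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical inputs, for illustration only.
    const uint64_t kRequired  = 100ULL * 1024 * 1024;  // INSTALL_REQUIRED_MEMORY
    const uint64_t mem        = 1024ULL * 1024 * 1024; // pretend free_memory() saw 1 GiB
    const uint32_t block_size = 64 * 1024;             // assumed sideload block size
    const uint64_t file_size  = 700ULL * 1024 * 1024;  // a 700 MiB OTA package

    const uint32_t file_blocks = (file_size == 0) ? 0 : ((file_size - 1) / block_size) + 1;
    const uint64_t avail = mem - (kRequired + file_blocks * sizeof(uint8_t*));

    uint32_t max_size = 0;
    if (mem > avail) {  // the reservation did not underflow
        max_size = avail / block_size;
        if (max_size > file_blocks) max_size = file_blocks;
        // Same rule as the patch: at least 1% of the file and at least two blocks.
        if (!(max_size >= file_blocks / 100 && max_size >= 2)) max_size = 0;
    }

    printf("file_blocks=%u, cache=%u blocks (%.1f MiB)\n",
           file_blocks, max_size, max_size * (double)block_size / (1024.0 * 1024.0));
    return 0;
}

With these numbers the whole 11200-block file fits in the cache (max_size equals file_blocks), which is the case where every block is read over adb at most once; shrink mem below roughly 800 MiB and max_size drops below file_blocks, so pruning kicks in.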