author     Nick Mathewson <nickm@torproject.org>  2017-09-04 15:48:01 -0400
committer  Nick Mathewson <nickm@torproject.org>  2017-09-04 15:48:01 -0400
commit     4d97efd4d1cdc39e42fd0aeaeeee9accfeb15785 (patch)
tree       62788b4d77c777a29232f5e92a3053554fb2472a
parent     a7aeb6f58f24e30c329874204a5f0db4b65bb2f0 (diff)
parent     948be49ce06f744ca456d20f48bfb6d2f5cfb7d2 (diff)
download   tor-4d97efd4d1cdc39e42fd0aeaeeee9accfeb15785.tar.gz
           tor-4d97efd4d1cdc39e42fd0aeaeeee9accfeb15785.zip
Merge branch 'bug22752_031_simple' into maint-0.3.1
-rw-r--r--  changes/bug22752_simple  |  6
-rw-r--r--  src/or/conscache.c       | 84
-rw-r--r--  src/or/conscache.h       |  1
-rw-r--r--  src/or/consdiffmgr.c     | 10
4 files changed, 95 insertions, 6 deletions
diff --git a/changes/bug22752_simple b/changes/bug22752_simple
new file mode 100644
index 0000000000..7e60357052
--- /dev/null
+++ b/changes/bug22752_simple
@@ -0,0 +1,6 @@
+  o Major bugfixes (windows, directory cache):
+    - On Windows, do not try to delete cached consensus documents and
+      diffs until they are unmapped from memory. Allow the diff storage
+      directory to grow larger, in order to handle files that might
+      need to stay around longer. Fixes bug 22752; bugfix on
+      0.3.1.1-alpha.
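The changelog entry above hinges on a Windows-specific behavior: a file cannot be deleted while a view of it is mapped into memory. The following standalone sketch (not part of this commit; the file name is illustrative and error handling is omitted) demonstrates that behavior with the plain Win32 API:

#include <windows.h>
#include <stdio.h>

int
main(void)
{
  /* Create a small scratch file; the share flags let DeleteFileA() open it. */
  HANDLE f = CreateFileA("scratch.tmp", GENERIC_READ | GENERIC_WRITE,
                         FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                         NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
  DWORD written = 0;
  WriteFile(f, "x", 1, &written, NULL);

  /* Map the file into memory, as the consensus cache does with its entries. */
  HANDLE map = CreateFileMappingA(f, NULL, PAGE_READONLY, 0, 0, NULL);
  const void *view = MapViewOfFile(map, FILE_MAP_READ, 0, 0, 0);

  /* While a view is mapped, deleting the file fails (in practice with
   * ERROR_ACCESS_DENIED), which is why the cache must defer unlinking. */
  if (!DeleteFileA("scratch.tmp"))
    printf("delete while mapped failed: error %lu\n", GetLastError());

  UnmapViewOfFile(view);
  CloseHandle(map);
  CloseHandle(f);

  /* Once every view and handle is gone, the delete goes through. */
  if (DeleteFileA("scratch.tmp"))
    printf("delete after unmap succeeded\n");
  return 0;
}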
diff --git a/src/or/conscache.c b/src/or/conscache.c
index 5ffa129bbe..9e13ce8e43 100644
--- a/src/or/conscache.c
+++ b/src/or/conscache.c
@@ -9,6 +9,14 @@
#define CCE_MAGIC 0x17162253
+#ifdef _WIN32
+/* On Windows, unlink won't work on a file if the file is actively mmap()ed.
+ * That forces us to be less aggressive about unlinking files, and causes other
+ * changes throughout our logic.
+ */
+#define MUST_UNMAP_TO_UNLINK
+#endif
+
/**
* A consensus_cache_entry_t is a reference-counted handle to an
* item in a consensus_cache_t. It can be mmapped into RAM, or not,
@@ -49,6 +57,11 @@ struct consensus_cache_t {
storage_dir_t *dir;
/** List of all the entries in the directory. */
smartlist_t *entries;
+
+ /** The maximum number of entries that we'd like to allow in this cache.
+ * This is the same as the storagedir limit when MUST_UNMAP_TO_UNLINK is
+ * not defined. */
+ unsigned max_entries;
};
static void consensus_cache_clear(consensus_cache_t *cache);
@@ -64,9 +77,26 @@ static void consensus_cache_entry_unmap(consensus_cache_entry_t *ent);
consensus_cache_t *
consensus_cache_open(const char *subdir, int max_entries)
{
+ int storagedir_max_entries;
consensus_cache_t *cache = tor_malloc_zero(sizeof(consensus_cache_t));
char *directory = get_datadir_fname(subdir);
- cache->dir = storage_dir_new(directory, max_entries);
+ cache->max_entries = max_entries;
+
+#ifdef MUST_UNMAP_TO_UNLINK
+ /* If we can't unlink the files that we're still using, then we need to
+ * tell the storagedir backend to allow far more files than this consensus
+ * cache actually wants, so that it can hold files which, from this cache's
+ * perspective, have become useless.
+ */
+#define VERY_LARGE_STORAGEDIR_LIMIT (1000*1000)
+ storagedir_max_entries = VERY_LARGE_STORAGEDIR_LIMIT;
+#else
+ /* Otherwise, we can just tell the storagedir to use the same limits
+ * as this cache. */
+ storagedir_max_entries = max_entries;
+#endif
+
+ cache->dir = storage_dir_new(directory, storagedir_max_entries);
tor_free(directory);
if (!cache->dir) {
tor_free(cache);
@@ -77,6 +107,24 @@ consensus_cache_open(const char *subdir, int max_entries)
return cache;
}
+/** Return true if it's okay to put more entries in this cache than
+ * its official file limit.
+ *
+ * (We need this method on Windows, where we can't unlink files that are still
+ * in use, and therefore might need to temporarily exceed the file limit until
+ * the no-longer-wanted files are deletable.)
+ */
+int
+consensus_cache_may_overallocate(consensus_cache_t *cache)
+{
+ (void) cache;
+#ifdef MUST_UNMAP_TO_UNLINK
+ return 1;
+#else
+ return 0;
+#endif
+}
+
/**
* Tell the sandbox (if any) configured by <b>cfg</b> to allow the
* operations that <b>cache</b> will need.
@@ -85,6 +133,19 @@ int
consensus_cache_register_with_sandbox(consensus_cache_t *cache,
struct sandbox_cfg_elem **cfg)
{
+#ifdef MUST_UNMAP_TO_UNLINK
+ /* Our Linux sandbox doesn't support huge file lists like the one that would
+ * be generated by using VERY_LARGE_STORAGEDIR_LIMIT above in
+ * consensus_cache_open(). Since the Linux sandbox is the only one we have
+ * right now, we just assert that we never reach this point when we've had
+ * to use VERY_LARGE_STORAGEDIR_LIMIT.
+ *
+ * If at some point in the future we have a different sandbox mechanism that
+ * can handle huge file lists, we can remove this assertion or make it
+ * conditional.
+ */
+ tor_assert_nonfatal_unreached();
+#endif
return storage_dir_register_with_sandbox(cache->dir, cfg);
}
@@ -406,23 +467,36 @@ int
consensus_cache_get_n_filenames_available(consensus_cache_t *cache)
{
tor_assert(cache);
- int max = storage_dir_get_max_files(cache->dir);
+ int max = cache->max_entries;
int used = smartlist_len(storage_dir_list(cache->dir));
+#ifdef MUST_UNMAP_TO_UNLINK
+ if (used > max)
+ return 0;
+#else
tor_assert_nonfatal(max >= used);
+#endif
return max - used;
}
/**
 * Delete every element of <b>cache</b> that has been marked with
- * consensus_cache_entry_mark_for_removal. If <b>force</b> is false,
- * retain those entries which are not in use except by the cache.
+ * consensus_cache_entry_mark_for_removal. If <b>force</b> is false,
+ * retain those entries which are in use by something other than the cache.
*/
void
consensus_cache_delete_pending(consensus_cache_t *cache, int force)
{
SMARTLIST_FOREACH_BEGIN(cache->entries, consensus_cache_entry_t *, ent) {
tor_assert_nonfatal(ent->in_cache == cache);
- if (! force) {
+ int force_ent = force;
+#ifdef MUST_UNMAP_TO_UNLINK
+ /* We cannot delete anything with an active mmap on win32, so no
+ * force-deletion. */
+ if (ent->map) {
+ force_ent = 0;
+ }
+#endif
+ if (! force_ent) {
if (ent->refcnt > 1 || BUG(ent->in_cache == NULL)) {
/* Somebody is using this entry right now */
continue;
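A side note (not from the commit) on the consensus_cache_get_n_filenames_available() change above: the new behavior is a small clamp that can be shown in isolation. The helper name and the example numbers below are hypothetical; only the clamping mirrors the patched function.

/* Under MUST_UNMAP_TO_UNLINK the directory may temporarily hold more files
 * than the cache's own limit, so report zero free slots rather than a
 * negative count. */
int
n_available_sketch(int max_entries, int files_on_disk)
{
#ifdef MUST_UNMAP_TO_UNLINK
  if (files_on_disk > max_entries)
    return 0;   /* Over the soft limit: no room for new files right now. */
#endif
  return max_entries - files_on_disk;
}

/* e.g. n_available_sketch(128, 130) yields 0 on a build that defines
 * MUST_UNMAP_TO_UNLINK; on other builds the real function instead flags
 * that situation with tor_assert_nonfatal(). */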
diff --git a/src/or/conscache.h b/src/or/conscache.h
index aef54201f0..a0d74c4e08 100644
--- a/src/or/conscache.h
+++ b/src/or/conscache.h
@@ -14,6 +14,7 @@ HANDLE_DECL(consensus_cache_entry, consensus_cache_entry_t, )
consensus_cache_t *consensus_cache_open(const char *subdir, int max_entries);
void consensus_cache_free(consensus_cache_t *cache);
struct sandbox_cfg_elem;
+int consensus_cache_may_overallocate(consensus_cache_t *cache);
int consensus_cache_register_with_sandbox(consensus_cache_t *cache,
struct sandbox_cfg_elem **cfg);
void consensus_cache_unmap_lazy(consensus_cache_t *cache, time_t cutoff);
diff --git a/src/or/consdiffmgr.c b/src/or/consdiffmgr.c
index 928fc26f54..831d5d45c3 100644
--- a/src/or/consdiffmgr.c
+++ b/src/or/consdiffmgr.c
@@ -1136,7 +1136,7 @@ consdiffmgr_ensure_space_for_files(int n)
return 0;
}
// Let's get more assertive: clean out unused stuff, and force-remove
- // the files.
+ // the files that we can.
consdiffmgr_cleanup();
consensus_cache_delete_pending(cache, 1);
const int n_to_remove = n - consensus_cache_get_n_filenames_available(cache);
@@ -1159,6 +1159,14 @@ consdiffmgr_ensure_space_for_files(int n)
smartlist_free(objects);
consensus_cache_delete_pending(cache, 1);
+
+ if (consensus_cache_may_overallocate(cache)) {
+ /* If we're allowed to throw extra files into the cache, let's do so
+ * rather than getting upset.
+ */
+ return 0;
+ }
+
if (BUG(n_marked < n_to_remove))
return -1;
else