path: root/src/or/conscache.c
Diffstat (limited to 'src/or/conscache.c')
-rw-r--r--  src/or/conscache.c  39
1 file changed, 34 insertions, 5 deletions
diff --git a/src/or/conscache.c b/src/or/conscache.c
index 350a05b0f2..9e13ce8e43 100644
--- a/src/or/conscache.c
+++ b/src/or/conscache.c
@@ -10,7 +10,10 @@
#define CCE_MAGIC 0x17162253
#ifdef _WIN32
-/* On Windows, unlink won't work if there's an active mmap. */
+/* On Windows, unlink won't work on a file if the file is actively mmap()ed.
+ * That forces us to be less aggressive about unlinking files, and drives
+ * other changes throughout this module's logic.
+ */
#define MUST_UNMAP_TO_UNLINK
#endif
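
For illustration, a minimal standalone sketch of the Windows behavior this
comment describes, using only the Win32 API (the file name "example.dat" is
made up, and error checking is elided for brevity): deletion is refused while
a view of the file is mapped, and succeeds once the view is unmapped.

#include <windows.h>
#include <stdio.h>

int
main(void)
{
  /* Create a one-byte file and map it into memory. */
  HANDLE f = CreateFileA("example.dat", GENERIC_READ | GENERIC_WRITE, 0,
                         NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
  HANDLE m = CreateFileMappingA(f, NULL, PAGE_READWRITE, 0, 1, NULL);
  void *view = MapViewOfFile(m, FILE_MAP_READ, 0, 0, 0);
  CloseHandle(f); /* The mapping alone keeps the file object alive. */

  /* While a view is mapped, deleting the file fails. */
  if (!DeleteFileA("example.dat"))
    printf("DeleteFileA failed while mapped: error %lu\n", GetLastError());

  UnmapViewOfFile(view);
  CloseHandle(m);

  /* Once unmapped, the same call succeeds. */
  if (DeleteFileA("example.dat"))
    printf("DeleteFileA succeeded after unmapping.\n");
  return 0;
}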
@@ -74,13 +77,26 @@ static void consensus_cache_entry_unmap(consensus_cache_entry_t *ent);
consensus_cache_t *
consensus_cache_open(const char *subdir, int max_entries)
{
+ int storagedir_max_entries;
consensus_cache_t *cache = tor_malloc_zero(sizeof(consensus_cache_t));
char *directory = get_datadir_fname(subdir);
cache->max_entries = max_entries;
+
#ifdef MUST_UNMAP_TO_UNLINK
- max_entries = 1000000;
+ /* If we can't unlink the files that we're still using, then we need to
+ * tell the storagedir backend to allow far more files than this consensus
+ * cache actually wants, so that it can hold files which, from this cache's
+ * perspective, have become useless.
+ */
+#define VERY_LARGE_STORAGEDIR_LIMIT (1000*1000)
+ storagedir_max_entries = VERY_LARGE_STORAGEDIR_LIMIT;
+#else
+ /* Otherwise, we can just tell the storagedir to use the same limits
+ * as this cache. */
+ storagedir_max_entries = max_entries;
#endif
- cache->dir = storage_dir_new(directory, max_entries);
+
+ cache->dir = storage_dir_new(directory, storagedir_max_entries);
tor_free(directory);
if (!cache->dir) {
tor_free(cache);
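
A hypothetical call site, for illustration only (the subdirectory name and
entry limit below are invented, not taken from Tor's callers):

/* Ask for a cache of at most 128 consensus documents.  On a
 * MUST_UNMAP_TO_UNLINK platform, the storage_dir_t underneath is created
 * with VERY_LARGE_STORAGEDIR_LIMIT instead of 128, so that new writes keep
 * working while stale-but-still-mapped files wait to become deletable. */
consensus_cache_t *cache = consensus_cache_open("example-cache", 128);
if (!cache) {
  /* The storage directory could not be created or opened. */
}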
@@ -92,7 +108,12 @@ consensus_cache_open(const char *subdir, int max_entries)
}
/** Return true if it's okay to put more entries in this cache than
- * its official file limit. */
+ * its official file limit.
+ *
+ * (We need this method on Windows, where we can't unlink files that are still
+ * in use, and therefore might need to temporarily exceed the file limit until
+ * the no-longer-wanted files are deletable.)
+ */
int
consensus_cache_may_overallocate(consensus_cache_t *cache)
{
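
A sketch of how a caller might consult this predicate when deciding whether
the cache is really full (the wrapper function and its n_in_use/limit
parameters are hypothetical, not Tor's actual call sites):

/* Return 1 if another entry may be added to <b>cache</b>, 0 otherwise. */
static int
example_can_add_entry(consensus_cache_t *cache, int n_in_use, int limit)
{
  if (n_in_use < limit)
    return 1;
  /* Over the official limit: only allowed on platforms where mapped files
   * block unlinking, and only until the stale files become deletable. */
  return consensus_cache_may_overallocate(cache);
}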
@@ -113,7 +134,15 @@ consensus_cache_register_with_sandbox(consensus_cache_t *cache,
struct sandbox_cfg_elem **cfg)
{
#ifdef MUST_UNMAP_TO_UNLINK
- /* Our sandbox doesn't support huge limits like we use here.
+ /* Our Linux sandbox doesn't support huge file lists like the one that would
+ * be generated by using VERY_LARGE_STORAGEDIR_LIMIT above in
+ * consensus_cache_open(). Since the Linux sandbox is the only one we have
+ * right now, we just assert that we never reach this point when we've had
+ * to use VERY_LARGE_STORAGEDIR_LIMIT.
+ *
+ * If at some point in the future we have a different sandbox mechanism that
+ * can handle huge file lists, we can remove this assertion or make it
+ * conditional.
*/
tor_assert_nonfatal_unreached();
#endif
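
For context, tor_assert_nonfatal_unreached() reports a bug and lets the
process keep running, rather than aborting the way a fatal assertion would.
A simplified stand-in (not Tor's real implementation) might look like:

/* Log a bug warning when control reaches a line that should be
 * unreachable, but do not abort. */
#define example_assert_nonfatal_unreached()                       \
  do {                                                            \
    log_warn(LD_BUG, "%s:%d: reached a line that should be "      \
             "unreachable", __FILE__, __LINE__);                  \
  } while (0)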