#include "cache.h"
#include "split-index.h"
#include "ewah/ewok.h"

struct split_index *init_split_index(struct index_state *istate)
{
	if (!istate->split_index) {
		istate->split_index = xcalloc(1, sizeof(*istate->split_index));
		istate->split_index->refcount = 1;
	}
	return istate->split_index;
}
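
/*
 * On-disk layout of the link extension, as read below and written by
 * write_link_extension():
 *
 *   hash of base index | [ EWAH delete bitmap | EWAH replace bitmap ]
 *
 * The bitmaps are optional; an extension holding only the hash means
 * there are no deletions or replacements against the base.
 */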

int read_link_extension(struct index_state *istate,
			const void *data_, unsigned long sz)
{
	const unsigned char *data = data_;
	struct split_index *si;
	int ret;

	if (sz < the_hash_algo->rawsz)
		return error("corrupt link extension (too short)");
	si = init_split_index(istate);
	hashcpy(si->base_oid.hash, data);
	data += the_hash_algo->rawsz;
	sz -= the_hash_algo->rawsz;
	if (!sz)
		return 0;
	si->delete_bitmap = ewah_new();
	ret = ewah_read_mmap(si->delete_bitmap, data, sz);
	if (ret < 0)
		return error("corrupt delete bitmap in link extension");
	data += ret;
	sz -= ret;
	si->replace_bitmap = ewah_new();
	ret = ewah_read_mmap(si->replace_bitmap, data, sz);
	if (ret < 0)
		return error("corrupt replace bitmap in link extension");
	if (ret != sz)
		return error("garbage at the end of link extension");
	return 0;
}

int write_link_extension(struct strbuf *sb,
			 struct index_state *istate)
{
	struct split_index *si = istate->split_index;

	strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
	if (!si->delete_bitmap && !si->replace_bitmap)
		return 0;
	ewah_serialize_strbuf(si->delete_bitmap, sb);
	ewah_serialize_strbuf(si->replace_bitmap, sb);
	return 0;
}

static void mark_base_index_entries(struct index_state *base)
{
	int i;

	/*
	 * To keep track of the shared entries between
	 * istate->base->cache[] and istate->cache[], base entry
	 * position is stored in each base entry. All positions start
	 * from 1 instead of 0, which is reserved to say "this is a new
	 * entry".
	 */
	for (i = 0; i < base->cache_nr; i++)
		base->cache[i]->index = i + 1;
}
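
/*
 * After mark_base_index_entries() runs, base->cache[i]->index == i + 1
 * for every base entry, so an "index" of zero in any cache entry
 * always means "not shared with the base index".
 */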

void move_cache_to_base_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	int i;

	/*
	 * If there was a previous base index, then transfer ownership of allocated
	 * entries to the parent index.
	 */
if (si->base &&
si->base->ce_mem_pool) {
if (!istate->ce_mem_pool)
mem_pool_init(&istate->ce_mem_pool, 0);
mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
}
si->base = xcalloc(1, sizeof(*si->base));
si->base->version = istate->version;
/* zero timestamp disables racy test in ce_write_index() */
si->base->timestamp = istate->timestamp;
ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
si->base->cache_nr = istate->cache_nr;
	/*
	 * The mem_pool needs to move with the allocated entries.
	 */
	si->base->ce_mem_pool = istate->ce_mem_pool;
	istate->ce_mem_pool = NULL;

	COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
	mark_base_index_entries(si->base);
	for (i = 0; i < si->base->cache_nr; i++)
		si->base->cache[i]->ce_flags &= ~CE_UPDATE_IN_BASE;
}

static void mark_entry_for_delete(size_t pos, void *data)
{
	struct index_state *istate = data;

	if (pos >= istate->cache_nr)
		die("position for delete %d exceeds base index size %d",
		    (int)pos, istate->cache_nr);
	istate->cache[pos]->ce_flags |= CE_REMOVE;
	istate->split_index->nr_deletions++;
}

static void replace_entry(size_t pos, void *data)
{
	struct index_state *istate = data;
	struct split_index *si = istate->split_index;
	struct cache_entry *dst, *src;

	if (pos >= istate->cache_nr)
		die("position for replacement %d exceeds base index size %d",
		    (int)pos, istate->cache_nr);
	if (si->nr_replacements >= si->saved_cache_nr)
		die("too many replacements (%d vs %d)",
		    si->nr_replacements, si->saved_cache_nr);
	dst = istate->cache[pos];
	if (dst->ce_flags & CE_REMOVE)
		die("entry %d is marked as both replaced and deleted",
		    (int)pos);
	src = si->saved_cache[si->nr_replacements];
	if (ce_namelen(src))
		die("corrupt link extension, entry %d should have "
		    "zero length name", (int)pos);
	src->index = pos + 1;
	src->ce_flags |= CE_UPDATE_IN_BASE;
	src->ce_namelen = dst->ce_namelen;
	copy_cache_entry(dst, src);
	discard_cache_entry(src);
	si->nr_replacements++;
}
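
/*
 * Reconstruct the full index: start from a copy of base->cache[],
 * replace the base entries flagged in the replace bitmap (consuming
 * the first nr_replacements saved entries, in bitmap order), drop the
 * entries flagged in the delete bitmap, then add the remaining saved
 * entries as new ones.
 */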

void merge_base_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;
	unsigned int i;

	mark_base_index_entries(si->base);

	si->saved_cache = istate->cache;
	si->saved_cache_nr = istate->cache_nr;
	istate->cache_nr = si->base->cache_nr;
	istate->cache = NULL;
	istate->cache_alloc = 0;
	ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
	COPY_ARRAY(istate->cache, si->base->cache, istate->cache_nr);

	si->nr_deletions = 0;
	si->nr_replacements = 0;
	ewah_each_bit(si->replace_bitmap, replace_entry, istate);
	ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
	if (si->nr_deletions)
		remove_marked_cache_entries(istate);

	for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
		if (!ce_namelen(si->saved_cache[i]))
			die("corrupt link extension, entry %d should "
			    "have non-zero length name", i);
		add_index_entry(istate, si->saved_cache[i],
				ADD_CACHE_OK_TO_ADD |
				ADD_CACHE_KEEP_CACHE_TREE |
				/*
				 * we may have to replay what
				 * merge-recursive.c:update_stages()
				 * does, which has this flag on
				 */
				ADD_CACHE_SKIP_DFCHECK);
		si->saved_cache[i] = NULL;
	}

	ewah_free(si->delete_bitmap);
	ewah_free(si->replace_bitmap);
	FREE_AND_NULL(si->saved_cache);
	si->delete_bitmap = NULL;
	si->replace_bitmap = NULL;
	si->saved_cache_nr = 0;
}

void prepare_to_write_split_index(struct index_state *istate)
{
	struct split_index *si = init_split_index(istate);
	struct cache_entry **entries = NULL, *ce;
	int i, nr_entries = 0, nr_alloc = 0;

	si->delete_bitmap = ewah_new();
	si->replace_bitmap = ewah_new();

	if (si->base) {
		/* Go through istate->cache[] and mark CE_MATCHED to
		 * entry with positive index. We'll go through
		 * base->cache[] later to delete all entries in base
		 * that are not marked with either CE_MATCHED or
		 * CE_UPDATE_IN_BASE. If istate->cache[i] is a
		 * duplicate, deduplicate it.
		 */
		for (i = 0; i < istate->cache_nr; i++) {
			struct cache_entry *base;
			/* namelen is checked separately */
			const unsigned int ondisk_flags =
				CE_STAGEMASK | CE_VALID | CE_EXTENDED_FLAGS;
			unsigned int ce_flags, base_flags, ret;
			ce = istate->cache[i];
			if (!ce->index)
				continue;
			if (ce->index > si->base->cache_nr) {
				ce->index = 0;
				continue;
			}
			ce->ce_flags |= CE_MATCHED; /* or "shared" */
			base = si->base->cache[ce->index - 1];
			if (ce == base)
				continue;
			if (ce->ce_namelen != base->ce_namelen ||
			    strcmp(ce->name, base->name)) {
				ce->index = 0;
				continue;
			}
			ce_flags = ce->ce_flags;
			base_flags = base->ce_flags;
			/* only on-disk flags matter */
			ce->ce_flags &= ondisk_flags;
			base->ce_flags &= ondisk_flags;
			ret = memcmp(&ce->ce_stat_data, &base->ce_stat_data,
				     offsetof(struct cache_entry, name) -
				     offsetof(struct cache_entry, ce_stat_data));
			ce->ce_flags = ce_flags;
			base->ce_flags = base_flags;
			if (ret)
				ce->ce_flags |= CE_UPDATE_IN_BASE;
			discard_cache_entry(base);
			si->base->cache[ce->index - 1] = ce;
		}
		for (i = 0; i < si->base->cache_nr; i++) {
			ce = si->base->cache[i];
			if ((ce->ce_flags & CE_REMOVE) ||
			    !(ce->ce_flags & CE_MATCHED))
				ewah_set(si->delete_bitmap, i);
			else if (ce->ce_flags & CE_UPDATE_IN_BASE) {
				ewah_set(si->replace_bitmap, i);
				ce->ce_flags |= CE_STRIP_NAME;
				ALLOC_GROW(entries, nr_entries+1, nr_alloc);
				entries[nr_entries++] = ce;
			}
			if (is_null_oid(&ce->oid))
				istate->drop_cache_tree = 1;
		}
	}

	for (i = 0; i < istate->cache_nr; i++) {
		ce = istate->cache[i];
		if ((!si->base || !ce->index) && !(ce->ce_flags & CE_REMOVE)) {
			assert(!(ce->ce_flags & CE_STRIP_NAME));
			ALLOC_GROW(entries, nr_entries+1, nr_alloc);
			entries[nr_entries++] = ce;
		}
		ce->ce_flags &= ~CE_MATCHED;
	}

	/*
	 * take cache[] out temporarily, put entries[] in its place
	 * for writing
	 */
	si->saved_cache = istate->cache;
	si->saved_cache_nr = istate->cache_nr;
	istate->cache = entries;
	istate->cache_nr = nr_entries;
}
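
/*
 * prepare_to_write_split_index() temporarily swaps istate->cache[]
 * for the entries[] array that goes into the split index file; once
 * the index has been written, finish_writing_split_index() puts the
 * saved cache array back.
 */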

void finish_writing_split_index(struct index_state *istate)
{
	struct split_index *si = init_split_index(istate);

	ewah_free(si->delete_bitmap);
	ewah_free(si->replace_bitmap);
	si->delete_bitmap = NULL;
	si->replace_bitmap = NULL;

	free(istate->cache);
	istate->cache = si->saved_cache;
	istate->cache_nr = si->saved_cache_nr;
}

void discard_split_index(struct index_state *istate)
{
	struct split_index *si = istate->split_index;

	if (!si)
		return;
	istate->split_index = NULL;
	si->refcount--;
	if (si->refcount)
		return;
	if (si->base) {
		discard_index(si->base);
		free(si->base);
	}
	free(si);
}
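
/*
 * An entry that is still shared with the base index (its "index"
 * field points back at itself in base->cache[]) must not be freed
 * here; it is flagged CE_REMOVE instead. Entries the base does not
 * own are discarded immediately.
 */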

void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->index &&
	    istate->split_index &&
	    istate->split_index->base &&
	    ce->index <= istate->split_index->base->cache_nr &&
	    ce == istate->split_index->base->cache[ce->index - 1])
		ce->ce_flags |= CE_REMOVE;
	else
		discard_cache_entry(ce);
}

void replace_index_entry_in_base(struct index_state *istate,
				 struct cache_entry *old_entry,
				 struct cache_entry *new_entry)
{
	if (old_entry->index &&
	    istate->split_index &&
	    istate->split_index->base &&
	    old_entry->index <= istate->split_index->base->cache_nr) {
		new_entry->index = old_entry->index;
		if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
			discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
		istate->split_index->base->cache[new_entry->index - 1] = new_entry;
	}
}

void add_split_index(struct index_state *istate)
{
	if (!istate->split_index) {
		init_split_index(istate);
		istate->cache_changed |= SPLIT_INDEX_ORDERED;
	}
}

void remove_split_index(struct index_state *istate)
{
	if (istate->split_index) {
		/*
		 * When removing the split index, we need to move
		 * ownership of the mem_pool associated with the
		 * base index to the main index. There may be cache entries
		 * allocated from the base's memory pool that are shared with
		 * the_index.cache[].
		 */
		mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);

		/*
		 * The split index no longer owns the mem_pool backing
		 * its cache array. As we are discarding this index,
		 * mark the index as having no cache entries, so it
		 * will not attempt to clean up the cache entries or
		 * validate them.
		 */
		if (istate->split_index->base)
			istate->split_index->base->cache_nr = 0;

		/*
		 * We can discard the split index because its
		 * memory pool has been incorporated into the
		 * memory pool associated with the_index.
		 */
		discard_split_index(istate);
		istate->cache_changed |= SOMETHING_CHANGED;
	}
}