
Merge branch 'jm/cache-entry-from-mem-pool'

For a large tree, the index needs to hold many cache entries
allocated on the heap.  These cache entries are now allocated out of a
dedicated memory pool to amortize malloc(3) overhead; a usage sketch
follows the commit header below.

* jm/cache-entry-from-mem-pool:
  block alloc: add validations around cache_entry lifecycle
  block alloc: allocate cache entries from mem_pool
  mem-pool: fill out functionality
  mem-pool: add life cycle management functions
  mem-pool: only search head block for available space
  block alloc: add lifecycle APIs for cache_entry structs
  read-cache: teach make_cache_entry to take object_id
  read-cache: teach refresh_cache_entry to take istate
Junio C Hamano 2018-08-02 15:30:43 -07:00
commit ae533c4a92
16 changed files with 515 additions and 134 deletions
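
The series replaces a per-entry malloc(3)/free(3) pair with block
allocation. A minimal sketch of the intended pattern, pieced together
from the mem-pool API added below (nr_entries and name_len are
illustrative placeholders, and error handling is omitted):

	struct mem_pool *pool = NULL;
	size_t i;

	mem_pool_init(&pool, 0);	/* blocks are allocated lazily */
	for (i = 0; i < nr_entries; i++) {
		/* each allocation is a pointer bump inside the head block */
		struct cache_entry *ce =
			mem_pool_alloc(pool, cache_entry_size(name_len));
		/* ... fill in ce ... */
	}
	/* one bulk free instead of nr_entries individual free(3) calls */
	mem_pool_discard(pool, 0);

In the series itself the pool is owned by the index_state, and entries
are created through make_empty_cache_entry() rather than by calling
mem_pool_alloc() directly.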

apply.c

@ -4093,12 +4093,12 @@ static int build_fake_ancestor(struct apply_state *state, struct patch *list)
return error(_("sha1 information is lacking or useless "
"(%s)."), name);
ce = make_cache_entry(patch->old_mode, oid.hash, name, 0, 0);
ce = make_cache_entry(&result, patch->old_mode, &oid, name, 0, 0);
if (!ce)
return error(_("make_cache_entry failed for path '%s'"),
name);
if (add_index_entry(&result, ce, ADD_CACHE_OK_TO_ADD)) {
free(ce);
discard_cache_entry(ce);
return error(_("could not add %s to temporary index"),
name);
}
@ -4266,9 +4266,8 @@ static int add_index_file(struct apply_state *state,
struct stat st;
struct cache_entry *ce;
int namelen = strlen(path);
unsigned ce_size = cache_entry_size(namelen);
ce = xcalloc(1, ce_size);
ce = make_empty_cache_entry(&the_index, namelen);
memcpy(ce->name, path, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(0);
@ -4281,13 +4280,13 @@ static int add_index_file(struct apply_state *state,
if (!skip_prefix(buf, "Subproject commit ", &s) ||
get_oid_hex(s, &ce->oid)) {
free(ce);
return error(_("corrupt patch for submodule %s"), path);
discard_cache_entry(ce);
return error(_("corrupt patch for submodule %s"), path);
}
} else {
if (!state->cached) {
if (lstat(path, &st) < 0) {
free(ce);
discard_cache_entry(ce);
return error_errno(_("unable to stat newly "
"created file '%s'"),
path);
@ -4295,13 +4294,13 @@ static int add_index_file(struct apply_state *state,
fill_stat_cache_info(ce, &st);
}
if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
free(ce);
discard_cache_entry(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
}
}
if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) {
free(ce);
discard_cache_entry(ce);
return error(_("unable to add cache entry for %s"), path);
}
@ -4425,27 +4424,26 @@ static int add_conflicted_stages_file(struct apply_state *state,
struct patch *patch)
{
int stage, namelen;
unsigned ce_size, mode;
unsigned mode;
struct cache_entry *ce;
if (!state->update_index)
return 0;
namelen = strlen(patch->new_name);
ce_size = cache_entry_size(namelen);
mode = patch->new_mode ? patch->new_mode : (S_IFREG | 0644);
remove_file_from_cache(patch->new_name);
for (stage = 1; stage < 4; stage++) {
if (is_null_oid(&patch->threeway_stage[stage - 1]))
continue;
ce = xcalloc(1, ce_size);
ce = make_empty_cache_entry(&the_index, namelen);
memcpy(ce->name, patch->new_name, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = namelen;
oidcpy(&ce->oid, &patch->threeway_stage[stage - 1]);
if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) {
free(ce);
discard_cache_entry(ce);
return error(_("unable to add cache entry for %s"),
patch->new_name);
}

blame.c

@ -176,7 +176,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
struct strbuf buf = STRBUF_INIT;
const char *ident;
time_t now;
int size, len;
int len;
struct cache_entry *ce;
unsigned mode;
struct strbuf msg = STRBUF_INIT;
@ -274,8 +274,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
/* Let's not bother reading from HEAD tree */
mode = S_IFREG | 0644;
}
size = cache_entry_size(len);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, &origin->blob_oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(0);

builtin/checkout.c

@ -79,7 +79,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
return READ_TREE_RECURSIVE;
len = base->len + strlen(pathname);
ce = xcalloc(1, cache_entry_size(len));
ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, base->buf, base->len);
memcpy(ce->name + base->len, pathname, len - base->len);
@ -98,7 +98,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
if (ce->ce_mode == old->ce_mode &&
!oidcmp(&ce->oid, &old->oid)) {
old->ce_flags |= CE_UPDATE;
free(ce);
discard_cache_entry(ce);
return 0;
}
}
@ -232,11 +232,11 @@ static int checkout_merged(int pos, const struct checkout *state)
if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_cache_entry(mode, oid.hash, path, 2, 0);
ce = make_transient_cache_entry(mode, &oid, path, 2);
if (!ce)
die(_("make_cache_entry failed for path '%s'"), path);
status = checkout_entry(ce, state, NULL);
free(ce);
discard_cache_entry(ce);
return status;
}

builtin/difftool.c

@ -322,10 +322,10 @@ static int checkout_path(unsigned mode, struct object_id *oid,
struct cache_entry *ce;
int ret;
ce = make_cache_entry(mode, oid->hash, path, 0, 0);
ce = make_transient_cache_entry(mode, oid, path, 0);
ret = checkout_entry(ce, state, NULL);
free(ce);
discard_cache_entry(ce);
return ret;
}
@ -489,7 +489,7 @@ static int run_dir_diff(const char *extcmd, int symlinks, const char *prefix,
* index.
*/
struct cache_entry *ce2 =
make_cache_entry(rmode, roid.hash,
make_cache_entry(&wtindex, rmode, &roid,
dst_path, 0, 0);
add_index_entry(&wtindex, ce2,

builtin/reset.c

@ -134,7 +134,7 @@ static void update_index_from_diff(struct diff_queue_struct *q,
continue;
}
ce = make_cache_entry(one->mode, one->oid.hash, one->path,
ce = make_cache_entry(&the_index, one->mode, &one->oid, one->path,
0, 0);
if (!ce)
die(_("make_cache_entry failed for path '%s'"),

builtin/update-index.c

@ -268,15 +268,14 @@ static int process_lstat_error(const char *path, int err)
static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st)
{
int option, size;
int option;
struct cache_entry *ce;
/* Was the old index entry already up-to-date? */
if (old && !ce_stage(old) && !ce_match_stat(old, st, 0))
return 0;
size = cache_entry_size(len);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(&the_index, len);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(0);
ce->ce_namelen = len;
@ -285,13 +284,13 @@ static int add_one_path(const struct cache_entry *old, const char *path, int len
if (index_path(&ce->oid, path, st,
info_only ? 0 : HASH_WRITE_OBJECT)) {
free(ce);
discard_cache_entry(ce);
return -1;
}
option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
if (add_cache_entry(ce, option)) {
free(ce);
discard_cache_entry(ce);
return error("%s: cannot add to the index - missing --add option?", path);
}
return 0;
@ -402,15 +401,14 @@ static int process_path(const char *path, struct stat *st, int stat_errno)
static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
const char *path, int stage)
{
int size, len, option;
int len, option;
struct cache_entry *ce;
if (!verify_path(path, mode))
return error("Invalid path '%s'", path);
len = strlen(path);
size = cache_entry_size(len);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
@ -600,7 +598,6 @@ static struct cache_entry *read_one_ent(const char *which,
{
unsigned mode;
struct object_id oid;
int size;
struct cache_entry *ce;
if (get_tree_entry(ent, path, &oid, &mode)) {
@ -613,8 +610,7 @@ static struct cache_entry *read_one_ent(const char *which,
error("%s: not a blob in %s branch.", path, which);
return NULL;
}
size = cache_entry_size(namelen);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(&the_index, namelen);
oidcpy(&ce->oid, &oid);
memcpy(ce->name, path, namelen);
@ -691,8 +687,8 @@ static int unresolve_one(const char *path)
error("%s: cannot add their version to the index.", path);
ret = -1;
free_return:
free(ce_2);
free(ce_3);
discard_cache_entry(ce_2);
discard_cache_entry(ce_3);
return ret;
}
@ -759,7 +755,7 @@ static int do_reupdate(int ac, const char **av,
ce->name, ce_namelen(ce), 0);
if (old && ce->ce_mode == old->ce_mode &&
!oidcmp(&ce->oid, &old->oid)) {
free(old);
discard_cache_entry(old);
continue; /* unchanged */
}
/* Be careful. The working tree may not have the
@ -770,7 +766,7 @@ static int do_reupdate(int ac, const char **av,
path = xstrdup(ce->name);
update_one(path);
free(path);
free(old);
discard_cache_entry(old);
if (save_nr != active_nr)
goto redo;
}

cache.h

@ -15,6 +15,7 @@
#include "path.h"
#include "sha1-array.h"
#include "repository.h"
#include "mem-pool.h"
#include <zlib.h>
typedef struct git_zstream {
@ -156,6 +157,7 @@ struct cache_entry {
struct stat_data ce_stat_data;
unsigned int ce_mode;
unsigned int ce_flags;
unsigned int mem_pool_allocated;
unsigned int ce_namelen;
unsigned int index; /* for link extension */
struct object_id oid;
@ -227,6 +229,7 @@ static inline void copy_cache_entry(struct cache_entry *dst,
const struct cache_entry *src)
{
unsigned int state = dst->ce_flags & CE_HASHED;
int mem_pool_allocated = dst->mem_pool_allocated;
/* Don't copy hash chain and name */
memcpy(&dst->ce_stat_data, &src->ce_stat_data,
@ -235,6 +238,9 @@ static inline void copy_cache_entry(struct cache_entry *dst,
/* Restore the hash state */
dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state;
/* Restore the mem_pool_allocated flag */
dst->mem_pool_allocated = mem_pool_allocated;
}
static inline unsigned create_ce_flags(unsigned stage)
@ -328,6 +334,7 @@ struct index_state {
struct untracked_cache *untracked;
uint64_t fsmonitor_last_update;
struct ewah_bitmap *fsmonitor_dirty;
struct mem_pool *ce_mem_pool;
};
extern struct index_state the_index;
@ -339,6 +346,60 @@ extern void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
extern void free_name_hash(struct index_state *istate);
/* Cache entry creation and cleanup */
/*
* Create cache_entry intended for use in the specified index. Caller
* is responsible for discarding the cache_entry with
* `discard_cache_entry`.
*/
struct cache_entry *make_cache_entry(struct index_state *istate,
unsigned int mode,
const struct object_id *oid,
const char *path,
int stage,
unsigned int refresh_options);
struct cache_entry *make_empty_cache_entry(struct index_state *istate,
size_t name_len);
/*
* Create a cache_entry that is not intended to be added to an index.
* Caller is responsible for discarding the cache_entry
* with `discard_cache_entry`.
*/
struct cache_entry *make_transient_cache_entry(unsigned int mode,
const struct object_id *oid,
const char *path,
int stage);
struct cache_entry *make_empty_transient_cache_entry(size_t name_len);
/*
* Discard cache entry.
*/
void discard_cache_entry(struct cache_entry *ce);
/*
* Check configuration if we should perform extra validation on cache
* entries.
*/
int should_validate_cache_entries(void);
/*
* Duplicate a cache_entry. Allocate memory for the new entry from a
* memory_pool. Takes into account cache_entry fields that are meant
* for managing the underlying memory allocation of the cache_entry.
*/
struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_state *istate);
/*
* Validate the cache entries in the index. This is an internal
* consistency check that the cache_entry structs are allocated from
* the expected memory pool.
*/
void validate_cache_entries(const struct index_state *istate);
#ifndef NO_THE_INDEX_COMPATIBILITY_MACROS
#define active_cache (the_index.cache)
#define active_nr (the_index.cache_nr)
@ -698,7 +759,6 @@ extern int remove_file_from_index(struct index_state *, const char *path);
extern int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
extern int add_file_to_index(struct index_state *, const char *path, int flags);
extern struct cache_entry *make_cache_entry(unsigned int mode, const unsigned char *sha1, const char *path, int stage, unsigned int refresh_options);
extern int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
extern int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
extern void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
@ -751,7 +811,7 @@ extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st);
#define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */
#define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */
extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
extern struct cache_entry *refresh_cache_entry(struct cache_entry *, unsigned int);
extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
/*
* Opportunistically update the index but do not complain if we can't.

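The two creation paths declared above differ only in what
discard_cache_entry() does with the memory afterwards. A hedged usage
sketch, mirroring the call sites in apply.c and builtin/checkout.c
(mode, oid and path are assumed to be in scope):

	/* index-bound: memory comes from the index's pool */
	struct cache_entry *ce =
		make_cache_entry(&the_index, mode, &oid, path, 0, 0);
	if (!ce)
		return error(_("make_cache_entry failed for path '%s'"), path);
	if (add_index_entry(&the_index, ce, ADD_CACHE_OK_TO_ADD)) {
		discard_cache_entry(ce);	/* a no-op for pool memory */
		return error(_("could not add %s to index"), path);
	}

	/* transient: heap-backed, must be discarded by the caller */
	struct cache_entry *tce =
		make_transient_cache_entry(mode, &oid, path, 0);
	/* ... use tce, e.g. pass it to checkout_entry() ... */
	discard_cache_entry(tce);	/* actually frees here */
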
git.c

@ -414,7 +414,10 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
trace_argv_printf(argv, "trace: built-in: git");
validate_cache_entries(&the_index);
status = p->fn(argc, argv, prefix);
validate_cache_entries(&the_index);
if (status)
return status;

mem-pool.c

@ -5,40 +5,88 @@
#include "cache.h"
#include "mem-pool.h"
static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc)
#define BLOCK_GROWTH_SIZE (1024*1024 - sizeof(struct mp_block))
/*
* Allocate a new mp_block and insert it after the block specified in
* `insert_after`. If `insert_after` is NULL, then insert block at the
* head of the linked list.
*/
static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc, struct mp_block *insert_after)
{
struct mp_block *p;
mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
p->next_block = mem_pool->mp_block;
p->next_free = (char *)p->space;
p->end = p->next_free + block_alloc;
mem_pool->mp_block = p;
if (insert_after) {
p->next_block = insert_after->next_block;
insert_after->next_block = p;
} else {
p->next_block = mem_pool->mp_block;
mem_pool->mp_block = p;
}
return p;
}
void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
{
struct mem_pool *pool;
if (*mem_pool)
return;
pool = xcalloc(1, sizeof(*pool));
pool->block_alloc = BLOCK_GROWTH_SIZE;
if (initial_size > 0)
mem_pool_alloc_block(pool, initial_size, NULL);
*mem_pool = pool;
}
void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
{
struct mp_block *block, *block_to_free;
block = mem_pool->mp_block;
while (block) {
block_to_free = block;
block = block->next_block;
if (invalidate_memory)
memset(block_to_free->space, 0xDD, ((char *)block_to_free->end) - ((char *)block_to_free->space));
free(block_to_free);
}
free(mem_pool);
}
void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
{
struct mp_block *p;
struct mp_block *p = NULL;
void *r;
/* round up to a 'uintmax_t' alignment */
if (len & (sizeof(uintmax_t) - 1))
len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
for (p = mem_pool->mp_block; p; p = p->next_block)
if (p->end - p->next_free >= len)
break;
if (mem_pool->mp_block &&
mem_pool->mp_block->end - mem_pool->mp_block->next_free >= len)
p = mem_pool->mp_block;
if (!p) {
if (len >= (mem_pool->block_alloc / 2)) {
mem_pool->pool_alloc += len;
return xmalloc(len);
}
if (len >= (mem_pool->block_alloc / 2))
return mem_pool_alloc_block(mem_pool, len, mem_pool->mp_block);
p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc);
p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc, NULL);
}
r = p->next_free;
@ -53,3 +101,45 @@ void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size)
memset(r, 0, len);
return r;
}
int mem_pool_contains(struct mem_pool *mem_pool, void *mem)
{
struct mp_block *p;
/* Check if memory is allocated in a block */
for (p = mem_pool->mp_block; p; p = p->next_block)
if ((mem >= ((void *)p->space)) &&
(mem < ((void *)p->end)))
return 1;
return 0;
}
void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src)
{
struct mp_block *p;
/* Append the blocks from src to dst */
if (dst->mp_block && src->mp_block) {
/*
* src and dst have blocks, append
* blocks from src to dst.
*/
p = dst->mp_block;
while (p->next_block)
p = p->next_block;
p->next_block = src->mp_block;
} else if (src->mp_block) {
/*
* src has blocks, dst is empty.
*/
dst->mp_block = src->mp_block;
} else {
/* src is empty, nothing to do. */
}
dst->pool_alloc += src->pool_alloc;
src->pool_alloc = 0;
src->mp_block = NULL;
}
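
One consequence of searching only the head block deserves a worked
example. A request of at least half the growth size gets a dedicated
block inserted behind the head, so the head block's remaining space
stays reachable for later small allocations (sizes are illustrative,
assuming the ~1 MB default):

	mem_pool_init(&pool, 0);
	a = mem_pool_alloc(pool, 100);		/* allocates the 1 MB head block */
	b = mem_pool_alloc(pool, 2 * 1024 * 1024);
						/* does not fit in the head and is
						 * >= block_alloc/2: gets its own
						 * block, linked in after the head */
	c = mem_pool_alloc(pool, 200);		/* still served from the head block */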

mem-pool.h

@ -21,6 +21,16 @@ struct mem_pool {
size_t pool_alloc;
};
/*
* Initialize mem_pool with specified initial size.
*/
void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
/*
* Discard a memory pool and free all the memory it is responsible for.
*/
void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
/*
* Alloc memory from the mem_pool.
*/
@ -31,4 +41,17 @@ void *mem_pool_alloc(struct mem_pool *pool, size_t len);
*/
void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);
/*
* Move the memory associated with the 'src' pool to the 'dst' pool. The 'src'
* pool will be empty and not contain any memory. It still needs to be free'd
* with a call to `mem_pool_discard`.
*/
void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src);
/*
* Check if a memory pointed at by 'mem' is part of the range of
* memory managed by the specified mem_pool.
*/
int mem_pool_contains(struct mem_pool *mem_pool, void *mem);
#endif
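
The combine/contains pair is what later lets split-index.c hand whole
pools between indexes without touching individual entries. A sketch of
the contract, with hypothetical pools src and dst:

	mem_pool_combine(dst, src);	/* dst now owns every block */
	/* entries formerly backed by src remain valid; their blocks
	 * moved onto dst's list, the memory itself did not move */
	assert(mem_pool_contains(dst, ce));
	assert(!mem_pool_contains(src, ce));	/* src is now empty... */
	mem_pool_discard(src, 0);		/* ...but must still be discarded */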

merge-recursive.c

@ -320,7 +320,7 @@ static int add_cacheinfo(struct merge_options *o,
struct cache_entry *ce;
int ret;
ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0);
ce = make_cache_entry(&the_index, mode, oid ? oid : &null_oid, path, stage, 0);
if (!ce)
return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
@ -328,7 +328,7 @@ static int add_cacheinfo(struct merge_options *o,
if (refresh) {
struct cache_entry *nce;
nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
nce = refresh_cache_entry(&the_index, ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
if (!nce)
return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
if (nce != ce)

read-cache.c

@ -47,6 +47,48 @@
CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
/*
* This is an estimate of the pathname length in the index. We use
* this for V4 index files to guess the un-deltafied size of the index
* in memory because of pathname deltafication. This is not required
* for V2/V3 index formats because their pathnames are not compressed.
* If the initial amount of memory set aside is not sufficient, the
* mem pool will allocate extra memory.
*/
#define CACHE_ENTRY_PATH_LENGTH 80
static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
struct cache_entry *ce;
ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
ce->mem_pool_allocated = 1;
return ce;
}
static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
struct cache_entry *ce;
ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
ce->mem_pool_allocated = 1;
return ce;
}
static struct mem_pool *find_mem_pool(struct index_state *istate)
{
struct mem_pool **pool_ptr;
if (istate->split_index && istate->split_index->base)
pool_ptr = &istate->split_index->base->ce_mem_pool;
else
pool_ptr = &istate->ce_mem_pool;
if (!*pool_ptr)
mem_pool_init(pool_ptr, 0);
return *pool_ptr;
}
struct index_state the_index;
static const char *alternate_index_output;
@ -62,7 +104,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
replace_index_entry_in_base(istate, old, ce);
remove_name_hash(istate, old);
free(old);
discard_cache_entry(old);
ce->ce_flags &= ~CE_HASHED;
set_index_entry(istate, nr, ce);
ce->ce_flags |= CE_UPDATE_IN_BASE;
@ -75,7 +117,7 @@ void rename_index_entry_at(struct index_state *istate, int nr, const char *new_n
struct cache_entry *old_entry = istate->cache[nr], *new_entry;
int namelen = strlen(new_name);
new_entry = xmalloc(cache_entry_size(namelen));
new_entry = make_empty_cache_entry(istate, namelen);
copy_cache_entry(new_entry, old_entry);
new_entry->ce_flags &= ~CE_HASHED;
new_entry->ce_namelen = namelen;
@ -624,7 +666,7 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
new_entry = xcalloc(1, cache_entry_size(len));
new_entry = make_empty_cache_entry(istate, len);
memcpy(new_entry->name, alias->name, len);
copy_cache_entry(new_entry, ce);
save_or_free_index_entry(istate, ce);
@ -641,7 +683,7 @@ void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
int size, namelen, was_same;
int namelen, was_same;
mode_t st_mode = st->st_mode;
struct cache_entry *ce, *alias = NULL;
unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
@ -663,8 +705,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
while (namelen && path[namelen-1] == '/')
namelen--;
}
size = cache_entry_size(namelen);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(istate, namelen);
memcpy(ce->name, path, namelen);
ce->ce_namelen = namelen;
if (!intent_only)
@ -705,13 +746,13 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
ce_mark_uptodate(alias);
alias->ce_flags |= CE_ADDED;
free(ce);
discard_cache_entry(ce);
return 0;
}
}
if (!intent_only) {
if (index_path(&ce->oid, path, st, newflags)) {
free(ce);
discard_cache_entry(ce);
return error("unable to index file %s", path);
}
} else
@ -728,9 +769,9 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
ce->ce_mode == alias->ce_mode);
if (pretend)
free(ce);
discard_cache_entry(ce);
else if (add_index_entry(istate, ce, add_option)) {
free(ce);
discard_cache_entry(ce);
return error("unable to add %s to index", path);
}
if (verbose && !was_same)
@ -746,12 +787,25 @@ int add_file_to_index(struct index_state *istate, const char *path, int flags)
return add_to_index(istate, path, &st, flags);
}
struct cache_entry *make_cache_entry(unsigned int mode,
const unsigned char *sha1, const char *path, int stage,
unsigned int refresh_options)
struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
{
return mem_pool__ce_calloc(find_mem_pool(istate), len);
}
struct cache_entry *make_empty_transient_cache_entry(size_t len)
{
return xcalloc(1, cache_entry_size(len));
}
struct cache_entry *make_cache_entry(struct index_state *istate,
unsigned int mode,
const struct object_id *oid,
const char *path,
int stage,
unsigned int refresh_options)
{
int size, len;
struct cache_entry *ce, *ret;
int len;
if (!verify_path(path, mode)) {
error("Invalid path '%s'", path);
@ -759,21 +813,43 @@ struct cache_entry *make_cache_entry(unsigned int mode,
}
len = strlen(path);
size = cache_entry_size(len);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(istate, len);
hashcpy(ce->oid.hash, sha1);
oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = len;
ce->ce_mode = create_ce_mode(mode);
ret = refresh_cache_entry(ce, refresh_options);
ret = refresh_cache_entry(&the_index, ce, refresh_options);
if (ret != ce)
free(ce);
discard_cache_entry(ce);
return ret;
}
struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid,
const char *path, int stage)
{
struct cache_entry *ce;
int len;
if (!verify_path(path, mode)) {
error("Invalid path '%s'", path);
return NULL;
}
len = strlen(path);
ce = make_empty_transient_cache_entry(len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = len;
ce->ce_mode = create_ce_mode(mode);
return ce;
}
/*
* Chmod an index entry with either +x or -x.
*
@ -1269,7 +1345,7 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
{
struct stat st;
struct cache_entry *updated;
int changed, size;
int changed;
int refresh = options & CE_MATCH_REFRESH;
int ignore_valid = options & CE_MATCH_IGNORE_VALID;
int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
@ -1349,8 +1425,7 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
return NULL;
}
size = ce_size(ce);
updated = xmalloc(size);
updated = make_empty_cache_entry(istate, ce_namelen(ce));
copy_cache_entry(updated, ce);
memcpy(updated->name, ce->name, ce->ce_namelen + 1);
fill_stat_cache_info(updated, &st);
@ -1474,10 +1549,11 @@ int refresh_index(struct index_state *istate, unsigned int flags,
return has_errors;
}
struct cache_entry *refresh_cache_entry(struct cache_entry *ce,
unsigned int options)
struct cache_entry *refresh_cache_entry(struct index_state *istate,
struct cache_entry *ce,
unsigned int options)
{
return refresh_cache_ent(&the_index, ce, options, NULL, NULL);
return refresh_cache_ent(istate, ce, options, NULL, NULL);
}
@ -1635,12 +1711,13 @@ int read_index(struct index_state *istate)
return read_index_from(istate, get_index_file(), get_git_dir());
}
static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool,
struct ondisk_cache_entry *ondisk,
unsigned int flags,
const char *name,
size_t len)
{
struct cache_entry *ce = xmalloc(cache_entry_size(len));
struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len);
ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
@ -1682,7 +1759,8 @@ static unsigned long expand_name_field(struct strbuf *name, const char *cp_)
return (const char *)ep + 1 - cp_;
}
static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
struct ondisk_cache_entry *ondisk,
unsigned long *ent_size,
struct strbuf *previous_name)
{
@ -1713,13 +1791,13 @@ static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
/* v3 and earlier */
if (len == CE_NAMEMASK)
len = strlen(name);
ce = cache_entry_from_ondisk(ondisk, flags, name, len);
ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len);
*ent_size = ondisk_ce_size(ce);
} else {
unsigned long consumed;
consumed = expand_name_field(previous_name, name);
ce = cache_entry_from_ondisk(ondisk, flags,
ce = cache_entry_from_ondisk(mem_pool, ondisk, flags,
previous_name->buf,
previous_name->len);
@ -1793,6 +1871,22 @@ static void post_read_index_from(struct index_state *istate)
tweak_fsmonitor(istate);
}
static size_t estimate_cache_size_from_compressed(unsigned int entries)
{
return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
}
static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
{
long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
/*
* Account for potential alignment differences.
*/
per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
return ondisk_size + entries * per_entry;
}
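/*
 * Worked example (illustrative numbers): a V4 index with 100,000
 * entries reserves roughly 100,000 * (sizeof(struct cache_entry) + 80)
 * bytes up front, while a V2/V3 index starts from the on-disk size and
 * adds only the per-entry in-memory/on-disk delta. Either way the
 * first block should cover the whole index in the common case, and the
 * pool simply grows if the estimate falls short.
 */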
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
@ -1839,10 +1933,15 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
istate->initialized = 1;
if (istate->version == 4)
if (istate->version == 4) {
previous_name = &previous_name_buf;
else
mem_pool_init(&istate->ce_mem_pool,
estimate_cache_size_from_compressed(istate->cache_nr));
} else {
previous_name = NULL;
mem_pool_init(&istate->ce_mem_pool,
estimate_cache_size(mmap_size, istate->cache_nr));
}
src_offset = sizeof(*hdr);
for (i = 0; i < istate->cache_nr; i++) {
@ -1851,7 +1950,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
unsigned long consumed;
disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
ce = create_from_disk(disk_ce, &consumed, previous_name);
ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name);
set_index_entry(istate, i, ce);
src_offset += consumed;
@ -1948,17 +2047,15 @@ int is_index_unborn(struct index_state *istate)
int discard_index(struct index_state *istate)
{
int i;
/*
* Cache entries in istate->cache[] should have been allocated
* from the memory pool associated with this index, or from an
* associated split_index. There is no need to free individual
* cache entries. validate_cache_entries can detect when this
* assertion does not hold.
*/
validate_cache_entries(istate);
for (i = 0; i < istate->cache_nr; i++) {
if (istate->cache[i]->index &&
istate->split_index &&
istate->split_index->base &&
istate->cache[i]->index <= istate->split_index->base->cache_nr &&
istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1])
continue;
free(istate->cache[i]);
}
resolve_undo_clear_index(istate);
istate->cache_nr = 0;
istate->cache_changed = 0;
@ -1972,9 +2069,47 @@ int discard_index(struct index_state *istate)
discard_split_index(istate);
free_untracked_cache(istate->untracked);
istate->untracked = NULL;
if (istate->ce_mem_pool) {
mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
istate->ce_mem_pool = NULL;
}
return 0;
}
/*
* Validate the cache entries of this index.
* All cache entries associated with this index
* should have been allocated by the memory pool
* associated with this index, or by a referenced
* split index.
*/
void validate_cache_entries(const struct index_state *istate)
{
int i;
if (!should_validate_cache_entries() || !istate || !istate->initialized)
return;
for (i = 0; i < istate->cache_nr; i++) {
if (!istate) {
die("internal error: cache entry is not allocated from expected memory pool");
} else if (!istate->ce_mem_pool ||
!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
if (!istate->split_index ||
!istate->split_index->base ||
!istate->split_index->base->ce_mem_pool ||
!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
die("internal error: cache entry is not allocated from expected memory pool");
}
}
}
if (istate->split_index)
validate_cache_entries(istate->split_index->base);
}
int unmerged_index(const struct index_state *istate)
{
int i;
@ -2647,14 +2782,13 @@ int read_index_unmerged(struct index_state *istate)
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
struct cache_entry *new_ce;
int size, len;
int len;
if (!ce_stage(ce))
continue;
unmerged = 1;
len = ce_namelen(ce);
size = cache_entry_size(len);
new_ce = xcalloc(1, size);
new_ce = make_empty_cache_entry(istate, len);
memcpy(new_ce->name, ce->name, len);
new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
new_ce->ce_namelen = len;
@ -2763,3 +2897,41 @@ void move_index_extensions(struct index_state *dst, struct index_state *src)
dst->untracked = src->untracked;
src->untracked = NULL;
}
struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
struct index_state *istate)
{
unsigned int size = ce_size(ce);
int mem_pool_allocated;
struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
mem_pool_allocated = new_entry->mem_pool_allocated;
memcpy(new_entry, ce, size);
new_entry->mem_pool_allocated = mem_pool_allocated;
return new_entry;
}
void discard_cache_entry(struct cache_entry *ce)
{
if (ce && should_validate_cache_entries())
memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
if (ce && ce->mem_pool_allocated)
return;
free(ce);
}
int should_validate_cache_entries(void)
{
static int validate_index_cache_entries = -1;
if (validate_index_cache_entries < 0) {
if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
validate_index_cache_entries = 1;
else
validate_index_cache_entries = 0;
}
return validate_index_cache_entries;
}
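
discard_cache_entry() above is deliberately asymmetric: transient
(heap-backed) entries are freed, pool-backed entries are left to die
with their pool, and with GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES set
both are first poisoned with 0xCD so stale pointers fail loudly. A
sketch of the kind of caller bug the poisoning exposes (hypothetical):

	struct cache_entry *ce = make_empty_cache_entry(&the_index, len);
	discard_cache_entry(ce);
	/* with validation enabled, a later read such as ce->ce_mode now
	 * yields the 0xCDCDCDCD poison pattern instead of plausible stale
	 * data, making the use-after-discard easy to spot */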

resolve-undo.c

@ -146,7 +146,9 @@ int unmerge_index_entry_at(struct index_state *istate, int pos)
struct cache_entry *nce;
if (!ru->mode[i])
continue;
nce = make_cache_entry(ru->mode[i], ru->oid[i].hash,
nce = make_cache_entry(istate,
ru->mode[i],
&ru->oid[i],
name, i + 1, 0);
if (matched)
nce->ce_flags |= CE_MATCHED;

split-index.c

@ -73,16 +73,31 @@ void move_cache_to_base_index(struct index_state *istate)
int i;
/*
* do not delete old si->base, its index entries may be shared
* with istate->cache[]. Accept a bit of leaking here because
* this code is only used by short-lived update-index.
* If there was a previous base index, then transfer ownership of allocated
* entries to the parent index.
*/
if (si->base &&
si->base->ce_mem_pool) {
if (!istate->ce_mem_pool)
mem_pool_init(&istate->ce_mem_pool, 0);
mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
}
si->base = xcalloc(1, sizeof(*si->base));
si->base->version = istate->version;
/* zero timestamp disables racy test in ce_write_index() */
si->base->timestamp = istate->timestamp;
ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
si->base->cache_nr = istate->cache_nr;
/*
* The mem_pool needs to move with the allocated entries.
*/
si->base->ce_mem_pool = istate->ce_mem_pool;
istate->ce_mem_pool = NULL;
COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
mark_base_index_entries(si->base);
for (i = 0; i < si->base->cache_nr; i++)
@ -123,7 +138,7 @@ static void replace_entry(size_t pos, void *data)
src->ce_flags |= CE_UPDATE_IN_BASE;
src->ce_namelen = dst->ce_namelen;
copy_cache_entry(dst, src);
free(src);
discard_cache_entry(src);
si->nr_replacements++;
}
@ -224,7 +239,7 @@ void prepare_to_write_split_index(struct index_state *istate)
base->ce_flags = base_flags;
if (ret)
ce->ce_flags |= CE_UPDATE_IN_BASE;
free(base);
discard_cache_entry(base);
si->base->cache[ce->index - 1] = ce;
}
for (i = 0; i < si->base->cache_nr; i++) {
@ -301,7 +316,7 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce
ce == istate->split_index->base->cache[ce->index - 1])
ce->ce_flags |= CE_REMOVE;
else
free(ce);
discard_cache_entry(ce);
}
void replace_index_entry_in_base(struct index_state *istate,
@ -314,7 +329,7 @@ void replace_index_entry_in_base(struct index_state *istate,
old_entry->index <= istate->split_index->base->cache_nr) {
new_entry->index = old_entry->index;
if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
free(istate->split_index->base->cache[new_entry->index - 1]);
discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
istate->split_index->base->cache[new_entry->index - 1] = new_entry;
}
}
@ -331,12 +346,31 @@ void remove_split_index(struct index_state *istate)
{
if (istate->split_index) {
/*
* can't discard_split_index(&the_index); because that
* will destroy split_index->base->cache[], which may
* be shared with the_index.cache[]. So yeah we're
* leaking a bit here.
* When removing the split index, we need to move
* ownership of the mem_pool associated with the
* base index to the main index. There may be cache entries
* allocated from the base's memory pool that are shared with
* the_index.cache[].
*/
istate->split_index = NULL;
mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
/*
* The split index no longer owns the mem_pool backing
* its cache array. As we are discarding this index,
* mark the index as having no cache entries, so it
* will not attempt to clean up the cache entries or
* validate them.
*/
if (istate->split_index->base)
istate->split_index->base->cache_nr = 0;
/*
* We can discard the split index because its
* memory pool has been incorporated into the
* memory pool associated with the_index.
*/
discard_split_index(istate);
istate->cache_changed |= SOMETHING_CHANGED;
}
}
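
In short, the pool follows the cache array. Condensed from the code
above (fields abbreviated; this is a summary, not additional API):

	/* move_cache_to_base_index(): entries move to the base,
	 * so the pool backing them moves too */
	si->base->ce_mem_pool = istate->ce_mem_pool;
	istate->ce_mem_pool = NULL;

	/* remove_split_index(): the base dissolves back into the
	 * main index, and its pool is folded in with it */
	mem_pool_combine(istate->ce_mem_pool, si->base->ce_mem_pool);
	si->base->cache_nr = 0;	/* base no longer owns any entries */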

tree.c

@ -19,15 +19,13 @@ static int read_one_entry_opt(struct index_state *istate,
unsigned mode, int stage, int opt)
{
int len;
unsigned int size;
struct cache_entry *ce;
if (S_ISDIR(mode))
return READ_TREE_RECURSIVE;
len = strlen(pathname);
size = cache_entry_size(baselen + len);
ce = xcalloc(1, size);
ce = make_empty_cache_entry(istate, baselen + len);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(stage);

unpack-trees.c

@ -204,20 +204,11 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}
static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
unsigned int size = ce_size(ce);
struct cache_entry *new_entry = xmalloc(size);
memcpy(new_entry, ce, size);
return new_entry;
}
static void add_entry(struct unpack_trees_options *o,
const struct cache_entry *ce,
unsigned int set, unsigned int clear)
{
do_add_entry(o, dup_entry(ce), set, clear);
do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}
/*
@ -798,10 +789,17 @@ static int ce_in_traverse_path(const struct cache_entry *ce,
return (info->pathlen < ce_namelen(ce));
}
static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
static struct cache_entry *create_ce_entry(const struct traverse_info *info,
const struct name_entry *n,
int stage,
struct index_state *istate,
int is_transient)
{
int len = traverse_path_len(info, n);
struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
struct cache_entry *ce =
is_transient ?
make_empty_transient_cache_entry(len) :
make_empty_cache_entry(istate, len);
ce->ce_mode = create_ce_mode(n->mode);
ce->ce_flags = create_ce_flags(stage);
@ -847,7 +845,15 @@ static int unpack_nondirectories(int n, unsigned long mask,
stage = 3;
else
stage = 2;
src[i + o->merge] = create_ce_entry(info, names + i, stage);
/*
* If the merge bit is set, then the cache entries are
* discarded in the following block. In this case,
* construct "transient" cache_entries, as they are
* not stored in the index.  Otherwise, construct the
* cache entry using the index-aware logic.
*/
src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge);
}
if (o->merge) {
@ -856,7 +862,7 @@ static int unpack_nondirectories(int n, unsigned long mask,
for (i = 0; i < n; i++) {
struct cache_entry *ce = src[i + o->merge];
if (ce != o->df_conflict_entry)
free(ce);
discard_cache_entry(ce);
}
return rc;
}
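
Allocating the merge path's short-lived entries from o->result's pool
would pin their memory for the life of the result index even though
they are discarded a few lines later; hence the transient variant. The
decision, condensed from create_ce_entry() above:

	struct cache_entry *ce = o->merge ?
		make_empty_transient_cache_entry(len) :	/* heap; freed just above */
		make_empty_cache_entry(&o->result, len);	/* pool; lives with o->result */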
@ -1788,7 +1794,7 @@ static int merged_entry(const struct cache_entry *ce,
struct unpack_trees_options *o)
{
int update = CE_UPDATE;
struct cache_entry *merge = dup_entry(ce);
struct cache_entry *merge = dup_cache_entry(ce, &o->result);
if (!old) {
/*
@ -1808,7 +1814,7 @@ static int merged_entry(const struct cache_entry *ce,
if (verify_absent(merge,
ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
free(merge);
discard_cache_entry(merge);
return -1;
}
invalidate_ce_path(merge, o);
@ -1834,7 +1840,7 @@ static int merged_entry(const struct cache_entry *ce,
update = 0;
} else {
if (verify_uptodate(old, o)) {
free(merge);
discard_cache_entry(merge);
return -1;
}
/* Migrate old flags over */