git/builtin-read-tree.c


/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
*/
#define DBRT_DEBUG 1
#include "cache.h"
#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "builtin.h"
static struct object_list *trees = NULL;
static void reject_merge(struct cache_entry *ce)
{
die("Entry '%s' would be overwritten by merge. Cannot merge.",
ce->name);
}
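/*
 * Add the tree named by sha1 to the global list of trees to be
 * unpacked.  Tags and commits are peeled to the underlying tree;
 * returns -1 if the object cannot be parsed as a tree.
 */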
static int list_tree(unsigned char *sha1)
{
struct tree *tree = parse_tree_indirect(sha1);
if (!tree)
return -1;
object_list_append(&tree->object, &trees);
return 0;
}
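/*
 * Two cache entries are "the same" when both are missing, or when
 * both are present and record the same mode and object name.
 */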
static int same(struct cache_entry *a, struct cache_entry *b)
{
if (!!a != !!b)
return 0;
if (!a && !b)
return 1;
return a->ce_mode == b->ce_mode &&
!memcmp(a->sha1, b->sha1, 20);
}
/*
* When a CE gets turned into an unmerged entry, we
* want it to be up-to-date
*/
static void verify_uptodate(struct cache_entry *ce,
struct unpack_trees_options *o)
{
struct stat st;
if (o->index_only || o->reset)
return;
	if (!lstat(ce->name, &st)) {
unsigned changed = ce_match_stat(ce, &st, 1);
if (!changed)
return;
errno = 0;
}
if (o->reset) {
ce->ce_flags |= htons(CE_UPDATE);
return;
}
if (errno == ENOENT)
return;
die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}
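/*
 * Invalidate any cached cache-tree data covering this entry's path,
 * since the entry is about to change.
 */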
static void invalidate_ce_path(struct cache_entry *ce)
{
if (ce)
cache_tree_invalidate_path(active_cache_tree, ce->name);
}
/*
* We do not want to remove or overwrite a working tree file that
* is not tracked.
*/
static void verify_absent(const char *path, const char *action,
struct unpack_trees_options *o)
{
struct stat st;
if (o->index_only || o->reset || !o->update)
return;
if (!lstat(path, &st))
die("Untracked working tree file '%s' "
"would be %s by merge.", path, action);
}
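/*
 * Record "merge" as the merge result.  If the existing index entry is
 * identical, reuse it to keep its up-to-date stat information (this
 * also drops the CE_UPDATE flag); otherwise the old entry must be up
 * to date, or the path absent from the working tree, before it is
 * replaced.
 */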
static int merged_entry(struct cache_entry *merge, struct cache_entry *old,
struct unpack_trees_options *o)
{
merge->ce_flags |= htons(CE_UPDATE);
if (old) {
/*
* See if we can re-use the old CE directly?
* That way we get the uptodate stat info.
*
* This also removes the UPDATE flag on
* a match.
*/
if (same(old, merge)) {
*merge = *old;
} else {
verify_uptodate(old, o);
invalidate_ce_path(old);
}
}
else {
verify_absent(merge->name, "overwritten", o);
invalidate_ce_path(merge);
}
merge->ce_flags &= ~htons(CE_STAGEMASK);
add_cache_entry(merge, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
return 1;
}
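/*
 * Record that the path goes away in the merge result: the entry is
 * re-added with mode 0, which marks it for removal.  The old copy
 * must be up to date, or the path absent from the working tree.
 */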
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old,
struct unpack_trees_options *o)
{
if (old)
verify_uptodate(old, o);
else
verify_absent(ce->name, "removed", o);
ce->ce_mode = 0;
add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
invalidate_ce_path(ce);
return 1;
}
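/* Carry the existing entry over into the result unchanged. */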
static int keep_entry(struct cache_entry *ce)
{
add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
return 1;
}
#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
const char *label, const struct cache_entry *ce)
{
if (!ce)
fprintf(o, "%s (missing)\n", label);
else
fprintf(o, "%s%06o %s %d\t%s\n",
label,
ntohl(ce->ce_mode),
sha1_to_hex(ce->sha1),
ce_stage(ce),
ce->name);
}
#endif
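/*
 * Three-way merge.
 *
 * stages[0] is the current index entry, stages[1 .. head_idx-1] are
 * the merge ancestor(s), stages[head_idx] is our head, and
 * stages[head_idx + 1] is the remote head being merged in.
 */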
static int threeway_merge(struct cache_entry **stages,
struct unpack_trees_options *o)
{
struct cache_entry *index;
struct cache_entry *head;
struct cache_entry *remote = stages[o->head_idx + 1];
int count;
int head_match = 0;
int remote_match = 0;
const char *path = NULL;
int df_conflict_head = 0;
int df_conflict_remote = 0;
int any_anc_missing = 0;
int no_anc_exists = 1;
int i;
for (i = 1; i < o->head_idx; i++) {
if (!stages[i])
any_anc_missing = 1;
else {
if (!path)
path = stages[i]->name;
no_anc_exists = 0;
}
}
index = stages[0];
head = stages[o->head_idx];
if (head == &o->df_conflict_entry) {
df_conflict_head = 1;
head = NULL;
}
if (remote == &o->df_conflict_entry) {
df_conflict_remote = 1;
remote = NULL;
}
if (!path && index)
path = index->name;
if (!path && head)
path = head->name;
if (!path && remote)
path = remote->name;
/* First, if there's a #16 situation, note that to prevent #13
* and #14.
*/
if (!same(remote, head)) {
for (i = 1; i < o->head_idx; i++) {
if (same(stages[i], head)) {
head_match = i;
}
if (same(stages[i], remote)) {
remote_match = i;
}
}
}
/* We start with cases where the index is allowed to match
* something other than the head: #14(ALT) and #2ALT, where it
* is permitted to match the result instead.
*/
/* #14, #14ALT, #2ALT */
if (remote && !df_conflict_head && head_match && !remote_match) {
if (index && !same(index, remote) && !same(index, head))
reject_merge(index);
return merged_entry(remote, index, o);
}
/*
* If we have an entry in the index cache, then we want to
* make sure that it matches head.
*/
if (index && !same(index, head)) {
reject_merge(index);
}
if (head) {
/* #5ALT, #15 */
if (same(head, remote))
return merged_entry(head, index, o);
/* #13, #3ALT */
if (!df_conflict_remote && remote_match && !head_match)
return merged_entry(head, index, o);
}
/* #1 */
if (!head && !remote && any_anc_missing)
return 0;
/* Under the new "aggressive" rule, we resolve mostly trivial
* cases that we historically had git-merge-one-file resolve.
*/
if (o->aggressive) {
int head_deleted = !head && !df_conflict_head;
int remote_deleted = !remote && !df_conflict_remote;
/*
* Deleted in both.
* Deleted in one and unchanged in the other.
*/
if ((head_deleted && remote_deleted) ||
(head_deleted && remote && remote_match) ||
(remote_deleted && head && head_match)) {
if (index)
return deleted_entry(index, index, o);
else if (path)
verify_absent(path, "removed", o);
return 0;
}
/*
* Added in both, identically.
*/
if (no_anc_exists && head && remote && same(head, remote))
return merged_entry(head, index, o);
}
/* Below are "no merge" cases, which require that the index be
* up-to-date to avoid the files getting overwritten with
* conflict resolution files.
*/
if (index) {
verify_uptodate(index, o);
}
else if (path)
verify_absent(path, "overwritten", o);
o->nontrivial_merge = 1;
/* #2, #3, #4, #6, #7, #9, #11. */
count = 0;
if (!head_match || !remote_match) {
for (i = 1; i < o->head_idx; i++) {
if (stages[i]) {
keep_entry(stages[i]);
count++;
break;
}
}
}
#if DBRT_DEBUG
else {
fprintf(stderr, "read-tree: warning #16 detected\n");
show_stage_entry(stderr, "head ", stages[head_match]);
show_stage_entry(stderr, "remote ", stages[remote_match]);
}
#endif
if (head) { count += keep_entry(head); }
if (remote) { count += keep_entry(remote); }
return count;
}
/*
* Two-way merge.
*
* The rule is to "carry forward" what is in the index without losing
* information across a "fast forward", favoring a successful merge
* over a merge failure when it makes sense. For details of the
* "carry forward" rule, please see <Documentation/git-read-tree.txt>.
*
*/
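/*
 * src[0] is the current index entry, src[1] the tree being switched
 * away from ("old head") and src[2] the tree being switched to
 * ("new head").
 */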
static int twoway_merge(struct cache_entry **src,
struct unpack_trees_options *o)
{
struct cache_entry *current = src[0];
struct cache_entry *oldtree = src[1], *newtree = src[2];
if (o->merge_size != 2)
return error("Cannot do a twoway merge of %d trees",
o->merge_size);
if (current) {
if ((!oldtree && !newtree) || /* 4 and 5 */
(!oldtree && newtree &&
same(current, newtree)) || /* 6 and 7 */
(oldtree && newtree &&
same(oldtree, newtree)) || /* 14 and 15 */
(oldtree && newtree &&
!same(oldtree, newtree) && /* 18 and 19*/
same(current, newtree))) {
return keep_entry(current);
}
else if (oldtree && !newtree && same(current, oldtree)) {
/* 10 or 11 */
return deleted_entry(oldtree, current, o);
}
else if (oldtree && newtree &&
same(current, oldtree) && !same(current, newtree)) {
/* 20 or 21 */
return merged_entry(newtree, current, o);
}
else {
/* all other failures */
if (oldtree)
reject_merge(oldtree);
if (current)
reject_merge(current);
if (newtree)
reject_merge(newtree);
return -1;
}
}
else if (newtree)
return merged_entry(newtree, current, o);
else
return deleted_entry(oldtree, current, o);
}
/*
* Bind merge.
*
* Keep the index entries at stage0, collapse stage1 but make sure
* stage0 does not have anything there.
*/
static int bind_merge(struct cache_entry **src,
struct unpack_trees_options *o)
{
struct cache_entry *old = src[0];
struct cache_entry *a = src[1];
if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
o->merge_size);
if (a && old)
die("Entry '%s' overlaps. Cannot bind.", a->name);
if (!a)
return keep_entry(old);
else
return merged_entry(a, NULL, o);
}
/*
* One-way merge.
*
* The rule is:
* - take the stat information from stage0, take the data from stage1
*/
static int oneway_merge(struct cache_entry **src,
struct unpack_trees_options *o)
{
struct cache_entry *old = src[0];
struct cache_entry *a = src[1];
if (o->merge_size != 1)
return error("Cannot do a oneway merge of %d trees",
o->merge_size);
if (!a)
return deleted_entry(old, old, o);
if (old && same(old, a)) {
if (o->reset) {
struct stat st;
if (lstat(old->name, &st) ||
ce_match_stat(old, &st, 1))
old->ce_flags |= htons(CE_UPDATE);
}
return keep_entry(old);
}
return merged_entry(a, old, o);
}
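/*
 * Read the index, dropping unmerged (higher stage) entries.  For each
 * path that had unmerged entries, a single entry is kept, demoted to
 * stage 0 with mode 0, and its cache-tree data is invalidated.
 * Returns true if any unmerged entries were found.
 */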
static int read_cache_unmerged(void)
{
int i;
struct cache_entry **dst;
struct cache_entry *last = NULL;
read_cache();
dst = active_cache;
for (i = 0; i < active_nr; i++) {
struct cache_entry *ce = active_cache[i];
if (ce_stage(ce)) {
if (last && !strcmp(ce->name, last->name))
continue;
invalidate_ce_path(ce);
last = ce;
ce->ce_mode = 0;
ce->ce_flags &= ~htons(CE_STAGEMASK);
}
*dst++ = ce;
}
active_nr = dst - active_cache;
return !!last;
}
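/*
 * Recursively fill in a cache-tree node from the given tree object,
 * counting the index entries each subtree covers.
 */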
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
struct tree_desc desc;
struct name_entry entry;
int cnt;
memcpy(it->sha1, tree->object.sha1, 20);
desc.buf = tree->buffer;
desc.size = tree->size;
cnt = 0;
while (tree_entry(&desc, &entry)) {
if (!S_ISDIR(entry.mode))
cnt++;
else {
struct cache_tree_sub *sub;
struct tree *subtree = lookup_tree(entry.sha1);
if (!subtree->object.parsed)
parse_tree(subtree);
sub = cache_tree_sub(it, entry.path);
sub->cache_tree = cache_tree();
prime_cache_tree_rec(sub->cache_tree, subtree);
cnt += sub->cache_tree->entry_count;
}
}
it->entry_count = cnt;
}
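/*
 * Rebuild the cache-tree for the whole index from the single tree
 * that was just read.
 */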
static void prime_cache_tree(void)
{
struct tree *tree = (struct tree *)trees->item;
if (!tree)
return;
active_cache_tree = cache_tree();
prime_cache_tree_rec(active_cache_tree, tree);
}
static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";
static struct lock_file lock_file;
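/*
 * Illustrative invocations (a sketch of common usage, not an
 * exhaustive list; see the git-read-tree documentation):
 *
 *   git-read-tree <tree-ish>                     read one tree into the index
 *   git-read-tree -m <tree-ish>                  one-way merge of a single tree
 *   git-read-tree -m -u <old-tree> <new-tree>    two-way merge, updating the
 *                                                working tree
 *   git-read-tree -m <ancestor> <ours> <theirs>  three-way merge
 *   git-read-tree --prefix=sub/ <tree-ish>       bind merge under "sub/"
 */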
int cmd_read_tree(int argc, const char **argv, const char *prefix)
{
int i, newfd, stage = 0;
unsigned char sha1[20];
struct unpack_trees_options opts;
memset(&opts, 0, sizeof(opts));
opts.head_idx = -1;
setup_git_directory();
git_config(git_default_config);
newfd = hold_lock_file_for_update(&lock_file, get_index_file());
if (newfd < 0)
die("unable to create new index file");
for (i = 1; i < argc; i++) {
const char *arg = argv[i];
/* "-u" means "update", meaning that a merge will update
* the working tree.
*/
if (!strcmp(arg, "-u")) {
opts.update = 1;
continue;
}
if (!strcmp(arg, "-v")) {
opts.verbose_update = 1;
continue;
}
/* "-i" means "index only", meaning that a merge will
* not even look at the working tree.
*/
if (!strcmp(arg, "-i")) {
opts.index_only = 1;
continue;
}
/* "--prefix=<subdirectory>/" means keep the current index
* entries and put the entries from the tree under the
* given subdirectory.
*/
if (!strncmp(arg, "--prefix=", 9)) {
if (stage || opts.merge || opts.prefix)
usage(read_tree_usage);
opts.prefix = arg + 9;
opts.merge = 1;
stage = 1;
if (read_cache_unmerged())
die("you need to resolve your current index first");
continue;
}
/* This differs from "-m" in that we'll silently ignore
* unmerged entries and overwrite working tree files that
* correspond to them.
*/
if (!strcmp(arg, "--reset")) {
if (stage || opts.merge || opts.prefix)
usage(read_tree_usage);
opts.reset = 1;
opts.merge = 1;
stage = 1;
read_cache_unmerged();
continue;
}
if (!strcmp(arg, "--trivial")) {
opts.trivial_merges_only = 1;
continue;
}
if (!strcmp(arg, "--aggressive")) {
opts.aggressive = 1;
continue;
}
/* "-m" stands for "merge", meaning we start in stage 1 */
if (!strcmp(arg, "-m")) {
if (stage || opts.merge || opts.prefix)
usage(read_tree_usage);
if (read_cache_unmerged())
die("you need to resolve your current index first");
stage = 1;
opts.merge = 1;
continue;
}
/* using -u and -i at the same time makes no sense */
if (1 < opts.index_only + opts.update)
usage(read_tree_usage);
if (get_sha1(arg, sha1))
die("Not a valid object name %s", arg);
if (list_tree(sha1) < 0)
die("failed to unpack tree object %s", arg);
stage++;
}
if ((opts.update||opts.index_only) && !opts.merge)
usage(read_tree_usage);
if (opts.prefix) {
int pfxlen = strlen(opts.prefix);
int pos;
if (opts.prefix[pfxlen-1] != '/')
die("prefix must end with /");
if (stage != 2)
die("binding merge takes only one tree");
pos = cache_name_pos(opts.prefix, pfxlen);
if (0 <= pos)
die("corrupt index file");
pos = -pos-1;
if (pos < active_nr &&
!strncmp(active_cache[pos]->name, opts.prefix, pfxlen))
die("subdirectory '%s' already exists.", opts.prefix);
pos = cache_name_pos(opts.prefix, pfxlen-1);
if (0 <= pos)
die("file '%.*s' already exists.",
pfxlen-1, opts.prefix);
}
if (opts.merge) {
if (stage < 2)
die("just how do you expect me to merge %d trees?", stage-1);
switch (stage - 1) {
case 1:
opts.fn = opts.prefix ? bind_merge : oneway_merge;
break;
case 2:
opts.fn = twoway_merge;
break;
case 3:
default:
opts.fn = threeway_merge;
cache_tree_free(&active_cache_tree);
break;
}
if (stage - 1 >= 3)
opts.head_idx = stage - 2;
else
opts.head_idx = 1;
}
unpack_trees(trees, &opts);
/*
* When reading only one tree (either the most basic form,
* "-m ent" or "--reset ent" form), we can obtain a fully
* valid cache-tree because the index must match exactly
* what came from the tree.
*/
if (trees && trees->item && !opts.prefix && (!opts.merge || (stage == 2))) {
cache_tree_free(&active_cache_tree);
prime_cache_tree();
}
if (write_cache(newfd, active_cache, active_nr) ||
close(newfd) || commit_lock_file(&lock_file))
die("unable to write new index file");
return 0;
}