1
0
Fork 0
mirror of https://github.com/git/git.git synced 2024-06-13 14:16:19 +02:00

midx: add pack_perm to write_midx_context

In an effort to align write_midx_internal() with the chunk-format API,
continue to group necessary data into "struct write_midx_context". This
change moves the "uint32_t *pack_perm" array and the "large_offsets_needed"
bit field into the context.

Update write_midx_object_offsets() to match chunk_write_fn.

Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
This commit is contained in:
Derrick Stolee 2021-02-18 14:07:29 +00:00 committed by Junio C Hamano
parent 31bda9a237
commit 7a3ada1192

40
midx.c
View File

@@ -461,6 +461,9 @@ struct write_midx_context {
struct pack_midx_entry *entries;
uint32_t entries_nr;
uint32_t *pack_perm;
unsigned large_offsets_needed:1;
};
static void add_pack_to_midx(const char *full_path, size_t full_path_len,
@@ -736,27 +739,27 @@ static size_t write_midx_oid_lookup(struct hashfile *f,
return written;
}
static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_needed,
uint32_t *perm,
struct pack_midx_entry *objects, uint32_t nr_objects)
static size_t write_midx_object_offsets(struct hashfile *f,
void *data)
{
struct pack_midx_entry *list = objects;
struct write_midx_context *ctx = data;
struct pack_midx_entry *list = ctx->entries;
uint32_t i, nr_large_offset = 0;
size_t written = 0;
for (i = 0; i < nr_objects; i++) {
for (i = 0; i < ctx->entries_nr; i++) {
struct pack_midx_entry *obj = list++;
if (perm[obj->pack_int_id] == PACK_EXPIRED)
if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
BUG("object %s is in an expired pack with int-id %d",
oid_to_hex(&obj->oid),
obj->pack_int_id);
hashwrite_be32(f, perm[obj->pack_int_id]);
hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
if (large_offset_needed && obj->offset >> 31)
if (ctx->large_offsets_needed && obj->offset >> 31)
hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
else if (!large_offset_needed && obj->offset >> 32)
else if (!ctx->large_offsets_needed && obj->offset >> 32)
BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
oid_to_hex(&obj->oid),
obj->offset);
@@ -805,13 +808,11 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
struct hashfile *f = NULL;
struct lock_file lk;
struct write_midx_context ctx = { 0 };
uint32_t *pack_perm = NULL;
uint64_t written = 0;
uint32_t chunk_ids[MIDX_MAX_CHUNKS + 1];
uint64_t chunk_offsets[MIDX_MAX_CHUNKS + 1];
uint32_t num_large_offsets = 0;
struct progress *progress = NULL;
int large_offsets_needed = 0;
int pack_name_concat_len = 0;
int dropped_packs = 0;
int result = 0;
@@ -857,11 +858,12 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr);
ctx.large_offsets_needed = 0;
for (i = 0; i < ctx.entries_nr; i++) {
if (ctx.entries[i].offset > 0x7fffffff)
num_large_offsets++;
if (ctx.entries[i].offset > 0xffffffff)
large_offsets_needed = 1;
ctx.large_offsets_needed = 1;
}
QSORT(ctx.info, ctx.nr, pack_info_compare);
@@ -900,13 +902,13 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
*
* pack_perm[old_id] = new_id
*/
ALLOC_ARRAY(pack_perm, ctx.nr);
ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
for (i = 0; i < ctx.nr; i++) {
if (ctx.info[i].expired) {
dropped_packs++;
pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
} else {
pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
}
}
@@ -927,7 +929,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
close_midx(ctx.m);
cur_chunk = 0;
num_chunks = large_offsets_needed ? 5 : 4;
num_chunks = ctx.large_offsets_needed ? 5 : 4;
if (ctx.nr - dropped_packs == 0) {
error(_("no pack files to index."));
@@ -954,7 +956,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
cur_chunk++;
chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + ctx.entries_nr * MIDX_CHUNK_OFFSET_WIDTH;
if (large_offsets_needed) {
if (ctx.large_offsets_needed) {
chunk_ids[cur_chunk] = MIDX_CHUNKID_LARGEOFFSETS;
cur_chunk++;
@@ -1004,7 +1006,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
break;
case MIDX_CHUNKID_OBJECTOFFSETS:
written += write_midx_object_offsets(f, large_offsets_needed, pack_perm, ctx.entries, ctx.entries_nr);
written += write_midx_object_offsets(f, &ctx);
break;
case MIDX_CHUNKID_LARGEOFFSETS:
@@ -1039,7 +1041,7 @@ static int write_midx_internal(const char *object_dir, struct multi_pack_index *
free(ctx.info);
free(ctx.entries);
free(pack_perm);
free(ctx.pack_perm);
free(midx_name);
return result;
}