
Merge branch 'tb/object-access-overflow-protection'

Various offset computations in the code that accesses the packfiles
and other data in the object layer have been hardened against
arithmetic overflow, especially on 32-bit systems.

* tb/object-access-overflow-protection:
  commit-graph.c: prevent overflow in `verify_commit_graph()`
  commit-graph.c: prevent overflow in `write_commit_graph()`
  commit-graph.c: prevent overflow in `merge_commit_graph()`
  commit-graph.c: prevent overflow in `split_graph_merge_strategy()`
  commit-graph.c: prevent overflow in `load_tree_for_commit()`
  commit-graph.c: prevent overflow in `fill_commit_in_graph()`
  commit-graph.c: prevent overflow in `fill_commit_graph_info()`
  commit-graph.c: prevent overflow in `load_oid_from_graph()`
  commit-graph.c: prevent overflow in add_graph_to_chain()
  commit-graph.c: prevent overflow in `write_commit_graph_file()`
  pack-bitmap.c: ensure that eindex lookups don't overflow
  midx.c: prevent overflow in `fill_included_packs_batch()`
  midx.c: prevent overflow in `write_midx_internal()`
  midx.c: store `nr`, `alloc` variables as `size_t`'s
  midx.c: prevent overflow in `nth_midxed_offset()`
  midx.c: prevent overflow in `nth_midxed_object_oid()`
  midx.c: use `size_t`'s for fanout nr and alloc
  packfile.c: use checked arithmetic in `nth_packed_object_offset()`
  packfile.c: prevent overflow in `load_idx()`
  packfile.c: prevent overflow in `nth_packed_object_id()`
Junio C Hamano 2023-07-25 12:05:23 -07:00
commit 4488bb3bed
5 changed files with 80 additions and 54 deletions
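For context on the pattern used throughout these patches: `st_add()` and `st_mult()` are Git's overflow-checked size_t arithmetic helpers (declared in git-compat-util.h), and `unsigned_add_overflows()` is the predicate they build on; instead of silently wrapping, they abort with a "size_t overflow" error. The code below is a minimal, self-contained sketch of that idea, not a verbatim copy of Git's definitions (the real helpers are type-generic and report errors through die()):

/*
 * Minimal sketch of overflow-checked size_t arithmetic in the spirit of
 * Git's st_add()/st_mult() helpers. Not a verbatim copy of Git's
 * definitions: the real helpers are type-generic and call die(); this
 * demo fixes the operand type to size_t and uses a local fatal path.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Would a + b wrap around for size_t operands? */
static inline int unsigned_add_overflows(size_t a, size_t b)
{
	return b > SIZE_MAX - a;
}

/* Would a * b wrap around for size_t operands? */
static inline int unsigned_mult_overflows(size_t a, size_t b)
{
	return b && a > SIZE_MAX / b;
}

/* Checked addition: fail loudly instead of silently wrapping. */
static inline size_t st_add(size_t a, size_t b)
{
	if (unsigned_add_overflows(a, b)) {
		fprintf(stderr, "fatal: size_t overflow: %"PRIuMAX" + %"PRIuMAX"\n",
			(uintmax_t)a, (uintmax_t)b);
		exit(128);
	}
	return a + b;
}

/* Checked multiplication, used for index offsets in the diffs below. */
static inline size_t st_mult(size_t a, size_t b)
{
	if (unsigned_mult_overflows(a, b)) {
		fprintf(stderr, "fatal: size_t overflow: %"PRIuMAX" * %"PRIuMAX"\n",
			(uintmax_t)a, (uintmax_t)b);
		exit(128);
	}
	return a * b;
}

int main(void)
{
	/* A pack-index style offset, mirroring the load_idx() change below. */
	size_t nr = 1000000, hashsz = 32;
	size_t crc_offset = st_add(8 + 4 * 256, st_mult(nr, hashsz));

	printf("crc_offset = %zu\n", crc_offset);
	return 0;
}

With helpers like this, a computation such as nr * hashsz fails loudly when it would exceed SIZE_MAX on a 32-bit platform, rather than producing a small, wrong offset.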

commit-graph.c

@@ -480,7 +480,7 @@ static int add_graph_to_chain(struct commit_graph *g,
 		if (!cur_g ||
 		    !oideq(&oids[n], &cur_g->oid) ||
-		    !hasheq(oids[n].hash, g->chunk_base_graphs + g->hash_len * n)) {
+		    !hasheq(oids[n].hash, g->chunk_base_graphs + st_mult(g->hash_len, n))) {
 			warning(_("commit-graph chain does not match"));
 			return 0;
 		}
@@ -490,8 +490,15 @@ static int add_graph_to_chain(struct commit_graph *g,
 	g->base_graph = chain;
-	if (chain)
+	if (chain) {
+		if (unsigned_add_overflows(chain->num_commits,
+					   chain->num_commits_in_base)) {
+			warning(_("commit count in base graph too high: %"PRIuMAX),
+				(uintmax_t)chain->num_commits_in_base);
+			return 0;
+		}
 		g->num_commits_in_base = chain->num_commits + chain->num_commits_in_base;
+	}
 	return 1;
 }
@@ -745,7 +752,7 @@ static void load_oid_from_graph(struct commit_graph *g,
 	lex_index = pos - g->num_commits_in_base;
-	oidread(oid, g->chunk_oid_lookup + g->hash_len * lex_index);
+	oidread(oid, g->chunk_oid_lookup + st_mult(g->hash_len, lex_index));
 }
 
 static struct commit_list **insert_parent_or_die(struct repository *r,
@@ -781,7 +788,7 @@ static void fill_commit_graph_info(struct commit *item, struct commit_graph *g,
 		die(_("invalid commit position. commit-graph is likely corrupt"));
 	lex_index = pos - g->num_commits_in_base;
-	commit_data = g->chunk_commit_data + GRAPH_DATA_WIDTH * lex_index;
+	commit_data = g->chunk_commit_data + st_mult(GRAPH_DATA_WIDTH, lex_index);
 	graph_data = commit_graph_data_at(item);
 	graph_data->graph_pos = pos;
@@ -791,14 +798,14 @@ static void fill_commit_graph_info(struct commit *item, struct commit_graph *g,
 	item->date = (timestamp_t)((date_high << 32) | date_low);
 	if (g->read_generation_data) {
-		offset = (timestamp_t)get_be32(g->chunk_generation_data + sizeof(uint32_t) * lex_index);
+		offset = (timestamp_t)get_be32(g->chunk_generation_data + st_mult(sizeof(uint32_t), lex_index));
 		if (offset & CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW) {
 			if (!g->chunk_generation_data_overflow)
 				die(_("commit-graph requires overflow generation data but has none"));
 			offset_pos = offset ^ CORRECTED_COMMIT_DATE_OFFSET_OVERFLOW;
-			graph_data->generation = item->date + get_be64(g->chunk_generation_data_overflow + 8 * offset_pos);
+			graph_data->generation = item->date + get_be64(g->chunk_generation_data_overflow + st_mult(8, offset_pos));
 		} else
 			graph_data->generation = item->date + offset;
 	} else
@@ -829,7 +836,7 @@ static int fill_commit_in_graph(struct repository *r,
 	fill_commit_graph_info(item, g, pos);
 	lex_index = pos - g->num_commits_in_base;
-	commit_data = g->chunk_commit_data + (g->hash_len + 16) * lex_index;
+	commit_data = g->chunk_commit_data + st_mult(g->hash_len + 16, lex_index);
 	item->object.parsed = 1;
@@ -851,7 +858,7 @@ static int fill_commit_in_graph(struct repository *r,
 	}
 	parent_data_ptr = (uint32_t*)(g->chunk_extra_edges +
-			  4 * (uint64_t)(edge_value & GRAPH_EDGE_LAST_MASK));
+			  st_mult(4, edge_value & GRAPH_EDGE_LAST_MASK));
 	do {
 		edge_value = get_be32(parent_data_ptr);
 		pptr = insert_parent_or_die(r, g,
@@ -971,7 +978,7 @@ static struct tree *load_tree_for_commit(struct repository *r,
 		g = g->base_graph;
 	commit_data = g->chunk_commit_data +
-		      GRAPH_DATA_WIDTH * (graph_pos - g->num_commits_in_base);
+		      st_mult(GRAPH_DATA_WIDTH, graph_pos - g->num_commits_in_base);
 	oidread(&oid, commit_data);
 	set_commit_tree(c, lookup_tree(r, &oid));
@@ -1951,35 +1958,35 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
 	add_chunk(cf, GRAPH_CHUNKID_OIDFANOUT, GRAPH_FANOUT_SIZE,
 		  write_graph_chunk_fanout);
-	add_chunk(cf, GRAPH_CHUNKID_OIDLOOKUP, hashsz * ctx->commits.nr,
+	add_chunk(cf, GRAPH_CHUNKID_OIDLOOKUP, st_mult(hashsz, ctx->commits.nr),
 		  write_graph_chunk_oids);
-	add_chunk(cf, GRAPH_CHUNKID_DATA, (hashsz + 16) * ctx->commits.nr,
+	add_chunk(cf, GRAPH_CHUNKID_DATA, st_mult(hashsz + 16, ctx->commits.nr),
 		  write_graph_chunk_data);
 	if (ctx->write_generation_data)
 		add_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA,
-			  sizeof(uint32_t) * ctx->commits.nr,
+			  st_mult(sizeof(uint32_t), ctx->commits.nr),
 			  write_graph_chunk_generation_data);
 	if (ctx->num_generation_data_overflows)
 		add_chunk(cf, GRAPH_CHUNKID_GENERATION_DATA_OVERFLOW,
-			  sizeof(timestamp_t) * ctx->num_generation_data_overflows,
+			  st_mult(sizeof(timestamp_t), ctx->num_generation_data_overflows),
 			  write_graph_chunk_generation_data_overflow);
 	if (ctx->num_extra_edges)
 		add_chunk(cf, GRAPH_CHUNKID_EXTRAEDGES,
-			  4 * ctx->num_extra_edges,
+			  st_mult(4, ctx->num_extra_edges),
 			  write_graph_chunk_extra_edges);
 	if (ctx->changed_paths) {
 		add_chunk(cf, GRAPH_CHUNKID_BLOOMINDEXES,
-			  sizeof(uint32_t) * ctx->commits.nr,
+			  st_mult(sizeof(uint32_t), ctx->commits.nr),
 			  write_graph_chunk_bloom_indexes);
 		add_chunk(cf, GRAPH_CHUNKID_BLOOMDATA,
-			  sizeof(uint32_t) * 3
-			  + ctx->total_bloom_filter_data_size,
+			  st_add(sizeof(uint32_t) * 3,
+				 ctx->total_bloom_filter_data_size),
 			  write_graph_chunk_bloom_data);
 	}
 	if (ctx->num_commit_graphs_after > 1)
 		add_chunk(cf, GRAPH_CHUNKID_BASE,
-			  hashsz * (ctx->num_commit_graphs_after - 1),
+			  st_mult(hashsz, ctx->num_commit_graphs_after - 1),
 			  write_graph_chunk_base);
 	hashwrite_be32(f, GRAPH_SIGNATURE);
@@ -1997,7 +2004,7 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
 			    get_num_chunks(cf));
 		ctx->progress = start_delayed_progress(
 			progress_title.buf,
-			get_num_chunks(cf) * ctx->commits.nr);
+			st_mult(get_num_chunks(cf), ctx->commits.nr));
 	}
 	write_chunkfile(cf, ctx);
@@ -2103,11 +2110,16 @@ static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
 	if (flags != COMMIT_GRAPH_SPLIT_MERGE_PROHIBITED &&
 	    flags != COMMIT_GRAPH_SPLIT_REPLACE) {
-		while (g && (g->num_commits <= size_mult * num_commits ||
+		while (g && (g->num_commits <= st_mult(size_mult, num_commits) ||
 			    (max_commits && num_commits > max_commits))) {
 			if (g->odb != ctx->odb)
 				break;
+			if (unsigned_add_overflows(num_commits, g->num_commits))
+				die(_("cannot merge graphs with %"PRIuMAX", "
+				      "%"PRIuMAX" commits"),
+				    (uintmax_t)num_commits,
+				    (uintmax_t)g->num_commits);
 			num_commits += g->num_commits;
 			g = g->base_graph;
@@ -2165,6 +2177,11 @@ static void merge_commit_graph(struct write_commit_graph_context *ctx,
 	uint32_t i;
 	uint32_t offset = g->num_commits_in_base;
+	if (unsigned_add_overflows(ctx->commits.nr, g->num_commits))
+		die(_("cannot merge graph %s, too many commits: %"PRIuMAX),
+		    oid_to_hex(&g->oid),
+		    (uintmax_t)st_add(ctx->commits.nr, g->num_commits));
 	ALLOC_GROW(ctx->commits.list, ctx->commits.nr + g->num_commits, ctx->commits.alloc);
 	for (i = 0; i < g->num_commits; i++) {
@@ -2435,7 +2452,7 @@ int write_commit_graph(struct object_directory *odb,
 			struct commit_graph *g = ctx->r->objects->commit_graph;
 			for (i = 0; i < g->num_commits; i++) {
 				struct object_id oid;
-				oidread(&oid, g->chunk_oid_lookup + g->hash_len * i);
+				oidread(&oid, g->chunk_oid_lookup + st_mult(g->hash_len, i));
 				oid_array_append(&ctx->oids, &oid);
 			}
 		}
@@ -2562,7 +2579,7 @@ static int verify_one_commit_graph(struct repository *r,
 	for (i = 0; i < g->num_commits; i++) {
 		struct commit *graph_commit;
-		oidread(&cur_oid, g->chunk_oid_lookup + g->hash_len * i);
+		oidread(&cur_oid, g->chunk_oid_lookup + st_mult(g->hash_len, i));
 		if (i && oidcmp(&prev_oid, &cur_oid) >= 0)
 			graph_report(_("commit-graph has incorrect OID order: %s then %s"),
@@ -2606,7 +2623,7 @@ static int verify_one_commit_graph(struct repository *r,
 		timestamp_t generation;
 		display_progress(progress, ++(*seen));
-		oidread(&cur_oid, g->chunk_oid_lookup + g->hash_len * i);
+		oidread(&cur_oid, g->chunk_oid_lookup + st_mult(g->hash_len, i));
 		graph_commit = lookup_commit(r, &cur_oid);
 		odb_commit = (struct commit *)create_object(r, &cur_oid, alloc_commit_node(r));

midx.c

@@ -253,7 +253,7 @@ struct object_id *nth_midxed_object_oid(struct object_id *oid,
 	if (n >= m->num_objects)
 		return NULL;
-	oidread(oid, m->chunk_oid_lookup + m->hash_len * n);
+	oidread(oid, m->chunk_oid_lookup + st_mult(m->hash_len, n));
 	return oid;
 }
@@ -270,7 +270,8 @@ off_t nth_midxed_offset(struct multi_pack_index *m, uint32_t pos)
 			die(_("multi-pack-index stores a 64-bit offset, but off_t is too small"));
 		offset32 ^= MIDX_LARGE_OFFSET_NEEDED;
-		return get_be64(m->chunk_large_offsets + sizeof(uint64_t) * offset32);
+		return get_be64(m->chunk_large_offsets +
+				st_mult(sizeof(uint64_t), offset32));
 	}
 	return offset32;
@@ -444,14 +445,14 @@ static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
 struct write_midx_context {
 	struct pack_info *info;
-	uint32_t nr;
-	uint32_t alloc;
+	size_t nr;
+	size_t alloc;
 	struct multi_pack_index *m;
 	struct progress *progress;
 	unsigned pack_paths_checked;
 	struct pack_midx_entry *entries;
-	uint32_t entries_nr;
+	size_t entries_nr;
 	uint32_t *pack_perm;
 	uint32_t *pack_order;
@@ -583,12 +584,14 @@ static void fill_pack_entry(uint32_t pack_int_id,
 struct midx_fanout {
 	struct pack_midx_entry *entries;
-	uint32_t nr;
-	uint32_t alloc;
+	size_t nr, alloc;
 };
-static void midx_fanout_grow(struct midx_fanout *fanout, uint32_t nr)
+static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
 {
+	if (nr < fanout->nr)
+		BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
+		    (uintmax_t)nr, (uintmax_t)fanout->nr);
 	ALLOC_GROW(fanout->entries, nr, fanout->alloc);
 }
@@ -667,17 +670,18 @@ static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
 static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
						  struct pack_info *info,
						  uint32_t nr_packs,
-						  uint32_t *nr_objects,
+						  size_t *nr_objects,
						  int preferred_pack)
 {
 	uint32_t cur_fanout, cur_pack, cur_object;
-	uint32_t alloc_objects, total_objects = 0;
+	size_t alloc_objects, total_objects = 0;
 	struct midx_fanout fanout = { 0 };
 	struct pack_midx_entry *deduplicated_entries = NULL;
 	uint32_t start_pack = m ? m->num_packs : 0;
 	for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
-		total_objects += info[cur_pack].p->num_objects;
+		total_objects = st_add(total_objects,
+				       info[cur_pack].p->num_objects);
 	/*
 	 * As we de-duplicate by fanout value, we expect the fanout
@@ -720,7 +724,8 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
						&fanout.entries[cur_object].oid))
 				continue;
-			ALLOC_GROW(deduplicated_entries, *nr_objects + 1, alloc_objects);
+			ALLOC_GROW(deduplicated_entries, st_add(*nr_objects, 1),
+				   alloc_objects);
 			memcpy(&deduplicated_entries[*nr_objects],
 			       &fanout.entries[cur_object],
 			       sizeof(struct pack_midx_entry));
@@ -1495,21 +1500,22 @@ static int write_midx_internal(const char *object_dir,
 	add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
 		  write_midx_oid_fanout);
 	add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
-		  (size_t)ctx.entries_nr * the_hash_algo->rawsz,
+		  st_mult(ctx.entries_nr, the_hash_algo->rawsz),
 		  write_midx_oid_lookup);
 	add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
-		  (size_t)ctx.entries_nr * MIDX_CHUNK_OFFSET_WIDTH,
+		  st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
 		  write_midx_object_offsets);
 	if (ctx.large_offsets_needed)
 		add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
-			  (size_t)ctx.num_large_offsets * MIDX_CHUNK_LARGE_OFFSET_WIDTH,
+			  st_mult(ctx.num_large_offsets,
+				  MIDX_CHUNK_LARGE_OFFSET_WIDTH),
			  write_midx_large_offsets);
 	if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
 		ctx.pack_order = midx_pack_order(&ctx);
 		add_chunk(cf, MIDX_CHUNKID_REVINDEX,
-			  ctx.entries_nr * sizeof(uint32_t),
+			  st_mult(ctx.entries_nr, sizeof(uint32_t)),
			  write_midx_revindex);
 	}
@@ -1987,8 +1993,8 @@ static int fill_included_packs_batch(struct repository *r,
 		if (open_pack_index(p) || !p->num_objects)
 			continue;
-		expected_size = (size_t)(p->pack_size
-					 * pack_info[i].referenced_objects);
+		expected_size = st_mult(p->pack_size,
					pack_info[i].referenced_objects);
 		expected_size /= p->num_objects;
 		if (expected_size >= batch_size)


@@ -106,7 +106,7 @@ struct packed_git {
 	const void *index_data;
 	size_t index_size;
 	uint32_t num_objects;
-	uint32_t crc_offset;
+	size_t crc_offset;
 	struct oidset bad_objects;
 	int index_version;
 	time_t mtime;

pack-bitmap.c

@@ -1293,7 +1293,7 @@ static void show_extended_objects(struct bitmap_index *bitmap_git,
 	for (i = 0; i < eindex->count; ++i) {
 		struct object *obj;
-		if (!bitmap_get(objects, bitmap_num_objects(bitmap_git) + i))
+		if (!bitmap_get(objects, st_add(bitmap_num_objects(bitmap_git), i)))
 			continue;
 		obj = eindex->objects[i];
@@ -1472,7 +1472,7 @@ static void filter_bitmap_exclude_type(struct bitmap_index *bitmap_git,
 	 * them individually.
 	 */
 	for (i = 0; i < eindex->count; i++) {
-		uint32_t pos = i + bitmap_num_objects(bitmap_git);
+		size_t pos = st_add(i, bitmap_num_objects(bitmap_git));
 		if (eindex->objects[i]->type == type &&
 		    bitmap_get(to_filter, pos) &&
 		    !bitmap_get(tips, pos))
@@ -1563,7 +1563,7 @@ static void filter_bitmap_blob_limit(struct bitmap_index *bitmap_git,
 	}
 	for (i = 0; i < eindex->count; i++) {
-		uint32_t pos = i + bitmap_num_objects(bitmap_git);
+		size_t pos = st_add(i, bitmap_num_objects(bitmap_git));
 		if (eindex->objects[i]->type == OBJ_BLOB &&
 		    bitmap_get(to_filter, pos) &&
 		    !bitmap_get(tips, pos) &&
@@ -2037,7 +2037,8 @@ static uint32_t count_object_type(struct bitmap_index *bitmap_git,
 	for (i = 0; i < eindex->count; ++i) {
 		if (eindex->objects[i]->type == type &&
-		    bitmap_get(objects, bitmap_num_objects(bitmap_git) + i))
+		    bitmap_get(objects,
+			       st_add(bitmap_num_objects(bitmap_git), i)))
 			count++;
 	}
@@ -2451,7 +2452,8 @@ static off_t get_disk_usage_for_extended(struct bitmap_index *bitmap_git)
 	for (i = 0; i < eindex->count; i++) {
 		struct object *obj = eindex->objects[i];
-		if (!bitmap_get(result, bitmap_num_objects(bitmap_git) + i))
+		if (!bitmap_get(result,
+				st_add(bitmap_num_objects(bitmap_git), i)))
 			continue;
 		if (oid_object_info_extended(the_repository, &obj->oid, &oi, 0) < 0)

packfile.c

@@ -184,7 +184,7 @@ int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
 		     */
 		    (sizeof(off_t) <= 4))
 			return error("pack too large for current definition of off_t in %s", path);
-		p->crc_offset = 8 + 4 * 256 + nr * hashsz;
+		p->crc_offset = st_add(8 + 4 * 256, st_mult(nr, hashsz));
 	}
 	p->index_version = version;
@@ -1918,10 +1918,10 @@ int nth_packed_object_id(struct object_id *oid,
 		return -1;
 	index += 4 * 256;
 	if (p->index_version == 1) {
-		oidread(oid, index + (hashsz + 4) * n + 4);
+		oidread(oid, index + st_add(st_mult(hashsz + 4, n), 4));
 	} else {
 		index += 8;
-		oidread(oid, index + hashsz * n);
+		oidread(oid, index + st_mult(hashsz, n));
 	}
 	return 0;
 }
@@ -1946,14 +1946,15 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
 	const unsigned int hashsz = the_hash_algo->rawsz;
 	index += 4 * 256;
 	if (p->index_version == 1) {
-		return ntohl(*((uint32_t *)(index + (hashsz + 4) * (size_t)n)));
+		return ntohl(*((uint32_t *)(index + st_mult(hashsz + 4, n))));
 	} else {
 		uint32_t off;
-		index += 8 + (size_t)p->num_objects * (hashsz + 4);
-		off = ntohl(*((uint32_t *)(index + 4 * n)));
+		index += st_add(8, st_mult(p->num_objects, hashsz + 4));
+		off = ntohl(*((uint32_t *)(index + st_mult(4, n))));
 		if (!(off & 0x80000000))
 			return off;
-		index += (size_t)p->num_objects * 4 + (off & 0x7fffffff) * 8;
+		index += st_add(st_mult(p->num_objects, 4),
+				st_mult(off & 0x7fffffff, 8));
 		check_pack_index_ptr(p, index);
 		return get_be64(index);
 	}