git/csum-file.c
Derrick Stolee ddaf1f62e3 csum-file: make hashwrite() more readable
The hashwrite() method takes an input buffer and updates a hashfile's
hash function while writing the data to a file. To avoid overuse of
flushes, the hashfile has an internal buffer and most writes will use
memcpy() to transfer data from the input 'buf' to the hashfile's buffer
of size 8 * 1024 bytes.

Logic introduced by a8032d12 (sha1write: don't copy full sized buffers,
2008-09-02) reduces the number of memcpy() calls when the input buffer
is sufficiently longer than the hashfile's buffer, causing nr to be the
length of the full buffer. In these cases, the input buffer is used
directly in chunks equal to the hashfile's buffer size.

This method caught my attention while investigating some performance
issues, but it turns out that these performance issues were noise within
the variance of the experiment.

However, during this investigation, I inspected hashwrite() and
misunderstood it, even after looking closely and trying to make it
faster. This change simply reorganizes some parts of the loop within
hashwrite() to make it clear that each batch either uses memcpy() to the
hashfile's buffer or writes directly from the input buffer. The previous
code relied on indirection through local variables and essentially
inlined the implementation of hashflush() to reduce lines of code.

Helped-by: Jeff King <peff@peff.net>
Helped-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-03-26 14:32:45 -07:00
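
For context on how callers drive this API, here is a minimal, hypothetical usage sketch. It is a sketch only: the function name, file name, sizes, and flag combination are invented for illustration, and it assumes git's headers are available (GIT_MAX_RAWSZ comes from git's hash.h; die_errno() is the helper already used in this file). hashfd(), hashwrite(), and finalize_hashfile() are the functions defined in the file below.

/*
 * Hypothetical usage sketch, not part of csum-file.c: stream a block of
 * data to a file and append the hash of everything written, the way the
 * pack and index writers use this API.
 */
static void write_summed_file(const char *path, const void *data,
			      unsigned int len)
{
	int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0666);
	struct hashfile *f;
	unsigned char hash[GIT_MAX_RAWSZ];

	if (fd < 0)
		die_errno("unable to open '%s'", path);

	f = hashfd(fd, path);

	/*
	 * If, say, len were 20000 and the 8 KiB buffer empty, hashwrite()
	 * would hash and flush two full 8192-byte batches directly from
	 * 'data' and memcpy() the remaining 3616 bytes into f->buffer.
	 */
	hashwrite(f, data, len);

	/* flush, append the hash to the stream, fsync, and close the fd */
	finalize_hashfile(f, hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
}

The trailing hash appended by CSUM_HASH_IN_STREAM is what the header comment below means by being able to verify that the file "hasn't been messed with afterwards".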


/*
 * csum-file.c
 *
 * Copyright (C) 2005 Linus Torvalds
 *
 * Simple file write infrastructure for writing SHA1-summed
 * files. Useful when you write a file that you want to be
 * able to verify hasn't been messed with afterwards.
 */
#include "cache.h"
#include "progress.h"
#include "csum-file.h"

static void flush(struct hashfile *f, const void *buf, unsigned int count)
{
	if (0 <= f->check_fd && count) {
		unsigned char check_buffer[8192];
		ssize_t ret = read_in_full(f->check_fd, check_buffer, count);

		if (ret < 0)
			die_errno("%s: sha1 file read error", f->name);
		if (ret != count)
			die("%s: sha1 file truncated", f->name);
		if (memcmp(buf, check_buffer, count))
			die("sha1 file '%s' validation error", f->name);
	}

	for (;;) {
		int ret = xwrite(f->fd, buf, count);
		if (ret > 0) {
			f->total += ret;
			display_throughput(f->tp, f->total);
			buf = (char *) buf + ret;
			count -= ret;
			if (count)
				continue;
			return;
		}
		if (!ret)
			die("sha1 file '%s' write error. Out of diskspace", f->name);
		die_errno("sha1 file '%s' write error", f->name);
	}
}

void hashflush(struct hashfile *f)
{
	unsigned offset = f->offset;

	if (offset) {
		the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
		flush(f, f->buffer, offset);
		f->offset = 0;
	}
}

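/*
 * Flush anything still buffered, finish the hash computation, and then,
 * as requested by 'flags', append the hash to the stream, fsync, and/or
 * close the descriptor. Returns the descriptor, or 0 if it was closed.
 */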
int finalize_hashfile(struct hashfile *f, unsigned char *result, unsigned int flags)
{
	int fd;

	hashflush(f);
	the_hash_algo->final_fn(f->buffer, &f->ctx);
	if (result)
		hashcpy(result, f->buffer);
	if (flags & CSUM_HASH_IN_STREAM)
		flush(f, f->buffer, the_hash_algo->rawsz);
	if (flags & CSUM_FSYNC)
		fsync_or_die(f->fd, f->name);
	if (flags & CSUM_CLOSE) {
		if (close(f->fd))
			die_errno("%s: sha1 file error on close", f->name);
		fd = 0;
	} else
		fd = f->fd;
	if (0 <= f->check_fd) {
		char discard;
		int cnt = read_in_full(f->check_fd, &discard, 1);
		if (cnt < 0)
			die_errno("%s: error when reading the tail of sha1 file",
				  f->name);
		if (cnt)
			die("%s: sha1 file has trailing garbage", f->name);
		if (close(f->check_fd))
			die_errno("%s: sha1 file error on close", f->name);
	}
	free(f);
	return fd;
}

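/*
 * Feed 'count' bytes from 'buf' into the hash (and CRC, if enabled) and
 * write them out. Full buffer-sized batches are hashed and flushed
 * straight from the caller's buffer; smaller tails are copied into
 * f->buffer and flushed only once that buffer fills up.
 */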
void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
	while (count) {
		unsigned left = sizeof(f->buffer) - f->offset;
		unsigned nr = count > left ? left : count;

		if (f->do_crc)
			f->crc32 = crc32(f->crc32, buf, nr);

		if (nr == sizeof(f->buffer)) {
			/*
			 * Flush a full batch worth of data directly
			 * from the input, skipping the memcpy() to
			 * the hashfile's buffer. In this block,
			 * f->offset is necessarily zero.
			 */
			the_hash_algo->update_fn(&f->ctx, buf, nr);
			flush(f, buf, nr);
		} else {
			/*
			 * Copy to the hashfile's buffer, flushing only
			 * if it became full.
			 */
			memcpy(f->buffer + f->offset, buf, nr);
			f->offset += nr;
			left -= nr;
			if (!left)
				hashflush(f);
		}

		count -= nr;
		buf = (char *) buf + nr;
	}
}

struct hashfile *hashfd(int fd, const char *name)
{
	return hashfd_throughput(fd, name, NULL);
}

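/*
 * Open an existing file for verification: the hashfile itself writes to
 * /dev/null, while check_fd reads the file at 'name' so that flush() can
 * compare the regenerated bytes against what is already on disk.
 */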
struct hashfile *hashfd_check(const char *name)
{
	int sink, check;
	struct hashfile *f;

	sink = open("/dev/null", O_WRONLY);
	if (sink < 0)
		die_errno("unable to open /dev/null");
	check = open(name, O_RDONLY);
	if (check < 0)
		die_errno("unable to open '%s'", name);
	f = hashfd(sink, name);
	f->check_fd = check;
	return f;
}

struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
{
	struct hashfile *f = xmalloc(sizeof(*f));
	f->fd = fd;
	f->check_fd = -1;
	f->offset = 0;
	f->total = 0;
	f->tp = tp;
	f->name = name;
	f->do_crc = 0;
	the_hash_algo->init_fn(&f->ctx);
	return f;
}

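/*
 * A checkpoint records the current stream length and a clone of the hash
 * context so that hashfile_truncate() can later roll both the file and
 * the hash state back to that point.
 */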
void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
	hashflush(f);
	checkpoint->offset = f->total;
	the_hash_algo->clone_fn(&checkpoint->ctx, &f->ctx);
}

int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
	off_t offset = checkpoint->offset;

	if (ftruncate(f->fd, offset) ||
	    lseek(f->fd, offset, SEEK_SET) != offset)
		return -1;
	f->total = offset;
	f->ctx = checkpoint->ctx;
	f->offset = 0; /* hashflush() was called in checkpoint */
	return 0;
}

void crc32_begin(struct hashfile *f)
{
	f->crc32 = crc32(0, NULL, 0);
	f->do_crc = 1;
}

uint32_t crc32_end(struct hashfile *f)
{
	f->do_crc = 0;
	return f->crc32;
}