
Merge branch 'ym/fix-opportunistic-index-update-race'

Read-only operations such as "git status" that internally refresh
the index write the refreshed index back to disk to optimize
future accesses to the working tree, but this could race with a
"read-write" operation that modifies the index while it is
running.  Detect such a race and avoid overwriting the index.

Duy raised a good point that we may need to do the same for the
normal writeout codepath, not just the "opportunistic" update
codepath.  While that is true, nobody sane would run two clearly
write-oriented operations simultaneously against the same index
file, so that fix can be done as a less urgent follow-up to this
topic.

* ym/fix-opportunistic-index-update-race:
  read-cache.c: verify index file before we opportunistically update it
  wrapper.c: add xpread() similar to xread()
commit 9af098c29b
Junio C Hamano, 2014-06-03 12:06:41 -07:00
6 changed files with 90 additions and 5 deletions
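
In outline, the fix records the SHA-1 trailer of the index file when it
is read, and refuses the opportunistic write if the file on disk no
longer ends with that trailer.  A minimal standalone sketch of that
check follows; the helper name index_still_matches(), the bare pread()
call, and the hard-coded 20-byte trailer length are illustrative
assumptions, not the commit's code (the real implementation is
verify_index_from() in read-cache.c, below).

/*
 * Sketch only: verify that the on-disk index still ends with the same
 * SHA-1 trailer we remembered when we read it.  index_still_matches()
 * is a hypothetical name; git's real check is verify_index_from().
 */
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define TRAILER_LEN 20	/* SHA-1 checksum at the end of the index */

static int index_still_matches(const char *path,
			       const unsigned char remembered[TRAILER_LEN])
{
	unsigned char on_disk[TRAILER_LEN];
	struct stat st;
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 0;	/* cannot tell; err on the safe side */
	if (fstat(fd, &st) || st.st_size < TRAILER_LEN) {
		close(fd);
		return 0;
	}
	n = pread(fd, on_disk, TRAILER_LEN, st.st_size - TRAILER_LEN);
	close(fd);
	/* identical trailers mean nobody rewrote the index behind us */
	return n == TRAILER_LEN && !memcmp(remembered, on_disk, TRAILER_LEN);
}

When a check of this kind fails, update_index_if_able() (see the
read-cache.c hunks below) falls back to rollback_lock_file(), leaving
the racing writer's index untouched.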

builtin/index-pack.c

@@ -542,7 +542,7 @@ static void *unpack_data(struct object_entry *obj,
 
 	do {
 		ssize_t n = (len < 64*1024) ? len : 64*1024;
-		n = pread(pack_fd, inbuf, n, from);
+		n = xpread(pack_fd, inbuf, n, from);
 		if (n < 0)
 			die_errno(_("cannot pread pack file"));
 		if (!n)

cache.h

@@ -294,6 +294,7 @@ struct index_state {
		 initialized : 1;
 	struct hashmap name_hash;
 	struct hashmap dir_hash;
+	unsigned char sha1[20];
 };
 
 extern struct index_state the_index;
@@ -1337,6 +1338,8 @@ extern void fsync_or_die(int fd, const char *);
 
 extern ssize_t read_in_full(int fd, void *buf, size_t count);
 extern ssize_t write_in_full(int fd, const void *buf, size_t count);
+extern ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
+
 static inline ssize_t write_str_in_full(int fd, const char *str)
 {
 	return write_in_full(fd, str, strlen(str));

compat/mmap.c

@@ -14,7 +14,7 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset)
 	}
 
 	while (n < length) {
-		ssize_t count = pread(fd, (char *)start + n, length - n, offset + n);
+		ssize_t count = xpread(fd, (char *)start + n, length - n, offset + n);
 
 		if (count == 0) {
 			memset((char *)start+n, 0, length-n);
@@ -22,8 +22,6 @@ void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset)
 		}
 
 		if (count < 0) {
-			if (errno == EAGAIN || errno == EINTR)
-				continue;
 			free(start);
 			errno = EACCES;
 			return MAP_FAILED;

git-compat-util.h

@@ -539,6 +539,7 @@ extern void *xcalloc(size_t nmemb, size_t size);
 extern void *xmmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
 extern ssize_t xread(int fd, void *buf, size_t len);
 extern ssize_t xwrite(int fd, const void *buf, size_t len);
+extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
 extern int xdup(int fd);
 extern FILE *xfdopen(int fd, const char *mode);
 extern int xmkstemp(char *template);

read-cache.c

@@ -1477,6 +1477,7 @@ int read_index_from(struct index_state *istate, const char *path)
 	if (verify_hdr(hdr, mmap_size) < 0)
 		goto unmap;
 
+	hashcpy(istate->sha1, (unsigned char *)hdr + mmap_size - 20);
 	istate->version = ntohl(hdr->hdr_version);
 	istate->cache_nr = ntohl(hdr->hdr_entries);
 	istate->cache_alloc = alloc_nr(istate->cache_nr);
@@ -1760,6 +1761,50 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
 	return result;
 }
 
+/*
+ * This function verifies if index_state has the correct sha1 of the
+ * index file.  Don't die if we have any other failure, just return 0.
+ */
+static int verify_index_from(const struct index_state *istate, const char *path)
+{
+	int fd;
+	ssize_t n;
+	struct stat st;
+	unsigned char sha1[20];
+
+	if (!istate->initialized)
+		return 0;
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0)
+		return 0;
+
+	if (fstat(fd, &st))
+		goto out;
+
+	if (st.st_size < sizeof(struct cache_header) + 20)
+		goto out;
+
+	n = pread_in_full(fd, sha1, 20, st.st_size - 20);
+	if (n != 20)
+		goto out;
+
+	if (hashcmp(istate->sha1, sha1))
+		goto out;
+
+	close(fd);
+	return 1;
+
+out:
+	close(fd);
+	return 0;
+}
+
+static int verify_index(const struct index_state *istate)
+{
+	return verify_index_from(istate, get_index_file());
+}
+
 static int has_racy_timestamp(struct index_state *istate)
 {
 	int entries = istate->cache_nr;
@@ -1779,7 +1824,7 @@ static int has_racy_timestamp(struct index_state *istate)
 void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
 {
 	if ((istate->cache_changed || has_racy_timestamp(istate)) &&
-	    !write_index(istate, lockfile->fd))
+	    verify_index(istate) && !write_index(istate, lockfile->fd))
 		commit_locked_index(lockfile);
 	else
 		rollback_lock_file(lockfile);

wrapper.c

@@ -174,6 +174,24 @@ ssize_t xwrite(int fd, const void *buf, size_t len)
 	}
 }
 
+/*
+ * xpread() is the same as pread(), but it automatically restarts pread()
+ * operations with a recoverable error (EAGAIN and EINTR). xpread() DOES
+ * NOT GUARANTEE that "len" bytes is read even if the data is available.
+ */
+ssize_t xpread(int fd, void *buf, size_t len, off_t offset)
+{
+	ssize_t nr;
+	if (len > MAX_IO_SIZE)
+		len = MAX_IO_SIZE;
+	while (1) {
+		nr = pread(fd, buf, len, offset);
+		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
+			continue;
+		return nr;
+	}
+}
+
 ssize_t read_in_full(int fd, void *buf, size_t count)
 {
 	char *p = buf;
@@ -214,6 +232,26 @@ ssize_t write_in_full(int fd, const void *buf, size_t count)
 	return total;
 }
 
+ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset)
+{
+	char *p = buf;
+	ssize_t total = 0;
+
+	while (count > 0) {
+		ssize_t loaded = xpread(fd, p, count, offset);
+		if (loaded < 0)
+			return -1;
+		if (loaded == 0)
+			return total;
+		count -= loaded;
+		p += loaded;
+		total += loaded;
+		offset += loaded;
+	}
+
+	return total;
+}
+
 int xdup(int fd)
 {
 	int ret = dup(fd);
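
As a closing note on the two wrappers added above: xpread() only hides
EINTR/EAGAIN and may still return a short read, so callers that need an
exact byte count layer pread_in_full() on top of it.  The caller below
is a hypothetical illustration, not part of the commit; the extern
declarations merely stand in for what git's headers provide.

#include <unistd.h>

/* assumed to be provided by git's wrapper.c via its headers */
extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
extern ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);

/* hypothetical caller: read the 20-byte trailer at the end of a file */
static int read_trailer(int fd, off_t size, unsigned char sha1[20])
{
	if (size < 20)
		return -1;
	/* exact-count read: pread_in_full() loops over xpread() for us */
	return pread_in_full(fd, sha1, 20, size - 20) == 20 ? 0 : -1;
}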