git/tag.c

#include "cache.h"
#include "tag.h"
#include "commit.h"
#include "tree.h"
#include "blob.h"

const char *tag_type = "tag";
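
/*
 * Peel a (possibly nested) tag until a non-tag object is reached.
 * Every pointed-to object is parsed, so a broken chain is noticed
 * here; on failure, report the ref named by "warn" (or its first
 * "warnlen" bytes) and return NULL.
 */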
struct object *deref_tag(struct object *o, const char *warn, int warnlen)
{
	while (o && o->type == OBJ_TAG)
		if (((struct tag *)o)->tagged)
			o = parse_object(((struct tag *)o)->tagged->oid.hash);
		else
			o = NULL;
	if (!o && warn) {
		if (!warnlen)
			warnlen = strlen(warn);
		error("missing object referenced by '%.*s'", warnlen, warn);
	}
	return o;
}
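
/*
 * Like deref_tag(), but do not load or verify the pointed-to objects:
 * only the tag objects themselves are parsed, halving the object loads
 * when peeling.  Used by upload-pack when advertising refs; any
 * breakage in the chain is detected later, when the objects are
 * actually fetched.
 */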
struct object *deref_tag_noverify(struct object *o)
{
	while (o && o->type == OBJ_TAG) {
		o = parse_object(o->oid.hash);
		if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
			o = ((struct tag *)o)->tagged;
		else
			o = NULL;
	}
	return o;
}
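
/*
 * Find or create the in-core tag object for the given sha1.
 */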
struct tag *lookup_tag(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		return create_object(sha1, alloc_tag_node());
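
	/*
	 * object_as_type() checks that the existing object really is a
	 * tag (complaining and returning NULL otherwise) and converts a
	 * placeholder OBJ_NONE object into a tag.
	 */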
	return object_as_type(obj, OBJ_TAG, 0);
}
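
/*
 * "buf" points at a "tagger" header line; skip past the '>' that
 * closes the e-mail address and parse the decimal timestamp that
 * follows.  Returns 0 if the line is truncated or malformed.
 */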
static unsigned long parse_tag_date(const char *buf, const char *tail)
{
	const char *dateptr;

	while (buf < tail && *buf++ != '>')
		/* nada */;
	if (buf >= tail)
		return 0;
	dateptr = buf;
	while (buf < tail && *buf++ != '\n')
		/* nada */;
	if (buf >= tail)
		return 0;
	/* dateptr < buf && buf[-1] == '\n', so strtoul will stop at buf-1 */
	return strtoul(dateptr, NULL, 10);
}
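
/*
 * Parse a tag object already loaded into memory.  The buffer is
 * expected to start with headers of the form
 *
 *	object <40-hex sha1>
 *	type <blob|tree|commit|tag>
 *	tag <tagname>
 *	tagger <name> <email> <timestamp> <timezone>
 *
 * followed by the tag message.  Only the headers are examined here;
 * the tagged object is looked up (but not parsed) and the tagger
 * timestamp is recorded in item->date.
 */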
int parse_tag_buffer(struct tag *item, const void *data, unsigned long size)
{
	unsigned char sha1[20];
	char type[20];
	const char *bufptr = data;
	const char *tail = bufptr + size;
	const char *nl;

	if (item->object.parsed)
		return 0;
	item->object.parsed = 1;

	if (size < 64)
		return -1;
	if (memcmp("object ", bufptr, 7) || get_sha1_hex(bufptr + 7, sha1) || bufptr[47] != '\n')
		return -1;
	bufptr += 48; /* "object " + sha1 + "\n" */

	if (!starts_with(bufptr, "type "))
		return -1;
	bufptr += 5;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl || sizeof(type) <= (nl - bufptr))
		return -1;
	memcpy(type, bufptr, nl - bufptr);
	type[nl - bufptr] = '\0';
	bufptr = nl + 1;

	if (!strcmp(type, blob_type)) {
		item->tagged = &lookup_blob(sha1)->object;
	} else if (!strcmp(type, tree_type)) {
		item->tagged = &lookup_tree(sha1)->object;
	} else if (!strcmp(type, commit_type)) {
		item->tagged = &lookup_commit(sha1)->object;
	} else if (!strcmp(type, tag_type)) {
		item->tagged = &lookup_tag(sha1)->object;
	} else {
		error("Unknown type %s", type);
		item->tagged = NULL;
	}

	if (bufptr + 4 < tail && starts_with(bufptr, "tag "))
		; /* good */
	else
		return -1;
	bufptr += 4;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl)
		return -1;
	item->tag = xmemdupz(bufptr, nl - bufptr);
	bufptr = nl + 1;

	if (bufptr + 7 < tail && starts_with(bufptr, "tagger "))
		item->date = parse_tag_date(bufptr, tail);
	else
		item->date = 0;

	return 0;
}
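
/*
 * Read the tag from the object database, make sure it really is a tag,
 * and hand the buffer to parse_tag_buffer().
 */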
int parse_tag(struct tag *item)
{
	enum object_type type;
	void *data;
	unsigned long size;
	int ret;

	if (item->object.parsed)
		return 0;
	data = read_sha1_file(item->object.oid.hash, &type, &size);
	if (!data)
		return error("Could not read %s",
			     oid_to_hex(&item->object.oid));
	if (type != OBJ_TAG) {
		free(data);
		return error("Object %s not a tag",
			     oid_to_hex(&item->object.oid));
	}
	ret = parse_tag_buffer(item, data, size);
	free(data);
	return ret;
}