#!/bin/sh
test_description='Test git update-ref error handling'
. ./test-lib.sh
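# Note: $SQ, provided by test-lib.sh, expands to a single-quote character;
# it is used below to build expected error messages that quote refnames.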
# Create some references, perhaps run pack-refs --all, then try to
# create some more references. Ensure that the second creation fails
# with the correct error message.
# Usage: test_update_rejected <before> <pack> <create> <error>
# <before> is a ws-separated list of refs to create before the test
# <pack> (true or false) tells whether to pack the refs before the test
# <create> is a list of variables to attempt creating
# <error> is a string to look for in the stderr of update-ref.
# All references are created in the namespace specified by the current
# value of $prefix.
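#
# For example (illustrative only; each test below chooses its own $prefix
# and refs):
#
#	prefix=refs/example &&
#	test_update_rejected "a c e" false "b c/x d" \
#		"$SQ$prefix/c$SQ exists; cannot create $SQ$prefix/c/x$SQ"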
test_update_rejected () {
	before="$1" &&
	pack="$2" &&
	create="$3" &&
	error="$4" &&
	printf "create $prefix/%s $C\n" $before |
	git update-ref --stdin &&
	git for-each-ref $prefix >unchanged &&
	if $pack
	then
		git pack-refs --all
	fi &&
	printf "create $prefix/%s $C\n" $create >input &&
	test_must_fail git update-ref --stdin <input 2>output.err &&
	test_i18ngrep -F "$error" output.err &&
	git for-each-ref $prefix >actual &&
	test_cmp unchanged actual
}
# Test adding and deleting D/F-conflicting references in a single
# transaction.
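#
# Usage: df_test <prefix> [--pack] [--sym-add] [--sym-del]
#                {--add-del <add> <del> | --del-add <del> <add>}
#
# <add> and <del> are ref names taken relative to <prefix>/r/ (this is
# just a summary of the option handling below).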
df_test() {
	prefix="$1"
	pack=: symadd=false symdel=false add_del=false addref= delref=
	shift
	while test $# -gt 0
	do
		case "$1" in
		--pack)
			pack="git pack-refs --all"
			shift
			;;
		--sym-add)
			# Perform the add via a symbolic reference
			symadd=true
			shift
			;;
		--sym-del)
			# Perform the del via a symbolic reference
			symdel=true
			shift
			;;
		--del-add)
			# Delete first reference then add second
			add_del=false
			delref="$prefix/r/$2"
			addref="$prefix/r/$3"
			shift 3
			;;
		--add-del)
			# Add first reference then delete second
			add_del=true
			addref="$prefix/r/$2"
			delref="$prefix/r/$3"
			shift 3
			;;
		*)
			echo 1>&2 "Extra args to df_test: $*"
			return 1
			;;
		esac
	done
	git update-ref "$delref" $C &&
	if $symadd
	then
		addname="$prefix/s/symadd" &&
		git symbolic-ref "$addname" "$addref"
	else
		addname="$addref"
	fi &&
	if $symdel
	then
		delname="$prefix/s/symdel" &&
		git symbolic-ref "$delname" "$delref"
	else
		delname="$delref"
	fi &&
	cat >expected-err <<-EOF &&
	fatal: cannot lock ref $SQ$addname$SQ: $SQ$delref$SQ exists; cannot create $SQ$addref$SQ
	EOF
	$pack &&
	if $add_del
	then
		printf "%s\n" "create $addname $D" "delete $delname"
	else
		printf "%s\n" "delete $delname" "create $addname $D"
	fi >commands &&
	test_must_fail git update-ref --stdin <commands 2>output.err &&
	test_i18ncmp expected-err output.err &&
	printf "%s\n" "$C $delref" >expected-refs &&
	git for-each-ref --format="%(objectname) %(refname)" $prefix/r >actual-refs &&
	test_cmp expected-refs actual-refs
}
test_expect_success 'setup' '
git commit --allow-empty -m Initial &&
C=$(git rev-parse HEAD) &&
git commit --allow-empty -m Second &&
D=$(git rev-parse HEAD) &&
git commit --allow-empty -m Third &&
E=$(git rev-parse HEAD)
'
test_expect_success 'existing loose ref is a simple prefix of new' '
prefix=refs/1l &&
test_update_rejected "a c e" false "b c/x d" \
"$SQ$prefix/c$SQ exists; cannot create $SQ$prefix/c/x$SQ"
'
test_expect_success 'existing packed ref is a simple prefix of new' '
prefix=refs/1p &&
test_update_rejected "a c e" true "b c/x d" \
"$SQ$prefix/c$SQ exists; cannot create $SQ$prefix/c/x$SQ"
'
test_expect_success 'existing loose ref is a deeper prefix of new' '
prefix=refs/2l &&
test_update_rejected "a c e" false "b c/x/y d" \
"$SQ$prefix/c$SQ exists; cannot create $SQ$prefix/c/x/y$SQ"
'
test_expect_success 'existing packed ref is a deeper prefix of new' '
prefix=refs/2p &&
test_update_rejected "a c e" true "b c/x/y d" \
"$SQ$prefix/c$SQ exists; cannot create $SQ$prefix/c/x/y$SQ"
'
test_expect_success 'new ref is a simple prefix of existing loose' '
prefix=refs/3l &&
test_update_rejected "a c/x e" false "b c d" \
"$SQ$prefix/c/x$SQ exists; cannot create $SQ$prefix/c$SQ"
'
test_expect_success 'new ref is a simple prefix of existing packed' '
prefix=refs/3p &&
test_update_rejected "a c/x e" true "b c d" \
"$SQ$prefix/c/x$SQ exists; cannot create $SQ$prefix/c$SQ"
'
test_expect_success 'new ref is a deeper prefix of existing loose' '
prefix=refs/4l &&
test_update_rejected "a c/x/y e" false "b c d" \
"$SQ$prefix/c/x/y$SQ exists; cannot create $SQ$prefix/c$SQ"
'
test_expect_success 'new ref is a deeper prefix of existing packed' '
prefix=refs/4p &&
test_update_rejected "a c/x/y e" true "b c d" \
"$SQ$prefix/c/x/y$SQ exists; cannot create $SQ$prefix/c$SQ"
'
test_expect_success 'one new ref is a simple prefix of another' '
prefix=refs/5 &&
test_update_rejected "a e" false "b c c/x d" \
"cannot process $SQ$prefix/c$SQ and $SQ$prefix/c/x$SQ at the same time"
'
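# A stale empty directory under .git/refs must not be mistaken for a
# reference, nor prevent the real reference from being read or updated: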
test_expect_success 'empty directory should not fool rev-parse' '
prefix=refs/e-rev-parse &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
echo "$C" >expected &&
git rev-parse $prefix/foo >actual &&
test_cmp expected actual
'
test_expect_success 'empty directory should not fool for-each-ref' '
prefix=refs/e-for-each-ref &&
git update-ref $prefix/foo $C &&
git for-each-ref $prefix >expected &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
git for-each-ref $prefix >actual &&
test_cmp expected actual
'
test_expect_success 'empty directory should not fool create' '
prefix=refs/e-create &&
mkdir -p .git/$prefix/foo/bar/baz &&
printf "create %s $C\n" $prefix/foo |
git update-ref --stdin
'
test_expect_success 'empty directory should not fool verify' '
prefix=refs/e-verify &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
printf "verify %s $C\n" $prefix/foo |
git update-ref --stdin
'
test_expect_success 'empty directory should not fool 1-arg update' '
prefix=refs/e-update-1 &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
printf "update %s $D\n" $prefix/foo |
git update-ref --stdin
'
test_expect_success 'empty directory should not fool 2-arg update' '
prefix=refs/e-update-2 &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
printf "update %s $D $C\n" $prefix/foo |
git update-ref --stdin
'
test_expect_success 'empty directory should not fool 0-arg delete' '
prefix=refs/e-delete-0 &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
printf "delete %s\n" $prefix/foo |
git update-ref --stdin
'
test_expect_success 'empty directory should not fool 1-arg delete' '
prefix=refs/e-delete-1 &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
mkdir -p .git/$prefix/foo/bar/baz &&
printf "delete %s $C\n" $prefix/foo |
git update-ref --stdin
'
test_expect_success 'D/F conflict prevents add long + delete short' '
df_test refs/df-al-ds --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents add short + delete long' '
df_test refs/df-as-dl --add-del foo foo/bar
'
test_expect_success 'D/F conflict prevents delete long + add short' '
df_test refs/df-dl-as --del-add foo/bar foo
'
test_expect_success 'D/F conflict prevents delete short + add long' '
df_test refs/df-ds-al --del-add foo foo/bar
'
test_expect_success 'D/F conflict prevents add long + delete short packed' '
df_test refs/df-al-dsp --pack --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents add short + delete long packed' '
df_test refs/df-as-dlp --pack --add-del foo foo/bar
'
test_expect_success 'D/F conflict prevents delete long packed + add short' '
df_test refs/df-dlp-as --pack --del-add foo/bar foo
'
test_expect_success 'D/F conflict prevents delete short packed + add long' '
df_test refs/df-dsp-al --pack --del-add foo foo/bar
'
# Try some combinations involving symbolic refs...
test_expect_success 'D/F conflict prevents indirect add long + delete short' '
df_test refs/df-ial-ds --sym-add --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents indirect add long + indirect delete short' '
df_test refs/df-ial-ids --sym-add --sym-del --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents indirect add short + indirect delete long' '
df_test refs/df-ias-idl --sym-add --sym-del --add-del foo foo/bar
'
test_expect_success 'D/F conflict prevents indirect delete long + indirect add short' '
df_test refs/df-idl-ias --sym-add --sym-del --del-add foo/bar foo
'
test_expect_success 'D/F conflict prevents indirect add long + delete short packed' '
df_test refs/df-ial-dsp --sym-add --pack --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents indirect add long + indirect delete short packed' '
df_test refs/df-ial-idsp --sym-add --sym-del --pack --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents add long + indirect delete short packed' '
df_test refs/df-al-idsp --sym-del --pack --add-del foo/bar foo
'
test_expect_success 'D/F conflict prevents indirect delete long packed + indirect add short' '
df_test refs/df-idlp-ias --sym-add --sym-del --pack --del-add foo/bar foo
'
# Test various errors when reading the old values of references...
test_expect_success 'missing old value blocks update' '
prefix=refs/missing-update &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: unable to resolve reference $SQ$prefix/foo$SQ
EOF
printf "%s\n" "update $prefix/foo $E $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'incorrect old value blocks update' '
prefix=refs/incorrect-update &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: is at $C but expected $D
EOF
printf "%s\n" "update $prefix/foo $E $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'existing old value blocks create' '
prefix=refs/existing-create &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: reference already exists
EOF
printf "%s\n" "create $prefix/foo $E" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'incorrect old value blocks delete' '
prefix=refs/incorrect-delete &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: is at $C but expected $D
EOF
printf "%s\n" "delete $prefix/foo $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'missing old value blocks indirect update' '
prefix=refs/missing-indirect-update &&
git symbolic-ref $prefix/symref $prefix/foo &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: unable to resolve reference $SQ$prefix/foo$SQ
EOF
printf "%s\n" "update $prefix/symref $E $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'incorrect old value blocks indirect update' '
prefix=refs/incorrect-indirect-update &&
git symbolic-ref $prefix/symref $prefix/foo &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: is at $C but expected $D
EOF
printf "%s\n" "update $prefix/symref $E $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'existing old value blocks indirect create' '
prefix=refs/existing-indirect-create &&
git symbolic-ref $prefix/symref $prefix/foo &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: reference already exists
EOF
printf "%s\n" "create $prefix/symref $E" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'incorrect old value blocks indirect delete' '
prefix=refs/incorrect-indirect-delete &&
git symbolic-ref $prefix/symref $prefix/foo &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: is at $C but expected $D
EOF
printf "%s\n" "delete $prefix/symref $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'missing old value blocks indirect no-deref update' '
prefix=refs/missing-noderef-update &&
git symbolic-ref $prefix/symref $prefix/foo &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: reference is missing but expected $D
EOF
printf "%s\n" "option no-deref" "update $prefix/symref $E $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'incorrect old value blocks indirect no-deref update' '
prefix=refs/incorrect-noderef-update &&
git symbolic-ref $prefix/symref $prefix/foo &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: is at $C but expected $D
EOF
printf "%s\n" "option no-deref" "update $prefix/symref $E $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'existing old value blocks indirect no-deref create' '
prefix=refs/existing-noderef-create &&
git symbolic-ref $prefix/symref $prefix/foo &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: reference already exists
EOF
printf "%s\n" "option no-deref" "create $prefix/symref $E" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'incorrect old value blocks indirect no-deref delete' '
prefix=refs/incorrect-noderef-delete &&
git symbolic-ref $prefix/symref $prefix/foo &&
git update-ref $prefix/foo $C &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: is at $C but expected $D
EOF
printf "%s\n" "option no-deref" "delete $prefix/symref $D" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'non-empty directory blocks create' '
prefix=refs/ne-create &&
mkdir -p .git/$prefix/foo/bar &&
: >.git/$prefix/foo/bar/baz.lock &&
test_when_finished "rm -f .git/$prefix/foo/bar/baz.lock" &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: there is a non-empty directory $SQ.git/$prefix/foo$SQ blocking reference $SQ$prefix/foo$SQ
EOF
printf "%s\n" "update $prefix/foo $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: unable to resolve reference $SQ$prefix/foo$SQ
EOF
printf "%s\n" "update $prefix/foo $D $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'broken reference blocks create' '
prefix=refs/broken-create &&
mkdir -p .git/$prefix &&
echo "gobbledigook" >.git/$prefix/foo &&
test_when_finished "rm -f .git/$prefix/foo" &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: unable to resolve reference $SQ$prefix/foo$SQ: reference broken
EOF
printf "%s\n" "update $prefix/foo $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/foo$SQ: unable to resolve reference $SQ$prefix/foo$SQ: reference broken
EOF
printf "%s\n" "update $prefix/foo $D $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'non-empty directory blocks indirect create' '
prefix=refs/ne-indirect-create &&
git symbolic-ref $prefix/symref $prefix/foo &&
mkdir -p .git/$prefix/foo/bar &&
: >.git/$prefix/foo/bar/baz.lock &&
test_when_finished "rm -f .git/$prefix/foo/bar/baz.lock" &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: there is a non-empty directory $SQ.git/$prefix/foo$SQ blocking reference $SQ$prefix/foo$SQ
EOF
printf "%s\n" "update $prefix/symref $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: unable to resolve reference $SQ$prefix/foo$SQ
EOF
printf "%s\n" "update $prefix/symref $D $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'broken reference blocks indirect create' '
prefix=refs/broken-indirect-create &&
git symbolic-ref $prefix/symref $prefix/foo &&
echo "gobbledigook" >.git/$prefix/foo &&
test_when_finished "rm -f .git/$prefix/foo" &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: unable to resolve reference $SQ$prefix/foo$SQ: reference broken
EOF
printf "%s\n" "update $prefix/symref $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err &&
cat >expected <<-EOF &&
fatal: cannot lock ref $SQ$prefix/symref$SQ: unable to resolve reference $SQ$prefix/foo$SQ: reference broken
EOF
printf "%s\n" "update $prefix/symref $D $C" |
test_must_fail git update-ref --stdin 2>output.err &&
test_cmp expected output.err
'
test_expect_success 'no bogus intermediate values during delete' '
	prefix=refs/slow-transaction &&
	# Set up a reference with differing loose and packed versions:
	git update-ref $prefix/foo $C &&
	git pack-refs --all &&
	git update-ref $prefix/foo $D &&
	git for-each-ref $prefix >unchanged &&
	# Now try to update the reference, but hold the `packed-refs` lock
	# for a while to see what happens while the process is blocked:
	: >.git/packed-refs.lock &&
	test_when_finished "rm -f .git/packed-refs.lock" &&
	{
		# Note: the following command is intentionally run in the
		# background. We increase the timeout so that `update-ref`
		# attempts to acquire the `packed-refs` lock for much longer
		# than it takes for us to do the check then delete it:
		git -c core.packedrefstimeout=30000 update-ref -d $prefix/foo &
	} &&
	pid2=$! &&
	# Give update-ref plenty of time to get to the point where it tries
	# to lock packed-refs:
	sleep 1 &&
	# Make sure that update-ref did not complete despite the lock:
	kill -0 $pid2 &&
	# Verify that the reference still has its old value:
	sha1=$(git rev-parse --verify --quiet $prefix/foo || echo undefined) &&
	case "$sha1" in
	$D)
		# This is what we hope for; it means that nothing
		# user-visible has changed yet.
		: ;;
	undefined)
		# This is not correct; it means the deletion has happened
		# already even though update-ref should not have been
		# able to acquire the lock yet.
		echo "$prefix/foo deleted prematurely" &&
		break
		;;
	$C)
		# This value should never be seen. Probably the loose
		# reference has been deleted but the packed reference
		# is still there:
		echo "$prefix/foo incorrectly observed to be C" &&
		break
		;;
	*)
		# WTF?
		echo "unexpected value observed for $prefix/foo: $sha1" &&
		break
		;;
	esac >out &&
	rm -f .git/packed-refs.lock &&
	wait $pid2 &&
	test_must_be_empty out &&
	test_must_fail git rev-parse --verify --quiet $prefix/foo
'
test_expect_success 'delete fails cleanly if packed-refs file is locked' '
prefix=refs/locked-packed-refs &&
# Set up a reference with differing loose and packed versions:
git update-ref $prefix/foo $C &&
git pack-refs --all &&
git update-ref $prefix/foo $D &&
git for-each-ref $prefix >unchanged &&
# Now try to delete it while the `packed-refs` lock is held:
: >.git/packed-refs.lock &&
test_when_finished "rm -f .git/packed-refs.lock" &&
test_must_fail git update-ref -d $prefix/foo >out 2>err &&
git for-each-ref $prefix >actual &&
test_i18ngrep "Unable to create $SQ.*packed-refs.lock$SQ: " err &&
test_cmp unchanged actual
'
test_expect_success 'delete fails cleanly if packed-refs.new write fails' '
# Setup and expectations are similar to the test above.
prefix=refs/failed-packed-refs &&
git update-ref $prefix/foo $C &&
git pack-refs --all &&
git update-ref $prefix/foo $D &&
git for-each-ref $prefix >unchanged &&
# This should not happen in practice, but it is an easy way to get a
# reliable error (we open with create_tempfile(), which uses O_EXCL).
: >.git/packed-refs.new &&
test_when_finished "rm -f .git/packed-refs.new" &&
test_must_fail git update-ref -d $prefix/foo &&
git for-each-ref $prefix >actual &&
test_cmp unchanged actual
'
test_done