mirror of
https://github.com/git/git.git
synced 2024-05-08 22:36:10 +02:00
Compare commits
206 Commits
a1b6bdbf5f
...
d17c7c41a5
Author | SHA1 | Date | |
---|---|---|---|
Junio C Hamano | d17c7c41a5 | ||
Junio C Hamano | a9f388adb7 | ||
Junio C Hamano | 68517df9b3 | ||
Junio C Hamano | a036f64c84 | ||
Junio C Hamano | 3507f7cc2c | ||
Junio C Hamano | 6e93b7179b | ||
Junio C Hamano | 40d7122f50 | ||
Junio C Hamano | 05e0a59f43 | ||
Junio C Hamano | 02c8f05cb6 | ||
Junio C Hamano | 655a8aa93b | ||
Junio C Hamano | a71906e14d | ||
Junio C Hamano | 08af859678 | ||
Junio C Hamano | d01c32bd0f | ||
Junio C Hamano | e729936f3b | ||
Junio C Hamano | 0d84efb4d2 | ||
Junio C Hamano | b284ee05c1 | ||
Junio C Hamano | dae7a2848e | ||
Junio C Hamano | c9db527f2b | ||
Junio C Hamano | 42d002b98a | ||
Junio C Hamano | 3d5247292d | ||
Junio C Hamano | 547e0f216c | ||
Junio C Hamano | 4a277284f1 | ||
Junio C Hamano | ff1711aa8d | ||
Junio C Hamano | ec0543990d | ||
Junio C Hamano | 36af2aec03 | ||
Junio C Hamano | 2213ee529a | ||
Junio C Hamano | 5a7b7d66f3 | ||
Junio C Hamano | 35231f7570 | ||
Junio C Hamano | 2d656a8f06 | ||
Junio C Hamano | 90b8d9dae6 | ||
Junio C Hamano | 9b135aad0e | ||
Junio C Hamano | 5d11b483f1 | ||
Linus Arver | d67307d42c | ||
Linus Arver | 973e567a5b | ||
Linus Arver | c78c547df3 | ||
Linus Arver | 5d5c3a1f96 | ||
Linus Arver | 0c27b3fe36 | ||
Linus Arver | 6fb35a02c1 | ||
Linus Arver | 0aad163420 | ||
Linus Arver | c1cd6d3db0 | ||
Linus Arver | ffc48ec0e2 | ||
Linus Arver | 88c6a87170 | ||
Rubén Justo | 02f4ea5266 | ||
Junio C Hamano | 1ecbdd0d19 | ||
Karthik Nayak | f6bb0b94b9 | ||
Karthik Nayak | a5c288a529 | ||
Karthik Nayak | 2a871280a6 | ||
Karthik Nayak | 1c0d5fa54f | ||
Karthik Nayak | adac361761 | ||
Karthik Nayak | 6b1d1d6370 | ||
Karthik Nayak | c7954434c7 | ||
Karthik Nayak | 316a1bfaaf | ||
Roland Hieber | 6b7c45e8c9 | ||
Roland Hieber | d13a295074 | ||
Roland Hieber | cb85fdf4a4 | ||
Johannes Schindelin | d14cfd0ffa | ||
Junio C Hamano | f8e1deb644 | ||
Josh Steadmon | d0ab1a164f | ||
Jeff King | a35c005757 | ||
Josh Steadmon | d012ab2288 | ||
Josh Steadmon | e267557cab | ||
Josh Steadmon | 54bc9a2f27 | ||
Josh Steadmon | 5b25d70efe | ||
Josh Steadmon | 2065c774ed | ||
Josh Steadmon | 8427b7e72b | ||
Johannes Schindelin | c75662bfc9 | ||
Johannes Schindelin | 12c2ee5fbd | ||
Junio C Hamano | ce36894509 | ||
Junio C Hamano | ce48fb2eab | ||
Patrick Steinhardt | aceb1c27e6 | ||
Patrick Steinhardt | 7f564bc977 | ||
Patrick Steinhardt | acdc335345 | ||
Patrick Steinhardt | 93cddb1559 | ||
Patrick Steinhardt | de4f0cf494 | ||
Patrick Steinhardt | 1ab05e5efb | ||
Patrick Steinhardt | 8ef8e73719 | ||
Patrick Steinhardt | 75f37a77da | ||
Patrick Steinhardt | 013827af99 | ||
Patrick Steinhardt | fe6f48045b | ||
Patrick Steinhardt | e850e67b0a | ||
Patrick Steinhardt | 10f22ffa78 | ||
Adam Johnson | 5fb7686409 | ||
Dragan Simic | cadcf58085 | ||
Linus Arver | 61e124bb2d | ||
Linus Arver | bf96614541 | ||
Linus Arver | 01ea2b2836 | ||
Linus Arver | e2663c4597 | ||
Linus Arver | c8d6a54a07 | ||
Linus Arver | 84b91fc465 | ||
Linus Arver | 824503ce88 | ||
Linus Arver | 7e50b3f5df | ||
Phillip Wood | a6c2654f83 | ||
Phillip Wood | 53f6746615 | ||
Phillip Wood | 497a01a2d3 | ||
Phillip Wood | a3152edc97 | ||
Phillip Wood | 42aae6a49a | ||
Junio C Hamano | 2a60cb766e | ||
Patrick Steinhardt | 0c47355790 | ||
Patrick Steinhardt | 19fa8cd48c | ||
Patrick Steinhardt | 9ee6d63bab | ||
Patrick Steinhardt | 66bce9d00b | ||
Patrick Steinhardt | f59aa5e0a9 | ||
Patrick Steinhardt | 319ba14407 | ||
Xing Xin | 93e2ae1c95 | ||
brian m. carlson | ffff4ac065 | ||
brian m. carlson | 40220f48b1 | ||
brian m. carlson | 30c0a3036f | ||
brian m. carlson | ac4c7cbfaa | ||
brian m. carlson | 37417b7717 | ||
brian m. carlson | bd590bde58 | ||
brian m. carlson | 36f7d865e3 | ||
brian m. carlson | 8470c94be3 | ||
brian m. carlson | ad9bb6dfe6 | ||
brian m. carlson | 5af5cc68aa | ||
brian m. carlson | 2ae6dc686d | ||
brian m. carlson | ca9ccbf674 | ||
brian m. carlson | 6a6d6fb12e | ||
brian m. carlson | d01c76f1cf | ||
brian m. carlson | 90765ea81e | ||
brian m. carlson | 7046f1d572 | ||
Patrick Steinhardt | 70b81fbf3c | ||
Patrick Steinhardt | db1d63bf57 | ||
Patrick Steinhardt | ca13c3e94a | ||
Patrick Steinhardt | 04ba2c7eb3 | ||
Patrick Steinhardt | ca44ef3165 | ||
Patrick Steinhardt | 9cdeb34b96 | ||
Patrick Steinhardt | 2c5c7639e5 | ||
Patrick Steinhardt | d1ef3d3b1d | ||
Patrick Steinhardt | 40c60f4c12 | ||
Patrick Steinhardt | 21bcb4a602 | ||
Patrick Steinhardt | 11d3f1aa5f | ||
Patrick Steinhardt | ab2b3aadf3 | ||
Patrick Steinhardt | 2d65e5b6a6 | ||
Dragan Simic | 796d081f86 | ||
Dragan Simic | c02dc38570 | ||
Patrick Steinhardt | fa74f32291 | ||
Patrick Steinhardt | a155ab2bf4 | ||
Patrick Steinhardt | 8aaeffe3b5 | ||
Patrick Steinhardt | 60dd319519 | ||
Patrick Steinhardt | 7e892fec47 | ||
Patrick Steinhardt | d0dd119f72 | ||
Patrick Steinhardt | 44afd85fbd | ||
Patrick Steinhardt | 485c63cf5c | ||
Patrick Steinhardt | 4af31dc84a | ||
Patrick Steinhardt | 455d61b6d2 | ||
Patrick Steinhardt | f57cc987a9 | ||
Phillip Wood | 67ae0f9494 | ||
Phillip Wood | fd315c4311 | ||
Eric Wong | 172b78f0f1 | ||
Eric Wong | 4bfdf5800f | ||
Eric Wong | 66f36956f0 | ||
Taylor Blau | 4cbfcd8092 | ||
Taylor Blau | 7e491570cf | ||
Taylor Blau | 2e58dc2aae | ||
Taylor Blau | 33b446a467 | ||
Taylor Blau | 4a2598d798 | ||
Taylor Blau | d4e6e581e8 | ||
Taylor Blau | cfe8a0fbad | ||
Taylor Blau | 0c6600dd0b | ||
Taylor Blau | 9441dcf968 | ||
Taylor Blau | aa561e9859 | ||
Taylor Blau | 2f1510773c | ||
Taylor Blau | 67fae97280 | ||
Taylor Blau | 5d2ff78bf1 | ||
Taylor Blau | be78176a72 | ||
Taylor Blau | abda8a70cf | ||
Taylor Blau | 9bde930afa | ||
Taylor Blau | cd1350305b | ||
Taylor Blau | f90f154e80 | ||
Taylor Blau | 52c172f282 | ||
Taylor Blau | afc4b56406 | ||
Taylor Blau | 6ff2e68c44 | ||
Taylor Blau | c1540ea44c | ||
Taylor Blau | 191f0173d1 | ||
Taylor Blau | 67fd41174f | ||
Ignacio Encinas | c94884d5fc | ||
Ignacio Encinas | 417da9673b | ||
Junio C Hamano | fe86a3474a | ||
Dragan Simic | 2b65364061 | ||
Junio C Hamano | 0ebed321d4 | ||
Calvin Wan | c9e04a1e1f | ||
Calvin Wan | bb8fcd7e65 | ||
Jonathan Tan | bde11fdc7a | ||
Junio C Hamano | 483b759b47 | ||
Taylor Blau | cdaf3a673d | ||
Taylor Blau | 9dafc9dd55 | ||
Taylor Blau | f6c99dd8d6 | ||
Taylor Blau | f5ed359d4f | ||
Taylor Blau | 4b3ecbde03 | ||
Taylor Blau | 72b1a01cbc | ||
Taylor Blau | fe581f2099 | ||
Taylor Blau | e224c7a813 | ||
Taylor Blau | b5cfb6b517 | ||
Taylor Blau | 1b37a1af03 | ||
Taylor Blau | 4ba29e7251 | ||
Taylor Blau | c193f6ad22 | ||
Jonathan Tan | 822c686508 | ||
Taylor Blau | 188a189bde | ||
Taylor Blau | a88cdc5641 | ||
Taylor Blau | 8683fad29c | ||
Britton Leo Kerin | d372feb203 | ||
Britton Leo Kerin | 306f468cc0 | ||
Junio C Hamano | 3ea54d054a | ||
Junio C Hamano | ca30ba266c | ||
Junio C Hamano | d28eb5e4f0 | ||
Junio C Hamano | d63a9e201a |
|
@ -303,7 +303,7 @@ jobs:
|
|||
CC: ${{matrix.vector.cc}}
|
||||
CC_PACKAGE: ${{matrix.vector.cc_package}}
|
||||
jobname: ${{matrix.vector.jobname}}
|
||||
runs_on_pool: ${{matrix.vector.pool}}
|
||||
distro: ${{matrix.vector.pool}}
|
||||
runs-on: ${{matrix.vector.pool}}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
@ -342,12 +342,16 @@ jobs:
|
|||
vector:
|
||||
- jobname: linux-musl
|
||||
image: alpine
|
||||
distro: alpine-latest
|
||||
- jobname: linux32
|
||||
image: daald/ubuntu32:xenial
|
||||
distro: ubuntu32-16.04
|
||||
- jobname: pedantic
|
||||
image: fedora
|
||||
distro: fedora-latest
|
||||
env:
|
||||
jobname: ${{matrix.vector.jobname}}
|
||||
distro: ${{matrix.vector.distro}}
|
||||
runs-on: ubuntu-latest
|
||||
container: ${{matrix.vector.image}}
|
||||
steps:
|
||||
|
@ -355,7 +359,7 @@ jobs:
|
|||
if: matrix.vector.jobname != 'linux32'
|
||||
- uses: actions/checkout@v1 # cannot be upgraded because Node.js Actions aren't supported in this container
|
||||
if: matrix.vector.jobname == 'linux32'
|
||||
- run: ci/install-docker-dependencies.sh
|
||||
- run: ci/install-dependencies.sh
|
||||
- run: ci/run-build-and-tests.sh
|
||||
- name: print test failures
|
||||
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
|
||||
|
|
|
@ -9,8 +9,10 @@ workflow:
|
|||
|
||||
test:linux:
|
||||
image: $image
|
||||
variables:
|
||||
CUSTOM_PATH: "/custom"
|
||||
before_script:
|
||||
- ./ci/install-docker-dependencies.sh
|
||||
- ./ci/install-dependencies.sh
|
||||
script:
|
||||
- useradd builder --create-home
|
||||
- chown -R builder "${CI_PROJECT_DIR}"
|
||||
|
@ -98,7 +100,7 @@ static-analysis:
|
|||
variables:
|
||||
jobname: StaticAnalysis
|
||||
before_script:
|
||||
- ./ci/install-docker-dependencies.sh
|
||||
- ./ci/install-dependencies.sh
|
||||
script:
|
||||
- ./ci/run-static-analysis.sh
|
||||
- ./ci/check-directional-formatting.bash
|
||||
|
|
|
@ -110,6 +110,7 @@ TECH_DOCS += SubmittingPatches
|
|||
TECH_DOCS += ToolsForGit
|
||||
TECH_DOCS += technical/bitmap-format
|
||||
TECH_DOCS += technical/bundle-uri
|
||||
TECH_DOCS += technical/git-std-lib
|
||||
TECH_DOCS += technical/hash-function-transition
|
||||
TECH_DOCS += technical/long-running-process-protocol
|
||||
TECH_DOCS += technical/multi-pack-index
|
||||
|
|
|
@ -1116,6 +1116,15 @@ $ git send-email --to=target@example.com psuh/*.patch
|
|||
NOTE: Check `git help send-email` for some other options which you may find
|
||||
valuable, such as changing the Reply-to address or adding more CC and BCC lines.
|
||||
|
||||
:contrib-scripts: footnoteref:[contrib-scripts,Scripts under `contrib/` are +
|
||||
not part of the core `git` binary and must be called directly. Clone the Git +
|
||||
codebase and run `perl contrib/contacts/git-contacts`.]
|
||||
|
||||
NOTE: If you're not sure whom to CC, running `contrib/contacts/git-contacts` can
|
||||
list potential reviewers. In addition, you can do `git send-email
|
||||
--cc-cmd='perl contrib/contacts/git-contacts' feature/*.patch`{contrib-scripts} to
|
||||
automatically pass this list of emails to `send-email`.
|
||||
|
||||
NOTE: When you are sending a real patch, it will go to git@vger.kernel.org - but
|
||||
please don't send your patchset from the tutorial to the real mailing list! For
|
||||
now, you can send it to yourself, to make sure you understand how it will look.
|
||||
|
|
|
@ -397,17 +397,57 @@ letter.
|
|||
[[send-patches]]
|
||||
=== Sending your patches.
|
||||
|
||||
==== Choosing your reviewers
|
||||
|
||||
:security-ml: footnoteref:[security-ml,The Git Security mailing list: git-security@googlegroups.com]
|
||||
|
||||
Before sending any patches, please note that patches that may be
|
||||
NOTE: Patches that may be
|
||||
security relevant should be submitted privately to the Git Security
|
||||
mailing list{security-ml}, instead of the public mailing list.
|
||||
|
||||
Learn to use format-patch and send-email if possible. These commands
|
||||
:contrib-scripts: footnoteref:[contrib-scripts,Scripts under `contrib/` are +
|
||||
not part of the core `git` binary and must be called directly. Clone the Git +
|
||||
codebase and run `perl contrib/contacts/git-contacts`.]
|
||||
|
||||
Send your patch with "To:" set to the mailing list, with "cc:" listing
|
||||
people who are involved in the area you are touching (the `git-contacts`
|
||||
script in `contrib/contacts/`{contrib-scripts} can help to
|
||||
identify them), to solicit comments and reviews. Also, when you made
|
||||
trial merges of your topic to `next` and `seen`, you may have noticed
|
||||
work by others conflicting with your changes. There is a good possibility
|
||||
that these people may know the area you are touching well.
|
||||
|
||||
If you are using `send-email`, you can feed it the output of `git-contacts` like
|
||||
this:
|
||||
|
||||
....
|
||||
git send-email --cc-cmd='perl contrib/contacts/git-contacts' feature/*.patch
|
||||
....
|
||||
|
||||
:current-maintainer: footnote:[The current maintainer: gitster@pobox.com]
|
||||
:git-ml: footnote:[The mailing list: git@vger.kernel.org]
|
||||
|
||||
After the list reached a consensus that it is a good idea to apply the
|
||||
patch, re-send it with "To:" set to the maintainer{current-maintainer}
|
||||
and "cc:" the list{git-ml} for inclusion. This is especially relevant
|
||||
when the maintainer did not heavily participate in the discussion and
|
||||
instead left the review to trusted others.
|
||||
|
||||
Do not forget to add trailers such as `Acked-by:`, `Reviewed-by:` and
|
||||
`Tested-by:` lines as necessary to credit people who helped your
|
||||
patch, and "cc:" them when sending such a final version for inclusion.
|
||||
|
||||
==== `format-patch` and `send-email`
|
||||
|
||||
Learn to use `format-patch` and `send-email` if possible. These commands
|
||||
are optimized for the workflow of sending patches, avoiding many ways
|
||||
your existing e-mail client (often optimized for "multipart/*" MIME
|
||||
type e-mails) might render your patches unusable.
|
||||
|
||||
NOTE: Here we outline the procedure using `format-patch` and
|
||||
`send-email`, but you can instead use GitGitGadget to send in your
|
||||
patches (see link:MyFirstContribution.html[MyFirstContribution]).
|
||||
|
||||
People on the Git mailing list need to be able to read and
|
||||
comment on the changes you are submitting. It is important for
|
||||
a developer to be able to "quote" your changes, using standard
|
||||
|
@ -500,34 +540,6 @@ patch, format it as "multipart/signed", not a text/plain message
|
|||
that starts with `-----BEGIN PGP SIGNED MESSAGE-----`. That is
|
||||
not a text/plain, it's something else.
|
||||
|
||||
:security-ml-ref: footnoteref:[security-ml]
|
||||
|
||||
As mentioned at the beginning of the section, patches that may be
|
||||
security relevant should not be submitted to the public mailing list
|
||||
mentioned below, but should instead be sent privately to the Git
|
||||
Security mailing list{security-ml-ref}.
|
||||
|
||||
Send your patch with "To:" set to the mailing list, with "cc:" listing
|
||||
people who are involved in the area you are touching (the `git
|
||||
contacts` command in `contrib/contacts/` can help to
|
||||
identify them), to solicit comments and reviews. Also, when you made
|
||||
trial merges of your topic to `next` and `seen`, you may have noticed
|
||||
work by others conflicting with your changes. There is a good possibility
|
||||
that these people may know the area you are touching well.
|
||||
|
||||
:current-maintainer: footnote:[The current maintainer: gitster@pobox.com]
|
||||
:git-ml: footnote:[The mailing list: git@vger.kernel.org]
|
||||
|
||||
After the list reached a consensus that it is a good idea to apply the
|
||||
patch, re-send it with "To:" set to the maintainer{current-maintainer}
|
||||
and "cc:" the list{git-ml} for inclusion. This is especially relevant
|
||||
when the maintainer did not heavily participate in the discussion and
|
||||
instead left the review to trusted others.
|
||||
|
||||
Do not forget to add trailers such as `Acked-by:`, `Reviewed-by:` and
|
||||
`Tested-by:` lines as necessary to credit people who helped your
|
||||
patch, and "cc:" them when sending such a final version for inclusion.
|
||||
|
||||
== Subsystems with dedicated maintainers
|
||||
|
||||
Some parts of the system have dedicated maintainers with their own
|
||||
|
|
|
@ -58,11 +58,11 @@ compared case sensitively. These subsection names follow the same
|
|||
restrictions as section names.
|
||||
|
||||
All the other lines (and the remainder of the line after the section
|
||||
header) are recognized as setting variables, in the form
|
||||
'name = value' (or just 'name', which is a short-hand to say that
|
||||
the variable is the boolean "true").
|
||||
The variable names are case-insensitive, allow only alphanumeric characters
|
||||
and `-`, and must start with an alphabetic character.
|
||||
header) are recognized as setting variables, in the form 'name = value'
|
||||
(or just 'name', which is a short-hand to say that the variable is the
|
||||
boolean "true"). The variable names are case-insensitive, allow only
|
||||
alphanumeric characters and `-`, and must start with an alphabetic
|
||||
character.
|
||||
|
||||
Whitespace characters surrounding `name`, `=` and `value` are discarded.
|
||||
Internal whitespace characters within 'value' are retained verbatim.
|
||||
|
@ -97,10 +97,10 @@ to be included. The variable takes a pathname as its value, and is
|
|||
subject to tilde expansion. These variables can be given multiple times.
|
||||
|
||||
The contents of the included file are inserted immediately, as if they
|
||||
had been found at the location of the include directive. If the value of the
|
||||
variable is a relative path, the path is considered to
|
||||
be relative to the configuration file in which the include directive
|
||||
was found. See below for examples.
|
||||
had been found at the location of the include directive. If the value of
|
||||
the variable is a relative path, the path is considered to be relative to
|
||||
the configuration file in which the include directive was found. See
|
||||
below for examples.
|
||||
|
||||
Conditional includes
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
@ -188,6 +188,12 @@ As for the naming of this keyword, it is for forwards compatibility with
|
|||
a naming scheme that supports more variable-based include conditions,
|
||||
but currently Git only supports the exact keyword described above.
|
||||
|
||||
`hostname`::
|
||||
The data that follows the keyword `hostname:` is taken to be a
|
||||
pattern with standard globbing wildcards. If the current
|
||||
hostname (output of gethostname(2)) matches the
|
||||
pattern, the include condition is met.
|
||||
|
||||
A few more notes on matching via `gitdir` and `gitdir/i`:
|
||||
|
||||
* Symlinks in `$GIT_DIR` are not resolved before matching.
|
||||
|
@ -263,6 +269,10 @@ Example
|
|||
path = foo.inc
|
||||
[remote "origin"]
|
||||
url = https://example.com/git
|
||||
|
||||
; include only if the hostname of the machine matches some-hostname
|
||||
[includeIf "hostname:some-hostname"]
|
||||
path = foo.inc
|
||||
----
|
||||
|
||||
Values
|
||||
|
@ -383,6 +393,8 @@ include::config/apply.txt[]
|
|||
|
||||
include::config/attr.txt[]
|
||||
|
||||
include::config/bitmap-pseudo-merge.txt[]
|
||||
|
||||
include::config/blame.txt[]
|
||||
|
||||
include::config/branch.txt[]
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
bitmapPseudoMerge.<name>.pattern::
|
||||
Regular expression used to match reference names. Commits
|
||||
pointed to by references matching this pattern (and meeting
|
||||
the below criteria, like `bitmapPseudoMerge.<name>.sampleRate`
|
||||
and `bitmapPseudoMerge.<name>.threshold`) will be considered
|
||||
for inclusion in a pseudo-merge bitmap.
|
||||
+
|
||||
Commits are grouped into pseudo-merge groups based on whether or not
|
||||
any reference(s) that point at a given commit match the pattern, which
|
||||
is an extended regular expression.
|
||||
+
|
||||
Within a pseudo-merge group, commits may be further grouped into
|
||||
sub-groups based on the capture groups in the pattern. These
|
||||
sub-groupings are formed from the regular expressions by concatenating
|
||||
any capture groups from the regular expression, with a '-' dash in
|
||||
between.
|
||||
+
|
||||
For example, if the pattern is `refs/tags/`, then all tags (provided
|
||||
they meet the below criteria) will be considered candidates for the
|
||||
same pseudo-merge group. However, if the pattern is instead
|
||||
`refs/remotes/([0-9])+/tags/`, then tags from different remotes will
|
||||
be grouped into separate pseudo-merge groups, based on the remote
|
||||
number.
|
||||
|
||||
bitmapPseudoMerge.<name>.decay::
|
||||
Determines the rate at which consecutive pseudo-merge bitmap
|
||||
groups decrease in size. Must be non-negative. This parameter
|
||||
can be thought of as `k` in the function `f(n) = C *
|
||||
n^(-k/100)`, where `f(n)` is the size of the `n`th group.
|
||||
+
|
||||
Setting the decay rate equal to `0` will cause all groups to be the
|
||||
same size. Setting the decay rate equal to `100` will cause the `n`th
|
||||
group to be `1/n` the size of the initial group. Higher values of the
|
||||
decay rate cause consecutive groups to shrink at an increasing rate.
|
||||
The default is `100`.
|
||||
|
||||
bitmapPseudoMerge.<name>.sampleRate::
|
||||
Determines the proportion of non-bitmapped commits (among
|
||||
reference tips) which are selected for inclusion in an
|
||||
unstable pseudo-merge bitmap. Must be between `0` and `100`
|
||||
(inclusive). The default is `100`.
|
||||
|
||||
bitmapPseudoMerge.<name>.threshold::
|
||||
Determines the minimum age of non-bitmapped commits (among
|
||||
reference tips, as above) which are candidates for inclusion
|
||||
in an unstable pseudo-merge bitmap. The default is
|
||||
`1.week.ago`.
|
||||
|
||||
bitmapPseudoMerge.<name>.maxMerges::
|
||||
Determines the maximum number of pseudo-merge commits among
|
||||
which commits may be distributed.
|
||||
+
|
||||
For pseudo-merge groups whose pattern does not contain any capture
|
||||
groups, this setting is applied for all commits matching the regular
|
||||
expression. For patterns that have one or more capture groups, this
|
||||
setting is applied for each distinct capture group.
|
||||
+
|
||||
For example, if your capture group is `refs/tags/`, then this setting
|
||||
will distribute all tags into a maximum of `maxMerges` pseudo-merge
|
||||
commits. However, if your capture group is, say,
|
||||
`refs/remotes/([0-9]+)/tags/`, then this setting will be applied to
|
||||
each remote's set of tags individually.
|
||||
+
|
||||
Must be non-negative. The default value is 64.
|
||||
|
||||
bitmapPseudoMerge.<name>.stableThreshold::
|
||||
Determines the minimum age of commits (among reference tips,
|
||||
as above, however stable commits are still considered
|
||||
candidates even when they have been covered by a bitmap) which
|
||||
are candidates for a stable pseudo-merge bitmap. The default
|
||||
is `1.month.ago`.
|
||||
|
||||
bitmapPseudoMerge.<name>.stableSize::
|
||||
Determines the size (in number of commits) of a stable
|
||||
pseudo-merge bitmap. The default is `512`.
|
|
@ -9,6 +9,29 @@ commitGraph.maxNewFilters::
|
|||
commit-graph write` (c.f., linkgit:git-commit-graph[1]).
|
||||
|
||||
commitGraph.readChangedPaths::
|
||||
If true, then git will use the changed-path Bloom filters in the
|
||||
commit-graph file (if it exists, and they are present). Defaults to
|
||||
true. See linkgit:git-commit-graph[1] for more information.
|
||||
Deprecated. Equivalent to commitGraph.changedPathsVersion=-1 if true, and
|
||||
commitGraph.changedPathsVersion=0 if false. (If commitGraph.changedPathsVersion
|
||||
is also set, commitGraph.changedPathsVersion takes precedence.)
|
||||
|
||||
commitGraph.changedPathsVersion::
|
||||
Specifies the version of the changed-path Bloom filters that Git will read and
|
||||
write. May be -1, 0, 1, or 2. Note that values greater than 1 may be
|
||||
incompatible with older versions of Git which do not yet understand
|
||||
those versions. Use caution when operating in a mixed-version
|
||||
environment.
|
||||
+
|
||||
Defaults to -1.
|
||||
+
|
||||
If -1, Git will use the version of the changed-path Bloom filters in the
|
||||
repository, defaulting to 1 if there are none.
|
||||
+
|
||||
If 0, Git will not read any Bloom filters, and will write version 1 Bloom
|
||||
filters when instructed to write.
|
||||
+
|
||||
If 1, Git will only read version 1 Bloom filters, and will write version 1
|
||||
Bloom filters.
|
||||
+
|
||||
If 2, Git will only read version 2 Bloom filters, and will write version 2
|
||||
Bloom filters.
|
||||
+
|
||||
See linkgit:git-commit-graph[1] for more information.
|
||||
|
|
|
@ -8,7 +8,7 @@ git-credential - Retrieve and store user credentials
|
|||
SYNOPSIS
|
||||
--------
|
||||
------------------
|
||||
'git credential' (fill|approve|reject)
|
||||
'git credential' (fill|approve|reject|capability)
|
||||
------------------
|
||||
|
||||
DESCRIPTION
|
||||
|
@ -41,6 +41,9 @@ If the action is `reject`, git-credential will send the description to
|
|||
any configured credential helpers, which may erase any stored
|
||||
credentials matching the description.
|
||||
|
||||
If the action is `capability`, git-credential will announce any capabilities
|
||||
it supports to standard output.
|
||||
|
||||
If the action is `approve` or `reject`, no output should be emitted.
|
||||
|
||||
TYPICAL USE OF GIT CREDENTIAL
|
||||
|
@ -111,7 +114,9 @@ attribute per line. Each attribute is specified by a key-value pair,
|
|||
separated by an `=` (equals) sign, followed by a newline.
|
||||
|
||||
The key may contain any bytes except `=`, newline, or NUL. The value may
|
||||
contain any bytes except newline or NUL.
|
||||
contain any bytes except newline or NUL. A line, including the trailing
|
||||
newline, may not exceed 65535 bytes in order to allow implementations to
|
||||
parse efficiently.
|
||||
|
||||
Attributes with keys that end with C-style array brackets `[]` can have
|
||||
multiple values. Each instance of a multi-valued attribute forms an
|
||||
|
@ -178,6 +183,61 @@ empty string.
|
|||
Components which are missing from the URL (e.g., there is no
|
||||
username in the example above) will be left unset.
|
||||
|
||||
`authtype`::
|
||||
This indicates that the authentication scheme in question should be used.
|
||||
Common values for HTTP and HTTPS include `basic`, `bearer`, and `digest`,
|
||||
although the latter is insecure and should not be used. If `credential`
|
||||
is used, this may be set to an arbitrary string suitable for the protocol in
|
||||
question (usually HTTP).
|
||||
+
|
||||
This value should not be sent unless the appropriate capability (see below) is
|
||||
provided on input.
|
||||
|
||||
`credential`::
|
||||
The pre-encoded credential, suitable for the protocol in question (usually
|
||||
HTTP). If this key is sent, `authtype` is mandatory, and `username` and
|
||||
`password` are not used. For HTTP, Git concatenates the `authtype` value and
|
||||
this value with a single space to determine the `Authorization` header.
|
||||
+
|
||||
This value should not be sent unless the appropriate capability (see below) is
|
||||
provided on input.
|
||||
|
||||
`ephemeral`::
|
||||
This boolean value indicates, if true, that the value in the `credential`
|
||||
field should not be saved by the credential helper because its usefulness is
|
||||
limited in time. For example, an HTTP Digest `credential` value is computed
|
||||
using a nonce and reusing it will not result in successful authentication.
|
||||
This may also be used for situations with short duration (e.g., 24-hour)
|
||||
credentials. The default value is false.
|
||||
+
|
||||
The credential helper will still be invoked with `store` or `erase` so that it
|
||||
can determine whether the operation was successful.
|
||||
+
|
||||
This value should not be sent unless the appropriate capability (see below) is
|
||||
provided on input.
|
||||
|
||||
`state[]`::
|
||||
This value provides an opaque state that will be passed back to this helper
|
||||
if it is called again. Each different credential helper may specify this
|
||||
once. The value should include a prefix unique to the credential helper and
|
||||
should ignore values that don't match its prefix.
|
||||
+
|
||||
This value should not be sent unless the appropriate capability (see below) is
|
||||
provided on input.
|
||||
|
||||
`continue`::
|
||||
This is a boolean value, which, if enabled, indicates that this
|
||||
authentication is a non-final part of a multistage authentication step. This
|
||||
is common in protocols such as NTLM and Kerberos, where two rounds of client
|
||||
authentication are required, and setting this flag allows the credential
|
||||
helper to implement the multistage authentication step. This flag should
|
||||
only be sent if a further stage is required; that is, if another round of
|
||||
authentication is expected.
|
||||
+
|
||||
This value should not be sent unless the appropriate capability (see below) is
|
||||
provided on input. This attribute is 'one-way' from a credential helper to
|
||||
pass information to Git (or other programs invoking `git credential`).
|
||||
|
||||
`wwwauth[]`::
|
||||
|
||||
When an HTTP response is received by Git that includes one or more
|
||||
|
@ -189,7 +249,45 @@ attribute 'wwwauth[]', where the order of the attributes is the same as
|
|||
they appear in the HTTP response. This attribute is 'one-way' from Git
|
||||
to pass additional information to credential helpers.
|
||||
|
||||
Unrecognised attributes are silently discarded.
|
||||
`capability[]`::
|
||||
This signals that Git, or the helper, as appropriate, supports the capability
|
||||
in question. This can be used to provide better, more specific data as part
|
||||
of the protocol. A `capability[]` directive must precede any value depending
|
||||
on it and these directives _should_ be the first item announced in the
|
||||
protocol.
|
||||
+
|
||||
There are two currently supported capabilities. The first is `authtype`, which
|
||||
indicates that the `authtype`, `credential`, and `ephemeral` values are
|
||||
understood. The second is `state`, which indicates that the `state[]` and
|
||||
`continue` values are understood.
|
||||
+
|
||||
It is not obligatory to use the additional features just because the capability
|
||||
is supported, but they should not be provided without the capability.
|
||||
|
||||
Unrecognised attributes and capabilities are silently discarded.
|
||||
|
||||
[[CAPA-IOFMT]]
|
||||
CAPABILITY INPUT/OUTPUT FORMAT
|
||||
------------------------------
|
||||
|
||||
For `git credential capability`, the format is slightly different. First, a
|
||||
`version 0` announcement is made to indicate the current version of the
|
||||
protocol, and then each capability is announced with a line like `capability
|
||||
authtype`. Credential helpers may also implement this format, again with the
|
||||
`capability` argument. Additional lines may be added in the future; callers
|
||||
should ignore lines which they don't understand.
|
||||
|
||||
Because this is a new part of the credential helper protocol, older versions of
|
||||
Git, as well as some credential helpers, may not support it. If a non-zero
|
||||
exit status is received, or if the first line doesn't start with the word
|
||||
`version` and a space, callers should assume that no capabilities are supported.
|
||||
|
||||
The intention of this format is to differentiate it from the credential output
|
||||
in an unambiguous way. It is possible to use very simple credential helpers
|
||||
(e.g., inline shell scripts) which always produce identical output. Using a
|
||||
distinct format allows users to continue to use this syntax without having to
|
||||
worry about correctly implementing capability advertisements or accidentally
|
||||
confusing callers querying for capabilities.
|
||||
|
||||
GIT
|
||||
---
|
||||
|
|
|
@ -42,6 +42,15 @@ These config values are loaded from system, global, and local Git config,
|
|||
as available. If `git for-each-repo` is run in a directory that is not a
|
||||
Git repository, then only the system and global config is used.
|
||||
|
||||
--keep-going::
|
||||
Continue with the remaining repositories if the command failed
|
||||
on a repository. The exit code will still indicate that the
|
||||
overall operation was not successful.
|
||||
+
|
||||
Note that the exact exit code of the failing command is not passed
|
||||
through as the exit code of the `for-each-repo` command: If the command
|
||||
failed in any of the specified repositories, the overall exit code will
|
||||
be 1.
|
||||
|
||||
SUBPROCESS BEHAVIOR
|
||||
-------------------
|
||||
|
|
|
@ -20,7 +20,7 @@ SYNOPSIS
|
|||
[--in-reply-to=<message-id>] [--suffix=.<sfx>]
|
||||
[--ignore-if-in-upstream] [--always]
|
||||
[--cover-from-description=<mode>]
|
||||
[--rfc] [--subject-prefix=<subject-prefix>]
|
||||
[--rfc[=<rfc>]] [--subject-prefix=<subject-prefix>]
|
||||
[(--reroll-count|-v) <n>]
|
||||
[--to=<email>] [--cc=<email>]
|
||||
[--[no-]cover-letter] [--quiet]
|
||||
|
@ -238,10 +238,21 @@ the patches (with a value of e.g. "PATCH my-project").
|
|||
value of the `format.filenameMaxLength` configuration
|
||||
variable, or 64 if unconfigured.
|
||||
|
||||
--rfc::
|
||||
Prepends "RFC" to the subject prefix (producing "RFC PATCH" by
|
||||
default). RFC means "Request For Comments"; use this when sending
|
||||
an experimental patch for discussion rather than application.
|
||||
--rfc[=<rfc>]::
|
||||
Prepends the string _<rfc>_ (defaults to "RFC") to
|
||||
the subject prefix. As the subject prefix defaults to
|
||||
"PATCH", you'll get "RFC PATCH" by default.
|
||||
+
|
||||
RFC means "Request For Comments"; use this when sending
|
||||
an experimental patch for discussion rather than application.
|
||||
"--rfc=WIP" may also be a useful way to indicate that a patch
|
||||
is not complete yet ("WIP" stands for "Work In Progress").
|
||||
+
|
||||
If the convention of the receiving community for a particular extra
|
||||
string is to have it _after_ the subject prefix, the string _<rfc>_
|
||||
can be prefixed with a dash ("`-`") to signal that the rest of
|
||||
the _<rfc>_ string should be appended to the subject prefix instead,
|
||||
e.g., `--rfc='-(WIP)'` results in "PATCH (WIP)".
|
||||
|
||||
-v <n>::
|
||||
--reroll-count=<n>::
|
||||
|
|
|
@ -61,10 +61,10 @@ still contains <old-oid>.
|
|||
With `--stdin`, update-ref reads instructions from standard input and
|
||||
performs all modifications together. Specify commands of the form:
|
||||
|
||||
update SP <ref> SP <new-oid> [SP <old-oid>] LF
|
||||
create SP <ref> SP <new-oid> LF
|
||||
delete SP <ref> [SP <old-oid>] LF
|
||||
verify SP <ref> [SP <old-oid>] LF
|
||||
update SP <ref> SP (<new-oid> | ref:<new-target>) [SP (<old-oid> | ref:<old-target>)] LF
|
||||
create SP <ref> SP (<new-oid> | ref:<new-target>) LF
|
||||
delete SP <ref> [SP (<old-oid> | ref:<old-target>)] LF
|
||||
verify SP <ref> [SP (<old-oid> | ref:<old-target>)] LF
|
||||
option SP <opt> LF
|
||||
start LF
|
||||
prepare LF
|
||||
|
@ -82,10 +82,10 @@ specify a missing value, omit the value and its preceding SP entirely.
|
|||
Alternatively, use `-z` to specify in NUL-terminated format, without
|
||||
quoting:
|
||||
|
||||
update SP <ref> NUL <new-oid> NUL [<old-oid>] NUL
|
||||
create SP <ref> NUL <new-oid> NUL
|
||||
delete SP <ref> NUL [<old-oid>] NUL
|
||||
verify SP <ref> NUL [<old-oid>] NUL
|
||||
update SP <ref> NUL (<new-oid> | ref:<new-target>) NUL [(<old-oid> | ref:<old-target>)] NUL
|
||||
create SP <ref> NUL (<new-oid> | ref:<new-target>) NUL
|
||||
delete SP <ref> NUL [(<old-oid> | ref:<old-target>)] NUL
|
||||
verify SP <ref> NUL [(<old-oid> | ref:<old-target>)] NUL
|
||||
option SP <opt> NUL
|
||||
start NUL
|
||||
prepare NUL
|
||||
|
@ -95,6 +95,12 @@ quoting:
|
|||
In this format, use 40 "0" to specify a zero value, and use the empty
|
||||
string to specify a missing value.
|
||||
|
||||
For commands which support it, substituting the <old-oid> value with
|
||||
ref:<old-target> will ensure that the <ref> targets the specified
|
||||
old-target before the update. Similarly, substituting the <new-oid>
|
||||
with ref:<new-target> will ensure that the <ref> is a symbolic ref
|
||||
targeting the new-target after the update.
|
||||
|
||||
In either format, values can be specified in any form that Git
|
||||
recognizes as an object name. Commands in any other format or a
|
||||
repeated <ref> produce an error. Command meanings are:
|
||||
|
@ -103,19 +109,28 @@ update::
|
|||
Set <ref> to <new-oid> after verifying <old-oid>, if given.
|
||||
Specify a zero <new-oid> to ensure the ref does not exist
|
||||
after the update and/or a zero <old-oid> to make sure the
|
||||
ref does not exist before the update.
|
||||
ref does not exist before the update. If ref:<old-target>
|
||||
is provided, we verify that the <ref> is an existing symbolic
|
||||
ref which targets <old-target>. If ref:<new-target> is given,
|
||||
the update ensures <ref> is a symbolic ref which targets
|
||||
<new-target>.
|
||||
|
||||
create::
|
||||
Create <ref> with <new-oid> after verifying it does not
|
||||
exist. The given <new-oid> may not be zero.
|
||||
exist. The given <new-oid> may not be zero. If instead
|
||||
ref:<new-target> is provided, a symbolic ref is created
|
||||
which targets <new-target>.
|
||||
|
||||
delete::
|
||||
Delete <ref> after verifying it exists with <old-oid>, if
|
||||
given. If given, <old-oid> may not be zero.
|
||||
Delete <ref> after verifying it exists with <old-oid>, if given.
|
||||
If given, <old-oid> may not be zero. If instead, ref:<old-target>
|
||||
is provided, verify that the symbolic ref <ref> targets
|
||||
<old-target> before deleting it.
|
||||
|
||||
verify::
|
||||
Verify <ref> against <old-oid> but do not change it. If
|
||||
<old-oid> is zero or missing, the ref must not exist.
|
||||
<old-oid> is zero or missing, the ref must not exist. For
|
||||
verifying symbolic refs, provide ref:<old-target>.
|
||||
|
||||
option::
|
||||
Modify the behavior of the next command naming a <ref>.
|
||||
|
|
|
@ -142,13 +142,16 @@ All multi-byte numbers are in network byte order.
|
|||
|
||||
==== Bloom Filter Data (ID: {'B', 'D', 'A', 'T'}) [Optional]
|
||||
* It starts with a header consisting of three unsigned 32-bit integers:
|
||||
- Version of the hash algorithm being used. We currently only support
|
||||
value 1 which corresponds to the 32-bit version of the murmur3 hash
|
||||
- Version of the hash algorithm being used. We currently support
|
||||
value 2 which corresponds to the 32-bit version of the murmur3 hash
|
||||
implemented exactly as described in
|
||||
https://en.wikipedia.org/wiki/MurmurHash#Algorithm and the double
|
||||
hashing technique using seed values 0x293ae76f and 0x7e646e2 as
|
||||
described in https://doi.org/10.1007/978-3-540-30494-4_26 "Bloom Filters
|
||||
in Probabilistic Verification"
|
||||
in Probabilistic Verification". Version 1 Bloom filters have a bug that appears
|
||||
when char is signed and the repository has path names that have characters >=
|
||||
0x80; Git supports reading and writing them, but this ability will be removed
|
||||
in a future version of Git.
|
||||
- The number of times a path is hashed and hence the number of bit positions
|
||||
that cumulatively determine whether a file is present in the commit.
|
||||
- The minimum number of bits 'b' per entry in the Bloom filter. If the filter
|
||||
|
|
|
@ -486,7 +486,7 @@ reference-transaction
|
|||
This hook is invoked by any Git command that performs reference
|
||||
updates. It executes whenever a reference transaction is prepared,
|
||||
committed or aborted and may thus get called multiple times. The hook
|
||||
does not cover symbolic references (but that may change in the future).
|
||||
also cover symbolic references.
|
||||
|
||||
The hook takes exactly one argument, which is the current state the
|
||||
given reference transaction is in:
|
||||
|
@ -503,16 +503,20 @@ given reference transaction is in:
|
|||
For each reference update that was added to the transaction, the hook
|
||||
receives on standard input a line of the format:
|
||||
|
||||
<old-oid> SP <new-oid> SP <ref-name> LF
|
||||
<old-value> SP <new-value> SP <ref-name> LF
|
||||
|
||||
where `<old-oid>` is the old object name passed into the reference
|
||||
transaction, `<new-oid>` is the new object name to be stored in the
|
||||
where `<old-value>` is the old object name passed into the reference
|
||||
transaction, `<new-value>` is the new object name to be stored in the
|
||||
ref and `<ref-name>` is the full name of the ref. When force updating
|
||||
the reference regardless of its current value or when the reference is
|
||||
to be created anew, `<old-oid>` is the all-zeroes object name. To
|
||||
to be created anew, `<old-value>` is the all-zeroes object name. To
|
||||
distinguish these cases, you can inspect the current value of
|
||||
`<ref-name>` via `git rev-parse`.
|
||||
|
||||
For symbolic reference updates the `<old-value>` and `<new-value>`
|
||||
fields could denote references instead of objects, denoted via the
|
||||
`ref:<ref-target>` format.
|
||||
|
||||
The exit status of the hook is ignored for any state except for the
|
||||
"prepared" state. In the "prepared" state, a non-zero exit status will
|
||||
cause the transaction to be aborted. The hook will not be called with
|
||||
|
|
|
@ -255,3 +255,208 @@ triplet is -
|
|||
xor_row (4 byte integer, network byte order): ::
|
||||
The position of the triplet whose bitmap is used to compress
|
||||
this one, or `0xffffffff` if no such bitmap exists.
|
||||
|
||||
Pseudo-merge bitmaps
|
||||
--------------------
|
||||
|
||||
If the `BITMAP_OPT_PSEUDO_MERGES` flag is set, a variable number of
|
||||
bytes (preceding the name-hash cache, commit lookup table, and trailing
|
||||
checksum) of the `.bitmap` file is used to store pseudo-merge bitmaps.
|
||||
|
||||
A "pseudo-merge bitmap" is used to refer to a pair of bitmaps, as
|
||||
follows:
|
||||
|
||||
Commit bitmap::
|
||||
|
||||
A bitmap whose set bits describe the set of commits included in the
|
||||
pseudo-merge's "merge" bitmap (as below).
|
||||
|
||||
Merge bitmap::
|
||||
|
||||
A bitmap whose set bits describe the reachability closure over the set
|
||||
of commits in the pseudo-merge's "commits" bitmap (as above). An
|
||||
identical bitmap would be generated for an octopus merge with the same
|
||||
set of parents as described in the commits bitmap.
|
||||
|
||||
Pseudo-merge bitmaps can accelerate bitmap traversals when all commits
|
||||
for a given pseudo-merge are listed on either side of the traversal,
|
||||
either directly (by explicitly asking for them as part of the `HAVES`
|
||||
or `WANTS`) or indirectly (by encountering them during a fill-in
|
||||
traversal).
|
||||
|
||||
=== Use-cases
|
||||
|
||||
For example, suppose there exists a pseudo-merge bitmap with a large
|
||||
number of commits, all of which are listed in the `WANTS` section of
|
||||
some bitmap traversal query. When pseudo-merge bitmaps are enabled, the
|
||||
bitmap machinery can quickly determine there is a pseudo-merge which
|
||||
satisfies some subset of the wanted objects on either side of the query.
|
||||
Then, we can inflate the EWAH-compressed bitmap, and `OR` it in to the
|
||||
resulting bitmap. By contrast, without pseudo-merge bitmaps, we would
|
||||
have to repeat the decompression and `OR`-ing step over a potentially
|
||||
large number of individual bitmaps, which can take proportionally more
|
||||
time.
|
||||
|
||||
Another benefit of pseudo-merges arises when there is some combination
|
||||
of (a) a large number of references, with (b) poor bitmap coverage, and
|
||||
(c) deep, nested trees, making fill-in traversal relatively expensive.
|
||||
For example, suppose that there are a large enough number of tags where
|
||||
bitmapping each of the tags individually is infeasible. Without
|
||||
pseudo-merge bitmaps, computing the result of, say, `git rev-list
|
||||
--use-bitmap-index --count --objects --tags` would likely require a
|
||||
large amount of fill-in traversal. But when a large quantity of those
|
||||
tags are stored together in a pseudo-merge bitmap, the bitmap machinery
|
||||
can take advantage of the fact that we only care about the union of
|
||||
objects reachable from all of those tags, and answer the query much
|
||||
faster.
|
||||
|
||||
=== File format
|
||||
|
||||
If enabled, pseudo-merge bitmaps are stored in an optional section at
|
||||
the end of a `.bitmap` file. The format is as follows:
|
||||
|
||||
....
|
||||
+-------------------------------------------+
|
||||
| .bitmap File |
|
||||
+-------------------------------------------+
|
||||
| |
|
||||
| Pseudo-merge bitmaps (Variable Length) |
|
||||
| +---------------------------+ |
|
||||
| | commits_bitmap (EWAH) | |
|
||||
| +---------------------------+ |
|
||||
| | merge_bitmap (EWAH) | |
|
||||
| +---------------------------+ |
|
||||
| |
|
||||
+-------------------------------------------+
|
||||
| |
|
||||
| Lookup Table |
|
||||
| +------------+--------------+ |
|
||||
| | commit_pos | offset | |
|
||||
| +------------+--------------+ |
|
||||
| | 4 bytes | 8 bytes | |
|
||||
| +------------+--------------+ |
|
||||
| |
|
||||
| Offset Cases: |
|
||||
| ------------- |
|
||||
| |
|
||||
| 1. MSB Unset: single pseudo-merge bitmap |
|
||||
| + offset to pseudo-merge bitmap |
|
||||
| |
|
||||
| 2. MSB Set: multiple pseudo-merges |
|
||||
| + offset to extended lookup table |
|
||||
| |
|
||||
+-------------------------------------------+
|
||||
| |
|
||||
| Extended Lookup Table (Optional) |
|
||||
| |
|
||||
| +----+----------+----------+----------+ |
|
||||
| | N | Offset 1 | .... | Offset N | |
|
||||
| +----+----------+----------+----------+ |
|
||||
| | | 8 bytes | .... | 8 bytes | |
|
||||
| +----+----------+----------+----------+ |
|
||||
| |
|
||||
+-------------------------------------------+
|
||||
| |
|
||||
| Pseudo-merge Metadata |
|
||||
| +------------------+----------------+ |
|
||||
| | # pseudo-merges | # Commits | |
|
||||
| +------------------+----------------+ |
|
||||
| | 4 bytes | 4 bytes | |
|
||||
| +------------------+----------------+ |
|
||||
| |
|
||||
| +------------------+----------------+ |
|
||||
| | Lookup offset | Extension size | |
|
||||
| +------------------+----------------+ |
|
||||
| | 8 bytes | 8 bytes | |
|
||||
| +------------------+----------------+ |
|
||||
| |
|
||||
+-------------------------------------------+
|
||||
....
|
||||
|
||||
* One or more pseudo-merge bitmaps, each containing:
|
||||
|
||||
** `commits_bitmap`, an EWAH-compressed bitmap describing the set of
|
||||
commits included in this pseudo-merge.
|
||||
|
||||
** `merge_bitmap`, an EWAH-compressed bitmap describing the union of
|
||||
the set of objects reachable from all commits listed in the
|
||||
`commits_bitmap`.
|
||||
|
||||
* A lookup table, mapping pseudo-merged commits to the pseudo-merges
|
||||
they belong to. Entries appear in increasing order of each commit's
|
||||
bit position. Each entry is 12 bytes wide, and is comprised of the
|
||||
following:
|
||||
|
||||
** `commit_pos`, a 4-byte unsigned value (in network byte-order)
|
||||
containing the bit position for this commit.
|
||||
|
||||
** `offset`, an 8-byte unsigned value (also in network byte-order)
|
||||
containing either one of two possible offsets, depending on whether or
|
||||
not the most-significant bit is set.
|
||||
|
||||
*** If unset (i.e. `offset & ((uint64_t)1<<63) == 0`), the offset
|
||||
(relative to the beginning of the `.bitmap` file) at which the
|
||||
pseudo-merge bitmap for this commit can be read. This indicates
|
||||
only a single pseudo-merge bitmap contains this commit.
|
||||
|
||||
*** If set (i.e. `offset & ((uint64_t)1<<63) != 0`), the offset
|
||||
(again relative to the beginning of the `.bitmap` file) at which
|
||||
the extended offset table can be located describing the set of
|
||||
pseudo-merge bitmaps which contain this commit. This indicates
|
||||
that multiple pseudo-merge bitmaps contain this commit.
|
||||
|
||||
* An (optional) extended lookup table (written if and only if there is
|
||||
at least one commit which appears in more than one pseudo-merge).
|
||||
There are as many entries as commits which appear in multiple
|
||||
pseudo-merges. Each entry contains the following:
|
||||
|
||||
** `N`, a 4-byte unsigned value equal to the number of pseudo-merges
|
||||
which contain a given commit.
|
||||
|
||||
** An array of `N` 8-byte unsigned values, each of which is
|
||||
interpreted as an offset (relative to the beginning of the
|
||||
`.bitmap` file) at which a pseudo-merge bitmap for this commit can
|
||||
be read. These values occur in no particular order.
|
||||
|
||||
* Positions for all pseudo-merges, each stored as an 8-byte unsigned
|
||||
value (in network byte-order) containing the offset (relative to the
|
||||
beginning of the `.bitmap` file) of each consecutive pseudo-merge.
|
||||
|
||||
* A 4-byte unsigned value (in network byte-order) equal to the number of
|
||||
pseudo-merges.
|
||||
|
||||
* A 4-byte unsigned value (in network byte-order) equal to the number of
|
||||
unique commits which appear in any pseudo-merge.
|
||||
|
||||
* An 8-byte unsigned value (in network byte-order) equal to the number
|
||||
of bytes between the start of the pseudo-merge section and the
|
||||
beginning of the lookup table.
|
||||
|
||||
* An 8-byte unsigned value (in network byte-order) equal to the number
|
||||
of bytes in the pseudo-merge section (including this field).
|
||||
|
||||
=== Pseudo-merge selection
|
||||
|
||||
Pseudo-merge commits are selected among non-bitmapped commits at the
|
||||
tip of one or more reference(s). In addition, there are a handful of
|
||||
constraints to further refine this selection:
|
||||
|
||||
`pack.bitmapPseudoMergeDecay`:: Defines the "decay rate", which
|
||||
corresponds to how quickly (or not) consecutive pseudo-merges decrease
|
||||
in size relative to one another.
|
||||
|
||||
`pack.bitmapPseudoMergeGroups`:: Defines the maximum number of
|
||||
pseudo-merge groups.
|
||||
|
||||
`pack.bitmapPseudoMergeSampleRate`:: Defines the percentage of commits
|
||||
(matching the above criteria) which are selected.
|
||||
|
||||
`pack.bitmapPseudoMergeThreshold`:: Defines the minimum age of a commit
|
||||
in order to be considered for inclusion within one or more pseudo-merge
|
||||
bitmaps.
|
||||
|
||||
The size of consecutive pseudo-merge groups decays according to a
|
||||
power-law decay function, where the size of the `n`-th group is `f(n) =
|
||||
C*n^-k`. The value of `C` is chosen accordingly to match the number of
|
||||
desired groups, and `k` is 1/100th of the value of
|
||||
`pack.bitmapPseudoMergeDecay`.
|
||||
|
|
|
@ -0,0 +1,170 @@
|
|||
= Git Standard Library
|
||||
|
||||
The Git Standard Library intends to serve as the foundational library
|
||||
and root dependency that other libraries in Git will be built off of.
|
||||
That is to say, suppose we have libraries X and Y; a user that wants to
|
||||
use X and Y would need to include X, Y, and this Git Standard Library.
|
||||
This does not mean that the Git Standard Library will be the only
|
||||
possible root dependency in the future, but rather the most significant
|
||||
and widely used one. Git itself is also built off of the Git Standard
|
||||
Library.
|
||||
|
||||
== Dependency graph in libified Git
|
||||
|
||||
Before the introduction of the Git Standard Library, all objects defined
|
||||
in the Git library are compiled and archived into a singular file,
|
||||
libgit.a, which is then linked against by common-main.o with other
|
||||
external dependencies and turned into the Git executable. In other
|
||||
words, the Git executable has dependencies on libgit.a and a couple of
|
||||
external libraries. The libfication of Git slightly alters this build
|
||||
flow by separating out libgit.a into libgit.a and git-std-lib.a.
|
||||
|
||||
With our current method of building Git, we can imagine the dependency
|
||||
graph as such:
|
||||
|
||||
Git
|
||||
/\
|
||||
/ \
|
||||
/ \
|
||||
libgit.a ext deps
|
||||
|
||||
We want to separate out potential libraries from libgit.a and have
|
||||
libgit.a depend on them, which would possibly look like:
|
||||
|
||||
Git
|
||||
/\
|
||||
/ \
|
||||
/ \
|
||||
libgit.a ext deps
|
||||
/\
|
||||
/ \
|
||||
/ \
|
||||
object-store.a (other lib)
|
||||
| /
|
||||
| /
|
||||
| /
|
||||
| /
|
||||
| /
|
||||
| /
|
||||
| /
|
||||
git-std-lib.a
|
||||
|
||||
Instead of containing all objects in Git, libgit.a would contain objects
|
||||
that are not built by libraries it links against. Consequently, if
|
||||
someone wanted a custom build of Git with a custom implementation of the
|
||||
object store, they would only have to swap out object-store.a rather
|
||||
than do a hard fork of Git.
|
||||
|
||||
== Rationale behind Git Standard Library
|
||||
|
||||
The rationale behind the selected object files in the Git Standard
|
||||
Library is the result of two observations within the Git
|
||||
codebase:
|
||||
1. every file includes git-compat-util.h which defines functions
|
||||
in a couple of different files
|
||||
2. wrapper.c + usage.c have difficult-to-separate circular
|
||||
dependencies with each other and other files.
|
||||
|
||||
=== Ubiquity of git-compat-util.h and circular dependencies
|
||||
|
||||
Every file in the Git codebase includes git-compat-util.h. It serves as
|
||||
"a compatibility aid that isolates the knowledge of platform specific
|
||||
inclusion order and what feature macros to define before including which
|
||||
system header" (Junio[1]). Since every file includes git-compat-util.h,
|
||||
and git-compat-util.h includes wrapper.h and usage.h, it would make
|
||||
sense for wrapper.c and usage.c to be a part of the root library. They
|
||||
have difficult-to-separate circular dependencies with each other so it
|
||||
would be impractical for them to be independent libraries. Wrapper.c has
|
||||
dependencies on parse.c, abspath.c, strbuf.c, which in turn also have
|
||||
dependencies on usage.c and wrapper.c - more circular dependencies.
|
||||
|
||||
=== Tradeoff between swappability and refactoring
|
||||
|
||||
From the above dependency graph, we can see that git-std-lib.a could be
|
||||
many smaller libraries rather than a singular library. So why choose a
|
||||
singular library when multiple libraries can be individually easier to
|
||||
swap and are more modular? A singular library requires less work to
|
||||
separate out circular dependencies within itself so it becomes a
|
||||
tradeoff question between work and reward. While there may be a point in
|
||||
the future where a file like usage.c would want its own library so that
|
||||
someone can have custom die() or error(), the work required to refactor
|
||||
out the circular dependencies in some files would be enormous due to
|
||||
their ubiquity so therefore I believe it is not worth the tradeoff
|
||||
currently. Additionally, we can in the future choose to do this refactor
|
||||
and change the API for the library if there becomes enough of a reason
|
||||
to do so (remember we are avoiding promising stability of the interfaces
|
||||
of those libraries).
|
||||
|
||||
=== Reuse of compatibility functions in git-compat-util.h
|
||||
|
||||
Most functions defined in git-compat-util.h are implemented in compat/
|
||||
and have dependencies limited to strbuf.h and wrapper.h so they can be
|
||||
easily included in git-std-lib.a, which as a root dependency means that
|
||||
higher level libraries do not have to worry about compatibility files in
|
||||
compat/. The rest of the functions defined in git-compat-util.h are
|
||||
implemented in top level files and are hidden behind
|
||||
an #ifdef if their implementation is not in git-std-lib.a.
|
||||
|
||||
=== Rationale summary
|
||||
|
||||
The Git Standard Library allows us to get the libification ball rolling
|
||||
with other libraries in Git. By not spending many more months attempting
|
||||
to refactor difficult circular dependencies and instead spending that
|
||||
time getting to a state where we can test out swapping a library out
|
||||
such as config or object store, we can prove the viability of Git
|
||||
libification on a much faster time scale. Additionally the code cleanups
|
||||
that have happened so far have been minor and beneficial for the
|
||||
codebase. It is probable that making large movements would negatively
|
||||
affect code clarity.
|
||||
|
||||
== Git Standard Library boundary
|
||||
|
||||
While I have described above some useful heuristics for identifying
|
||||
potential candidates for git-std-lib.a, a standard library should not
|
||||
have a shaky definition for what belongs in it.
|
||||
|
||||
- Low-level files (aka operates only on other primitive types) that are
|
||||
used everywhere within the codebase (wrapper.c, usage.c, strbuf.c)
|
||||
- Dependencies that are low-level and widely used
|
||||
(abspath.c, date.c, hex-ll.c, parse.c, utf8.c)
|
||||
- low-level git/* files with functions defined in git-compat-util.h
|
||||
(ctype.c)
|
||||
- compat/*
|
||||
|
||||
There are other files that might fit this definition, but that does not
|
||||
mean it should belong in git-std-lib.a. Those files should start as
|
||||
their own separate library since any file added to git-std-lib.a loses
|
||||
its flexibility of being easily swappable.
|
||||
|
||||
Wrapper.c and usage.c have dependencies on pager and trace2 that are
|
||||
possible to remove at the cost of sacrificing the ability for standard Git
|
||||
to be able to trace functions in those files and other files in git-std-lib.a.
|
||||
In order for git-std-lib.a to compile with those dependencies, stubbed out
|
||||
versions of those files are implemented and swapped in during compilation time
|
||||
(see STUB_LIB_OBJS in the Makefile).
|
||||
|
||||
== Files inside of Git Standard Library
|
||||
|
||||
The set of files in git-std-lib.a can be found in STD_LIB_OBJS and COMPAT_OBJS
|
||||
in the Makefile.
|
||||
|
||||
When these files are compiled together with the files in STUB_LIB_OBJS (or
|
||||
user-provided files that provide the same functions), they form a complete
|
||||
library.
|
||||
|
||||
== Pitfalls
|
||||
|
||||
There are a small amount of files under compat/* that have dependencies
|
||||
not inside of git-std-lib.a. While those functions are not called on
|
||||
Linux, other OSes might call those problematic functions. I don't see
|
||||
this as a major problem, but rather an observation that libification in
|
||||
general may also require some minor compatibility work in the future.
|
||||
|
||||
== Testing
|
||||
|
||||
Unit tests should catch any breakages caused by changes to files in
|
||||
git-std-lib.a (i.e. introduction of an out-of-scope dependency) and new
|
||||
functions introduced to git-std-lib.a will require unit tests written
|
||||
for them.
|
||||
|
||||
[1] https://lore.kernel.org/git/xmqqwn17sydw.fsf@gitster.g/
|
130
Makefile
130
Makefile
|
@ -409,6 +409,9 @@ include shared.mak
|
|||
# to the "<name>" of the corresponding `compat/fsmonitor/fsm-settings-<name>.c`
|
||||
# that implements the `fsm_os_settings__*()` routines.
|
||||
#
|
||||
# Define LINK_FUZZ_PROGRAMS if you want `make all` to also build the fuzz test
|
||||
# programs in oss-fuzz/.
|
||||
#
|
||||
# === Optional library: libintl ===
|
||||
#
|
||||
# Define NO_GETTEXT if you don't want Git output to be translated.
|
||||
|
@ -669,6 +672,8 @@ FUZZ_PROGRAMS =
|
|||
GIT_OBJS =
|
||||
LIB_OBJS =
|
||||
SCALAR_OBJS =
|
||||
STD_LIB_OBJS =
|
||||
STUB_LIB_OBJS =
|
||||
OBJECTS =
|
||||
OTHER_PROGRAMS =
|
||||
PROGRAM_OBJS =
|
||||
|
@ -752,23 +757,6 @@ SCRIPTS = $(SCRIPT_SH_GEN) \
|
|||
|
||||
ETAGS_TARGET = TAGS
|
||||
|
||||
# If you add a new fuzzer, please also make sure to run it in
|
||||
# ci/run-build-and-minimal-fuzzers.sh so that we make sure it still links and
|
||||
# runs in the future.
|
||||
FUZZ_OBJS += oss-fuzz/dummy-cmd-main.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-commit-graph.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-config.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-date.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-pack-headers.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-pack-idx.o
|
||||
.PHONY: fuzz-objs
|
||||
fuzz-objs: $(FUZZ_OBJS)
|
||||
|
||||
# Always build fuzz objects even if not testing, to prevent bit-rot.
|
||||
all:: $(FUZZ_OBJS)
|
||||
|
||||
FUZZ_PROGRAMS += $(patsubst %.o,%,$(filter-out %dummy-cmd-main.o,$(FUZZ_OBJS)))
|
||||
|
||||
# Empty...
|
||||
EXTRA_PROGRAMS =
|
||||
|
||||
|
@ -808,6 +796,7 @@ TEST_BUILTINS_OBJS += test-dump-split-index.o
|
|||
TEST_BUILTINS_OBJS += test-dump-untracked-cache.o
|
||||
TEST_BUILTINS_OBJS += test-env-helper.o
|
||||
TEST_BUILTINS_OBJS += test-example-decorate.o
|
||||
TEST_BUILTINS_OBJS += test-example-tap.o
|
||||
TEST_BUILTINS_OBJS += test-find-pack.o
|
||||
TEST_BUILTINS_OBJS += test-fsmonitor-client.o
|
||||
TEST_BUILTINS_OBJS += test-genrandom.o
|
||||
|
@ -865,6 +854,7 @@ TEST_BUILTINS_OBJS += test-userdiff.o
|
|||
TEST_BUILTINS_OBJS += test-wildmatch.o
|
||||
TEST_BUILTINS_OBJS += test-windows-named-pipe.o
|
||||
TEST_BUILTINS_OBJS += test-write-cache.o
|
||||
TEST_BUILTINS_OBJS += test-xgethostname.o
|
||||
TEST_BUILTINS_OBJS += test-xml-encode.o
|
||||
|
||||
# Do not add more tests here unless they have extra dependencies. Add
|
||||
|
@ -872,8 +862,6 @@ TEST_BUILTINS_OBJS += test-xml-encode.o
|
|||
TEST_PROGRAMS_NEED_X += test-fake-ssh
|
||||
TEST_PROGRAMS_NEED_X += test-tool
|
||||
|
||||
TEST_PROGRAMS = $(patsubst %,t/helper/%$X,$(TEST_PROGRAMS_NEED_X))
|
||||
|
||||
# List built-in command $C whose implementation cmd_$C() is not in
|
||||
# builtin/$C.o but is linked in as part of some other command.
|
||||
BUILT_INS += $(patsubst builtin/%.o,git-%$X,$(BUILTIN_OBJS))
|
||||
|
@ -925,6 +913,8 @@ TEST_SHELL_PATH = $(SHELL_PATH)
|
|||
|
||||
LIB_FILE = libgit.a
|
||||
XDIFF_LIB = xdiff/lib.a
|
||||
STD_LIB_FILE = git-std-lib.a
|
||||
STUB_LIB_FILE = git-stub-lib.a
|
||||
REFTABLE_LIB = reftable/libreftable.a
|
||||
REFTABLE_TEST_LIB = reftable/libreftable_test.a
|
||||
|
||||
|
@ -964,7 +954,6 @@ COCCI_SOURCES = $(filter-out $(THIRD_PARTY_SOURCES),$(FOUND_C_SOURCES))
|
|||
|
||||
LIB_H = $(FOUND_H_SOURCES)
|
||||
|
||||
LIB_OBJS += abspath.o
|
||||
LIB_OBJS += add-interactive.o
|
||||
LIB_OBJS += add-patch.o
|
||||
LIB_OBJS += advice.o
|
||||
|
@ -1006,8 +995,6 @@ LIB_OBJS += convert.o
|
|||
LIB_OBJS += copy.o
|
||||
LIB_OBJS += credential.o
|
||||
LIB_OBJS += csum-file.o
|
||||
LIB_OBJS += ctype.o
|
||||
LIB_OBJS += date.o
|
||||
LIB_OBJS += decorate.o
|
||||
LIB_OBJS += delta-islands.o
|
||||
LIB_OBJS += diagnose.o
|
||||
|
@ -1048,7 +1035,6 @@ LIB_OBJS += hash-lookup.o
|
|||
LIB_OBJS += hashmap.o
|
||||
LIB_OBJS += help.o
|
||||
LIB_OBJS += hex.o
|
||||
LIB_OBJS += hex-ll.o
|
||||
LIB_OBJS += hook.o
|
||||
LIB_OBJS += ident.o
|
||||
LIB_OBJS += json-writer.o
|
||||
|
@ -1102,7 +1088,6 @@ LIB_OBJS += pack-write.o
|
|||
LIB_OBJS += packfile.o
|
||||
LIB_OBJS += pager.o
|
||||
LIB_OBJS += parallel-checkout.o
|
||||
LIB_OBJS += parse.o
|
||||
LIB_OBJS += parse-options-cb.o
|
||||
LIB_OBJS += parse-options.o
|
||||
LIB_OBJS += patch-delta.o
|
||||
|
@ -1119,6 +1104,7 @@ LIB_OBJS += prompt.o
|
|||
LIB_OBJS += protocol.o
|
||||
LIB_OBJS += protocol-caps.o
|
||||
LIB_OBJS += prune-packed.o
|
||||
LIB_OBJS += pseudo-merge.o
|
||||
LIB_OBJS += quote.o
|
||||
LIB_OBJS += range-diff.o
|
||||
LIB_OBJS += reachable.o
|
||||
|
@ -1157,7 +1143,6 @@ LIB_OBJS += sparse-index.o
|
|||
LIB_OBJS += split-index.o
|
||||
LIB_OBJS += stable-qsort.o
|
||||
LIB_OBJS += statinfo.o
|
||||
LIB_OBJS += strbuf.o
|
||||
LIB_OBJS += streaming.o
|
||||
LIB_OBJS += string-list.o
|
||||
LIB_OBJS += strmap.o
|
||||
|
@ -1194,21 +1179,32 @@ LIB_OBJS += unpack-trees.o
|
|||
LIB_OBJS += upload-pack.o
|
||||
LIB_OBJS += url.o
|
||||
LIB_OBJS += urlmatch.o
|
||||
LIB_OBJS += usage.o
|
||||
LIB_OBJS += userdiff.o
|
||||
LIB_OBJS += utf8.o
|
||||
LIB_OBJS += varint.o
|
||||
LIB_OBJS += version.o
|
||||
LIB_OBJS += versioncmp.o
|
||||
LIB_OBJS += walker.o
|
||||
LIB_OBJS += wildmatch.o
|
||||
LIB_OBJS += worktree.o
|
||||
LIB_OBJS += wrapper.o
|
||||
LIB_OBJS += write-or-die.o
|
||||
LIB_OBJS += ws.o
|
||||
LIB_OBJS += wt-status.o
|
||||
LIB_OBJS += xdiff-interface.o
|
||||
|
||||
STD_LIB_OBJS += abspath.o
|
||||
STD_LIB_OBJS += ctype.o
|
||||
STD_LIB_OBJS += date.o
|
||||
STD_LIB_OBJS += hex-ll.o
|
||||
STD_LIB_OBJS += parse.o
|
||||
STD_LIB_OBJS += strbuf.o
|
||||
STD_LIB_OBJS += usage.o
|
||||
STD_LIB_OBJS += utf8.o
|
||||
STD_LIB_OBJS += wrapper.o
|
||||
|
||||
STUB_LIB_OBJS += stubs/trace2.o
|
||||
STUB_LIB_OBJS += stubs/pager.o
|
||||
STUB_LIB_OBJS += stubs/misc.o
|
||||
|
||||
BUILTIN_OBJS += builtin/add.o
|
||||
BUILTIN_OBJS += builtin/am.o
|
||||
BUILTIN_OBJS += builtin/annotate.o
|
||||
|
@ -1347,17 +1343,17 @@ THIRD_PARTY_SOURCES += compat/regex/%
|
|||
THIRD_PARTY_SOURCES += sha1collisiondetection/%
|
||||
THIRD_PARTY_SOURCES += sha1dc/%
|
||||
|
||||
UNIT_TEST_PROGRAMS += t-basic
|
||||
UNIT_TEST_PROGRAMS += t-mem-pool
|
||||
UNIT_TEST_PROGRAMS += t-strbuf
|
||||
UNIT_TEST_PROGRAMS += t-ctype
|
||||
UNIT_TEST_PROGRAMS += t-mem-pool
|
||||
UNIT_TEST_PROGRAMS += t-prio-queue
|
||||
UNIT_TEST_PROGRAMS += t-strbuf
|
||||
UNIT_TEST_PROGRAMS += t-trailer
|
||||
UNIT_TEST_PROGS = $(patsubst %,$(UNIT_TEST_BIN)/%$X,$(UNIT_TEST_PROGRAMS))
|
||||
UNIT_TEST_OBJS = $(patsubst %,$(UNIT_TEST_DIR)/%.o,$(UNIT_TEST_PROGRAMS))
|
||||
UNIT_TEST_OBJS += $(UNIT_TEST_DIR)/test-lib.o
|
||||
|
||||
# xdiff and reftable libs may in turn depend on what is in libgit.a
|
||||
GITLIBS = common-main.o $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE)
|
||||
GITLIBS = common-main.o $(STD_LIB_FILE) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(LIB_FILE)
|
||||
EXTLIBS =
|
||||
|
||||
GIT_USER_AGENT = git/$(GIT_VERSION)
|
||||
|
@ -2373,6 +2369,29 @@ ifndef NO_TCLTK
|
|||
endif
|
||||
$(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)'
|
||||
|
||||
# If you add a new fuzzer, please also make sure to run it in
|
||||
# ci/run-build-and-minimal-fuzzers.sh so that we make sure it still links and
|
||||
# runs in the future.
|
||||
FUZZ_OBJS += oss-fuzz/dummy-cmd-main.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-commit-graph.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-config.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-date.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-pack-headers.o
|
||||
FUZZ_OBJS += oss-fuzz/fuzz-pack-idx.o
|
||||
.PHONY: fuzz-objs
|
||||
fuzz-objs: $(FUZZ_OBJS)
|
||||
|
||||
# Always build fuzz objects even if not testing, to prevent bit-rot.
|
||||
all:: $(FUZZ_OBJS)
|
||||
|
||||
FUZZ_PROGRAMS += $(patsubst %.o,%,$(filter-out %dummy-cmd-main.o,$(FUZZ_OBJS)))
|
||||
|
||||
# Build fuzz programs when possible, even without the necessary fuzzing support,
|
||||
# to prevent bit-rot.
|
||||
ifdef LINK_FUZZ_PROGRAMS
|
||||
all:: $(FUZZ_PROGRAMS)
|
||||
endif
|
||||
|
||||
please_set_SHELL_PATH_to_a_more_modern_shell:
|
||||
@$$(:)
|
||||
|
||||
|
@ -2656,7 +2675,6 @@ REFTABLE_OBJS += reftable/merged.o
|
|||
REFTABLE_OBJS += reftable/pq.o
|
||||
REFTABLE_OBJS += reftable/reader.o
|
||||
REFTABLE_OBJS += reftable/record.o
|
||||
REFTABLE_OBJS += reftable/refname.o
|
||||
REFTABLE_OBJS += reftable/generic.o
|
||||
REFTABLE_OBJS += reftable/stack.o
|
||||
REFTABLE_OBJS += reftable/tree.o
|
||||
|
@ -2669,11 +2687,19 @@ REFTABLE_TEST_OBJS += reftable/merged_test.o
|
|||
REFTABLE_TEST_OBJS += reftable/pq_test.o
|
||||
REFTABLE_TEST_OBJS += reftable/record_test.o
|
||||
REFTABLE_TEST_OBJS += reftable/readwrite_test.o
|
||||
REFTABLE_TEST_OBJS += reftable/refname_test.o
|
||||
REFTABLE_TEST_OBJS += reftable/stack_test.o
|
||||
REFTABLE_TEST_OBJS += reftable/test_framework.o
|
||||
REFTABLE_TEST_OBJS += reftable/tree_test.o
|
||||
|
||||
ifndef NO_POSIX_GOODIES
|
||||
TEST_PROGRAMS_NEED_X += test-stdlib
|
||||
MY_VAR = not_else
|
||||
else
|
||||
MY_VAR = else
|
||||
endif
|
||||
|
||||
TEST_PROGRAMS = $(patsubst %,t/helper/%$X,$(TEST_PROGRAMS_NEED_X))
|
||||
|
||||
TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
|
||||
|
||||
.PHONY: test-objs
|
||||
|
@ -2698,6 +2724,8 @@ OBJECTS += $(XDIFF_OBJS)
|
|||
OBJECTS += $(FUZZ_OBJS)
|
||||
OBJECTS += $(REFTABLE_OBJS) $(REFTABLE_TEST_OBJS)
|
||||
OBJECTS += $(UNIT_TEST_OBJS)
|
||||
OBJECTS += $(STD_LIB_OBJS)
|
||||
OBJECTS += $(STUB_LIB_OBJS)
|
||||
|
||||
ifndef NO_CURL
|
||||
OBJECTS += http.o http-walker.o remote-curl.o
|
||||
|
@ -3200,6 +3228,10 @@ endif
|
|||
|
||||
test_bindir_programs := $(patsubst %,bin-wrappers/%,$(BINDIR_PROGRAMS_NEED_X) $(BINDIR_PROGRAMS_NO_X) $(TEST_PROGRAMS_NEED_X))
|
||||
|
||||
t/helper/test-stdlib$X: t/helper/test-stdlib.o GIT-LDFLAGS $(STD_LIB_FILE) $(STUB_LIB_FILE) $(GITLIBS)
|
||||
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
|
||||
$< $(STD_LIB_FILE) $(STUB_LIB_FILE) $(EXTLIBS)
|
||||
|
||||
all:: $(TEST_PROGRAMS) $(test_bindir_programs) $(UNIT_TEST_PROGS)
|
||||
|
||||
bin-wrappers/%: wrap-for-bin.sh
|
||||
|
@ -3228,7 +3260,7 @@ perf: all
|
|||
|
||||
.PRECIOUS: $(TEST_OBJS)
|
||||
|
||||
t/helper/test-tool$X: $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
|
||||
t/helper/test-tool$X: $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS)) $(UNIT_TEST_DIR)/test-lib.o
|
||||
|
||||
t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS) $(REFTABLE_TEST_LIB)
|
||||
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(filter %.a,$^) $(LIBS)
|
||||
|
@ -3629,6 +3661,7 @@ ifneq ($(INCLUDE_DLLS_IN_ARTIFACTS),)
|
|||
OTHER_PROGRAMS += $(shell echo *.dll t/helper/*.dll t/unit-tests/bin/*.dll)
|
||||
endif
|
||||
|
||||
# Added an info for debugging
|
||||
artifacts-tar:: $(ALL_COMMANDS_TO_INSTALL) $(SCRIPT_LIB) $(OTHER_PROGRAMS) \
|
||||
GIT-BUILD-OPTIONS $(TEST_PROGRAMS) $(test_bindir_programs) \
|
||||
$(UNIT_TEST_PROGS) $(MOFILES)
|
||||
|
@ -3691,7 +3724,7 @@ clean: profile-clean coverage-clean cocciclean
|
|||
$(RM) git.res
|
||||
$(RM) $(OBJECTS)
|
||||
$(RM) headless-git.o
|
||||
$(RM) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(REFTABLE_TEST_LIB)
|
||||
$(RM) $(LIB_FILE) $(XDIFF_LIB) $(REFTABLE_LIB) $(REFTABLE_TEST_LIB) $(STD_LIB_FILE) $(STUB_LIB_FILE)
|
||||
$(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) $(OTHER_PROGRAMS)
|
||||
$(RM) $(TEST_PROGRAMS)
|
||||
$(RM) $(FUZZ_PROGRAMS)
|
||||
|
@ -3858,22 +3891,22 @@ cover_db_html: cover_db
|
|||
#
|
||||
# An example command to build against libFuzzer from LLVM 11.0.0:
|
||||
#
|
||||
# make CC=clang CXX=clang++ \
|
||||
# make CC=clang FUZZ_CXX=clang++ \
|
||||
# CFLAGS="-fsanitize=fuzzer-no-link,address" \
|
||||
# LIB_FUZZING_ENGINE="-fsanitize=fuzzer,address" \
|
||||
# fuzz-all
|
||||
#
|
||||
FUZZ_CXX ?= $(CC)
|
||||
FUZZ_CXXFLAGS ?= $(ALL_CFLAGS)
|
||||
|
||||
.PHONY: fuzz-all
|
||||
fuzz-all: $(FUZZ_PROGRAMS)
|
||||
|
||||
$(FUZZ_PROGRAMS): %: %.o oss-fuzz/dummy-cmd-main.o $(GITLIBS) GIT-LDFLAGS
|
||||
$(QUIET_LINK)$(CXX) $(FUZZ_CXXFLAGS) -o $@ $(ALL_LDFLAGS) \
|
||||
$(QUIET_LINK)$(FUZZ_CXX) $(FUZZ_CXXFLAGS) -o $@ $(ALL_LDFLAGS) \
|
||||
-Wl,--allow-multiple-definition \
|
||||
$(filter %.o,$^) $(filter %.a,$^) $(LIBS) $(LIB_FUZZING_ENGINE)
|
||||
|
||||
fuzz-all: $(FUZZ_PROGRAMS)
|
||||
|
||||
$(UNIT_TEST_PROGS): $(UNIT_TEST_BIN)/%$X: $(UNIT_TEST_DIR)/%.o $(UNIT_TEST_DIR)/test-lib.o $(GITLIBS) GIT-LDFLAGS
|
||||
$(call mkdir_p_parent_template)
|
||||
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
|
||||
|
@ -3881,5 +3914,20 @@ $(UNIT_TEST_PROGS): $(UNIT_TEST_BIN)/%$X: $(UNIT_TEST_DIR)/%.o $(UNIT_TEST_DIR)/
|
|||
|
||||
.PHONY: build-unit-tests unit-tests
|
||||
build-unit-tests: $(UNIT_TEST_PROGS)
|
||||
unit-tests: $(UNIT_TEST_PROGS)
|
||||
unit-tests: $(UNIT_TEST_PROGS) t/helper/test-tool$X
|
||||
$(MAKE) -C t/ unit-tests
|
||||
|
||||
### Libified Git rules
|
||||
|
||||
# git-std-lib.a
|
||||
# Programs other than git should compile this with
|
||||
# make NO_GETTEXT=YesPlease git-std-lib.a
|
||||
# and link against git-stub-lib.a (if the default no-op functionality is fine)
|
||||
# or a custom .a file with the same interface as git-stub-lib.a (if custom
|
||||
# functionality is needed) as well.
|
||||
$(STD_LIB_FILE): $(STD_LIB_OBJS) $(COMPAT_OBJS)
|
||||
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
|
||||
|
||||
# git-stub-lib.a
|
||||
$(STUB_LIB_FILE): $(STUB_LIB_OBJS)
|
||||
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
|
||||
|
|
19
add-patch.c
19
add-patch.c
|
@ -293,10 +293,10 @@ static void err(struct add_p_state *s, const char *fmt, ...)
|
|||
va_list args;
|
||||
|
||||
va_start(args, fmt);
|
||||
fputs(s->s.error_color, stderr);
|
||||
vfprintf(stderr, fmt, args);
|
||||
fputs(s->s.reset_color, stderr);
|
||||
fputc('\n', stderr);
|
||||
fputs(s->s.error_color, stdout);
|
||||
vfprintf(stdout, fmt, args);
|
||||
fputs(s->s.reset_color, stdout);
|
||||
fputc('\n', stdout);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
|
@ -1326,7 +1326,7 @@ static int apply_for_checkout(struct add_p_state *s, struct strbuf *diff,
|
|||
err(s, _("Nothing was applied.\n"));
|
||||
} else
|
||||
/* As a last resort, show the diff to the user */
|
||||
fwrite(diff->buf, diff->len, 1, stderr);
|
||||
fwrite(diff->buf, diff->len, 1, stdout);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1668,7 +1668,7 @@ static int patch_update_file(struct add_p_state *s,
|
|||
}
|
||||
} else if (s->answer.buf[0] == 'p') {
|
||||
rendered_hunk_index = -1;
|
||||
} else {
|
||||
} else if (s->answer.buf[0] == '?') {
|
||||
const char *p = _(help_patch_remainder), *eol = p;
|
||||
|
||||
color_fprintf(stdout, s->s.help_color, "%s",
|
||||
|
@ -1692,6 +1692,9 @@ static int patch_update_file(struct add_p_state *s,
|
|||
color_fprintf_ln(stdout, s->s.help_color,
|
||||
"%.*s", (int)(eol - p), p);
|
||||
}
|
||||
} else {
|
||||
err(s, _("Unknown command '%s' (use '?' for help)"),
|
||||
s->answer.buf);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1778,9 +1781,9 @@ int run_add_p(struct repository *r, enum add_p_mode mode,
|
|||
break;
|
||||
|
||||
if (s.file_diff_nr == 0)
|
||||
fprintf(stderr, _("No changes.\n"));
|
||||
err(&s, _("No changes."));
|
||||
else if (binary_count == s.file_diff_nr)
|
||||
fprintf(stderr, _("Only binary files changed.\n"));
|
||||
err(&s, _("Only binary files changed."));
|
||||
|
||||
add_p_state_clear(&s);
|
||||
return 0;
|
||||
|
|
31
attr.c
31
attr.c
|
@ -1206,15 +1206,16 @@ static void collect_some_attrs(struct index_state *istate,
|
|||
}
|
||||
|
||||
static const char *default_attr_source_tree_object_name;
|
||||
static int ignore_bad_attr_tree;
|
||||
|
||||
void set_git_attr_source(const char *tree_object_name)
|
||||
{
|
||||
default_attr_source_tree_object_name = xstrdup(tree_object_name);
|
||||
}
|
||||
|
||||
static void compute_default_attr_source(struct object_id *attr_source)
|
||||
static int compute_default_attr_source(struct object_id *attr_source)
|
||||
{
|
||||
int ignore_bad_attr_tree = 0;
|
||||
|
||||
if (!default_attr_source_tree_object_name)
|
||||
default_attr_source_tree_object_name = getenv(GIT_ATTR_SOURCE_ENVIRONMENT);
|
||||
|
||||
|
@ -1230,22 +1231,34 @@ static void compute_default_attr_source(struct object_id *attr_source)
|
|||
ignore_bad_attr_tree = 1;
|
||||
}
|
||||
|
||||
if (!default_attr_source_tree_object_name || !is_null_oid(attr_source))
|
||||
return;
|
||||
if (!default_attr_source_tree_object_name)
|
||||
return 0;
|
||||
|
||||
if (!startup_info->have_repository) {
|
||||
if (!ignore_bad_attr_tree)
|
||||
die(_("cannot use --attr-source or GIT_ATTR_SOURCE without repo"));
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (repo_get_oid_treeish(the_repository,
|
||||
default_attr_source_tree_object_name,
|
||||
attr_source) && !ignore_bad_attr_tree)
|
||||
die(_("bad --attr-source or GIT_ATTR_SOURCE"));
|
||||
attr_source)) {
|
||||
if (!ignore_bad_attr_tree)
|
||||
die(_("bad --attr-source or GIT_ATTR_SOURCE"));
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static struct object_id *default_attr_source(void)
|
||||
{
|
||||
static struct object_id attr_source;
|
||||
static int has_attr_source = -1;
|
||||
|
||||
if (is_null_oid(&attr_source))
|
||||
compute_default_attr_source(&attr_source);
|
||||
if (is_null_oid(&attr_source))
|
||||
if (has_attr_source < 0)
|
||||
has_attr_source = compute_default_attr_source(&attr_source);
|
||||
if (!has_attr_source)
|
||||
return NULL;
|
||||
return &attr_source;
|
||||
}
|
||||
|
|
208
bloom.c
208
bloom.c
|
@ -6,6 +6,9 @@
|
|||
#include "commit-graph.h"
|
||||
#include "commit.h"
|
||||
#include "commit-slab.h"
|
||||
#include "tree.h"
|
||||
#include "tree-walk.h"
|
||||
#include "config.h"
|
||||
|
||||
define_commit_slab(bloom_filter_slab, struct bloom_filter);
|
||||
|
||||
|
@ -48,9 +51,9 @@ static int check_bloom_offset(struct commit_graph *g, uint32_t pos,
|
|||
return -1;
|
||||
}
|
||||
|
||||
static int load_bloom_filter_from_graph(struct commit_graph *g,
|
||||
struct bloom_filter *filter,
|
||||
uint32_t graph_pos)
|
||||
int load_bloom_filter_from_graph(struct commit_graph *g,
|
||||
struct bloom_filter *filter,
|
||||
uint32_t graph_pos)
|
||||
{
|
||||
uint32_t lex_pos, start_index, end_index;
|
||||
|
||||
|
@ -88,6 +91,8 @@ static int load_bloom_filter_from_graph(struct commit_graph *g,
|
|||
filter->data = (unsigned char *)(g->chunk_bloom_data +
|
||||
sizeof(unsigned char) * start_index +
|
||||
BLOOMDATA_CHUNK_HEADER_SIZE);
|
||||
filter->version = g->bloom_filter_settings->hash_version;
|
||||
filter->to_free = NULL;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -99,7 +104,64 @@ static int load_bloom_filter_from_graph(struct commit_graph *g,
|
|||
* Not considered to be cryptographically secure.
|
||||
* Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm
|
||||
*/
|
||||
uint32_t murmur3_seeded(uint32_t seed, const char *data, size_t len)
|
||||
uint32_t murmur3_seeded_v2(uint32_t seed, const char *data, size_t len)
|
||||
{
|
||||
const uint32_t c1 = 0xcc9e2d51;
|
||||
const uint32_t c2 = 0x1b873593;
|
||||
const uint32_t r1 = 15;
|
||||
const uint32_t r2 = 13;
|
||||
const uint32_t m = 5;
|
||||
const uint32_t n = 0xe6546b64;
|
||||
int i;
|
||||
uint32_t k1 = 0;
|
||||
const char *tail;
|
||||
|
||||
int len4 = len / sizeof(uint32_t);
|
||||
|
||||
uint32_t k;
|
||||
for (i = 0; i < len4; i++) {
|
||||
uint32_t byte1 = (uint32_t)(unsigned char)data[4*i];
|
||||
uint32_t byte2 = ((uint32_t)(unsigned char)data[4*i + 1]) << 8;
|
||||
uint32_t byte3 = ((uint32_t)(unsigned char)data[4*i + 2]) << 16;
|
||||
uint32_t byte4 = ((uint32_t)(unsigned char)data[4*i + 3]) << 24;
|
||||
k = byte1 | byte2 | byte3 | byte4;
|
||||
k *= c1;
|
||||
k = rotate_left(k, r1);
|
||||
k *= c2;
|
||||
|
||||
seed ^= k;
|
||||
seed = rotate_left(seed, r2) * m + n;
|
||||
}
|
||||
|
||||
tail = (data + len4 * sizeof(uint32_t));
|
||||
|
||||
switch (len & (sizeof(uint32_t) - 1)) {
|
||||
case 3:
|
||||
k1 ^= ((uint32_t)(unsigned char)tail[2]) << 16;
|
||||
/*-fallthrough*/
|
||||
case 2:
|
||||
k1 ^= ((uint32_t)(unsigned char)tail[1]) << 8;
|
||||
/*-fallthrough*/
|
||||
case 1:
|
||||
k1 ^= ((uint32_t)(unsigned char)tail[0]) << 0;
|
||||
k1 *= c1;
|
||||
k1 = rotate_left(k1, r1);
|
||||
k1 *= c2;
|
||||
seed ^= k1;
|
||||
break;
|
||||
}
|
||||
|
||||
seed ^= (uint32_t)len;
|
||||
seed ^= (seed >> 16);
|
||||
seed *= 0x85ebca6b;
|
||||
seed ^= (seed >> 13);
|
||||
seed *= 0xc2b2ae35;
|
||||
seed ^= (seed >> 16);
|
||||
|
||||
return seed;
|
||||
}
|
||||
|
||||
static uint32_t murmur3_seeded_v1(uint32_t seed, const char *data, size_t len)
|
||||
{
|
||||
const uint32_t c1 = 0xcc9e2d51;
|
||||
const uint32_t c2 = 0x1b873593;
|
||||
|
@ -164,8 +226,14 @@ void fill_bloom_key(const char *data,
|
|||
int i;
|
||||
const uint32_t seed0 = 0x293ae76f;
|
||||
const uint32_t seed1 = 0x7e646e2c;
|
||||
const uint32_t hash0 = murmur3_seeded(seed0, data, len);
|
||||
const uint32_t hash1 = murmur3_seeded(seed1, data, len);
|
||||
uint32_t hash0, hash1;
|
||||
if (settings->hash_version == 2) {
|
||||
hash0 = murmur3_seeded_v2(seed0, data, len);
|
||||
hash1 = murmur3_seeded_v2(seed1, data, len);
|
||||
} else {
|
||||
hash0 = murmur3_seeded_v1(seed0, data, len);
|
||||
hash1 = murmur3_seeded_v1(seed1, data, len);
|
||||
}
|
||||
|
||||
key->hashes = (uint32_t *)xcalloc(settings->num_hashes, sizeof(uint32_t));
|
||||
for (i = 0; i < settings->num_hashes; i++)
|
||||
|
@ -197,6 +265,18 @@ void init_bloom_filters(void)
|
|||
init_bloom_filter_slab(&bloom_filters);
|
||||
}
|
||||
|
||||
static void free_one_bloom_filter(struct bloom_filter *filter)
|
||||
{
|
||||
if (!filter)
|
||||
return;
|
||||
free(filter->to_free);
|
||||
}
|
||||
|
||||
void deinit_bloom_filters(void)
|
||||
{
|
||||
deep_clear_bloom_filter_slab(&bloom_filters, free_one_bloom_filter);
|
||||
}
|
||||
|
||||
static int pathmap_cmp(const void *hashmap_cmp_fn_data UNUSED,
|
||||
const struct hashmap_entry *eptr,
|
||||
const struct hashmap_entry *entry_or_key,
|
||||
|
@ -210,11 +290,97 @@ static int pathmap_cmp(const void *hashmap_cmp_fn_data UNUSED,
|
|||
return strcmp(e1->path, e2->path);
|
||||
}
|
||||
|
||||
static void init_truncated_large_filter(struct bloom_filter *filter)
|
||||
static void init_truncated_large_filter(struct bloom_filter *filter,
|
||||
int version)
|
||||
{
|
||||
filter->data = xmalloc(1);
|
||||
filter->data = filter->to_free = xmalloc(1);
|
||||
filter->data[0] = 0xFF;
|
||||
filter->len = 1;
|
||||
filter->version = version;
|
||||
}
|
||||
|
||||
#define VISITED (1u<<21)
|
||||
#define HIGH_BITS (1u<<22)
|
||||
|
||||
static int has_entries_with_high_bit(struct repository *r, struct tree *t)
|
||||
{
|
||||
if (parse_tree(t))
|
||||
return 1;
|
||||
|
||||
if (!(t->object.flags & VISITED)) {
|
||||
struct tree_desc desc;
|
||||
struct name_entry entry;
|
||||
|
||||
init_tree_desc(&desc, &t->object.oid, t->buffer, t->size);
|
||||
while (tree_entry(&desc, &entry)) {
|
||||
size_t i;
|
||||
for (i = 0; i < entry.pathlen; i++) {
|
||||
if (entry.path[i] & 0x80) {
|
||||
t->object.flags |= HIGH_BITS;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
if (S_ISDIR(entry.mode)) {
|
||||
struct tree *sub = lookup_tree(r, &entry.oid);
|
||||
if (sub && has_entries_with_high_bit(r, sub)) {
|
||||
t->object.flags |= HIGH_BITS;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
done:
|
||||
t->object.flags |= VISITED;
|
||||
}
|
||||
|
||||
return !!(t->object.flags & HIGH_BITS);
|
||||
}
|
||||
|
||||
static int commit_tree_has_high_bit_paths(struct repository *r,
|
||||
struct commit *c)
|
||||
{
|
||||
struct tree *t;
|
||||
if (repo_parse_commit(r, c))
|
||||
return 1;
|
||||
t = repo_get_commit_tree(r, c);
|
||||
if (!t)
|
||||
return 1;
|
||||
return has_entries_with_high_bit(r, t);
|
||||
}
|
||||
|
||||
static struct bloom_filter *upgrade_filter(struct repository *r, struct commit *c,
|
||||
struct bloom_filter *filter,
|
||||
int hash_version)
|
||||
{
|
||||
struct commit_list *p = c->parents;
|
||||
if (commit_tree_has_high_bit_paths(r, c))
|
||||
return NULL;
|
||||
|
||||
if (p && commit_tree_has_high_bit_paths(r, p->item))
|
||||
return NULL;
|
||||
|
||||
filter->version = hash_version;
|
||||
|
||||
return filter;
|
||||
}
|
||||
|
||||
struct bloom_filter *get_bloom_filter(struct repository *r, struct commit *c)
|
||||
{
|
||||
struct bloom_filter *filter;
|
||||
int hash_version;
|
||||
|
||||
filter = get_or_compute_bloom_filter(r, c, 0, NULL, NULL);
|
||||
if (!filter)
|
||||
return NULL;
|
||||
|
||||
prepare_repo_settings(r);
|
||||
hash_version = r->settings.commit_graph_changed_paths_version;
|
||||
|
||||
if (!(hash_version == -1 || hash_version == filter->version))
|
||||
return NULL; /* unusable filter */
|
||||
return filter;
|
||||
}
|
||||
|
||||
struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
|
||||
|
@ -242,8 +408,23 @@ struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
|
|||
filter, graph_pos);
|
||||
}
|
||||
|
||||
if (filter->data && filter->len)
|
||||
return filter;
|
||||
if (filter->data && filter->len) {
|
||||
struct bloom_filter *upgrade;
|
||||
if (!settings || settings->hash_version == filter->version)
|
||||
return filter;
|
||||
|
||||
/* version mismatch, see if we can upgrade */
|
||||
if (compute_if_not_present &&
|
||||
git_env_bool("GIT_TEST_UPGRADE_BLOOM_FILTERS", 1)) {
|
||||
upgrade = upgrade_filter(r, c, filter,
|
||||
settings->hash_version);
|
||||
if (upgrade) {
|
||||
if (computed)
|
||||
*computed |= BLOOM_UPGRADED;
|
||||
return upgrade;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!compute_if_not_present)
|
||||
return NULL;
|
||||
|
||||
|
@ -299,19 +480,22 @@ struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
|
|||
}
|
||||
|
||||
if (hashmap_get_size(&pathmap) > settings->max_changed_paths) {
|
||||
init_truncated_large_filter(filter);
|
||||
init_truncated_large_filter(filter,
|
||||
settings->hash_version);
|
||||
if (computed)
|
||||
*computed |= BLOOM_TRUNC_LARGE;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
filter->len = (hashmap_get_size(&pathmap) * settings->bits_per_entry + BITS_PER_WORD - 1) / BITS_PER_WORD;
|
||||
filter->version = settings->hash_version;
|
||||
if (!filter->len) {
|
||||
if (computed)
|
||||
*computed |= BLOOM_TRUNC_EMPTY;
|
||||
filter->len = 1;
|
||||
}
|
||||
CALLOC_ARRAY(filter->data, filter->len);
|
||||
filter->to_free = filter->data;
|
||||
|
||||
hashmap_for_each_entry(&pathmap, &iter, e, entry) {
|
||||
struct bloom_key key;
|
||||
|
@ -325,7 +509,7 @@ struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
|
|||
} else {
|
||||
for (i = 0; i < diff_queued_diff.nr; i++)
|
||||
diff_free_filepair(diff_queued_diff.queue[i]);
|
||||
init_truncated_large_filter(filter);
|
||||
init_truncated_large_filter(filter, settings->hash_version);
|
||||
|
||||
if (computed)
|
||||
*computed |= BLOOM_TRUNC_LARGE;
|
||||
|
|
38
bloom.h
38
bloom.h
|
@ -3,13 +3,16 @@
|
|||
|
||||
struct commit;
|
||||
struct repository;
|
||||
struct commit_graph;
|
||||
|
||||
struct bloom_filter_settings {
|
||||
/*
|
||||
* The version of the hashing technique being used.
|
||||
* We currently only support version = 1 which is
|
||||
* The newest version is 2, which is
|
||||
* the seeded murmur3 hashing technique implemented
|
||||
* in bloom.c.
|
||||
* in bloom.c. Bloom filters of version 1 were created
|
||||
* with prior versions of Git, which had a bug in the
|
||||
* implementation of the hash function.
|
||||
*/
|
||||
uint32_t hash_version;
|
||||
|
||||
|
@ -52,6 +55,9 @@ struct bloom_filter_settings {
|
|||
struct bloom_filter {
|
||||
unsigned char *data;
|
||||
size_t len;
|
||||
int version;
|
||||
|
||||
void *to_free;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -68,6 +74,10 @@ struct bloom_key {
|
|||
uint32_t *hashes;
|
||||
};
|
||||
|
||||
int load_bloom_filter_from_graph(struct commit_graph *g,
|
||||
struct bloom_filter *filter,
|
||||
uint32_t graph_pos);
|
||||
|
||||
/*
|
||||
* Calculate the murmur3 32-bit hash value for the given data
|
||||
* using the given seed.
|
||||
|
@ -75,7 +85,7 @@ struct bloom_key {
|
|||
* Not considered to be cryptographically secure.
|
||||
* Implemented as described in https://en.wikipedia.org/wiki/MurmurHash#Algorithm
|
||||
*/
|
||||
uint32_t murmur3_seeded(uint32_t seed, const char *data, size_t len);
|
||||
uint32_t murmur3_seeded_v2(uint32_t seed, const char *data, size_t len);
|
||||
|
||||
void fill_bloom_key(const char *data,
|
||||
size_t len,
|
||||
|
@ -88,12 +98,14 @@ void add_key_to_filter(const struct bloom_key *key,
|
|||
const struct bloom_filter_settings *settings);
|
||||
|
||||
void init_bloom_filters(void);
|
||||
void deinit_bloom_filters(void);
|
||||
|
||||
enum bloom_filter_computed {
|
||||
BLOOM_NOT_COMPUTED = (1 << 0),
|
||||
BLOOM_COMPUTED = (1 << 1),
|
||||
BLOOM_TRUNC_LARGE = (1 << 2),
|
||||
BLOOM_TRUNC_EMPTY = (1 << 3),
|
||||
BLOOM_UPGRADED = (1 << 4),
|
||||
};
|
||||
|
||||
struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
|
||||
|
@ -102,8 +114,24 @@ struct bloom_filter *get_or_compute_bloom_filter(struct repository *r,
|
|||
const struct bloom_filter_settings *settings,
|
||||
enum bloom_filter_computed *computed);
|
||||
|
||||
#define get_bloom_filter(r, c) get_or_compute_bloom_filter( \
|
||||
(r), (c), 0, NULL, NULL)
|
||||
/*
|
||||
* Find the Bloom filter associated with the given commit "c".
|
||||
*
|
||||
* If any of the following are true
|
||||
*
|
||||
* - the repository does not have a commit-graph, or
|
||||
* - the repository disables reading from the commit-graph, or
|
||||
* - the given commit does not have a Bloom filter computed, or
|
||||
* - there is a Bloom filter for commit "c", but it cannot be read
|
||||
* because the filter uses an incompatible version of murmur3
|
||||
*
|
||||
* , then `get_bloom_filter()` will return NULL. Otherwise, the corresponding
|
||||
* Bloom filter will be returned.
|
||||
*
|
||||
* For callers who wish to inspect Bloom filters with incompatible hash
|
||||
* versions, use get_or_compute_bloom_filter().
|
||||
*/
|
||||
struct bloom_filter *get_bloom_filter(struct repository *r, struct commit *c);
|
||||
|
||||
int bloom_filter_contains(const struct bloom_filter *filter,
|
||||
const struct bloom_key *key,
|
||||
|
|
2
branch.c
2
branch.c
|
@ -627,7 +627,7 @@ void create_branch(struct repository *r,
|
|||
if (!transaction ||
|
||||
ref_transaction_update(transaction, ref.buf,
|
||||
&oid, forcing ? NULL : null_oid(),
|
||||
0, msg, &err) ||
|
||||
NULL, NULL, 0, msg, &err) ||
|
||||
ref_transaction_commit(transaction, &err))
|
||||
die("%s", err.buf);
|
||||
ref_transaction_free(transaction);
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) 2006 Linus Torvalds
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "advice.h"
|
||||
#include "config.h"
|
||||
|
@ -40,20 +40,20 @@ static int chmod_pathspec(struct pathspec *pathspec, char flip, int show_only)
|
|||
{
|
||||
int i, ret = 0;
|
||||
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
struct cache_entry *ce = the_index.cache[i];
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
struct cache_entry *ce = the_repository->index->cache[i];
|
||||
int err;
|
||||
|
||||
if (!include_sparse &&
|
||||
(ce_skip_worktree(ce) ||
|
||||
!path_in_sparse_checkout(ce->name, &the_index)))
|
||||
!path_in_sparse_checkout(ce->name, the_repository->index)))
|
||||
continue;
|
||||
|
||||
if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
|
||||
if (pathspec && !ce_path_match(the_repository->index, ce, pathspec, NULL))
|
||||
continue;
|
||||
|
||||
if (!show_only)
|
||||
err = chmod_index_entry(&the_index, ce, flip);
|
||||
err = chmod_index_entry(the_repository->index, ce, flip);
|
||||
else
|
||||
err = S_ISREG(ce->ce_mode) ? 0 : -1;
|
||||
|
||||
|
@ -68,20 +68,20 @@ static int renormalize_tracked_files(const struct pathspec *pathspec, int flags)
|
|||
{
|
||||
int i, retval = 0;
|
||||
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
struct cache_entry *ce = the_index.cache[i];
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
struct cache_entry *ce = the_repository->index->cache[i];
|
||||
|
||||
if (!include_sparse &&
|
||||
(ce_skip_worktree(ce) ||
|
||||
!path_in_sparse_checkout(ce->name, &the_index)))
|
||||
!path_in_sparse_checkout(ce->name, the_repository->index)))
|
||||
continue;
|
||||
if (ce_stage(ce))
|
||||
continue; /* do not touch unmerged paths */
|
||||
if (!S_ISREG(ce->ce_mode) && !S_ISLNK(ce->ce_mode))
|
||||
continue; /* do not touch non blobs */
|
||||
if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
|
||||
if (pathspec && !ce_path_match(the_repository->index, ce, pathspec, NULL))
|
||||
continue;
|
||||
retval |= add_file_to_index(&the_index, ce->name,
|
||||
retval |= add_file_to_index(the_repository->index, ce->name,
|
||||
flags | ADD_CACHE_RENORMALIZE);
|
||||
}
|
||||
|
||||
|
@ -100,11 +100,11 @@ static char *prune_directory(struct dir_struct *dir, struct pathspec *pathspec,
|
|||
i = dir->nr;
|
||||
while (--i >= 0) {
|
||||
struct dir_entry *entry = *src++;
|
||||
if (dir_path_match(&the_index, entry, pathspec, prefix, seen))
|
||||
if (dir_path_match(the_repository->index, entry, pathspec, prefix, seen))
|
||||
*dst++ = entry;
|
||||
}
|
||||
dir->nr = dst - dir->entries;
|
||||
add_pathspec_matches_against_index(pathspec, &the_index, seen,
|
||||
add_pathspec_matches_against_index(pathspec, the_repository->index, seen,
|
||||
PS_IGNORE_SKIP_WORKTREE);
|
||||
return seen;
|
||||
}
|
||||
|
@ -119,14 +119,14 @@ static int refresh(int verbose, const struct pathspec *pathspec)
|
|||
(verbose ? REFRESH_IN_PORCELAIN : REFRESH_QUIET);
|
||||
|
||||
seen = xcalloc(pathspec->nr, 1);
|
||||
refresh_index(&the_index, flags, pathspec, seen,
|
||||
refresh_index(the_repository->index, flags, pathspec, seen,
|
||||
_("Unstaged changes after refreshing the index:"));
|
||||
for (i = 0; i < pathspec->nr; i++) {
|
||||
if (!seen[i]) {
|
||||
const char *path = pathspec->items[i].original;
|
||||
|
||||
if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
|
||||
!path_in_sparse_checkout(path, &the_index)) {
|
||||
!path_in_sparse_checkout(path, the_repository->index)) {
|
||||
string_list_append(&only_match_skip_worktree,
|
||||
pathspec->items[i].original);
|
||||
} else {
|
||||
|
@ -338,12 +338,12 @@ static int add_files(struct dir_struct *dir, int flags)
|
|||
|
||||
for (i = 0; i < dir->nr; i++) {
|
||||
if (!include_sparse &&
|
||||
!path_in_sparse_checkout(dir->entries[i]->name, &the_index)) {
|
||||
!path_in_sparse_checkout(dir->entries[i]->name, the_repository->index)) {
|
||||
string_list_append(&matched_sparse_paths,
|
||||
dir->entries[i]->name);
|
||||
continue;
|
||||
}
|
||||
if (add_file_to_index(&the_index, dir->entries[i]->name, flags)) {
|
||||
if (add_file_to_index(the_repository->index, dir->entries[i]->name, flags)) {
|
||||
if (!ignore_add_errors)
|
||||
die(_("adding files failed"));
|
||||
exit_status = 1;
|
||||
|
@ -461,8 +461,8 @@ int cmd_add(int argc, const char **argv, const char *prefix)
|
|||
if (repo_read_index_preload(the_repository, &pathspec, 0) < 0)
|
||||
die(_("index file corrupt"));
|
||||
|
||||
die_in_unpopulated_submodule(&the_index, prefix);
|
||||
die_path_inside_submodule(&the_index, &pathspec);
|
||||
die_in_unpopulated_submodule(the_repository->index, prefix);
|
||||
die_path_inside_submodule(the_repository->index, &pathspec);
|
||||
|
||||
if (add_new_files) {
|
||||
int baselen;
|
||||
|
@ -474,7 +474,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
|
|||
}
|
||||
|
||||
/* This picks up the paths that are not tracked */
|
||||
baselen = fill_directory(&dir, &the_index, &pathspec);
|
||||
baselen = fill_directory(&dir, the_repository->index, &pathspec);
|
||||
if (pathspec.nr)
|
||||
seen = prune_directory(&dir, &pathspec, baselen);
|
||||
}
|
||||
|
@ -491,7 +491,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
|
|||
|
||||
if (!seen)
|
||||
seen = find_pathspecs_matching_against_index(&pathspec,
|
||||
&the_index, PS_IGNORE_SKIP_WORKTREE);
|
||||
the_repository->index, PS_IGNORE_SKIP_WORKTREE);
|
||||
|
||||
/*
|
||||
* file_exists() assumes exact match
|
||||
|
@ -527,8 +527,8 @@ int cmd_add(int argc, const char **argv, const char *prefix)
|
|||
!file_exists(path)) {
|
||||
if (ignore_missing) {
|
||||
int dtype = DT_UNKNOWN;
|
||||
if (is_excluded(&dir, &the_index, path, &dtype))
|
||||
dir_add_ignored(&dir, &the_index,
|
||||
if (is_excluded(&dir, the_repository->index, path, &dtype))
|
||||
dir_add_ignored(&dir, the_repository->index,
|
||||
path, pathspec.items[i].len);
|
||||
} else
|
||||
die(_("pathspec '%s' did not match any files"),
|
||||
|
@ -569,7 +569,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
|
|||
end_odb_transaction();
|
||||
|
||||
finish:
|
||||
if (write_locked_index(&the_index, &lock_file,
|
||||
if (write_locked_index(the_repository->index, &lock_file,
|
||||
COMMIT_LOCK | SKIP_IF_UNCHANGED))
|
||||
die(_("unable to write new index file"));
|
||||
|
||||
|
|
36
builtin/am.c
36
builtin/am.c
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Based on git-am.sh by Junio C Hamano.
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "advice.h"
|
||||
|
@ -1536,8 +1536,8 @@ static int run_apply(const struct am_state *state, const char *index_file)
|
|||
|
||||
if (index_file) {
|
||||
/* Reload index as apply_all_patches() will have modified it. */
|
||||
discard_index(&the_index);
|
||||
read_index_from(&the_index, index_file, get_git_dir());
|
||||
discard_index(the_repository->index);
|
||||
read_index_from(the_repository->index, index_file, get_git_dir());
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -1579,10 +1579,10 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
|
|||
if (build_fake_ancestor(state, index_path))
|
||||
return error("could not build fake ancestor");
|
||||
|
||||
discard_index(&the_index);
|
||||
read_index_from(&the_index, index_path, get_git_dir());
|
||||
discard_index(the_repository->index);
|
||||
read_index_from(the_repository->index, index_path, get_git_dir());
|
||||
|
||||
if (write_index_as_tree(&orig_tree, &the_index, index_path, 0, NULL))
|
||||
if (write_index_as_tree(&orig_tree, the_repository->index, index_path, 0, NULL))
|
||||
return error(_("Repository lacks necessary blobs to fall back on 3-way merge."));
|
||||
|
||||
say(state, stdout, _("Using index info to reconstruct a base tree..."));
|
||||
|
@ -1608,12 +1608,12 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
|
|||
return error(_("Did you hand edit your patch?\n"
|
||||
"It does not apply to blobs recorded in its index."));
|
||||
|
||||
if (write_index_as_tree(&their_tree, &the_index, index_path, 0, NULL))
|
||||
if (write_index_as_tree(&their_tree, the_repository->index, index_path, 0, NULL))
|
||||
return error("could not write tree");
|
||||
|
||||
say(state, stdout, _("Falling back to patching base and 3-way merge..."));
|
||||
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
repo_read_index(the_repository);
|
||||
|
||||
/*
|
||||
|
@ -1660,7 +1660,7 @@ static void do_commit(const struct am_state *state)
|
|||
if (!state->no_verify && run_hooks("pre-applypatch"))
|
||||
exit(1);
|
||||
|
||||
if (write_index_as_tree(&tree, &the_index, get_index_file(), 0, NULL))
|
||||
if (write_index_as_tree(&tree, the_repository->index, get_index_file(), 0, NULL))
|
||||
die(_("git write-tree failed to write a tree"));
|
||||
|
||||
if (!repo_get_oid_commit(the_repository, "HEAD", &parent)) {
|
||||
|
@ -1948,7 +1948,7 @@ static void am_resolve(struct am_state *state, int allow_empty)
|
|||
}
|
||||
}
|
||||
|
||||
if (unmerged_index(&the_index)) {
|
||||
if (unmerged_index(the_repository->index)) {
|
||||
printf_ln(_("You still have unmerged paths in your index.\n"
|
||||
"You should 'git add' each file with resolved conflicts to mark them as such.\n"
|
||||
"You might run `git rm` on a file to accept \"deleted by them\" for it."));
|
||||
|
@ -1987,12 +1987,12 @@ static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
|
|||
|
||||
repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
|
||||
|
||||
refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
|
||||
memset(&opts, 0, sizeof(opts));
|
||||
opts.head_idx = 1;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
opts.update = 1;
|
||||
opts.merge = 1;
|
||||
opts.reset = reset ? UNPACK_RESET_PROTECT_UNTRACKED : 0;
|
||||
|
@ -2006,7 +2006,7 @@ static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die(_("unable to write new index file"));
|
||||
|
||||
return 0;
|
||||
|
@ -2029,8 +2029,8 @@ static int merge_tree(struct tree *tree)
|
|||
|
||||
memset(&opts, 0, sizeof(opts));
|
||||
opts.head_idx = 1;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
opts.merge = 1;
|
||||
opts.fn = oneway_merge;
|
||||
init_tree_desc(&t[0], &tree->object.oid, tree->buffer, tree->size);
|
||||
|
@ -2040,7 +2040,7 @@ static int merge_tree(struct tree *tree)
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die(_("unable to write new index file"));
|
||||
|
||||
return 0;
|
||||
|
@ -2068,7 +2068,7 @@ static int clean_index(const struct object_id *head, const struct object_id *rem
|
|||
if (fast_forward_to(head_tree, head_tree, 1))
|
||||
return -1;
|
||||
|
||||
if (write_index_as_tree(&index, &the_index, get_index_file(), 0, NULL))
|
||||
if (write_index_as_tree(&index, the_repository->index, get_index_file(), 0, NULL))
|
||||
return -1;
|
||||
|
||||
index_tree = parse_tree_indirect(&index);
|
||||
|
|
|
@ -915,7 +915,6 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
|
|||
struct range_set ranges;
|
||||
unsigned int range_i;
|
||||
long anchor;
|
||||
const int hexsz = the_hash_algo->hexsz;
|
||||
long num_lines = 0;
|
||||
const char *str_usage = cmd_is_annotate ? annotate_usage : blame_usage;
|
||||
const char **opt_usage = cmd_is_annotate ? annotate_opt_usage : blame_opt_usage;
|
||||
|
@ -973,11 +972,11 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
|
|||
} else if (show_progress < 0)
|
||||
show_progress = isatty(2);
|
||||
|
||||
if (0 < abbrev && abbrev < hexsz)
|
||||
if (0 < abbrev && abbrev < (int)the_hash_algo->hexsz)
|
||||
/* one more abbrev length is needed for the boundary commit */
|
||||
abbrev++;
|
||||
else if (!abbrev)
|
||||
abbrev = hexsz;
|
||||
abbrev = the_hash_algo->hexsz;
|
||||
|
||||
if (revs_file && read_ancestry(revs_file))
|
||||
die_errno("reading graft file '%s' failed", revs_file);
|
||||
|
|
|
@ -140,6 +140,11 @@ static int cmd_bundle_verify(int argc, const char **argv, const char *prefix) {
|
|||
builtin_bundle_verify_usage, options, &bundle_file);
|
||||
/* bundle internals use argv[1] as further parameters */
|
||||
|
||||
if (!startup_info->have_repository) {
|
||||
ret = error(_("need a repository to verify a bundle"));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
if ((bundle_fd = open_bundle(bundle_file, &header, &name)) < 0) {
|
||||
ret = 1;
|
||||
goto cleanup;
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) Linus Torvalds, 2005
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "convert.h"
|
||||
|
@ -77,7 +77,7 @@ static int filter_object(const char *path, unsigned mode,
|
|||
struct checkout_metadata meta;
|
||||
|
||||
init_checkout_metadata(&meta, NULL, NULL, oid);
|
||||
if (convert_to_working_tree(&the_index, path, *buf, *size, &strbuf, &meta)) {
|
||||
if (convert_to_working_tree(the_repository->index, path, *buf, *size, &strbuf, &meta)) {
|
||||
free(*buf);
|
||||
*size = strbuf.len;
|
||||
*buf = strbuf_detach(&strbuf, NULL);
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "attr.h"
|
||||
|
@ -71,9 +70,9 @@ static void check_attr(const char *prefix, struct attr_check *check,
|
|||
prefix_path(prefix, prefix ? strlen(prefix) : 0, file);
|
||||
|
||||
if (collect_all) {
|
||||
git_all_attrs(&the_index, full_path, check);
|
||||
git_all_attrs(the_repository->index, full_path, check);
|
||||
} else {
|
||||
git_check_attr(&the_index, full_path, check);
|
||||
git_check_attr(the_repository->index, full_path, check);
|
||||
}
|
||||
output_attr(check, file);
|
||||
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "dir.h"
|
||||
|
@ -95,21 +94,21 @@ static int check_ignore(struct dir_struct *dir,
|
|||
PATHSPEC_KEEP_ORDER,
|
||||
prefix, argv);
|
||||
|
||||
die_path_inside_submodule(&the_index, &pathspec);
|
||||
die_path_inside_submodule(the_repository->index, &pathspec);
|
||||
|
||||
/*
|
||||
* look for pathspecs matching entries in the index, since these
|
||||
* should not be ignored, in order to be consistent with
|
||||
* 'git status', 'git add' etc.
|
||||
*/
|
||||
seen = find_pathspecs_matching_against_index(&pathspec, &the_index,
|
||||
seen = find_pathspecs_matching_against_index(&pathspec, the_repository->index,
|
||||
PS_HEED_SKIP_WORKTREE);
|
||||
for (i = 0; i < pathspec.nr; i++) {
|
||||
full_path = pathspec.items[i].match;
|
||||
pattern = NULL;
|
||||
if (!seen[i]) {
|
||||
int dtype = DT_UNKNOWN;
|
||||
pattern = last_matching_pattern(dir, &the_index,
|
||||
pattern = last_matching_pattern(dir, the_repository->index,
|
||||
full_path, &dtype);
|
||||
if (!verbose && pattern &&
|
||||
pattern->flags & PATTERN_FLAG_NEGATIVE)
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* Copyright (C) 2005 Linus Torvalds
|
||||
*
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "gettext.h"
|
||||
|
@ -69,7 +69,7 @@ static void write_tempfile_record(const char *name, const char *prefix)
|
|||
static int checkout_file(const char *name, const char *prefix)
|
||||
{
|
||||
int namelen = strlen(name);
|
||||
int pos = index_name_pos(&the_index, name, namelen);
|
||||
int pos = index_name_pos(the_repository->index, name, namelen);
|
||||
int has_same_name = 0;
|
||||
int is_file = 0;
|
||||
int is_skipped = 1;
|
||||
|
@ -79,8 +79,8 @@ static int checkout_file(const char *name, const char *prefix)
|
|||
if (pos < 0)
|
||||
pos = -pos - 1;
|
||||
|
||||
while (pos < the_index.cache_nr) {
|
||||
struct cache_entry *ce = the_index.cache[pos];
|
||||
while (pos <the_repository->index->cache_nr) {
|
||||
struct cache_entry *ce =the_repository->index->cache[pos];
|
||||
if (ce_namelen(ce) != namelen ||
|
||||
memcmp(ce->name, name, namelen))
|
||||
break;
|
||||
|
@ -140,8 +140,8 @@ static int checkout_all(const char *prefix, int prefix_length)
|
|||
int i, errs = 0;
|
||||
struct cache_entry *last_ce = NULL;
|
||||
|
||||
for (i = 0; i < the_index.cache_nr ; i++) {
|
||||
struct cache_entry *ce = the_index.cache[i];
|
||||
for (i = 0; i < the_repository->index->cache_nr ; i++) {
|
||||
struct cache_entry *ce = the_repository->index->cache[i];
|
||||
|
||||
if (S_ISSPARSEDIR(ce->ce_mode)) {
|
||||
if (!ce_skip_worktree(ce))
|
||||
|
@ -154,8 +154,8 @@ static int checkout_all(const char *prefix, int prefix_length)
|
|||
* first entry inside the expanded sparse directory).
|
||||
*/
|
||||
if (ignore_skip_worktree) {
|
||||
ensure_full_index(&the_index);
|
||||
ce = the_index.cache[i];
|
||||
ensure_full_index(the_repository->index);
|
||||
ce = the_repository->index->cache[i];
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -260,7 +260,7 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
|
|||
|
||||
argc = parse_options(argc, argv, prefix, builtin_checkout_index_options,
|
||||
builtin_checkout_index_usage, 0);
|
||||
state.istate = &the_index;
|
||||
state.istate = the_repository->index;
|
||||
state.force = force;
|
||||
state.quiet = quiet;
|
||||
state.not_new = not_new;
|
||||
|
@ -280,7 +280,7 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
|
|||
*/
|
||||
if (index_opt && !state.base_dir_len && !to_tempfile) {
|
||||
state.refresh_cache = 1;
|
||||
state.istate = &the_index;
|
||||
state.istate = the_repository->index;
|
||||
repo_hold_locked_index(the_repository, &lock_file,
|
||||
LOCK_DIE_ON_ERROR);
|
||||
}
|
||||
|
@ -339,7 +339,7 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
|
|||
return 1;
|
||||
|
||||
if (is_lock_file_locked(&lock_file) &&
|
||||
write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die("Unable to write new index file");
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "advice.h"
|
||||
#include "branch.h"
|
||||
|
@ -146,7 +145,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
|
|||
return READ_TREE_RECURSIVE;
|
||||
|
||||
len = base->len + strlen(pathname);
|
||||
ce = make_empty_cache_entry(&the_index, len);
|
||||
ce = make_empty_cache_entry(the_repository->index, len);
|
||||
oidcpy(&ce->oid, oid);
|
||||
memcpy(ce->name, base->buf, base->len);
|
||||
memcpy(ce->name + base->len, pathname, len - base->len);
|
||||
|
@ -159,9 +158,9 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
|
|||
* entry in place. Whether it is UPTODATE or not, checkout_entry will
|
||||
* do the right thing.
|
||||
*/
|
||||
pos = index_name_pos(&the_index, ce->name, ce->ce_namelen);
|
||||
pos = index_name_pos(the_repository->index, ce->name, ce->ce_namelen);
|
||||
if (pos >= 0) {
|
||||
struct cache_entry *old = the_index.cache[pos];
|
||||
struct cache_entry *old = the_repository->index->cache[pos];
|
||||
if (ce->ce_mode == old->ce_mode &&
|
||||
!ce_intent_to_add(old) &&
|
||||
oideq(&ce->oid, &old->oid)) {
|
||||
|
@ -171,7 +170,7 @@ static int update_some(const struct object_id *oid, struct strbuf *base,
|
|||
}
|
||||
}
|
||||
|
||||
add_index_entry(&the_index, ce,
|
||||
add_index_entry(the_repository->index, ce,
|
||||
ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
|
||||
return 0;
|
||||
}
|
||||
|
@ -190,8 +189,8 @@ static int read_tree_some(struct tree *tree, const struct pathspec *pathspec)
|
|||
|
||||
static int skip_same_name(const struct cache_entry *ce, int pos)
|
||||
{
|
||||
while (++pos < the_index.cache_nr &&
|
||||
!strcmp(the_index.cache[pos]->name, ce->name))
|
||||
while (++pos < the_repository->index->cache_nr &&
|
||||
!strcmp(the_repository->index->cache[pos]->name, ce->name))
|
||||
; /* skip */
|
||||
return pos;
|
||||
}
|
||||
|
@ -199,9 +198,9 @@ static int skip_same_name(const struct cache_entry *ce, int pos)
|
|||
static int check_stage(int stage, const struct cache_entry *ce, int pos,
|
||||
int overlay_mode)
|
||||
{
|
||||
while (pos < the_index.cache_nr &&
|
||||
!strcmp(the_index.cache[pos]->name, ce->name)) {
|
||||
if (ce_stage(the_index.cache[pos]) == stage)
|
||||
while (pos < the_repository->index->cache_nr &&
|
||||
!strcmp(the_repository->index->cache[pos]->name, ce->name)) {
|
||||
if (ce_stage(the_repository->index->cache[pos]) == stage)
|
||||
return 0;
|
||||
pos++;
|
||||
}
|
||||
|
@ -218,8 +217,8 @@ static int check_stages(unsigned stages, const struct cache_entry *ce, int pos)
|
|||
unsigned seen = 0;
|
||||
const char *name = ce->name;
|
||||
|
||||
while (pos < the_index.cache_nr) {
|
||||
ce = the_index.cache[pos];
|
||||
while (pos < the_repository->index->cache_nr) {
|
||||
ce = the_repository->index->cache[pos];
|
||||
if (strcmp(name, ce->name))
|
||||
break;
|
||||
seen |= (1 << ce_stage(ce));
|
||||
|
@ -235,10 +234,10 @@ static int checkout_stage(int stage, const struct cache_entry *ce, int pos,
|
|||
const struct checkout *state, int *nr_checkouts,
|
||||
int overlay_mode)
|
||||
{
|
||||
while (pos < the_index.cache_nr &&
|
||||
!strcmp(the_index.cache[pos]->name, ce->name)) {
|
||||
if (ce_stage(the_index.cache[pos]) == stage)
|
||||
return checkout_entry(the_index.cache[pos], state,
|
||||
while (pos < the_repository->index->cache_nr &&
|
||||
!strcmp(the_repository->index->cache[pos]->name, ce->name)) {
|
||||
if (ce_stage(the_repository->index->cache[pos]) == stage)
|
||||
return checkout_entry(the_repository->index->cache[pos], state,
|
||||
NULL, nr_checkouts);
|
||||
pos++;
|
||||
}
|
||||
|
@ -256,7 +255,7 @@ static int checkout_merged(int pos, const struct checkout *state,
|
|||
int *nr_checkouts, struct mem_pool *ce_mem_pool,
|
||||
int conflict_style)
|
||||
{
|
||||
struct cache_entry *ce = the_index.cache[pos];
|
||||
struct cache_entry *ce = the_repository->index->cache[pos];
|
||||
const char *path = ce->name;
|
||||
mmfile_t ancestor, ours, theirs;
|
||||
enum ll_merge_result merge_status;
|
||||
|
@ -269,7 +268,7 @@ static int checkout_merged(int pos, const struct checkout *state,
|
|||
int renormalize = 0;
|
||||
|
||||
memset(threeway, 0, sizeof(threeway));
|
||||
while (pos < the_index.cache_nr) {
|
||||
while (pos < the_repository->index->cache_nr) {
|
||||
int stage;
|
||||
stage = ce_stage(ce);
|
||||
if (!stage || strcmp(path, ce->name))
|
||||
|
@ -278,7 +277,7 @@ static int checkout_merged(int pos, const struct checkout *state,
|
|||
if (stage == 2)
|
||||
mode = create_ce_mode(ce->ce_mode);
|
||||
pos++;
|
||||
ce = the_index.cache[pos];
|
||||
ce = the_repository->index->cache[pos];
|
||||
}
|
||||
if (is_null_oid(&threeway[1]) || is_null_oid(&threeway[2]))
|
||||
return error(_("path '%s' does not have necessary versions"), path);
|
||||
|
@ -356,7 +355,7 @@ static void mark_ce_for_checkout_overlay(struct cache_entry *ce,
|
|||
* match_pathspec() for _all_ entries when
|
||||
* opts->source_tree != NULL.
|
||||
*/
|
||||
if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
|
||||
if (ce_path_match(the_repository->index, ce, &opts->pathspec, ps_matched))
|
||||
ce->ce_flags |= CE_MATCHED;
|
||||
}
|
||||
|
||||
|
@ -367,7 +366,7 @@ static void mark_ce_for_checkout_no_overlay(struct cache_entry *ce,
|
|||
ce->ce_flags &= ~CE_MATCHED;
|
||||
if (!opts->ignore_skipworktree && ce_skip_worktree(ce))
|
||||
return;
|
||||
if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched)) {
|
||||
if (ce_path_match(the_repository->index, ce, &opts->pathspec, ps_matched)) {
|
||||
ce->ce_flags |= CE_MATCHED;
|
||||
if (opts->source_tree && !(ce->ce_flags & CE_UPDATE))
|
||||
/*
|
||||
|
@ -391,7 +390,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
|
|||
|
||||
state.force = 1;
|
||||
state.refresh_cache = 1;
|
||||
state.istate = &the_index;
|
||||
state.istate = the_repository->index;
|
||||
|
||||
mem_pool_init(&ce_mem_pool, 0);
|
||||
get_parallel_checkout_configs(&pc_workers, &pc_threshold);
|
||||
|
@ -404,8 +403,8 @@ static int checkout_worktree(const struct checkout_opts *opts,
|
|||
if (pc_workers > 1)
|
||||
init_parallel_checkout();
|
||||
|
||||
for (pos = 0; pos < the_index.cache_nr; pos++) {
|
||||
struct cache_entry *ce = the_index.cache[pos];
|
||||
for (pos = 0; pos < the_repository->index->cache_nr; pos++) {
|
||||
struct cache_entry *ce = the_repository->index->cache[pos];
|
||||
if (ce->ce_flags & CE_MATCHED) {
|
||||
if (!ce_stage(ce)) {
|
||||
errs |= checkout_entry(ce, &state,
|
||||
|
@ -429,7 +428,7 @@ static int checkout_worktree(const struct checkout_opts *opts,
|
|||
errs |= run_parallel_checkout(&state, pc_workers, pc_threshold,
|
||||
NULL, NULL);
|
||||
mem_pool_discard(&ce_mem_pool, should_validate_cache_entries());
|
||||
remove_marked_cache_entries(&the_index, 1);
|
||||
remove_marked_cache_entries(the_repository->index, 1);
|
||||
remove_scheduled_dirs();
|
||||
errs |= finish_delayed_checkout(&state, opts->show_progress);
|
||||
|
||||
|
@ -571,7 +570,7 @@ static int checkout_paths(const struct checkout_opts *opts,
|
|||
if (opts->source_tree)
|
||||
read_tree_some(opts->source_tree, &opts->pathspec);
|
||||
if (opts->merge)
|
||||
unmerge_index(&the_index, &opts->pathspec, CE_MATCHED);
|
||||
unmerge_index(the_repository->index, &opts->pathspec, CE_MATCHED);
|
||||
|
||||
ps_matched = xcalloc(opts->pathspec.nr, 1);
|
||||
|
||||
|
@ -579,13 +578,13 @@ static int checkout_paths(const struct checkout_opts *opts,
|
|||
* Make sure all pathspecs participated in locating the paths
|
||||
* to be checked out.
|
||||
*/
|
||||
for (pos = 0; pos < the_index.cache_nr; pos++)
|
||||
for (pos = 0; pos < the_repository->index->cache_nr; pos++)
|
||||
if (opts->overlay_mode)
|
||||
mark_ce_for_checkout_overlay(the_index.cache[pos],
|
||||
mark_ce_for_checkout_overlay(the_repository->index->cache[pos],
|
||||
ps_matched,
|
||||
opts);
|
||||
else
|
||||
mark_ce_for_checkout_no_overlay(the_index.cache[pos],
|
||||
mark_ce_for_checkout_no_overlay(the_repository->index->cache[pos],
|
||||
ps_matched,
|
||||
opts);
|
||||
|
||||
|
@ -596,8 +595,8 @@ static int checkout_paths(const struct checkout_opts *opts,
|
|||
free(ps_matched);
|
||||
|
||||
/* Any unmerged paths? */
|
||||
for (pos = 0; pos < the_index.cache_nr; pos++) {
|
||||
const struct cache_entry *ce = the_index.cache[pos];
|
||||
for (pos = 0; pos < the_repository->index->cache_nr; pos++) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[pos];
|
||||
if (ce->ce_flags & CE_MATCHED) {
|
||||
if (!ce_stage(ce))
|
||||
continue;
|
||||
|
@ -622,7 +621,7 @@ static int checkout_paths(const struct checkout_opts *opts,
|
|||
if (opts->checkout_worktree)
|
||||
errs |= checkout_worktree(opts, new_branch_info);
|
||||
else
|
||||
remove_marked_cache_entries(&the_index, 1);
|
||||
remove_marked_cache_entries(the_repository->index, 1);
|
||||
|
||||
/*
|
||||
* Allow updating the index when checking out from the index.
|
||||
|
@ -634,7 +633,7 @@ static int checkout_paths(const struct checkout_opts *opts,
|
|||
checkout_index = opts->checkout_index;
|
||||
|
||||
if (checkout_index) {
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die(_("unable to write new index file"));
|
||||
} else {
|
||||
/*
|
||||
|
@ -703,8 +702,8 @@ static int reset_tree(struct tree *tree, const struct checkout_opts *o,
|
|||
opts.merge = 1;
|
||||
opts.fn = oneway_merge;
|
||||
opts.verbose_update = o->show_progress;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
init_checkout_metadata(&opts.meta, info->refname,
|
||||
info->commit ? &info->commit->object.oid : null_oid(),
|
||||
NULL);
|
||||
|
@ -756,12 +755,12 @@ static void init_topts(struct unpack_trees_options *topts, int merge,
|
|||
{
|
||||
memset(topts, 0, sizeof(*topts));
|
||||
topts->head_idx = -1;
|
||||
topts->src_index = &the_index;
|
||||
topts->dst_index = &the_index;
|
||||
topts->src_index = the_repository->index;
|
||||
topts->dst_index = the_repository->index;
|
||||
|
||||
setup_unpack_trees_porcelain(topts, "checkout");
|
||||
|
||||
topts->initial_checkout = is_index_unborn(&the_index);
|
||||
topts->initial_checkout = is_index_unborn(the_repository->index);
|
||||
topts->update = 1;
|
||||
topts->merge = 1;
|
||||
topts->quiet = merge && old_commit;
|
||||
|
@ -783,7 +782,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
|
|||
if (repo_read_index_preload(the_repository, NULL, 0) < 0)
|
||||
return error(_("index file corrupt"));
|
||||
|
||||
resolve_undo_clear_index(&the_index);
|
||||
resolve_undo_clear_index(the_repository->index);
|
||||
if (opts->new_orphan_branch && opts->orphan_from_empty_tree) {
|
||||
if (new_branch_info->commit)
|
||||
BUG("'switch --orphan' should never accept a commit as starting point");
|
||||
|
@ -807,9 +806,9 @@ static int merge_working_tree(const struct checkout_opts *opts,
|
|||
struct unpack_trees_options topts;
|
||||
const struct object_id *old_commit_oid;
|
||||
|
||||
refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
|
||||
if (unmerged_index(&the_index)) {
|
||||
if (unmerged_index(the_repository->index)) {
|
||||
error(_("you need to resolve your current index first"));
|
||||
return 1;
|
||||
}
|
||||
|
@ -919,10 +918,10 @@ static int merge_working_tree(const struct checkout_opts *opts,
|
|||
}
|
||||
}
|
||||
|
||||
if (!cache_tree_fully_valid(the_index.cache_tree))
|
||||
cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
|
||||
if (!cache_tree_fully_valid(the_repository->index->cache_tree))
|
||||
cache_tree_update(the_repository->index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die(_("unable to write new index file"));
|
||||
|
||||
if (!opts->discard_changes && !opts->quiet && new_branch_info->commit)
|
||||
|
|
|
@ -6,7 +6,6 @@
|
|||
* Based on git-clean.sh by Pavel Roskin
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "config.h"
|
||||
|
@ -714,7 +713,7 @@ static int filter_by_patterns_cmd(void)
|
|||
for_each_string_list_item(item, &del_list) {
|
||||
int dtype = DT_UNKNOWN;
|
||||
|
||||
if (is_excluded(&dir, &the_index, item->string, &dtype)) {
|
||||
if (is_excluded(&dir, the_repository->index, item->string, &dtype)) {
|
||||
*item->string = '\0';
|
||||
changed++;
|
||||
}
|
||||
|
@ -1021,7 +1020,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
|
|||
PATHSPEC_PREFER_CWD,
|
||||
prefix, argv);
|
||||
|
||||
fill_directory(&dir, &the_index, &pathspec);
|
||||
fill_directory(&dir, the_repository->index, &pathspec);
|
||||
correct_untracked_entries(&dir);
|
||||
|
||||
for (i = 0; i < dir.nr; i++) {
|
||||
|
@ -1029,7 +1028,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
|
|||
struct stat st;
|
||||
const char *rel;
|
||||
|
||||
if (!index_name_is_other(&the_index, ent->name, ent->len))
|
||||
if (!index_name_is_other(the_repository->index, ent->name, ent->len))
|
||||
continue;
|
||||
|
||||
if (lstat(ent->name, &st))
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
* Clone a repository into a different directory that does not yet exist.
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "advice.h"
|
||||
|
@ -547,7 +546,7 @@ static void write_remote_refs(const struct ref *local_refs)
|
|||
if (!r->peer_ref)
|
||||
continue;
|
||||
if (ref_transaction_create(t, r->peer_ref->name, &r->old_oid,
|
||||
0, NULL, &err))
|
||||
NULL, 0, NULL, &err))
|
||||
die("%s", err.buf);
|
||||
}
|
||||
|
||||
|
@ -731,8 +730,8 @@ static int checkout(int submodule_progress, int filter_submodules)
|
|||
opts.preserve_ignored = 0;
|
||||
opts.fn = oneway_merge;
|
||||
opts.verbose_update = (option_verbosity >= 0);
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
init_checkout_metadata(&opts.meta, head, &oid, NULL);
|
||||
|
||||
tree = parse_tree_indirect(&oid);
|
||||
|
@ -746,7 +745,7 @@ static int checkout(int submodule_progress, int filter_submodules)
|
|||
|
||||
free(head);
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die(_("unable to write new index file"));
|
||||
|
||||
err |= run_hooks_l("post-checkout", oid_to_hex(null_oid()),
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
* Based on git-commit.sh by Junio C Hamano and Linus Torvalds
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "advice.h"
|
||||
#include "config.h"
|
||||
|
@ -266,19 +265,19 @@ static int list_paths(struct string_list *list, const char *with_tree,
|
|||
|
||||
if (with_tree) {
|
||||
char *max_prefix = common_prefix(pattern);
|
||||
overlay_tree_on_index(&the_index, with_tree, max_prefix);
|
||||
overlay_tree_on_index(the_repository->index, with_tree, max_prefix);
|
||||
free(max_prefix);
|
||||
}
|
||||
|
||||
/* TODO: audit for interaction with sparse-index. */
|
||||
ensure_full_index(&the_index);
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_index.cache[i];
|
||||
ensure_full_index(the_repository->index);
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[i];
|
||||
struct string_list_item *item;
|
||||
|
||||
if (ce->ce_flags & CE_UPDATE)
|
||||
continue;
|
||||
if (!ce_path_match(&the_index, ce, pattern, m))
|
||||
if (!ce_path_match(the_repository->index, ce, pattern, m))
|
||||
continue;
|
||||
item = string_list_insert(list, ce->name);
|
||||
if (ce_skip_worktree(ce))
|
||||
|
@ -302,10 +301,10 @@ static void add_remove_files(struct string_list *list)
|
|||
continue;
|
||||
|
||||
if (!lstat(p->string, &st)) {
|
||||
if (add_to_index(&the_index, p->string, &st, 0))
|
||||
if (add_to_index(the_repository->index, p->string, &st, 0))
|
||||
die(_("updating files failed"));
|
||||
} else
|
||||
remove_file_from_index(&the_index, p->string);
|
||||
remove_file_from_index(the_repository->index, p->string);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -316,7 +315,7 @@ static void create_base_index(const struct commit *current_head)
|
|||
struct tree_desc t;
|
||||
|
||||
if (!current_head) {
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -324,8 +323,8 @@ static void create_base_index(const struct commit *current_head)
|
|||
opts.head_idx = 1;
|
||||
opts.index_only = 1;
|
||||
opts.merge = 1;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
|
||||
opts.fn = oneway_merge;
|
||||
tree = parse_tree_indirect(¤t_head->object.oid);
|
||||
|
@ -344,7 +343,7 @@ static void refresh_cache_or_die(int refresh_flags)
|
|||
* refresh_flags contains REFRESH_QUIET, so the only errors
|
||||
* are for unmerged entries.
|
||||
*/
|
||||
if (refresh_index(&the_index, refresh_flags | REFRESH_IN_PORCELAIN, NULL, NULL, NULL))
|
||||
if (refresh_index(the_repository->index, refresh_flags | REFRESH_IN_PORCELAIN, NULL, NULL, NULL))
|
||||
die_resolve_conflict("commit");
|
||||
}
|
||||
|
||||
|
@ -393,7 +392,7 @@ static const char *prepare_index(const char **argv, const char *prefix,
|
|||
|
||||
refresh_cache_or_die(refresh_flags);
|
||||
|
||||
if (write_locked_index(&the_index, &index_lock, 0))
|
||||
if (write_locked_index(the_repository->index, &index_lock, 0))
|
||||
die(_("unable to create temporary index"));
|
||||
|
||||
old_repo_index_file = the_repository->index_file;
|
||||
|
@ -412,13 +411,13 @@ static const char *prepare_index(const char **argv, const char *prefix,
|
|||
unsetenv(INDEX_ENVIRONMENT);
|
||||
FREE_AND_NULL(old_index_env);
|
||||
|
||||
discard_index(&the_index);
|
||||
read_index_from(&the_index, get_lock_file_path(&index_lock),
|
||||
discard_index(the_repository->index);
|
||||
read_index_from(the_repository->index, get_lock_file_path(&index_lock),
|
||||
get_git_dir());
|
||||
if (cache_tree_update(&the_index, WRITE_TREE_SILENT) == 0) {
|
||||
if (cache_tree_update(the_repository->index, WRITE_TREE_SILENT) == 0) {
|
||||
if (reopen_lock_file(&index_lock) < 0)
|
||||
die(_("unable to write index file"));
|
||||
if (write_locked_index(&the_index, &index_lock, 0))
|
||||
if (write_locked_index(the_repository->index, &index_lock, 0))
|
||||
die(_("unable to update temporary index"));
|
||||
} else
|
||||
warning(_("Failed to update main cache tree"));
|
||||
|
@ -450,8 +449,8 @@ static const char *prepare_index(const char **argv, const char *prefix,
|
|||
exit(128);
|
||||
|
||||
refresh_cache_or_die(refresh_flags);
|
||||
cache_tree_update(&the_index, WRITE_TREE_SILENT);
|
||||
if (write_locked_index(&the_index, &index_lock, 0))
|
||||
cache_tree_update(the_repository->index, WRITE_TREE_SILENT);
|
||||
if (write_locked_index(the_repository->index, &index_lock, 0))
|
||||
die(_("unable to write new index file"));
|
||||
commit_style = COMMIT_NORMAL;
|
||||
ret = get_lock_file_path(&index_lock);
|
||||
|
@ -472,10 +471,10 @@ static const char *prepare_index(const char **argv, const char *prefix,
|
|||
repo_hold_locked_index(the_repository, &index_lock,
|
||||
LOCK_DIE_ON_ERROR);
|
||||
refresh_cache_or_die(refresh_flags);
|
||||
if (the_index.cache_changed
|
||||
|| !cache_tree_fully_valid(the_index.cache_tree))
|
||||
cache_tree_update(&the_index, WRITE_TREE_SILENT);
|
||||
if (write_locked_index(&the_index, &index_lock,
|
||||
if (the_repository->index->cache_changed
|
||||
|| !cache_tree_fully_valid(the_repository->index->cache_tree))
|
||||
cache_tree_update(the_repository->index, WRITE_TREE_SILENT);
|
||||
if (write_locked_index(the_repository->index, &index_lock,
|
||||
COMMIT_LOCK | SKIP_IF_UNCHANGED))
|
||||
die(_("unable to write new index file"));
|
||||
commit_style = COMMIT_AS_IS;
|
||||
|
@ -516,15 +515,15 @@ static const char *prepare_index(const char **argv, const char *prefix,
|
|||
if (list_paths(&partial, !current_head ? NULL : "HEAD", &pathspec))
|
||||
exit(1);
|
||||
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
if (repo_read_index(the_repository) < 0)
|
||||
die(_("cannot read the index"));
|
||||
|
||||
repo_hold_locked_index(the_repository, &index_lock, LOCK_DIE_ON_ERROR);
|
||||
add_remove_files(&partial);
|
||||
refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
cache_tree_update(&the_index, WRITE_TREE_SILENT);
|
||||
if (write_locked_index(&the_index, &index_lock, 0))
|
||||
refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
cache_tree_update(the_repository->index, WRITE_TREE_SILENT);
|
||||
if (write_locked_index(the_repository->index, &index_lock, 0))
|
||||
die(_("unable to write new index file"));
|
||||
|
||||
hold_lock_file_for_update(&false_lock,
|
||||
|
@ -534,14 +533,14 @@ static const char *prepare_index(const char **argv, const char *prefix,
|
|||
|
||||
create_base_index(current_head);
|
||||
add_remove_files(&partial);
|
||||
refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
|
||||
if (write_locked_index(&the_index, &false_lock, 0))
|
||||
if (write_locked_index(the_repository->index, &false_lock, 0))
|
||||
die(_("unable to write temporary index file"));
|
||||
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
ret = get_lock_file_path(&false_lock);
|
||||
read_index_from(&the_index, ret, get_git_dir());
|
||||
read_index_from(the_repository->index, ret, get_git_dir());
|
||||
out:
|
||||
string_list_clear(&partial, 0);
|
||||
clear_pathspec(&pathspec);
|
||||
|
@ -999,7 +998,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
|
|||
struct object_id oid;
|
||||
const char *parent = "HEAD";
|
||||
|
||||
if (!the_index.initialized && repo_read_index(the_repository) < 0)
|
||||
if (!the_repository->index->initialized && repo_read_index(the_repository) < 0)
|
||||
die(_("Cannot read index"));
|
||||
|
||||
if (amend)
|
||||
|
@ -1009,11 +1008,11 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
|
|||
int i, ita_nr = 0;
|
||||
|
||||
/* TODO: audit for interaction with sparse-index. */
|
||||
ensure_full_index(&the_index);
|
||||
for (i = 0; i < the_index.cache_nr; i++)
|
||||
if (ce_intent_to_add(the_index.cache[i]))
|
||||
ensure_full_index(the_repository->index);
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++)
|
||||
if (ce_intent_to_add(the_repository->index->cache[i]))
|
||||
ita_nr++;
|
||||
committable = the_index.cache_nr - ita_nr > 0;
|
||||
committable = the_repository->index->cache_nr - ita_nr > 0;
|
||||
} else {
|
||||
/*
|
||||
* Unless the user did explicitly request a submodule
|
||||
|
@ -1081,11 +1080,11 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
|
|||
* and could have updated it. We must do this before we invoke
|
||||
* the editor and after we invoke run_status above.
|
||||
*/
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
}
|
||||
read_index_from(&the_index, index_file, get_git_dir());
|
||||
read_index_from(the_repository->index, index_file, get_git_dir());
|
||||
|
||||
if (cache_tree_update(&the_index, 0)) {
|
||||
if (cache_tree_update(the_repository->index, 0)) {
|
||||
error(_("Error building trees"));
|
||||
return 0;
|
||||
}
|
||||
|
@ -1586,7 +1585,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
|
|||
status_format != STATUS_FORMAT_PORCELAIN_V2)
|
||||
progress_flag = REFRESH_PROGRESS;
|
||||
repo_read_index(the_repository);
|
||||
refresh_index(&the_index,
|
||||
refresh_index(the_repository->index,
|
||||
REFRESH_QUIET|REFRESH_UNMERGED|progress_flag,
|
||||
&s.pathspec, NULL, NULL);
|
||||
|
||||
|
@ -1856,7 +1855,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
|
|||
append_merge_tag_headers(parents, &tail);
|
||||
}
|
||||
|
||||
if (commit_tree_extended(sb.buf, sb.len, &the_index.cache_tree->oid,
|
||||
if (commit_tree_extended(sb.buf, sb.len, &the_repository->index->cache_tree->oid,
|
||||
parents, &oid, author_ident.buf, NULL,
|
||||
sign_commit, extra)) {
|
||||
rollback_index_files();
|
||||
|
|
|
@ -115,7 +115,9 @@ static int read_request(FILE *fh, struct credential *c,
|
|||
return error("client sent bogus timeout line: %s", item.buf);
|
||||
*timeout = atoi(p);
|
||||
|
||||
if (credential_read(c, fh) < 0)
|
||||
credential_set_all_capabilities(c, CREDENTIAL_OP_INITIAL);
|
||||
|
||||
if (credential_read(c, fh, CREDENTIAL_OP_HELPER) < 0)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -131,8 +133,18 @@ static void serve_one_client(FILE *in, FILE *out)
|
|||
else if (!strcmp(action.buf, "get")) {
|
||||
struct credential_cache_entry *e = lookup_credential(&c);
|
||||
if (e) {
|
||||
fprintf(out, "username=%s\n", e->item.username);
|
||||
fprintf(out, "password=%s\n", e->item.password);
|
||||
e->item.capa_authtype.request_initial = 1;
|
||||
e->item.capa_authtype.request_helper = 1;
|
||||
|
||||
fprintf(out, "capability[]=authtype\n");
|
||||
if (e->item.username)
|
||||
fprintf(out, "username=%s\n", e->item.username);
|
||||
if (e->item.password)
|
||||
fprintf(out, "password=%s\n", e->item.password);
|
||||
if (credential_has_capability(&c.capa_authtype, CREDENTIAL_OP_HELPER) && e->item.authtype)
|
||||
fprintf(out, "authtype=%s\n", e->item.authtype);
|
||||
if (credential_has_capability(&c.capa_authtype, CREDENTIAL_OP_HELPER) && e->item.credential)
|
||||
fprintf(out, "credential=%s\n", e->item.credential);
|
||||
if (e->item.password_expiry_utc != TIME_MAX)
|
||||
fprintf(out, "password_expiry_utc=%"PRItime"\n",
|
||||
e->item.password_expiry_utc);
|
||||
|
@ -157,8 +169,10 @@ static void serve_one_client(FILE *in, FILE *out)
|
|||
else if (!strcmp(action.buf, "store")) {
|
||||
if (timeout < 0)
|
||||
warning("cache client didn't specify a timeout");
|
||||
else if (!c.username || !c.password)
|
||||
else if ((!c.username || !c.password) && (!c.authtype && !c.credential))
|
||||
warning("cache client gave us a partial credential");
|
||||
else if (c.ephemeral)
|
||||
warning("not storing ephemeral credential");
|
||||
else {
|
||||
remove_credential(&c, 0);
|
||||
cache_credential(&c, timeout);
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#include "builtin.h"
|
||||
#include "credential.h"
|
||||
#include "gettext.h"
|
||||
#include "parse-options.h"
|
||||
#include "path.h"
|
||||
|
@ -127,6 +128,13 @@ static char *get_socket_path(void)
|
|||
return socket;
|
||||
}
|
||||
|
||||
static void announce_capabilities(void)
|
||||
{
|
||||
struct credential c = CREDENTIAL_INIT;
|
||||
c.capa_authtype.request_initial = 1;
|
||||
credential_announce_capabilities(&c, stdout);
|
||||
}
|
||||
|
||||
int cmd_credential_cache(int argc, const char **argv, const char *prefix)
|
||||
{
|
||||
char *socket_path = NULL;
|
||||
|
@ -163,6 +171,8 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix)
|
|||
do_cache(socket_path, op, timeout, FLAG_RELAY);
|
||||
else if (!strcmp(op, "store"))
|
||||
do_cache(socket_path, op, timeout, FLAG_RELAY|FLAG_SPAWN);
|
||||
else if (!strcmp(op, "capability"))
|
||||
announce_capabilities();
|
||||
else
|
||||
; /* ignore unknown operation */
|
||||
|
||||
|
|
|
@ -205,7 +205,7 @@ int cmd_credential_store(int argc, const char **argv, const char *prefix)
|
|||
if (!fns.nr)
|
||||
die("unable to set up default path; use --file");
|
||||
|
||||
if (credential_read(&c, stdin) < 0)
|
||||
if (credential_read(&c, stdin, CREDENTIAL_OP_HELPER) < 0)
|
||||
die("unable to read credential");
|
||||
|
||||
if (!strcmp(op, "get"))
|
||||
|
|
|
@ -17,15 +17,24 @@ int cmd_credential(int argc, const char **argv, const char *prefix UNUSED)
|
|||
usage(usage_msg);
|
||||
op = argv[1];
|
||||
|
||||
if (credential_read(&c, stdin) < 0)
|
||||
if (!strcmp(op, "capability")) {
|
||||
credential_set_all_capabilities(&c, CREDENTIAL_OP_INITIAL);
|
||||
credential_announce_capabilities(&c, stdout);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (credential_read(&c, stdin, CREDENTIAL_OP_INITIAL) < 0)
|
||||
die("unable to read credential from stdin");
|
||||
|
||||
if (!strcmp(op, "fill")) {
|
||||
credential_fill(&c);
|
||||
credential_write(&c, stdout);
|
||||
credential_fill(&c, 0);
|
||||
credential_next_state(&c);
|
||||
credential_write(&c, stdout, CREDENTIAL_OP_RESPONSE);
|
||||
} else if (!strcmp(op, "approve")) {
|
||||
credential_set_all_capabilities(&c, CREDENTIAL_OP_HELPER);
|
||||
credential_approve(&c);
|
||||
} else if (!strcmp(op, "reject")) {
|
||||
credential_set_all_capabilities(&c, CREDENTIAL_OP_HELPER);
|
||||
credential_reject(&c);
|
||||
} else {
|
||||
usage(usage_msg);
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "environment.h"
|
||||
|
@ -674,7 +673,7 @@ int cmd_describe(int argc, const char **argv, const char *prefix)
|
|||
prepare_repo_settings(the_repository);
|
||||
the_repository->settings.command_requires_full_index = 0;
|
||||
repo_read_index(the_repository);
|
||||
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED,
|
||||
refresh_index(the_repository->index, REFRESH_QUIET|REFRESH_UNMERGED,
|
||||
NULL, NULL, NULL);
|
||||
fd = repo_hold_locked_index(the_repository,
|
||||
&index_lock, 0);
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "diff.h"
|
||||
|
@ -206,7 +205,7 @@ int cmd_diff_tree(int argc, const char **argv, const char *prefix)
|
|||
opt->diffopt.rotate_to_strict = 0;
|
||||
opt->diffopt.no_free = 1;
|
||||
if (opt->diffopt.detect_rename) {
|
||||
if (!the_index.cache)
|
||||
if (the_repository->index->cache)
|
||||
repo_read_index(the_repository);
|
||||
opt->diffopt.setup |= DIFF_SETUP_USE_SIZE_CACHE;
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (c) 2006 Junio C Hamano
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "ewah/ewok.h"
|
||||
|
@ -239,9 +239,9 @@ static void refresh_index_quietly(void)
|
|||
fd = repo_hold_locked_index(the_repository, &lock_file, 0);
|
||||
if (fd < 0)
|
||||
return;
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
repo_read_index(the_repository);
|
||||
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL,
|
||||
refresh_index(the_repository->index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL,
|
||||
NULL);
|
||||
repo_update_index_if_able(the_repository, &lock_file);
|
||||
}
|
||||
|
@ -465,6 +465,15 @@ int cmd_diff(int argc, const char **argv, const char *prefix)
|
|||
no_index = DIFF_NO_INDEX_IMPLICIT;
|
||||
}
|
||||
|
||||
/*
|
||||
* When operating outside of a Git repository we need to have a hash
|
||||
* algorithm at hand so that we can generate the blob hashes. We
|
||||
* default to SHA1 here, but may eventually want to change this to be
|
||||
* configurable via a command line option.
|
||||
*/
|
||||
if (nongit)
|
||||
repo_set_hash_algo(the_repository, GIT_HASH_SHA1);
|
||||
|
||||
init_diff_ui_defaults();
|
||||
git_config(git_diff_ui_config, NULL);
|
||||
prefix = precompose_argv_prefix(argc, argv, prefix);
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
*
|
||||
* Copyright (C) 2016 Johannes Schindelin
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "config.h"
|
||||
|
@ -117,7 +117,7 @@ static int use_wt_file(const char *workdir, const char *name,
|
|||
int fd = open(buf.buf, O_RDONLY);
|
||||
|
||||
if (fd >= 0 &&
|
||||
!index_fd(&the_index, &wt_oid, fd, &st, OBJ_BLOB, name, 0)) {
|
||||
!index_fd(the_repository->index, &wt_oid, fd, &st, OBJ_BLOB, name, 0)) {
|
||||
if (is_null_oid(oid)) {
|
||||
oidcpy(oid, &wt_oid);
|
||||
use = 1;
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#include "object-store-ll.h"
|
||||
#include "mem-pool.h"
|
||||
#include "commit-reach.h"
|
||||
#include "khash.h"
|
||||
#include "khashl.h"
|
||||
#include "date.h"
|
||||
|
||||
#define PACK_ID_BITS 16
|
||||
|
@ -1634,7 +1634,7 @@ static int update_branch(struct branch *b)
|
|||
transaction = ref_transaction_begin(&err);
|
||||
if (!transaction ||
|
||||
ref_transaction_update(transaction, b->name, &b->oid, &old_oid,
|
||||
0, msg, &err) ||
|
||||
NULL, NULL, 0, msg, &err) ||
|
||||
ref_transaction_commit(transaction, &err)) {
|
||||
ref_transaction_free(transaction);
|
||||
error("%s", err.buf);
|
||||
|
@ -1675,7 +1675,8 @@ static void dump_tags(void)
|
|||
strbuf_addf(&ref_name, "refs/tags/%s", t->name);
|
||||
|
||||
if (ref_transaction_update(transaction, ref_name.buf,
|
||||
&t->oid, NULL, 0, msg, &err)) {
|
||||
&t->oid, NULL, NULL, NULL,
|
||||
0, msg, &err)) {
|
||||
failure |= error("%s", err.buf);
|
||||
goto cleanup;
|
||||
}
|
||||
|
|
|
@ -668,7 +668,7 @@ static int s_update_ref(const char *action,
|
|||
|
||||
ret = ref_transaction_update(transaction, ref->name, &ref->new_oid,
|
||||
check_old ? &ref->old_oid : NULL,
|
||||
0, msg, &err);
|
||||
NULL, NULL, 0, msg, &err);
|
||||
if (ret) {
|
||||
ret = STORE_REF_ERROR_OTHER;
|
||||
goto out;
|
||||
|
@ -1383,7 +1383,7 @@ static int prune_refs(struct display_state *display_state,
|
|||
if (transaction) {
|
||||
for (ref = stale_refs; ref; ref = ref->next) {
|
||||
result = ref_transaction_delete(transaction, ref->name, NULL, 0,
|
||||
"fetch: prune", &err);
|
||||
NULL, "fetch: prune", &err);
|
||||
if (result)
|
||||
goto cleanup;
|
||||
}
|
||||
|
|
|
@ -32,6 +32,7 @@ static int run_command_on_repo(const char *path, int argc, const char ** argv)
|
|||
int cmd_for_each_repo(int argc, const char **argv, const char *prefix)
|
||||
{
|
||||
static const char *config_key = NULL;
|
||||
int keep_going = 0;
|
||||
int i, result = 0;
|
||||
const struct string_list *values;
|
||||
int err;
|
||||
|
@ -39,6 +40,8 @@ int cmd_for_each_repo(int argc, const char **argv, const char *prefix)
|
|||
const struct option options[] = {
|
||||
OPT_STRING(0, "config", &config_key, N_("config"),
|
||||
N_("config key storing a list of repository paths")),
|
||||
OPT_BOOL(0, "keep-going", &keep_going,
|
||||
N_("keep going even if command fails in a repository")),
|
||||
OPT_END()
|
||||
};
|
||||
|
||||
|
@ -55,8 +58,14 @@ int cmd_for_each_repo(int argc, const char **argv, const char *prefix)
|
|||
else if (err)
|
||||
return 0;
|
||||
|
||||
for (i = 0; !result && i < values->nr; i++)
|
||||
result = run_command_on_repo(values->items[i].string, argc, argv);
|
||||
for (i = 0; i < values->nr; i++) {
|
||||
int ret = run_command_on_repo(values->items[i].string, argc, argv);
|
||||
if (ret) {
|
||||
if (!keep_going)
|
||||
return ret;
|
||||
result = 1;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#include "fsmonitor--daemon.h"
|
||||
#include "repository.h"
|
||||
#include "simple-ipc.h"
|
||||
#include "khash.h"
|
||||
#include "khashl.h"
|
||||
#include "run-command.h"
|
||||
#include "trace.h"
|
||||
#include "trace2.h"
|
||||
|
@ -650,7 +650,7 @@ static int fsmonitor_parse_client_token(const char *buf_token,
|
|||
return 0;
|
||||
}
|
||||
|
||||
KHASH_INIT(str, const char *, int, 0, kh_str_hash_func, kh_str_hash_equal)
|
||||
KHASHL_SET_INIT(KH_LOCAL, kh_str, str, const char *, kh_hash_str, kh_eq_str)
|
||||
|
||||
static int do_handle_client(struct fsmonitor_daemon_state *state,
|
||||
const char *command,
|
||||
|
|
|
@ -1870,6 +1870,7 @@ static int launchctl_schedule_plist(const char *exec_path, enum schedule_priorit
|
|||
"<string>%s/git</string>\n"
|
||||
"<string>--exec-path=%s</string>\n"
|
||||
"<string>for-each-repo</string>\n"
|
||||
"<string>--keep-going</string>\n"
|
||||
"<string>--config=maintenance.repo</string>\n"
|
||||
"<string>maintenance</string>\n"
|
||||
"<string>run</string>\n"
|
||||
|
@ -2112,7 +2113,7 @@ static int schtasks_schedule_task(const char *exec_path, enum schedule_priority
|
|||
"<Actions Context=\"Author\">\n"
|
||||
"<Exec>\n"
|
||||
"<Command>\"%s\\headless-git.exe\"</Command>\n"
|
||||
"<Arguments>--exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%s</Arguments>\n"
|
||||
"<Arguments>--exec-path=\"%s\" for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%s</Arguments>\n"
|
||||
"</Exec>\n"
|
||||
"</Actions>\n"
|
||||
"</Task>\n";
|
||||
|
@ -2257,7 +2258,7 @@ static int crontab_update_schedule(int run_maintenance, int fd)
|
|||
"# replaced in the future by a Git command.\n\n");
|
||||
|
||||
strbuf_addf(&line_format,
|
||||
"%%d %%s * * %%s \"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%s\n",
|
||||
"%%d %%s * * %%s \"%s/git\" --exec-path=\"%s\" for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%%s\n",
|
||||
exec_path, exec_path);
|
||||
fprintf(cron_in, line_format.buf, minute, "1-23", "*", "hourly");
|
||||
fprintf(cron_in, line_format.buf, minute, "0", "1-6", "daily");
|
||||
|
@ -2458,7 +2459,7 @@ static int systemd_timer_write_service_template(const char *exec_path)
|
|||
"\n"
|
||||
"[Service]\n"
|
||||
"Type=oneshot\n"
|
||||
"ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --config=maintenance.repo maintenance run --schedule=%%i\n"
|
||||
"ExecStart=\"%s/git\" --exec-path=\"%s\" for-each-repo --keep-going --config=maintenance.repo maintenance run --schedule=%%i\n"
|
||||
"LockPersonality=yes\n"
|
||||
"MemoryDenyWriteExecute=yes\n"
|
||||
"NoNewPrivileges=yes\n"
|
||||
|
|
|
@ -141,7 +141,7 @@ static void interpret_trailers(const struct process_trailer_options *opts,
|
|||
LIST_HEAD(head);
|
||||
struct strbuf sb = STRBUF_INIT;
|
||||
struct strbuf trailer_block = STRBUF_INIT;
|
||||
struct trailer_info info;
|
||||
struct trailer_info *info;
|
||||
FILE *outfile = stdout;
|
||||
|
||||
trailer_config_init();
|
||||
|
@ -151,13 +151,13 @@ static void interpret_trailers(const struct process_trailer_options *opts,
|
|||
if (opts->in_place)
|
||||
outfile = create_in_place_tempfile(file);
|
||||
|
||||
parse_trailers(opts, &info, sb.buf, &head);
|
||||
info = parse_trailers(opts, sb.buf, &head);
|
||||
|
||||
/* Print the lines before the trailers */
|
||||
if (!opts->only_trailers)
|
||||
fwrite(sb.buf, 1, info.trailer_block_start, outfile);
|
||||
fwrite(sb.buf, 1, trailer_block_start(info), outfile);
|
||||
|
||||
if (!opts->only_trailers && !info.blank_line_before_trailer)
|
||||
if (!opts->only_trailers && !blank_line_before_trailer_block(info))
|
||||
fprintf(outfile, "\n");
|
||||
|
||||
|
||||
|
@ -178,8 +178,8 @@ static void interpret_trailers(const struct process_trailer_options *opts,
|
|||
|
||||
/* Print the lines after the trailers as is */
|
||||
if (!opts->only_trailers)
|
||||
fwrite(sb.buf + info.trailer_block_end, 1, sb.len - info.trailer_block_end, outfile);
|
||||
trailer_info_release(&info);
|
||||
fwrite(sb.buf + trailer_block_end(info), 1, sb.len - trailer_block_end(info), outfile);
|
||||
trailer_info_release(info);
|
||||
|
||||
if (opts->in_place)
|
||||
if (rename_tempfile(&trailers_tempfile, file))
|
||||
|
|
|
@ -1494,6 +1494,19 @@ static int subject_prefix_callback(const struct option *opt, const char *arg,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int rfc_callback(const struct option *opt, const char *arg,
|
||||
int unset)
|
||||
{
|
||||
const char **rfc = opt->value;
|
||||
|
||||
*rfc = opt->value;
|
||||
if (unset)
|
||||
*rfc = NULL;
|
||||
else
|
||||
*rfc = arg ? arg : "RFC";
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int numbered_cmdline_opt = 0;
|
||||
|
||||
static int numbered_callback(const struct option *opt, const char *arg,
|
||||
|
@ -1907,8 +1920,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
|
|||
struct strbuf rdiff2 = STRBUF_INIT;
|
||||
struct strbuf rdiff_title = STRBUF_INIT;
|
||||
struct strbuf sprefix = STRBUF_INIT;
|
||||
const char *rfc = NULL;
|
||||
int creation_factor = -1;
|
||||
int rfc = 0;
|
||||
|
||||
const struct option builtin_format_patch_options[] = {
|
||||
OPT_CALLBACK_F('n', "numbered", &numbered, NULL,
|
||||
|
@ -1932,7 +1945,9 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
|
|||
N_("mark the series as Nth re-roll")),
|
||||
OPT_INTEGER(0, "filename-max-length", &fmt_patch_name_max,
|
||||
N_("max length of output filename")),
|
||||
OPT_BOOL(0, "rfc", &rfc, N_("use [RFC PATCH] instead of [PATCH]")),
|
||||
OPT_CALLBACK_F(0, "rfc", &rfc, N_("rfc"),
|
||||
N_("add <rfc> (default 'RFC') before 'PATCH'"),
|
||||
PARSE_OPT_OPTARG, rfc_callback),
|
||||
OPT_STRING(0, "cover-from-description", &cover_from_description_arg,
|
||||
N_("cover-from-description-mode"),
|
||||
N_("generate parts of a cover letter based on a branch's description")),
|
||||
|
@ -2050,8 +2065,13 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
|
|||
if (cover_from_description_arg)
|
||||
cover_from_description_mode = parse_cover_from_description(cover_from_description_arg);
|
||||
|
||||
if (rfc)
|
||||
strbuf_insertstr(&sprefix, 0, "RFC ");
|
||||
if (rfc && rfc[0]) {
|
||||
subject_prefix = 1;
|
||||
if (rfc[0] == '-')
|
||||
strbuf_addf(&sprefix, " %s", rfc + 1);
|
||||
else
|
||||
strbuf_insertf(&sprefix, 0, "%s ", rfc);
|
||||
}
|
||||
|
||||
if (reroll_count) {
|
||||
strbuf_addf(&sprefix, " v%s", reroll_count);
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "hex.h"
|
||||
#include "read-cache-ll.h"
|
||||
|
@ -18,11 +17,11 @@ static int merge_entry(int pos, const char *path)
|
|||
char ownbuf[4][60];
|
||||
struct child_process cmd = CHILD_PROCESS_INIT;
|
||||
|
||||
if (pos >= the_index.cache_nr)
|
||||
if (pos >= the_repository->index->cache_nr)
|
||||
die("git merge-index: %s not in the cache", path);
|
||||
found = 0;
|
||||
do {
|
||||
const struct cache_entry *ce = the_index.cache[pos];
|
||||
const struct cache_entry *ce = the_repository->index->cache[pos];
|
||||
int stage = ce_stage(ce);
|
||||
|
||||
if (strcmp(ce->name, path))
|
||||
|
@ -32,7 +31,7 @@ static int merge_entry(int pos, const char *path)
|
|||
xsnprintf(ownbuf[stage], sizeof(ownbuf[stage]), "%o", ce->ce_mode);
|
||||
arguments[stage] = hexbuf[stage];
|
||||
arguments[stage + 4] = ownbuf[stage];
|
||||
} while (++pos < the_index.cache_nr);
|
||||
} while (++pos < the_repository->index->cache_nr);
|
||||
if (!found)
|
||||
die("git merge-index: %s not in the cache", path);
|
||||
|
||||
|
@ -51,7 +50,7 @@ static int merge_entry(int pos, const char *path)
|
|||
|
||||
static void merge_one_path(const char *path)
|
||||
{
|
||||
int pos = index_name_pos(&the_index, path, strlen(path));
|
||||
int pos = index_name_pos(the_repository->index, path, strlen(path));
|
||||
|
||||
/*
|
||||
* If it already exists in the cache as stage0, it's
|
||||
|
@ -65,9 +64,9 @@ static void merge_all(void)
|
|||
{
|
||||
int i;
|
||||
/* TODO: audit for interaction with sparse-index. */
|
||||
ensure_full_index(&the_index);
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_index.cache[i];
|
||||
ensure_full_index(the_repository->index);
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[i];
|
||||
if (!ce_stage(ce))
|
||||
continue;
|
||||
i += merge_entry(i, ce->name)-1;
|
||||
|
@ -89,7 +88,7 @@ int cmd_merge_index(int argc, const char **argv, const char *prefix UNUSED)
|
|||
repo_read_index(the_repository);
|
||||
|
||||
/* TODO: audit for interaction with sparse-index. */
|
||||
ensure_full_index(&the_index);
|
||||
ensure_full_index(the_repository->index);
|
||||
|
||||
i = 1;
|
||||
if (!strcmp(argv[i], "-o")) {
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "tree-walk.h"
|
||||
#include "xdiff-interface.h"
|
||||
|
@ -364,7 +363,7 @@ static void trivial_merge_trees(struct tree_desc t[3], const char *base)
|
|||
|
||||
setup_traverse_info(&info, base);
|
||||
info.fn = threeway_callback;
|
||||
traverse_trees(&the_index, 3, t, &info);
|
||||
traverse_trees(the_repository->index, 3, t, &info);
|
||||
}
|
||||
|
||||
static void *get_tree_descriptor(struct repository *r,
|
||||
|
|
|
@ -6,7 +6,6 @@
|
|||
* Based on git-merge.sh by Junio C Hamano.
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "advice.h"
|
||||
|
@ -300,7 +299,7 @@ static int save_state(struct object_id *stash)
|
|||
int rc = -1;
|
||||
|
||||
fd = repo_hold_locked_index(the_repository, &lock_file, 0);
|
||||
refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
if (0 <= fd)
|
||||
repo_update_index_if_able(the_repository, &lock_file);
|
||||
rollback_lock_file(&lock_file);
|
||||
|
@ -372,7 +371,7 @@ static void restore_state(const struct object_id *head,
|
|||
run_command(&cmd);
|
||||
|
||||
refresh_cache:
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
if (repo_read_index(the_repository) < 0)
|
||||
die(_("could not read index"));
|
||||
}
|
||||
|
@ -657,8 +656,8 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
|
|||
|
||||
memset(&opts, 0, sizeof(opts));
|
||||
opts.head_idx = 2;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
opts.update = 1;
|
||||
opts.verbose_update = 1;
|
||||
opts.trivial_merges_only = 1;
|
||||
|
@ -674,7 +673,7 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
|
|||
if (!trees[nr_trees++])
|
||||
return -1;
|
||||
opts.fn = threeway_merge;
|
||||
cache_tree_free(&the_index.cache_tree);
|
||||
cache_tree_free(&the_repository->index->cache_tree);
|
||||
for (i = 0; i < nr_trees; i++) {
|
||||
parse_tree(trees[i]);
|
||||
init_tree_desc(t+i, &trees[i]->object.oid,
|
||||
|
@ -687,7 +686,7 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
|
|||
|
||||
static void write_tree_trivial(struct object_id *oid)
|
||||
{
|
||||
if (write_index_as_tree(oid, &the_index, get_index_file(), 0, NULL))
|
||||
if (write_index_as_tree(oid, the_repository->index, get_index_file(), 0, NULL))
|
||||
die(_("git write-tree failed to write a tree"));
|
||||
}
|
||||
|
||||
|
@ -745,7 +744,7 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
|
|||
rollback_lock_file(&lock);
|
||||
return 2;
|
||||
}
|
||||
if (write_locked_index(&the_index, &lock,
|
||||
if (write_locked_index(the_repository->index, &lock,
|
||||
COMMIT_LOCK | SKIP_IF_UNCHANGED))
|
||||
die(_("unable to write %s"), get_index_file());
|
||||
return clean ? 0 : 1;
|
||||
|
@ -768,8 +767,8 @@ static int count_unmerged_entries(void)
|
|||
{
|
||||
int i, ret = 0;
|
||||
|
||||
for (i = 0; i < the_index.cache_nr; i++)
|
||||
if (ce_stage(the_index.cache[i]))
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++)
|
||||
if (ce_stage(the_repository->index->cache[i]))
|
||||
ret++;
|
||||
|
||||
return ret;
|
||||
|
@ -843,9 +842,9 @@ static void prepare_to_commit(struct commit_list *remoteheads)
|
|||
* the editor and after we invoke run_status above.
|
||||
*/
|
||||
if (invoked_hook)
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
}
|
||||
read_index_from(&the_index, index_file, get_git_dir());
|
||||
read_index_from(the_repository->index, index_file, get_git_dir());
|
||||
strbuf_addbuf(&msg, &merge_msg);
|
||||
if (squash)
|
||||
BUG("the control must not reach here under --squash");
|
||||
|
@ -957,7 +956,7 @@ static int suggest_conflicts(void)
|
|||
* Thus, we will get the cleanup mode which is returned when we _are_
|
||||
* using an editor.
|
||||
*/
|
||||
append_conflicts_hint(&the_index, &msgbuf,
|
||||
append_conflicts_hint(the_repository->index, &msgbuf,
|
||||
get_cleanup_mode(cleanup_arg, 1));
|
||||
fputs(msgbuf.buf, fp);
|
||||
strbuf_release(&msgbuf);
|
||||
|
@ -1386,7 +1385,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
|
|||
else
|
||||
die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists)."));
|
||||
}
|
||||
resolve_undo_clear_index(&the_index);
|
||||
resolve_undo_clear_index(the_repository->index);
|
||||
|
||||
if (option_edit < 0)
|
||||
option_edit = default_edit_option();
|
||||
|
@ -1595,7 +1594,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
|
|||
* We are not doing octopus, not fast-forward, and have
|
||||
* only one common.
|
||||
*/
|
||||
refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL);
|
||||
if (allow_trivial && fast_forward != FF_ONLY) {
|
||||
/*
|
||||
* Must first ensure that index matches HEAD before
|
||||
|
@ -1784,6 +1783,6 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
|
|||
}
|
||||
strbuf_release(&buf);
|
||||
free(branch_to_free);
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
#include "strbuf.h"
|
||||
#include "trace2.h"
|
||||
#include "object-store-ll.h"
|
||||
#include "replace-object.h"
|
||||
|
||||
#define BUILTIN_MIDX_WRITE_USAGE \
|
||||
N_("git multi-pack-index [<options>] write [--preferred-pack=<pack>]" \
|
||||
|
@ -273,6 +274,8 @@ int cmd_multi_pack_index(int argc, const char **argv,
|
|||
};
|
||||
struct option *options = parse_options_concat(builtin_multi_pack_index_options, common_opts);
|
||||
|
||||
disable_replace_refs();
|
||||
|
||||
git_config(git_default_config, NULL);
|
||||
|
||||
if (the_repository &&
|
||||
|
|
68
builtin/mv.c
68
builtin/mv.c
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) 2006 Johannes Schindelin
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "advice.h"
|
||||
|
@ -95,9 +95,9 @@ static void prepare_move_submodule(const char *src, int first,
|
|||
const char **submodule_gitfile)
|
||||
{
|
||||
struct strbuf submodule_dotgit = STRBUF_INIT;
|
||||
if (!S_ISGITLINK(the_index.cache[first]->ce_mode))
|
||||
if (!S_ISGITLINK(the_repository->index->cache[first]->ce_mode))
|
||||
die(_("Directory %s is in index and no submodule?"), src);
|
||||
if (!is_staging_gitmodules_ok(&the_index))
|
||||
if (!is_staging_gitmodules_ok(the_repository->index))
|
||||
die(_("Please stage your changes to .gitmodules or stash them to proceed"));
|
||||
strbuf_addf(&submodule_dotgit, "%s/.git", src);
|
||||
*submodule_gitfile = read_gitfile(submodule_dotgit.buf);
|
||||
|
@ -114,13 +114,13 @@ static int index_range_of_same_dir(const char *src, int length,
|
|||
const char *src_w_slash = add_slash(src);
|
||||
int first, last, len_w_slash = length + 1;
|
||||
|
||||
first = index_name_pos(&the_index, src_w_slash, len_w_slash);
|
||||
first = index_name_pos(the_repository->index, src_w_slash, len_w_slash);
|
||||
if (first >= 0)
|
||||
die(_("%.*s is in index"), len_w_slash, src_w_slash);
|
||||
|
||||
first = -1 - first;
|
||||
for (last = first; last < the_index.cache_nr; last++) {
|
||||
const char *path = the_index.cache[last]->name;
|
||||
for (last = first; last < the_repository->index->cache_nr; last++) {
|
||||
const char *path = the_repository->index->cache[last]->name;
|
||||
if (strncmp(path, src_w_slash, len_w_slash))
|
||||
break;
|
||||
}
|
||||
|
@ -144,14 +144,14 @@ static int empty_dir_has_sparse_contents(const char *name)
|
|||
const char *with_slash = add_slash(name);
|
||||
int length = strlen(with_slash);
|
||||
|
||||
int pos = index_name_pos(&the_index, with_slash, length);
|
||||
int pos = index_name_pos(the_repository->index, with_slash, length);
|
||||
const struct cache_entry *ce;
|
||||
|
||||
if (pos < 0) {
|
||||
pos = -pos - 1;
|
||||
if (pos >= the_index.cache_nr)
|
||||
if (pos >= the_repository->index->cache_nr)
|
||||
goto free_return;
|
||||
ce = the_index.cache[pos];
|
||||
ce = the_repository->index->cache[pos];
|
||||
if (strncmp(with_slash, ce->name, length))
|
||||
goto free_return;
|
||||
if (ce_skip_worktree(ce))
|
||||
|
@ -223,7 +223,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
S_ISDIR(st.st_mode)) {
|
||||
destination = internal_prefix_pathspec(dst_w_slash, argv, argc, DUP_BASENAME);
|
||||
} else {
|
||||
if (!path_in_sparse_checkout(dst_w_slash, &the_index) &&
|
||||
if (!path_in_sparse_checkout(dst_w_slash, the_repository->index) &&
|
||||
empty_dir_has_sparse_contents(dst_w_slash)) {
|
||||
destination = internal_prefix_pathspec(dst_w_slash, argv, argc, DUP_BASENAME);
|
||||
dst_mode = SKIP_WORKTREE_DIR;
|
||||
|
@ -239,7 +239,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
* is deprecated at this point) sparse-checkout. As
|
||||
* SPARSE here is only considering cone-mode situation.
|
||||
*/
|
||||
if (!path_in_cone_mode_sparse_checkout(destination[0], &the_index))
|
||||
if (!path_in_cone_mode_sparse_checkout(destination[0], the_repository->index))
|
||||
dst_mode = SPARSE;
|
||||
}
|
||||
}
|
||||
|
@ -263,10 +263,10 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
int pos;
|
||||
const struct cache_entry *ce;
|
||||
|
||||
pos = index_name_pos(&the_index, src, length);
|
||||
pos = index_name_pos(the_repository->index, src, length);
|
||||
if (pos < 0) {
|
||||
const char *src_w_slash = add_slash(src);
|
||||
if (!path_in_sparse_checkout(src_w_slash, &the_index) &&
|
||||
if (!path_in_sparse_checkout(src_w_slash, the_repository->index) &&
|
||||
empty_dir_has_sparse_contents(src)) {
|
||||
modes[i] |= SKIP_WORKTREE_DIR;
|
||||
goto dir_check;
|
||||
|
@ -276,7 +276,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
bad = _("bad source");
|
||||
goto act_on_entry;
|
||||
}
|
||||
ce = the_index.cache[pos];
|
||||
ce = the_repository->index->cache[pos];
|
||||
if (!ce_skip_worktree(ce)) {
|
||||
bad = _("bad source");
|
||||
goto act_on_entry;
|
||||
|
@ -286,7 +286,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
goto act_on_entry;
|
||||
}
|
||||
/* Check if dst exists in index */
|
||||
if (index_name_pos(&the_index, dst, strlen(dst)) < 0) {
|
||||
if (index_name_pos(the_repository->index, dst, strlen(dst)) < 0) {
|
||||
modes[i] |= SPARSE;
|
||||
goto act_on_entry;
|
||||
}
|
||||
|
@ -311,7 +311,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
dir_check:
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
int j, dst_len, n;
|
||||
int first = index_name_pos(&the_index, src, length), last;
|
||||
int first = index_name_pos(the_repository->index, src, length), last;
|
||||
|
||||
if (first >= 0) {
|
||||
prepare_move_submodule(src, first,
|
||||
|
@ -339,7 +339,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
dst_len = strlen(dst);
|
||||
|
||||
for (j = 0; j < last - first; j++) {
|
||||
const struct cache_entry *ce = the_index.cache[first + j];
|
||||
const struct cache_entry *ce = the_repository->index->cache[first + j];
|
||||
const char *path = ce->name;
|
||||
source[argc + j] = path;
|
||||
destination[argc + j] =
|
||||
|
@ -351,7 +351,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
argc += last - first;
|
||||
goto act_on_entry;
|
||||
}
|
||||
if (!(ce = index_file_exists(&the_index, src, length, 0))) {
|
||||
if (!(ce = index_file_exists(the_repository->index, src, length, 0))) {
|
||||
bad = _("not under version control");
|
||||
goto act_on_entry;
|
||||
}
|
||||
|
@ -387,7 +387,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
|
||||
if (ignore_sparse &&
|
||||
(dst_mode & (SKIP_WORKTREE_DIR | SPARSE)) &&
|
||||
index_entry_exists(&the_index, dst, strlen(dst))) {
|
||||
index_entry_exists(the_repository->index, dst, strlen(dst))) {
|
||||
bad = _("destination exists in the index");
|
||||
if (force) {
|
||||
if (verbose)
|
||||
|
@ -404,12 +404,12 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
* option as a way to have a successful run.
|
||||
*/
|
||||
if (!ignore_sparse &&
|
||||
!path_in_sparse_checkout(src, &the_index)) {
|
||||
!path_in_sparse_checkout(src, the_repository->index)) {
|
||||
string_list_append(&only_match_skip_worktree, src);
|
||||
skip_sparse = 1;
|
||||
}
|
||||
if (!ignore_sparse &&
|
||||
!path_in_sparse_checkout(dst, &the_index)) {
|
||||
!path_in_sparse_checkout(dst, the_repository->index)) {
|
||||
string_list_append(&only_match_skip_worktree, dst);
|
||||
skip_sparse = 1;
|
||||
}
|
||||
|
@ -449,7 +449,7 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
int pos;
|
||||
int sparse_and_dirty = 0;
|
||||
struct checkout state = CHECKOUT_INIT;
|
||||
state.istate = &the_index;
|
||||
state.istate = the_repository->index;
|
||||
|
||||
if (force)
|
||||
state.force = 1;
|
||||
|
@ -476,14 +476,14 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
if (mode & (WORKING_DIRECTORY | SKIP_WORKTREE_DIR))
|
||||
continue;
|
||||
|
||||
pos = index_name_pos(&the_index, src, strlen(src));
|
||||
pos = index_name_pos(the_repository->index, src, strlen(src));
|
||||
assert(pos >= 0);
|
||||
if (!(mode & SPARSE) && !lstat(src, &st))
|
||||
sparse_and_dirty = ie_modified(&the_index,
|
||||
the_index.cache[pos],
|
||||
sparse_and_dirty = ie_modified(the_repository->index,
|
||||
the_repository->index->cache[pos],
|
||||
&st,
|
||||
0);
|
||||
rename_index_entry_at(&the_index, pos, dst);
|
||||
rename_index_entry_at(the_repository->index, pos, dst);
|
||||
|
||||
if (ignore_sparse &&
|
||||
core_apply_sparse_checkout &&
|
||||
|
@ -495,11 +495,11 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
* should be added in a future patch.
|
||||
*/
|
||||
if ((mode & SPARSE) &&
|
||||
path_in_sparse_checkout(dst, &the_index)) {
|
||||
path_in_sparse_checkout(dst, the_repository->index)) {
|
||||
/* from out-of-cone to in-cone */
|
||||
int dst_pos = index_name_pos(&the_index, dst,
|
||||
int dst_pos = index_name_pos(the_repository->index, dst,
|
||||
strlen(dst));
|
||||
struct cache_entry *dst_ce = the_index.cache[dst_pos];
|
||||
struct cache_entry *dst_ce = the_repository->index->cache[dst_pos];
|
||||
|
||||
dst_ce->ce_flags &= ~CE_SKIP_WORKTREE;
|
||||
|
||||
|
@ -507,11 +507,11 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
die(_("cannot checkout %s"), dst_ce->name);
|
||||
} else if ((dst_mode & (SKIP_WORKTREE_DIR | SPARSE)) &&
|
||||
!(mode & SPARSE) &&
|
||||
!path_in_sparse_checkout(dst, &the_index)) {
|
||||
!path_in_sparse_checkout(dst, the_repository->index)) {
|
||||
/* from in-cone to out-of-cone */
|
||||
int dst_pos = index_name_pos(&the_index, dst,
|
||||
int dst_pos = index_name_pos(the_repository->index, dst,
|
||||
strlen(dst));
|
||||
struct cache_entry *dst_ce = the_index.cache[dst_pos];
|
||||
struct cache_entry *dst_ce = the_repository->index->cache[dst_pos];
|
||||
|
||||
/*
|
||||
* if src is clean, it will suffice to remove it
|
||||
|
@ -559,9 +559,9 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
|
|||
advise_on_moving_dirty_path(&dirty_paths);
|
||||
|
||||
if (gitmodules_modified)
|
||||
stage_updated_gitmodules(&the_index);
|
||||
stage_updated_gitmodules(the_repository->index);
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file,
|
||||
if (write_locked_index(the_repository->index, &lock_file,
|
||||
COMMIT_LOCK | SKIP_IF_UNCHANGED))
|
||||
die(_("Unable to write new index file"));
|
||||
|
||||
|
|
|
@ -1339,6 +1339,7 @@ static void write_pack_file(void)
|
|||
hash_to_hex(hash));
|
||||
|
||||
if (write_bitmap_index) {
|
||||
bitmap_writer_init(the_repository);
|
||||
bitmap_writer_set_checksum(hash);
|
||||
bitmap_writer_build_type_index(
|
||||
&to_pack, written_list, nr_written);
|
||||
|
@ -1359,7 +1360,7 @@ static void write_pack_file(void)
|
|||
stop_progress(&progress_state);
|
||||
|
||||
bitmap_writer_show_progress(progress);
|
||||
bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
|
||||
bitmap_writer_select_commits(indexed_commits, indexed_commits_nr);
|
||||
if (bitmap_writer_build(&to_pack) < 0)
|
||||
die(_("failed to write bitmap index"));
|
||||
bitmap_writer_finish(written_list, nr_written,
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*
|
||||
* Fetch one or more remote refs and merge it/them into the current HEAD.
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "advice.h"
|
||||
#include "config.h"
|
||||
|
@ -1044,7 +1044,7 @@ int cmd_pull(int argc, const char **argv, const char *prefix)
|
|||
if (opt_autostash == -1)
|
||||
opt_autostash = config_autostash;
|
||||
|
||||
if (is_null_oid(&orig_head) && !is_index_unborn(&the_index))
|
||||
if (is_null_oid(&orig_head) && !is_index_unborn(the_repository->index))
|
||||
die(_("Updating an unborn branch with changes added to the index."));
|
||||
|
||||
if (!opt_autostash)
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
* Copyright (C) Linus Torvalds, 2005
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "gettext.h"
|
||||
|
@ -159,8 +158,8 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
|
||||
memset(&opts, 0, sizeof(opts));
|
||||
opts.head_idx = -1;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
|
||||
git_config(git_read_tree_config, NULL);
|
||||
|
||||
|
@ -197,7 +196,7 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
die(_("You need to resolve your current index first"));
|
||||
stage = opts.merge = 1;
|
||||
}
|
||||
resolve_undo_clear_index(&the_index);
|
||||
resolve_undo_clear_index(the_repository->index);
|
||||
|
||||
for (i = 0; i < argc; i++) {
|
||||
const char *arg = argv[i];
|
||||
|
@ -225,7 +224,7 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
setup_work_tree();
|
||||
|
||||
if (opts.skip_sparse_checkout)
|
||||
ensure_full_index(&the_index);
|
||||
ensure_full_index(the_repository->index);
|
||||
|
||||
if (opts.merge) {
|
||||
switch (stage - 1) {
|
||||
|
@ -237,7 +236,7 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
break;
|
||||
case 2:
|
||||
opts.fn = twoway_merge;
|
||||
opts.initial_checkout = is_index_unborn(&the_index);
|
||||
opts.initial_checkout = is_index_unborn(the_repository->index);
|
||||
break;
|
||||
case 3:
|
||||
default:
|
||||
|
@ -258,7 +257,7 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
if (nr_trees == 1 && !opts.prefix)
|
||||
opts.skip_cache_tree_update = 1;
|
||||
|
||||
cache_tree_free(&the_index.cache_tree);
|
||||
cache_tree_free(&the_repository->index->cache_tree);
|
||||
for (i = 0; i < nr_trees; i++) {
|
||||
struct tree *tree = trees[i];
|
||||
if (parse_tree(tree) < 0)
|
||||
|
@ -282,7 +281,7 @@ int cmd_read_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
the_repository->index,
|
||||
trees[0]);
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die("unable to write new index file");
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
* Copyright (c) 2018 Pratik Karki
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "environment.h"
|
||||
|
@ -194,7 +193,7 @@ static struct replay_opts get_replay_opts(const struct rebase_options *opts)
|
|||
return replay;
|
||||
}
|
||||
|
||||
static int edit_todo_file(unsigned flags)
|
||||
static int edit_todo_file(unsigned flags, struct replay_opts *opts)
|
||||
{
|
||||
const char *todo_file = rebase_path_todo();
|
||||
struct todo_list todo_list = TODO_LIST_INIT,
|
||||
|
@ -205,7 +204,8 @@ static int edit_todo_file(unsigned flags)
|
|||
return error_errno(_("could not read '%s'."), todo_file);
|
||||
|
||||
strbuf_stripspace(&todo_list.buf, comment_line_str);
|
||||
res = edit_todo_list(the_repository, &todo_list, &new_todo, NULL, NULL, flags);
|
||||
res = edit_todo_list(the_repository, opts, &todo_list, &new_todo,
|
||||
NULL, NULL, flags);
|
||||
if (!res && todo_list_write_to_file(the_repository, &new_todo, todo_file,
|
||||
NULL, NULL, -1, flags & ~(TODO_LIST_SHORTEN_IDS)))
|
||||
res = error_errno(_("could not write '%s'"), todo_file);
|
||||
|
@ -295,9 +295,9 @@ static int do_interactive_rebase(struct rebase_options *opts, unsigned flags)
|
|||
if (ret)
|
||||
error(_("could not generate todo list"));
|
||||
else {
|
||||
discard_index(&the_index);
|
||||
if (todo_list_parse_insn_buffer(the_repository, todo_list.buf.buf,
|
||||
&todo_list))
|
||||
discard_index(the_repository->index);
|
||||
if (todo_list_parse_insn_buffer(the_repository, &replay,
|
||||
todo_list.buf.buf, &todo_list))
|
||||
BUG("unusable todo list");
|
||||
|
||||
ret = complete_action(the_repository, &replay, flags,
|
||||
|
@ -352,9 +352,13 @@ static int run_sequencer_rebase(struct rebase_options *opts)
|
|||
replay_opts_release(&replay_opts);
|
||||
break;
|
||||
}
|
||||
case ACTION_EDIT_TODO:
|
||||
ret = edit_todo_file(flags);
|
||||
case ACTION_EDIT_TODO: {
|
||||
struct replay_opts replay_opts = get_replay_opts(opts);
|
||||
|
||||
ret = edit_todo_file(flags, &replay_opts);
|
||||
replay_opts_release(&replay_opts);
|
||||
break;
|
||||
}
|
||||
case ACTION_SHOW_CURRENT_PATCH: {
|
||||
struct child_process cmd = CHILD_PROCESS_INIT;
|
||||
|
||||
|
|
|
@ -1576,7 +1576,8 @@ static const char *update(struct command *cmd, struct shallow_info *si)
|
|||
if (ref_transaction_delete(transaction,
|
||||
namespaced_name,
|
||||
old_oid,
|
||||
0, "push", &err)) {
|
||||
0, NULL,
|
||||
"push", &err)) {
|
||||
rp_error("%s", err.buf);
|
||||
ret = "failed to delete";
|
||||
} else {
|
||||
|
@ -1595,6 +1596,7 @@ static const char *update(struct command *cmd, struct shallow_info *si)
|
|||
if (ref_transaction_update(transaction,
|
||||
namespaced_name,
|
||||
new_oid, old_oid,
|
||||
NULL, NULL,
|
||||
0, "push",
|
||||
&err)) {
|
||||
rp_error("%s", err.buf);
|
||||
|
|
|
@ -201,7 +201,7 @@ static int replace_object_oid(const char *object_ref,
|
|||
transaction = ref_transaction_begin(&err);
|
||||
if (!transaction ||
|
||||
ref_transaction_update(transaction, ref.buf, repl, &prev,
|
||||
0, NULL, &err) ||
|
||||
NULL, NULL, 0, NULL, &err) ||
|
||||
ref_transaction_commit(transaction, &err))
|
||||
res = error("%s", err.buf);
|
||||
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
* "git replay" builtin command
|
||||
*/
|
||||
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "git-compat-util.h"
|
||||
|
||||
#include "builtin.h"
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
*
|
||||
* Copyright (c) 2005, 2006 Linus Torvalds and Junio C Hamano
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "advice.h"
|
||||
#include "config.h"
|
||||
|
@ -66,8 +66,8 @@ static int reset_index(const char *ref, const struct object_id *oid, int reset_t
|
|||
|
||||
memset(&opts, 0, sizeof(opts));
|
||||
opts.head_idx = 1;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
opts.fn = oneway_merge;
|
||||
opts.merge = 1;
|
||||
init_checkout_metadata(&opts.meta, ref, oid, NULL);
|
||||
|
@ -159,11 +159,11 @@ static void update_index_from_diff(struct diff_queue_struct *q,
|
|||
struct cache_entry *ce;
|
||||
|
||||
if (!is_in_reset_tree && !intent_to_add) {
|
||||
remove_file_from_index(&the_index, one->path);
|
||||
remove_file_from_index(the_repository->index, one->path);
|
||||
continue;
|
||||
}
|
||||
|
||||
ce = make_cache_entry(&the_index, one->mode, &one->oid, one->path,
|
||||
ce = make_cache_entry(the_repository->index, one->mode, &one->oid, one->path,
|
||||
0, 0);
|
||||
|
||||
/*
|
||||
|
@ -174,9 +174,9 @@ static void update_index_from_diff(struct diff_queue_struct *q,
|
|||
* if this entry is outside the sparse cone - this is necessary
|
||||
* to properly construct the reset sparse directory.
|
||||
*/
|
||||
pos = index_name_pos(&the_index, one->path, strlen(one->path));
|
||||
if ((pos >= 0 && ce_skip_worktree(the_index.cache[pos])) ||
|
||||
(pos < 0 && !path_in_sparse_checkout(one->path, &the_index)))
|
||||
pos = index_name_pos(the_repository->index, one->path, strlen(one->path));
|
||||
if ((pos >= 0 && ce_skip_worktree(the_repository->index->cache[pos])) ||
|
||||
(pos < 0 && !path_in_sparse_checkout(one->path, the_repository->index)))
|
||||
ce->ce_flags |= CE_SKIP_WORKTREE;
|
||||
|
||||
if (!ce)
|
||||
|
@ -186,7 +186,7 @@ static void update_index_from_diff(struct diff_queue_struct *q,
|
|||
ce->ce_flags |= CE_INTENT_TO_ADD;
|
||||
set_object_name_for_intent_to_add_entry(ce);
|
||||
}
|
||||
add_index_entry(&the_index, ce,
|
||||
add_index_entry(the_repository->index, ce,
|
||||
ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
|
||||
}
|
||||
}
|
||||
|
@ -208,8 +208,8 @@ static int read_from_tree(const struct pathspec *pathspec,
|
|||
opt.change = diff_change;
|
||||
opt.add_remove = diff_addremove;
|
||||
|
||||
if (pathspec->nr && pathspec_needs_expanded_index(&the_index, pathspec))
|
||||
ensure_full_index(&the_index);
|
||||
if (pathspec->nr && pathspec_needs_expanded_index(the_repository->index, pathspec))
|
||||
ensure_full_index(the_repository->index);
|
||||
|
||||
if (do_diff_cache(tree_oid, &opt))
|
||||
return 1;
|
||||
|
@ -235,7 +235,7 @@ static void set_reflog_message(struct strbuf *sb, const char *action,
|
|||
|
||||
static void die_if_unmerged_cache(int reset_type)
|
||||
{
|
||||
if (is_merge() || unmerged_index(&the_index))
|
||||
if (is_merge() || unmerged_index(the_repository->index))
|
||||
die(_("Cannot do a %s reset in the middle of a merge."),
|
||||
_(reset_type_names[reset_type]));
|
||||
|
||||
|
@ -470,12 +470,12 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
|
|||
update_ref_status = 1;
|
||||
goto cleanup;
|
||||
}
|
||||
the_index.updated_skipworktree = 1;
|
||||
the_repository->index->updated_skipworktree = 1;
|
||||
if (!no_refresh && get_git_work_tree()) {
|
||||
uint64_t t_begin, t_delta_in_ms;
|
||||
|
||||
t_begin = getnanotime();
|
||||
refresh_index(&the_index, flags, NULL, NULL,
|
||||
refresh_index(the_repository->index, flags, NULL, NULL,
|
||||
_("Unstaged changes after reset:"));
|
||||
t_delta_in_ms = (getnanotime() - t_begin) / 1000000;
|
||||
if (!quiet && advice_enabled(ADVICE_RESET_NO_REFRESH_WARNING) && t_delta_in_ms > REFRESH_INDEX_DELAY_WARNING_IN_MS) {
|
||||
|
@ -501,7 +501,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
|
|||
free(ref);
|
||||
}
|
||||
|
||||
if (write_locked_index(&the_index, &lock, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock, COMMIT_LOCK))
|
||||
die(_("Could not write new index file."));
|
||||
}
|
||||
|
||||
|
@ -516,7 +516,7 @@ int cmd_reset(int argc, const char **argv, const char *prefix)
|
|||
if (!pathspec.nr)
|
||||
remove_branch_state(the_repository, 0);
|
||||
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
|
||||
cleanup:
|
||||
clear_pathspec(&pathspec);
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) Linus Torvalds, 2005
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "config.h"
|
||||
|
@ -687,7 +687,6 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
|||
const char *name = NULL;
|
||||
struct object_context unused;
|
||||
struct strbuf buf = STRBUF_INIT;
|
||||
const int hexsz = the_hash_algo->hexsz;
|
||||
int seen_end_of_options = 0;
|
||||
enum format_type format = FORMAT_DEFAULT;
|
||||
|
||||
|
@ -863,8 +862,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
|||
abbrev = strtoul(arg, NULL, 10);
|
||||
if (abbrev < MINIMUM_ABBREV)
|
||||
abbrev = MINIMUM_ABBREV;
|
||||
else if (hexsz <= abbrev)
|
||||
abbrev = hexsz;
|
||||
else if ((int)the_hash_algo->hexsz <= abbrev)
|
||||
abbrev = the_hash_algo->hexsz;
|
||||
continue;
|
||||
}
|
||||
if (!strcmp(arg, "--sq")) {
|
||||
|
@ -1049,8 +1048,8 @@ int cmd_rev_parse(int argc, const char **argv, const char *prefix)
|
|||
if (!strcmp(arg, "--shared-index-path")) {
|
||||
if (repo_read_index(the_repository) < 0)
|
||||
die(_("Could not read the index"));
|
||||
if (the_index.split_index) {
|
||||
const struct object_id *oid = &the_index.split_index->base_oid;
|
||||
if (the_repository->index->split_index) {
|
||||
const struct object_id *oid = &the_repository->index->split_index->base_oid;
|
||||
const char *path = git_path("sharedindex.%s", oid_to_hex(oid));
|
||||
print_path(path, prefix, format, DEFAULT_RELATIVE);
|
||||
}
|
||||
|
|
40
builtin/rm.c
40
builtin/rm.c
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) Linus Torvalds 2006
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "advice.h"
|
||||
#include "config.h"
|
||||
|
@ -41,8 +41,8 @@ static int get_ours_cache_pos(const char *path, int pos)
|
|||
{
|
||||
int i = -pos - 1;
|
||||
|
||||
while ((i < the_index.cache_nr) && !strcmp(the_index.cache[i]->name, path)) {
|
||||
if (ce_stage(the_index.cache[i]) == 2)
|
||||
while ((i < the_repository->index->cache_nr) && !strcmp(the_repository->index->cache[i]->name, path)) {
|
||||
if (ce_stage(the_repository->index->cache[i]) == 2)
|
||||
return i;
|
||||
i++;
|
||||
}
|
||||
|
@ -78,13 +78,13 @@ static void submodules_absorb_gitdir_if_needed(void)
|
|||
int pos;
|
||||
const struct cache_entry *ce;
|
||||
|
||||
pos = index_name_pos(&the_index, name, strlen(name));
|
||||
pos = index_name_pos(the_repository->index, name, strlen(name));
|
||||
if (pos < 0) {
|
||||
pos = get_ours_cache_pos(name, pos);
|
||||
if (pos < 0)
|
||||
continue;
|
||||
}
|
||||
ce = the_index.cache[pos];
|
||||
ce = the_repository->index->cache[pos];
|
||||
|
||||
if (!S_ISGITLINK(ce->ce_mode) ||
|
||||
!file_exists(ce->name) ||
|
||||
|
@ -122,7 +122,7 @@ static int check_local_mod(struct object_id *head, int index_only)
|
|||
int local_changes = 0;
|
||||
int staged_changes = 0;
|
||||
|
||||
pos = index_name_pos(&the_index, name, strlen(name));
|
||||
pos = index_name_pos(the_repository->index, name, strlen(name));
|
||||
if (pos < 0) {
|
||||
/*
|
||||
* Skip unmerged entries except for populated submodules
|
||||
|
@ -132,11 +132,11 @@ static int check_local_mod(struct object_id *head, int index_only)
|
|||
if (pos < 0)
|
||||
continue;
|
||||
|
||||
if (!S_ISGITLINK(the_index.cache[pos]->ce_mode) ||
|
||||
if (!S_ISGITLINK(the_repository->index->cache[pos]->ce_mode) ||
|
||||
is_empty_dir(name))
|
||||
continue;
|
||||
}
|
||||
ce = the_index.cache[pos];
|
||||
ce = the_repository->index->cache[pos];
|
||||
|
||||
if (lstat(ce->name, &st) < 0) {
|
||||
if (!is_missing_file_error(errno))
|
||||
|
@ -173,7 +173,7 @@ static int check_local_mod(struct object_id *head, int index_only)
|
|||
* Is the index different from the file in the work tree?
|
||||
* If it's a submodule, is its work tree modified?
|
||||
*/
|
||||
if (ie_match_stat(&the_index, ce, &st, 0) ||
|
||||
if (ie_match_stat(the_repository->index, ce, &st, 0) ||
|
||||
(S_ISGITLINK(ce->ce_mode) &&
|
||||
bad_to_remove_submodule(ce->name,
|
||||
SUBMODULE_REMOVAL_DIE_ON_ERROR |
|
||||
|
@ -301,27 +301,27 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
|
|||
if (repo_read_index(the_repository) < 0)
|
||||
die(_("index file corrupt"));
|
||||
|
||||
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &pathspec, NULL, NULL);
|
||||
refresh_index(the_repository->index, REFRESH_QUIET|REFRESH_UNMERGED, &pathspec, NULL, NULL);
|
||||
|
||||
seen = xcalloc(pathspec.nr, 1);
|
||||
|
||||
if (pathspec_needs_expanded_index(&the_index, &pathspec))
|
||||
ensure_full_index(&the_index);
|
||||
if (pathspec_needs_expanded_index(the_repository->index, &pathspec))
|
||||
ensure_full_index(the_repository->index);
|
||||
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_index.cache[i];
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[i];
|
||||
|
||||
if (!include_sparse &&
|
||||
(ce_skip_worktree(ce) ||
|
||||
!path_in_sparse_checkout(ce->name, &the_index)))
|
||||
!path_in_sparse_checkout(ce->name, the_repository->index)))
|
||||
continue;
|
||||
if (!ce_path_match(&the_index, ce, &pathspec, seen))
|
||||
if (!ce_path_match(the_repository->index, ce, &pathspec, seen))
|
||||
continue;
|
||||
ALLOC_GROW(list.entry, list.nr + 1, list.alloc);
|
||||
list.entry[list.nr].name = xstrdup(ce->name);
|
||||
list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
|
||||
if (list.entry[list.nr++].is_submodule &&
|
||||
!is_staging_gitmodules_ok(&the_index))
|
||||
!is_staging_gitmodules_ok(the_repository->index))
|
||||
die(_("please stage your changes to .gitmodules or stash them to proceed"));
|
||||
}
|
||||
|
||||
|
@ -391,7 +391,7 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
|
|||
if (!quiet)
|
||||
printf("rm '%s'\n", path);
|
||||
|
||||
if (remove_file_from_index(&the_index, path))
|
||||
if (remove_file_from_index(the_repository->index, path))
|
||||
die(_("git rm: unable to remove %s"), path);
|
||||
}
|
||||
|
||||
|
@ -432,10 +432,10 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
|
|||
}
|
||||
strbuf_release(&buf);
|
||||
if (gitmodules_modified)
|
||||
stage_updated_gitmodules(&the_index);
|
||||
stage_updated_gitmodules(the_repository->index);
|
||||
}
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file,
|
||||
if (write_locked_index(the_repository->index, &lock_file,
|
||||
COMMIT_LOCK | SKIP_IF_UNCHANGED))
|
||||
die(_("Unable to write new index file"));
|
||||
|
||||
|
|
|
@ -435,7 +435,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
|
|||
usage_with_options(shortlog_usage, options);
|
||||
}
|
||||
|
||||
if (setup_revisions(argc, argv, &rev, NULL) != 1) {
|
||||
if (!nongit && setup_revisions(argc, argv, &rev, NULL) != 1) {
|
||||
error(_("unrecognized argument: %s"), argv[1]);
|
||||
usage_with_options(shortlog_usage, options);
|
||||
}
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "config.h"
|
||||
|
@ -273,7 +272,7 @@ static int reset_tree(struct object_id *i_tree, int update, int reset)
|
|||
struct lock_file lock_file = LOCK_INIT;
|
||||
|
||||
repo_read_index_preload(the_repository, NULL, 0);
|
||||
if (refresh_index(&the_index, REFRESH_QUIET, NULL, NULL, NULL))
|
||||
if (refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL, NULL))
|
||||
return -1;
|
||||
|
||||
repo_hold_locked_index(the_repository, &lock_file, LOCK_DIE_ON_ERROR);
|
||||
|
@ -287,8 +286,8 @@ static int reset_tree(struct object_id *i_tree, int update, int reset)
|
|||
init_tree_desc(t, &tree->object.oid, tree->buffer, tree->size);
|
||||
|
||||
opts.head_idx = 1;
|
||||
opts.src_index = &the_index;
|
||||
opts.dst_index = &the_index;
|
||||
opts.src_index = the_repository->index;
|
||||
opts.dst_index = the_repository->index;
|
||||
opts.merge = 1;
|
||||
opts.reset = reset ? UNPACK_RESET_PROTECT_UNTRACKED : 0;
|
||||
opts.update = update;
|
||||
|
@ -299,7 +298,7 @@ static int reset_tree(struct object_id *i_tree, int update, int reset)
|
|||
if (unpack_trees(nr_trees, t, &opts))
|
||||
return -1;
|
||||
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
return error(_("unable to write new index file"));
|
||||
|
||||
return 0;
|
||||
|
@ -430,7 +429,7 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
|
|||
state.force = 1;
|
||||
state.quiet = 1;
|
||||
state.refresh_cache = 1;
|
||||
state.istate = &the_index;
|
||||
state.istate = the_repository->index;
|
||||
|
||||
/*
|
||||
* Step 1: get a difference between orig_tree (which corresponding
|
||||
|
@ -454,7 +453,7 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
|
|||
|
||||
/* Look up the path's position in the current index. */
|
||||
p = diff_queued_diff.queue[i];
|
||||
pos = index_name_pos(&the_index, p->two->path,
|
||||
pos = index_name_pos(the_repository->index, p->two->path,
|
||||
strlen(p->two->path));
|
||||
|
||||
/*
|
||||
|
@ -465,10 +464,10 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
|
|||
* path, but left it out of the working tree, then clear the
|
||||
* SKIP_WORKTREE bit and write it to the working tree.
|
||||
*/
|
||||
if (pos >= 0 && ce_skip_worktree(the_index.cache[pos])) {
|
||||
if (pos >= 0 && ce_skip_worktree(the_repository->index->cache[pos])) {
|
||||
struct stat st;
|
||||
|
||||
ce = the_index.cache[pos];
|
||||
ce = the_repository->index->cache[pos];
|
||||
if (!lstat(ce->name, &st)) {
|
||||
/* Conflicting path present; relocate it */
|
||||
struct strbuf new_path = STRBUF_INIT;
|
||||
|
@ -504,12 +503,12 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
|
|||
if (pos < 0)
|
||||
option = ADD_CACHE_OK_TO_ADD;
|
||||
|
||||
ce = make_cache_entry(&the_index,
|
||||
ce = make_cache_entry(the_repository->index,
|
||||
p->one->mode,
|
||||
&p->one->oid,
|
||||
p->one->path,
|
||||
0, 0);
|
||||
add_index_entry(&the_index, ce, option);
|
||||
add_index_entry(the_repository->index, ce, option);
|
||||
}
|
||||
}
|
||||
diff_flush(&diff_opts);
|
||||
|
@ -518,7 +517,7 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
|
|||
* Step 4: write the new index to disk
|
||||
*/
|
||||
repo_hold_locked_index(the_repository, &lock, LOCK_DIE_ON_ERROR);
|
||||
if (write_locked_index(&the_index, &lock,
|
||||
if (write_locked_index(the_repository->index, &lock,
|
||||
COMMIT_LOCK | SKIP_IF_UNCHANGED))
|
||||
die(_("could not write index"));
|
||||
}
|
||||
|
@ -539,7 +538,7 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
|
|||
NULL, NULL, NULL))
|
||||
return error(_("could not write index"));
|
||||
|
||||
if (write_index_as_tree(&c_tree, &the_index, get_index_file(), 0,
|
||||
if (write_index_as_tree(&c_tree, the_repository->index, get_index_file(), 0,
|
||||
NULL))
|
||||
return error(_("cannot apply a stash in the middle of a merge"));
|
||||
|
||||
|
@ -562,14 +561,14 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
|
|||
return error(_("conflicts in index. "
|
||||
"Try without --index."));
|
||||
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
repo_read_index(the_repository);
|
||||
if (write_index_as_tree(&index_tree, &the_index,
|
||||
if (write_index_as_tree(&index_tree, the_repository->index,
|
||||
get_index_file(), 0, NULL))
|
||||
return error(_("could not save index tree"));
|
||||
|
||||
reset_head();
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
repo_read_index(the_repository);
|
||||
}
|
||||
}
|
||||
|
@ -875,8 +874,8 @@ static void diff_include_untracked(const struct stash_info *info, struct diff_op
|
|||
}
|
||||
|
||||
unpack_tree_opt.head_idx = -1;
|
||||
unpack_tree_opt.src_index = &the_index;
|
||||
unpack_tree_opt.dst_index = &the_index;
|
||||
unpack_tree_opt.src_index = the_repository->index;
|
||||
unpack_tree_opt.dst_index = the_repository->index;
|
||||
unpack_tree_opt.merge = 1;
|
||||
unpack_tree_opt.fn = stash_worktree_untracked_merge;
|
||||
|
||||
|
@ -1205,8 +1204,8 @@ static int stash_staged(struct stash_info *info, struct strbuf *out_patch,
|
|||
}
|
||||
|
||||
cp_diff_tree.git_cmd = 1;
|
||||
strvec_pushl(&cp_diff_tree.args, "diff-tree", "-p", "-U1", "HEAD",
|
||||
oid_to_hex(&info->w_tree), "--", NULL);
|
||||
strvec_pushl(&cp_diff_tree.args, "diff-tree", "-p", "--binary",
|
||||
"-U1", "HEAD", oid_to_hex(&info->w_tree), "--", NULL);
|
||||
if (pipe_command(&cp_diff_tree, NULL, 0, out_patch, 0, NULL, 0)) {
|
||||
ret = -1;
|
||||
goto done;
|
||||
|
@ -1395,7 +1394,7 @@ static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_b
|
|||
|
||||
strbuf_addf(&commit_tree_label, "index on %s\n", msg.buf);
|
||||
commit_list_insert(head_commit, &parents);
|
||||
if (write_index_as_tree(&info->i_tree, &the_index, get_index_file(), 0,
|
||||
if (write_index_as_tree(&info->i_tree, the_repository->index, get_index_file(), 0,
|
||||
NULL) ||
|
||||
commit_tree(commit_tree_label.buf, commit_tree_label.len,
|
||||
&info->i_tree, parents, &info->i_commit, NULL, NULL)) {
|
||||
|
@ -1540,9 +1539,9 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
|
|||
char *ps_matched = xcalloc(ps->nr, 1);
|
||||
|
||||
/* TODO: audit for interaction with sparse-index. */
|
||||
ensure_full_index(&the_index);
|
||||
for (i = 0; i < the_index.cache_nr; i++)
|
||||
ce_path_match(&the_index, the_index.cache[i], ps,
|
||||
ensure_full_index(the_repository->index);
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++)
|
||||
ce_path_match(the_repository->index, the_repository->index->cache[i], ps,
|
||||
ps_matched);
|
||||
|
||||
if (report_path_error(ps_matched, ps)) {
|
||||
|
@ -1612,7 +1611,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
|
|||
goto done;
|
||||
}
|
||||
}
|
||||
discard_index(&the_index);
|
||||
discard_index(the_repository->index);
|
||||
if (ps->nr) {
|
||||
struct child_process cp_add = CHILD_PROCESS_INIT;
|
||||
struct child_process cp_diff = CHILD_PROCESS_INIT;
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
#define USE_THE_INDEX_VARIABLE
|
||||
#include "builtin.h"
|
||||
#include "abspath.h"
|
||||
#include "environment.h"
|
||||
|
@ -207,18 +206,18 @@ static int module_list_compute(const char **argv,
|
|||
if (repo_read_index(the_repository) < 0)
|
||||
die(_("index file corrupt"));
|
||||
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_index.cache[i];
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[i];
|
||||
|
||||
if (!match_pathspec(&the_index, pathspec, ce->name, ce_namelen(ce),
|
||||
if (!match_pathspec(the_repository->index, pathspec, ce->name, ce_namelen(ce),
|
||||
0, ps_matched, 1) ||
|
||||
!S_ISGITLINK(ce->ce_mode))
|
||||
continue;
|
||||
|
||||
ALLOC_GROW(list->entries, list->nr + 1, list->alloc);
|
||||
list->entries[list->nr++] = ce;
|
||||
while (i + 1 < the_index.cache_nr &&
|
||||
!strcmp(ce->name, the_index.cache[i + 1]->name))
|
||||
while (i + 1 < the_repository->index->cache_nr &&
|
||||
!strcmp(ce->name, the_repository->index->cache[i + 1]->name))
|
||||
/*
|
||||
* Skip entries with the same name in different stages
|
||||
* to make sure an entry is returned only once.
|
||||
|
@ -907,7 +906,7 @@ static void generate_submodule_summary(struct summary_cb *info,
|
|||
int fd = open(p->sm_path, O_RDONLY);
|
||||
|
||||
if (fd < 0 || fstat(fd, &st) < 0 ||
|
||||
index_fd(&the_index, &p->oid_dst, fd, &st, OBJ_BLOB,
|
||||
index_fd(the_repository->index, &p->oid_dst, fd, &st, OBJ_BLOB,
|
||||
p->sm_path, 0))
|
||||
error(_("couldn't hash object from '%s'"), p->sm_path);
|
||||
} else {
|
||||
|
@ -3243,21 +3242,21 @@ static void die_on_index_match(const char *path, int force)
|
|||
char *ps_matched = xcalloc(ps.nr, 1);
|
||||
|
||||
/* TODO: audit for interaction with sparse-index. */
|
||||
ensure_full_index(&the_index);
|
||||
ensure_full_index(the_repository->index);
|
||||
|
||||
/*
|
||||
* Since there is only one pathspec, we just need to
|
||||
* check ps_matched[0] to know if a cache entry matched.
|
||||
*/
|
||||
for (i = 0; i < the_index.cache_nr; i++) {
|
||||
ce_path_match(&the_index, the_index.cache[i], &ps,
|
||||
for (i = 0; i < the_repository->index->cache_nr; i++) {
|
||||
ce_path_match(the_repository->index, the_repository->index->cache[i], &ps,
|
||||
ps_matched);
|
||||
|
||||
if (ps_matched[0]) {
|
||||
if (!force)
|
||||
die(_("'%s' already exists in the index"),
|
||||
path);
|
||||
if (!S_ISGITLINK(the_index.cache[i]->ce_mode))
|
||||
if (!S_ISGITLINK(the_repository->index->cache[i]->ce_mode))
|
||||
die(_("'%s' already exists in the index "
|
||||
"and is not a submodule"), path);
|
||||
break;
|
||||
|
|
|
@ -660,6 +660,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
|
|||
transaction = ref_transaction_begin(&err);
|
||||
if (!transaction ||
|
||||
ref_transaction_update(transaction, ref.buf, &object, &prev,
|
||||
NULL, NULL,
|
||||
create_reflog ? REF_FORCE_CREATE_REFLOG : 0,
|
||||
reflog_msg.buf, &err) ||
|
||||
ref_transaction_commit(transaction, &err)) {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) Linus Torvalds, 2005
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "bulk-checkin.h"
|
||||
#include "config.h"
|
||||
|
@ -247,16 +247,16 @@ static int test_if_untracked_cache_is_supported(void)
|
|||
static int mark_ce_flags(const char *path, int flag, int mark)
|
||||
{
|
||||
int namelen = strlen(path);
|
||||
int pos = index_name_pos(&the_index, path, namelen);
|
||||
int pos = index_name_pos(the_repository->index, path, namelen);
|
||||
if (0 <= pos) {
|
||||
mark_fsmonitor_invalid(&the_index, the_index.cache[pos]);
|
||||
mark_fsmonitor_invalid(the_repository->index, the_repository->index->cache[pos]);
|
||||
if (mark)
|
||||
the_index.cache[pos]->ce_flags |= flag;
|
||||
the_repository->index->cache[pos]->ce_flags |= flag;
|
||||
else
|
||||
the_index.cache[pos]->ce_flags &= ~flag;
|
||||
the_index.cache[pos]->ce_flags |= CE_UPDATE_IN_BASE;
|
||||
cache_tree_invalidate_path(&the_index, path);
|
||||
the_index.cache_changed |= CE_ENTRY_CHANGED;
|
||||
the_repository->index->cache[pos]->ce_flags &= ~flag;
|
||||
the_repository->index->cache[pos]->ce_flags |= CE_UPDATE_IN_BASE;
|
||||
cache_tree_invalidate_path(the_repository->index, path);
|
||||
the_repository->index->cache_changed |= CE_ENTRY_CHANGED;
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
|
@ -266,7 +266,7 @@ static int remove_one_path(const char *path)
|
|||
{
|
||||
if (!allow_remove)
|
||||
return error("%s: does not exist and --remove not passed", path);
|
||||
if (remove_file_from_index(&the_index, path))
|
||||
if (remove_file_from_index(the_repository->index, path))
|
||||
return error("%s: cannot remove from the index", path);
|
||||
return 0;
|
||||
}
|
||||
|
@ -291,24 +291,24 @@ static int add_one_path(const struct cache_entry *old, const char *path, int len
|
|||
struct cache_entry *ce;
|
||||
|
||||
/* Was the old index entry already up-to-date? */
|
||||
if (old && !ce_stage(old) && !ie_match_stat(&the_index, old, st, 0))
|
||||
if (old && !ce_stage(old) && !ie_match_stat(the_repository->index, old, st, 0))
|
||||
return 0;
|
||||
|
||||
ce = make_empty_cache_entry(&the_index, len);
|
||||
ce = make_empty_cache_entry(the_repository->index, len);
|
||||
memcpy(ce->name, path, len);
|
||||
ce->ce_flags = create_ce_flags(0);
|
||||
ce->ce_namelen = len;
|
||||
fill_stat_cache_info(&the_index, ce, st);
|
||||
fill_stat_cache_info(the_repository->index, ce, st);
|
||||
ce->ce_mode = ce_mode_from_stat(old, st->st_mode);
|
||||
|
||||
if (index_path(&the_index, &ce->oid, path, st,
|
||||
if (index_path(the_repository->index, &ce->oid, path, st,
|
||||
info_only ? 0 : HASH_WRITE_OBJECT)) {
|
||||
discard_cache_entry(ce);
|
||||
return -1;
|
||||
}
|
||||
option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
|
||||
option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
|
||||
if (add_index_entry(&the_index, ce, option)) {
|
||||
if (add_index_entry(the_repository->index, ce, option)) {
|
||||
discard_cache_entry(ce);
|
||||
return error("%s: cannot add to the index - missing --add option?", path);
|
||||
}
|
||||
|
@ -341,11 +341,11 @@ static int add_one_path(const struct cache_entry *old, const char *path, int len
|
|||
static int process_directory(const char *path, int len, struct stat *st)
|
||||
{
|
||||
struct object_id oid;
|
||||
int pos = index_name_pos(&the_index, path, len);
|
||||
int pos = index_name_pos(the_repository->index, path, len);
|
||||
|
||||
/* Exact match: file or existing gitlink */
|
||||
if (pos >= 0) {
|
||||
const struct cache_entry *ce = the_index.cache[pos];
|
||||
const struct cache_entry *ce = the_repository->index->cache[pos];
|
||||
if (S_ISGITLINK(ce->ce_mode)) {
|
||||
|
||||
/* Do nothing to the index if there is no HEAD! */
|
||||
|
@ -360,8 +360,8 @@ static int process_directory(const char *path, int len, struct stat *st)
|
|||
|
||||
/* Inexact match: is there perhaps a subdirectory match? */
|
||||
pos = -pos-1;
|
||||
while (pos < the_index.cache_nr) {
|
||||
const struct cache_entry *ce = the_index.cache[pos++];
|
||||
while (pos < the_repository->index->cache_nr) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[pos++];
|
||||
|
||||
if (strncmp(ce->name, path, len))
|
||||
break;
|
||||
|
@ -391,8 +391,8 @@ static int process_path(const char *path, struct stat *st, int stat_errno)
|
|||
if (has_symlink_leading_path(path, len))
|
||||
return error("'%s' is beyond a symbolic link", path);
|
||||
|
||||
pos = index_name_pos(&the_index, path, len);
|
||||
ce = pos < 0 ? NULL : the_index.cache[pos];
|
||||
pos = index_name_pos(the_repository->index, path, len);
|
||||
ce = pos < 0 ? NULL : the_repository->index->cache[pos];
|
||||
if (ce && ce_skip_worktree(ce)) {
|
||||
/*
|
||||
* working directory version is assumed "good"
|
||||
|
@ -400,7 +400,7 @@ static int process_path(const char *path, struct stat *st, int stat_errno)
|
|||
* On the other hand, removing it from index should work
|
||||
*/
|
||||
if (!ignore_skip_worktree_entries && allow_remove &&
|
||||
remove_file_from_index(&the_index, path))
|
||||
remove_file_from_index(the_repository->index, path))
|
||||
return error("%s: cannot remove from the index", path);
|
||||
return 0;
|
||||
}
|
||||
|
@ -428,7 +428,7 @@ static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
|
|||
return error("Invalid path '%s'", path);
|
||||
|
||||
len = strlen(path);
|
||||
ce = make_empty_cache_entry(&the_index, len);
|
||||
ce = make_empty_cache_entry(the_repository->index, len);
|
||||
|
||||
oidcpy(&ce->oid, oid);
|
||||
memcpy(ce->name, path, len);
|
||||
|
@ -439,7 +439,7 @@ static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
|
|||
ce->ce_flags |= CE_VALID;
|
||||
option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
|
||||
option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
|
||||
if (add_index_entry(&the_index, ce, option))
|
||||
if (add_index_entry(the_repository->index, ce, option))
|
||||
return error("%s: cannot add to the index - missing --add option?",
|
||||
path);
|
||||
report("add '%s'", path);
|
||||
|
@ -451,11 +451,11 @@ static void chmod_path(char flip, const char *path)
|
|||
int pos;
|
||||
struct cache_entry *ce;
|
||||
|
||||
pos = index_name_pos(&the_index, path, strlen(path));
|
||||
pos = index_name_pos(the_repository->index, path, strlen(path));
|
||||
if (pos < 0)
|
||||
goto fail;
|
||||
ce = the_index.cache[pos];
|
||||
if (chmod_index_entry(&the_index, ce, flip) < 0)
|
||||
ce = the_repository->index->cache[pos];
|
||||
if (chmod_index_entry(the_repository->index, ce, flip) < 0)
|
||||
goto fail;
|
||||
|
||||
report("chmod %cx '%s'", flip, path);
|
||||
|
@ -498,7 +498,7 @@ static void update_one(const char *path)
|
|||
}
|
||||
|
||||
if (force_remove) {
|
||||
if (remove_file_from_index(&the_index, path))
|
||||
if (remove_file_from_index(the_repository->index, path))
|
||||
die("git update-index: unable to remove %s", path);
|
||||
report("remove '%s'", path);
|
||||
return;
|
||||
|
@ -581,7 +581,7 @@ static void read_index_info(int nul_term_line)
|
|||
|
||||
if (!mode) {
|
||||
/* mode == 0 means there is no such path -- remove */
|
||||
if (remove_file_from_index(&the_index, path_name))
|
||||
if (remove_file_from_index(the_repository->index, path_name))
|
||||
die("git update-index: unable to remove %s",
|
||||
ptr);
|
||||
}
|
||||
|
@ -622,12 +622,12 @@ static struct cache_entry *read_one_ent(const char *which,
|
|||
error("%s: not in %s branch.", path, which);
|
||||
return NULL;
|
||||
}
|
||||
if (!the_index.sparse_index && mode == S_IFDIR) {
|
||||
if (!the_repository->index->sparse_index && mode == S_IFDIR) {
|
||||
if (which)
|
||||
error("%s: not a blob in %s branch.", path, which);
|
||||
return NULL;
|
||||
}
|
||||
ce = make_empty_cache_entry(&the_index, namelen);
|
||||
ce = make_empty_cache_entry(the_repository->index, namelen);
|
||||
|
||||
oidcpy(&ce->oid, &oid);
|
||||
memcpy(ce->name, path, namelen);
|
||||
|
@ -642,12 +642,12 @@ static int unresolve_one(const char *path)
|
|||
struct string_list_item *item;
|
||||
int res = 0;
|
||||
|
||||
if (!the_index.resolve_undo)
|
||||
if (!the_repository->index->resolve_undo)
|
||||
return res;
|
||||
item = string_list_lookup(the_index.resolve_undo, path);
|
||||
item = string_list_lookup(the_repository->index->resolve_undo, path);
|
||||
if (!item)
|
||||
return res; /* no resolve-undo record for the path */
|
||||
res = unmerge_index_entry(&the_index, path, item->util, 0);
|
||||
res = unmerge_index_entry(the_repository->index, path, item->util, 0);
|
||||
FREE_AND_NULL(item->util);
|
||||
return res;
|
||||
}
|
||||
|
@ -688,13 +688,13 @@ static int do_reupdate(const char **paths,
|
|||
*/
|
||||
has_head = 0;
|
||||
redo:
|
||||
for (pos = 0; pos < the_index.cache_nr; pos++) {
|
||||
const struct cache_entry *ce = the_index.cache[pos];
|
||||
for (pos = 0; pos < the_repository->index->cache_nr; pos++) {
|
||||
const struct cache_entry *ce = the_repository->index->cache[pos];
|
||||
struct cache_entry *old = NULL;
|
||||
int save_nr;
|
||||
char *path;
|
||||
|
||||
if (ce_stage(ce) || !ce_path_match(&the_index, ce, &pathspec, NULL))
|
||||
if (ce_stage(ce) || !ce_path_match(the_repository->index, ce, &pathspec, NULL))
|
||||
continue;
|
||||
if (has_head)
|
||||
old = read_one_ent(NULL, &head_oid,
|
||||
|
@ -710,7 +710,7 @@ static int do_reupdate(const char **paths,
|
|||
* to process each path individually
|
||||
*/
|
||||
if (S_ISSPARSEDIR(ce->ce_mode)) {
|
||||
ensure_full_index(&the_index);
|
||||
ensure_full_index(the_repository->index);
|
||||
goto redo;
|
||||
}
|
||||
|
||||
|
@ -718,12 +718,12 @@ static int do_reupdate(const char **paths,
|
|||
* path anymore, in which case, under 'allow_remove',
|
||||
* or worse yet 'allow_replace', active_nr may decrease.
|
||||
*/
|
||||
save_nr = the_index.cache_nr;
|
||||
save_nr = the_repository->index->cache_nr;
|
||||
path = xstrdup(ce->name);
|
||||
update_one(path);
|
||||
free(path);
|
||||
discard_cache_entry(old);
|
||||
if (save_nr != the_index.cache_nr)
|
||||
if (save_nr != the_repository->index->cache_nr)
|
||||
goto redo;
|
||||
}
|
||||
clear_pathspec(&pathspec);
|
||||
|
@ -739,9 +739,9 @@ static int refresh(struct refresh_params *o, unsigned int flag)
|
|||
{
|
||||
setup_work_tree();
|
||||
repo_read_index(the_repository);
|
||||
*o->has_errors |= refresh_index(&the_index, o->flags | flag, NULL,
|
||||
*o->has_errors |= refresh_index(the_repository->index, o->flags | flag, NULL,
|
||||
NULL, NULL);
|
||||
if (has_racy_timestamp(&the_index)) {
|
||||
if (has_racy_timestamp(the_repository->index)) {
|
||||
/*
|
||||
* Even if nothing else has changed, updating the file
|
||||
* increases the chance that racy timestamps become
|
||||
|
@ -750,7 +750,7 @@ static int refresh(struct refresh_params *o, unsigned int flag)
|
|||
* refresh_index() as these are no actual errors.
|
||||
* cmd_status() does the same.
|
||||
*/
|
||||
the_index.cache_changed |= SOMETHING_CHANGED;
|
||||
the_repository->index->cache_changed |= SOMETHING_CHANGED;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -787,7 +787,7 @@ static int resolve_undo_clear_callback(const struct option *opt UNUSED,
|
|||
{
|
||||
BUG_ON_OPT_NEG(unset);
|
||||
BUG_ON_OPT_ARG(arg);
|
||||
resolve_undo_clear_index(&the_index);
|
||||
resolve_undo_clear_index(the_repository->index);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -888,7 +888,7 @@ static enum parse_opt_result unresolve_callback(
|
|||
*has_errors = do_unresolve(ctx->argc, ctx->argv,
|
||||
prefix, prefix ? strlen(prefix) : 0);
|
||||
if (*has_errors)
|
||||
the_index.cache_changed = 0;
|
||||
the_repository->index->cache_changed = 0;
|
||||
|
||||
ctx->argv += ctx->argc - 1;
|
||||
ctx->argc = 1;
|
||||
|
@ -909,7 +909,7 @@ static enum parse_opt_result reupdate_callback(
|
|||
setup_work_tree();
|
||||
*has_errors = do_reupdate(ctx->argv + 1, prefix);
|
||||
if (*has_errors)
|
||||
the_index.cache_changed = 0;
|
||||
the_repository->index->cache_changed = 0;
|
||||
|
||||
ctx->argv += ctx->argc - 1;
|
||||
ctx->argc = 1;
|
||||
|
@ -1056,7 +1056,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
if (entries < 0)
|
||||
die("cache corrupted");
|
||||
|
||||
the_index.updated_skipworktree = 1;
|
||||
the_repository->index->updated_skipworktree = 1;
|
||||
|
||||
/*
|
||||
* Custom copy of parse_options() because we want to handle
|
||||
|
@ -1111,18 +1111,18 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
|
||||
if (preferred_index_format) {
|
||||
if (preferred_index_format < 0) {
|
||||
printf(_("%d\n"), the_index.version);
|
||||
printf(_("%d\n"), the_repository->index->version);
|
||||
} else if (preferred_index_format < INDEX_FORMAT_LB ||
|
||||
INDEX_FORMAT_UB < preferred_index_format) {
|
||||
die("index-version %d not in range: %d..%d",
|
||||
preferred_index_format,
|
||||
INDEX_FORMAT_LB, INDEX_FORMAT_UB);
|
||||
} else {
|
||||
if (the_index.version != preferred_index_format)
|
||||
the_index.cache_changed |= SOMETHING_CHANGED;
|
||||
if (the_repository->index->version != preferred_index_format)
|
||||
the_repository->index->cache_changed |= SOMETHING_CHANGED;
|
||||
report(_("index-version: was %d, set to %d"),
|
||||
the_index.version, preferred_index_format);
|
||||
the_index.version = preferred_index_format;
|
||||
the_repository->index->version, preferred_index_format);
|
||||
the_repository->index->version = preferred_index_format;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1159,16 +1159,16 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
warning(_("core.splitIndex is set to false; "
|
||||
"remove or change it, if you really want to "
|
||||
"enable split index"));
|
||||
if (the_index.split_index)
|
||||
the_index.cache_changed |= SPLIT_INDEX_ORDERED;
|
||||
if (the_repository->index->split_index)
|
||||
the_repository->index->cache_changed |= SPLIT_INDEX_ORDERED;
|
||||
else
|
||||
add_split_index(&the_index);
|
||||
add_split_index(the_repository->index);
|
||||
} else if (!split_index) {
|
||||
if (git_config_get_split_index() == 1)
|
||||
warning(_("core.splitIndex is set to true; "
|
||||
"remove or change it, if you really want to "
|
||||
"disable split index"));
|
||||
remove_split_index(&the_index);
|
||||
remove_split_index(the_repository->index);
|
||||
}
|
||||
|
||||
prepare_repo_settings(r);
|
||||
|
@ -1180,7 +1180,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
warning(_("core.untrackedCache is set to true; "
|
||||
"remove or change it, if you really want to "
|
||||
"disable the untracked cache"));
|
||||
remove_untracked_cache(&the_index);
|
||||
remove_untracked_cache(the_repository->index);
|
||||
report(_("Untracked cache disabled"));
|
||||
break;
|
||||
case UC_TEST:
|
||||
|
@ -1192,7 +1192,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
warning(_("core.untrackedCache is set to false; "
|
||||
"remove or change it, if you really want to "
|
||||
"enable the untracked cache"));
|
||||
add_untracked_cache(&the_index);
|
||||
add_untracked_cache(the_repository->index);
|
||||
report(_("Untracked cache enabled for '%s'"), get_git_work_tree());
|
||||
break;
|
||||
default:
|
||||
|
@ -1222,7 +1222,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
"set it if you really want to "
|
||||
"enable fsmonitor"));
|
||||
}
|
||||
add_fsmonitor(&the_index);
|
||||
add_fsmonitor(the_repository->index);
|
||||
report(_("fsmonitor enabled"));
|
||||
} else if (!fsmonitor) {
|
||||
enum fsmonitor_mode fsm_mode = fsm_settings__get_mode(r);
|
||||
|
@ -1230,17 +1230,17 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
|
|||
warning(_("core.fsmonitor is set; "
|
||||
"remove it if you really want to "
|
||||
"disable fsmonitor"));
|
||||
remove_fsmonitor(&the_index);
|
||||
remove_fsmonitor(the_repository->index);
|
||||
report(_("fsmonitor disabled"));
|
||||
}
|
||||
|
||||
if (the_index.cache_changed || force_write) {
|
||||
if (the_repository->index->cache_changed || force_write) {
|
||||
if (newfd < 0) {
|
||||
if (refresh_args.flags & REFRESH_QUIET)
|
||||
exit(128);
|
||||
unable_to_lock_die(get_index_file(), lock_error);
|
||||
}
|
||||
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
|
||||
if (write_locked_index(the_repository->index, &lock_file, COMMIT_LOCK))
|
||||
die("Unable to write new index file");
|
||||
}
|
||||
|
||||
|
|
|
@ -88,6 +88,11 @@ static char *parse_refname(const char **next)
|
|||
*/
|
||||
#define PARSE_SHA1_ALLOW_EMPTY 0x02
|
||||
|
||||
/*
|
||||
* Parse refname targets using the ref:<ref_target> format.
|
||||
*/
|
||||
#define PARSE_REFNAME_TARGETS 0x04
|
||||
|
||||
/*
|
||||
* Parse an argument separator followed by the next argument, if any.
|
||||
* If there is an argument, convert it to a SHA-1, write it to sha1,
|
||||
|
@ -95,10 +100,13 @@ static char *parse_refname(const char **next)
|
|||
* return 0. If there is no argument at all (not even the empty
|
||||
* string), return 1 and leave *next unchanged. If the value is
|
||||
* provided but cannot be converted to a SHA-1, die. flags can
|
||||
* include PARSE_SHA1_OLD and/or PARSE_SHA1_ALLOW_EMPTY.
|
||||
* include PARSE_SHA1_OLD and/or PARSE_SHA1_ALLOW_EMPTY and/or
|
||||
* PARSE_REFNAME_TARGETS. When PARSE_REFNAME_TARGETS is set, parse
|
||||
* the argument as `ref:<refname>` and store the refname into
|
||||
* the target strbuf.
|
||||
*/
|
||||
static int parse_next_oid(const char **next, const char *end,
|
||||
struct object_id *oid,
|
||||
static int parse_next_arg(const char **next, const char *end,
|
||||
struct object_id *oid, struct strbuf *target,
|
||||
const char *command, const char *refname,
|
||||
int flags)
|
||||
{
|
||||
|
@ -118,8 +126,17 @@ static int parse_next_oid(const char **next, const char *end,
|
|||
(*next)++;
|
||||
*next = parse_arg(*next, &arg);
|
||||
if (arg.len) {
|
||||
if (repo_get_oid(the_repository, arg.buf, oid))
|
||||
goto invalid;
|
||||
if (repo_get_oid(the_repository, arg.buf, oid)) {
|
||||
const char *value;
|
||||
if (flags & PARSE_REFNAME_TARGETS &&
|
||||
skip_prefix(arg.buf, "ref:", &value)) {
|
||||
if (check_refname_format(value, REFNAME_ALLOW_ONELEVEL))
|
||||
die("invalid ref format: %s", value);
|
||||
strbuf_addstr(target, value);
|
||||
} else {
|
||||
goto invalid;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* Without -z, an empty value means all zeros: */
|
||||
oidclr(oid);
|
||||
|
@ -136,8 +153,17 @@ static int parse_next_oid(const char **next, const char *end,
|
|||
*next += arg.len;
|
||||
|
||||
if (arg.len) {
|
||||
if (repo_get_oid(the_repository, arg.buf, oid))
|
||||
goto invalid;
|
||||
if (repo_get_oid(the_repository, arg.buf, oid)) {
|
||||
const char *value;
|
||||
if (flags & PARSE_REFNAME_TARGETS &&
|
||||
skip_prefix(arg.buf, "ref:", &value)) {
|
||||
if (check_refname_format(value, REFNAME_ALLOW_ONELEVEL))
|
||||
die("invalid ref format: %s", value);
|
||||
strbuf_addstr(target, value);
|
||||
} else {
|
||||
goto invalid;
|
||||
}
|
||||
}
|
||||
} else if (flags & PARSE_SHA1_ALLOW_EMPTY) {
|
||||
/* With -z, treat an empty value as all zeros: */
|
||||
warning("%s %s: missing <new-oid>, treating as zero",
|
||||
|
@ -184,6 +210,8 @@ static void parse_cmd_update(struct ref_transaction *transaction,
|
|||
const char *next, const char *end)
|
||||
{
|
||||
struct strbuf err = STRBUF_INIT;
|
||||
struct strbuf new_target = STRBUF_INIT;
|
||||
struct strbuf old_target = STRBUF_INIT;
|
||||
char *refname;
|
||||
struct object_id new_oid, old_oid;
|
||||
int have_old;
|
||||
|
@ -192,18 +220,24 @@ static void parse_cmd_update(struct ref_transaction *transaction,
|
|||
if (!refname)
|
||||
die("update: missing <ref>");
|
||||
|
||||
if (parse_next_oid(&next, end, &new_oid, "update", refname,
|
||||
PARSE_SHA1_ALLOW_EMPTY))
|
||||
if (parse_next_arg(&next, end, &new_oid,
|
||||
&new_target, "update", refname,
|
||||
PARSE_SHA1_ALLOW_EMPTY | PARSE_REFNAME_TARGETS))
|
||||
die("update %s: missing <new-oid>", refname);
|
||||
|
||||
have_old = !parse_next_oid(&next, end, &old_oid, "update", refname,
|
||||
PARSE_SHA1_OLD);
|
||||
have_old = !parse_next_arg(&next, end, &old_oid,
|
||||
&old_target, "update", refname,
|
||||
PARSE_SHA1_OLD | PARSE_REFNAME_TARGETS);
|
||||
have_old = have_old && !old_target.len;
|
||||
|
||||
if (*next != line_termination)
|
||||
die("update %s: extra input: %s", refname, next);
|
||||
|
||||
if (ref_transaction_update(transaction, refname,
|
||||
&new_oid, have_old ? &old_oid : NULL,
|
||||
new_target.len ? NULL : &new_oid,
|
||||
have_old ? &old_oid : NULL,
|
||||
new_target.len ? new_target.buf : NULL,
|
||||
old_target.len ? old_target.buf : NULL,
|
||||
update_flags | create_reflog_flag,
|
||||
msg, &err))
|
||||
die("%s", err.buf);
|
||||
|
@ -211,12 +245,15 @@ static void parse_cmd_update(struct ref_transaction *transaction,
|
|||
update_flags = default_flags;
|
||||
free(refname);
|
||||
strbuf_release(&err);
|
||||
strbuf_release(&old_target);
|
||||
strbuf_release(&new_target);
|
||||
}
|
||||
|
||||
static void parse_cmd_create(struct ref_transaction *transaction,
|
||||
const char *next, const char *end)
|
||||
{
|
||||
struct strbuf err = STRBUF_INIT;
|
||||
struct strbuf new_target = STRBUF_INIT;
|
||||
char *refname;
|
||||
struct object_id new_oid;
|
||||
|
||||
|
@ -224,16 +261,22 @@ static void parse_cmd_create(struct ref_transaction *transaction,
|
|||
if (!refname)
|
||||
die("create: missing <ref>");
|
||||
|
||||
if (parse_next_oid(&next, end, &new_oid, "create", refname, 0))
|
||||
if (parse_next_arg(&next, end, &new_oid, &new_target,
|
||||
"create", refname, PARSE_REFNAME_TARGETS))
|
||||
die("create %s: missing <new-oid>", refname);
|
||||
|
||||
if (is_null_oid(&new_oid))
|
||||
if (!new_target.len && is_null_oid(&new_oid))
|
||||
die("create %s: zero <new-oid>", refname);
|
||||
|
||||
if (new_target.len && !(update_flags & REF_NO_DEREF))
|
||||
die("create %s: cannot create symrefs in deref mode", refname);
|
||||
|
||||
if (*next != line_termination)
|
||||
die("create %s: extra input: %s", refname, next);
|
||||
|
||||
if (ref_transaction_create(transaction, refname, &new_oid,
|
||||
if (ref_transaction_create(transaction, refname,
|
||||
new_target.len ? NULL : &new_oid ,
|
||||
new_target.len ? new_target.buf : NULL,
|
||||
update_flags | create_reflog_flag,
|
||||
msg, &err))
|
||||
die("%s", err.buf);
|
||||
|
@ -241,12 +284,14 @@ static void parse_cmd_create(struct ref_transaction *transaction,
|
|||
update_flags = default_flags;
|
||||
free(refname);
|
||||
strbuf_release(&err);
|
||||
strbuf_release(&new_target);
|
||||
}
|
||||
|
||||
static void parse_cmd_delete(struct ref_transaction *transaction,
|
||||
const char *next, const char *end)
|
||||
{
|
||||
struct strbuf err = STRBUF_INIT;
|
||||
struct strbuf old_target = STRBUF_INIT;
|
||||
char *refname;
|
||||
struct object_id old_oid;
|
||||
int have_old;
|
||||
|
@ -255,32 +300,40 @@ static void parse_cmd_delete(struct ref_transaction *transaction,
|
|||
if (!refname)
|
||||
die("delete: missing <ref>");
|
||||
|
||||
if (parse_next_oid(&next, end, &old_oid, "delete", refname,
|
||||
PARSE_SHA1_OLD)) {
|
||||
if (parse_next_arg(&next, end, &old_oid, &old_target,
|
||||
"delete", refname, PARSE_SHA1_OLD |
|
||||
PARSE_REFNAME_TARGETS)) {
|
||||
have_old = 0;
|
||||
} else {
|
||||
if (is_null_oid(&old_oid))
|
||||
if (!old_target.len && is_null_oid(&old_oid))
|
||||
die("delete %s: zero <old-oid>", refname);
|
||||
have_old = 1;
|
||||
have_old = 1 && !old_target.len;
|
||||
}
|
||||
|
||||
if (old_target.len && !(update_flags & REF_NO_DEREF))
|
||||
die("delete %s: cannot operate on symrefs in deref mode", refname);
|
||||
|
||||
if (*next != line_termination)
|
||||
die("delete %s: extra input: %s", refname, next);
|
||||
|
||||
if (ref_transaction_delete(transaction, refname,
|
||||
have_old ? &old_oid : NULL,
|
||||
update_flags, msg, &err))
|
||||
update_flags,
|
||||
old_target.len ? old_target.buf : NULL,
|
||||
msg, &err))
|
||||
die("%s", err.buf);
|
||||
|
||||
update_flags = default_flags;
|
||||
free(refname);
|
||||
strbuf_release(&err);
|
||||
strbuf_release(&old_target);
|
||||
}
|
||||
|
||||
static void parse_cmd_verify(struct ref_transaction *transaction,
|
||||
const char *next, const char *end)
|
||||
{
|
||||
struct strbuf err = STRBUF_INIT;
|
||||
struct strbuf old_target = STRBUF_INIT;
|
||||
char *refname;
|
||||
struct object_id old_oid;
|
||||
|
||||
|
@ -288,20 +341,27 @@ static void parse_cmd_verify(struct ref_transaction *transaction,
|
|||
if (!refname)
|
||||
die("verify: missing <ref>");
|
||||
|
||||
if (parse_next_oid(&next, end, &old_oid, "verify", refname,
|
||||
PARSE_SHA1_OLD))
|
||||
if (parse_next_arg(&next, end, &old_oid, &old_target,
|
||||
"verify", refname,
|
||||
PARSE_SHA1_OLD | PARSE_REFNAME_TARGETS))
|
||||
oidclr(&old_oid);
|
||||
|
||||
if (old_target.len && !(update_flags & REF_NO_DEREF))
|
||||
die("verify %s: cannot operate on symrefs in deref mode", refname);
|
||||
|
||||
if (*next != line_termination)
|
||||
die("verify %s: extra input: %s", refname, next);
|
||||
|
||||
if (ref_transaction_verify(transaction, refname, &old_oid,
|
||||
if (ref_transaction_verify(transaction, refname,
|
||||
old_target.len ? NULL : &old_oid,
|
||||
old_target.len ? old_target.buf : NULL,
|
||||
update_flags, &err))
|
||||
die("%s", err.buf);
|
||||
|
||||
update_flags = default_flags;
|
||||
free(refname);
|
||||
strbuf_release(&err);
|
||||
strbuf_release(&old_target);
|
||||
}
|
||||
|
||||
static void report_ok(const char *command)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* Copyright (C) Linus Torvalds, 2005
|
||||
*/
|
||||
#define USE_THE_INDEX_VARIABLE
|
||||
|
||||
#include "builtin.h"
|
||||
#include "config.h"
|
||||
#include "environment.h"
|
||||
|
@ -44,8 +44,8 @@ int cmd_write_tree(int argc, const char **argv, const char *cmd_prefix)
|
|||
prepare_repo_settings(the_repository);
|
||||
the_repository->settings.command_requires_full_index = 0;
|
||||
|
||||
ret = write_index_as_tree(&oid, &the_index, get_index_file(), flags,
|
||||
tree_prefix);
|
||||
ret = write_index_as_tree(&oid, the_repository->index, get_index_file(),
|
||||
flags, tree_prefix);
|
||||
switch (ret) {
|
||||
case 0:
|
||||
printf("%s\n", oid_to_hex(&oid));
|
||||
|
|
|
@ -1,34 +1,69 @@
|
|||
#!/usr/bin/env bash
|
||||
#!/bin/sh
|
||||
#
|
||||
# Install dependencies required to build and test Git on Linux and macOS
|
||||
#
|
||||
|
||||
. ${0%/*}/lib.sh
|
||||
|
||||
begin_group "Install dependencies"
|
||||
|
||||
P4WHENCE=https://cdist2.perforce.com/perforce/r21.2
|
||||
LFSWHENCE=https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION
|
||||
UBUNTU_COMMON_PKGS="make libssl-dev libcurl4-openssl-dev libexpat-dev
|
||||
tcl tk gettext zlib1g-dev perl-modules liberror-perl libauthen-sasl-perl
|
||||
libemail-valid-perl libio-socket-ssl-perl libnet-smtp-ssl-perl"
|
||||
JGITWHENCE=https://repo.eclipse.org/content/groups/releases//org/eclipse/jgit/org.eclipse.jgit.pgm/6.8.0.202311291450-r/org.eclipse.jgit.pgm-6.8.0.202311291450-r.sh
|
||||
|
||||
case "$runs_on_pool" in
|
||||
# Make sudo a no-op and execute the command directly when running as root.
|
||||
# While using sudo would be fine on most platforms when we are root already,
|
||||
# some platforms like e.g. Alpine Linux do not have sudo available by default
|
||||
# and would thus break.
|
||||
if test "$(id -u)" -eq 0
|
||||
then
|
||||
sudo () {
|
||||
"$@"
|
||||
}
|
||||
fi
|
||||
|
||||
case "$distro" in
|
||||
alpine-*)
|
||||
apk add --update shadow sudo build-base curl-dev openssl-dev expat-dev gettext \
|
||||
pcre2-dev python3 musl-libintl perl-utils ncurses \
|
||||
apache2 apache2-http2 apache2-proxy apache2-ssl apache2-webdav apr-util-dbd_sqlite3 \
|
||||
bash cvs gnupg perl-cgi perl-dbd-sqlite >/dev/null
|
||||
;;
|
||||
fedora-*)
|
||||
dnf -yq update >/dev/null &&
|
||||
dnf -yq install make gcc findutils diffutils perl python3 gettext zlib-devel expat-devel openssl-devel curl-devel pcre2-devel >/dev/null
|
||||
;;
|
||||
ubuntu-*)
|
||||
# Required so that apt doesn't wait for user input on certain packages.
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
sudo apt-get -q update
|
||||
sudo apt-get -q -y install language-pack-is libsvn-perl apache2 \
|
||||
$UBUNTU_COMMON_PKGS $CC_PACKAGE $PYTHON_PACKAGE
|
||||
mkdir --parents "$P4_PATH"
|
||||
pushd "$P4_PATH"
|
||||
wget --quiet "$P4WHENCE/bin.linux26x86_64/p4d"
|
||||
wget --quiet "$P4WHENCE/bin.linux26x86_64/p4"
|
||||
chmod u+x p4d
|
||||
chmod u+x p4
|
||||
popd
|
||||
mkdir --parents "$GIT_LFS_PATH"
|
||||
pushd "$GIT_LFS_PATH"
|
||||
wget --quiet "$LFSWHENCE/git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
|
||||
tar --extract --gunzip --file "git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
|
||||
cp git-lfs-$LINUX_GIT_LFS_VERSION/git-lfs .
|
||||
popd
|
||||
sudo apt-get -q -y install \
|
||||
language-pack-is libsvn-perl apache2 cvs cvsps git gnupg subversion \
|
||||
make libssl-dev libcurl4-openssl-dev libexpat-dev wget sudo default-jre \
|
||||
tcl tk gettext zlib1g-dev perl-modules liberror-perl libauthen-sasl-perl \
|
||||
libemail-valid-perl libio-socket-ssl-perl libnet-smtp-ssl-perl libdbd-sqlite3-perl libcgi-pm-perl \
|
||||
${CC_PACKAGE:-${CC:-gcc}} $PYTHON_PACKAGE
|
||||
|
||||
mkdir --parents "$CUSTOM_PATH"
|
||||
wget --quiet --directory-prefix="$CUSTOM_PATH" \
|
||||
"$P4WHENCE/bin.linux26x86_64/p4d" "$P4WHENCE/bin.linux26x86_64/p4"
|
||||
chmod a+x "$CUSTOM_PATH/p4d" "$CUSTOM_PATH/p4"
|
||||
|
||||
wget --quiet "$LFSWHENCE/git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
|
||||
tar -xzf "git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz" \
|
||||
-C "$CUSTOM_PATH" --strip-components=1 "git-lfs-$LINUX_GIT_LFS_VERSION/git-lfs"
|
||||
rm "git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
|
||||
|
||||
wget --quiet "$JGITWHENCE" --output-document="$CUSTOM_PATH/jgit"
|
||||
chmod a+x "$CUSTOM_PATH/jgit"
|
||||
;;
|
||||
ubuntu32-*)
|
||||
sudo linux32 --32bit i386 sh -c '
|
||||
apt update >/dev/null &&
|
||||
apt install -y build-essential libcurl4-openssl-dev \
|
||||
libssl-dev libexpat-dev gettext python >/dev/null
|
||||
'
|
||||
;;
|
||||
macos-*)
|
||||
export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1
|
||||
|
@ -38,12 +73,11 @@ macos-*)
|
|||
brew install $BREW_INSTALL_PACKAGES
|
||||
brew link --force gettext
|
||||
|
||||
mkdir -p "$P4_PATH"
|
||||
pushd "$P4_PATH"
|
||||
wget -q "$P4WHENCE/bin.macosx1015x86_64/helix-core-server.tgz" &&
|
||||
tar -xf helix-core-server.tgz &&
|
||||
sudo xattr -d com.apple.quarantine p4 p4d 2>/dev/null || true
|
||||
popd
|
||||
mkdir -p "$CUSTOM_PATH"
|
||||
wget -q "$P4WHENCE/bin.macosx1015x86_64/helix-core-server.tgz" &&
|
||||
tar -xf helix-core-server.tgz -C "$CUSTOM_PATH" p4 p4d &&
|
||||
sudo xattr -d com.apple.quarantine "$CUSTOM_PATH/p4" "$CUSTOM_PATH/p4d" 2>/dev/null || true
|
||||
rm helix-core-server.tgz
|
||||
|
||||
if test -n "$CC_PACKAGE"
|
||||
then
|
||||
|
@ -72,10 +106,6 @@ Documentation)
|
|||
test -n "$ALREADY_HAVE_ASCIIDOCTOR" ||
|
||||
sudo gem install --version 1.5.8 asciidoctor
|
||||
;;
|
||||
linux-gcc-default)
|
||||
sudo apt-get -q update
|
||||
sudo apt-get -q -y install $UBUNTU_COMMON_PKGS
|
||||
;;
|
||||
esac
|
||||
|
||||
if type p4d >/dev/null 2>&1 && type p4 >/dev/null 2>&1
|
||||
|
@ -87,6 +117,7 @@ then
|
|||
else
|
||||
echo >&2 "WARNING: perforce wasn't installed, see above for clues why"
|
||||
fi
|
||||
|
||||
if type git-lfs >/dev/null 2>&1
|
||||
then
|
||||
echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)"
|
||||
|
@ -94,3 +125,13 @@ then
|
|||
else
|
||||
echo >&2 "WARNING: git-lfs wasn't installed, see above for clues why"
|
||||
fi
|
||||
|
||||
if type jgit >/dev/null 2>&1
|
||||
then
|
||||
echo "$(tput setaf 6)JGit Version$(tput sgr0)"
|
||||
jgit version
|
||||
else
|
||||
echo >&2 "WARNING: JGit wasn't installed, see above for clues why"
|
||||
fi
|
||||
|
||||
end_group "Install dependencies"
|
||||
|
|
|
@ -1,46 +0,0 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Install dependencies required to build and test Git inside container
|
||||
#
|
||||
|
||||
. ${0%/*}/lib.sh
|
||||
|
||||
begin_group "Install dependencies"
|
||||
|
||||
case "$jobname" in
|
||||
linux32)
|
||||
linux32 --32bit i386 sh -c '
|
||||
apt update >/dev/null &&
|
||||
apt install -y build-essential libcurl4-openssl-dev \
|
||||
libssl-dev libexpat-dev gettext python >/dev/null
|
||||
'
|
||||
;;
|
||||
linux-musl)
|
||||
apk add --update shadow sudo build-base curl-dev openssl-dev expat-dev gettext \
|
||||
pcre2-dev python3 musl-libintl perl-utils ncurses \
|
||||
apache2 apache2-http2 apache2-proxy apache2-ssl apache2-webdav apr-util-dbd_sqlite3 \
|
||||
bash cvs gnupg perl-cgi perl-dbd-sqlite >/dev/null
|
||||
;;
|
||||
linux-*|StaticAnalysis)
|
||||
# Required so that apt doesn't wait for user input on certain packages.
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
apt update -q &&
|
||||
apt install -q -y sudo git make language-pack-is libsvn-perl apache2 libssl-dev \
|
||||
libcurl4-openssl-dev libexpat-dev tcl tk gettext zlib1g-dev \
|
||||
perl-modules liberror-perl libauthen-sasl-perl libemail-valid-perl \
|
||||
libdbd-sqlite3-perl libio-socket-ssl-perl libnet-smtp-ssl-perl ${CC_PACKAGE:-${CC:-gcc}} \
|
||||
apache2 cvs cvsps gnupg libcgi-pm-perl subversion
|
||||
|
||||
if test "$jobname" = StaticAnalysis
|
||||
then
|
||||
apt install -q -y coccinelle
|
||||
fi
|
||||
;;
|
||||
pedantic)
|
||||
dnf -yq update >/dev/null &&
|
||||
dnf -yq install make gcc findutils diffutils perl python3 gettext zlib-devel expat-devel openssl-devel curl-devel pcre2-devel >/dev/null
|
||||
;;
|
||||
esac
|
||||
|
||||
end_group "Install dependencies"
|
14
ci/lib.sh
14
ci/lib.sh
|
@ -279,7 +279,7 @@ then
|
|||
|
||||
cache_dir="$HOME/none"
|
||||
|
||||
runs_on_pool=$(echo "$CI_JOB_IMAGE" | tr : -)
|
||||
distro=$(echo "$CI_JOB_IMAGE" | tr : -)
|
||||
JOBS=$(nproc)
|
||||
else
|
||||
echo "Could not identify CI type" >&2
|
||||
|
@ -318,7 +318,7 @@ export DEFAULT_TEST_TARGET=prove
|
|||
export GIT_TEST_CLONE_2GB=true
|
||||
export SKIP_DASHED_BUILT_INS=YesPlease
|
||||
|
||||
case "$runs_on_pool" in
|
||||
case "$distro" in
|
||||
ubuntu-*)
|
||||
if test "$jobname" = "linux-gcc-default"
|
||||
then
|
||||
|
@ -340,10 +340,6 @@ ubuntu-*)
|
|||
# image.
|
||||
# Keep that in mind when you encounter a broken OS X build!
|
||||
export LINUX_GIT_LFS_VERSION="1.5.2"
|
||||
|
||||
P4_PATH="$HOME/custom/p4"
|
||||
GIT_LFS_PATH="$HOME/custom/git-lfs"
|
||||
export PATH="$GIT_LFS_PATH:$P4_PATH:$PATH"
|
||||
;;
|
||||
macos-*)
|
||||
MAKEFLAGS="$MAKEFLAGS PYTHON_PATH=$(which python3)"
|
||||
|
@ -351,12 +347,12 @@ macos-*)
|
|||
then
|
||||
MAKEFLAGS="$MAKEFLAGS APPLE_COMMON_CRYPTO_SHA1=Yes"
|
||||
fi
|
||||
|
||||
P4_PATH="$HOME/custom/p4"
|
||||
export PATH="$P4_PATH:$PATH"
|
||||
;;
|
||||
esac
|
||||
|
||||
CUSTOM_PATH="${CUSTOM_PATH:-$HOME/path}"
|
||||
export PATH="$CUSTOM_PATH:$PATH"
|
||||
|
||||
case "$jobname" in
|
||||
linux32)
|
||||
CC=gcc
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
group "Build fuzzers" make \
|
||||
CC=clang \
|
||||
CXX=clang++ \
|
||||
FUZZ_CXX=clang++ \
|
||||
CFLAGS="-fsanitize=fuzzer-no-link,address" \
|
||||
LIB_FUZZING_ENGINE="-fsanitize=fuzzer,address" \
|
||||
fuzz-all
|
||||
|
|
|
@ -53,8 +53,6 @@ if test -n "$run_tests"
|
|||
then
|
||||
group "Run tests" make test ||
|
||||
handle_failed_tests
|
||||
group "Run unit tests" \
|
||||
make DEFAULT_UNIT_TEST_TARGET=unit-tests-prove unit-tests
|
||||
fi
|
||||
check_unignored_build_artifacts
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@ handle_failed_tests
|
|||
|
||||
# We only have one unit test at the moment, so run it in the first slice
|
||||
if [ "$1" == "0" ] ; then
|
||||
group "Run unit tests" make --quiet -C t unit-tests-prove
|
||||
group "Run unit tests" make --quiet -C t unit-tests-test-tool
|
||||
fi
|
||||
|
||||
check_unignored_build_artifacts
|
||||
|
|
|
@ -344,7 +344,6 @@ static int graph_read_bloom_data(const unsigned char *chunk_start,
|
|||
size_t chunk_size, void *data)
|
||||
{
|
||||
struct commit_graph *g = data;
|
||||
uint32_t hash_version;
|
||||
|
||||
if (chunk_size < BLOOMDATA_CHUNK_HEADER_SIZE) {
|
||||
warning(_("ignoring too-small changed-path chunk"
|
||||
|
@ -356,13 +355,9 @@ static int graph_read_bloom_data(const unsigned char *chunk_start,
|
|||
|
||||
g->chunk_bloom_data = chunk_start;
|
||||
g->chunk_bloom_data_size = chunk_size;
|
||||
hash_version = get_be32(chunk_start);
|
||||
|
||||
if (hash_version != 1)
|
||||
return 0;
|
||||
|
||||
g->bloom_filter_settings = xmalloc(sizeof(struct bloom_filter_settings));
|
||||
g->bloom_filter_settings->hash_version = hash_version;
|
||||
g->bloom_filter_settings->hash_version = get_be32(chunk_start);
|
||||
g->bloom_filter_settings->num_hashes = get_be32(chunk_start + 4);
|
||||
g->bloom_filter_settings->bits_per_entry = get_be32(chunk_start + 8);
|
||||
g->bloom_filter_settings->max_changed_paths = DEFAULT_BLOOM_MAX_CHANGES;
|
||||
|
@ -459,7 +454,7 @@ struct commit_graph *parse_commit_graph(struct repo_settings *s,
|
|||
graph->read_generation_data = 1;
|
||||
}
|
||||
|
||||
if (s->commit_graph_read_changed_paths) {
|
||||
if (s->commit_graph_changed_paths_version) {
|
||||
read_chunk(cf, GRAPH_CHUNKID_BLOOMINDEXES,
|
||||
graph_read_bloom_index, graph);
|
||||
read_chunk(cf, GRAPH_CHUNKID_BLOOMDATA,
|
||||
|
@ -543,6 +538,31 @@ static int validate_mixed_generation_chain(struct commit_graph *g)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void validate_mixed_bloom_settings(struct commit_graph *g)
|
||||
{
|
||||
struct bloom_filter_settings *settings = NULL;
|
||||
for (; g; g = g->base_graph) {
|
||||
if (!g->bloom_filter_settings)
|
||||
continue;
|
||||
if (!settings) {
|
||||
settings = g->bloom_filter_settings;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (g->bloom_filter_settings->bits_per_entry != settings->bits_per_entry ||
|
||||
g->bloom_filter_settings->num_hashes != settings->num_hashes ||
|
||||
g->bloom_filter_settings->hash_version != settings->hash_version) {
|
||||
g->chunk_bloom_indexes = NULL;
|
||||
g->chunk_bloom_data = NULL;
|
||||
FREE_AND_NULL(g->bloom_filter_settings);
|
||||
|
||||
warning(_("disabling Bloom filters for commit-graph "
|
||||
"layer '%s' due to incompatible settings"),
|
||||
oid_to_hex(&g->oid));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int add_graph_to_chain(struct commit_graph *g,
|
||||
struct commit_graph *chain,
|
||||
struct object_id *oids,
|
||||
|
@ -666,6 +686,7 @@ struct commit_graph *load_commit_graph_chain_fd_st(struct repository *r,
|
|||
}
|
||||
|
||||
validate_mixed_generation_chain(graph_chain);
|
||||
validate_mixed_bloom_settings(graph_chain);
|
||||
|
||||
free(oids);
|
||||
fclose(fp);
|
||||
|
@ -810,6 +831,7 @@ void close_commit_graph(struct raw_object_store *o)
|
|||
return;
|
||||
|
||||
clear_commit_graph_data_slab(&commit_graph_data_slab);
|
||||
deinit_bloom_filters();
|
||||
free_commit_graph(o->commit_graph);
|
||||
o->commit_graph = NULL;
|
||||
}
|
||||
|
@ -1147,6 +1169,7 @@ struct write_commit_graph_context {
|
|||
int count_bloom_filter_not_computed;
|
||||
int count_bloom_filter_trunc_empty;
|
||||
int count_bloom_filter_trunc_large;
|
||||
int count_bloom_filter_upgraded;
|
||||
};
|
||||
|
||||
static int write_graph_chunk_fanout(struct hashfile *f,
|
||||
|
@ -1754,6 +1777,8 @@ static void trace2_bloom_filter_write_statistics(struct write_commit_graph_conte
|
|||
ctx->count_bloom_filter_trunc_empty);
|
||||
trace2_data_intmax("commit-graph", ctx->r, "filter-trunc-large",
|
||||
ctx->count_bloom_filter_trunc_large);
|
||||
trace2_data_intmax("commit-graph", ctx->r, "filter-upgraded",
|
||||
ctx->count_bloom_filter_upgraded);
|
||||
}
|
||||
|
||||
static void compute_bloom_filters(struct write_commit_graph_context *ctx)
|
||||
|
@ -1795,6 +1820,8 @@ static void compute_bloom_filters(struct write_commit_graph_context *ctx)
|
|||
ctx->count_bloom_filter_trunc_empty++;
|
||||
if (computed & BLOOM_TRUNC_LARGE)
|
||||
ctx->count_bloom_filter_trunc_large++;
|
||||
} else if (computed & BLOOM_UPGRADED) {
|
||||
ctx->count_bloom_filter_upgraded++;
|
||||
} else if (computed & BLOOM_NOT_COMPUTED)
|
||||
ctx->count_bloom_filter_not_computed++;
|
||||
ctx->total_bloom_filter_data_size += filter
|
||||
|
@ -2478,6 +2505,13 @@ int write_commit_graph(struct object_directory *odb,
|
|||
}
|
||||
if (!commit_graph_compatible(r))
|
||||
return 0;
|
||||
if (r->settings.commit_graph_changed_paths_version < -1
|
||||
|| r->settings.commit_graph_changed_paths_version > 2) {
|
||||
warning(_("attempting to write a commit-graph, but "
|
||||
"'commitgraph.changedPathsVersion' (%d) is not supported"),
|
||||
r->settings.commit_graph_changed_paths_version);
|
||||
return 0;
|
||||
}
|
||||
|
||||
CALLOC_ARRAY(ctx, 1);
|
||||
ctx->r = r;
|
||||
|
@ -2490,6 +2524,7 @@ int write_commit_graph(struct object_directory *odb,
|
|||
ctx->write_generation_data = (get_configured_generation_version(r) == 2);
|
||||
ctx->num_generation_data_overflows = 0;
|
||||
|
||||
bloom_settings.hash_version = r->settings.commit_graph_changed_paths_version;
|
||||
bloom_settings.bits_per_entry = git_env_ulong("GIT_TEST_BLOOM_SETTINGS_BITS_PER_ENTRY",
|
||||
bloom_settings.bits_per_entry);
|
||||
bloom_settings.num_hashes = git_env_ulong("GIT_TEST_BLOOM_SETTINGS_NUM_HASHES",
|
||||
|
@ -2519,12 +2554,20 @@ int write_commit_graph(struct object_directory *odb,
|
|||
g = ctx->r->objects->commit_graph;
|
||||
|
||||
/* We have changed-paths already. Keep them in the next graph */
|
||||
if (g && g->chunk_bloom_data) {
|
||||
if (g && g->bloom_filter_settings) {
|
||||
ctx->changed_paths = 1;
|
||||
ctx->bloom_settings = g->bloom_filter_settings;
|
||||
|
||||
/* don't propagate the hash_version unless unspecified */
|
||||
if (bloom_settings.hash_version == -1)
|
||||
bloom_settings.hash_version = g->bloom_filter_settings->hash_version;
|
||||
bloom_settings.bits_per_entry = g->bloom_filter_settings->bits_per_entry;
|
||||
bloom_settings.num_hashes = g->bloom_filter_settings->num_hashes;
|
||||
bloom_settings.max_changed_paths = g->bloom_filter_settings->max_changed_paths;
|
||||
}
|
||||
}
|
||||
|
||||
bloom_settings.hash_version = bloom_settings.hash_version == 2 ? 2 : 1;
|
||||
|
||||
if (ctx->split) {
|
||||
struct commit_graph *g = ctx->r->objects->commit_graph;
|
||||
|
||||
|
@ -2607,6 +2650,9 @@ int write_commit_graph(struct object_directory *odb,
|
|||
|
||||
res = write_commit_graph_file(ctx);
|
||||
|
||||
if (ctx->changed_paths)
|
||||
deinit_bloom_filters();
|
||||
|
||||
if (ctx->split)
|
||||
mark_commit_graphs(ctx);
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ int main(int argc, const char **argv)
|
|||
setlocale(LC_CTYPE, "");
|
||||
git_setup_gettext();
|
||||
|
||||
initialize_the_repository();
|
||||
initialize_repository(the_repository);
|
||||
|
||||
attr_start();
|
||||
|
||||
|
|
35
config.c
35
config.c
|
@ -317,6 +317,21 @@ static int include_by_branch(const char *cond, size_t cond_len)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int include_by_hostname(const char *cond, size_t cond_len)
|
||||
{
|
||||
int ret;
|
||||
char my_host[HOST_NAME_MAX + 1];
|
||||
struct strbuf pattern = STRBUF_INIT;
|
||||
|
||||
if (xgethostname(my_host, sizeof(my_host)))
|
||||
return 0;
|
||||
|
||||
strbuf_add(&pattern, cond, cond_len);
|
||||
ret = !wildmatch(pattern.buf, my_host, 0);
|
||||
strbuf_release(&pattern);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int add_remote_url(const char *var, const char *value,
|
||||
const struct config_context *ctx UNUSED, void *data)
|
||||
{
|
||||
|
@ -406,6 +421,8 @@ static int include_condition_is_true(const struct key_value_info *kvi,
|
|||
else if (skip_prefix_mem(cond, cond_len, "hasconfig:remote.*.url:", &cond,
|
||||
&cond_len))
|
||||
return include_by_remote_url(inc, cond, cond_len);
|
||||
else if (skip_prefix_mem(cond, cond_len, "hostname:", &cond, &cond_len))
|
||||
return include_by_hostname(cond, cond_len);
|
||||
|
||||
/* unknown conditionals are always false */
|
||||
return 0;
|
||||
|
@ -2637,6 +2654,24 @@ int repo_config_get_pathname(struct repository *repo,
|
|||
return ret;
|
||||
}
|
||||
|
||||
int repo_config_get_expiry(struct repository *repo,
|
||||
const char *key, const char **dest)
|
||||
{
|
||||
int ret;
|
||||
|
||||
git_config_check_init(repo);
|
||||
|
||||
ret = repo_config_get_string(repo, key, (char **)dest);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (strcmp(*dest, "now")) {
|
||||
timestamp_t now = approxidate("now");
|
||||
if (approxidate(*dest) >= now)
|
||||
git_die_config(key, _("Invalid %s: '%s'"), key, *dest);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Read values into protected_config. */
|
||||
static void read_protected_config(void)
|
||||
{
|
||||
|
|
2
config.h
2
config.h
|
@ -578,6 +578,8 @@ int repo_config_get_maybe_bool(struct repository *repo,
|
|||
const char *key, int *dest);
|
||||
int repo_config_get_pathname(struct repository *repo,
|
||||
const char *key, const char **dest);
|
||||
int repo_config_get_expiry(struct repository *repo,
|
||||
const char *key, const char **dest);
|
||||
|
||||
/*
|
||||
* Functions for reading protected config. By definition, protected
|
||||
|
|
|
@ -68,6 +68,7 @@ ifeq ($(uname_S),Linux)
|
|||
ifneq ($(findstring .el7.,$(uname_R)),)
|
||||
BASIC_CFLAGS += -std=c99
|
||||
endif
|
||||
LINK_FUZZ_PROGRAMS = YesPlease
|
||||
endif
|
||||
ifeq ($(uname_S),GNU/kFreeBSD)
|
||||
HAVE_ALLOCA_H = YesPlease
|
||||
|
|
|
@ -1005,10 +1005,11 @@ endforeach()
|
|||
|
||||
#test-tool
|
||||
parse_makefile_for_sources(test-tool_SOURCES "TEST_BUILTINS_OBJS")
|
||||
add_library(test-lib OBJECT ${CMAKE_SOURCE_DIR}/t/unit-tests/test-lib.c)
|
||||
|
||||
list(TRANSFORM test-tool_SOURCES PREPEND "${CMAKE_SOURCE_DIR}/t/helper/")
|
||||
add_executable(test-tool ${CMAKE_SOURCE_DIR}/t/helper/test-tool.c ${test-tool_SOURCES} ${test-reftable_SOURCES})
|
||||
target_link_libraries(test-tool common-main)
|
||||
target_link_libraries(test-tool test-lib common-main)
|
||||
|
||||
set_target_properties(test-fake-ssh test-tool
|
||||
PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/t/helper)
|
||||
|
|
|
@ -31,15 +31,29 @@
|
|||
# Note that "git" is optional --- '!f() { : commit; ...}; f' would complete
|
||||
# just like the 'git commit' command.
|
||||
#
|
||||
# If you have a command that is not part of git, but you would still
|
||||
# like completion, you can use __git_complete:
|
||||
# To add completion for git subcommands that are implemented in external
|
||||
# scripts, define a function of the form '_git_${subcommand}' while replacing
|
||||
# all dashes with underscores, and the main git completion will make use of it.
|
||||
# For example, to add completion for 'git do-stuff' (which could e.g. live
|
||||
# in /usr/bin/git-do-stuff), name the completion function '_git_do_stuff'.
|
||||
# See _git_show, _git_bisect etc. below for more examples.
|
||||
#
|
||||
# If you have a shell command that is not part of git (and is not called as a
|
||||
# git subcommand), but you would still like git-style completion for it, use
|
||||
# __git_complete. For example, to use the same completion as for 'git log' also
|
||||
# for the 'gl' command:
|
||||
#
|
||||
# __git_complete gl git_log
|
||||
#
|
||||
# Or if it's a main command (i.e. git or gitk):
|
||||
# Or if the 'gk' command should be completed the same as 'gitk':
|
||||
#
|
||||
# __git_complete gk gitk
|
||||
#
|
||||
# The second parameter of __git_complete gives the completion function; it is
|
||||
# resolved as a function named "$2", or "__$2_main", or "_$2" in that order.
|
||||
# In the examples above, the actual functions used for completion will be
|
||||
# _git_log and __gitk_main.
|
||||
#
|
||||
# Compatible with bash 3.2.57.
|
||||
#
|
||||
# You can set the following environment variables to influence the behavior of
|
||||
|
@ -1315,10 +1329,12 @@ __git_find_last_on_cmdline ()
|
|||
while test $# -gt 1; do
|
||||
case "$1" in
|
||||
--show-idx) show_idx=y ;;
|
||||
--) shift && break ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
[ $# -eq 1 ] || return 1 # return 1 if we got wrong # of non-opts
|
||||
local wordlist="$1"
|
||||
|
||||
while [ $c -gt "$__git_cmd_idx" ]; do
|
||||
|
@ -1429,6 +1445,29 @@ __git_count_arguments ()
|
|||
printf "%d" $c
|
||||
}
|
||||
|
||||
# Complete actual dir (not pathspec), respecting any -C options.
|
||||
#
|
||||
# Usage: __git_complete_refs [<option>]...
|
||||
# --cur=<word>: The current dir to be completed. Defaults to the current word.
|
||||
__git_complete_dir ()
|
||||
{
|
||||
local cur_="$cur"
|
||||
|
||||
while test $# != 0; do
|
||||
case "$1" in
|
||||
--cur=*) cur_="${1##--cur=}" ;;
|
||||
*) return 1 ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
# This rev-parse invocation amounts to a pwd which respects -C options
|
||||
local context_dir=$(__git rev-parse --show-toplevel --show-prefix 2>/dev/null | paste -s -d '/' 2>/dev/null)
|
||||
[ -d "$context_dir" ] || return 1
|
||||
|
||||
COMPREPLY=$(cd "$context_dir" 2>/dev/null && compgen -d -- "$cur_")
|
||||
}
|
||||
|
||||
__git_whitespacelist="nowarn warn error error-all fix"
|
||||
__git_patchformat="mbox stgit stgit-series hg mboxrd"
|
||||
__git_showcurrentpatch="diff raw"
|
||||
|
@ -1447,6 +1486,10 @@ _git_am ()
|
|||
__gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}"
|
||||
return
|
||||
;;
|
||||
--directory=*)
|
||||
__git_complete_dir --cur="${cur##--directory=}"
|
||||
return
|
||||
;;
|
||||
--patch-format=*)
|
||||
__gitcomp "$__git_patchformat" "" "${cur##--patch-format=}"
|
||||
return
|
||||
|
@ -1978,7 +2021,17 @@ __git_format_patch_extra_options="
|
|||
|
||||
_git_format_patch ()
|
||||
{
|
||||
case "$prev,$cur" in
|
||||
-o,*)
|
||||
__git_complete_dir
|
||||
return
|
||||
;;
|
||||
esac
|
||||
case "$cur" in
|
||||
--output-directory=*)
|
||||
__git_complete_dir --cur="${cur##--output-directory=}"
|
||||
return
|
||||
;;
|
||||
--thread=*)
|
||||
__gitcomp "
|
||||
deep shallow
|
||||
|
@ -2582,7 +2635,9 @@ _git_send_email ()
|
|||
return
|
||||
;;
|
||||
esac
|
||||
__git_complete_revlist
|
||||
if [ "$(__git_find_last_on_cmdline -- "--format-patch --no-format-patch")" != "--no-format-patch" ]; then
|
||||
__git_complete_revlist
|
||||
fi
|
||||
}
|
||||
|
||||
_git_stage ()
|
||||
|
@ -3581,6 +3636,17 @@ _git_svn ()
|
|||
fi
|
||||
}
|
||||
|
||||
_git_symbolic_ref () {
|
||||
case "$cur" in
|
||||
--*)
|
||||
__gitcomp_builtin symbolic-ref
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
__git_complete_refs
|
||||
}
|
||||
|
||||
_git_tag ()
|
||||
{
|
||||
local i c="$__git_cmd_idx" f=0
|
||||
|
|
138
credential.c
138
credential.c
|
@ -25,13 +25,64 @@ void credential_clear(struct credential *c)
|
|||
free(c->path);
|
||||
free(c->username);
|
||||
free(c->password);
|
||||
free(c->credential);
|
||||
free(c->oauth_refresh_token);
|
||||
free(c->authtype);
|
||||
string_list_clear(&c->helpers, 0);
|
||||
strvec_clear(&c->wwwauth_headers);
|
||||
strvec_clear(&c->state_headers);
|
||||
strvec_clear(&c->state_headers_to_send);
|
||||
|
||||
credential_init(c);
|
||||
}
|
||||
|
||||
void credential_next_state(struct credential *c)
|
||||
{
|
||||
strvec_clear(&c->state_headers_to_send);
|
||||
SWAP(c->state_headers, c->state_headers_to_send);
|
||||
}
|
||||
|
||||
void credential_clear_secrets(struct credential *c)
|
||||
{
|
||||
FREE_AND_NULL(c->password);
|
||||
FREE_AND_NULL(c->credential);
|
||||
}
|
||||
|
||||
static void credential_set_capability(struct credential_capability *capa,
|
||||
enum credential_op_type op_type)
|
||||
{
|
||||
switch (op_type) {
|
||||
case CREDENTIAL_OP_INITIAL:
|
||||
capa->request_initial = 1;
|
||||
break;
|
||||
case CREDENTIAL_OP_HELPER:
|
||||
capa->request_helper = 1;
|
||||
break;
|
||||
case CREDENTIAL_OP_RESPONSE:
|
||||
capa->response = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void credential_set_all_capabilities(struct credential *c,
|
||||
enum credential_op_type op_type)
|
||||
{
|
||||
credential_set_capability(&c->capa_authtype, op_type);
|
||||
credential_set_capability(&c->capa_state, op_type);
|
||||
}
|
||||
|
||||
static void announce_one(struct credential_capability *cc, const char *name, FILE *fp) {
|
||||
if (cc->request_initial)
|
||||
fprintf(fp, "capability %s\n", name);
|
||||
}
|
||||
|
||||
void credential_announce_capabilities(struct credential *c, FILE *fp) {
|
||||
fprintf(fp, "version 0\n");
|
||||
announce_one(&c->capa_authtype, "authtype", fp);
|
||||
announce_one(&c->capa_state, "state", fp);
|
||||
}
|
||||
|
||||
int credential_match(const struct credential *want,
|
||||
const struct credential *have, int match_password)
|
||||
{
|
||||
|
@ -40,7 +91,8 @@ int credential_match(const struct credential *want,
|
|||
CHECK(host) &&
|
||||
CHECK(path) &&
|
||||
CHECK(username) &&
|
||||
(!match_password || CHECK(password));
|
||||
(!match_password || CHECK(password)) &&
|
||||
(!match_password || CHECK(credential));
|
||||
#undef CHECK
|
||||
}
|
||||
|
||||
|
@ -208,7 +260,26 @@ static void credential_getpass(struct credential *c)
|
|||
PROMPT_ASKPASS);
|
||||
}
|
||||
|
||||
int credential_read(struct credential *c, FILE *fp)
|
||||
int credential_has_capability(const struct credential_capability *capa,
|
||||
enum credential_op_type op_type)
|
||||
{
|
||||
/*
|
||||
* We're checking here if each previous step indicated that we had the
|
||||
* capability. If it did, then we want to pass it along; conversely, if
|
||||
* it did not, we don't want to report that to our caller.
|
||||
*/
|
||||
switch (op_type) {
|
||||
case CREDENTIAL_OP_HELPER:
|
||||
return capa->request_initial;
|
||||
case CREDENTIAL_OP_RESPONSE:
|
||||
return capa->request_initial && capa->request_helper;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int credential_read(struct credential *c, FILE *fp,
|
||||
enum credential_op_type op_type)
|
||||
{
|
||||
struct strbuf line = STRBUF_INIT;
|
||||
|
||||
|
@ -233,6 +304,9 @@ int credential_read(struct credential *c, FILE *fp)
|
|||
} else if (!strcmp(key, "password")) {
|
||||
free(c->password);
|
||||
c->password = xstrdup(value);
|
||||
} else if (!strcmp(key, "credential")) {
|
||||
free(c->credential);
|
||||
c->credential = xstrdup(value);
|
||||
} else if (!strcmp(key, "protocol")) {
|
||||
free(c->protocol);
|
||||
c->protocol = xstrdup(value);
|
||||
|
@ -242,8 +316,19 @@ int credential_read(struct credential *c, FILE *fp)
|
|||
} else if (!strcmp(key, "path")) {
|
||||
free(c->path);
|
||||
c->path = xstrdup(value);
|
||||
} else if (!strcmp(key, "ephemeral")) {
|
||||
c->ephemeral = !!git_config_bool("ephemeral", value);
|
||||
} else if (!strcmp(key, "wwwauth[]")) {
|
||||
strvec_push(&c->wwwauth_headers, value);
|
||||
} else if (!strcmp(key, "state[]")) {
|
||||
strvec_push(&c->state_headers, value);
|
||||
} else if (!strcmp(key, "capability[]")) {
|
||||
if (!strcmp(value, "authtype"))
|
||||
credential_set_capability(&c->capa_authtype, op_type);
|
||||
else if (!strcmp(value, "state"))
|
||||
credential_set_capability(&c->capa_state, op_type);
|
||||
} else if (!strcmp(key, "continue")) {
|
||||
c->multistage = !!git_config_bool("continue", value);
|
||||
} else if (!strcmp(key, "password_expiry_utc")) {
|
||||
errno = 0;
|
||||
c->password_expiry_utc = parse_timestamp(value, NULL, 10);
|
||||
|
@ -252,6 +337,9 @@ int credential_read(struct credential *c, FILE *fp)
|
|||
} else if (!strcmp(key, "oauth_refresh_token")) {
|
||||
free(c->oauth_refresh_token);
|
||||
c->oauth_refresh_token = xstrdup(value);
|
||||
} else if (!strcmp(key, "authtype")) {
|
||||
free(c->authtype);
|
||||
c->authtype = xstrdup(value);
|
||||
} else if (!strcmp(key, "url")) {
|
||||
credential_from_url(c, value);
|
||||
} else if (!strcmp(key, "quit")) {
|
||||
|
@ -280,8 +368,20 @@ static void credential_write_item(FILE *fp, const char *key, const char *value,
|
|||
fprintf(fp, "%s=%s\n", key, value);
|
||||
}
|
||||
|
||||
void credential_write(const struct credential *c, FILE *fp)
|
||||
void credential_write(const struct credential *c, FILE *fp,
|
||||
enum credential_op_type op_type)
|
||||
{
|
||||
if (credential_has_capability(&c->capa_authtype, op_type))
|
||||
credential_write_item(fp, "capability[]", "authtype", 0);
|
||||
if (credential_has_capability(&c->capa_state, op_type))
|
||||
credential_write_item(fp, "capability[]", "state", 0);
|
||||
|
||||
if (credential_has_capability(&c->capa_authtype, op_type)) {
|
||||
credential_write_item(fp, "authtype", c->authtype, 0);
|
||||
credential_write_item(fp, "credential", c->credential, 0);
|
||||
if (c->ephemeral)
|
||||
credential_write_item(fp, "ephemeral", "1", 0);
|
||||
}
|
||||
credential_write_item(fp, "protocol", c->protocol, 1);
|
||||
credential_write_item(fp, "host", c->host, 1);
|
||||
credential_write_item(fp, "path", c->path, 0);
|
||||
|
@ -295,6 +395,12 @@ void credential_write(const struct credential *c, FILE *fp)
|
|||
}
|
||||
for (size_t i = 0; i < c->wwwauth_headers.nr; i++)
|
||||
credential_write_item(fp, "wwwauth[]", c->wwwauth_headers.v[i], 0);
|
||||
if (credential_has_capability(&c->capa_state, op_type)) {
|
||||
if (c->multistage)
|
||||
credential_write_item(fp, "continue", "1", 0);
|
||||
for (size_t i = 0; i < c->state_headers_to_send.nr; i++)
|
||||
credential_write_item(fp, "state[]", c->state_headers_to_send.v[i], 0);
|
||||
}
|
||||
}
|
||||
|
||||
static int run_credential_helper(struct credential *c,
|
||||
|
@ -317,14 +423,14 @@ static int run_credential_helper(struct credential *c,
|
|||
|
||||
fp = xfdopen(helper.in, "w");
|
||||
sigchain_push(SIGPIPE, SIG_IGN);
|
||||
credential_write(c, fp);
|
||||
credential_write(c, fp, want_output ? CREDENTIAL_OP_HELPER : CREDENTIAL_OP_RESPONSE);
|
||||
fclose(fp);
|
||||
sigchain_pop(SIGPIPE);
|
||||
|
||||
if (want_output) {
|
||||
int r;
|
||||
fp = xfdopen(helper.out, "r");
|
||||
r = credential_read(c, fp);
|
||||
r = credential_read(c, fp, CREDENTIAL_OP_HELPER);
|
||||
fclose(fp);
|
||||
if (r < 0) {
|
||||
finish_command(&helper);
|
||||
|
@ -357,14 +463,19 @@ static int credential_do(struct credential *c, const char *helper,
|
|||
return r;
|
||||
}
|
||||
|
||||
void credential_fill(struct credential *c)
|
||||
void credential_fill(struct credential *c, int all_capabilities)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (c->username && c->password)
|
||||
if ((c->username && c->password) || c->credential)
|
||||
return;
|
||||
|
||||
credential_next_state(c);
|
||||
c->multistage = 0;
|
||||
|
||||
credential_apply_config(c);
|
||||
if (all_capabilities)
|
||||
credential_set_all_capabilities(c, CREDENTIAL_OP_INITIAL);
|
||||
|
||||
for (i = 0; i < c->helpers.nr; i++) {
|
||||
credential_do(c, c->helpers.items[i].string, "get");
|
||||
|
@ -374,15 +485,17 @@ void credential_fill(struct credential *c)
|
|||
/* Reset expiry to maintain consistency */
|
||||
c->password_expiry_utc = TIME_MAX;
|
||||
}
|
||||
if (c->username && c->password)
|
||||
if ((c->username && c->password) || c->credential) {
|
||||
strvec_clear(&c->wwwauth_headers);
|
||||
return;
|
||||
}
|
||||
if (c->quit)
|
||||
die("credential helper '%s' told us to quit",
|
||||
c->helpers.items[i].string);
|
||||
}
|
||||
|
||||
credential_getpass(c);
|
||||
if (!c->username && !c->password)
|
||||
if (!c->username && !c->password && !c->credential)
|
||||
die("unable to get password from user");
|
||||
}
|
||||
|
||||
|
@ -392,9 +505,11 @@ void credential_approve(struct credential *c)
|
|||
|
||||
if (c->approved)
|
||||
return;
|
||||
if (!c->username || !c->password || c->password_expiry_utc < time(NULL))
|
||||
if (((!c->username || !c->password) && !c->credential) || c->password_expiry_utc < time(NULL))
|
||||
return;
|
||||
|
||||
credential_next_state(c);
|
||||
|
||||
credential_apply_config(c);
|
||||
|
||||
for (i = 0; i < c->helpers.nr; i++)
|
||||
|
@ -406,6 +521,8 @@ void credential_reject(struct credential *c)
|
|||
{
|
||||
int i;
|
||||
|
||||
credential_next_state(c);
|
||||
|
||||
credential_apply_config(c);
|
||||
|
||||
for (i = 0; i < c->helpers.nr; i++)
|
||||
|
@ -413,6 +530,7 @@ void credential_reject(struct credential *c)
|
|||
|
||||
FREE_AND_NULL(c->username);
|
||||
FREE_AND_NULL(c->password);
|
||||
FREE_AND_NULL(c->credential);
|
||||
FREE_AND_NULL(c->oauth_refresh_token);
|
||||
c->password_expiry_utc = TIME_MAX;
|
||||
c->approved = 0;
|
||||
|
|
92
credential.h
92
credential.h
|
@ -93,6 +93,27 @@
|
|||
* -----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/*
|
||||
* These values define the kind of operation we're performing and the
|
||||
* capabilities at each stage. The first is either an external request (via git
|
||||
* credential fill) or an internal request (e.g., via the HTTP) code. The
|
||||
* second is the call to the credential helper, and the third is the response
|
||||
* we're providing.
|
||||
*
|
||||
* At each stage, we will emit the capability only if the previous stage
|
||||
* supported it.
|
||||
*/
|
||||
enum credential_op_type {
|
||||
CREDENTIAL_OP_INITIAL = 1,
|
||||
CREDENTIAL_OP_HELPER = 2,
|
||||
CREDENTIAL_OP_RESPONSE = 3,
|
||||
};
|
||||
|
||||
struct credential_capability {
|
||||
unsigned request_initial:1,
|
||||
request_helper:1,
|
||||
response:1;
|
||||
};
|
||||
|
||||
/**
|
||||
* This struct represents a single username/password combination
|
||||
|
@ -123,6 +144,16 @@ struct credential {
|
|||
*/
|
||||
struct strvec wwwauth_headers;
|
||||
|
||||
/**
|
||||
* A `strvec` of state headers received from credential helpers.
|
||||
*/
|
||||
struct strvec state_headers;
|
||||
|
||||
/**
|
||||
* A `strvec` of state headers to send to credential helpers.
|
||||
*/
|
||||
struct strvec state_headers_to_send;
|
||||
|
||||
/**
|
||||
* Internal use only. Keeps track of if we previously matched against a
|
||||
* WWW-Authenticate header line in order to re-fold future continuation
|
||||
|
@ -131,24 +162,38 @@ struct credential {
|
|||
unsigned header_is_last_match:1;
|
||||
|
||||
unsigned approved:1,
|
||||
ephemeral:1,
|
||||
configured:1,
|
||||
multistage: 1,
|
||||
quit:1,
|
||||
use_http_path:1,
|
||||
username_from_proto:1;
|
||||
|
||||
struct credential_capability capa_authtype;
|
||||
struct credential_capability capa_state;
|
||||
|
||||
char *username;
|
||||
char *password;
|
||||
char *credential;
|
||||
char *protocol;
|
||||
char *host;
|
||||
char *path;
|
||||
char *oauth_refresh_token;
|
||||
timestamp_t password_expiry_utc;
|
||||
|
||||
/**
|
||||
* The authorization scheme to use. If this is NULL, libcurl is free to
|
||||
* negotiate any scheme it likes.
|
||||
*/
|
||||
char *authtype;
|
||||
};
|
||||
|
||||
#define CREDENTIAL_INIT { \
|
||||
.helpers = STRING_LIST_INIT_DUP, \
|
||||
.password_expiry_utc = TIME_MAX, \
|
||||
.wwwauth_headers = STRVEC_INIT, \
|
||||
.state_headers = STRVEC_INIT, \
|
||||
.state_headers_to_send = STRVEC_INIT, \
|
||||
}
|
||||
|
||||
/* Initialize a credential structure, setting all fields to empty. */
|
||||
|
@ -167,8 +212,11 @@ void credential_clear(struct credential *);
|
|||
* returns, the username and password fields of the credential are
|
||||
* guaranteed to be non-NULL. If an error occurs, the function will
|
||||
* die().
|
||||
*
|
||||
* If all_capabilities is set, this is an internal user that is prepared
|
||||
* to deal with all known capabilities, and we should advertise that fact.
|
||||
*/
|
||||
void credential_fill(struct credential *);
|
||||
void credential_fill(struct credential *, int all_capabilities);
|
||||
|
||||
/**
|
||||
* Inform the credential subsystem that the provided credentials
|
||||
|
@ -191,8 +239,46 @@ void credential_approve(struct credential *);
|
|||
*/
|
||||
void credential_reject(struct credential *);
|
||||
|
||||
int credential_read(struct credential *, FILE *);
|
||||
void credential_write(const struct credential *, FILE *);
|
||||
/**
|
||||
* Enable all of the supported credential flags in this credential.
|
||||
*/
|
||||
void credential_set_all_capabilities(struct credential *c,
|
||||
enum credential_op_type op_type);
|
||||
|
||||
/**
|
||||
* Clear the secrets in this credential, but leave other data intact.
|
||||
*
|
||||
* This is useful for resetting credentials in preparation for a subsequent
|
||||
* stage of filling.
|
||||
*/
|
||||
void credential_clear_secrets(struct credential *c);
|
||||
|
||||
/**
|
||||
* Print a list of supported capabilities and version numbers to standard
|
||||
* output.
|
||||
*/
|
||||
void credential_announce_capabilities(struct credential *c, FILE *fp);
|
||||
|
||||
/**
|
||||
* Prepares the credential for the next iteration of the helper protocol by
|
||||
* updating the state headers to send with the ones read by the last iteration
|
||||
* of the protocol.
|
||||
*
|
||||
* Except for internal callers, this should be called exactly once between
|
||||
* reading credentials with `credential_fill` and writing them.
|
||||
*/
|
||||
void credential_next_state(struct credential *c);
|
||||
|
||||
/**
|
||||
* Return true if the capability is enabled for an operation of op_type.
|
||||
*/
|
||||
int credential_has_capability(const struct credential_capability *capa,
|
||||
enum credential_op_type op_type);
|
||||
|
||||
int credential_read(struct credential *, FILE *,
|
||||
enum credential_op_type);
|
||||
void credential_write(const struct credential *, FILE *,
|
||||
enum credential_op_type);
|
||||
|
||||
/*
|
||||
* Parse a url into a credential struct, replacing any existing contents.
|
||||
|
|
|
@ -10,14 +10,14 @@
|
|||
#include "diff.h"
|
||||
#include "progress.h"
|
||||
#include "refs.h"
|
||||
#include "khash.h"
|
||||
#include "khashl.h"
|
||||
#include "pack-bitmap.h"
|
||||
#include "pack-objects.h"
|
||||
#include "delta-islands.h"
|
||||
#include "oid-array.h"
|
||||
#include "config.h"
|
||||
|
||||
KHASH_INIT(str, const char *, void *, 1, kh_str_hash_func, kh_str_hash_equal)
|
||||
KHASHL_MAP_INIT(KH_LOCAL, kh_str, str, const char *, void *, kh_hash_str, kh_eq_str)
|
||||
|
||||
static kh_oid_map_t *island_marks;
|
||||
static unsigned island_counter;
|
||||
|
|
|
@ -138,6 +138,49 @@ void bitmap_or(struct bitmap *self, const struct bitmap *other)
|
|||
self->words[i] |= other->words[i];
|
||||
}
|
||||
|
||||
int ewah_bitmap_is_subset(struct ewah_bitmap *self, struct bitmap *other)
|
||||
{
|
||||
struct ewah_iterator it;
|
||||
eword_t word;
|
||||
size_t i;
|
||||
|
||||
ewah_iterator_init(&it, self);
|
||||
|
||||
for (i = 0; i < other->word_alloc; i++) {
|
||||
if (!ewah_iterator_next(&word, &it)) {
|
||||
/*
|
||||
* If we reached the end of `self`, and haven't
|
||||
* rejected `self` as a possible subset of
|
||||
* `other` yet, then we are done and `self` is
|
||||
* indeed a subset of `other`.
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
if (word & ~other->words[i]) {
|
||||
/*
|
||||
* Otherwise, compare the next two pairs of
|
||||
* words. If the word from `self` has bit(s) not
|
||||
* in the word from `other`, `self` is not a
|
||||
* proper subset of `other`.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* If we got to this point, there may be zero or more words
|
||||
* remaining in `self`, with no remaining words left in `other`.
|
||||
* If there are any bits set in the remaining word(s) in `self`,
|
||||
* then `self` is not a proper subset of `other`.
|
||||
*/
|
||||
while (ewah_iterator_next(&word, &it))
|
||||
if (word)
|
||||
return 0;
|
||||
|
||||
/* `self` is definitely a subset of `other` */
|
||||
return 1;
|
||||
}
|
||||
|
||||
void bitmap_or_ewah(struct bitmap *self, struct ewah_bitmap *other)
|
||||
{
|
||||
size_t original_size = self->word_alloc;
|
||||
|
@ -169,6 +212,20 @@ size_t bitmap_popcount(struct bitmap *self)
|
|||
return count;
|
||||
}
|
||||
|
||||
size_t ewah_bitmap_popcount(struct ewah_bitmap *self)
|
||||
{
|
||||
struct ewah_iterator it;
|
||||
eword_t word;
|
||||
size_t count = 0;
|
||||
|
||||
ewah_iterator_init(&it, self);
|
||||
|
||||
while (ewah_iterator_next(&word, &it))
|
||||
count += ewah_bit_popcount64(word);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
int bitmap_is_empty(struct bitmap *self)
|
||||
{
|
||||
size_t i;
|
||||
|
@ -204,6 +261,25 @@ int bitmap_equals(struct bitmap *self, struct bitmap *other)
|
|||
return 1;
|
||||
}
|
||||
|
||||
int bitmap_equals_ewah(struct bitmap *self, struct ewah_bitmap *other)
|
||||
{
|
||||
struct ewah_iterator it;
|
||||
eword_t word;
|
||||
size_t i = 0;
|
||||
|
||||
ewah_iterator_init(&it, other);
|
||||
|
||||
while (ewah_iterator_next(&word, &it))
|
||||
if (word != (i < self->word_alloc ? self->words[i++] : 0))
|
||||
return 0;
|
||||
|
||||
for (; i < self->word_alloc; i++)
|
||||
if (self->words[i])
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int bitmap_is_subset(struct bitmap *self, struct bitmap *other)
|
||||
{
|
||||
size_t common_size, i;
|
||||
|
|
|
@ -179,7 +179,9 @@ void bitmap_unset(struct bitmap *self, size_t pos);
|
|||
int bitmap_get(struct bitmap *self, size_t pos);
|
||||
void bitmap_free(struct bitmap *self);
|
||||
int bitmap_equals(struct bitmap *self, struct bitmap *other);
|
||||
int bitmap_equals_ewah(struct bitmap *self, struct ewah_bitmap *other);
|
||||
int bitmap_is_subset(struct bitmap *self, struct bitmap *other);
|
||||
int ewah_bitmap_is_subset(struct ewah_bitmap *self, struct bitmap *other);
|
||||
|
||||
struct ewah_bitmap * bitmap_to_ewah(struct bitmap *bitmap);
|
||||
struct bitmap *ewah_to_bitmap(struct ewah_bitmap *ewah);
|
||||
|
@ -189,6 +191,7 @@ void bitmap_or_ewah(struct bitmap *self, struct ewah_bitmap *other);
|
|||
void bitmap_or(struct bitmap *self, const struct bitmap *other);
|
||||
|
||||
size_t bitmap_popcount(struct bitmap *self);
|
||||
size_t ewah_bitmap_popcount(struct ewah_bitmap *self);
|
||||
int bitmap_is_empty(struct bitmap *self);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1361,7 +1361,6 @@ sub smtp_host_string {
|
|||
|
||||
# Returns 1 if authentication succeeded or was not necessary
|
||||
# (smtp_user was not specified), and 0 otherwise.
|
||||
|
||||
sub smtp_auth_maybe {
|
||||
if (!defined $smtp_authuser || $auth || (defined $smtp_auth && $smtp_auth eq "none")) {
|
||||
return 1;
|
||||
|
@ -1510,6 +1509,7 @@ sub gen_header {
|
|||
sub send_message {
|
||||
my ($recipients_ref, $to, $date, $gitversion, $cc, $ccline, $header) = gen_header();
|
||||
my @recipients = @$recipients_ref;
|
||||
my $confirm_shown = 0;
|
||||
|
||||
my @sendmail_parameters = ('-i', @recipients);
|
||||
my $raw_from = $sender;
|
||||
|
@ -1555,6 +1555,7 @@ sub send_message {
|
|||
} elsif (/^a/i) {
|
||||
$confirm = 'never';
|
||||
}
|
||||
$confirm_shown = 1;
|
||||
}
|
||||
|
||||
unshift (@sendmail_parameters, @smtp_server_options);
|
||||
|
@ -1576,7 +1577,6 @@ sub send_message {
|
|||
print $sm "$header\n$message";
|
||||
close $sm or die $!;
|
||||
} else {
|
||||
|
||||
if (!defined $smtp_server) {
|
||||
die __("The required SMTP server is not properly defined.")
|
||||
}
|
||||
|
@ -1664,9 +1664,13 @@ sub send_message {
|
|||
$smtp->code =~ /250|200/ or die sprintf(__("Failed to send %s\n"), $subject).$smtp->message;
|
||||
}
|
||||
if ($quiet) {
|
||||
printf($dry_run ? __("Dry-Sent %s\n") : __("Sent %s\n"), $subject);
|
||||
print "\n" if ($confirm_shown);
|
||||
printf($dry_run ? __("Dry-Sent %s") : __("Sent %s"), $subject);
|
||||
print "\n";
|
||||
} else {
|
||||
print($dry_run ? __("Dry-OK. Log says:\n") : __("OK. Log says:\n"));
|
||||
print "\n";
|
||||
print($dry_run ? __("Dry-OK. Log says:") : __("OK. Log says:"));
|
||||
print "\n";
|
||||
if (!defined $sendmail_cmd && !file_name_is_absolute($smtp_server)) {
|
||||
print "Server: $smtp_server\n";
|
||||
print "MAIL FROM:<$raw_from>\n";
|
||||
|
@ -1686,10 +1690,11 @@ sub send_message {
|
|||
print $header, "\n";
|
||||
if ($smtp) {
|
||||
print __("Result: "), $smtp->code, ' ',
|
||||
($smtp->message =~ /\n([^\n]+\n)$/s), "\n";
|
||||
($smtp->message =~ /\n([^\n]+\n)$/s);
|
||||
} else {
|
||||
print __("Result: OK\n");
|
||||
print __("Result: OK");
|
||||
}
|
||||
print "\n";
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
@ -1920,7 +1925,7 @@ sub pre_process_file {
|
|||
sub process_file {
|
||||
my ($t) = @_;
|
||||
|
||||
pre_process_file($t, $quiet);
|
||||
pre_process_file($t, $quiet);
|
||||
|
||||
my $message_was_sent = send_message();
|
||||
if ($message_was_sent == -1) {
|
||||
|
|
129
http.c
129
http.c
|
@ -128,7 +128,6 @@ static unsigned long empty_auth_useless =
|
|||
| CURLAUTH_DIGEST;
|
||||
|
||||
static struct curl_slist *pragma_header;
|
||||
static struct curl_slist *no_pragma_header;
|
||||
static struct string_list extra_http_headers = STRING_LIST_INIT_DUP;
|
||||
|
||||
static struct curl_slist *host_resolutions;
|
||||
|
@ -299,6 +298,11 @@ size_t fwrite_null(char *ptr UNUSED, size_t eltsize UNUSED, size_t nmemb,
|
|||
return nmemb;
|
||||
}
|
||||
|
||||
static struct curl_slist *object_request_headers(void)
|
||||
{
|
||||
return curl_slist_append(http_copy_default_headers(), "Pragma:");
|
||||
}
|
||||
|
||||
static void closedown_active_slot(struct active_request_slot *slot)
|
||||
{
|
||||
active_requests--;
|
||||
|
@ -557,18 +561,34 @@ static int curl_empty_auth_enabled(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct curl_slist *http_append_auth_header(const struct credential *c,
|
||||
struct curl_slist *headers)
|
||||
{
|
||||
if (c->authtype && c->credential) {
|
||||
struct strbuf auth = STRBUF_INIT;
|
||||
strbuf_addf(&auth, "Authorization: %s %s",
|
||||
c->authtype, c->credential);
|
||||
headers = curl_slist_append(headers, auth.buf);
|
||||
strbuf_release(&auth);
|
||||
}
|
||||
return headers;
|
||||
}
|
||||
|
||||
static void init_curl_http_auth(CURL *result)
|
||||
{
|
||||
if (!http_auth.username || !*http_auth.username) {
|
||||
if ((!http_auth.username || !*http_auth.username) &&
|
||||
(!http_auth.credential || !*http_auth.credential)) {
|
||||
if (curl_empty_auth_enabled())
|
||||
curl_easy_setopt(result, CURLOPT_USERPWD, ":");
|
||||
return;
|
||||
}
|
||||
|
||||
credential_fill(&http_auth);
|
||||
credential_fill(&http_auth, 1);
|
||||
|
||||
curl_easy_setopt(result, CURLOPT_USERNAME, http_auth.username);
|
||||
curl_easy_setopt(result, CURLOPT_PASSWORD, http_auth.password);
|
||||
if (http_auth.password) {
|
||||
curl_easy_setopt(result, CURLOPT_USERNAME, http_auth.username);
|
||||
curl_easy_setopt(result, CURLOPT_PASSWORD, http_auth.password);
|
||||
}
|
||||
}
|
||||
|
||||
/* *var must be free-able */
|
||||
|
@ -582,17 +602,22 @@ static void var_override(const char **var, char *value)
|
|||
|
||||
static void set_proxyauth_name_password(CURL *result)
|
||||
{
|
||||
if (proxy_auth.password) {
|
||||
curl_easy_setopt(result, CURLOPT_PROXYUSERNAME,
|
||||
proxy_auth.username);
|
||||
curl_easy_setopt(result, CURLOPT_PROXYPASSWORD,
|
||||
proxy_auth.password);
|
||||
} else if (proxy_auth.authtype && proxy_auth.credential) {
|
||||
curl_easy_setopt(result, CURLOPT_PROXYHEADER,
|
||||
http_append_auth_header(&proxy_auth, NULL));
|
||||
}
|
||||
}
|
||||
|
||||
static void init_curl_proxy_auth(CURL *result)
|
||||
{
|
||||
if (proxy_auth.username) {
|
||||
if (!proxy_auth.password)
|
||||
credential_fill(&proxy_auth);
|
||||
if (!proxy_auth.password && !proxy_auth.credential)
|
||||
credential_fill(&proxy_auth, 1);
|
||||
set_proxyauth_name_password(result);
|
||||
}
|
||||
|
||||
|
@ -626,7 +651,7 @@ static int has_cert_password(void)
|
|||
cert_auth.host = xstrdup("");
|
||||
cert_auth.username = xstrdup("");
|
||||
cert_auth.path = xstrdup(ssl_cert);
|
||||
credential_fill(&cert_auth);
|
||||
credential_fill(&cert_auth, 0);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
@ -641,7 +666,7 @@ static int has_proxy_cert_password(void)
|
|||
proxy_cert_auth.host = xstrdup("");
|
||||
proxy_cert_auth.username = xstrdup("");
|
||||
proxy_cert_auth.path = xstrdup(http_proxy_ssl_cert);
|
||||
credential_fill(&proxy_cert_auth);
|
||||
credential_fill(&proxy_cert_auth, 0);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
@ -1275,8 +1300,6 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
|
|||
|
||||
pragma_header = curl_slist_append(http_copy_default_headers(),
|
||||
"Pragma: no-cache");
|
||||
no_pragma_header = curl_slist_append(http_copy_default_headers(),
|
||||
"Pragma:");
|
||||
|
||||
{
|
||||
char *http_max_requests = getenv("GIT_HTTP_MAX_REQUESTS");
|
||||
|
@ -1360,9 +1383,6 @@ void http_cleanup(void)
|
|||
curl_slist_free_all(pragma_header);
|
||||
pragma_header = NULL;
|
||||
|
||||
curl_slist_free_all(no_pragma_header);
|
||||
no_pragma_header = NULL;
|
||||
|
||||
curl_slist_free_all(host_resolutions);
|
||||
host_resolutions = NULL;
|
||||
|
||||
|
@ -1470,7 +1490,7 @@ struct active_request_slot *get_active_slot(void)
|
|||
|
||||
curl_easy_setopt(slot->curl, CURLOPT_IPRESOLVE, git_curl_ipresolve);
|
||||
curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, http_auth_methods);
|
||||
if (http_auth.password || curl_empty_auth_enabled())
|
||||
if (http_auth.password || http_auth.credential || curl_empty_auth_enabled())
|
||||
init_curl_http_auth(slot->curl);
|
||||
|
||||
return slot;
|
||||
|
@ -1759,7 +1779,12 @@ static int handle_curl_result(struct slot_results *results)
|
|||
} else if (missing_target(results))
|
||||
return HTTP_MISSING_TARGET;
|
||||
else if (results->http_code == 401) {
|
||||
if (http_auth.username && http_auth.password) {
|
||||
if ((http_auth.username && http_auth.password) ||\
|
||||
(http_auth.authtype && http_auth.credential)) {
|
||||
if (http_auth.multistage) {
|
||||
credential_clear_secrets(&http_auth);
|
||||
return HTTP_REAUTH;
|
||||
}
|
||||
credential_reject(&http_auth);
|
||||
return HTTP_NOAUTH;
|
||||
} else {
|
||||
|
@ -2067,11 +2092,15 @@ static int http_request(const char *url,
|
|||
/* Add additional headers here */
|
||||
if (options && options->extra_headers) {
|
||||
const struct string_list_item *item;
|
||||
for_each_string_list_item(item, options->extra_headers) {
|
||||
headers = curl_slist_append(headers, item->string);
|
||||
if (options && options->extra_headers) {
|
||||
for_each_string_list_item(item, options->extra_headers) {
|
||||
headers = curl_slist_append(headers, item->string);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
headers = http_append_auth_header(&http_auth, headers);
|
||||
|
||||
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
|
||||
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
|
||||
curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "");
|
||||
|
@ -2153,6 +2182,7 @@ static int http_request_reauth(const char *url,
|
|||
void *result, int target,
|
||||
struct http_get_options *options)
|
||||
{
|
||||
int i = 3;
|
||||
int ret = http_request(url, result, target, options);
|
||||
|
||||
if (ret != HTTP_OK && ret != HTTP_REAUTH)
|
||||
|
@ -2166,35 +2196,35 @@ static int http_request_reauth(const char *url,
|
|||
}
|
||||
}
|
||||
|
||||
if (ret != HTTP_REAUTH)
|
||||
return ret;
|
||||
while (ret == HTTP_REAUTH && --i) {
|
||||
/*
|
||||
* The previous request may have put cruft into our output stream; we
|
||||
* should clear it out before making our next request.
|
||||
*/
|
||||
switch (target) {
|
||||
case HTTP_REQUEST_STRBUF:
|
||||
strbuf_reset(result);
|
||||
break;
|
||||
case HTTP_REQUEST_FILE:
|
||||
if (fflush(result)) {
|
||||
error_errno("unable to flush a file");
|
||||
return HTTP_START_FAILED;
|
||||
}
|
||||
rewind(result);
|
||||
if (ftruncate(fileno(result), 0) < 0) {
|
||||
error_errno("unable to truncate a file");
|
||||
return HTTP_START_FAILED;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
BUG("Unknown http_request target");
|
||||
}
|
||||
|
||||
/*
|
||||
* The previous request may have put cruft into our output stream; we
|
||||
* should clear it out before making our next request.
|
||||
*/
|
||||
switch (target) {
|
||||
case HTTP_REQUEST_STRBUF:
|
||||
strbuf_reset(result);
|
||||
break;
|
||||
case HTTP_REQUEST_FILE:
|
||||
if (fflush(result)) {
|
||||
error_errno("unable to flush a file");
|
||||
return HTTP_START_FAILED;
|
||||
}
|
||||
rewind(result);
|
||||
if (ftruncate(fileno(result), 0) < 0) {
|
||||
error_errno("unable to truncate a file");
|
||||
return HTTP_START_FAILED;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
BUG("Unknown http_request target");
|
||||
credential_fill(&http_auth, 1);
|
||||
|
||||
ret = http_request(url, result, target, options);
|
||||
}
|
||||
|
||||
credential_fill(&http_auth);
|
||||
|
||||
return http_request(url, result, target, options);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int http_get_strbuf(const char *url,
|
||||
|
@ -2371,6 +2401,7 @@ void release_http_pack_request(struct http_pack_request *preq)
|
|||
}
|
||||
preq->slot = NULL;
|
||||
strbuf_release(&preq->tmpfile);
|
||||
curl_slist_free_all(preq->headers);
|
||||
free(preq->url);
|
||||
free(preq);
|
||||
}
|
||||
|
@ -2455,11 +2486,11 @@ struct http_pack_request *new_direct_http_pack_request(
|
|||
}
|
||||
|
||||
preq->slot = get_active_slot();
|
||||
preq->headers = object_request_headers();
|
||||
curl_easy_setopt(preq->slot->curl, CURLOPT_WRITEDATA, preq->packfile);
|
||||
curl_easy_setopt(preq->slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
|
||||
curl_easy_setopt(preq->slot->curl, CURLOPT_URL, preq->url);
|
||||
curl_easy_setopt(preq->slot->curl, CURLOPT_HTTPHEADER,
|
||||
no_pragma_header);
|
||||
curl_easy_setopt(preq->slot->curl, CURLOPT_HTTPHEADER, preq->headers);
|
||||
|
||||
/*
|
||||
* If there is data present from a previous transfer attempt,
|
||||
|
@ -2625,13 +2656,14 @@ struct http_object_request *new_http_object_request(const char *base_url,
|
|||
}
|
||||
|
||||
freq->slot = get_active_slot();
|
||||
freq->headers = object_request_headers();
|
||||
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_WRITEDATA, freq);
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_FAILONERROR, 0);
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file);
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_ERRORBUFFER, freq->errorstr);
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_URL, freq->url);
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
|
||||
curl_easy_setopt(freq->slot->curl, CURLOPT_HTTPHEADER, freq->headers);
|
||||
|
||||
/*
|
||||
* If we have successfully processed data from a previous fetch
|
||||
|
@ -2719,5 +2751,6 @@ void release_http_object_request(struct http_object_request *freq)
|
|||
release_active_slot(freq->slot);
|
||||
freq->slot = NULL;
|
||||
}
|
||||
curl_slist_free_all(freq->headers);
|
||||
strbuf_release(&freq->tmpfile);
|
||||
}
|
||||
|
|
5
http.h
5
http.h
|
@ -175,6 +175,9 @@ int http_get_file(const char *url, const char *filename,
|
|||
|
||||
int http_fetch_ref(const char *base, struct ref *ref);
|
||||
|
||||
struct curl_slist *http_append_auth_header(const struct credential *c,
|
||||
struct curl_slist *headers);
|
||||
|
||||
/* Helpers for fetching packs */
|
||||
int http_get_info_packs(const char *base_url,
|
||||
struct packed_git **packs_head);
|
||||
|
@ -196,6 +199,7 @@ struct http_pack_request {
|
|||
FILE *packfile;
|
||||
struct strbuf tmpfile;
|
||||
struct active_request_slot *slot;
|
||||
struct curl_slist *headers;
|
||||
};
|
||||
|
||||
struct http_pack_request *new_http_pack_request(
|
||||
|
@ -229,6 +233,7 @@ struct http_object_request {
|
|||
int zret;
|
||||
int rename;
|
||||
struct active_request_slot *slot;
|
||||
struct curl_slist *headers;
|
||||
};
|
||||
|
||||
struct http_object_request *new_http_object_request(
|
||||
|
|
|
@ -917,7 +917,7 @@ static void server_fill_credential(struct imap_server_conf *srvc, struct credent
|
|||
cred->username = xstrdup_or_null(srvc->user);
|
||||
cred->password = xstrdup_or_null(srvc->pass);
|
||||
|
||||
credential_fill(cred);
|
||||
credential_fill(cred, 1);
|
||||
|
||||
if (!srvc->user)
|
||||
srvc->user = xstrdup(cred->username);
|
||||
|
|
338
khash.h
338
khash.h
|
@ -1,338 +0,0 @@
|
|||
/* The MIT License
|
||||
|
||||
Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __AC_KHASH_H
|
||||
#define __AC_KHASH_H
|
||||
|
||||
#include "hash.h"
|
||||
|
||||
#define AC_VERSION_KHASH_H "0.2.8"
|
||||
|
||||
typedef uint32_t khint32_t;
|
||||
typedef uint64_t khint64_t;
|
||||
|
||||
typedef khint32_t khint_t;
|
||||
typedef khint_t khiter_t;
|
||||
|
||||
#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
|
||||
#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
|
||||
#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
|
||||
#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
|
||||
#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
|
||||
#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
|
||||
#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))
|
||||
|
||||
#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
|
||||
|
||||
#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
|
||||
|
||||
static inline khint_t __ac_X31_hash_string(const char *s)
|
||||
{
|
||||
khint_t h = (khint_t)*s;
|
||||
if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s;
|
||||
return h;
|
||||
}
|
||||
|
||||
#define kh_str_hash_func(key) __ac_X31_hash_string(key)
|
||||
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
|
||||
|
||||
static const double __ac_HASH_UPPER = 0.77;
|
||||
|
||||
#define __KHASH_TYPE(name, khkey_t, khval_t) \
|
||||
typedef struct kh_##name { \
|
||||
khint_t n_buckets, size, n_occupied, upper_bound; \
|
||||
khint32_t *flags; \
|
||||
khkey_t *keys; \
|
||||
khval_t *vals; \
|
||||
} kh_##name##_t;
|
||||
|
||||
#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
|
||||
kh_##name##_t *kh_init_##name(void); \
|
||||
void kh_destroy_##name(kh_##name##_t *h); \
|
||||
void kh_clear_##name(kh_##name##_t *h); \
|
||||
khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
|
||||
void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
|
||||
khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
|
||||
void kh_del_##name(kh_##name##_t *h, khint_t x);
|
||||
|
||||
/*
 * Define the full implementation of a khash instantiation "name".
 * SCOPE controls linkage; kh_is_map (0/1) decides whether the "vals"
 * array is maintained; __hash_func/__hash_equal take khkey_t by value.
 */
#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	SCOPE kh_##name##_t *kh_init_##name(void) { \
		return (kh_##name##_t*)xcalloc(1, sizeof(kh_##name##_t)); \
	} \
	SCOPE void kh_release_##name(kh_##name##_t *h) \
	{ \
		free(h->flags); \
		free((void *)h->keys); \
		free((void *)h->vals); \
	} \
	SCOPE void kh_destroy_##name(kh_##name##_t *h) \
	{ \
		if (h) { \
			kh_release_##name(h); \
			free(h); \
		} \
	} \
	SCOPE void kh_clear_##name(kh_##name##_t *h) \
	{ \
		if (h && h->flags) { \
			/* 0xaa = every bucket flagged "empty, not deleted" */ \
			memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
			h->size = h->n_occupied = 0; \
		} \
	} \
	SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
	{ \
		if (h->n_buckets) { \
			khint_t k, i, last, mask, step = 0; \
			mask = h->n_buckets - 1; \
			k = __hash_func(key); i = k & mask; \
			last = i; \
			/* probe with increasing step; an empty bucket ends the chain */ \
			while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
				i = (i + (++step)) & mask; \
				if (i == last) return h->n_buckets; \
			} \
			return __ac_iseither(h->flags, i)? h->n_buckets : i; \
		} else return 0; \
	} \
	SCOPE void kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
	{ /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
		khint32_t *new_flags = NULL; \
		khint_t j = 1; \
		{ \
			kroundup32(new_n_buckets); \
			if (new_n_buckets < 4) new_n_buckets = 4; \
			if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0;	/* requested size is too small */ \
			else { /* hash table size to be changed (shrink or expand); rehash */ \
				ALLOC_ARRAY(new_flags, __ac_fsize(new_n_buckets)); \
				memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
				if (h->n_buckets < new_n_buckets) {	/* expand */ \
					REALLOC_ARRAY(h->keys, new_n_buckets); \
					if (kh_is_map) { \
						REALLOC_ARRAY(h->vals, new_n_buckets); \
					} \
				} /* otherwise shrink */ \
			} \
		} \
		if (j) { /* rehashing is needed */ \
			for (j = 0; j != h->n_buckets; ++j) { \
				if (__ac_iseither(h->flags, j) == 0) { \
					khkey_t key = h->keys[j]; \
					khval_t val; \
					khint_t new_mask; \
					new_mask = new_n_buckets - 1; \
					if (kh_is_map) val = h->vals[j]; \
					__ac_set_isdel_true(h->flags, j); \
					while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
						khint_t k, i, step = 0; \
						k = __hash_func(key); \
						i = k & new_mask; \
						while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \
						__ac_set_isempty_false(new_flags, i); \
						if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
							{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
							if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
							__ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \
						} else { /* write the element and jump out of the loop */ \
							h->keys[i] = key; \
							if (kh_is_map) h->vals[i] = val; \
							break; \
						} \
					} \
				} \
			} \
			if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
				REALLOC_ARRAY(h->keys, new_n_buckets); \
				if (kh_is_map) REALLOC_ARRAY(h->vals, new_n_buckets); \
			} \
			free(h->flags); /* free the working space */ \
			h->flags = new_flags; \
			h->n_buckets = new_n_buckets; \
			h->n_occupied = h->size; \
			h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
		} \
	} \
	SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
	{ \
		khint_t x; \
		if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
			if (h->n_buckets > (h->size<<1)) { \
				kh_resize_##name(h, h->n_buckets - 1); /* clear "deleted" elements */ \
			} else { \
				kh_resize_##name(h, h->n_buckets + 1); /* expand the hash table */ \
			} \
		} /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
		{ \
			khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
			x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
			if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \
			else { \
				last = i; \
				/* "site" remembers the first tombstone so it can be reused */ \
				while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
					if (__ac_isdel(h->flags, i)) site = i; \
					i = (i + (++step)) & mask; \
					if (i == last) { x = site; break; } \
				} \
				if (x == h->n_buckets) { \
					if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
					else x = i; \
				} \
			} \
		} \
		if (__ac_isempty(h->flags, x)) { /* not present at all */ \
			h->keys[x] = key; \
			__ac_set_isboth_false(h->flags, x); \
			++h->size; ++h->n_occupied; \
			*ret = 1; \
		} else if (__ac_isdel(h->flags, x)) { /* deleted */ \
			h->keys[x] = key; \
			__ac_set_isboth_false(h->flags, x); \
			++h->size; \
			*ret = 2; \
		} else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
		return x; \
	} \
	SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
	{ \
		if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
			__ac_set_isdel_true(h->flags, x); \
			--h->size; \
		} \
	}
|
||||
|
||||
/* Declare the struct type and extern prototypes only (no implementation). */
#define KHASH_DECLARE(name, khkey_t, khval_t) \
	__KHASH_TYPE(name, khkey_t, khval_t) \
	__KHASH_PROTOTYPES(name, khkey_t, khval_t)

/* Declare the type and define the implementation with explicit linkage. */
#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	__KHASH_TYPE(name, khkey_t, khval_t) \
	__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)

/* Default instantiation: file-local static inline functions. */
#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
	KHASH_INIT2(name, MAYBE_UNUSED static inline, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)

/* Other convenient macros... */

/*! @function
  @abstract     Test whether a bucket contains data.
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  x     Iterator to the bucket [khint_t]
  @return       1 if containing data; 0 otherwise [int]
 */
#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))

/*! @function
  @abstract     Get key given an iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  x     Iterator to the bucket [khint_t]
  @return       Key [type of keys]
 */
#define kh_key(h, x) ((h)->keys[x])

/*! @function
  @abstract     Get value given an iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  x     Iterator to the bucket [khint_t]
  @return       Value [type of values]
  @discussion   For hash sets, calling this results in segfault.
 */
#define kh_val(h, x) ((h)->vals[x])

/*! @function
  @abstract     Alias of kh_val()
 */
#define kh_value(h, x) ((h)->vals[x])

/*! @function
  @abstract     Get the start iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       The start iterator [khint_t]
 */
#define kh_begin(h) (khint_t)(0)

/*! @function
  @abstract     Get the end iterator
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       The end iterator [khint_t]
 */
#define kh_end(h) ((h)->n_buckets)

/*! @function
  @abstract     Get the number of elements in the hash table
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       Number of elements in the hash table [khint_t]
 */
#define kh_size(h) ((h)->size)

/*! @function
  @abstract     Get the number of buckets in the hash table
  @param  h     Pointer to the hash table [khash_t(name)*]
  @return       Number of buckets in the hash table [khint_t]
 */
#define kh_n_buckets(h) ((h)->n_buckets)

/*! @function
  @abstract     Iterate over the entries in the hash table
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  kvar  Variable to which key will be assigned
  @param  vvar  Variable to which value will be assigned
  @param  code  Block of code to execute
 */
#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
	for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
		if (!kh_exist(h,__i)) continue; \
		(kvar) = kh_key(h,__i); \
		(vvar) = kh_val(h,__i); \
		code; \
	} }

/*! @function
  @abstract     Iterate over the values in the hash table
  @param  h     Pointer to the hash table [khash_t(name)*]
  @param  vvar  Variable to which value will be assigned
  @param  code  Block of code to execute
 */
#define kh_foreach_value(h, vvar, code) { khint_t __i; \
	for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
		if (!kh_exist(h,__i)) continue; \
		(vvar) = kh_val(h,__i); \
		code; \
	} }

/* Adapters: khash passes keys by value, oidhash()/oideq() take pointers. */
static inline unsigned int oidhash_by_value(struct object_id oid)
{
	return oidhash(&oid);
}

static inline int oideq_by_value(struct object_id a, struct object_id b)
{
	return oideq(&a, &b);
}

/* oid_set is membership-only (kh_is_map == 0, vals never allocated). */
KHASH_INIT(oid_set, struct object_id, int, 0, oidhash_by_value, oideq_by_value)

KHASH_INIT(oid_map, struct object_id, void *, 1, oidhash_by_value, oideq_by_value)

KHASH_INIT(oid_pos, struct object_id, int, 1, oidhash_by_value, oideq_by_value)

#endif /* __AC_KHASH_H */
|
|
@ -0,0 +1,522 @@
|
|||
/* The MIT License
|
||||
|
||||
Copyright (c) 2019-2023 by Attractive Chaos <attractor@live.co.uk>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
*/
|
||||
|
||||
/* khashl: the updated, lower-memory successor of khash.h. */
#ifndef __AC_KHASHL_H
#define __AC_KHASHL_H

#include "hash.h"

#define AC_VERSION_KHASHL_H "0.2"
|
||||
|
||||
/* Fixed-width integer aliases shared by all khashl instantiations. */
typedef uint32_t khint32_t;
typedef uint64_t khint64_t;

typedef khint32_t khint_t;
typedef khint_t khiter_t;	/* an iterator is just a bucket index */

#define kh_inline inline /* portably handled elsewhere */
#define KH_LOCAL static kh_inline MAYBE_UNUSED

#ifndef kcalloc
#define kcalloc(N,Z) xcalloc(N,Z)
#endif
#ifndef kfree
#define kfree(P) free(P)
#endif

/****************************
 * Simple private functions *
 ****************************/

/* One "used" bit per bucket, packed 32 buckets to a khint32_t word. */
#define __kh_used(flag, i) (flag[i>>5] >> (i&0x1fU) & 1U)
#define __kh_set_used(flag, i) (flag[i>>5] |= 1U<<(i&0x1fU))
#define __kh_set_unused(flag, i) (flag[i>>5] &= ~(1U<<(i&0x1fU)))

/* Number of khint32_t words needed to hold the used bits for m buckets. */
#define __kh_fsize(m) ((m) < 32? 1 : (m)>>5)

/* Fibonacci (multiplicative) hashing: map a 32-bit hash onto 2^bits buckets. */
static kh_inline khint_t __kh_h2b(khint_t hash, khint_t bits)
{
	khint_t scrambled = hash * 2654435769U; /* ~2^32 / golden ratio */
	return scrambled >> (32 - bits);
}
|
||||
|
||||
/*******************
|
||||
* Hash table base *
|
||||
*******************/
|
||||
|
||||
/*
 * Declare the table struct: 2^bits buckets, "count" live entries, one
 * used bit per bucket in "used", keys stored flat in "keys".
 */
#define __KHASHL_TYPE(HType, khkey_t) \
	typedef struct HType { \
		khint_t bits, count; \
		khint32_t *used; \
		khkey_t *keys; \
	} HType;

/* Extern prototypes for a separately compiled instantiation. */
#define __KHASHL_PROTOTYPES(HType, prefix, khkey_t) \
	extern HType *prefix##_init(void); \
	extern void prefix##_destroy(HType *h); \
	extern void prefix##_clear(HType *h); \
	extern khint_t prefix##_getp(const HType *h, const khkey_t *key); \
	extern void prefix##_resize(HType *h, khint_t new_n_buckets); \
	extern khint_t prefix##_putp(HType *h, const khkey_t *key, int *absent); \
	extern void prefix##_del(HType *h, khint_t k);
|
||||
|
||||
/* init/release/destroy/clear; release frees the arrays but not *h itself. */
#define __KHASHL_IMPL_BASIC(SCOPE, HType, prefix) \
	SCOPE HType *prefix##_init(void) { \
		return (HType*)kcalloc(1, sizeof(HType)); \
	} \
	SCOPE void prefix##_release(HType *h) { \
		kfree((void *)h->keys); kfree(h->used); \
	} \
	SCOPE void prefix##_destroy(HType *h) { \
		if (!h) return; \
		prefix##_release(h); \
		kfree(h); \
	} \
	SCOPE void prefix##_clear(HType *h) { \
		if (h && h->used) { \
			khint_t n_buckets = (khint_t)1U << h->bits; \
			memset(h->used, 0, __kh_fsize(n_buckets) * sizeof(khint32_t)); \
			h->count = 0; \
		} \
	}
|
||||
|
||||
/*
 * Lookup with linear probing.  Returns the bucket index of the key, or
 * kh_end(h) (== n_buckets) when the key is absent.
 */
#define __KHASHL_IMPL_GET(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	SCOPE khint_t prefix##_getp_core(const HType *h, const khkey_t *key, khint_t hash) { \
		khint_t i, last, n_buckets, mask; \
		if (!h->keys) return 0; \
		n_buckets = (khint_t)1U << h->bits; \
		mask = n_buckets - 1U; \
		i = last = __kh_h2b(hash, h->bits); \
		while (__kh_used(h->used, i) && !__hash_eq(h->keys[i], *key)) { \
			i = (i + 1U) & mask; \
			if (i == last) return n_buckets; \
		} \
		return !__kh_used(h->used, i)? n_buckets : i; \
	} \
	SCOPE khint_t prefix##_getp(const HType *h, const khkey_t *key) { return prefix##_getp_core(h, key, __hash_fn(*key)); } \
	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { return prefix##_getp_core(h, &key, __hash_fn(key)); }
|
||||
|
||||
/*
 * Re-bucket into 2^ceil(log2(new_n_buckets)) buckets (minimum 4),
 * rehashing in place with only the new used-bit array as scratch space.
 * A request too small for the current count is a silent no-op.
 */
#define __KHASHL_IMPL_RESIZE(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	SCOPE void prefix##_resize(HType *h, khint_t new_n_buckets) { \
		khint32_t *new_used = NULL; \
		khint_t j = 0, x = new_n_buckets, n_buckets, new_bits, new_mask; \
		while ((x >>= 1) != 0) ++j; /* j = floor(log2(new_n_buckets)) */ \
		if (new_n_buckets & (new_n_buckets - 1)) ++j; /* round up to a power of two */ \
		new_bits = j > 2? j : 2; \
		new_n_buckets = (khint_t)1U << new_bits; \
		if (h->count > (new_n_buckets>>1) + (new_n_buckets>>2)) return; /* noop, requested size is too small */ \
		new_used = (khint32_t*)kcalloc(__kh_fsize(new_n_buckets), sizeof(khint32_t)); \
		n_buckets = h->keys? (khint_t)1U<<h->bits : 0U; \
		if (n_buckets < new_n_buckets) { /* expand */ \
			REALLOC_ARRAY(h->keys, new_n_buckets); \
		} /* otherwise shrink */ \
		new_mask = new_n_buckets - 1; \
		for (j = 0; j != n_buckets; ++j) { \
			khkey_t key; \
			if (!__kh_used(h->used, j)) continue; \
			key = h->keys[j]; \
			__kh_set_unused(h->used, j); \
			while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
				khint_t i; \
				i = __kh_h2b(__hash_fn(key), new_bits); \
				while (__kh_used(new_used, i)) i = (i + 1) & new_mask; \
				__kh_set_used(new_used, i); \
				if (i < n_buckets && __kh_used(h->used, i)) { /* kick out the existing element */ \
					{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
					__kh_set_unused(h->used, i); /* mark it as deleted in the old hash table */ \
				} else { /* write the element and jump out of the loop */ \
					h->keys[i] = key; \
					break; \
				} \
			} \
		} \
		if (n_buckets > new_n_buckets) /* shrink the hash table */ \
			REALLOC_ARRAY(h->keys, new_n_buckets); \
		kfree(h->used); /* free the working space */ \
		h->used = new_used, h->bits = new_bits; \
	}
|
||||
|
||||
/*
 * Insert *key.  On return *absent is 1 if the key was newly inserted,
 * 0 if an equal key was already present; returns the bucket index.
 * Grows once occupancy reaches 75% of the buckets.
 */
#define __KHASHL_IMPL_PUT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	SCOPE khint_t prefix##_putp_core(HType *h, const khkey_t *key, khint_t hash, int *absent) { \
		khint_t n_buckets, i, last, mask; \
		n_buckets = h->keys? (khint_t)1U<<h->bits : 0U; \
		*absent = -1; \
		if (h->count >= (n_buckets>>1) + (n_buckets>>2)) { /* rehashing */ \
			prefix##_resize(h, n_buckets + 1U); \
			n_buckets = (khint_t)1U<<h->bits; \
		} /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
		mask = n_buckets - 1; \
		i = last = __kh_h2b(hash, h->bits); \
		while (__kh_used(h->used, i) && !__hash_eq(h->keys[i], *key)) { \
			i = (i + 1U) & mask; \
			if (i == last) break; \
		} \
		if (!__kh_used(h->used, i)) { /* not present at all */ \
			h->keys[i] = *key; \
			__kh_set_used(h->used, i); \
			++h->count; \
			*absent = 1; \
		} else *absent = 0; /* Don't touch h->keys[i] if present */ \
		return i; \
	} \
	SCOPE khint_t prefix##_putp(HType *h, const khkey_t *key, int *absent) { return prefix##_putp_core(h, key, __hash_fn(*key), absent); } \
	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { return prefix##_putp_core(h, &key, __hash_fn(key), absent); }
|
||||
|
||||
/*
 * Delete bucket i using backward-shift deletion (no tombstones): later
 * entries of the same probe chain are moved up to fill the hole.
 * Returns 1 on success, 0 when the table has no keys array yet.
 */
#define __KHASHL_IMPL_DEL(SCOPE, HType, prefix, khkey_t, __hash_fn) \
	SCOPE int prefix##_del(HType *h, khint_t i) { \
		khint_t j = i, k, mask, n_buckets; \
		if (!h->keys) return 0; \
		n_buckets = (khint_t)1U<<h->bits; \
		mask = n_buckets - 1U; \
		while (1) { \
			j = (j + 1U) & mask; \
			if (j == i || !__kh_used(h->used, j)) break; /* j==i only when the table is completely full */ \
			k = __kh_h2b(__hash_fn(h->keys[j]), h->bits); \
			/* move keys[j] up only if its home bucket is outside (i, j] */ \
			if ((j > i && (k <= i || k > j)) || (j < i && (k <= i && k > j))) \
				h->keys[i] = h->keys[j], i = j; \
		} \
		__kh_set_unused(h->used, i); \
		--h->count; \
		return 1; \
	}
|
||||
|
||||
/* Declare the struct type and extern prototypes only. */
#define KHASHL_DECLARE(HType, prefix, khkey_t) \
	__KHASHL_TYPE(HType, khkey_t) \
	__KHASHL_PROTOTYPES(HType, prefix, khkey_t)

/* compatibility wrappers to make khash -> khashl migration easier */
#define __KHASH_COMPAT(SCOPE, HType, prefix, khkey_t) \
	typedef HType HType##_t; \
	SCOPE HType *kh_init_##prefix(void) { return prefix##_init(); } \
	SCOPE void kh_release_##prefix(HType *h) { prefix##_release(h); } \
	SCOPE void kh_destroy_##prefix(HType *h) { prefix##_destroy(h); } \
	SCOPE void kh_clear_##prefix(HType *h) { prefix##_clear(h); } \
	SCOPE khint_t kh_get_##prefix(const HType *h, khkey_t key) { \
		return prefix##_get(h, key); \
	} \
	SCOPE void kh_resize_##prefix(HType *h, khint_t new_n_buckets) { \
		prefix##_resize(h, new_n_buckets); \
	} \
	SCOPE khint_t kh_put_##prefix(HType *h, khkey_t key, int *absent) { \
		return prefix##_put(h, key, absent); \
	} \
	SCOPE int kh_del_##prefix(HType *h, khint_t i) { \
		return prefix##_del(h, i); \
	}

/* One-macro instantiation of a complete khashl table. */
#define KHASHL_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	__KHASHL_TYPE(HType, khkey_t) \
	__KHASHL_IMPL_BASIC(SCOPE, HType, prefix) \
	__KHASHL_IMPL_GET(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	__KHASHL_IMPL_RESIZE(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	__KHASHL_IMPL_PUT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	__KHASHL_IMPL_DEL(SCOPE, HType, prefix, khkey_t, __hash_fn)
|
||||
|
||||
/***************************
|
||||
* Ensemble of hash tables *
|
||||
***************************/
|
||||
|
||||
/* Iterator into an ensemble: sub-table index plus position within it. */
typedef struct {
	khint_t sub, pos;
} kh_ensitr_t;

/*
 * An "ensemble" splits one logical table into 2^bits sub-tables chosen
 * by the low bits of the hash, keeping each sub-table small.
 */
#define KHASHE_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	KHASHL_INIT(KH_LOCAL, HType##_sub, prefix##_sub, khkey_t, __hash_fn, __hash_eq) \
	typedef struct HType { \
		khint64_t count:54, bits:8; \
		HType##_sub *sub; \
	} HType; \
	SCOPE HType *prefix##_init(int bits) { \
		HType *g; \
		g = (HType*)kcalloc(1, sizeof(*g)); \
		g->bits = bits; \
		g->sub = (HType##_sub*)kcalloc(1U<<bits, sizeof(*g->sub)); \
		return g; \
	} \
	SCOPE void prefix##_destroy(HType *g) { \
		int t; \
		if (!g) return; \
		for (t = 0; t < 1<<g->bits; ++t) { kfree((void*)g->sub[t].keys); kfree(g->sub[t].used); } \
		kfree(g->sub); kfree(g); \
	} \
	SCOPE kh_ensitr_t prefix##_getp(const HType *g, const khkey_t *key) { \
		khint_t hash, low, ret; \
		kh_ensitr_t r; \
		HType##_sub *h; \
		hash = __hash_fn(*key); \
		low = hash & ((1U<<g->bits) - 1); /* low bits pick the sub-table */ \
		h = &g->sub[low]; \
		ret = prefix##_sub_getp_core(h, key, hash); \
		if (ret >= kh_end(h)) r.sub = low, r.pos = (khint_t)-1; \
		else r.sub = low, r.pos = ret; \
		return r; \
	} \
	SCOPE kh_ensitr_t prefix##_get(const HType *g, const khkey_t key) { return prefix##_getp(g, &key); } \
	SCOPE kh_ensitr_t prefix##_putp(HType *g, const khkey_t *key, int *absent) { \
		khint_t hash, low, ret; \
		kh_ensitr_t r; \
		HType##_sub *h; \
		hash = __hash_fn(*key); \
		low = hash & ((1U<<g->bits) - 1); \
		h = &g->sub[low]; \
		ret = prefix##_sub_putp_core(h, key, hash, absent); \
		if (*absent) ++g->count; \
		if (ret == 1U<<h->bits) r.sub = low, r.pos = (khint_t)-1; \
		else r.sub = low, r.pos = ret; \
		return r; \
	} \
	SCOPE kh_ensitr_t prefix##_put(HType *g, const khkey_t key, int *absent) { return prefix##_putp(g, &key, absent); } \
	SCOPE int prefix##_del(HType *g, kh_ensitr_t itr) { \
		HType##_sub *h = &g->sub[itr.sub]; \
		int ret; \
		ret = prefix##_sub_del(h, itr.pos); \
		if (ret) --g->count; \
		return ret; \
	}
|
||||
|
||||
/*****************************
|
||||
* More convenient interface *
|
||||
*****************************/
|
||||
|
||||
#define __kh_packed /* noop, we use -Werror=address-of-packed-member */
/* For the "cached hash" variants below: the bucket stores its own hash. */
#define __kh_cached_hash(x) ((x).hash)

/* Hash set keyed by khkey_t, plus kh_*-style compatibility wrappers. */
#define KHASHL_SET_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	typedef struct { khkey_t key; } __kh_packed HType##_s_bucket_t; \
	static kh_inline khint_t prefix##_s_hash(HType##_s_bucket_t x) { return __hash_fn(x.key); } \
	static kh_inline int prefix##_s_eq(HType##_s_bucket_t x, HType##_s_bucket_t y) { return __hash_eq(x.key, y.key); } \
	KHASHL_INIT(KH_LOCAL, HType, prefix##_s, HType##_s_bucket_t, prefix##_s_hash, prefix##_s_eq) \
	SCOPE HType *prefix##_init(void) { return prefix##_s_init(); } \
	SCOPE void prefix##_release(HType *h) { prefix##_s_release(h); } \
	SCOPE void prefix##_destroy(HType *h) { prefix##_s_destroy(h); } \
	SCOPE void prefix##_clear(HType *h) { prefix##_s_clear(h); } \
	SCOPE void prefix##_resize(HType *h, khint_t new_n_buckets) { prefix##_s_resize(h, new_n_buckets); } \
	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_s_bucket_t t; t.key = key; return prefix##_s_getp(h, &t); } \
	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_s_del(h, k); } \
	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_s_bucket_t t; t.key = key; return prefix##_s_putp(h, &t, absent); } \
	__KHASH_COMPAT(SCOPE, HType, prefix, khkey_t)

/* Hash map from khkey_t to kh_val_t, plus compatibility wrappers. */
#define KHASHL_MAP_INIT(SCOPE, HType, prefix, khkey_t, kh_val_t, __hash_fn, __hash_eq) \
	typedef struct { khkey_t key; kh_val_t val; } __kh_packed HType##_m_bucket_t; \
	static kh_inline khint_t prefix##_m_hash(HType##_m_bucket_t x) { return __hash_fn(x.key); } \
	static kh_inline int prefix##_m_eq(HType##_m_bucket_t x, HType##_m_bucket_t y) { return __hash_eq(x.key, y.key); } \
	KHASHL_INIT(KH_LOCAL, HType, prefix##_m, HType##_m_bucket_t, prefix##_m_hash, prefix##_m_eq) \
	SCOPE HType *prefix##_init(void) { return prefix##_m_init(); } \
	SCOPE void prefix##_release(HType *h) { prefix##_m_release(h); } \
	SCOPE void prefix##_destroy(HType *h) { prefix##_m_destroy(h); } \
	SCOPE void prefix##_clear(HType *h) { prefix##_m_clear(h); } \
	SCOPE void prefix##_resize(HType *h, khint_t new_n_buckets) { prefix##_m_resize(h, new_n_buckets); } \
	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_m_bucket_t t; t.key = key; return prefix##_m_getp(h, &t); } \
	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_m_del(h, k); } \
	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_m_bucket_t t; t.key = key; return prefix##_m_putp(h, &t, absent); } \
	__KHASH_COMPAT(SCOPE, HType, prefix, khkey_t)

/* Set variant caching each key's hash in the bucket (faster equality). */
#define KHASHL_CSET_INIT(SCOPE, HType, prefix, khkey_t, __hash_fn, __hash_eq) \
	typedef struct { khkey_t key; khint_t hash; } __kh_packed HType##_cs_bucket_t; \
	static kh_inline int prefix##_cs_eq(HType##_cs_bucket_t x, HType##_cs_bucket_t y) { return x.hash == y.hash && __hash_eq(x.key, y.key); } \
	KHASHL_INIT(KH_LOCAL, HType, prefix##_cs, HType##_cs_bucket_t, __kh_cached_hash, prefix##_cs_eq) \
	SCOPE HType *prefix##_init(void) { return prefix##_cs_init(); } \
	SCOPE void prefix##_destroy(HType *h) { prefix##_cs_destroy(h); } \
	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_cs_bucket_t t; t.key = key; t.hash = __hash_fn(key); return prefix##_cs_getp(h, &t); } \
	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_cs_del(h, k); } \
	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_cs_bucket_t t; t.key = key, t.hash = __hash_fn(key); return prefix##_cs_putp(h, &t, absent); }

/* Map variant caching each key's hash in the bucket. */
#define KHASHL_CMAP_INIT(SCOPE, HType, prefix, khkey_t, kh_val_t, __hash_fn, __hash_eq) \
	typedef struct { khkey_t key; kh_val_t val; khint_t hash; } __kh_packed HType##_cm_bucket_t; \
	static kh_inline int prefix##_cm_eq(HType##_cm_bucket_t x, HType##_cm_bucket_t y) { return x.hash == y.hash && __hash_eq(x.key, y.key); } \
	KHASHL_INIT(KH_LOCAL, HType, prefix##_cm, HType##_cm_bucket_t, __kh_cached_hash, prefix##_cm_eq) \
	SCOPE HType *prefix##_init(void) { return prefix##_cm_init(); } \
	SCOPE void prefix##_destroy(HType *h) { prefix##_cm_destroy(h); } \
	SCOPE khint_t prefix##_get(const HType *h, khkey_t key) { HType##_cm_bucket_t t; t.key = key; t.hash = __hash_fn(key); return prefix##_cm_getp(h, &t); } \
	SCOPE int prefix##_del(HType *h, khint_t k) { return prefix##_cm_del(h, k); } \
	SCOPE khint_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_cm_bucket_t t; t.key = key, t.hash = __hash_fn(key); return prefix##_cm_putp(h, &t, absent); }

/* Ensemble-of-tables map; iterators are kh_ensitr_t, not khint_t. */
#define KHASHE_MAP_INIT(SCOPE, HType, prefix, khkey_t, kh_val_t, __hash_fn, __hash_eq) \
	typedef struct { khkey_t key; kh_val_t val; } __kh_packed HType##_m_bucket_t; \
	static kh_inline khint_t prefix##_m_hash(HType##_m_bucket_t x) { return __hash_fn(x.key); } \
	static kh_inline int prefix##_m_eq(HType##_m_bucket_t x, HType##_m_bucket_t y) { return __hash_eq(x.key, y.key); } \
	KHASHE_INIT(KH_LOCAL, HType, prefix##_m, HType##_m_bucket_t, prefix##_m_hash, prefix##_m_eq) \
	SCOPE HType *prefix##_init(int bits) { return prefix##_m_init(bits); } \
	SCOPE void prefix##_destroy(HType *h) { prefix##_m_destroy(h); } \
	SCOPE kh_ensitr_t prefix##_get(const HType *h, khkey_t key) { HType##_m_bucket_t t; t.key = key; return prefix##_m_getp(h, &t); } \
	SCOPE int prefix##_del(HType *h, kh_ensitr_t k) { return prefix##_m_del(h, k); } \
	SCOPE kh_ensitr_t prefix##_put(HType *h, khkey_t key, int *absent) { HType##_m_bucket_t t; t.key = key; return prefix##_m_putp(h, &t, absent); }
|
||||
|
||||
/**************************
 * Public macro functions *
 **************************/

/* Raw bucket struct (key, or key+val) at index x. */
#define kh_bucket(h, x) ((h)->keys[x])

/*! @function
  @abstract     Get the number of elements in the hash table
  @param  h     Pointer to the hash table
  @return       Number of elements in the hash table [khint_t]
 */
#define kh_size(h) ((h)->count)

/* Current number of buckets; 0 before the first allocation. */
#define kh_capacity(h) ((h)->keys? 1U<<(h)->bits : 0U)

/*! @function
  @abstract     Get the end iterator
  @param  h     Pointer to the hash table
  @return       The end iterator [khint_t]
 */
#define kh_end(h) kh_capacity(h)

/*! @function
  @abstract     Get key given an iterator
  @param  h     Pointer to the hash table
  @param  x     Iterator to the bucket [khint_t]
  @return       Key [type of keys]
 */
#define kh_key(h, x) ((h)->keys[x].key)

/*! @function
  @abstract     Get value given an iterator
  @param  h     Pointer to the hash table
  @param  x     Iterator to the bucket [khint_t]
  @return       Value [type of values]
  @discussion   For hash sets, calling this results in segfault.
 */
#define kh_val(h, x) ((h)->keys[x].val)

/*! @function
  @abstract     Alias of kh_val()
 */
#define kh_value(h, x) kh_val(h, x)

/*! @function
  @abstract     Test whether a bucket contains data.
  @param  h     Pointer to the hash table
  @param  x     Iterator to the bucket [khint_t]
  @return       1 if containing data; 0 otherwise [int]
 */
#define kh_exist(h, x) __kh_used((h)->used, (x))

/* Ensemble counterparts taking a kh_ensitr_t iterator. */
#define kh_ens_key(g, x) kh_key(&(g)->sub[(x).sub], (x).pos)
#define kh_ens_val(g, x) kh_val(&(g)->sub[(x).sub], (x).pos)
#define kh_ens_exist(g, x) kh_exist(&(g)->sub[(x).sub], (x).pos)
#define kh_ens_is_end(x) ((x).pos == (khint_t)-1)
#define kh_ens_size(g) ((g)->count)

/**************************************
 * Common hash and equality functions *
 **************************************/

#define kh_eq_generic(a, b) ((a) == (b))
#define kh_eq_str(a, b) (strcmp((a), (b)) == 0)
/* Identity "hash" for keys that are already well-distributed integers. */
#define kh_hash_dummy(x) ((khint_t)(x))
|
||||
|
||||
static kh_inline khint_t kh_hash_uint32(khint_t key) {
|
||||
key += ~(key << 15);
|
||||
key ^= (key >> 10);
|
||||
key += (key << 3);
|
||||
key ^= (key >> 6);
|
||||
key += ~(key << 11);
|
||||
key ^= (key >> 16);
|
||||
return key;
|
||||
}
|
||||
|
||||
static kh_inline khint_t kh_hash_uint64(khint64_t key) {
|
||||
key = ~key + (key << 21);
|
||||
key = key ^ key >> 24;
|
||||
key = (key + (key << 3)) + (key << 8);
|
||||
key = key ^ key >> 14;
|
||||
key = (key + (key << 2)) + (key << 4);
|
||||
key = key ^ key >> 28;
|
||||
key = key + (key << 31);
|
||||
return (khint_t)key;
|
||||
}
|
||||
|
||||
#define KH_FNV_SEED 11
|
||||
|
||||
static kh_inline khint_t kh_hash_str(const char *s) { /* FNV1a */
|
||||
khint_t h = KH_FNV_SEED ^ 2166136261U;
|
||||
const unsigned char *t = (const unsigned char*)s;
|
||||
for (; *t; ++t)
|
||||
h ^= *t, h *= 16777619;
|
||||
return h;
|
||||
}
|
||||
|
||||
/*
 * FNV-1a hash of the first "len" bytes of "s", seeded like
 * kh_hash_str().  A non-positive "len" hashes no bytes and yields
 * the seeded basis value.
 */
static kh_inline khint_t kh_hash_bytes(int len, const unsigned char *s) {
	khint_t h = KH_FNV_SEED ^ 2166136261U;
	int i;
	for (i = 0; i < len; ++i) {
		h ^= s[i];
		h *= 16777619;
	}
	return h;
}
|
||||
|
||||
/*! @function
|
||||
@abstract Get the start iterator
|
||||
@param h Pointer to the hash table
|
||||
@return The start iterator [khint_t]
|
||||
*/
|
||||
#define kh_begin(h) (khint_t)(0)
|
||||
|
||||
/*! @function
  @abstract     Iterate over the entries in the hash table
  @param  h     Pointer to the hash table
  @param  kvar  Variable to which key will be assigned
  @param  vvar  Variable to which value will be assigned
  @param  code  Block of code to execute for each live bucket
 */
/*
 * Note: the loop variable must not collide with names used in "code";
 * "kh_iter_i" replaces the previous "__i", which is a reserved
 * identifier (C11 7.1.3: identifiers beginning with two underscores
 * belong to the implementation).
 */
#define kh_foreach(h, kvar, vvar, code) { khint_t kh_iter_i; \
	for (kh_iter_i = kh_begin(h); kh_iter_i != kh_end(h); ++kh_iter_i) { \
		if (!kh_exist(h, kh_iter_i)) continue; \
		(kvar) = kh_key(h, kh_iter_i); \
		(vvar) = kh_val(h, kh_iter_i); \
		code; \
	} }
|
||||
|
||||
/*! @function
  @abstract     Iterate over the values in the hash table
  @param  h     Pointer to the hash table
  @param  vvar  Variable to which value will be assigned
  @param  code  Block of code to execute for each live bucket
 */
/*
 * Note: the loop variable must not collide with names used in "code";
 * "kh_iter_i" replaces the previous "__i", which is a reserved
 * identifier (C11 7.1.3: identifiers beginning with two underscores
 * belong to the implementation).
 */
#define kh_foreach_value(h, vvar, code) { khint_t kh_iter_i; \
	for (kh_iter_i = kh_begin(h); kh_iter_i != kh_end(h); ++kh_iter_i) { \
		if (!kh_exist(h, kh_iter_i)) continue; \
		(vvar) = kh_val(h, kh_iter_i); \
		code; \
	} }
|
||||
|
||||
/*
 * khashl stores keys by value, but oidhash() takes a pointer: hash
 * the caller's copy through its address.
 */
static inline unsigned int oidhash_by_value(struct object_id oid)
{
	const struct object_id *p = &oid;
	return oidhash(p);
}
|
||||
|
||||
/*
 * khashl compares keys by value, but oideq() takes pointers: compare
 * the two copies through their addresses.
 */
static inline int oideq_by_value(struct object_id a, struct object_id b)
{
	const struct object_id *lhs = &a;
	const struct object_id *rhs = &b;
	return oideq(lhs, rhs);
}
|
||||
|
||||
KHASHL_SET_INIT(KH_LOCAL, kh_oid_set, oid_set, struct object_id,
|
||||
oidhash_by_value, oideq_by_value)
|
||||
|
||||
KHASHL_MAP_INIT(KH_LOCAL, kh_oid_map, oid_map, struct object_id, void *,
|
||||
oidhash_by_value, oideq_by_value)
|
||||
|
||||
KHASHL_MAP_INIT(KH_LOCAL, kh_oid_pos, oid_pos, struct object_id, int,
|
||||
oidhash_by_value, oideq_by_value)
|
||||
|
||||
#endif /* __AC_KHASHL_H */
|
|
@ -704,7 +704,7 @@ static void filter_combine__free(void *filter_data)
|
|||
for (sub = 0; sub < d->nr; sub++) {
|
||||
list_objects_filter__free(d->sub[sub].filter);
|
||||
oidset_clear(&d->sub[sub].seen);
|
||||
if (d->sub[sub].omits.set.size)
|
||||
if (kh_size(&d->sub[sub].omits.set))
|
||||
BUG("expected oidset to be cleared already");
|
||||
}
|
||||
free(d->sub);
|
||||
|
|
2
loose.h
2
loose.h
|
@ -1,7 +1,7 @@
|
|||
#ifndef LOOSE_H
|
||||
#define LOOSE_H
|
||||
|
||||
#include "khash.h"
|
||||
#include "khashl.h"
|
||||
|
||||
struct loose_object_map {
|
||||
kh_oid_map_t *to_compat;
|
||||
|
|
|
@ -819,6 +819,7 @@ static int write_midx_bitmap(const char *midx_name,
|
|||
for (i = 0; i < pdata->nr_objects; i++)
|
||||
index[i] = &pdata->objects[i].idx;
|
||||
|
||||
bitmap_writer_init(the_repository);
|
||||
bitmap_writer_show_progress(flags & MIDX_PROGRESS);
|
||||
bitmap_writer_build_type_index(pdata, index, pdata->nr_objects);
|
||||
|
||||
|
@ -838,7 +839,7 @@ static int write_midx_bitmap(const char *midx_name,
|
|||
for (i = 0; i < pdata->nr_objects; i++)
|
||||
index[pack_order[i]] = &pdata->objects[i].idx;
|
||||
|
||||
bitmap_writer_select_commits(commits, commits_nr, -1);
|
||||
bitmap_writer_select_commits(commits, commits_nr);
|
||||
ret = bitmap_writer_build(pdata);
|
||||
if (ret < 0)
|
||||
goto cleanup;
|
||||
|
|
|
@ -163,7 +163,7 @@ struct raw_object_store {
|
|||
*/
|
||||
struct object_directory *odb;
|
||||
struct object_directory **odb_tail;
|
||||
struct kh_odb_path_map *odb_by_path;
|
||||
struct odb_path_map *odb_by_path;
|
||||
|
||||
int loaded_alternates;
|
||||
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
#ifndef OBJECT_STORE_H
|
||||
#define OBJECT_STORE_H
|
||||
|
||||
#include "khash.h"
|
||||
#include "khashl.h"
|
||||
#include "dir.h"
|
||||
#include "object-store-ll.h"
|
||||
|
||||
KHASH_INIT(odb_path_map, const char * /* key: odb_path */,
|
||||
struct object_directory *, 1, fspathhash, fspatheq)
|
||||
KHASHL_MAP_INIT(KH_LOCAL, odb_path_map, odb_path_map,
|
||||
const char * /* key: odb_path */, struct object_directory *,
|
||||
fspathhash, fspatheq)
|
||||
|
||||
#endif /* OBJECT_STORE_H */
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue