-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/CodingGuidelines | 16
-rw-r--r--  Documentation/RelNotes/2.45.0.txt | 90
-rw-r--r--  Documentation/config/core.txt | 2
-rw-r--r--  Documentation/git-fast-import.txt | 35
-rw-r--r--  Documentation/git-replay.txt | 2
-rw-r--r--  Documentation/git-update-ref.txt | 58
-rw-r--r--  Documentation/githooks.txt | 16
-rwxr-xr-x  GIT-VERSION-GEN | 2
-rw-r--r--  INSTALL | 2
-rw-r--r--  Makefile | 97
-rw-r--r--  apply.c | 22
-rw-r--r--  branch.c | 10
-rw-r--r--  branch.h | 14
-rw-r--r--  builtin/add.c | 11
-rw-r--r--  builtin/blame.c | 4
-rw-r--r--  builtin/checkout.c | 6
-rw-r--r--  builtin/commit.c | 7
-rw-r--r--  builtin/credential-cache--daemon.c | 2
-rw-r--r--  builtin/credential-cache.c | 3
-rw-r--r--  builtin/fast-import.c | 162
-rw-r--r--  builtin/fetch.c | 1
-rw-r--r--  builtin/merge-tree.c | 2
-rw-r--r--  builtin/update-ref.c | 26
-rw-r--r--  compat/mingw.c | 19
-rw-r--r--  compat/mingw.h | 6
-rw-r--r--  config.c | 4
-rw-r--r--  config.mak.uname | 156
-rw-r--r--  contrib/completion/git-prompt.sh | 4
-rw-r--r--  contrib/credential/osxkeychain/Makefile | 3
-rw-r--r--  contrib/credential/osxkeychain/git-credential-osxkeychain.c | 376
-rwxr-xr-x  contrib/vscode/init.sh | 1
-rw-r--r--  date.c | 36
-rw-r--r--  date.h | 6
-rw-r--r--  diff-lib.c | 11
-rw-r--r--  editor.c | 7
-rw-r--r--  git-compat-util.h | 13
-rw-r--r--  git-curl-compat.h | 9
-rw-r--r--  git-gui/.gitattributes | 1
-rw-r--r--  git-gui/Makefile | 16
-rw-r--r--  gitk-git/Makefile | 4
-rw-r--r--  gpg-interface.c | 2
-rw-r--r--  http.c | 1
-rw-r--r--  imap-send.c | 24
-rw-r--r--  log-tree.c | 2
-rw-r--r--  mem-pool.c | 6
-rw-r--r--  midx-write.c | 1525
-rw-r--r--  midx.c | 1553
-rw-r--r--  midx.h | 19
-rw-r--r--  oss-fuzz/fuzz-date.c | 6
-rw-r--r--  path.c | 17
-rw-r--r--  path.h | 6
-rw-r--r--  po/tr.po | 2
-rw-r--r--  pretty.c | 18
-rw-r--r--  pretty.h | 2
-rw-r--r--  read-cache-ll.h | 4
-rw-r--r--  read-cache.c | 8
-rw-r--r--  ref-filter.c | 2
-rw-r--r--  reflog-walk.c | 4
-rw-r--r--  reflog-walk.h | 4
-rw-r--r--  refs/reftable-backend.c | 3
-rw-r--r--  reftable/basics.c | 7
-rw-r--r--  reftable/basics.h | 7
-rw-r--r--  reftable/basics_test.c | 55
-rw-r--r--  reftable/block.c | 287
-rw-r--r--  reftable/block.h | 47
-rw-r--r--  reftable/block_test.c | 6
-rw-r--r--  reftable/iter.c | 2
-rw-r--r--  reftable/reader.c | 176
-rw-r--r--  reftable/record.c | 34
-rw-r--r--  reftable/record.h | 6
-rw-r--r--  reftable/refname.c | 53
-rw-r--r--  reftable/reftable-writer.h | 3
-rw-r--r--  reftable/stack.c | 125
-rw-r--r--  reftable/stack.h | 4
-rw-r--r--  reftable/stack_test.c | 77
-rw-r--r--  remote-curl.c | 3
-rw-r--r--  revision.h | 1
-rwxr-xr-x  t/check-non-portable-shell.pl | 2
-rw-r--r--  t/helper/test-date.c | 2
-rw-r--r--  t/lib-parallel-checkout.sh | 2
-rwxr-xr-x  t/t0301-credential-cache.sh | 8
-rwxr-xr-x  t/t0610-reftable-basics.sh | 138
-rwxr-xr-x  t/t1016-compatObjectFormat.sh | 12
-rwxr-xr-x  t/t1400-update-ref.sh | 34
-rwxr-xr-x  t/t2020-checkout-detach.sh | 5
-rwxr-xr-x  t/t2104-update-index-skip-worktree.sh | 30
-rwxr-xr-x  t/t2200-add-update.sh | 10
-rwxr-xr-x  t/t2400-worktree-add.sh | 2
-rwxr-xr-x  t/t3428-rebase-signoff.sh | 67
-rwxr-xr-x  t/t4011-diff-symlink.sh | 4
-rw-r--r--  t/t4018/csharp-exclude-assignments | 20
-rw-r--r--  t/t4018/csharp-exclude-control-statements | 34
-rw-r--r--  t/t4018/csharp-exclude-exceptions | 29
-rw-r--r--  t/t4018/csharp-exclude-generic-method-calls | 12
-rw-r--r--  t/t4018/csharp-exclude-init-dispose | 22
-rw-r--r--  t/t4018/csharp-exclude-iterations | 26
-rw-r--r--  t/t4018/csharp-exclude-method-calls | 20
-rw-r--r--  t/t4018/csharp-exclude-other | 18
-rw-r--r--  t/t4018/csharp-method | 10
-rw-r--r--  t/t4018/csharp-method-array | 10
-rw-r--r--  t/t4018/csharp-method-explicit | 12
-rw-r--r--  t/t4018/csharp-method-generics | 11
-rw-r--r--  t/t4018/csharp-method-generics-alternate-spaces | 11
-rw-r--r--  t/t4018/csharp-method-modifiers | 13
-rw-r--r--  t/t4018/csharp-method-multiline | 10
-rw-r--r--  t/t4018/csharp-method-params | 10
-rw-r--r--  t/t4018/csharp-method-special-chars | 11
-rw-r--r--  t/t4018/csharp-method-with-spacing | 10
-rw-r--r--  t/t4018/csharp-property | 11
-rw-r--r--  t/t4018/csharp-property-braces-same-line | 10
-rwxr-xr-x  t/t4210-log-i18n.sh | 4
-rwxr-xr-x  t/t7300-clean.sh | 1
-rwxr-xr-x  t/t7501-commit-basic-functionality.sh | 16
-rwxr-xr-x  t/t7700-repack.sh | 2
-rwxr-xr-x  t/t9300-fast-import.sh | 630
-rw-r--r--  t/test-lib-functions.sh | 12
-rw-r--r--  t/unit-tests/t-prio-queue.c | 51
-rw-r--r--  usage.c | 5
-rw-r--r--  userdiff.c | 48
120 files changed, 3972 insertions, 2787 deletions
diff --git a/.mailmap b/.mailmap
index 82129be449..18128a1250 100644
--- a/.mailmap
+++ b/.mailmap
@@ -152,6 +152,7 @@ Lars Doelle <lars.doelle@on-line ! de>
Lars Doelle <lars.doelle@on-line.de>
Lars Noschinski <lars@public.noschinski.de> <lars.noschinski@rwth-aachen.de>
Li Hong <leehong@pku.edu.cn>
+Linus Arver <linus@ucla.edu> <linusa@google.com>
Linus Torvalds <torvalds@linux-foundation.org> <torvalds@evo.osdl.org>
Linus Torvalds <torvalds@linux-foundation.org> <torvalds@g5.osdl.org>
Linus Torvalds <torvalds@linux-foundation.org> <torvalds@osdl.org>
diff --git a/Documentation/CodingGuidelines b/Documentation/CodingGuidelines
index ab39509d59..1d92b2da03 100644
--- a/Documentation/CodingGuidelines
+++ b/Documentation/CodingGuidelines
@@ -188,6 +188,22 @@ For shell scripts specifically (not exhaustive):
hopefully nobody starts using "local" before they are reimplemented
in C ;-)
+ - Some versions of shell do not understand "export variable=value",
+ so we write "variable=value" and then "export variable" on two
+ separate lines.
+
+ - Some versions of dash have broken variable assignment when prefixed
+ with "local", "export", and "readonly", in that the value to be
+ assigned goes through field splitting at $IFS unless quoted.
+
+ (incorrect)
+ local variable=$value
+ local variable=$(command args)
+
+ (correct)
+ local variable="$value"
+ local variable="$(command args)"
+
- Use octal escape sequences (e.g. "\302\242"), not hexadecimal (e.g.
"\xc2\xa2") in printf format strings, since hexadecimal escape
sequences are not portable.
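
As a small illustration of the first rule added above (the variable name is only an example), the portable spelling keeps the assignment and the export on separate lines, and quoting the assigned value also sidesteps the dash field-splitting problem:

    # portable: assign first, then export on its own line
    GIT_MERGE_VERBOSITY=5
    export GIT_MERGE_VERBOSITY

    # quoting protects "local" assignments from field splitting in dash
    example () {
        local answer="$(printf '%s %s' forty two)"
        echo "$answer"
    }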
diff --git a/Documentation/RelNotes/2.45.0.txt b/Documentation/RelNotes/2.45.0.txt
index a65205cc95..15704ff98f 100644
--- a/Documentation/RelNotes/2.45.0.txt
+++ b/Documentation/RelNotes/2.45.0.txt
@@ -77,8 +77,17 @@ UI, Workflows & Features
skip showing the hunk immediately after it has already been shown, and
an additional action to explicitly ask to reshow the current hunk.
- * "git pack-refs" learned the "--auto" option, which is a useful
- addition to be triggered from "git gc --auto".
+ * "git pack-refs" learned the "--auto" option, which defers the decision of
+ whether and how to pack to the ref backend. This is used by the reftable
+ backend to avoid repacking of an already-optimal ref database. The new mode
+ is triggered from "git gc --auto".
+
+ * "git add -u <pathspec>" and "git commit [-i] <pathspec>" did not
+ diagnose a pathspec element that did not match any files in certain
+ situations, unlike "git add <pathspec>" did.
+
 + * The userdiff patterns for C# have been updated.
+
Performance, Internal Implementation, Development Support etc.
@@ -93,7 +102,7 @@ Performance, Internal Implementation, Development Support etc.
* The way placeholders are to be marked-up in documentation have been
specified; use "_<placeholder>_" to typeset the word inside a pair
- of <angle-brakets> emphasized.
+ of <angle-brackets> emphasized.
* "git --no-lazy-fetch cmd" allows to run "cmd" while disabling lazy
fetching of objects from the promisor remote, which may be handy
@@ -103,9 +112,6 @@ Performance, Internal Implementation, Development Support etc.
clean.requireForce has been simplified, together with the
documentation.
- * The code to iterate over refs with the reftable backend has seen
- some optimization.
-
* Uses of xwrite() helper have been audited and updated for better
error checking and simpler code.
@@ -156,6 +162,28 @@ Performance, Internal Implementation, Development Support etc.
* The .editorconfig file has been taught that a Makefile uses HT
indentation.
+ * t-prio-queue test has been cleaned up by using C99 compound
+ literals; this is meant to also serve as a weather-balloon to smoke
+ out folks with compilers who have trouble compiling code that uses
+ the feature.
+
 + * The Windows binary used to decide whether to use Unix-domain sockets at
 + build time, but it now makes the decision at runtime instead.
+
+ * The "shared repository" test in the t0610 reftable test failed
+ under restrictive umask setting (e.g. 007), which has been
+ corrected.
+
+ * Document and apply workaround for a buggy version of dash that
+ mishandles "local var=val" construct.
+
+ * The codepaths that reach date_mode_from_type() have been updated to
+ pass "struct date_mode" by value to make them thread safe.
+
+ * The strategy to compact multiple tables of reftables after many
+ operations accumulate many entries has been improved to avoid
+ accumulating too many tables uncollected.
+
Fixes since v2.44
-----------------
@@ -219,7 +247,7 @@ Fixes since v2.44
This has been corrected.
(merge 199f44cb2e ps/remote-helper-repo-initialization-fix later to maint).
- * Various parts of upload-pack has been updated to bound the resource
+ * Various parts of upload-pack have been updated to bound the resource
consumption relative to the size of the repository to protect from
abusive clients.
(merge 6cd05e768b jk/upload-pack-bounded-resources later to maint).
@@ -266,11 +294,11 @@ Fixes since v2.44
variable that is no longer used.
(merge 72a8d3f027 pw/rebase-i-ignore-cherry-pick-help-environment later to maint).
- * The code to find the effective end of log message can fall into an
+ * The code to find the effective end of log messages can fall into an
endless loop, which has been corrected.
(merge 2541cba2d6 fs/find-end-of-log-message-fix later to maint).
- * Mark-ups used in the documentation has been improved for
+ * Mark-up used in the documentation has been improved for
consistency.
(merge 45d5ed3e50 ja/doc-markup-fixes later to maint).
@@ -335,6 +363,44 @@ Fixes since v2.44
call advise() after checking advice_enabled().
(merge 6412d01527 rj/use-adv-if-enabled later to maint).
+ * Another "set -u" fix for the bash prompt (in contrib/) script.
+ (merge d7805bc743 vs/complete-with-set-u-fix later to maint).
+
+ * "git checkout/switch --detach foo", after switching to the detached
+ HEAD state, gave the tracking information for the 'foo' branch,
+ which was pointless.
+
+ * "git apply" has been updated to lift the hardcoded pathname length
+ limit, which in turn allowed a mksnpath() function that is no
+ longer used.
+ (merge 708f7e0590 rs/apply-lift-path-length-limit later to maint).
+
+ * A file descriptor leak in an error codepath, used when "git apply
+ --reject" fails to create the *.rej file, has been corrected.
+ (merge 2b1f456adf rs/apply-reject-fd-leakfix later to maint).
+
+ * A config parser callback function fell through instead of returning
+ after recognising and processing a variable, wasting cycles, which
+ has been corrected.
+ (merge a816ccd642 ds/fetch-config-parse-microfix later to maint).
+
+ * Fix was added to work around a regression in libcURL 8.7.0 (which has
+ already been fixed in their tip of the tree).
+ (merge 92a209bf24 jk/libcurl-8.7-regression-workaround later to maint).
+
+ * The variable that holds the value read from the core.excludefile
+ configuration variable used to leak, which has been corrected.
+ (merge 0e0fefb29f jc/unleak-core-excludesfile later to maint).
+
+ * vreportf(), which is used by error() and friends, has been taught
+ to give the error message printf-format string when its vsnprintf()
+ call fails, instead of showing nothing useful to identify the
+ nature of the error.
+ (merge c63adab961 rs/usage-fallback-to-show-message-format later to maint).
+
 + * Adjust to an upcoming change to GNU make that breaks our Makefiles.
+ (merge 227b8fd902 tb/make-indent-conditional-with-non-spaces later to maint).
+
* Other code cleanup, docfix, build fix, etc.
(merge f0e578c69c rs/use-xstrncmpz later to maint).
(merge 83e6eb7d7a ba/credential-test-clean-fix later to maint).
@@ -359,3 +425,9 @@ Fixes since v2.44
(merge 86f9ce7dd6 bl/doc-config-fixes later to maint).
(merge 0d527842b7 az/grep-group-error-message-update later to maint).
(merge 7c43bdf07b rs/strbuf-expand-bad-format later to maint).
+ (merge 8b68b48d5c ds/typofix-core-config-doc later to maint).
+ (merge 39bb692152 rs/imap-send-use-xsnprintf later to maint).
+ (merge 8d320cec60 jc/t2104-style-fixes later to maint).
+ (merge b4454d5a7b pw/t3428-cleanup later to maint).
+ (merge 84a7c33a4b pf/commitish-committish later to maint).
+ (merge 8882ee9d68 la/mailmap-entry later to maint).
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index bbe869c497..93d65e1dfd 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -703,7 +703,7 @@ core.createObject::
will not overwrite existing objects.
+
On some file system/operating system combinations, this is unreliable.
-Set this config setting to 'rename' there; However, This will remove the
+Set this config setting to 'rename' there; however, this will remove the
check that makes sure that existing object files will not get overwritten.
core.notesRef::
diff --git a/Documentation/git-fast-import.txt b/Documentation/git-fast-import.txt
index b2607366b9..8b6dde45f1 100644
--- a/Documentation/git-fast-import.txt
+++ b/Documentation/git-fast-import.txt
@@ -630,18 +630,28 @@ in octal. Git only supports the following modes:
In both formats `<path>` is the complete path of the file to be added
(if not already existing) or modified (if already existing).
-A `<path>` string must use UNIX-style directory separators (forward
-slash `/`), may contain any byte other than `LF`, and must not
-start with double quote (`"`).
-
-A path can use C-style string quoting; this is accepted in all cases
-and mandatory if the filename starts with double quote or contains
-`LF`. In C-style quoting, the complete name should be surrounded with
-double quotes, and any `LF`, backslash, or double quote characters
-must be escaped by preceding them with a backslash (e.g.,
-`"path/with\n, \\ and \" in it"`).
-
-The value of `<path>` must be in canonical form. That is it must not:
+A `<path>` can be written as unquoted bytes or a C-style quoted string.
+
+When a `<path>` does not start with a double quote (`"`), it is an
+unquoted string and is parsed as literal bytes without any escape
+sequences. However, if the filename contains `LF` or starts with double
+quote, it cannot be represented as an unquoted string and must be
+quoted. Additionally, the source `<path>` in `filecopy` or `filerename`
+must be quoted if it contains SP.
+
+When a `<path>` starts with a double quote (`"`), it is a C-style quoted
+string, where the complete filename is enclosed in a pair of double
+quotes and escape sequences are used. Certain characters must be escaped
+by preceding them with a backslash: `LF` is written as `\n`, backslash
+as `\\`, and double quote as `\"`. Some characters may optionally be
+written with escape sequences: `\a` for bell, `\b` for backspace, `\f`
+for form feed, `\n` for line feed, `\r` for carriage return, `\t` for
+horizontal tab, and `\v` for vertical tab. Any byte can be written with
+3-digit octal codes (e.g., `\033`). All filenames can be represented as
+quoted strings.
+
+A `<path>` must use UNIX-style directory separators (forward slash `/`)
+and its value must be in canonical form. That is it must not:
* contain an empty directory component (e.g. `foo//bar` is invalid),
* end with a directory separator (e.g. `foo/` is invalid),
@@ -651,6 +661,7 @@ The value of `<path>` must be in canonical form. That is it must not:
The root of the tree can be represented by an empty string as `<path>`.
+`<path>` cannot contain NUL, either literally or escaped as `\000`.
It is recommended that `<path>` always be encoded using UTF-8.
`filedelete`
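
As a hedged sketch of the quoting rules described above (the ref name, file contents, and paths are invented for illustration), the following stream adds one file whose name contains spaces without quoting, one whose name contains LF and therefore must be quoted, and then copies the first with a quoted source path:

    git fast-import --quiet <<\EOF
    blob
    mark :1
    data 6
    hello
    commit refs/heads/path-demo
    committer C O Mitter <committer@example.com> 0 +0000
    data 5
    demo
    M 100644 :1 unquoted name with spaces.txt
    M 100644 :1 "name with\nnewline.txt"
    C "unquoted name with spaces.txt" copied.txt
    EOF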
diff --git a/Documentation/git-replay.txt b/Documentation/git-replay.txt
index f6c269c62d..8f3300c683 100644
--- a/Documentation/git-replay.txt
+++ b/Documentation/git-replay.txt
@@ -46,7 +46,7 @@ the new commits (in other words, this mimics a cherry-pick operation).
Range of commits to replay. More than one <revision-range> can
be passed, but in `--advance <branch>` mode, they should have
a single tip, so that it's clear where <branch> should point
- to. See "Specifying Ranges" in linkgit:git-rev-parse and the
+ to. See "Specifying Ranges" in linkgit:git-rev-parse[1] and the
"Commit Limiting" options below.
include::rev-list-options.txt[]
diff --git a/Documentation/git-update-ref.txt b/Documentation/git-update-ref.txt
index 0561808cca..374a2ebd2b 100644
--- a/Documentation/git-update-ref.txt
+++ b/Documentation/git-update-ref.txt
@@ -8,21 +8,21 @@ git-update-ref - Update the object name stored in a ref safely
SYNOPSIS
--------
[verse]
-'git update-ref' [-m <reason>] [--no-deref] (-d <ref> [<oldvalue>] | [--create-reflog] <ref> <newvalue> [<oldvalue>] | --stdin [-z])
+'git update-ref' [-m <reason>] [--no-deref] (-d <ref> [<old-oid>] | [--create-reflog] <ref> <new-oid> [<old-oid>] | --stdin [-z])
DESCRIPTION
-----------
-Given two arguments, stores the <newvalue> in the <ref>, possibly
+Given two arguments, stores the <new-oid> in the <ref>, possibly
dereferencing the symbolic refs. E.g. `git update-ref HEAD
-<newvalue>` updates the current branch head to the new object.
+<new-oid>` updates the current branch head to the new object.
-Given three arguments, stores the <newvalue> in the <ref>,
+Given three arguments, stores the <new-oid> in the <ref>,
possibly dereferencing the symbolic refs, after verifying that
-the current value of the <ref> matches <oldvalue>.
-E.g. `git update-ref refs/heads/master <newvalue> <oldvalue>`
-updates the master branch head to <newvalue> only if its current
-value is <oldvalue>. You can specify 40 "0" or an empty string
-as <oldvalue> to make sure that the ref you are creating does
+the current value of the <ref> matches <old-oid>.
+E.g. `git update-ref refs/heads/master <new-oid> <old-oid>`
+updates the master branch head to <new-oid> only if its current
+value is <old-oid>. You can specify 40 "0" or an empty string
+as <old-oid> to make sure that the ref you are creating does
not exist.
It also allows a "ref" file to be a symbolic pointer to another
@@ -56,15 +56,15 @@ ref symlink to some other tree, if you have copied a whole
archive by creating a symlink tree).
With `-d` flag, it deletes the named <ref> after verifying it
-still contains <oldvalue>.
+still contains <old-oid>.
With `--stdin`, update-ref reads instructions from standard input and
performs all modifications together. Specify commands of the form:
- update SP <ref> SP <newvalue> [SP <oldvalue>] LF
- create SP <ref> SP <newvalue> LF
- delete SP <ref> [SP <oldvalue>] LF
- verify SP <ref> [SP <oldvalue>] LF
+ update SP <ref> SP <new-oid> [SP <old-oid>] LF
+ create SP <ref> SP <new-oid> LF
+ delete SP <ref> [SP <old-oid>] LF
+ verify SP <ref> [SP <old-oid>] LF
option SP <opt> LF
start LF
prepare LF
@@ -82,10 +82,10 @@ specify a missing value, omit the value and its preceding SP entirely.
Alternatively, use `-z` to specify in NUL-terminated format, without
quoting:
- update SP <ref> NUL <newvalue> NUL [<oldvalue>] NUL
- create SP <ref> NUL <newvalue> NUL
- delete SP <ref> NUL [<oldvalue>] NUL
- verify SP <ref> NUL [<oldvalue>] NUL
+ update SP <ref> NUL <new-oid> NUL [<old-oid>] NUL
+ create SP <ref> NUL <new-oid> NUL
+ delete SP <ref> NUL [<old-oid>] NUL
+ verify SP <ref> NUL [<old-oid>] NUL
option SP <opt> NUL
start NUL
prepare NUL
@@ -100,22 +100,22 @@ recognizes as an object name. Commands in any other format or a
repeated <ref> produce an error. Command meanings are:
update::
- Set <ref> to <newvalue> after verifying <oldvalue>, if given.
- Specify a zero <newvalue> to ensure the ref does not exist
- after the update and/or a zero <oldvalue> to make sure the
+ Set <ref> to <new-oid> after verifying <old-oid>, if given.
+ Specify a zero <new-oid> to ensure the ref does not exist
+ after the update and/or a zero <old-oid> to make sure the
ref does not exist before the update.
create::
- Create <ref> with <newvalue> after verifying it does not
- exist. The given <newvalue> may not be zero.
+ Create <ref> with <new-oid> after verifying it does not
+ exist. The given <new-oid> may not be zero.
delete::
- Delete <ref> after verifying it exists with <oldvalue>, if
- given. If given, <oldvalue> may not be zero.
+ Delete <ref> after verifying it exists with <old-oid>, if
+ given. If given, <old-oid> may not be zero.
verify::
- Verify <ref> against <oldvalue> but do not change it. If
- <oldvalue> is zero or missing, the ref must not exist.
+ Verify <ref> against <old-oid> but do not change it. If
+ <old-oid> is zero or missing, the ref must not exist.
option::
Modify the behavior of the next command naming a <ref>.
@@ -141,7 +141,7 @@ abort::
Abort the transaction, releasing all locks if the transaction is in
prepared state.
-If all <ref>s can be locked with matching <oldvalue>s
+If all <ref>s can be locked with matching <old-oid>s
simultaneously, all modifications are performed. Otherwise, no
modifications are performed. Note that while each individual
<ref> is updated or deleted atomically, a concurrent reader may
@@ -161,7 +161,7 @@ formatted as:
Where "oldsha1" is the 40 character hexadecimal value previously
stored in <ref>, "newsha1" is the 40 character hexadecimal value of
-<newvalue> and "committer" is the committer's name, email address
+<new-oid> and "committer" is the committer's name, email address
and date in the standard Git committer ident format.
Optionally with -m:
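
A minimal usage sketch of the `--stdin` commands listed above, using the new <new-oid> terminology (the branch name refs/heads/demo is an assumption and must not already exist; HEAD merely supplies some valid object id):

    oid=$(git rev-parse HEAD) &&
    git update-ref --stdin <<EOF
    start
    create refs/heads/demo $oid
    prepare
    commit
    EOF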
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index 37f91d5b50..ee9b92c90d 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -275,12 +275,12 @@ This hook executes once for the receive operation. It takes no
arguments, but for each ref to be updated it receives on standard
input a line of the format:
- <old-value> SP <new-value> SP <ref-name> LF
+ <old-oid> SP <new-oid> SP <ref-name> LF
-where `<old-value>` is the old object name stored in the ref,
-`<new-value>` is the new object name to be stored in the ref and
+where `<old-oid>` is the old object name stored in the ref,
+`<new-oid>` is the new object name to be stored in the ref and
`<ref-name>` is the full name of the ref.
-When creating a new ref, `<old-value>` is the all-zeroes object name.
+When creating a new ref, `<old-oid>` is the all-zeroes object name.
If the hook exits with non-zero status, none of the refs will be
updated. If the hook exits with zero, updating of individual refs can
@@ -503,13 +503,13 @@ given reference transaction is in:
For each reference update that was added to the transaction, the hook
receives on standard input a line of the format:
- <old-value> SP <new-value> SP <ref-name> LF
+ <old-oid> SP <new-oid> SP <ref-name> LF
-where `<old-value>` is the old object name passed into the reference
-transaction, `<new-value>` is the new object name to be stored in the
+where `<old-oid>` is the old object name passed into the reference
+transaction, `<new-oid>` is the new object name to be stored in the
ref and `<ref-name>` is the full name of the ref. When force updating
the reference regardless of its current value or when the reference is
-to be created anew, `<old-value>` is the all-zeroes object name. To
+to be created anew, `<old-oid>` is the all-zeroes object name. To
distinguish these cases, you can inspect the current value of
`<ref-name>` via `git rev-parse`.
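
As a hedged sketch, a reference-transaction hook that merely logs the "<old-oid> SP <new-oid> SP <ref-name>" lines it receives (the log file location is arbitrary) could look like this:

    #!/bin/sh
    # $1 is the transaction state: "prepared", "committed" or "aborted"
    state="$1"
    while read old_oid new_oid ref_name
    do
        printf '%s: %s: %s -> %s\n' "$state" "$ref_name" "$old_oid" "$new_oid"
    done >>"$(git rev-parse --git-dir)/ref-transaction.log"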
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index df788c764b..dabd2b5b89 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.44.GIT
+DEF_VER=v2.45.0-rc0
LF='
'
diff --git a/INSTALL b/INSTALL
index c6fb240c91..2a46d04592 100644
--- a/INSTALL
+++ b/INSTALL
@@ -139,7 +139,7 @@ Issues of note:
not need that functionality, use NO_CURL to build without
it.
- Git requires version "7.19.5" or later of "libcurl" to build
+ Git requires version "7.21.3" or later of "libcurl" to build
without NO_CURL. This version requirement may be bumped in
the future.
diff --git a/Makefile b/Makefile
index c43c1bd1a0..1e31acc72e 100644
--- a/Makefile
+++ b/Makefile
@@ -1075,6 +1075,7 @@ LIB_OBJS += merge-ort-wrappers.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += merge.o
LIB_OBJS += midx.o
+LIB_OBJS += midx-write.o
LIB_OBJS += name-hash.o
LIB_OBJS += negotiator/default.o
LIB_OBJS += negotiator/noop.o
@@ -1557,23 +1558,23 @@ ifneq (,$(SOCKLEN_T))
endif
ifeq ($(uname_S),Darwin)
- ifndef NO_FINK
- ifeq ($(shell test -d /sw/lib && echo y),y)
+ ifndef NO_FINK
+ ifeq ($(shell test -d /sw/lib && echo y),y)
BASIC_CFLAGS += -I/sw/include
BASIC_LDFLAGS += -L/sw/lib
- endif
- endif
- ifndef NO_DARWIN_PORTS
- ifeq ($(shell test -d /opt/local/lib && echo y),y)
+ endif
+ endif
+ ifndef NO_DARWIN_PORTS
+ ifeq ($(shell test -d /opt/local/lib && echo y),y)
BASIC_CFLAGS += -I/opt/local/include
BASIC_LDFLAGS += -L/opt/local/lib
- endif
- endif
- ifndef NO_APPLE_COMMON_CRYPTO
+ endif
+ endif
+ ifndef NO_APPLE_COMMON_CRYPTO
NO_OPENSSL = YesPlease
APPLE_COMMON_CRYPTO = YesPlease
COMPAT_CFLAGS += -DAPPLE_COMMON_CRYPTO
- endif
+ endif
PTHREAD_LIBS =
endif
@@ -1612,23 +1613,23 @@ ifdef NO_CURL
REMOTE_CURL_NAMES =
EXCLUDED_PROGRAMS += git-http-fetch git-http-push
else
- ifdef CURLDIR
+ ifdef CURLDIR
# Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case.
CURL_CFLAGS = -I$(CURLDIR)/include
CURL_LIBCURL = $(call libpath_template,$(CURLDIR)/$(lib))
- else
+ else
CURL_CFLAGS =
CURL_LIBCURL =
- endif
+ endif
- ifndef CURL_LDFLAGS
+ ifndef CURL_LDFLAGS
CURL_LDFLAGS = $(eval CURL_LDFLAGS := $$(shell $$(CURL_CONFIG) --libs))$(CURL_LDFLAGS)
- endif
+ endif
CURL_LIBCURL += $(CURL_LDFLAGS)
- ifndef CURL_CFLAGS
+ ifndef CURL_CFLAGS
CURL_CFLAGS = $(eval CURL_CFLAGS := $$(shell $$(CURL_CONFIG) --cflags))$(CURL_CFLAGS)
- endif
+ endif
BASIC_CFLAGS += $(CURL_CFLAGS)
REMOTE_CURL_PRIMARY = git-remote-http$X
@@ -1636,29 +1637,29 @@ else
REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
PROGRAM_OBJS += http-fetch.o
PROGRAMS += $(REMOTE_CURL_NAMES)
- ifndef NO_EXPAT
+ ifndef NO_EXPAT
PROGRAM_OBJS += http-push.o
- endif
+ endif
curl_check := $(shell (echo 072200; $(CURL_CONFIG) --vernum | sed -e '/^70[BC]/s/^/0/') 2>/dev/null | sort -r | sed -ne 2p)
- ifeq "$(curl_check)" "072200"
+ ifeq "$(curl_check)" "072200"
USE_CURL_FOR_IMAP_SEND = YesPlease
- endif
- ifdef USE_CURL_FOR_IMAP_SEND
+ endif
+ ifdef USE_CURL_FOR_IMAP_SEND
BASIC_CFLAGS += -DUSE_CURL_FOR_IMAP_SEND
IMAP_SEND_BUILDDEPS = http.o
IMAP_SEND_LDFLAGS += $(CURL_LIBCURL)
- endif
- ifndef NO_EXPAT
- ifdef EXPATDIR
+ endif
+ ifndef NO_EXPAT
+ ifdef EXPATDIR
BASIC_CFLAGS += -I$(EXPATDIR)/include
EXPAT_LIBEXPAT = $(call libpath_template,$(EXPATDIR)/$(lib)) -lexpat
- else
+ else
EXPAT_LIBEXPAT = -lexpat
- endif
- ifdef EXPAT_NEEDS_XMLPARSE_H
+ endif
+ ifdef EXPAT_NEEDS_XMLPARSE_H
BASIC_CFLAGS += -DEXPAT_NEEDS_XMLPARSE_H
- endif
- endif
+ endif
+ endif
endif
IMAP_SEND_LDFLAGS += $(OPENSSL_LINK) $(OPENSSL_LIBSSL) $(LIB_4_CRYPTO)
@@ -1670,15 +1671,15 @@ EXTLIBS += -lz
ifndef NO_OPENSSL
OPENSSL_LIBSSL = -lssl
- ifdef OPENSSLDIR
+ ifdef OPENSSLDIR
BASIC_CFLAGS += -I$(OPENSSLDIR)/include
OPENSSL_LINK = $(call libpath_template,$(OPENSSLDIR)/$(lib))
- else
+ else
OPENSSL_LINK =
- endif
- ifdef NEEDS_CRYPTO_WITH_SSL
+ endif
+ ifdef NEEDS_CRYPTO_WITH_SSL
OPENSSL_LIBSSL += -lcrypto
- endif
+ endif
else
BASIC_CFLAGS += -DNO_OPENSSL
OPENSSL_LIBSSL =
@@ -1696,18 +1697,18 @@ ifdef APPLE_COMMON_CRYPTO
endif
endif
ifndef NO_ICONV
- ifdef NEEDS_LIBICONV
- ifdef ICONVDIR
+ ifdef NEEDS_LIBICONV
+ ifdef ICONVDIR
BASIC_CFLAGS += -I$(ICONVDIR)/include
ICONV_LINK = $(call libpath_template,$(ICONVDIR)/$(lib))
- else
+ else
ICONV_LINK =
- endif
- ifdef NEEDS_LIBINTL_BEFORE_LIBICONV
+ endif
+ ifdef NEEDS_LIBINTL_BEFORE_LIBICONV
ICONV_LINK += -lintl
- endif
+ endif
EXTLIBS += $(ICONV_LINK) -liconv
- endif
+ endif
endif
ifdef ICONV_OMITS_BOM
BASIC_CFLAGS += -DICONV_OMITS_BOM
@@ -1828,10 +1829,10 @@ ifdef NO_MMAP
COMPAT_CFLAGS += -DNO_MMAP
COMPAT_OBJS += compat/mmap.o
else
- ifdef USE_WIN32_MMAP
+ ifdef USE_WIN32_MMAP
COMPAT_CFLAGS += -DUSE_WIN32_MMAP
COMPAT_OBJS += compat/win32mmap.o
- endif
+ endif
endif
ifdef MMAP_PREVENTS_DELETE
BASIC_CFLAGS += -DMMAP_PREVENTS_DELETE
@@ -1956,11 +1957,11 @@ else
BASIC_CFLAGS += -DSHA1_DC
LIB_OBJS += sha1dc_git.o
ifdef DC_SHA1_EXTERNAL
- ifdef DC_SHA1_SUBMODULE
- ifneq ($(DC_SHA1_SUBMODULE),auto)
+ ifdef DC_SHA1_SUBMODULE
+ ifneq ($(DC_SHA1_SUBMODULE),auto)
$(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both)
- endif
- endif
+ endif
+ endif
BASIC_CFLAGS += -DDC_SHA1_EXTERNAL
EXTLIBS += -lsha1detectcoll
else
diff --git a/apply.c b/apply.c
index e311013bc4..34f20326a7 100644
--- a/apply.c
+++ b/apply.c
@@ -4448,6 +4448,7 @@ static int create_one_file(struct apply_state *state,
const char *buf,
unsigned long size)
{
+ char *newpath = NULL;
int res;
if (state->cached)
@@ -4509,24 +4510,26 @@ static int create_one_file(struct apply_state *state,
unsigned int nr = getpid();
for (;;) {
- char newpath[PATH_MAX];
- mksnpath(newpath, sizeof(newpath), "%s~%u", path, nr);
+ newpath = mkpathdup("%s~%u", path, nr);
res = try_create_file(state, newpath, mode, buf, size);
if (res < 0)
- return -1;
+ goto out;
if (!res) {
if (!rename(newpath, path))
- return 0;
+ goto out;
unlink_or_warn(newpath);
break;
}
if (errno != EEXIST)
break;
++nr;
+ FREE_AND_NULL(newpath);
}
}
- return error_errno(_("unable to write file '%s' mode %o"),
- path, mode);
+ res = error_errno(_("unable to write file '%s' mode %o"), path, mode);
+out:
+ free(newpath);
+ return res;
}
static int add_conflicted_stages_file(struct apply_state *state,
@@ -4662,8 +4665,11 @@ static int write_out_one_reject(struct apply_state *state, struct patch *patch)
return error_errno(_("cannot open %s"), namebuf);
}
rej = fdopen(fd, "w");
- if (!rej)
- return error_errno(_("cannot open %s"), namebuf);
+ if (!rej) {
+ error_errno(_("cannot open %s"), namebuf);
+ close(fd);
+ return -1;
+ }
/* Normal git tools never deal with .rej, so do not pretend
* this is a git patch by saying --git or giving extended
diff --git a/branch.c b/branch.c
index 621019fcf4..e4a738fc7b 100644
--- a/branch.c
+++ b/branch.c
@@ -738,7 +738,7 @@ static int submodule_create_branch(struct repository *r,
}
void create_branches_recursively(struct repository *r, const char *name,
- const char *start_commitish,
+ const char *start_committish,
const char *tracking_name, int force,
int reflog, int quiet, enum branch_track track,
int dry_run)
@@ -748,8 +748,8 @@ void create_branches_recursively(struct repository *r, const char *name,
struct object_id super_oid;
struct submodule_entry_list submodule_entry_list;
- /* Perform dwim on start_commitish to get super_oid and branch_point. */
- dwim_branch_start(r, start_commitish, BRANCH_TRACK_NEVER,
+ /* Perform dwim on start_committish to get super_oid and branch_point. */
+ dwim_branch_start(r, start_committish, BRANCH_TRACK_NEVER,
&branch_point, &super_oid);
/*
@@ -772,7 +772,7 @@ void create_branches_recursively(struct repository *r, const char *name,
submodule_entry_list.entries[i].submodule->name);
if (advice_enabled(ADVICE_SUBMODULES_NOT_UPDATED))
advise(_("You may try updating the submodules using 'git checkout --no-recurse-submodules %s && git submodule update --init'"),
- start_commitish);
+ start_committish);
exit(code);
}
@@ -787,7 +787,7 @@ void create_branches_recursively(struct repository *r, const char *name,
name);
}
- create_branch(r, name, start_commitish, force, 0, reflog, quiet,
+ create_branch(r, name, start_committish, force, 0, reflog, quiet,
BRANCH_TRACK_NEVER, dry_run);
if (dry_run)
return;
diff --git a/branch.h b/branch.h
index 30c01aed73..ec2f35fda4 100644
--- a/branch.h
+++ b/branch.h
@@ -78,26 +78,26 @@ void create_branch(struct repository *r,
* those of create_branch() except for start_name, which is represented
* by two different parameters:
*
- * - start_commitish is the commit-ish, in repository r, that determines
+ * - start_committish is the commit-ish, in repository r, that determines
* which commits the branches will point to. The superproject branch
- * will point to the commit of start_commitish and the submodule
- * branches will point to the gitlink commit oids in start_commitish's
+ * will point to the commit of start_committish and the submodule
+ * branches will point to the gitlink commit oids in start_committish's
* tree.
*
* - tracking_name is the name of the ref, in repository r, that will be
* used to set up tracking information. This value is propagated to
* all submodules, which will evaluate the ref using their own ref
- * stores. If NULL, this defaults to start_commitish.
+ * stores. If NULL, this defaults to start_committish.
*
- * When this function is called on the superproject, start_commitish
+ * When this function is called on the superproject, start_committish
* can be any user-provided ref and tracking_name can be NULL (similar
* to create_branches()). But when recursing through submodules,
- * start_commitish is the plain gitlink commit oid. Since the oid cannot
+ * start_committish is the plain gitlink commit oid. Since the oid cannot
* be used for tracking information, tracking_name is propagated and
* used for tracking instead.
*/
void create_branches_recursively(struct repository *r, const char *name,
- const char *start_commitish,
+ const char *start_committish,
const char *tracking_name, int force,
int reflog, int quiet, enum branch_track track,
int dry_run);
diff --git a/builtin/add.c b/builtin/add.c
index e97699d6b9..ae723bc85e 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -368,6 +368,7 @@ int cmd_add(int argc, const char **argv, const char *prefix)
int add_new_files;
int require_pathspec;
char *seen = NULL;
+ char *ps_matched = NULL;
struct lock_file lock_file = LOCK_INIT;
git_config(add_config, NULL);
@@ -545,12 +546,17 @@ int cmd_add(int argc, const char **argv, const char *prefix)
begin_odb_transaction();
+ ps_matched = xcalloc(pathspec.nr, 1);
if (add_renormalize)
exit_status |= renormalize_tracked_files(&pathspec, flags);
else
exit_status |= add_files_to_cache(the_repository, prefix,
- &pathspec, include_sparse,
- flags);
+ &pathspec, ps_matched,
+ include_sparse, flags);
+
+ if (take_worktree_changes && !add_renormalize && !ignore_add_errors &&
+ report_path_error(ps_matched, &pathspec))
+ exit(128);
if (add_new_files)
exit_status |= add_files(&dir, flags);
@@ -564,6 +570,7 @@ finish:
COMMIT_LOCK | SKIP_IF_UNCHANGED))
die(_("unable to write new index file"));
+ free(ps_matched);
dir_clear(&dir);
clear_pathspec(&pathspec);
return exit_status;
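
A hedged illustration of the resulting behavior (the pathspec is made up): with ps_matched wired through add_files_to_cache(), "git add -u" on a pathspec that matches nothing now reports a pathspec error and exits with status 128, matching what plain "git add" already did; the same check guards "git commit [-i] <pathspec>" in builtin/commit.c.

    git add -u -- 'no/such/path'
    echo "exit status: $?"   # 128 once the pathspec matches nothing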
diff --git a/builtin/blame.c b/builtin/blame.c
index db1f56de61..9aa74680a3 100644
--- a/builtin/blame.c
+++ b/builtin/blame.c
@@ -316,7 +316,7 @@ static const char *format_time(timestamp_t time, const char *tz_str,
size_t time_width;
int tz;
tz = atoi(tz_str);
- time_str = show_date(time, tz, &blame_date_mode);
+ time_str = show_date(time, tz, blame_date_mode);
strbuf_addstr(&time_buf, time_str);
/*
* Add space paddings to time_buf to display a fixed width
@@ -1029,7 +1029,7 @@ parse_done:
blame_date_width = sizeof("Thu Oct 19 16:00:04 2006 -0700");
break;
case DATE_STRFTIME:
- blame_date_width = strlen(show_date(0, 0, &blame_date_mode)) + 1; /* add the null */
+ blame_date_width = strlen(show_date(0, 0, blame_date_mode)) + 1; /* add the null */
break;
}
blame_date_width -= 1; /* strip the null */
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 2b6166c284..71e6036aab 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -882,7 +882,8 @@ static int merge_working_tree(const struct checkout_opts *opts,
* entries in the index.
*/
- add_files_to_cache(the_repository, NULL, NULL, 0, 0);
+ add_files_to_cache(the_repository, NULL, NULL, NULL, 0,
+ 0);
init_merge_options(&o, the_repository);
o.verbosity = 0;
work = write_in_core_index_as_tree(the_repository);
@@ -1035,7 +1036,8 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
remove_branch_state(the_repository, !opts->quiet);
strbuf_release(&msg);
if (!opts->quiet &&
- (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+ !opts->force_detach &&
+ (new_branch_info->path || !strcmp(new_branch_info->name, "HEAD")))
report_tracking(new_branch_info);
}
diff --git a/builtin/commit.c b/builtin/commit.c
index 7ba7201cfb..6e1484446b 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -441,16 +441,21 @@ static const char *prepare_index(const char **argv, const char *prefix,
* (B) on failure, rollback the real index.
*/
if (all || (also && pathspec.nr)) {
+ char *ps_matched = xcalloc(pathspec.nr, 1);
repo_hold_locked_index(the_repository, &index_lock,
LOCK_DIE_ON_ERROR);
add_files_to_cache(the_repository, also ? prefix : NULL,
- &pathspec, 0, 0);
+ &pathspec, ps_matched, 0, 0);
+ if (!all && report_path_error(ps_matched, &pathspec))
+ exit(128);
+
refresh_cache_or_die(refresh_flags);
cache_tree_update(&the_index, WRITE_TREE_SILENT);
if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to write new index file"));
commit_style = COMMIT_NORMAL;
ret = get_lock_file_path(&index_lock);
+ free(ps_matched);
goto out;
}
diff --git a/builtin/credential-cache--daemon.c b/builtin/credential-cache--daemon.c
index 3a6a750a8e..17f929dede 100644
--- a/builtin/credential-cache--daemon.c
+++ b/builtin/credential-cache--daemon.c
@@ -294,6 +294,8 @@ int cmd_credential_cache_daemon(int argc, const char **argv, const char *prefix)
argc = parse_options(argc, argv, prefix, options, usage, 0);
socket_path = argv[0];
+ if (!have_unix_sockets())
+ die(_("credential-cache--daemon unavailable; no unix socket support"));
if (!socket_path)
usage_with_options(usage, options);
diff --git a/builtin/credential-cache.c b/builtin/credential-cache.c
index bba96d4ffd..bef120b537 100644
--- a/builtin/credential-cache.c
+++ b/builtin/credential-cache.c
@@ -149,6 +149,9 @@ int cmd_credential_cache(int argc, const char **argv, const char *prefix)
usage_with_options(usage, options);
op = argv[0];
+ if (!have_unix_sockets())
+ die(_("credential-cache unavailable; no unix socket support"));
+
if (!socket_path)
socket_path = get_socket_path();
if (!socket_path)
diff --git a/builtin/fast-import.c b/builtin/fast-import.c
index 782bda007c..dc5a9d32dd 100644
--- a/builtin/fast-import.c
+++ b/builtin/fast-import.c
@@ -2210,7 +2210,7 @@ static int parse_mapped_oid_hex(const char *hex, struct object_id *oid, const ch
*
* idnum ::= ':' bigint;
*
- * Return the first character after the value in *endptr.
+ * Update *endptr to point to the first character after the value.
*
* Complain if the following character is not what is expected,
* either a space or end of the string.
@@ -2243,8 +2243,8 @@ static uintmax_t parse_mark_ref_eol(const char *p)
}
/*
- * Parse the mark reference, demanding a trailing space. Return a
- * pointer to the space.
+ * Parse the mark reference, demanding a trailing space. Update *p to
+ * point to the first character after the space.
*/
static uintmax_t parse_mark_ref_space(const char **p)
{
@@ -2258,10 +2258,62 @@ static uintmax_t parse_mark_ref_space(const char **p)
return mark;
}
+/*
+ * Parse the path string into the strbuf. The path can either be quoted with
+ * escape sequences or unquoted without escape sequences. Unquoted strings may
+ * contain spaces only if `is_last_field` is nonzero; otherwise, it stops
+ * parsing at the first space.
+ */
+static void parse_path(struct strbuf *sb, const char *p, const char **endp,
+ int is_last_field, const char *field)
+{
+ if (*p == '"') {
+ if (unquote_c_style(sb, p, endp))
+ die("Invalid %s: %s", field, command_buf.buf);
+ if (strlen(sb->buf) != sb->len)
+ die("NUL in %s: %s", field, command_buf.buf);
+ } else {
+ /*
+ * Unless we are parsing the last field of a line,
+ * SP is the end of this field.
+ */
+ *endp = is_last_field
+ ? p + strlen(p)
+ : strchrnul(p, ' ');
+ strbuf_add(sb, p, *endp - p);
+ }
+}
+
+/*
+ * Parse the path string into the strbuf, and complain if this is not the end of
+ * the string. Unquoted strings may contain spaces.
+ */
+static void parse_path_eol(struct strbuf *sb, const char *p, const char *field)
+{
+ const char *end;
+
+ parse_path(sb, p, &end, 1, field);
+ if (*end)
+ die("Garbage after %s: %s", field, command_buf.buf);
+}
+
+/*
+ * Parse the path string into the strbuf, and ensure it is followed by a space.
+ * Unquoted strings may not contain spaces. Update *endp to point to the first
+ * character after the space.
+ */
+static void parse_path_space(struct strbuf *sb, const char *p,
+ const char **endp, const char *field)
+{
+ parse_path(sb, p, endp, 0, field);
+ if (**endp != ' ')
+ die("Missing space after %s: %s", field, command_buf.buf);
+ (*endp)++;
+}
+
static void file_change_m(const char *p, struct branch *b)
{
- static struct strbuf uq = STRBUF_INIT;
- const char *endp;
+ static struct strbuf path = STRBUF_INIT;
struct object_entry *oe;
struct object_id oid;
uint16_t mode, inline_data = 0;
@@ -2298,16 +2350,12 @@ static void file_change_m(const char *p, struct branch *b)
die("Missing space after SHA1: %s", command_buf.buf);
}
- strbuf_reset(&uq);
- if (!unquote_c_style(&uq, p, &endp)) {
- if (*endp)
- die("Garbage after path in: %s", command_buf.buf);
- p = uq.buf;
- }
+ strbuf_reset(&path);
+ parse_path_eol(&path, p, "path");
/* Git does not track empty, non-toplevel directories. */
- if (S_ISDIR(mode) && is_empty_tree_oid(&oid) && *p) {
- tree_content_remove(&b->branch_tree, p, NULL, 0);
+ if (S_ISDIR(mode) && is_empty_tree_oid(&oid) && *path.buf) {
+ tree_content_remove(&b->branch_tree, path.buf, NULL, 0);
return;
}
@@ -2328,10 +2376,6 @@ static void file_change_m(const char *p, struct branch *b)
if (S_ISDIR(mode))
die("Directories cannot be specified 'inline': %s",
command_buf.buf);
- if (p != uq.buf) {
- strbuf_addstr(&uq, p);
- p = uq.buf;
- }
while (read_next_command() != EOF) {
const char *v;
if (skip_prefix(command_buf.buf, "cat-blob ", &v))
@@ -2357,74 +2401,48 @@ static void file_change_m(const char *p, struct branch *b)
command_buf.buf);
}
- if (!*p) {
+ if (!*path.buf) {
tree_content_replace(&b->branch_tree, &oid, mode, NULL);
return;
}
- tree_content_set(&b->branch_tree, p, &oid, mode, NULL);
+ tree_content_set(&b->branch_tree, path.buf, &oid, mode, NULL);
}
static void file_change_d(const char *p, struct branch *b)
{
- static struct strbuf uq = STRBUF_INIT;
- const char *endp;
+ static struct strbuf path = STRBUF_INIT;
- strbuf_reset(&uq);
- if (!unquote_c_style(&uq, p, &endp)) {
- if (*endp)
- die("Garbage after path in: %s", command_buf.buf);
- p = uq.buf;
- }
- tree_content_remove(&b->branch_tree, p, NULL, 1);
+ strbuf_reset(&path);
+ parse_path_eol(&path, p, "path");
+ tree_content_remove(&b->branch_tree, path.buf, NULL, 1);
}
-static void file_change_cr(const char *s, struct branch *b, int rename)
+static void file_change_cr(const char *p, struct branch *b, int rename)
{
- const char *d;
- static struct strbuf s_uq = STRBUF_INIT;
- static struct strbuf d_uq = STRBUF_INIT;
- const char *endp;
+ static struct strbuf source = STRBUF_INIT;
+ static struct strbuf dest = STRBUF_INIT;
struct tree_entry leaf;
- strbuf_reset(&s_uq);
- if (!unquote_c_style(&s_uq, s, &endp)) {
- if (*endp != ' ')
- die("Missing space after source: %s", command_buf.buf);
- } else {
- endp = strchr(s, ' ');
- if (!endp)
- die("Missing space after source: %s", command_buf.buf);
- strbuf_add(&s_uq, s, endp - s);
- }
- s = s_uq.buf;
-
- endp++;
- if (!*endp)
- die("Missing dest: %s", command_buf.buf);
-
- d = endp;
- strbuf_reset(&d_uq);
- if (!unquote_c_style(&d_uq, d, &endp)) {
- if (*endp)
- die("Garbage after dest in: %s", command_buf.buf);
- d = d_uq.buf;
- }
+ strbuf_reset(&source);
+ parse_path_space(&source, p, &p, "source");
+ strbuf_reset(&dest);
+ parse_path_eol(&dest, p, "dest");
memset(&leaf, 0, sizeof(leaf));
if (rename)
- tree_content_remove(&b->branch_tree, s, &leaf, 1);
+ tree_content_remove(&b->branch_tree, source.buf, &leaf, 1);
else
- tree_content_get(&b->branch_tree, s, &leaf, 1);
+ tree_content_get(&b->branch_tree, source.buf, &leaf, 1);
if (!leaf.versions[1].mode)
- die("Path %s not in branch", s);
- if (!*d) { /* C "path/to/subdir" "" */
+ die("Path %s not in branch", source.buf);
+ if (!*dest.buf) { /* C "path/to/subdir" "" */
tree_content_replace(&b->branch_tree,
&leaf.versions[1].oid,
leaf.versions[1].mode,
leaf.tree);
return;
}
- tree_content_set(&b->branch_tree, d,
+ tree_content_set(&b->branch_tree, dest.buf,
&leaf.versions[1].oid,
leaf.versions[1].mode,
leaf.tree);
@@ -2432,7 +2450,6 @@ static void file_change_cr(const char *s, struct branch *b, int rename)
static void note_change_n(const char *p, struct branch *b, unsigned char *old_fanout)
{
- static struct strbuf uq = STRBUF_INIT;
struct object_entry *oe;
struct branch *s;
struct object_id oid, commit_oid;
@@ -2497,10 +2514,6 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
die("Invalid ref name or SHA1 expression: %s", p);
if (inline_data) {
- if (p != uq.buf) {
- strbuf_addstr(&uq, p);
- p = uq.buf;
- }
read_next_command();
parse_and_store_blob(&last_blob, &oid, 0);
} else if (oe) {
@@ -3152,6 +3165,7 @@ static void print_ls(int mode, const unsigned char *hash, const char *path)
static void parse_ls(const char *p, struct branch *b)
{
+ static struct strbuf path = STRBUF_INIT;
struct tree_entry *root = NULL;
struct tree_entry leaf = {NULL};
@@ -3168,17 +3182,9 @@ static void parse_ls(const char *p, struct branch *b)
root->versions[1].mode = S_IFDIR;
load_tree(root);
}
- if (*p == '"') {
- static struct strbuf uq = STRBUF_INIT;
- const char *endp;
- strbuf_reset(&uq);
- if (unquote_c_style(&uq, p, &endp))
- die("Invalid path: %s", command_buf.buf);
- if (*endp)
- die("Garbage after path in: %s", command_buf.buf);
- p = uq.buf;
- }
- tree_content_get(root, p, &leaf, 1);
+ strbuf_reset(&path);
+ parse_path_eol(&path, p, "path");
+ tree_content_get(root, path.buf, &leaf, 1);
/*
* A directory in preparation would have a sha1 of zero
* until it is saved. Save, for simplicity.
@@ -3186,7 +3192,7 @@ static void parse_ls(const char *p, struct branch *b)
if (S_ISDIR(leaf.versions[1].mode))
store_tree(&leaf);
- print_ls(leaf.versions[1].mode, leaf.versions[1].oid.hash, p);
+ print_ls(leaf.versions[1].mode, leaf.versions[1].oid.hash, path.buf);
if (leaf.tree)
release_tree_content_recursive(leaf.tree);
if (!b || root != &b->branch_tree)
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 46a793411a..5857d860db 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -138,6 +138,7 @@ static int git_fetch_config(const char *k, const char *v,
int r = git_config_bool(k, v) ?
RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
fetch_config->recurse_submodules = r;
+ return 0;
}
if (!strcmp(k, "submodule.fetchjobs")) {
diff --git a/builtin/merge-tree.c b/builtin/merge-tree.c
index 05d0cad554..8bdb439131 100644
--- a/builtin/merge-tree.c
+++ b/builtin/merge-tree.c
@@ -563,7 +563,7 @@ int cmd_merge_tree(int argc, const char **argv, const char *prefix)
PARSE_OPT_NONEG),
OPT_STRING(0, "merge-base",
&merge_base,
- N_("commit"),
+ N_("tree-ish"),
N_("specify a merge-base for the merge")),
OPT_STRVEC('X', "strategy-option", &xopts, N_("option=value"),
N_("option for selected merge strategy")),
diff --git a/builtin/update-ref.c b/builtin/update-ref.c
index 61338a01ec..e46afbc46d 100644
--- a/builtin/update-ref.c
+++ b/builtin/update-ref.c
@@ -9,8 +9,8 @@
#include "repository.h"
static const char * const git_update_ref_usage[] = {
- N_("git update-ref [<options>] -d <refname> [<old-val>]"),
- N_("git update-ref [<options>] <refname> <new-val> [<old-val>]"),
+ N_("git update-ref [<options>] -d <refname> [<old-oid>]"),
+ N_("git update-ref [<options>] <refname> <new-oid> [<old-oid>]"),
N_("git update-ref [<options>] --stdin [-z]"),
NULL
};
@@ -77,14 +77,14 @@ static char *parse_refname(const char **next)
}
/*
- * The value being parsed is <oldvalue> (as opposed to <newvalue>; the
+ * The value being parsed is <old-oid> (as opposed to <new-oid>; the
* difference affects which error messages are generated):
*/
#define PARSE_SHA1_OLD 0x01
/*
* For backwards compatibility, accept an empty string for update's
- * <newvalue> in binary mode to be equivalent to specifying zeros.
+ * <new-oid> in binary mode to be equivalent to specifying zeros.
*/
#define PARSE_SHA1_ALLOW_EMPTY 0x02
@@ -140,7 +140,7 @@ static int parse_next_oid(const char **next, const char *end,
goto invalid;
} else if (flags & PARSE_SHA1_ALLOW_EMPTY) {
/* With -z, treat an empty value as all zeros: */
- warning("%s %s: missing <newvalue>, treating as zero",
+ warning("%s %s: missing <new-oid>, treating as zero",
command, refname);
oidclr(oid);
} else {
@@ -158,14 +158,14 @@ static int parse_next_oid(const char **next, const char *end,
invalid:
die(flags & PARSE_SHA1_OLD ?
- "%s %s: invalid <oldvalue>: %s" :
- "%s %s: invalid <newvalue>: %s",
+ "%s %s: invalid <old-oid>: %s" :
+ "%s %s: invalid <new-oid>: %s",
command, refname, arg.buf);
eof:
die(flags & PARSE_SHA1_OLD ?
- "%s %s: unexpected end of input when reading <oldvalue>" :
- "%s %s: unexpected end of input when reading <newvalue>",
+ "%s %s: unexpected end of input when reading <old-oid>" :
+ "%s %s: unexpected end of input when reading <new-oid>",
command, refname);
}
@@ -194,7 +194,7 @@ static void parse_cmd_update(struct ref_transaction *transaction,
if (parse_next_oid(&next, end, &new_oid, "update", refname,
PARSE_SHA1_ALLOW_EMPTY))
- die("update %s: missing <newvalue>", refname);
+ die("update %s: missing <new-oid>", refname);
have_old = !parse_next_oid(&next, end, &old_oid, "update", refname,
PARSE_SHA1_OLD);
@@ -225,10 +225,10 @@ static void parse_cmd_create(struct ref_transaction *transaction,
die("create: missing <ref>");
if (parse_next_oid(&next, end, &new_oid, "create", refname, 0))
- die("create %s: missing <newvalue>", refname);
+ die("create %s: missing <new-oid>", refname);
if (is_null_oid(&new_oid))
- die("create %s: zero <newvalue>", refname);
+ die("create %s: zero <new-oid>", refname);
if (*next != line_termination)
die("create %s: extra input: %s", refname, next);
@@ -260,7 +260,7 @@ static void parse_cmd_delete(struct ref_transaction *transaction,
have_old = 0;
} else {
if (is_null_oid(&old_oid))
- die("delete %s: zero <oldvalue>", refname);
+ die("delete %s: zero <old-oid>", refname);
have_old = 1;
}
diff --git a/compat/mingw.c b/compat/mingw.c
index 320fb99a90..4876344b5b 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -3158,3 +3158,22 @@ int uname(struct utsname *buf)
"%u", (v >> 16) & 0x7fff);
return 0;
}
+
+int mingw_have_unix_sockets(void)
+{
+ SC_HANDLE scm, srvc;
+ SERVICE_STATUS_PROCESS status;
+ DWORD bytes;
+ int ret = 0;
+ scm = OpenSCManagerA(NULL, NULL, SC_MANAGER_CONNECT);
+ if (scm) {
+ srvc = OpenServiceA(scm, "afunix", SERVICE_QUERY_STATUS);
+ if (srvc) {
+ if(QueryServiceStatusEx(srvc, SC_STATUS_PROCESS_INFO, (LPBYTE)&status, sizeof(SERVICE_STATUS_PROCESS), &bytes))
+ ret = status.dwCurrentState == SERVICE_RUNNING;
+ CloseServiceHandle(srvc);
+ }
+ CloseServiceHandle(scm);
+ }
+ return ret;
+}
diff --git a/compat/mingw.h b/compat/mingw.h
index 6aec50e412..27b61284f4 100644
--- a/compat/mingw.h
+++ b/compat/mingw.h
@@ -631,3 +631,9 @@ void open_in_gdb(void);
* Used by Pthread API implementation for Windows
*/
int err_win_to_posix(DWORD winerr);
+
+#ifndef NO_UNIX_SOCKETS
+int mingw_have_unix_sockets(void);
+#undef have_unix_sockets
+#define have_unix_sockets mingw_have_unix_sockets
+#endif
diff --git a/config.c b/config.c
index eebce8c7e0..ae3652b08f 100644
--- a/config.c
+++ b/config.c
@@ -1584,8 +1584,10 @@ static int git_default_core_config(const char *var, const char *value,
if (!strcmp(var, "core.askpass"))
return git_config_string(&askpass_program, var, value);
- if (!strcmp(var, "core.excludesfile"))
+ if (!strcmp(var, "core.excludesfile")) {
+ free((char *)excludes_file);
return git_config_pathname(&excludes_file, var, value);
+ }
if (!strcmp(var, "core.whitespace")) {
if (!value)
diff --git a/config.mak.uname b/config.mak.uname
index d0dcca2ec5..a7607a5676 100644
--- a/config.mak.uname
+++ b/config.mak.uname
@@ -65,9 +65,9 @@ ifeq ($(uname_S),Linux)
HAVE_PLATFORM_PROCINFO = YesPlease
COMPAT_OBJS += compat/linux/procinfo.o
# centos7/rhel7 provides gcc 4.8.5 and zlib 1.2.7.
- ifneq ($(findstring .el7.,$(uname_R)),)
+ ifneq ($(findstring .el7.,$(uname_R)),)
BASIC_CFLAGS += -std=c99
- endif
+ endif
endif
ifeq ($(uname_S),GNU/kFreeBSD)
HAVE_ALLOCA_H = YesPlease
@@ -95,13 +95,13 @@ ifeq ($(uname_S),UnixWare)
NO_MEMMEM = YesPlease
endif
ifeq ($(uname_S),SCO_SV)
- ifeq ($(uname_R),3.2)
+ ifeq ($(uname_R),3.2)
CFLAGS = -O2
- endif
- ifeq ($(uname_R),5)
+ endif
+ ifeq ($(uname_R),5)
CC = cc
BASIC_CFLAGS += -Kthread
- endif
+ endif
NEEDS_SOCKET = YesPlease
NEEDS_NSL = YesPlease
NEEDS_SSL_WITH_CRYPTO = YesPlease
@@ -124,19 +124,19 @@ ifeq ($(uname_S),Darwin)
# - MacOS 10.0.* and MacOS 10.1.0 = Darwin 1.*
# - MacOS 10.x.* = Darwin (x+4).* for (1 <= x)
# i.e. "begins with [15678] and a dot" means "10.4.* or older".
- ifeq ($(shell expr "$(uname_R)" : '[15678]\.'),2)
+ ifeq ($(shell expr "$(uname_R)" : '[15678]\.'),2)
OLD_ICONV = UnfortunatelyYes
NO_APPLE_COMMON_CRYPTO = YesPlease
- endif
- ifeq ($(shell expr "$(uname_R)" : '[15]\.'),2)
+ endif
+ ifeq ($(shell expr "$(uname_R)" : '[15]\.'),2)
NO_STRLCPY = YesPlease
- endif
- ifeq ($(shell test "`expr "$(uname_R)" : '\([0-9][0-9]*\)\.'`" -ge 11 && echo 1),1)
+ endif
+ ifeq ($(shell test "`expr "$(uname_R)" : '\([0-9][0-9]*\)\.'`" -ge 11 && echo 1),1)
HAVE_GETDELIM = YesPlease
- endif
- ifeq ($(shell test "`expr "$(uname_R)" : '\([0-9][0-9]*\)\.'`" -ge 20 && echo 1),1)
+ endif
+ ifeq ($(shell test "`expr "$(uname_R)" : '\([0-9][0-9]*\)\.'`" -ge 20 && echo 1),1)
OPEN_RETURNS_EINTR = UnfortunatelyYes
- endif
+ endif
NO_MEMMEM = YesPlease
USE_ST_TIMESPEC = YesPlease
HAVE_DEV_TTY = YesPlease
@@ -152,12 +152,12 @@ ifeq ($(uname_S),Darwin)
# Workaround for `gettext` being keg-only and not even being linked via
# `brew link --force gettext`, should be obsolete as of
# https://github.com/Homebrew/homebrew-core/pull/53489
- ifeq ($(shell test -d /usr/local/opt/gettext/ && echo y),y)
+ ifeq ($(shell test -d /usr/local/opt/gettext/ && echo y),y)
BASIC_CFLAGS += -I/usr/local/include -I/usr/local/opt/gettext/include
BASIC_LDFLAGS += -L/usr/local/lib -L/usr/local/opt/gettext/lib
- ifeq ($(shell test -x /usr/local/opt/gettext/bin/msgfmt && echo y),y)
+ ifeq ($(shell test -x /usr/local/opt/gettext/bin/msgfmt && echo y),y)
MSGFMT = /usr/local/opt/gettext/bin/msgfmt
- endif
+ endif
# On newer ARM-based machines the default installation path has changed to
# /opt/homebrew. Include it in our search paths so that the user does not
# have to configure this manually.
@@ -165,22 +165,22 @@ ifeq ($(uname_S),Darwin)
# Note that we do not employ the same workaround as above where we manually
# add gettext. The issue was fixed more than three years ago by now, and at
# that point there haven't been any ARM-based Macs yet.
- else ifeq ($(shell test -d /opt/homebrew/ && echo y),y)
+ else ifeq ($(shell test -d /opt/homebrew/ && echo y),y)
BASIC_CFLAGS += -I/opt/homebrew/include
BASIC_LDFLAGS += -L/opt/homebrew/lib
- ifeq ($(shell test -x /opt/homebrew/bin/msgfmt && echo y),y)
+ ifeq ($(shell test -x /opt/homebrew/bin/msgfmt && echo y),y)
MSGFMT = /opt/homebrew/bin/msgfmt
- endif
- endif
+ endif
+ endif
# The builtin FSMonitor on MacOS builds upon Simple-IPC. Both require
# Unix domain sockets and PThreads.
- ifndef NO_PTHREADS
- ifndef NO_UNIX_SOCKETS
+ ifndef NO_PTHREADS
+ ifndef NO_UNIX_SOCKETS
FSMONITOR_DAEMON_BACKEND = darwin
FSMONITOR_OS_SETTINGS = darwin
- endif
- endif
+ endif
+ endif
BASIC_LDFLAGS += -framework CoreServices
endif
@@ -196,7 +196,7 @@ ifeq ($(uname_S),SunOS)
NO_REGEX = YesPlease
NO_MSGFMT_EXTENDED_OPTIONS = YesPlease
HAVE_DEV_TTY = YesPlease
- ifeq ($(uname_R),5.6)
+ ifeq ($(uname_R),5.6)
SOCKLEN_T = int
NO_HSTRERROR = YesPlease
NO_IPV6 = YesPlease
@@ -206,8 +206,8 @@ ifeq ($(uname_S),SunOS)
NO_STRLCPY = YesPlease
NO_STRTOUMAX = YesPlease
GIT_TEST_CMP = cmp
- endif
- ifeq ($(uname_R),5.7)
+ endif
+ ifeq ($(uname_R),5.7)
NEEDS_RESOLV = YesPlease
NO_IPV6 = YesPlease
NO_SOCKADDR_STORAGE = YesPlease
@@ -216,25 +216,25 @@ ifeq ($(uname_S),SunOS)
NO_STRLCPY = YesPlease
NO_STRTOUMAX = YesPlease
GIT_TEST_CMP = cmp
- endif
- ifeq ($(uname_R),5.8)
+ endif
+ ifeq ($(uname_R),5.8)
NO_UNSETENV = YesPlease
NO_SETENV = YesPlease
NO_STRTOUMAX = YesPlease
GIT_TEST_CMP = cmp
- endif
- ifeq ($(uname_R),5.9)
+ endif
+ ifeq ($(uname_R),5.9)
NO_UNSETENV = YesPlease
NO_SETENV = YesPlease
NO_STRTOUMAX = YesPlease
GIT_TEST_CMP = cmp
- endif
+ endif
INSTALL = /usr/ucb/install
TAR = gtar
BASIC_CFLAGS += -D__EXTENSIONS__ -D__sun__
endif
ifeq ($(uname_O),Cygwin)
- ifeq ($(shell expr "$(uname_R)" : '1\.[1-6]\.'),4)
+ ifeq ($(shell expr "$(uname_R)" : '1\.[1-6]\.'),4)
NO_D_TYPE_IN_DIRENT = YesPlease
NO_STRCASESTR = YesPlease
NO_MEMMEM = YesPlease
@@ -245,9 +245,9 @@ ifeq ($(uname_O),Cygwin)
# On some boxes NO_MMAP is needed, and not so elsewhere.
# Try commenting this out if you suspect MMAP is more efficient
NO_MMAP = YesPlease
- else
+ else
NO_REGEX = UnfortunatelyYes
- endif
+ endif
HAVE_ALLOCA_H = YesPlease
NEEDS_LIBICONV = YesPlease
NO_FAST_WORKING_DIRECTORY = UnfortunatelyYes
@@ -263,25 +263,25 @@ ifeq ($(uname_S),FreeBSD)
NEEDS_LIBICONV = YesPlease
# Versions up to 10.1 require OLD_ICONV; 10.2 and beyond don't.
# A typical version string looks like "10.2-RELEASE".
- ifeq ($(shell expr "$(uname_R)" : '[1-9]\.'),2)
+ ifeq ($(shell expr "$(uname_R)" : '[1-9]\.'),2)
OLD_ICONV = YesPlease
- endif
- ifeq ($(firstword $(subst -, ,$(uname_R))),10.0)
+ endif
+ ifeq ($(firstword $(subst -, ,$(uname_R))),10.0)
OLD_ICONV = YesPlease
- endif
- ifeq ($(firstword $(subst -, ,$(uname_R))),10.1)
+ endif
+ ifeq ($(firstword $(subst -, ,$(uname_R))),10.1)
OLD_ICONV = YesPlease
- endif
+ endif
NO_MEMMEM = YesPlease
BASIC_CFLAGS += -I/usr/local/include
BASIC_LDFLAGS += -L/usr/local/lib
DIR_HAS_BSD_GROUP_SEMANTICS = YesPlease
USE_ST_TIMESPEC = YesPlease
- ifeq ($(shell expr "$(uname_R)" : '4\.'),2)
+ ifeq ($(shell expr "$(uname_R)" : '4\.'),2)
PTHREAD_LIBS = -pthread
NO_UINTMAX_T = YesPlease
NO_STRTOUMAX = YesPlease
- endif
+ endif
PYTHON_PATH = /usr/local/bin/python
PERL_PATH = /usr/local/bin/perl
HAVE_PATHS_H = YesPlease
@@ -317,9 +317,9 @@ ifeq ($(uname_S),MirBSD)
CSPRNG_METHOD = arc4random
endif
ifeq ($(uname_S),NetBSD)
- ifeq ($(shell expr "$(uname_R)" : '[01]\.'),2)
+ ifeq ($(shell expr "$(uname_R)" : '[01]\.'),2)
NEEDS_LIBICONV = YesPlease
- endif
+ endif
BASIC_CFLAGS += -I/usr/pkg/include
BASIC_LDFLAGS += -L/usr/pkg/lib $(CC_LD_DYNPATH)/usr/pkg/lib
USE_ST_TIMESPEC = YesPlease
@@ -343,14 +343,14 @@ ifeq ($(uname_S),AIX)
BASIC_CFLAGS += -D_LARGE_FILES
FILENO_IS_A_MACRO = UnfortunatelyYes
NEED_ACCESS_ROOT_HANDLER = UnfortunatelyYes
- ifeq ($(shell expr "$(uname_V)" : '[1234]'),1)
+ ifeq ($(shell expr "$(uname_V)" : '[1234]'),1)
NO_PTHREADS = YesPlease
- else
+ else
PTHREAD_LIBS = -lpthread
- endif
- ifeq ($(shell expr "$(uname_V).$(uname_R)" : '5\.1'),3)
+ endif
+ ifeq ($(shell expr "$(uname_V).$(uname_R)" : '5\.1'),3)
INLINE = ''
- endif
+ endif
GIT_TEST_CMP = cmp
endif
ifeq ($(uname_S),GNU)
@@ -410,29 +410,29 @@ ifeq ($(uname_S),HP-UX)
NO_SYS_SELECT_H = YesPlease
SNPRINTF_RETURNS_BOGUS = YesPlease
NO_NSEC = YesPlease
- ifeq ($(uname_R),B.11.00)
+ ifeq ($(uname_R),B.11.00)
NO_INET_NTOP = YesPlease
NO_INET_PTON = YesPlease
- endif
- ifeq ($(uname_R),B.10.20)
+ endif
+ ifeq ($(uname_R),B.10.20)
# Override HP-UX 11.x setting:
INLINE =
SOCKLEN_T = size_t
NO_PREAD = YesPlease
NO_INET_NTOP = YesPlease
NO_INET_PTON = YesPlease
- endif
+ endif
GIT_TEST_CMP = cmp
endif
ifeq ($(uname_S),Windows)
GIT_VERSION := $(GIT_VERSION).MSVC
pathsep = ;
# Assume that this is built in Git for Windows' SDK
- ifeq (MINGW32,$(MSYSTEM))
+ ifeq (MINGW32,$(MSYSTEM))
prefix = /mingw32
- else
+ else
prefix = /mingw64
- endif
+ endif
# Prepend MSVC 64-bit tool-chain to PATH.
#
# A regular Git Bash *does not* have cl.exe in its $PATH. As there is a
@@ -447,7 +447,6 @@ ifeq ($(uname_S),Windows)
NO_POLL = YesPlease
NO_SYMLINK_HEAD = YesPlease
NO_IPV6 = YesPlease
- NO_UNIX_SOCKETS = YesPlease
NO_SETENV = YesPlease
NO_STRCASESTR = YesPlease
NO_STRLCPY = YesPlease
@@ -550,16 +549,16 @@ ifeq ($(uname_S),Interix)
NO_MKDTEMP = YesPlease
NO_STRTOUMAX = YesPlease
NO_NSEC = YesPlease
- ifeq ($(uname_R),3.5)
+ ifeq ($(uname_R),3.5)
NO_INET_NTOP = YesPlease
NO_INET_PTON = YesPlease
NO_SOCKADDR_STORAGE = YesPlease
- endif
- ifeq ($(uname_R),5.2)
+ endif
+ ifeq ($(uname_R),5.2)
NO_INET_NTOP = YesPlease
NO_INET_PTON = YesPlease
NO_SOCKADDR_STORAGE = YesPlease
- endif
+ endif
endif
ifeq ($(uname_S),Minix)
NO_IPV6 = YesPlease
@@ -579,12 +578,12 @@ ifeq ($(uname_S),NONSTOP_KERNEL)
# still not compile in c89 mode, due to non-const array initializations.
CC = cc -c99
# Build down-rev compatible objects that don't use our new getopt_long.
- ifeq ($(uname_R).$(uname_V),J06.21)
+ ifeq ($(uname_R).$(uname_V),J06.21)
CC += -WRVU=J06.20
- endif
- ifeq ($(uname_R).$(uname_V),L17.02)
+ endif
+ ifeq ($(uname_R).$(uname_V),L17.02)
CC += -WRVU=L16.05
- endif
+ endif
# Disable all optimization, seems to result in bad code, with -O or -O2
# or even -O1 (default), /usr/local/libexec/git-core/git-pack-objects
# abends on "git push". Needs more investigation.
@@ -651,9 +650,9 @@ ifeq ($(uname_S),OS/390)
NEEDS_MODE_TRANSLATION = YesPlease
endif
ifeq ($(uname_S),MINGW)
- ifeq ($(shell expr "$(uname_R)" : '1\.'),2)
+ ifeq ($(shell expr "$(uname_R)" : '1\.'),2)
$(error "Building with MSys is no longer supported")
- endif
+ endif
pathsep = ;
HAVE_ALLOCA_H = YesPlease
NO_PREAD = YesPlease
@@ -661,7 +660,6 @@ ifeq ($(uname_S),MINGW)
NO_LIBGEN_H = YesPlease
NO_POLL = YesPlease
NO_SYMLINK_HEAD = YesPlease
- NO_UNIX_SOCKETS = YesPlease
NO_SETENV = YesPlease
NO_STRCASESTR = YesPlease
NO_STRLCPY = YesPlease
@@ -712,22 +710,22 @@ ifeq ($(uname_S),MINGW)
# Enable DEP
BASIC_LDFLAGS += -Wl,--nxcompat
# Enable ASLR (unless debugging)
- ifneq (,$(findstring -O,$(filter-out -O0 -Og,$(CFLAGS))))
+ ifneq (,$(findstring -O,$(filter-out -O0 -Og,$(CFLAGS))))
BASIC_LDFLAGS += -Wl,--dynamicbase
- endif
- ifeq (MINGW32,$(MSYSTEM))
+ endif
+ ifeq (MINGW32,$(MSYSTEM))
prefix = /mingw32
HOST_CPU = i686
BASIC_LDFLAGS += -Wl,--pic-executable,-e,_mainCRTStartup
- endif
- ifeq (MINGW64,$(MSYSTEM))
+ endif
+ ifeq (MINGW64,$(MSYSTEM))
prefix = /mingw64
HOST_CPU = x86_64
BASIC_LDFLAGS += -Wl,--pic-executable,-e,mainCRTStartup
- else
+ else
COMPAT_CFLAGS += -D_USE_32BIT_TIME_T
BASIC_LDFLAGS += -Wl,--large-address-aware
- endif
+ endif
CC = gcc
COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0 -DDETECT_MSYS_TTY \
-fstack-protector-strong
@@ -739,11 +737,11 @@ ifeq ($(uname_S),MINGW)
USE_GETTEXT_SCHEME = fallthrough
USE_LIBPCRE = YesPlease
USE_NED_ALLOCATOR = YesPlease
- ifeq (/mingw64,$(subst 32,64,$(prefix)))
+ ifeq (/mingw64,$(subst 32,64,$(prefix)))
# Move system config into top-level /etc/
ETC_GITCONFIG = ../etc/gitconfig
ETC_GITATTRIBUTES = ../etc/gitattributes
- endif
+ endif
endif
ifeq ($(uname_S),QNX)
COMPAT_CFLAGS += -DSA_RESTART=0
diff --git a/contrib/completion/git-prompt.sh b/contrib/completion/git-prompt.sh
index 71f179cba3..5330e769a7 100644
--- a/contrib/completion/git-prompt.sh
+++ b/contrib/completion/git-prompt.sh
@@ -141,7 +141,7 @@ __git_ps1_show_upstream ()
# parse configuration values
local option
- for option in ${GIT_PS1_SHOWUPSTREAM}; do
+ for option in ${GIT_PS1_SHOWUPSTREAM-}; do
case "$option" in
git|svn) upstream_type="$option" ;;
verbose) verbose=1 ;;
@@ -528,7 +528,7 @@ __git_ps1 ()
fi
local conflict="" # state indicator for unresolved conflicts
- if [[ "${GIT_PS1_SHOWCONFLICTSTATE}" == "yes" ]] &&
+ if [[ "${GIT_PS1_SHOWCONFLICTSTATE-}" == "yes" ]] &&
[[ $(git ls-files --unmerged 2>/dev/null) ]]; then
conflict="|CONFLICT"
fi
diff --git a/contrib/credential/osxkeychain/Makefile b/contrib/credential/osxkeychain/Makefile
index 4b3a08a2ba..238f5f8c36 100644
--- a/contrib/credential/osxkeychain/Makefile
+++ b/contrib/credential/osxkeychain/Makefile
@@ -8,7 +8,8 @@ CFLAGS = -g -O2 -Wall
-include ../../../config.mak
git-credential-osxkeychain: git-credential-osxkeychain.o
- $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS) -Wl,-framework -Wl,Security
+ $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS) \
+ -framework Security -framework CoreFoundation
git-credential-osxkeychain.o: git-credential-osxkeychain.c
$(CC) -c $(CFLAGS) $<
diff --git a/contrib/credential/osxkeychain/git-credential-osxkeychain.c b/contrib/credential/osxkeychain/git-credential-osxkeychain.c
index 5f2e5f16c8..6a40917b1e 100644
--- a/contrib/credential/osxkeychain/git-credential-osxkeychain.c
+++ b/contrib/credential/osxkeychain/git-credential-osxkeychain.c
@@ -3,14 +3,51 @@
#include <stdlib.h>
#include <Security/Security.h>
-static SecProtocolType protocol;
-static char *host;
-static char *path;
-static char *username;
-static char *password;
-static UInt16 port;
-
-__attribute__((format (printf, 1, 2)))
+#define ENCODING kCFStringEncodingUTF8
+static CFStringRef protocol; /* Stores constant strings - not memory managed */
+static CFStringRef host;
+static CFNumberRef port;
+static CFStringRef path;
+static CFStringRef username;
+static CFDataRef password;
+static CFDataRef password_expiry_utc;
+static CFDataRef oauth_refresh_token;
+
+static void clear_credential(void)
+{
+ if (host) {
+ CFRelease(host);
+ host = NULL;
+ }
+ if (port) {
+ CFRelease(port);
+ port = NULL;
+ }
+ if (path) {
+ CFRelease(path);
+ path = NULL;
+ }
+ if (username) {
+ CFRelease(username);
+ username = NULL;
+ }
+ if (password) {
+ CFRelease(password);
+ password = NULL;
+ }
+ if (password_expiry_utc) {
+ CFRelease(password_expiry_utc);
+ password_expiry_utc = NULL;
+ }
+ if (oauth_refresh_token) {
+ CFRelease(oauth_refresh_token);
+ oauth_refresh_token = NULL;
+ }
+}
+
+#define STRING_WITH_LENGTH(s) s, sizeof(s) - 1
+
+__attribute__((format (printf, 1, 2), __noreturn__))
static void die(const char *err, ...)
{
char msg[4096];
@@ -19,70 +56,199 @@ static void die(const char *err, ...)
vsnprintf(msg, sizeof(msg), err, params);
fprintf(stderr, "%s\n", msg);
va_end(params);
+ clear_credential();
exit(1);
}
-static void *xstrdup(const char *s1)
+static void *xmalloc(size_t len)
{
- void *ret = strdup(s1);
+ void *ret = malloc(len);
if (!ret)
die("Out of memory");
return ret;
}
-#define KEYCHAIN_ITEM(x) (x ? strlen(x) : 0), x
-#define KEYCHAIN_ARGS \
- NULL, /* default keychain */ \
- KEYCHAIN_ITEM(host), \
- 0, NULL, /* account domain */ \
- KEYCHAIN_ITEM(username), \
- KEYCHAIN_ITEM(path), \
- port, \
- protocol, \
- kSecAuthenticationTypeDefault
-
-static void write_item(const char *what, const char *buf, int len)
+static CFDictionaryRef create_dictionary(CFAllocatorRef allocator, ...)
+{
+ va_list args;
+ const void *key;
+ CFMutableDictionaryRef result;
+
+ result = CFDictionaryCreateMutable(allocator,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+
+
+ va_start(args, allocator);
+ while ((key = va_arg(args, const void *)) != NULL) {
+ const void *value;
+ value = va_arg(args, const void *);
+ if (value)
+ CFDictionarySetValue(result, key, value);
+ }
+ va_end(args);
+
+ return result;
+}
+
+#define CREATE_SEC_ATTRIBUTES(...) \
+ create_dictionary(kCFAllocatorDefault, \
+ kSecClass, kSecClassInternetPassword, \
+ kSecAttrServer, host, \
+ kSecAttrAccount, username, \
+ kSecAttrPath, path, \
+ kSecAttrPort, port, \
+ kSecAttrProtocol, protocol, \
+ kSecAttrAuthenticationType, \
+ kSecAttrAuthenticationTypeDefault, \
+ __VA_ARGS__);
+
+static void write_item(const char *what, const char *buf, size_t len)
{
printf("%s=", what);
fwrite(buf, 1, len, stdout);
putchar('\n');
}
-static void find_username_in_item(SecKeychainItemRef item)
+static void find_username_in_item(CFDictionaryRef item)
{
- SecKeychainAttributeList list;
- SecKeychainAttribute attr;
+ CFStringRef account_ref;
+ char *username_buf;
+ CFIndex buffer_len;
- list.count = 1;
- list.attr = &attr;
- attr.tag = kSecAccountItemAttr;
+ account_ref = CFDictionaryGetValue(item, kSecAttrAccount);
+ if (!account_ref)
+ {
+ write_item("username", "", 0);
+ return;
+ }
- if (SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL))
+ username_buf = (char *)CFStringGetCStringPtr(account_ref, ENCODING);
+ if (username_buf)
+ {
+ write_item("username", username_buf, strlen(username_buf));
return;
+ }
- write_item("username", attr.data, attr.length);
- SecKeychainItemFreeContent(&list, NULL);
+ /* If we can't get a CString pointer then
+ * we need to allocate our own buffer */
+ buffer_len = CFStringGetMaximumSizeForEncoding(
+ CFStringGetLength(account_ref), ENCODING) + 1;
+ username_buf = xmalloc(buffer_len);
+ if (CFStringGetCString(account_ref,
+ username_buf,
+ buffer_len,
+ ENCODING)) {
+ write_item("username", username_buf, buffer_len - 1);
+ }
+ free(username_buf);
}
-static void find_internet_password(void)
+static OSStatus find_internet_password(void)
{
- void *buf;
- UInt32 len;
- SecKeychainItemRef item;
+ CFDictionaryRef attrs;
+ CFDictionaryRef item;
+ CFDataRef data;
+ OSStatus result;
- if (SecKeychainFindInternetPassword(KEYCHAIN_ARGS, &len, &buf, &item))
- return;
+ attrs = CREATE_SEC_ATTRIBUTES(kSecMatchLimit, kSecMatchLimitOne,
+ kSecReturnAttributes, kCFBooleanTrue,
+ kSecReturnData, kCFBooleanTrue,
+ NULL);
+ result = SecItemCopyMatching(attrs, (CFTypeRef *)&item);
+ if (result) {
+ goto out;
+ }
+
+ data = CFDictionaryGetValue(item, kSecValueData);
- write_item("password", buf, len);
+ write_item("password",
+ (const char *)CFDataGetBytePtr(data),
+ CFDataGetLength(data));
if (!username)
find_username_in_item(item);
- SecKeychainItemFreeContent(NULL, buf);
+ CFRelease(item);
+
+out:
+ CFRelease(attrs);
+
+ /* We consider not found to not be an error */
+ if (result == errSecItemNotFound)
+ result = errSecSuccess;
+
+ return result;
+}
+
+static OSStatus delete_ref(const void *itemRef)
+{
+ CFArrayRef item_ref_list;
+ CFDictionaryRef delete_query;
+ OSStatus result;
+
+ item_ref_list = CFArrayCreate(kCFAllocatorDefault,
+ &itemRef,
+ 1,
+ &kCFTypeArrayCallBacks);
+ delete_query = create_dictionary(kCFAllocatorDefault,
+ kSecClass, kSecClassInternetPassword,
+ kSecMatchItemList, item_ref_list,
+ NULL);
+
+ if (password) {
+ /* We only want to delete items with a matching password */
+ CFIndex capacity;
+ CFMutableDictionaryRef query;
+ CFDataRef data;
+
+ capacity = CFDictionaryGetCount(delete_query) + 1;
+ query = CFDictionaryCreateMutableCopy(kCFAllocatorDefault,
+ capacity,
+ delete_query);
+ CFDictionarySetValue(query, kSecReturnData, kCFBooleanTrue);
+ result = SecItemCopyMatching(query, (CFTypeRef *)&data);
+ if (!result) {
+ CFDataRef kc_password;
+ const UInt8 *raw_data;
+ const UInt8 *line;
+
+ /* Don't match appended metadata */
+ raw_data = CFDataGetBytePtr(data);
+ line = memchr(raw_data, '\n', CFDataGetLength(data));
+ if (line)
+ kc_password = CFDataCreateWithBytesNoCopy(
+ kCFAllocatorDefault,
+ raw_data,
+ line - raw_data,
+ kCFAllocatorNull);
+ else
+ kc_password = data;
+
+ if (CFEqual(kc_password, password))
+ result = SecItemDelete(delete_query);
+
+ if (line)
+ CFRelease(kc_password);
+ CFRelease(data);
+ }
+
+ CFRelease(query);
+ } else {
+ result = SecItemDelete(delete_query);
+ }
+
+ CFRelease(delete_query);
+ CFRelease(item_ref_list);
+
+ return result;
}
-static void delete_internet_password(void)
+static OSStatus delete_internet_password(void)
{
- SecKeychainItemRef item;
+ CFDictionaryRef attrs;
+ CFArrayRef refs;
+ OSStatus result;
/*
* Require at least a protocol and host for removal, which is what git
@@ -90,25 +256,69 @@ static void delete_internet_password(void)
* Keychain manager.
*/
if (!protocol || !host)
- return;
+ return -1;
- if (SecKeychainFindInternetPassword(KEYCHAIN_ARGS, 0, NULL, &item))
- return;
+ attrs = CREATE_SEC_ATTRIBUTES(kSecMatchLimit, kSecMatchLimitAll,
+ kSecReturnRef, kCFBooleanTrue,
+ NULL);
+ result = SecItemCopyMatching(attrs, (CFTypeRef *)&refs);
+ CFRelease(attrs);
+
+ if (!result) {
+ for (CFIndex i = 0; !result && i < CFArrayGetCount(refs); i++)
+ result = delete_ref(CFArrayGetValueAtIndex(refs, i));
- SecKeychainItemDelete(item);
+ CFRelease(refs);
+ }
+
+ /* We consider not found to not be an error */
+ if (result == errSecItemNotFound)
+ result = errSecSuccess;
+
+ return result;
}
-static void add_internet_password(void)
+static OSStatus add_internet_password(void)
{
+ CFMutableDataRef data;
+ CFDictionaryRef attrs;
+ OSStatus result;
+
/* Only store complete credentials */
if (!protocol || !host || !username || !password)
- return;
+ return -1;
- if (SecKeychainAddInternetPassword(
- KEYCHAIN_ARGS,
- KEYCHAIN_ITEM(password),
- NULL))
- return;
+ data = CFDataCreateMutableCopy(kCFAllocatorDefault, 0, password);
+ if (password_expiry_utc) {
+ CFDataAppendBytes(data,
+ (const UInt8 *)STRING_WITH_LENGTH("\npassword_expiry_utc="));
+ CFDataAppendBytes(data,
+ CFDataGetBytePtr(password_expiry_utc),
+ CFDataGetLength(password_expiry_utc));
+ }
+ if (oauth_refresh_token) {
+ CFDataAppendBytes(data,
+ (const UInt8 *)STRING_WITH_LENGTH("\noauth_refresh_token="));
+ CFDataAppendBytes(data,
+ CFDataGetBytePtr(oauth_refresh_token),
+ CFDataGetLength(oauth_refresh_token));
+ }
+
+ attrs = CREATE_SEC_ATTRIBUTES(kSecValueData, data,
+ NULL);
+
+ result = SecItemAdd(attrs, NULL);
+ if (result == errSecDuplicateItem) {
+ CFDictionaryRef query;
+ query = CREATE_SEC_ATTRIBUTES(NULL);
+ result = SecItemUpdate(query, attrs);
+ CFRelease(query);
+ }
+
+ CFRelease(data);
+ CFRelease(attrs);
+
+ return result;
}
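
When the extra fields are present, add_internet_password() above appends them to the secret inside a single keychain item, one per line, and delete_ref() compares only the part before the first newline. Illustratively (all values are made up), the stored item data then looks like:

secret
password_expiry_utc=1717000000
oauth_refresh_token=example-token
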
static void read_credential(void)
@@ -131,36 +341,60 @@ static void read_credential(void)
if (!strcmp(buf, "protocol")) {
if (!strcmp(v, "imap"))
- protocol = kSecProtocolTypeIMAP;
+ protocol = kSecAttrProtocolIMAP;
else if (!strcmp(v, "imaps"))
- protocol = kSecProtocolTypeIMAPS;
+ protocol = kSecAttrProtocolIMAPS;
else if (!strcmp(v, "ftp"))
- protocol = kSecProtocolTypeFTP;
+ protocol = kSecAttrProtocolFTP;
else if (!strcmp(v, "ftps"))
- protocol = kSecProtocolTypeFTPS;
+ protocol = kSecAttrProtocolFTPS;
else if (!strcmp(v, "https"))
- protocol = kSecProtocolTypeHTTPS;
+ protocol = kSecAttrProtocolHTTPS;
else if (!strcmp(v, "http"))
- protocol = kSecProtocolTypeHTTP;
+ protocol = kSecAttrProtocolHTTP;
else if (!strcmp(v, "smtp"))
- protocol = kSecProtocolTypeSMTP;
- else /* we don't yet handle other protocols */
+ protocol = kSecAttrProtocolSMTP;
+ else {
+ /* we don't yet handle other protocols */
+ clear_credential();
exit(0);
+ }
}
else if (!strcmp(buf, "host")) {
char *colon = strchr(v, ':');
if (colon) {
+ UInt16 port_i;
*colon++ = '\0';
- port = atoi(colon);
+ port_i = atoi(colon);
+ port = CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberShortType,
+ &port_i);
}
- host = xstrdup(v);
+ host = CFStringCreateWithCString(kCFAllocatorDefault,
+ v,
+ ENCODING);
}
else if (!strcmp(buf, "path"))
- path = xstrdup(v);
+ path = CFStringCreateWithCString(kCFAllocatorDefault,
+ v,
+ ENCODING);
else if (!strcmp(buf, "username"))
- username = xstrdup(v);
+ username = CFStringCreateWithCString(
+ kCFAllocatorDefault,
+ v,
+ ENCODING);
else if (!strcmp(buf, "password"))
- password = xstrdup(v);
+ password = CFDataCreate(kCFAllocatorDefault,
+ (UInt8 *)v,
+ strlen(v));
+ else if (!strcmp(buf, "password_expiry_utc"))
+ password_expiry_utc = CFDataCreate(kCFAllocatorDefault,
+ (UInt8 *)v,
+ strlen(v));
+ else if (!strcmp(buf, "oauth_refresh_token"))
+ oauth_refresh_token = CFDataCreate(kCFAllocatorDefault,
+ (UInt8 *)v,
+ strlen(v));
/*
* Ignore other lines; we don't know what they mean, but
* this future-proofs us when later versions of git do
@@ -173,6 +407,7 @@ static void read_credential(void)
int main(int argc, const char **argv)
{
+ OSStatus result = 0;
const char *usage =
"usage: git credential-osxkeychain <get|store|erase>";
@@ -182,12 +417,17 @@ int main(int argc, const char **argv)
read_credential();
if (!strcmp(argv[1], "get"))
- find_internet_password();
+ result = find_internet_password();
else if (!strcmp(argv[1], "store"))
- add_internet_password();
+ result = add_internet_password();
else if (!strcmp(argv[1], "erase"))
- delete_internet_password();
+ result = delete_internet_password();
/* otherwise, ignore unknown action */
+ if (result)
+ die("failed to %s: %d", argv[1], (int)result);
+
+ clear_credential();
+
return 0;
}
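
Throughout, the helper still speaks git's credential-helper protocol: read_credential() consumes key=value lines on stdin, terminated by a blank line, and maps them onto the CF* globals above. A minimal illustration of the input a "get" request might receive (values are made up):

protocol=https
host=example.com:443
username=alice
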
diff --git a/contrib/vscode/init.sh b/contrib/vscode/init.sh
index 521d303722..f2d61bb0e6 100755
--- a/contrib/vscode/init.sh
+++ b/contrib/vscode/init.sh
@@ -92,7 +92,6 @@ cat >.vscode/settings.json.new <<\EOF ||
"isexe",
"iskeychar",
"kompare",
- "mksnpath",
"mktag",
"mktree",
"mmblob",
diff --git a/date.c b/date.c
index 44cf2221d8..7365a4ad24 100644
--- a/date.c
+++ b/date.c
@@ -207,13 +207,13 @@ void show_date_relative(timestamp_t time, struct strbuf *timebuf)
(diff + 183) / 365);
}
-struct date_mode *date_mode_from_type(enum date_mode_type type)
+struct date_mode date_mode_from_type(enum date_mode_type type)
{
- static struct date_mode mode = DATE_MODE_INIT;
+ struct date_mode mode = DATE_MODE_INIT;
if (type == DATE_STRFTIME)
BUG("cannot create anonymous strftime date_mode struct");
mode.type = type;
- return &mode;
+ return mode;
}
static void show_date_normal(struct strbuf *buf, timestamp_t time, struct tm *tm, int tz, struct tm *human_tm, int human_tz, int local)
@@ -283,7 +283,7 @@ static void show_date_normal(struct strbuf *buf, timestamp_t time, struct tm *tm
strbuf_addf(buf, " %+05d", tz);
}
-const char *show_date(timestamp_t time, int tz, const struct date_mode *mode)
+const char *show_date(timestamp_t time, int tz, struct date_mode mode)
{
struct tm *tm;
struct tm tmbuf = { 0 };
@@ -291,13 +291,13 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode)
int human_tz = -1;
static struct strbuf timebuf = STRBUF_INIT;
- if (mode->type == DATE_UNIX) {
+ if (mode.type == DATE_UNIX) {
strbuf_reset(&timebuf);
strbuf_addf(&timebuf, "%"PRItime, time);
return timebuf.buf;
}
- if (mode->type == DATE_HUMAN) {
+ if (mode.type == DATE_HUMAN) {
struct timeval now;
get_time(&now);
@@ -306,22 +306,22 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode)
human_tz = local_time_tzoffset(now.tv_sec, &human_tm);
}
- if (mode->local)
+ if (mode.local)
tz = local_tzoffset(time);
- if (mode->type == DATE_RAW) {
+ if (mode.type == DATE_RAW) {
strbuf_reset(&timebuf);
strbuf_addf(&timebuf, "%"PRItime" %+05d", time, tz);
return timebuf.buf;
}
- if (mode->type == DATE_RELATIVE) {
+ if (mode.type == DATE_RELATIVE) {
strbuf_reset(&timebuf);
show_date_relative(time, &timebuf);
return timebuf.buf;
}
- if (mode->local)
+ if (mode.local)
tm = time_to_tm_local(time, &tmbuf);
else
tm = time_to_tm(time, tz, &tmbuf);
@@ -331,17 +331,17 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode)
}
strbuf_reset(&timebuf);
- if (mode->type == DATE_SHORT)
+ if (mode.type == DATE_SHORT)
strbuf_addf(&timebuf, "%04d-%02d-%02d", tm->tm_year + 1900,
tm->tm_mon + 1, tm->tm_mday);
- else if (mode->type == DATE_ISO8601)
+ else if (mode.type == DATE_ISO8601)
strbuf_addf(&timebuf, "%04d-%02d-%02d %02d:%02d:%02d %+05d",
tm->tm_year + 1900,
tm->tm_mon + 1,
tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec,
tz);
- else if (mode->type == DATE_ISO8601_STRICT) {
+ else if (mode.type == DATE_ISO8601_STRICT) {
strbuf_addf(&timebuf, "%04d-%02d-%02dT%02d:%02d:%02d",
tm->tm_year + 1900,
tm->tm_mon + 1,
@@ -354,16 +354,16 @@ const char *show_date(timestamp_t time, int tz, const struct date_mode *mode)
tz = abs(tz);
strbuf_addf(&timebuf, "%02d:%02d", tz / 100, tz % 100);
}
- } else if (mode->type == DATE_RFC2822)
+ } else if (mode.type == DATE_RFC2822)
strbuf_addf(&timebuf, "%.3s, %d %.3s %d %02d:%02d:%02d %+05d",
weekday_names[tm->tm_wday], tm->tm_mday,
month_names[tm->tm_mon], tm->tm_year + 1900,
tm->tm_hour, tm->tm_min, tm->tm_sec, tz);
- else if (mode->type == DATE_STRFTIME)
- strbuf_addftime(&timebuf, mode->strftime_fmt, tm, tz,
- !mode->local);
+ else if (mode.type == DATE_STRFTIME)
+ strbuf_addftime(&timebuf, mode.strftime_fmt, tm, tz,
+ !mode.local);
else
- show_date_normal(&timebuf, time, tm, tz, &human_tm, human_tz, mode->local);
+ show_date_normal(&timebuf, time, tm, tz, &human_tm, human_tz, mode.local);
return timebuf.buf;
}
diff --git a/date.h b/date.h
index 6136212a19..0747864fd7 100644
--- a/date.h
+++ b/date.h
@@ -22,8 +22,8 @@ enum date_mode_type {
struct date_mode {
enum date_mode_type type;
- const char *strftime_fmt;
int local;
+ const char *strftime_fmt;
};
#define DATE_MODE_INIT { \
@@ -36,14 +36,14 @@ struct date_mode {
* show_date(t, tz, DATE_MODE(NORMAL));
*/
#define DATE_MODE(t) date_mode_from_type(DATE_##t)
-struct date_mode *date_mode_from_type(enum date_mode_type type);
+struct date_mode date_mode_from_type(enum date_mode_type type);
/**
* Format <'time', 'timezone'> into static memory according to 'mode'
* and return it. The mode is an initialized "struct date_mode"
* (usually from the DATE_MODE() macro).
*/
-const char *show_date(timestamp_t time, int timezone, const struct date_mode *mode);
+const char *show_date(timestamp_t time, int timezone, struct date_mode mode);
/**
* Parse a date format for later use with show_date().
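
Since date_mode_from_type() and show_date() now traffic in struct date_mode by value, call sites keep using the DATE_MODE() macro unchanged but no longer share the static struct that the old pointer-returning version handed out. A minimal caller sketch (the function name is hypothetical and assumes git's usual headers):

#include "git-compat-util.h"
#include "date.h"

/* Hypothetical caller: format a timestamp with the by-value date_mode API. */
static const char *format_commit_time(timestamp_t t, int tz)
{
	/* DATE_MODE(ISO8601) now yields a struct date_mode value, not a pointer. */
	return show_date(t, tz, DATE_MODE(ISO8601));
}
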
diff --git a/diff-lib.c b/diff-lib.c
index 1cd790a4d2..683f11e509 100644
--- a/diff-lib.c
+++ b/diff-lib.c
@@ -127,7 +127,16 @@ void run_diff_files(struct rev_info *revs, unsigned int option)
if (diff_can_quit_early(&revs->diffopt))
break;
- if (!ce_path_match(istate, ce, &revs->prune_data, NULL))
+ /*
+ * NEEDSWORK:
+ * Here we filter with pathspec but the result is further
+ * filtered out when --relative is in effect. To end-users,
+ * a pathspec element that matched only to paths outside the
+ * current directory is like not matching anything at all;
+ * the handling of ps_matched[] here may become problematic
+ * if/when we add the "--error-unmatch" option to "git diff".
+ */
+ if (!ce_path_match(istate, ce, &revs->prune_data, revs->ps_matched))
continue;
if (revs->diffopt.prefix &&
diff --git a/editor.c b/editor.c
index b67b802ddf..d1ba2d7c34 100644
--- a/editor.c
+++ b/editor.c
@@ -104,16 +104,15 @@ static int launch_specified_editor(const char *editor, const char *path,
sigchain_pop(SIGQUIT);
if (sig == SIGINT || sig == SIGQUIT)
raise(sig);
- if (ret)
- return error("There was a problem with the editor '%s'.",
- editor);
-
if (print_waiting_for_editor && !is_terminal_dumb())
/*
* Erase the entire line to avoid wasting the
* vertical space.
*/
term_clear_line();
+ if (ret)
+ return error("there was a problem with the editor '%s'",
+ editor);
}
if (!buffer)
diff --git a/git-compat-util.h b/git-compat-util.h
index 7c2a6538e5..ca7678a379 100644
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -218,6 +218,18 @@ struct strbuf;
#define GIT_WINDOWS_NATIVE
#endif
+#if defined(NO_UNIX_SOCKETS) || !defined(GIT_WINDOWS_NATIVE)
+static inline int _have_unix_sockets(void)
+{
+#if defined(NO_UNIX_SOCKETS)
+ return 0;
+#else
+ return 1;
+#endif
+}
+#define have_unix_sockets _have_unix_sockets
+#endif
+
#include <unistd.h>
#include <stdio.h>
#include <sys/stat.h>
@@ -391,6 +403,7 @@ char *gitdirname(char *);
#ifndef NO_OPENSSL
#ifdef __APPLE__
+#undef __AVAILABILITY_MACROS_USES_AVAILABILITY
#define __AVAILABILITY_MACROS_USES_AVAILABILITY 0
#include <AvailabilityMacros.h>
#undef DEPRECATED_ATTRIBUTE
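
With have_unix_sockets() in place, callers can test for Unix-domain socket support at runtime rather than only via NO_UNIX_SOCKETS at compile time; the #if guard above implies that native Windows builds get their definition elsewhere. A hedged sketch of such a caller (the helper below is hypothetical, not part of this patch):

#include "git-compat-util.h"
#include "gettext.h"

/* Hypothetical guard: bail out when the build or platform lacks Unix sockets. */
static void require_unix_sockets(void)
{
	if (!have_unix_sockets())
		die(_("unix domain sockets are not available"));
}
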
diff --git a/git-curl-compat.h b/git-curl-compat.h
index fd96b3cdff..e1d0bdd273 100644
--- a/git-curl-compat.h
+++ b/git-curl-compat.h
@@ -127,6 +127,15 @@
#endif
/**
+ * Versions before curl 7.66.0 (September 2019) required manually setting the
+ * transfer-encoding for a streaming POST; after that this is handled
+ * automatically.
+ */
+#if LIBCURL_VERSION_NUM < 0x074200
+#define GIT_CURL_NEED_TRANSFER_ENCODING_HEADER
+#endif
+
+/**
* CURLOPT_PROTOCOLS_STR and CURLOPT_REDIR_PROTOCOLS_STR were added in 7.85.0,
* released in August 2022.
*/
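
Where GIT_CURL_NEED_TRANSFER_ENCODING_HEADER ends up defined (curl older than 7.66.0), a caller streaming a POST body has to announce chunked transfer encoding itself. A minimal sketch of such a guard (the helper name and its use are illustrative, not taken from this patch):

#include <curl/curl.h>
#include "git-curl-compat.h"

/* Illustrative: with old curl the caller adds the chunked header manually. */
static struct curl_slist *maybe_add_chunked_header(struct curl_slist *headers)
{
#ifdef GIT_CURL_NEED_TRANSFER_ENCODING_HEADER
	headers = curl_slist_append(headers, "Transfer-Encoding: chunked");
#endif
	return headers;
}
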
diff --git a/git-gui/.gitattributes b/git-gui/.gitattributes
index 59cd41dbff..118d56cfbd 100644
--- a/git-gui/.gitattributes
+++ b/git-gui/.gitattributes
@@ -3,3 +3,4 @@
git-gui.sh encoding=UTF-8
/po/*.po encoding=UTF-8
/GIT-VERSION-GEN eol=lf
+Makefile whitespace=!indent,trail,space
diff --git a/git-gui/Makefile b/git-gui/Makefile
index 3f80435436..667c39ed56 100644
--- a/git-gui/Makefile
+++ b/git-gui/Makefile
@@ -107,12 +107,12 @@ endif
ifeq ($(uname_S),Darwin)
TKFRAMEWORK = /Library/Frameworks/Tk.framework/Resources/Wish.app
- ifeq ($(shell echo "$(uname_R)" | awk -F. '{if ($$1 >= 9) print "y"}')_$(shell test -d $(TKFRAMEWORK) || echo n),y_n)
+ ifeq ($(shell echo "$(uname_R)" | awk -F. '{if ($$1 >= 9) print "y"}')_$(shell test -d $(TKFRAMEWORK) || echo n),y_n)
TKFRAMEWORK = /System/Library/Frameworks/Tk.framework/Resources/Wish.app
- ifeq ($(shell test -d $(TKFRAMEWORK) || echo n),n)
+ ifeq ($(shell test -d $(TKFRAMEWORK) || echo n),n)
TKFRAMEWORK = /System/Library/Frameworks/Tk.framework/Resources/Wish\ Shell.app
- endif
- endif
+ endif
+ endif
TKEXECUTABLE = $(shell basename "$(TKFRAMEWORK)" .app)
endif
@@ -143,9 +143,9 @@ ifeq ($(exedir),$(gg_libdir))
endif
gg_libdir_sed_in := $(gg_libdir)
ifeq ($(uname_S),Darwin)
- ifeq ($(shell test -d $(TKFRAMEWORK) && echo y),y)
+ ifeq ($(shell test -d $(TKFRAMEWORK) && echo y),y)
GITGUI_MACOSXAPP := YesPlease
- endif
+ endif
endif
ifneq (,$(findstring MINGW,$(uname_S)))
ifeq ($(shell expr "$(uname_R)" : '1\.'),2)
@@ -220,9 +220,9 @@ ifdef NO_MSGFMT
MSGFMT ?= $(TCL_PATH) po/po2msg.sh
else
MSGFMT ?= msgfmt
- ifneq ($(shell $(MSGFMT) --tcl -l C -d . /dev/null 2>/dev/null; echo $$?),0)
+ ifneq ($(shell $(MSGFMT) --tcl -l C -d . /dev/null 2>/dev/null; echo $$?),0)
MSGFMT := $(TCL_PATH) po/po2msg.sh
- endif
+ endif
endif
msgsdir = $(gg_libdir)/msgs
diff --git a/gitk-git/Makefile b/gitk-git/Makefile
index 5bdd52a6eb..e1f0aff4a1 100644
--- a/gitk-git/Makefile
+++ b/gitk-git/Makefile
@@ -33,9 +33,9 @@ ifdef NO_MSGFMT
MSGFMT ?= $(TCL_PATH) po/po2msg.sh
else
MSGFMT ?= msgfmt
- ifneq ($(shell $(MSGFMT) --tcl -l C -d . /dev/null 2>/dev/null; echo $$?),0)
+ ifneq ($(shell $(MSGFMT) --tcl -l C -d . /dev/null 2>/dev/null; echo $$?),0)
MSGFMT := $(TCL_PATH) po/po2msg.sh
- endif
+ endif
endif
PO_TEMPLATE = po/gitk.pot
diff --git a/gpg-interface.c b/gpg-interface.c
index b5993385ff..1ff94266d2 100644
--- a/gpg-interface.c
+++ b/gpg-interface.c
@@ -483,7 +483,7 @@ static int verify_ssh_signed_buffer(struct signature_check *sigc,
if (sigc->payload_timestamp)
strbuf_addf(&verify_time, "-Overify-time=%s",
- show_date(sigc->payload_timestamp, 0, &verify_date_mode));
+ show_date(sigc->payload_timestamp, 0, verify_date_mode));
/* Find the principal from the signers */
strvec_pushl(&ssh_keygen.args, fmt->program,
diff --git a/http.c b/http.c
index e73b136e58..3d80bd6116 100644
--- a/http.c
+++ b/http.c
@@ -1452,6 +1452,7 @@ struct active_request_slot *get_active_slot(void)
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, NULL);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, NULL);
curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDS, NULL);
+ curl_easy_setopt(slot->curl, CURLOPT_POSTFIELDSIZE, -1L);
curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 0);
curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1);
curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 1);
diff --git a/imap-send.c b/imap-send.c
index f2e1947e63..4caa8668e6 100644
--- a/imap-send.c
+++ b/imap-send.c
@@ -68,9 +68,6 @@ static void imap_warn(const char *, ...);
static char *next_arg(char **);
-__attribute__((format (printf, 3, 4)))
-static int nfsnprintf(char *buf, int blen, const char *fmt, ...);
-
static int nfvasprintf(char **strp, const char *fmt, va_list ap)
{
int len;
@@ -500,19 +497,6 @@ static char *next_arg(char **s)
return ret;
}
-__attribute__((format (printf, 3, 4)))
-static int nfsnprintf(char *buf, int blen, const char *fmt, ...)
-{
- int ret;
- va_list va;
-
- va_start(va, fmt);
- if (blen <= 0 || (unsigned)(ret = vsnprintf(buf, blen, fmt, va)) >= (unsigned)blen)
- BUG("buffer too small. Please report a bug.");
- va_end(va);
- return ret;
-}
-
static struct imap_cmd *issue_imap_cmd(struct imap_store *ctx,
struct imap_cmd_cb *cb,
const char *fmt, va_list ap)
@@ -535,11 +519,11 @@ static struct imap_cmd *issue_imap_cmd(struct imap_store *ctx,
get_cmd_result(ctx, NULL);
if (!cmd->cb.data)
- bufl = nfsnprintf(buf, sizeof(buf), "%d %s\r\n", cmd->tag, cmd->cmd);
+ bufl = xsnprintf(buf, sizeof(buf), "%d %s\r\n", cmd->tag, cmd->cmd);
else
- bufl = nfsnprintf(buf, sizeof(buf), "%d %s{%d%s}\r\n",
- cmd->tag, cmd->cmd, cmd->cb.dlen,
- CAP(LITERALPLUS) ? "+" : "");
+ bufl = xsnprintf(buf, sizeof(buf), "%d %s{%d%s}\r\n",
+ cmd->tag, cmd->cmd, cmd->cb.dlen,
+ CAP(LITERALPLUS) ? "+" : "");
if (0 < verbosity) {
if (imap->num_in_progress)
diff --git a/log-tree.c b/log-tree.c
index 59eeaef1f7..16031b44e7 100644
--- a/log-tree.c
+++ b/log-tree.c
@@ -773,7 +773,7 @@ void show_log(struct rev_info *opt)
*/
show_reflog_message(opt->reflog_info,
opt->commit_format == CMIT_FMT_ONELINE,
- &opt->date_mode,
+ opt->date_mode,
opt->date_mode_explicit);
if (opt->commit_format == CMIT_FMT_ONELINE)
return;
diff --git a/mem-pool.c b/mem-pool.c
index 2078c22b09..3065b12b23 100644
--- a/mem-pool.c
+++ b/mem-pool.c
@@ -115,6 +115,7 @@ static char *mem_pool_strvfmt(struct mem_pool *pool, const char *fmt,
size_t available = block ? block->end - block->next_free : 0;
va_list cp;
int len, len2;
+ size_t size;
char *ret;
va_copy(cp, ap);
@@ -123,13 +124,14 @@ static char *mem_pool_strvfmt(struct mem_pool *pool, const char *fmt,
if (len < 0)
BUG("your vsnprintf is broken (returned %d)", len);
- ret = mem_pool_alloc(pool, len + 1); /* 1 for NUL */
+ size = st_add(len, 1); /* 1 for NUL */
+ ret = mem_pool_alloc(pool, size);
/* Shortcut; relies on mem_pool_alloc() not touching buffer contents. */
if (ret == next_free)
return ret;
- len2 = vsnprintf(ret, len + 1, fmt, ap);
+ len2 = vsnprintf(ret, size, fmt, ap);
if (len2 != len)
BUG("your vsnprintf is broken (returns inconsistent lengths)");
return ret;
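
The st_add() call above makes the "measure, then format" vsnprintf idiom overflow-safe when the measured length feeds the allocation size. As a standalone illustration of that general idiom, using plain malloc instead of the pool (a sketch, not the mem-pool implementation):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: format into a freshly allocated buffer. */
static char *strvfmt_dup(const char *fmt, ...)
{
	va_list ap, cp;
	int len;
	char *buf;

	va_start(ap, fmt);
	va_copy(cp, ap);
	len = vsnprintf(NULL, 0, fmt, cp);	/* first pass: measure */
	va_end(cp);
	if (len < 0) {
		va_end(ap);
		return NULL;
	}
	buf = malloc((size_t)len + 1);		/* one extra byte for the NUL */
	if (buf)
		vsnprintf(buf, (size_t)len + 1, fmt, ap);	/* second pass: format */
	va_end(ap);
	return buf;
}
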
diff --git a/midx-write.c b/midx-write.c
new file mode 100644
index 0000000000..65e69d2de7
--- /dev/null
+++ b/midx-write.c
@@ -0,0 +1,1525 @@
+#include "git-compat-util.h"
+#include "abspath.h"
+#include "config.h"
+#include "hex.h"
+#include "lockfile.h"
+#include "packfile.h"
+#include "object-file.h"
+#include "hash-lookup.h"
+#include "midx.h"
+#include "progress.h"
+#include "trace2.h"
+#include "run-command.h"
+#include "chunk-format.h"
+#include "pack-bitmap.h"
+#include "refs.h"
+#include "revision.h"
+#include "list-objects.h"
+
+#define PACK_EXPIRED UINT_MAX
+#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
+#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
+#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
+
+extern int midx_checksum_valid(struct multi_pack_index *m);
+extern void clear_midx_files_ext(const char *object_dir, const char *ext,
+ unsigned char *keep_hash);
+extern int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+ const char *idx_name);
+
+static size_t write_midx_header(struct hashfile *f,
+ unsigned char num_chunks,
+ uint32_t num_packs)
+{
+ hashwrite_be32(f, MIDX_SIGNATURE);
+ hashwrite_u8(f, MIDX_VERSION);
+ hashwrite_u8(f, oid_version(the_hash_algo));
+ hashwrite_u8(f, num_chunks);
+ hashwrite_u8(f, 0); /* unused */
+ hashwrite_be32(f, num_packs);
+
+ return MIDX_HEADER_SIZE;
+}
+
+struct pack_info {
+ uint32_t orig_pack_int_id;
+ char *pack_name;
+ struct packed_git *p;
+
+ uint32_t bitmap_pos;
+ uint32_t bitmap_nr;
+
+ unsigned expired : 1;
+};
+
+static void fill_pack_info(struct pack_info *info,
+ struct packed_git *p, const char *pack_name,
+ uint32_t orig_pack_int_id)
+{
+ memset(info, 0, sizeof(struct pack_info));
+
+ info->orig_pack_int_id = orig_pack_int_id;
+ info->pack_name = xstrdup(pack_name);
+ info->p = p;
+ info->bitmap_pos = BITMAP_POS_UNKNOWN;
+}
+
+static int pack_info_compare(const void *_a, const void *_b)
+{
+ struct pack_info *a = (struct pack_info *)_a;
+ struct pack_info *b = (struct pack_info *)_b;
+ return strcmp(a->pack_name, b->pack_name);
+}
+
+static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
+{
+ const char *pack_name = _va;
+ const struct pack_info *compar = _vb;
+
+ return cmp_idx_or_pack_name(pack_name, compar->pack_name);
+}
+
+struct write_midx_context {
+ struct pack_info *info;
+ size_t nr;
+ size_t alloc;
+ struct multi_pack_index *m;
+ struct progress *progress;
+ unsigned pack_paths_checked;
+
+ struct pack_midx_entry *entries;
+ size_t entries_nr;
+
+ uint32_t *pack_perm;
+ uint32_t *pack_order;
+ unsigned large_offsets_needed:1;
+ uint32_t num_large_offsets;
+
+ int preferred_pack_idx;
+
+ struct string_list *to_include;
+};
+
+static void add_pack_to_midx(const char *full_path, size_t full_path_len,
+ const char *file_name, void *data)
+{
+ struct write_midx_context *ctx = data;
+ struct packed_git *p;
+
+ if (ends_with(file_name, ".idx")) {
+ display_progress(ctx->progress, ++ctx->pack_paths_checked);
+		 * Note that at most one of ctx->m and ctx->to_include is set,
+ * Note that at most one of ctx->m and ctx->to_include are set,
+ * so we are testing midx_contains_pack() and
+ * string_list_has_string() independently (guarded by the
+ * appropriate NULL checks).
+ *
+ * We could support passing to_include while reusing an existing
+ * MIDX, but don't currently since the reuse process drags
+ * forward all packs from an existing MIDX (without checking
+ * whether or not they appear in the to_include list).
+ *
+		 * If we added support for that, these next two conditionals
+ * should be performed independently (likely checking
+ * to_include before the existing MIDX).
+ */
+ if (ctx->m && midx_contains_pack(ctx->m, file_name))
+ return;
+ else if (ctx->to_include &&
+ !string_list_has_string(ctx->to_include, file_name))
+ return;
+
+ ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
+
+ p = add_packed_git(full_path, full_path_len, 0);
+ if (!p) {
+ warning(_("failed to add packfile '%s'"),
+ full_path);
+ return;
+ }
+
+ if (open_pack_index(p)) {
+ warning(_("failed to open pack-index '%s'"),
+ full_path);
+ close_pack(p);
+ free(p);
+ return;
+ }
+
+ fill_pack_info(&ctx->info[ctx->nr], p, file_name, ctx->nr);
+ ctx->nr++;
+ }
+}
+
+struct pack_midx_entry {
+ struct object_id oid;
+ uint32_t pack_int_id;
+ time_t pack_mtime;
+ uint64_t offset;
+ unsigned preferred : 1;
+};
+
+static int midx_oid_compare(const void *_a, const void *_b)
+{
+ const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
+ const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
+ int cmp = oidcmp(&a->oid, &b->oid);
+
+ if (cmp)
+ return cmp;
+
+ /* Sort objects in a preferred pack first when multiple copies exist. */
+ if (a->preferred > b->preferred)
+ return -1;
+ if (a->preferred < b->preferred)
+ return 1;
+
+ if (a->pack_mtime > b->pack_mtime)
+ return -1;
+ else if (a->pack_mtime < b->pack_mtime)
+ return 1;
+
+ return a->pack_int_id - b->pack_int_id;
+}
+
+static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
+ struct pack_midx_entry *e,
+ uint32_t pos)
+{
+ if (pos >= m->num_objects)
+ return 1;
+
+ nth_midxed_object_oid(&e->oid, m, pos);
+ e->pack_int_id = nth_midxed_pack_int_id(m, pos);
+ e->offset = nth_midxed_offset(m, pos);
+
+ /* consider objects in midx to be from "old" packs */
+ e->pack_mtime = 0;
+ return 0;
+}
+
+static void fill_pack_entry(uint32_t pack_int_id,
+ struct packed_git *p,
+ uint32_t cur_object,
+ struct pack_midx_entry *entry,
+ int preferred)
+{
+ if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
+ die(_("failed to locate object %d in packfile"), cur_object);
+
+ entry->pack_int_id = pack_int_id;
+ entry->pack_mtime = p->mtime;
+
+ entry->offset = nth_packed_object_offset(p, cur_object);
+ entry->preferred = !!preferred;
+}
+
+struct midx_fanout {
+ struct pack_midx_entry *entries;
+ size_t nr, alloc;
+};
+
+static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
+{
+ if (nr < fanout->nr)
+ BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
+ (uintmax_t)nr, (uintmax_t)fanout->nr);
+ ALLOC_GROW(fanout->entries, nr, fanout->alloc);
+}
+
+static void midx_fanout_sort(struct midx_fanout *fanout)
+{
+ QSORT(fanout->entries, fanout->nr, midx_oid_compare);
+}
+
+static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
+ struct multi_pack_index *m,
+ uint32_t cur_fanout,
+ int preferred_pack)
+{
+ uint32_t start = 0, end;
+ uint32_t cur_object;
+
+ if (cur_fanout)
+ start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
+ end = ntohl(m->chunk_oid_fanout[cur_fanout]);
+
+ for (cur_object = start; cur_object < end; cur_object++) {
+ if ((preferred_pack > -1) &&
+ (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
+ /*
+ * Objects from preferred packs are added
+ * separately.
+ */
+ continue;
+ }
+
+ midx_fanout_grow(fanout, fanout->nr + 1);
+ nth_midxed_pack_midx_entry(m,
+ &fanout->entries[fanout->nr],
+ cur_object);
+ fanout->entries[fanout->nr].preferred = 0;
+ fanout->nr++;
+ }
+}
+
+static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
+ struct pack_info *info,
+ uint32_t cur_pack,
+ int preferred,
+ uint32_t cur_fanout)
+{
+ struct packed_git *pack = info[cur_pack].p;
+ uint32_t start = 0, end;
+ uint32_t cur_object;
+
+ if (cur_fanout)
+ start = get_pack_fanout(pack, cur_fanout - 1);
+ end = get_pack_fanout(pack, cur_fanout);
+
+ for (cur_object = start; cur_object < end; cur_object++) {
+ midx_fanout_grow(fanout, fanout->nr + 1);
+ fill_pack_entry(cur_pack,
+ info[cur_pack].p,
+ cur_object,
+ &fanout->entries[fanout->nr],
+ preferred);
+ fanout->nr++;
+ }
+}
+
+/*
+ * It is possible to artificially get into a state where there are many
+ * duplicate copies of objects. That can create high memory pressure if
+ * we are to create a list of all objects before de-duplication. To reduce
+ * this memory pressure without a significant performance drop, automatically
+ * group objects by the first byte of their object id. Use the IDX fanout
+ * tables to group the data, copy to a local array, then sort.
+ *
+ * Copy only the de-duplicated entries (selected by most-recent modified time
+ * of a packfile containing the object).
+ */
+static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
+ struct pack_info *info,
+ uint32_t nr_packs,
+ size_t *nr_objects,
+ int preferred_pack)
+{
+ uint32_t cur_fanout, cur_pack, cur_object;
+ size_t alloc_objects, total_objects = 0;
+ struct midx_fanout fanout = { 0 };
+ struct pack_midx_entry *deduplicated_entries = NULL;
+ uint32_t start_pack = m ? m->num_packs : 0;
+
+ for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
+ total_objects = st_add(total_objects,
+ info[cur_pack].p->num_objects);
+
+ /*
+ * As we de-duplicate by fanout value, we expect the fanout
+ * slices to be evenly distributed, with some noise. Hence,
+ * allocate slightly more than one 256th.
+ */
+ alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;
+
+ ALLOC_ARRAY(fanout.entries, fanout.alloc);
+ ALLOC_ARRAY(deduplicated_entries, alloc_objects);
+ *nr_objects = 0;
+
+ for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
+ fanout.nr = 0;
+
+ if (m)
+ midx_fanout_add_midx_fanout(&fanout, m, cur_fanout,
+ preferred_pack);
+
+ for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
+ int preferred = cur_pack == preferred_pack;
+ midx_fanout_add_pack_fanout(&fanout,
+ info, cur_pack,
+ preferred, cur_fanout);
+ }
+
+ if (-1 < preferred_pack && preferred_pack < start_pack)
+ midx_fanout_add_pack_fanout(&fanout, info,
+ preferred_pack, 1,
+ cur_fanout);
+
+ midx_fanout_sort(&fanout);
+
+ /*
+ * The batch is now sorted by OID and then mtime (descending).
+ * Take only the first duplicate.
+ */
+ for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
+ if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
+ &fanout.entries[cur_object].oid))
+ continue;
+
+ ALLOC_GROW(deduplicated_entries, st_add(*nr_objects, 1),
+ alloc_objects);
+ memcpy(&deduplicated_entries[*nr_objects],
+ &fanout.entries[cur_object],
+ sizeof(struct pack_midx_entry));
+ (*nr_objects)++;
+ }
+ }
+
+ free(fanout.entries);
+ return deduplicated_entries;
+}
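
The bucketing described in the comment above leans on the cumulative 256-entry fanout table of each index: objects whose OID starts with byte b occupy positions [fanout[b-1], fanout[b]) of the sorted object list, with fanout[-1] read as 0; this is also what lets a lookup skip the top eight halving steps of a binary search (2^8 = 256 buckets). A tiny sketch of that bound computation (a hypothetical helper mirroring midx_fanout_add_pack_fanout() above):

#include <stdint.h>

/* Illustrative only: index range of objects whose OID begins with byte b,
 * given a cumulative 256-entry fanout table in host byte order. */
static void fanout_bucket_bounds(const uint32_t *fanout, unsigned char b,
				 uint32_t *start, uint32_t *end)
{
	*start = b ? fanout[b - 1] : 0;
	*end = fanout[b];
}
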
+
+static int write_midx_pack_names(struct hashfile *f, void *data)
+{
+ struct write_midx_context *ctx = data;
+ uint32_t i;
+ unsigned char padding[MIDX_CHUNK_ALIGNMENT];
+ size_t written = 0;
+
+ for (i = 0; i < ctx->nr; i++) {
+ size_t writelen;
+
+ if (ctx->info[i].expired)
+ continue;
+
+ if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
+ BUG("incorrect pack-file order: %s before %s",
+ ctx->info[i - 1].pack_name,
+ ctx->info[i].pack_name);
+
+ writelen = strlen(ctx->info[i].pack_name) + 1;
+ hashwrite(f, ctx->info[i].pack_name, writelen);
+ written += writelen;
+ }
+
+ /* add padding to be aligned */
+ i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
+ if (i < MIDX_CHUNK_ALIGNMENT) {
+ memset(padding, 0, sizeof(padding));
+ hashwrite(f, padding, i);
+ }
+
+ return 0;
+}
+
+static int write_midx_bitmapped_packs(struct hashfile *f, void *data)
+{
+ struct write_midx_context *ctx = data;
+ size_t i;
+
+ for (i = 0; i < ctx->nr; i++) {
+ struct pack_info *pack = &ctx->info[i];
+ if (pack->expired)
+ continue;
+
+ if (pack->bitmap_pos == BITMAP_POS_UNKNOWN && pack->bitmap_nr)
+ BUG("pack '%s' has no bitmap position, but has %d bitmapped object(s)",
+ pack->pack_name, pack->bitmap_nr);
+
+ hashwrite_be32(f, pack->bitmap_pos);
+ hashwrite_be32(f, pack->bitmap_nr);
+ }
+ return 0;
+}
+
+static int write_midx_oid_fanout(struct hashfile *f,
+ void *data)
+{
+ struct write_midx_context *ctx = data;
+ struct pack_midx_entry *list = ctx->entries;
+ struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
+ uint32_t count = 0;
+ uint32_t i;
+
+ /*
+ * Write the first-level table (the list is sorted,
+ * but we use a 256-entry lookup to be able to avoid
+ * having to do eight extra binary search iterations).
+ */
+ for (i = 0; i < 256; i++) {
+ struct pack_midx_entry *next = list;
+
+ while (next < last && next->oid.hash[0] == i) {
+ count++;
+ next++;
+ }
+
+ hashwrite_be32(f, count);
+ list = next;
+ }
+
+ return 0;
+}
+
+static int write_midx_oid_lookup(struct hashfile *f,
+ void *data)
+{
+ struct write_midx_context *ctx = data;
+ unsigned char hash_len = the_hash_algo->rawsz;
+ struct pack_midx_entry *list = ctx->entries;
+ uint32_t i;
+
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *obj = list++;
+
+ if (i < ctx->entries_nr - 1) {
+ struct pack_midx_entry *next = list;
+ if (oidcmp(&obj->oid, &next->oid) >= 0)
+ BUG("OIDs not in order: %s >= %s",
+ oid_to_hex(&obj->oid),
+ oid_to_hex(&next->oid));
+ }
+
+ hashwrite(f, obj->oid.hash, (int)hash_len);
+ }
+
+ return 0;
+}
+
+static int write_midx_object_offsets(struct hashfile *f,
+ void *data)
+{
+ struct write_midx_context *ctx = data;
+ struct pack_midx_entry *list = ctx->entries;
+ uint32_t i, nr_large_offset = 0;
+
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *obj = list++;
+
+ if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
+ BUG("object %s is in an expired pack with int-id %d",
+ oid_to_hex(&obj->oid),
+ obj->pack_int_id);
+
+ hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
+
+ if (ctx->large_offsets_needed && obj->offset >> 31)
+ hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
+ else if (!ctx->large_offsets_needed && obj->offset >> 32)
+ BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
+ oid_to_hex(&obj->oid),
+ obj->offset);
+ else
+ hashwrite_be32(f, (uint32_t)obj->offset);
+ }
+
+ return 0;
+}
+
+static int write_midx_large_offsets(struct hashfile *f,
+ void *data)
+{
+ struct write_midx_context *ctx = data;
+ struct pack_midx_entry *list = ctx->entries;
+ struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
+ uint32_t nr_large_offset = ctx->num_large_offsets;
+
+ while (nr_large_offset) {
+ struct pack_midx_entry *obj;
+ uint64_t offset;
+
+ if (list >= end)
+ BUG("too many large-offset objects");
+
+ obj = list++;
+ offset = obj->offset;
+
+ if (!(offset >> 31))
+ continue;
+
+ hashwrite_be64(f, offset);
+
+ nr_large_offset--;
+ }
+
+ return 0;
+}
+
+static int write_midx_revindex(struct hashfile *f,
+ void *data)
+{
+ struct write_midx_context *ctx = data;
+ uint32_t i;
+
+ for (i = 0; i < ctx->entries_nr; i++)
+ hashwrite_be32(f, ctx->pack_order[i]);
+
+ return 0;
+}
+
+struct midx_pack_order_data {
+ uint32_t nr;
+ uint32_t pack;
+ off_t offset;
+};
+
+static int midx_pack_order_cmp(const void *va, const void *vb)
+{
+ const struct midx_pack_order_data *a = va, *b = vb;
+ if (a->pack < b->pack)
+ return -1;
+ else if (a->pack > b->pack)
+ return 1;
+ else if (a->offset < b->offset)
+ return -1;
+ else if (a->offset > b->offset)
+ return 1;
+ else
+ return 0;
+}
+
+static uint32_t *midx_pack_order(struct write_midx_context *ctx)
+{
+ struct midx_pack_order_data *data;
+ uint32_t *pack_order;
+ uint32_t i;
+
+ trace2_region_enter("midx", "midx_pack_order", the_repository);
+
+ ALLOC_ARRAY(data, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *e = &ctx->entries[i];
+ data[i].nr = i;
+ data[i].pack = ctx->pack_perm[e->pack_int_id];
+ if (!e->preferred)
+ data[i].pack |= (1U << 31);
+ data[i].offset = e->offset;
+ }
+
+ QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
+
+ ALLOC_ARRAY(pack_order, ctx->entries_nr);
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *e = &ctx->entries[data[i].nr];
+ struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
+ if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
+ pack->bitmap_pos = i;
+ pack->bitmap_nr++;
+ pack_order[i] = data[i].nr;
+ }
+ for (i = 0; i < ctx->nr; i++) {
+ struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
+ if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
+ pack->bitmap_pos = 0;
+ }
+ free(data);
+
+ trace2_region_leave("midx", "midx_pack_order", the_repository);
+
+ return pack_order;
+}
+
+static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
+ struct write_midx_context *ctx)
+{
+ struct strbuf buf = STRBUF_INIT;
+ const char *tmp_file;
+
+ trace2_region_enter("midx", "write_midx_reverse_index", the_repository);
+
+ strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
+
+ tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
+ midx_hash, WRITE_REV);
+
+ if (finalize_object_file(tmp_file, buf.buf))
+ die(_("cannot store reverse index file"));
+
+ strbuf_release(&buf);
+
+ trace2_region_leave("midx", "write_midx_reverse_index", the_repository);
+}
+
+static void prepare_midx_packing_data(struct packing_data *pdata,
+ struct write_midx_context *ctx)
+{
+ uint32_t i;
+
+ trace2_region_enter("midx", "prepare_midx_packing_data", the_repository);
+
+ memset(pdata, 0, sizeof(struct packing_data));
+ prepare_packing_data(the_repository, pdata);
+
+ for (i = 0; i < ctx->entries_nr; i++) {
+ struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]];
+ struct object_entry *to = packlist_alloc(pdata, &from->oid);
+
+ oe_set_in_pack(pdata, to,
+ ctx->info[ctx->pack_perm[from->pack_int_id]].p);
+ }
+
+ trace2_region_leave("midx", "prepare_midx_packing_data", the_repository);
+}
+
+static int add_ref_to_pending(const char *refname,
+ const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct rev_info *revs = (struct rev_info*)cb_data;
+ struct object_id peeled;
+ struct object *object;
+
+ if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
+ warning("symbolic ref is dangling: %s", refname);
+ return 0;
+ }
+
+ if (!peel_iterated_oid(oid, &peeled))
+ oid = &peeled;
+
+ object = parse_object_or_die(oid, refname);
+ if (object->type != OBJ_COMMIT)
+ return 0;
+
+ add_pending_object(revs, object, "");
+ if (bitmap_is_preferred_refname(revs->repo, refname))
+ object->flags |= NEEDS_BITMAP;
+ return 0;
+}
+
+struct bitmap_commit_cb {
+ struct commit **commits;
+ size_t commits_nr, commits_alloc;
+
+ struct write_midx_context *ctx;
+};
+
+static const struct object_id *bitmap_oid_access(size_t index,
+ const void *_entries)
+{
+ const struct pack_midx_entry *entries = _entries;
+ return &entries[index].oid;
+}
+
+static void bitmap_show_commit(struct commit *commit, void *_data)
+{
+ struct bitmap_commit_cb *data = _data;
+ int pos = oid_pos(&commit->object.oid, data->ctx->entries,
+ data->ctx->entries_nr,
+ bitmap_oid_access);
+ if (pos < 0)
+ return;
+
+ ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc);
+ data->commits[data->commits_nr++] = commit;
+}
+
+static int read_refs_snapshot(const char *refs_snapshot,
+ struct rev_info *revs)
+{
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id oid;
+ FILE *f = xfopen(refs_snapshot, "r");
+
+ while (strbuf_getline(&buf, f) != EOF) {
+ struct object *object;
+ int preferred = 0;
+ char *hex = buf.buf;
+ const char *end = NULL;
+
+ if (buf.len && *buf.buf == '+') {
+ preferred = 1;
+ hex = &buf.buf[1];
+ }
+
+ if (parse_oid_hex(hex, &oid, &end) < 0)
+ die(_("could not parse line: %s"), buf.buf);
+ if (*end)
+ die(_("malformed line: %s"), buf.buf);
+
+ object = parse_object_or_die(&oid, NULL);
+ if (preferred)
+ object->flags |= NEEDS_BITMAP;
+
+ add_pending_object(revs, object, "");
+ }
+
+ fclose(f);
+ strbuf_release(&buf);
+ return 0;
+}
+static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
+ const char *refs_snapshot,
+ struct write_midx_context *ctx)
+{
+ struct rev_info revs;
+ struct bitmap_commit_cb cb = {0};
+
+ trace2_region_enter("midx", "find_commits_for_midx_bitmap",
+ the_repository);
+
+ cb.ctx = ctx;
+
+ repo_init_revisions(the_repository, &revs, NULL);
+ if (refs_snapshot) {
+ read_refs_snapshot(refs_snapshot, &revs);
+ } else {
+ setup_revisions(0, NULL, &revs, NULL);
+ for_each_ref(add_ref_to_pending, &revs);
+ }
+
+ /*
+ * Skipping promisor objects here is intentional, since it only excludes
+ * them from the list of reachable commits that we want to select from
+ * when computing the selection of MIDX'd commits to receive bitmaps.
+ *
+ * Reachability bitmaps do require that their objects be closed under
+ * reachability, but fetching any objects missing from promisors at this
+ * point is too late. But, if one of those objects can be reached from
+	 * another object that is included in the bitmap, then we will
+ * complain later that we don't have reachability closure (and fail
+ * appropriately).
+ */
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+
+ traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
+ if (indexed_commits_nr_p)
+ *indexed_commits_nr_p = cb.commits_nr;
+
+ release_revisions(&revs);
+
+ trace2_region_leave("midx", "find_commits_for_midx_bitmap",
+ the_repository);
+
+ return cb.commits;
+}
+
+static int write_midx_bitmap(const char *midx_name,
+ const unsigned char *midx_hash,
+ struct packing_data *pdata,
+ struct commit **commits,
+ uint32_t commits_nr,
+ uint32_t *pack_order,
+ unsigned flags)
+{
+ int ret, i;
+ uint16_t options = 0;
+ struct pack_idx_entry **index;
+ char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name,
+ hash_to_hex(midx_hash));
+
+ trace2_region_enter("midx", "write_midx_bitmap", the_repository);
+
+ if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
+ options |= BITMAP_OPT_HASH_CACHE;
+
+ if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE)
+ options |= BITMAP_OPT_LOOKUP_TABLE;
+
+ /*
+ * Build the MIDX-order index based on pdata.objects (which is already
+ * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
+ * this order).
+ */
+ ALLOC_ARRAY(index, pdata->nr_objects);
+ for (i = 0; i < pdata->nr_objects; i++)
+ index[i] = &pdata->objects[i].idx;
+
+ bitmap_writer_show_progress(flags & MIDX_PROGRESS);
+ bitmap_writer_build_type_index(pdata, index, pdata->nr_objects);
+
+ /*
+ * bitmap_writer_finish expects objects in lex order, but pack_order
+	 * gives us exactly that. Use it directly instead of re-sorting the
+ * array.
+ *
+ * This changes the order of objects in 'index' between
+ * bitmap_writer_build_type_index and bitmap_writer_finish.
+ *
+ * The same re-ordering takes place in the single-pack bitmap code via
+ * write_idx_file(), which is called by finish_tmp_packfile(), which
+ * happens between bitmap_writer_build_type_index() and
+ * bitmap_writer_finish().
+ */
+ for (i = 0; i < pdata->nr_objects; i++)
+ index[pack_order[i]] = &pdata->objects[i].idx;
+
+ bitmap_writer_select_commits(commits, commits_nr, -1);
+ ret = bitmap_writer_build(pdata);
+ if (ret < 0)
+ goto cleanup;
+
+ bitmap_writer_set_checksum(midx_hash);
+ bitmap_writer_finish(index, pdata->nr_objects, bitmap_name, options);
+
+cleanup:
+ free(index);
+ free(bitmap_name);
+
+ trace2_region_leave("midx", "write_midx_bitmap", the_repository);
+
+ return ret;
+}
+
+static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
+ const char *object_dir)
+{
+ struct multi_pack_index *result = NULL;
+ struct multi_pack_index *cur;
+ char *obj_dir_real = real_pathdup(object_dir, 1);
+ struct strbuf cur_path_real = STRBUF_INIT;
+
+ /* Ensure the given object_dir is local, or a known alternate. */
+ find_odb(r, obj_dir_real);
+
+ for (cur = get_multi_pack_index(r); cur; cur = cur->next) {
+ strbuf_realpath(&cur_path_real, cur->object_dir, 1);
+ if (!strcmp(obj_dir_real, cur_path_real.buf)) {
+ result = cur;
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ free(obj_dir_real);
+ strbuf_release(&cur_path_real);
+ return result;
+}
+
+static int write_midx_internal(const char *object_dir,
+ struct string_list *packs_to_include,
+ struct string_list *packs_to_drop,
+ const char *preferred_pack_name,
+ const char *refs_snapshot,
+ unsigned flags)
+{
+ struct strbuf midx_name = STRBUF_INIT;
+ unsigned char midx_hash[GIT_MAX_RAWSZ];
+ uint32_t i;
+ struct hashfile *f = NULL;
+ struct lock_file lk;
+ struct write_midx_context ctx = { 0 };
+ int bitmapped_packs_concat_len = 0;
+ int pack_name_concat_len = 0;
+ int dropped_packs = 0;
+ int result = 0;
+ struct chunkfile *cf;
+
+ trace2_region_enter("midx", "write_midx_internal", the_repository);
+
+ get_midx_filename(&midx_name, object_dir);
+ if (safe_create_leading_directories(midx_name.buf))
+ die_errno(_("unable to create leading directories of %s"),
+ midx_name.buf);
+
+ if (!packs_to_include) {
+ /*
+ * Only reference an existing MIDX when not filtering which
+ * packs to include, since all packs and objects are copied
+ * blindly from an existing MIDX if one is present.
+ */
+ ctx.m = lookup_multi_pack_index(the_repository, object_dir);
+ }
+
+ if (ctx.m && !midx_checksum_valid(ctx.m)) {
+ warning(_("ignoring existing multi-pack-index; checksum mismatch"));
+ ctx.m = NULL;
+ }
+
+ ctx.nr = 0;
+ ctx.alloc = ctx.m ? ctx.m->num_packs : 16;
+ ctx.info = NULL;
+ ALLOC_ARRAY(ctx.info, ctx.alloc);
+
+ if (ctx.m) {
+ for (i = 0; i < ctx.m->num_packs; i++) {
+ ALLOC_GROW(ctx.info, ctx.nr + 1, ctx.alloc);
+
+ if (flags & MIDX_WRITE_REV_INDEX) {
+ /*
+ * If generating a reverse index, need to have
+ * packed_git's loaded to compare their
+ * mtimes and object count.
+ */
+ if (prepare_midx_pack(the_repository, ctx.m, i)) {
+ error(_("could not load pack"));
+ result = 1;
+ goto cleanup;
+ }
+
+ if (open_pack_index(ctx.m->packs[i]))
+ die(_("could not open index for %s"),
+ ctx.m->packs[i]->pack_name);
+ }
+
+ fill_pack_info(&ctx.info[ctx.nr++], ctx.m->packs[i],
+ ctx.m->pack_names[i], i);
+ }
+ }
+
+ ctx.pack_paths_checked = 0;
+ if (flags & MIDX_PROGRESS)
+ ctx.progress = start_delayed_progress(_("Adding packfiles to multi-pack-index"), 0);
+ else
+ ctx.progress = NULL;
+
+ ctx.to_include = packs_to_include;
+
+ for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
+ stop_progress(&ctx.progress);
+
+ if ((ctx.m && ctx.nr == ctx.m->num_packs) &&
+ !(packs_to_include || packs_to_drop)) {
+ struct bitmap_index *bitmap_git;
+ int bitmap_exists;
+ int want_bitmap = flags & MIDX_WRITE_BITMAP;
+
+ bitmap_git = prepare_midx_bitmap_git(ctx.m);
+ bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
+ free_bitmap_index(bitmap_git);
+
+ if (bitmap_exists || !want_bitmap) {
+ /*
+ * The correct MIDX already exists, and so does a
+ * corresponding bitmap (or one wasn't requested).
+ */
+ if (!want_bitmap)
+ clear_midx_files_ext(object_dir, ".bitmap",
+ NULL);
+ goto cleanup;
+ }
+ }
+
+ if (preferred_pack_name) {
+ ctx.preferred_pack_idx = -1;
+
+ for (i = 0; i < ctx.nr; i++) {
+ if (!cmp_idx_or_pack_name(preferred_pack_name,
+ ctx.info[i].pack_name)) {
+ ctx.preferred_pack_idx = i;
+ break;
+ }
+ }
+
+ if (ctx.preferred_pack_idx == -1)
+ warning(_("unknown preferred pack: '%s'"),
+ preferred_pack_name);
+ } else if (ctx.nr &&
+ (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
+ struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
+ ctx.preferred_pack_idx = 0;
+
+ if (packs_to_drop && packs_to_drop->nr)
+ BUG("cannot write a MIDX bitmap during expiration");
+
+ /*
+ * set a preferred pack when writing a bitmap to ensure that
+ * the pack from which the first object is selected in pseudo
+ * pack-order has all of its objects selected from that pack
+ * (and not another pack containing a duplicate)
+ */
+ for (i = 1; i < ctx.nr; i++) {
+ struct packed_git *p = ctx.info[i].p;
+
+ if (!oldest->num_objects || p->mtime < oldest->mtime) {
+ oldest = p;
+ ctx.preferred_pack_idx = i;
+ }
+ }
+
+ if (!oldest->num_objects) {
+ /*
+ * If all packs are empty, unset the preferred index.
+ * This is acceptable since there will be no duplicate
+ * objects to resolve, so the preferred value doesn't
+ * matter.
+ */
+ ctx.preferred_pack_idx = -1;
+ }
+ } else {
+ /*
+ * otherwise don't mark any pack as preferred to avoid
+ * interfering with expiration logic below
+ */
+ ctx.preferred_pack_idx = -1;
+ }
+
+ if (ctx.preferred_pack_idx > -1) {
+ struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
+ if (!preferred->num_objects) {
+ error(_("cannot select preferred pack %s with no objects"),
+ preferred->pack_name);
+ result = 1;
+ goto cleanup;
+ }
+ }
+
+ ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
+ ctx.preferred_pack_idx);
+
+ ctx.large_offsets_needed = 0;
+ for (i = 0; i < ctx.entries_nr; i++) {
+ if (ctx.entries[i].offset > 0x7fffffff)
+ ctx.num_large_offsets++;
+ if (ctx.entries[i].offset > 0xffffffff)
+ ctx.large_offsets_needed = 1;
+ }
+
+ QSORT(ctx.info, ctx.nr, pack_info_compare);
+
+ if (packs_to_drop && packs_to_drop->nr) {
+ int drop_index = 0;
+ int missing_drops = 0;
+
+ for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
+ int cmp = strcmp(ctx.info[i].pack_name,
+ packs_to_drop->items[drop_index].string);
+
+ if (!cmp) {
+ drop_index++;
+ ctx.info[i].expired = 1;
+ } else if (cmp > 0) {
+ error(_("did not see pack-file %s to drop"),
+ packs_to_drop->items[drop_index].string);
+ drop_index++;
+ missing_drops++;
+ i--;
+ } else {
+ ctx.info[i].expired = 0;
+ }
+ }
+
+ if (missing_drops) {
+ result = 1;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * pack_perm stores a permutation between pack-int-ids from the
+ * previous multi-pack-index to the new one we are writing:
+ *
+ * pack_perm[old_id] = new_id
+ */
+ ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
+ for (i = 0; i < ctx.nr; i++) {
+ if (ctx.info[i].expired) {
+ dropped_packs++;
+ ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
+ } else {
+ ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
+ }
+ }
+
+ for (i = 0; i < ctx.nr; i++) {
+ if (ctx.info[i].expired)
+ continue;
+ pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
+ bitmapped_packs_concat_len += 2 * sizeof(uint32_t);
+ }
+
+ /* Check that the preferred pack wasn't expired (if given). */
+ if (preferred_pack_name) {
+ struct pack_info *preferred = bsearch(preferred_pack_name,
+ ctx.info, ctx.nr,
+ sizeof(*ctx.info),
+ idx_or_pack_name_cmp);
+ if (preferred) {
+ uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
+ if (perm == PACK_EXPIRED)
+ warning(_("preferred pack '%s' is expired"),
+ preferred_pack_name);
+ }
+ }
+
+ if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
+ pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
+ (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
+
+ hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
+ f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
+
+ if (ctx.nr - dropped_packs == 0) {
+ error(_("no pack files to index."));
+ result = 1;
+ goto cleanup;
+ }
+
+ if (!ctx.entries_nr) {
+ if (flags & MIDX_WRITE_BITMAP)
+ warning(_("refusing to write multi-pack .bitmap without any objects"));
+ flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
+ }
+
+ cf = init_chunkfile(f);
+
+ add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
+ write_midx_pack_names);
+ add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
+ write_midx_oid_fanout);
+ add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
+ st_mult(ctx.entries_nr, the_hash_algo->rawsz),
+ write_midx_oid_lookup);
+ add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
+ st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
+ write_midx_object_offsets);
+
+ if (ctx.large_offsets_needed)
+ add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
+ st_mult(ctx.num_large_offsets,
+ MIDX_CHUNK_LARGE_OFFSET_WIDTH),
+ write_midx_large_offsets);
+
+ if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
+ ctx.pack_order = midx_pack_order(&ctx);
+ add_chunk(cf, MIDX_CHUNKID_REVINDEX,
+ st_mult(ctx.entries_nr, sizeof(uint32_t)),
+ write_midx_revindex);
+ add_chunk(cf, MIDX_CHUNKID_BITMAPPEDPACKS,
+ bitmapped_packs_concat_len,
+ write_midx_bitmapped_packs);
+ }
+
+ write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
+ write_chunkfile(cf, &ctx);
+
+ finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
+ CSUM_FSYNC | CSUM_HASH_IN_STREAM);
+ free_chunkfile(cf);
+
+ if (flags & MIDX_WRITE_REV_INDEX &&
+ git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
+ write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);
+
+ if (flags & MIDX_WRITE_BITMAP) {
+ struct packing_data pdata;
+ struct commit **commits;
+ uint32_t commits_nr;
+
+ if (!ctx.entries_nr)
+ BUG("cannot write a bitmap without any objects");
+
+ prepare_midx_packing_data(&pdata, &ctx);
+
+ commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);
+
+ /*
+ * The previous steps translated the information from
+ * 'entries' into information suitable for constructing
+ * bitmaps. We no longer need that array, so clear it to
+ * reduce memory pressure.
+ */
+ FREE_AND_NULL(ctx.entries);
+ ctx.entries_nr = 0;
+
+ if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata,
+ commits, commits_nr, ctx.pack_order,
+ flags) < 0) {
+ error(_("could not write multi-pack bitmap"));
+ result = 1;
+ clear_packing_data(&pdata);
+ free(commits);
+ goto cleanup;
+ }
+
+ clear_packing_data(&pdata);
+ free(commits);
+ }
+ /*
+ * NOTE: Do not use ctx.entries beyond this point, since it might
+ * have been freed in the previous if block.
+ */
+
+ if (ctx.m)
+ close_object_store(the_repository->objects);
+
+ if (commit_lock_file(&lk) < 0)
+ die_errno(_("could not write multi-pack-index"));
+
+ clear_midx_files_ext(object_dir, ".bitmap", midx_hash);
+ clear_midx_files_ext(object_dir, ".rev", midx_hash);
+
+cleanup:
+ for (i = 0; i < ctx.nr; i++) {
+ if (ctx.info[i].p) {
+ close_pack(ctx.info[i].p);
+ free(ctx.info[i].p);
+ }
+ free(ctx.info[i].pack_name);
+ }
+
+ free(ctx.info);
+ free(ctx.entries);
+ free(ctx.pack_perm);
+ free(ctx.pack_order);
+ strbuf_release(&midx_name);
+
+ trace2_region_leave("midx", "write_midx_internal", the_repository);
+
+ return result;
+}
+
+int write_midx_file(const char *object_dir,
+ const char *preferred_pack_name,
+ const char *refs_snapshot,
+ unsigned flags)
+{
+ return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
+ refs_snapshot, flags);
+}
+
+int write_midx_file_only(const char *object_dir,
+ struct string_list *packs_to_include,
+ const char *preferred_pack_name,
+ const char *refs_snapshot,
+ unsigned flags)
+{
+ return write_midx_internal(object_dir, packs_to_include, NULL,
+ preferred_pack_name, refs_snapshot, flags);
+}
+
+int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
+{
+ uint32_t i, *count, result = 0;
+ struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
+ struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+ struct progress *progress = NULL;
+
+ if (!m)
+ return 0;
+
+ CALLOC_ARRAY(count, m->num_packs);
+
+ if (flags & MIDX_PROGRESS)
+ progress = start_delayed_progress(_("Counting referenced objects"),
+ m->num_objects);
+ for (i = 0; i < m->num_objects; i++) {
+ int pack_int_id = nth_midxed_pack_int_id(m, i);
+ count[pack_int_id]++;
+ display_progress(progress, i + 1);
+ }
+ stop_progress(&progress);
+
+ if (flags & MIDX_PROGRESS)
+ progress = start_delayed_progress(_("Finding and deleting unreferenced packfiles"),
+ m->num_packs);
+ for (i = 0; i < m->num_packs; i++) {
+ char *pack_name;
+ display_progress(progress, i + 1);
+
+ if (count[i])
+ continue;
+
+ if (prepare_midx_pack(r, m, i))
+ continue;
+
+ if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
+ continue;
+
+ pack_name = xstrdup(m->packs[i]->pack_name);
+ close_pack(m->packs[i]);
+
+ string_list_insert(&packs_to_drop, m->pack_names[i]);
+ unlink_pack_path(pack_name, 0);
+ free(pack_name);
+ }
+ stop_progress(&progress);
+
+ free(count);
+
+ if (packs_to_drop.nr)
+ result = write_midx_internal(object_dir, NULL, &packs_to_drop, NULL, NULL, flags);
+
+ string_list_clear(&packs_to_drop, 0);
+
+ return result;
+}
+
+struct repack_info {
+ timestamp_t mtime;
+ uint32_t referenced_objects;
+ uint32_t pack_int_id;
+};
+
+static int compare_by_mtime(const void *a_, const void *b_)
+{
+ const struct repack_info *a, *b;
+
+ a = (const struct repack_info *)a_;
+ b = (const struct repack_info *)b_;
+
+ if (a->mtime < b->mtime)
+ return -1;
+ if (a->mtime > b->mtime)
+ return 1;
+ return 0;
+}
+
+static int want_included_pack(struct repository *r,
+ struct multi_pack_index *m,
+ int pack_kept_objects,
+ uint32_t pack_int_id)
+{
+ struct packed_git *p;
+ if (prepare_midx_pack(r, m, pack_int_id))
+ return 0;
+ p = m->packs[pack_int_id];
+ if (!pack_kept_objects && p->pack_keep)
+ return 0;
+ if (p->is_cruft)
+ return 0;
+ if (open_pack_index(p) || !p->num_objects)
+ return 0;
+ return 1;
+}
+
+static void fill_included_packs_all(struct repository *r,
+ struct multi_pack_index *m,
+ unsigned char *include_pack)
+{
+ uint32_t i;
+ int pack_kept_objects = 0;
+
+ repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
+
+ for (i = 0; i < m->num_packs; i++) {
+ if (!want_included_pack(r, m, pack_kept_objects, i))
+ continue;
+
+ include_pack[i] = 1;
+ }
+}
+
+static void fill_included_packs_batch(struct repository *r,
+ struct multi_pack_index *m,
+ unsigned char *include_pack,
+ size_t batch_size)
+{
+ uint32_t i;
+ size_t total_size;
+ struct repack_info *pack_info;
+ int pack_kept_objects = 0;
+
+ CALLOC_ARRAY(pack_info, m->num_packs);
+
+ repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
+
+ for (i = 0; i < m->num_packs; i++) {
+ pack_info[i].pack_int_id = i;
+
+ if (prepare_midx_pack(r, m, i))
+ continue;
+
+ pack_info[i].mtime = m->packs[i]->mtime;
+ }
+
+ for (i = 0; i < m->num_objects; i++) {
+ uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
+ pack_info[pack_int_id].referenced_objects++;
+ }
+
+ QSORT(pack_info, m->num_packs, compare_by_mtime);
+
+ total_size = 0;
+ for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
+ int pack_int_id = pack_info[i].pack_int_id;
+ struct packed_git *p = m->packs[pack_int_id];
+ size_t expected_size;
+
+ if (!want_included_pack(r, m, pack_kept_objects, pack_int_id))
+ continue;
+
+ expected_size = st_mult(p->pack_size,
+ pack_info[i].referenced_objects);
+ expected_size /= p->num_objects;
+
+ if (expected_size >= batch_size)
+ continue;
+
+ total_size += expected_size;
+ include_pack[pack_int_id] = 1;
+ }
+
+ free(pack_info);
+}
+
+int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
+{
+ int result = 0;
+ uint32_t i, packs_to_repack = 0;
+ unsigned char *include_pack;
+ struct child_process cmd = CHILD_PROCESS_INIT;
+ FILE *cmd_in;
+ struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
+
+ /*
+ * When updating the default for these configuration
+ * variables in builtin/repack.c, these must be adjusted
+ * to match.
+ */
+ int delta_base_offset = 1;
+ int use_delta_islands = 0;
+
+ if (!m)
+ return 0;
+
+ CALLOC_ARRAY(include_pack, m->num_packs);
+
+ if (batch_size)
+ fill_included_packs_batch(r, m, include_pack, batch_size);
+ else
+ fill_included_packs_all(r, m, include_pack);
+
+ for (i = 0; i < m->num_packs; i++) {
+ if (include_pack[i])
+ packs_to_repack++;
+ }
+ if (packs_to_repack <= 1)
+ goto cleanup;
+
+ repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
+ repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);
+
+ strvec_pushl(&cmd.args, "pack-objects", "--stdin-packs", "--non-empty",
+ NULL);
+
+ strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);
+
+ if (delta_base_offset)
+ strvec_push(&cmd.args, "--delta-base-offset");
+ if (use_delta_islands)
+ strvec_push(&cmd.args, "--delta-islands");
+
+ if (flags & MIDX_PROGRESS)
+ strvec_push(&cmd.args, "--progress");
+ else
+ strvec_push(&cmd.args, "-q");
+
+ cmd.git_cmd = 1;
+ cmd.in = cmd.out = -1;
+
+ if (start_command(&cmd)) {
+ error(_("could not start pack-objects"));
+ result = 1;
+ goto cleanup;
+ }
+
+ cmd_in = xfdopen(cmd.in, "w");
+ for (i = 0; i < m->num_packs; i++) {
+ struct packed_git *p = m->packs[i];
+ if (!p)
+ continue;
+
+ if (include_pack[i])
+ fprintf(cmd_in, "%s\n", pack_basename(p));
+ else
+ fprintf(cmd_in, "^%s\n", pack_basename(p));
+ }
+ fclose(cmd_in);
+
+ if (finish_command(&cmd)) {
+ error(_("could not finish pack-objects"));
+ result = 1;
+ goto cleanup;
+ }
+
+ result = write_midx_internal(object_dir, NULL, NULL, NULL, NULL, flags);
+
+cleanup:
+ free(include_pack);
+ return result;
+}
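A minimal standalone sketch of the expected-size estimate used by fill_included_packs_batch() above: each candidate pack's on-disk size is scaled by the fraction of its objects still referenced from the MIDX, and selection stops once the running total reaches the requested batch size. The helper name and the example numbers below are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: mirrors the expected_size computation in
 * fill_included_packs_batch(), which also skips any pack whose
 * estimated contribution alone already reaches the batch size.
 */
static size_t estimate_repacked_size(size_t pack_size,
                                     uint32_t referenced_objects,
                                     uint32_t num_objects)
{
	/* pack_size * referenced_objects / num_objects */
	return (size_t)((uint64_t)pack_size * referenced_objects / num_objects);
}

int main(void)
{
	/* hypothetical pack: 40 MiB on disk, 10000 objects, 2500 referenced */
	size_t expected = estimate_repacked_size((size_t)40 << 20, 2500, 10000);
	printf("expected contribution: %zu bytes\n", expected);
	return 0;
}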
diff --git a/midx.c b/midx.c
index 41521e019c..ae3b49166c 100644
--- a/midx.c
+++ b/midx.c
@@ -1,52 +1,22 @@
#include "git-compat-util.h"
-#include "abspath.h"
#include "config.h"
-#include "csum-file.h"
#include "dir.h"
-#include "gettext.h"
#include "hex.h"
-#include "lockfile.h"
#include "packfile.h"
#include "object-file.h"
-#include "object-store-ll.h"
#include "hash-lookup.h"
#include "midx.h"
#include "progress.h"
#include "trace2.h"
-#include "run-command.h"
-#include "repository.h"
#include "chunk-format.h"
-#include "pack.h"
#include "pack-bitmap.h"
-#include "refs.h"
-#include "revision.h"
-#include "list-objects.h"
#include "pack-revindex.h"
-#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
-#define MIDX_VERSION 1
-#define MIDX_BYTE_FILE_VERSION 4
-#define MIDX_BYTE_HASH_VERSION 5
-#define MIDX_BYTE_NUM_CHUNKS 6
-#define MIDX_BYTE_NUM_PACKS 8
-#define MIDX_HEADER_SIZE 12
-#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
-
-#define MIDX_CHUNK_ALIGNMENT 4
-#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
-#define MIDX_CHUNKID_BITMAPPEDPACKS 0x42544d50 /* "BTMP" */
-#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
-#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
-#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
-#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
-#define MIDX_CHUNKID_REVINDEX 0x52494458 /* "RIDX" */
-#define MIDX_CHUNK_FANOUT_SIZE (sizeof(uint32_t) * 256)
-#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
-#define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
-#define MIDX_CHUNK_BITMAPPED_PACKS_WIDTH (2 * sizeof(uint32_t))
-#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
-
-#define PACK_EXPIRED UINT_MAX
+int midx_checksum_valid(struct multi_pack_index *m);
+void clear_midx_files_ext(const char *object_dir, const char *ext,
+ unsigned char *keep_hash);
+int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+ const char *idx_name);
const unsigned char *get_midx_checksum(struct multi_pack_index *m)
{
@@ -115,6 +85,8 @@ static int midx_read_object_offsets(const unsigned char *chunk_start,
return 0;
}
+#define MIDX_MIN_SIZE (MIDX_HEADER_SIZE + the_hash_algo->rawsz)
+
struct multi_pack_index *load_multi_pack_index(const char *object_dir, int local)
{
struct multi_pack_index *m = NULL;
@@ -294,6 +266,8 @@ int prepare_midx_pack(struct repository *r, struct multi_pack_index *m, uint32_t
return 0;
}
+#define MIDX_CHUNK_BITMAPPED_PACKS_WIDTH (2 * sizeof(uint32_t))
+
int nth_bitmapped_pack(struct repository *r, struct multi_pack_index *m,
struct bitmapped_pack *bp, uint32_t pack_int_id)
{
@@ -400,8 +374,8 @@ int fill_midx_entry(struct repository *r,
}
/* Match "foo.idx" against either "foo.pack" _or_ "foo.idx". */
-static int cmp_idx_or_pack_name(const char *idx_or_pack_name,
- const char *idx_name)
+int cmp_idx_or_pack_name(const char *idx_or_pack_name,
+ const char *idx_name)
{
/* Skip past any initial matching prefix. */
while (*idx_name && *idx_name == *idx_or_pack_name) {
@@ -508,1262 +482,11 @@ int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, i
return 0;
}
-static size_t write_midx_header(struct hashfile *f,
- unsigned char num_chunks,
- uint32_t num_packs)
-{
- hashwrite_be32(f, MIDX_SIGNATURE);
- hashwrite_u8(f, MIDX_VERSION);
- hashwrite_u8(f, oid_version(the_hash_algo));
- hashwrite_u8(f, num_chunks);
- hashwrite_u8(f, 0); /* unused */
- hashwrite_be32(f, num_packs);
-
- return MIDX_HEADER_SIZE;
-}
-
-#define BITMAP_POS_UNKNOWN (~((uint32_t)0))
-
-struct pack_info {
- uint32_t orig_pack_int_id;
- char *pack_name;
- struct packed_git *p;
-
- uint32_t bitmap_pos;
- uint32_t bitmap_nr;
-
- unsigned expired : 1;
-};
-
-static void fill_pack_info(struct pack_info *info,
- struct packed_git *p, const char *pack_name,
- uint32_t orig_pack_int_id)
-{
- memset(info, 0, sizeof(struct pack_info));
-
- info->orig_pack_int_id = orig_pack_int_id;
- info->pack_name = xstrdup(pack_name);
- info->p = p;
- info->bitmap_pos = BITMAP_POS_UNKNOWN;
-}
-
-static int pack_info_compare(const void *_a, const void *_b)
-{
- struct pack_info *a = (struct pack_info *)_a;
- struct pack_info *b = (struct pack_info *)_b;
- return strcmp(a->pack_name, b->pack_name);
-}
-
-static int idx_or_pack_name_cmp(const void *_va, const void *_vb)
-{
- const char *pack_name = _va;
- const struct pack_info *compar = _vb;
-
- return cmp_idx_or_pack_name(pack_name, compar->pack_name);
-}
-
-struct write_midx_context {
- struct pack_info *info;
- size_t nr;
- size_t alloc;
- struct multi_pack_index *m;
- struct progress *progress;
- unsigned pack_paths_checked;
-
- struct pack_midx_entry *entries;
- size_t entries_nr;
-
- uint32_t *pack_perm;
- uint32_t *pack_order;
- unsigned large_offsets_needed:1;
- uint32_t num_large_offsets;
-
- int preferred_pack_idx;
-
- struct string_list *to_include;
-};
-
-static void add_pack_to_midx(const char *full_path, size_t full_path_len,
- const char *file_name, void *data)
-{
- struct write_midx_context *ctx = data;
- struct packed_git *p;
-
- if (ends_with(file_name, ".idx")) {
- display_progress(ctx->progress, ++ctx->pack_paths_checked);
- /*
- * Note that at most one of ctx->m and ctx->to_include are set,
- * so we are testing midx_contains_pack() and
- * string_list_has_string() independently (guarded by the
- * appropriate NULL checks).
- *
- * We could support passing to_include while reusing an existing
- * MIDX, but don't currently since the reuse process drags
- * forward all packs from an existing MIDX (without checking
- * whether or not they appear in the to_include list).
- *
- * If we added support for that, these next two conditional
- * should be performed independently (likely checking
- * to_include before the existing MIDX).
- */
- if (ctx->m && midx_contains_pack(ctx->m, file_name))
- return;
- else if (ctx->to_include &&
- !string_list_has_string(ctx->to_include, file_name))
- return;
-
- ALLOC_GROW(ctx->info, ctx->nr + 1, ctx->alloc);
-
- p = add_packed_git(full_path, full_path_len, 0);
- if (!p) {
- warning(_("failed to add packfile '%s'"),
- full_path);
- return;
- }
-
- if (open_pack_index(p)) {
- warning(_("failed to open pack-index '%s'"),
- full_path);
- close_pack(p);
- free(p);
- return;
- }
-
- fill_pack_info(&ctx->info[ctx->nr], p, file_name, ctx->nr);
- ctx->nr++;
- }
-}
-
-struct pack_midx_entry {
- struct object_id oid;
- uint32_t pack_int_id;
- time_t pack_mtime;
- uint64_t offset;
- unsigned preferred : 1;
-};
-
-static int midx_oid_compare(const void *_a, const void *_b)
-{
- const struct pack_midx_entry *a = (const struct pack_midx_entry *)_a;
- const struct pack_midx_entry *b = (const struct pack_midx_entry *)_b;
- int cmp = oidcmp(&a->oid, &b->oid);
-
- if (cmp)
- return cmp;
-
- /* Sort objects in a preferred pack first when multiple copies exist. */
- if (a->preferred > b->preferred)
- return -1;
- if (a->preferred < b->preferred)
- return 1;
-
- if (a->pack_mtime > b->pack_mtime)
- return -1;
- else if (a->pack_mtime < b->pack_mtime)
- return 1;
-
- return a->pack_int_id - b->pack_int_id;
-}
-
-static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
- struct pack_midx_entry *e,
- uint32_t pos)
-{
- if (pos >= m->num_objects)
- return 1;
-
- nth_midxed_object_oid(&e->oid, m, pos);
- e->pack_int_id = nth_midxed_pack_int_id(m, pos);
- e->offset = nth_midxed_offset(m, pos);
-
- /* consider objects in midx to be from "old" packs */
- e->pack_mtime = 0;
- return 0;
-}
-
-static void fill_pack_entry(uint32_t pack_int_id,
- struct packed_git *p,
- uint32_t cur_object,
- struct pack_midx_entry *entry,
- int preferred)
-{
- if (nth_packed_object_id(&entry->oid, p, cur_object) < 0)
- die(_("failed to locate object %d in packfile"), cur_object);
-
- entry->pack_int_id = pack_int_id;
- entry->pack_mtime = p->mtime;
-
- entry->offset = nth_packed_object_offset(p, cur_object);
- entry->preferred = !!preferred;
-}
-
-struct midx_fanout {
- struct pack_midx_entry *entries;
- size_t nr, alloc;
-};
-
-static void midx_fanout_grow(struct midx_fanout *fanout, size_t nr)
-{
- if (nr < fanout->nr)
- BUG("negative growth in midx_fanout_grow() (%"PRIuMAX" < %"PRIuMAX")",
- (uintmax_t)nr, (uintmax_t)fanout->nr);
- ALLOC_GROW(fanout->entries, nr, fanout->alloc);
-}
-
-static void midx_fanout_sort(struct midx_fanout *fanout)
-{
- QSORT(fanout->entries, fanout->nr, midx_oid_compare);
-}
-
-static void midx_fanout_add_midx_fanout(struct midx_fanout *fanout,
- struct multi_pack_index *m,
- uint32_t cur_fanout,
- int preferred_pack)
-{
- uint32_t start = 0, end;
- uint32_t cur_object;
-
- if (cur_fanout)
- start = ntohl(m->chunk_oid_fanout[cur_fanout - 1]);
- end = ntohl(m->chunk_oid_fanout[cur_fanout]);
-
- for (cur_object = start; cur_object < end; cur_object++) {
- if ((preferred_pack > -1) &&
- (preferred_pack == nth_midxed_pack_int_id(m, cur_object))) {
- /*
- * Objects from preferred packs are added
- * separately.
- */
- continue;
- }
-
- midx_fanout_grow(fanout, fanout->nr + 1);
- nth_midxed_pack_midx_entry(m,
- &fanout->entries[fanout->nr],
- cur_object);
- fanout->entries[fanout->nr].preferred = 0;
- fanout->nr++;
- }
-}
-
-static void midx_fanout_add_pack_fanout(struct midx_fanout *fanout,
- struct pack_info *info,
- uint32_t cur_pack,
- int preferred,
- uint32_t cur_fanout)
-{
- struct packed_git *pack = info[cur_pack].p;
- uint32_t start = 0, end;
- uint32_t cur_object;
-
- if (cur_fanout)
- start = get_pack_fanout(pack, cur_fanout - 1);
- end = get_pack_fanout(pack, cur_fanout);
-
- for (cur_object = start; cur_object < end; cur_object++) {
- midx_fanout_grow(fanout, fanout->nr + 1);
- fill_pack_entry(cur_pack,
- info[cur_pack].p,
- cur_object,
- &fanout->entries[fanout->nr],
- preferred);
- fanout->nr++;
- }
-}
-
-/*
- * It is possible to artificially get into a state where there are many
- * duplicate copies of objects. That can create high memory pressure if
- * we are to create a list of all objects before de-duplication. To reduce
- * this memory pressure without a significant performance drop, automatically
- * group objects by the first byte of their object id. Use the IDX fanout
- * tables to group the data, copy to a local array, then sort.
- *
- * Copy only the de-duplicated entries (selected by most-recent modified time
- * of a packfile containing the object).
- */
-static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
- struct pack_info *info,
- uint32_t nr_packs,
- size_t *nr_objects,
- int preferred_pack)
-{
- uint32_t cur_fanout, cur_pack, cur_object;
- size_t alloc_objects, total_objects = 0;
- struct midx_fanout fanout = { 0 };
- struct pack_midx_entry *deduplicated_entries = NULL;
- uint32_t start_pack = m ? m->num_packs : 0;
-
- for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
- total_objects = st_add(total_objects,
- info[cur_pack].p->num_objects);
-
- /*
- * As we de-duplicate by fanout value, we expect the fanout
- * slices to be evenly distributed, with some noise. Hence,
- * allocate slightly more than one 256th.
- */
- alloc_objects = fanout.alloc = total_objects > 3200 ? total_objects / 200 : 16;
-
- ALLOC_ARRAY(fanout.entries, fanout.alloc);
- ALLOC_ARRAY(deduplicated_entries, alloc_objects);
- *nr_objects = 0;
-
- for (cur_fanout = 0; cur_fanout < 256; cur_fanout++) {
- fanout.nr = 0;
-
- if (m)
- midx_fanout_add_midx_fanout(&fanout, m, cur_fanout,
- preferred_pack);
-
- for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++) {
- int preferred = cur_pack == preferred_pack;
- midx_fanout_add_pack_fanout(&fanout,
- info, cur_pack,
- preferred, cur_fanout);
- }
-
- if (-1 < preferred_pack && preferred_pack < start_pack)
- midx_fanout_add_pack_fanout(&fanout, info,
- preferred_pack, 1,
- cur_fanout);
-
- midx_fanout_sort(&fanout);
-
- /*
- * The batch is now sorted by OID and then mtime (descending).
- * Take only the first duplicate.
- */
- for (cur_object = 0; cur_object < fanout.nr; cur_object++) {
- if (cur_object && oideq(&fanout.entries[cur_object - 1].oid,
- &fanout.entries[cur_object].oid))
- continue;
-
- ALLOC_GROW(deduplicated_entries, st_add(*nr_objects, 1),
- alloc_objects);
- memcpy(&deduplicated_entries[*nr_objects],
- &fanout.entries[cur_object],
- sizeof(struct pack_midx_entry));
- (*nr_objects)++;
- }
- }
-
- free(fanout.entries);
- return deduplicated_entries;
-}
-
-static int write_midx_pack_names(struct hashfile *f, void *data)
-{
- struct write_midx_context *ctx = data;
- uint32_t i;
- unsigned char padding[MIDX_CHUNK_ALIGNMENT];
- size_t written = 0;
-
- for (i = 0; i < ctx->nr; i++) {
- size_t writelen;
-
- if (ctx->info[i].expired)
- continue;
-
- if (i && strcmp(ctx->info[i].pack_name, ctx->info[i - 1].pack_name) <= 0)
- BUG("incorrect pack-file order: %s before %s",
- ctx->info[i - 1].pack_name,
- ctx->info[i].pack_name);
-
- writelen = strlen(ctx->info[i].pack_name) + 1;
- hashwrite(f, ctx->info[i].pack_name, writelen);
- written += writelen;
- }
-
- /* add padding to be aligned */
- i = MIDX_CHUNK_ALIGNMENT - (written % MIDX_CHUNK_ALIGNMENT);
- if (i < MIDX_CHUNK_ALIGNMENT) {
- memset(padding, 0, sizeof(padding));
- hashwrite(f, padding, i);
- }
-
- return 0;
-}
-
-static int write_midx_bitmapped_packs(struct hashfile *f, void *data)
-{
- struct write_midx_context *ctx = data;
- size_t i;
-
- for (i = 0; i < ctx->nr; i++) {
- struct pack_info *pack = &ctx->info[i];
- if (pack->expired)
- continue;
-
- if (pack->bitmap_pos == BITMAP_POS_UNKNOWN && pack->bitmap_nr)
- BUG("pack '%s' has no bitmap position, but has %d bitmapped object(s)",
- pack->pack_name, pack->bitmap_nr);
-
- hashwrite_be32(f, pack->bitmap_pos);
- hashwrite_be32(f, pack->bitmap_nr);
- }
- return 0;
-}
-
-static int write_midx_oid_fanout(struct hashfile *f,
- void *data)
-{
- struct write_midx_context *ctx = data;
- struct pack_midx_entry *list = ctx->entries;
- struct pack_midx_entry *last = ctx->entries + ctx->entries_nr;
- uint32_t count = 0;
- uint32_t i;
-
- /*
- * Write the first-level table (the list is sorted,
- * but we use a 256-entry lookup to be able to avoid
- * having to do eight extra binary search iterations).
- */
- for (i = 0; i < 256; i++) {
- struct pack_midx_entry *next = list;
-
- while (next < last && next->oid.hash[0] == i) {
- count++;
- next++;
- }
-
- hashwrite_be32(f, count);
- list = next;
- }
-
- return 0;
-}
-
-static int write_midx_oid_lookup(struct hashfile *f,
- void *data)
-{
- struct write_midx_context *ctx = data;
- unsigned char hash_len = the_hash_algo->rawsz;
- struct pack_midx_entry *list = ctx->entries;
- uint32_t i;
-
- for (i = 0; i < ctx->entries_nr; i++) {
- struct pack_midx_entry *obj = list++;
-
- if (i < ctx->entries_nr - 1) {
- struct pack_midx_entry *next = list;
- if (oidcmp(&obj->oid, &next->oid) >= 0)
- BUG("OIDs not in order: %s >= %s",
- oid_to_hex(&obj->oid),
- oid_to_hex(&next->oid));
- }
-
- hashwrite(f, obj->oid.hash, (int)hash_len);
- }
-
- return 0;
-}
-
-static int write_midx_object_offsets(struct hashfile *f,
- void *data)
-{
- struct write_midx_context *ctx = data;
- struct pack_midx_entry *list = ctx->entries;
- uint32_t i, nr_large_offset = 0;
-
- for (i = 0; i < ctx->entries_nr; i++) {
- struct pack_midx_entry *obj = list++;
-
- if (ctx->pack_perm[obj->pack_int_id] == PACK_EXPIRED)
- BUG("object %s is in an expired pack with int-id %d",
- oid_to_hex(&obj->oid),
- obj->pack_int_id);
-
- hashwrite_be32(f, ctx->pack_perm[obj->pack_int_id]);
-
- if (ctx->large_offsets_needed && obj->offset >> 31)
- hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
- else if (!ctx->large_offsets_needed && obj->offset >> 32)
- BUG("object %s requires a large offset (%"PRIx64") but the MIDX is not writing large offsets!",
- oid_to_hex(&obj->oid),
- obj->offset);
- else
- hashwrite_be32(f, (uint32_t)obj->offset);
- }
-
- return 0;
-}
-
-static int write_midx_large_offsets(struct hashfile *f,
- void *data)
-{
- struct write_midx_context *ctx = data;
- struct pack_midx_entry *list = ctx->entries;
- struct pack_midx_entry *end = ctx->entries + ctx->entries_nr;
- uint32_t nr_large_offset = ctx->num_large_offsets;
-
- while (nr_large_offset) {
- struct pack_midx_entry *obj;
- uint64_t offset;
-
- if (list >= end)
- BUG("too many large-offset objects");
-
- obj = list++;
- offset = obj->offset;
-
- if (!(offset >> 31))
- continue;
-
- hashwrite_be64(f, offset);
-
- nr_large_offset--;
- }
-
- return 0;
-}
-
-static int write_midx_revindex(struct hashfile *f,
- void *data)
-{
- struct write_midx_context *ctx = data;
- uint32_t i;
-
- for (i = 0; i < ctx->entries_nr; i++)
- hashwrite_be32(f, ctx->pack_order[i]);
-
- return 0;
-}
-
-struct midx_pack_order_data {
- uint32_t nr;
- uint32_t pack;
- off_t offset;
-};
-
-static int midx_pack_order_cmp(const void *va, const void *vb)
-{
- const struct midx_pack_order_data *a = va, *b = vb;
- if (a->pack < b->pack)
- return -1;
- else if (a->pack > b->pack)
- return 1;
- else if (a->offset < b->offset)
- return -1;
- else if (a->offset > b->offset)
- return 1;
- else
- return 0;
-}
-
-static uint32_t *midx_pack_order(struct write_midx_context *ctx)
-{
- struct midx_pack_order_data *data;
- uint32_t *pack_order;
- uint32_t i;
-
- trace2_region_enter("midx", "midx_pack_order", the_repository);
-
- ALLOC_ARRAY(data, ctx->entries_nr);
- for (i = 0; i < ctx->entries_nr; i++) {
- struct pack_midx_entry *e = &ctx->entries[i];
- data[i].nr = i;
- data[i].pack = ctx->pack_perm[e->pack_int_id];
- if (!e->preferred)
- data[i].pack |= (1U << 31);
- data[i].offset = e->offset;
- }
-
- QSORT(data, ctx->entries_nr, midx_pack_order_cmp);
-
- ALLOC_ARRAY(pack_order, ctx->entries_nr);
- for (i = 0; i < ctx->entries_nr; i++) {
- struct pack_midx_entry *e = &ctx->entries[data[i].nr];
- struct pack_info *pack = &ctx->info[ctx->pack_perm[e->pack_int_id]];
- if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
- pack->bitmap_pos = i;
- pack->bitmap_nr++;
- pack_order[i] = data[i].nr;
- }
- for (i = 0; i < ctx->nr; i++) {
- struct pack_info *pack = &ctx->info[ctx->pack_perm[i]];
- if (pack->bitmap_pos == BITMAP_POS_UNKNOWN)
- pack->bitmap_pos = 0;
- }
- free(data);
-
- trace2_region_leave("midx", "midx_pack_order", the_repository);
-
- return pack_order;
-}
-
-static void write_midx_reverse_index(char *midx_name, unsigned char *midx_hash,
- struct write_midx_context *ctx)
-{
- struct strbuf buf = STRBUF_INIT;
- const char *tmp_file;
-
- trace2_region_enter("midx", "write_midx_reverse_index", the_repository);
-
- strbuf_addf(&buf, "%s-%s.rev", midx_name, hash_to_hex(midx_hash));
-
- tmp_file = write_rev_file_order(NULL, ctx->pack_order, ctx->entries_nr,
- midx_hash, WRITE_REV);
-
- if (finalize_object_file(tmp_file, buf.buf))
- die(_("cannot store reverse index file"));
-
- strbuf_release(&buf);
-
- trace2_region_leave("midx", "write_midx_reverse_index", the_repository);
-}
-
-static void clear_midx_files_ext(const char *object_dir, const char *ext,
- unsigned char *keep_hash);
-
-static int midx_checksum_valid(struct multi_pack_index *m)
+int midx_checksum_valid(struct multi_pack_index *m)
{
return hashfile_checksum_valid(m->data, m->data_len);
}
-static void prepare_midx_packing_data(struct packing_data *pdata,
- struct write_midx_context *ctx)
-{
- uint32_t i;
-
- trace2_region_enter("midx", "prepare_midx_packing_data", the_repository);
-
- memset(pdata, 0, sizeof(struct packing_data));
- prepare_packing_data(the_repository, pdata);
-
- for (i = 0; i < ctx->entries_nr; i++) {
- struct pack_midx_entry *from = &ctx->entries[ctx->pack_order[i]];
- struct object_entry *to = packlist_alloc(pdata, &from->oid);
-
- oe_set_in_pack(pdata, to,
- ctx->info[ctx->pack_perm[from->pack_int_id]].p);
- }
-
- trace2_region_leave("midx", "prepare_midx_packing_data", the_repository);
-}
-
-static int add_ref_to_pending(const char *refname,
- const struct object_id *oid,
- int flag, void *cb_data)
-{
- struct rev_info *revs = (struct rev_info*)cb_data;
- struct object_id peeled;
- struct object *object;
-
- if ((flag & REF_ISSYMREF) && (flag & REF_ISBROKEN)) {
- warning("symbolic ref is dangling: %s", refname);
- return 0;
- }
-
- if (!peel_iterated_oid(oid, &peeled))
- oid = &peeled;
-
- object = parse_object_or_die(oid, refname);
- if (object->type != OBJ_COMMIT)
- return 0;
-
- add_pending_object(revs, object, "");
- if (bitmap_is_preferred_refname(revs->repo, refname))
- object->flags |= NEEDS_BITMAP;
- return 0;
-}
-
-struct bitmap_commit_cb {
- struct commit **commits;
- size_t commits_nr, commits_alloc;
-
- struct write_midx_context *ctx;
-};
-
-static const struct object_id *bitmap_oid_access(size_t index,
- const void *_entries)
-{
- const struct pack_midx_entry *entries = _entries;
- return &entries[index].oid;
-}
-
-static void bitmap_show_commit(struct commit *commit, void *_data)
-{
- struct bitmap_commit_cb *data = _data;
- int pos = oid_pos(&commit->object.oid, data->ctx->entries,
- data->ctx->entries_nr,
- bitmap_oid_access);
- if (pos < 0)
- return;
-
- ALLOC_GROW(data->commits, data->commits_nr + 1, data->commits_alloc);
- data->commits[data->commits_nr++] = commit;
-}
-
-static int read_refs_snapshot(const char *refs_snapshot,
- struct rev_info *revs)
-{
- struct strbuf buf = STRBUF_INIT;
- struct object_id oid;
- FILE *f = xfopen(refs_snapshot, "r");
-
- while (strbuf_getline(&buf, f) != EOF) {
- struct object *object;
- int preferred = 0;
- char *hex = buf.buf;
- const char *end = NULL;
-
- if (buf.len && *buf.buf == '+') {
- preferred = 1;
- hex = &buf.buf[1];
- }
-
- if (parse_oid_hex(hex, &oid, &end) < 0)
- die(_("could not parse line: %s"), buf.buf);
- if (*end)
- die(_("malformed line: %s"), buf.buf);
-
- object = parse_object_or_die(&oid, NULL);
- if (preferred)
- object->flags |= NEEDS_BITMAP;
-
- add_pending_object(revs, object, "");
- }
-
- fclose(f);
- strbuf_release(&buf);
- return 0;
-}
-
-static struct commit **find_commits_for_midx_bitmap(uint32_t *indexed_commits_nr_p,
- const char *refs_snapshot,
- struct write_midx_context *ctx)
-{
- struct rev_info revs;
- struct bitmap_commit_cb cb = {0};
-
- trace2_region_enter("midx", "find_commits_for_midx_bitmap",
- the_repository);
-
- cb.ctx = ctx;
-
- repo_init_revisions(the_repository, &revs, NULL);
- if (refs_snapshot) {
- read_refs_snapshot(refs_snapshot, &revs);
- } else {
- setup_revisions(0, NULL, &revs, NULL);
- for_each_ref(add_ref_to_pending, &revs);
- }
-
- /*
- * Skipping promisor objects here is intentional, since it only excludes
- * them from the list of reachable commits that we want to select from
- * when computing the selection of MIDX'd commits to receive bitmaps.
- *
- * Reachability bitmaps do require that their objects be closed under
- * reachability, but fetching any objects missing from promisors at this
- * point is too late. But, if one of those objects can be reached from
- * an another object that is included in the bitmap, then we will
- * complain later that we don't have reachability closure (and fail
- * appropriately).
- */
- fetch_if_missing = 0;
- revs.exclude_promisor_objects = 1;
-
- if (prepare_revision_walk(&revs))
- die(_("revision walk setup failed"));
-
- traverse_commit_list(&revs, bitmap_show_commit, NULL, &cb);
- if (indexed_commits_nr_p)
- *indexed_commits_nr_p = cb.commits_nr;
-
- release_revisions(&revs);
-
- trace2_region_leave("midx", "find_commits_for_midx_bitmap",
- the_repository);
-
- return cb.commits;
-}
-
-static int write_midx_bitmap(const char *midx_name,
- const unsigned char *midx_hash,
- struct packing_data *pdata,
- struct commit **commits,
- uint32_t commits_nr,
- uint32_t *pack_order,
- unsigned flags)
-{
- int ret, i;
- uint16_t options = 0;
- struct pack_idx_entry **index;
- char *bitmap_name = xstrfmt("%s-%s.bitmap", midx_name,
- hash_to_hex(midx_hash));
-
- trace2_region_enter("midx", "write_midx_bitmap", the_repository);
-
- if (flags & MIDX_WRITE_BITMAP_HASH_CACHE)
- options |= BITMAP_OPT_HASH_CACHE;
-
- if (flags & MIDX_WRITE_BITMAP_LOOKUP_TABLE)
- options |= BITMAP_OPT_LOOKUP_TABLE;
-
- /*
- * Build the MIDX-order index based on pdata.objects (which is already
- * in MIDX order; c.f., 'midx_pack_order_cmp()' for the definition of
- * this order).
- */
- ALLOC_ARRAY(index, pdata->nr_objects);
- for (i = 0; i < pdata->nr_objects; i++)
- index[i] = &pdata->objects[i].idx;
-
- bitmap_writer_show_progress(flags & MIDX_PROGRESS);
- bitmap_writer_build_type_index(pdata, index, pdata->nr_objects);
-
- /*
- * bitmap_writer_finish expects objects in lex order, but pack_order
- * gives us exactly that. use it directly instead of re-sorting the
- * array.
- *
- * This changes the order of objects in 'index' between
- * bitmap_writer_build_type_index and bitmap_writer_finish.
- *
- * The same re-ordering takes place in the single-pack bitmap code via
- * write_idx_file(), which is called by finish_tmp_packfile(), which
- * happens between bitmap_writer_build_type_index() and
- * bitmap_writer_finish().
- */
- for (i = 0; i < pdata->nr_objects; i++)
- index[pack_order[i]] = &pdata->objects[i].idx;
-
- bitmap_writer_select_commits(commits, commits_nr, -1);
- ret = bitmap_writer_build(pdata);
- if (ret < 0)
- goto cleanup;
-
- bitmap_writer_set_checksum(midx_hash);
- bitmap_writer_finish(index, pdata->nr_objects, bitmap_name, options);
-
-cleanup:
- free(index);
- free(bitmap_name);
-
- trace2_region_leave("midx", "write_midx_bitmap", the_repository);
-
- return ret;
-}
-
-static struct multi_pack_index *lookup_multi_pack_index(struct repository *r,
- const char *object_dir)
-{
- struct multi_pack_index *result = NULL;
- struct multi_pack_index *cur;
- char *obj_dir_real = real_pathdup(object_dir, 1);
- struct strbuf cur_path_real = STRBUF_INIT;
-
- /* Ensure the given object_dir is local, or a known alternate. */
- find_odb(r, obj_dir_real);
-
- for (cur = get_multi_pack_index(r); cur; cur = cur->next) {
- strbuf_realpath(&cur_path_real, cur->object_dir, 1);
- if (!strcmp(obj_dir_real, cur_path_real.buf)) {
- result = cur;
- goto cleanup;
- }
- }
-
-cleanup:
- free(obj_dir_real);
- strbuf_release(&cur_path_real);
- return result;
-}
-
-static int write_midx_internal(const char *object_dir,
- struct string_list *packs_to_include,
- struct string_list *packs_to_drop,
- const char *preferred_pack_name,
- const char *refs_snapshot,
- unsigned flags)
-{
- struct strbuf midx_name = STRBUF_INIT;
- unsigned char midx_hash[GIT_MAX_RAWSZ];
- uint32_t i;
- struct hashfile *f = NULL;
- struct lock_file lk;
- struct write_midx_context ctx = { 0 };
- int bitmapped_packs_concat_len = 0;
- int pack_name_concat_len = 0;
- int dropped_packs = 0;
- int result = 0;
- struct chunkfile *cf;
-
- trace2_region_enter("midx", "write_midx_internal", the_repository);
-
- get_midx_filename(&midx_name, object_dir);
- if (safe_create_leading_directories(midx_name.buf))
- die_errno(_("unable to create leading directories of %s"),
- midx_name.buf);
-
- if (!packs_to_include) {
- /*
- * Only reference an existing MIDX when not filtering which
- * packs to include, since all packs and objects are copied
- * blindly from an existing MIDX if one is present.
- */
- ctx.m = lookup_multi_pack_index(the_repository, object_dir);
- }
-
- if (ctx.m && !midx_checksum_valid(ctx.m)) {
- warning(_("ignoring existing multi-pack-index; checksum mismatch"));
- ctx.m = NULL;
- }
-
- ctx.nr = 0;
- ctx.alloc = ctx.m ? ctx.m->num_packs : 16;
- ctx.info = NULL;
- ALLOC_ARRAY(ctx.info, ctx.alloc);
-
- if (ctx.m) {
- for (i = 0; i < ctx.m->num_packs; i++) {
- ALLOC_GROW(ctx.info, ctx.nr + 1, ctx.alloc);
-
- if (flags & MIDX_WRITE_REV_INDEX) {
- /*
- * If generating a reverse index, need to have
- * packed_git's loaded to compare their
- * mtimes and object count.
- */
- if (prepare_midx_pack(the_repository, ctx.m, i)) {
- error(_("could not load pack"));
- result = 1;
- goto cleanup;
- }
-
- if (open_pack_index(ctx.m->packs[i]))
- die(_("could not open index for %s"),
- ctx.m->packs[i]->pack_name);
- }
-
- fill_pack_info(&ctx.info[ctx.nr++], ctx.m->packs[i],
- ctx.m->pack_names[i], i);
- }
- }
-
- ctx.pack_paths_checked = 0;
- if (flags & MIDX_PROGRESS)
- ctx.progress = start_delayed_progress(_("Adding packfiles to multi-pack-index"), 0);
- else
- ctx.progress = NULL;
-
- ctx.to_include = packs_to_include;
-
- for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &ctx);
- stop_progress(&ctx.progress);
-
- if ((ctx.m && ctx.nr == ctx.m->num_packs) &&
- !(packs_to_include || packs_to_drop)) {
- struct bitmap_index *bitmap_git;
- int bitmap_exists;
- int want_bitmap = flags & MIDX_WRITE_BITMAP;
-
- bitmap_git = prepare_midx_bitmap_git(ctx.m);
- bitmap_exists = bitmap_git && bitmap_is_midx(bitmap_git);
- free_bitmap_index(bitmap_git);
-
- if (bitmap_exists || !want_bitmap) {
- /*
- * The correct MIDX already exists, and so does a
- * corresponding bitmap (or one wasn't requested).
- */
- if (!want_bitmap)
- clear_midx_files_ext(object_dir, ".bitmap",
- NULL);
- goto cleanup;
- }
- }
-
- if (preferred_pack_name) {
- ctx.preferred_pack_idx = -1;
-
- for (i = 0; i < ctx.nr; i++) {
- if (!cmp_idx_or_pack_name(preferred_pack_name,
- ctx.info[i].pack_name)) {
- ctx.preferred_pack_idx = i;
- break;
- }
- }
-
- if (ctx.preferred_pack_idx == -1)
- warning(_("unknown preferred pack: '%s'"),
- preferred_pack_name);
- } else if (ctx.nr &&
- (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP))) {
- struct packed_git *oldest = ctx.info[ctx.preferred_pack_idx].p;
- ctx.preferred_pack_idx = 0;
-
- if (packs_to_drop && packs_to_drop->nr)
- BUG("cannot write a MIDX bitmap during expiration");
-
- /*
- * set a preferred pack when writing a bitmap to ensure that
- * the pack from which the first object is selected in pseudo
- * pack-order has all of its objects selected from that pack
- * (and not another pack containing a duplicate)
- */
- for (i = 1; i < ctx.nr; i++) {
- struct packed_git *p = ctx.info[i].p;
-
- if (!oldest->num_objects || p->mtime < oldest->mtime) {
- oldest = p;
- ctx.preferred_pack_idx = i;
- }
- }
-
- if (!oldest->num_objects) {
- /*
- * If all packs are empty; unset the preferred index.
- * This is acceptable since there will be no duplicate
- * objects to resolve, so the preferred value doesn't
- * matter.
- */
- ctx.preferred_pack_idx = -1;
- }
- } else {
- /*
- * otherwise don't mark any pack as preferred to avoid
- * interfering with expiration logic below
- */
- ctx.preferred_pack_idx = -1;
- }
-
- if (ctx.preferred_pack_idx > -1) {
- struct packed_git *preferred = ctx.info[ctx.preferred_pack_idx].p;
- if (!preferred->num_objects) {
- error(_("cannot select preferred pack %s with no objects"),
- preferred->pack_name);
- result = 1;
- goto cleanup;
- }
- }
-
- ctx.entries = get_sorted_entries(ctx.m, ctx.info, ctx.nr, &ctx.entries_nr,
- ctx.preferred_pack_idx);
-
- ctx.large_offsets_needed = 0;
- for (i = 0; i < ctx.entries_nr; i++) {
- if (ctx.entries[i].offset > 0x7fffffff)
- ctx.num_large_offsets++;
- if (ctx.entries[i].offset > 0xffffffff)
- ctx.large_offsets_needed = 1;
- }
-
- QSORT(ctx.info, ctx.nr, pack_info_compare);
-
- if (packs_to_drop && packs_to_drop->nr) {
- int drop_index = 0;
- int missing_drops = 0;
-
- for (i = 0; i < ctx.nr && drop_index < packs_to_drop->nr; i++) {
- int cmp = strcmp(ctx.info[i].pack_name,
- packs_to_drop->items[drop_index].string);
-
- if (!cmp) {
- drop_index++;
- ctx.info[i].expired = 1;
- } else if (cmp > 0) {
- error(_("did not see pack-file %s to drop"),
- packs_to_drop->items[drop_index].string);
- drop_index++;
- missing_drops++;
- i--;
- } else {
- ctx.info[i].expired = 0;
- }
- }
-
- if (missing_drops) {
- result = 1;
- goto cleanup;
- }
- }
-
- /*
- * pack_perm stores a permutation between pack-int-ids from the
- * previous multi-pack-index to the new one we are writing:
- *
- * pack_perm[old_id] = new_id
- */
- ALLOC_ARRAY(ctx.pack_perm, ctx.nr);
- for (i = 0; i < ctx.nr; i++) {
- if (ctx.info[i].expired) {
- dropped_packs++;
- ctx.pack_perm[ctx.info[i].orig_pack_int_id] = PACK_EXPIRED;
- } else {
- ctx.pack_perm[ctx.info[i].orig_pack_int_id] = i - dropped_packs;
- }
- }
-
- for (i = 0; i < ctx.nr; i++) {
- if (ctx.info[i].expired)
- continue;
- pack_name_concat_len += strlen(ctx.info[i].pack_name) + 1;
- bitmapped_packs_concat_len += 2 * sizeof(uint32_t);
- }
-
- /* Check that the preferred pack wasn't expired (if given). */
- if (preferred_pack_name) {
- struct pack_info *preferred = bsearch(preferred_pack_name,
- ctx.info, ctx.nr,
- sizeof(*ctx.info),
- idx_or_pack_name_cmp);
- if (preferred) {
- uint32_t perm = ctx.pack_perm[preferred->orig_pack_int_id];
- if (perm == PACK_EXPIRED)
- warning(_("preferred pack '%s' is expired"),
- preferred_pack_name);
- }
- }
-
- if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
- pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
- (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
-
- hold_lock_file_for_update(&lk, midx_name.buf, LOCK_DIE_ON_ERROR);
- f = hashfd(get_lock_file_fd(&lk), get_lock_file_path(&lk));
-
- if (ctx.nr - dropped_packs == 0) {
- error(_("no pack files to index."));
- result = 1;
- goto cleanup;
- }
-
- if (!ctx.entries_nr) {
- if (flags & MIDX_WRITE_BITMAP)
- warning(_("refusing to write multi-pack .bitmap without any objects"));
- flags &= ~(MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP);
- }
-
- cf = init_chunkfile(f);
-
- add_chunk(cf, MIDX_CHUNKID_PACKNAMES, pack_name_concat_len,
- write_midx_pack_names);
- add_chunk(cf, MIDX_CHUNKID_OIDFANOUT, MIDX_CHUNK_FANOUT_SIZE,
- write_midx_oid_fanout);
- add_chunk(cf, MIDX_CHUNKID_OIDLOOKUP,
- st_mult(ctx.entries_nr, the_hash_algo->rawsz),
- write_midx_oid_lookup);
- add_chunk(cf, MIDX_CHUNKID_OBJECTOFFSETS,
- st_mult(ctx.entries_nr, MIDX_CHUNK_OFFSET_WIDTH),
- write_midx_object_offsets);
-
- if (ctx.large_offsets_needed)
- add_chunk(cf, MIDX_CHUNKID_LARGEOFFSETS,
- st_mult(ctx.num_large_offsets,
- MIDX_CHUNK_LARGE_OFFSET_WIDTH),
- write_midx_large_offsets);
-
- if (flags & (MIDX_WRITE_REV_INDEX | MIDX_WRITE_BITMAP)) {
- ctx.pack_order = midx_pack_order(&ctx);
- add_chunk(cf, MIDX_CHUNKID_REVINDEX,
- st_mult(ctx.entries_nr, sizeof(uint32_t)),
- write_midx_revindex);
- add_chunk(cf, MIDX_CHUNKID_BITMAPPEDPACKS,
- bitmapped_packs_concat_len,
- write_midx_bitmapped_packs);
- }
-
- write_midx_header(f, get_num_chunks(cf), ctx.nr - dropped_packs);
- write_chunkfile(cf, &ctx);
-
- finalize_hashfile(f, midx_hash, FSYNC_COMPONENT_PACK_METADATA,
- CSUM_FSYNC | CSUM_HASH_IN_STREAM);
- free_chunkfile(cf);
-
- if (flags & MIDX_WRITE_REV_INDEX &&
- git_env_bool("GIT_TEST_MIDX_WRITE_REV", 0))
- write_midx_reverse_index(midx_name.buf, midx_hash, &ctx);
-
- if (flags & MIDX_WRITE_BITMAP) {
- struct packing_data pdata;
- struct commit **commits;
- uint32_t commits_nr;
-
- if (!ctx.entries_nr)
- BUG("cannot write a bitmap without any objects");
-
- prepare_midx_packing_data(&pdata, &ctx);
-
- commits = find_commits_for_midx_bitmap(&commits_nr, refs_snapshot, &ctx);
-
- /*
- * The previous steps translated the information from
- * 'entries' into information suitable for constructing
- * bitmaps. We no longer need that array, so clear it to
- * reduce memory pressure.
- */
- FREE_AND_NULL(ctx.entries);
- ctx.entries_nr = 0;
-
- if (write_midx_bitmap(midx_name.buf, midx_hash, &pdata,
- commits, commits_nr, ctx.pack_order,
- flags) < 0) {
- error(_("could not write multi-pack bitmap"));
- result = 1;
- clear_packing_data(&pdata);
- free(commits);
- goto cleanup;
- }
-
- clear_packing_data(&pdata);
- free(commits);
- }
- /*
- * NOTE: Do not use ctx.entries beyond this point, since it might
- * have been freed in the previous if block.
- */
-
- if (ctx.m)
- close_object_store(the_repository->objects);
-
- if (commit_lock_file(&lk) < 0)
- die_errno(_("could not write multi-pack-index"));
-
- clear_midx_files_ext(object_dir, ".bitmap", midx_hash);
- clear_midx_files_ext(object_dir, ".rev", midx_hash);
-
-cleanup:
- for (i = 0; i < ctx.nr; i++) {
- if (ctx.info[i].p) {
- close_pack(ctx.info[i].p);
- free(ctx.info[i].p);
- }
- free(ctx.info[i].pack_name);
- }
-
- free(ctx.info);
- free(ctx.entries);
- free(ctx.pack_perm);
- free(ctx.pack_order);
- strbuf_release(&midx_name);
-
- trace2_region_leave("midx", "write_midx_internal", the_repository);
-
- return result;
-}
-
-int write_midx_file(const char *object_dir,
- const char *preferred_pack_name,
- const char *refs_snapshot,
- unsigned flags)
-{
- return write_midx_internal(object_dir, NULL, NULL, preferred_pack_name,
- refs_snapshot, flags);
-}
-
-int write_midx_file_only(const char *object_dir,
- struct string_list *packs_to_include,
- const char *preferred_pack_name,
- const char *refs_snapshot,
- unsigned flags)
-{
- return write_midx_internal(object_dir, packs_to_include, NULL,
- preferred_pack_name, refs_snapshot, flags);
-}
-
struct clear_midx_data {
char *keep;
const char *ext;
@@ -1784,8 +507,8 @@ static void clear_midx_file_ext(const char *full_path, size_t full_path_len UNUS
die_errno(_("failed to remove %s"), full_path);
}
-static void clear_midx_files_ext(const char *object_dir, const char *ext,
- unsigned char *keep_hash)
+void clear_midx_files_ext(const char *object_dir, const char *ext,
+ unsigned char *keep_hash)
{
struct clear_midx_data data;
memset(&data, 0, sizeof(struct clear_midx_data));
@@ -1988,251 +711,3 @@ cleanup:
return verify_midx_error;
}
-
-int expire_midx_packs(struct repository *r, const char *object_dir, unsigned flags)
-{
- uint32_t i, *count, result = 0;
- struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
- struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
- struct progress *progress = NULL;
-
- if (!m)
- return 0;
-
- CALLOC_ARRAY(count, m->num_packs);
-
- if (flags & MIDX_PROGRESS)
- progress = start_delayed_progress(_("Counting referenced objects"),
- m->num_objects);
- for (i = 0; i < m->num_objects; i++) {
- int pack_int_id = nth_midxed_pack_int_id(m, i);
- count[pack_int_id]++;
- display_progress(progress, i + 1);
- }
- stop_progress(&progress);
-
- if (flags & MIDX_PROGRESS)
- progress = start_delayed_progress(_("Finding and deleting unreferenced packfiles"),
- m->num_packs);
- for (i = 0; i < m->num_packs; i++) {
- char *pack_name;
- display_progress(progress, i + 1);
-
- if (count[i])
- continue;
-
- if (prepare_midx_pack(r, m, i))
- continue;
-
- if (m->packs[i]->pack_keep || m->packs[i]->is_cruft)
- continue;
-
- pack_name = xstrdup(m->packs[i]->pack_name);
- close_pack(m->packs[i]);
-
- string_list_insert(&packs_to_drop, m->pack_names[i]);
- unlink_pack_path(pack_name, 0);
- free(pack_name);
- }
- stop_progress(&progress);
-
- free(count);
-
- if (packs_to_drop.nr)
- result = write_midx_internal(object_dir, NULL, &packs_to_drop, NULL, NULL, flags);
-
- string_list_clear(&packs_to_drop, 0);
-
- return result;
-}
-
-struct repack_info {
- timestamp_t mtime;
- uint32_t referenced_objects;
- uint32_t pack_int_id;
-};
-
-static int compare_by_mtime(const void *a_, const void *b_)
-{
- const struct repack_info *a, *b;
-
- a = (const struct repack_info *)a_;
- b = (const struct repack_info *)b_;
-
- if (a->mtime < b->mtime)
- return -1;
- if (a->mtime > b->mtime)
- return 1;
- return 0;
-}
-
-static int fill_included_packs_all(struct repository *r,
- struct multi_pack_index *m,
- unsigned char *include_pack)
-{
- uint32_t i, count = 0;
- int pack_kept_objects = 0;
-
- repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
-
- for (i = 0; i < m->num_packs; i++) {
- if (prepare_midx_pack(r, m, i))
- continue;
- if (!pack_kept_objects && m->packs[i]->pack_keep)
- continue;
- if (m->packs[i]->is_cruft)
- continue;
-
- include_pack[i] = 1;
- count++;
- }
-
- return count < 2;
-}
-
-static int fill_included_packs_batch(struct repository *r,
- struct multi_pack_index *m,
- unsigned char *include_pack,
- size_t batch_size)
-{
- uint32_t i, packs_to_repack;
- size_t total_size;
- struct repack_info *pack_info;
- int pack_kept_objects = 0;
-
- CALLOC_ARRAY(pack_info, m->num_packs);
-
- repo_config_get_bool(r, "repack.packkeptobjects", &pack_kept_objects);
-
- for (i = 0; i < m->num_packs; i++) {
- pack_info[i].pack_int_id = i;
-
- if (prepare_midx_pack(r, m, i))
- continue;
-
- pack_info[i].mtime = m->packs[i]->mtime;
- }
-
- for (i = 0; i < m->num_objects; i++) {
- uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
- pack_info[pack_int_id].referenced_objects++;
- }
-
- QSORT(pack_info, m->num_packs, compare_by_mtime);
-
- total_size = 0;
- packs_to_repack = 0;
- for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
- int pack_int_id = pack_info[i].pack_int_id;
- struct packed_git *p = m->packs[pack_int_id];
- size_t expected_size;
-
- if (!p)
- continue;
- if (!pack_kept_objects && p->pack_keep)
- continue;
- if (p->is_cruft)
- continue;
- if (open_pack_index(p) || !p->num_objects)
- continue;
-
- expected_size = st_mult(p->pack_size,
- pack_info[i].referenced_objects);
- expected_size /= p->num_objects;
-
- if (expected_size >= batch_size)
- continue;
-
- packs_to_repack++;
- total_size += expected_size;
- include_pack[pack_int_id] = 1;
- }
-
- free(pack_info);
-
- if (packs_to_repack < 2)
- return 1;
-
- return 0;
-}
-
-int midx_repack(struct repository *r, const char *object_dir, size_t batch_size, unsigned flags)
-{
- int result = 0;
- uint32_t i;
- unsigned char *include_pack;
- struct child_process cmd = CHILD_PROCESS_INIT;
- FILE *cmd_in;
- struct multi_pack_index *m = lookup_multi_pack_index(r, object_dir);
-
- /*
- * When updating the default for these configuration
- * variables in builtin/repack.c, these must be adjusted
- * to match.
- */
- int delta_base_offset = 1;
- int use_delta_islands = 0;
-
- if (!m)
- return 0;
-
- CALLOC_ARRAY(include_pack, m->num_packs);
-
- if (batch_size) {
- if (fill_included_packs_batch(r, m, include_pack, batch_size))
- goto cleanup;
- } else if (fill_included_packs_all(r, m, include_pack))
- goto cleanup;
-
- repo_config_get_bool(r, "repack.usedeltabaseoffset", &delta_base_offset);
- repo_config_get_bool(r, "repack.usedeltaislands", &use_delta_islands);
-
- strvec_push(&cmd.args, "pack-objects");
-
- strvec_pushf(&cmd.args, "%s/pack/pack", object_dir);
-
- if (delta_base_offset)
- strvec_push(&cmd.args, "--delta-base-offset");
- if (use_delta_islands)
- strvec_push(&cmd.args, "--delta-islands");
-
- if (flags & MIDX_PROGRESS)
- strvec_push(&cmd.args, "--progress");
- else
- strvec_push(&cmd.args, "-q");
-
- cmd.git_cmd = 1;
- cmd.in = cmd.out = -1;
-
- if (start_command(&cmd)) {
- error(_("could not start pack-objects"));
- result = 1;
- goto cleanup;
- }
-
- cmd_in = xfdopen(cmd.in, "w");
-
- for (i = 0; i < m->num_objects; i++) {
- struct object_id oid;
- uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
-
- if (!include_pack[pack_int_id])
- continue;
-
- nth_midxed_object_oid(&oid, m, i);
- fprintf(cmd_in, "%s\n", oid_to_hex(&oid));
- }
- fclose(cmd_in);
-
- if (finish_command(&cmd)) {
- error(_("could not finish pack-objects"));
- result = 1;
- goto cleanup;
- }
-
- result = write_midx_internal(object_dir, NULL, NULL, NULL, NULL, flags);
-
-cleanup:
- free(include_pack);
- return result;
-}
diff --git a/midx.h b/midx.h
index b374a7afaf..dc477dff44 100644
--- a/midx.h
+++ b/midx.h
@@ -8,6 +8,25 @@ struct pack_entry;
struct repository;
struct bitmapped_pack;
+#define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
+#define MIDX_VERSION 1
+#define MIDX_BYTE_FILE_VERSION 4
+#define MIDX_BYTE_HASH_VERSION 5
+#define MIDX_BYTE_NUM_CHUNKS 6
+#define MIDX_BYTE_NUM_PACKS 8
+#define MIDX_HEADER_SIZE 12
+
+#define MIDX_CHUNK_ALIGNMENT 4
+#define MIDX_CHUNKID_PACKNAMES 0x504e414d /* "PNAM" */
+#define MIDX_CHUNKID_BITMAPPEDPACKS 0x42544d50 /* "BTMP" */
+#define MIDX_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
+#define MIDX_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
+#define MIDX_CHUNKID_OBJECTOFFSETS 0x4f4f4646 /* "OOFF" */
+#define MIDX_CHUNKID_LARGEOFFSETS 0x4c4f4646 /* "LOFF" */
+#define MIDX_CHUNKID_REVINDEX 0x52494458 /* "RIDX" */
+#define MIDX_CHUNK_OFFSET_WIDTH (2 * sizeof(uint32_t))
+#define MIDX_LARGE_OFFSET_NEEDED 0x80000000
+
#define GIT_TEST_MULTI_PACK_INDEX "GIT_TEST_MULTI_PACK_INDEX"
#define GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP \
"GIT_TEST_MULTI_PACK_INDEX_WRITE_BITMAP"
diff --git a/oss-fuzz/fuzz-date.c b/oss-fuzz/fuzz-date.c
index 036378b946..9619dae40e 100644
--- a/oss-fuzz/fuzz-date.c
+++ b/oss-fuzz/fuzz-date.c
@@ -11,7 +11,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
int16_t tz;
timestamp_t ts;
enum date_mode_type dmtype;
- struct date_mode *dm;
+ struct date_mode dm;
if (size <= 4)
/*
@@ -40,10 +40,10 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
free(str);
dm = date_mode_from_type(dmtype);
- dm->local = local;
+ dm.local = local;
show_date(ts, (int)tz, dm);
- date_mode_release(dm);
+ date_mode_release(&dm);
return 0;
}
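The fuzzer change mirrors the wider conversion of `struct date_mode` from pointer to value semantics. A minimal sketch of the resulting calling convention, with `timestamp` and `tz` standing in for parsed input:

struct date_mode dm = date_mode_from_type(DATE_ISO8601);
dm.local = 1;					/* fields are tweaked on the value itself */
printf("%s\n", show_date(timestamp, tz, dm));	/* the mode is handed over by value */
date_mode_release(&dm);				/* releasing still takes the address */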
diff --git a/path.c b/path.c
index 8bb223c92c..67229edb9c 100644
--- a/path.c
+++ b/path.c
@@ -28,8 +28,6 @@ static int get_st_mode_bits(const char *path, int *mode)
return 0;
}
-static char bad_path[] = "/bad-path/";
-
static struct strbuf *get_pathname(void)
{
static struct strbuf pathname_array[4] = {
@@ -59,21 +57,6 @@ static void strbuf_cleanup_path(struct strbuf *sb)
strbuf_remove(sb, 0, path - sb->buf);
}
-char *mksnpath(char *buf, size_t n, const char *fmt, ...)
-{
- va_list args;
- unsigned len;
-
- va_start(args, fmt);
- len = vsnprintf(buf, n, fmt, args);
- va_end(args);
- if (len >= n) {
- strlcpy(buf, bad_path, n);
- return buf;
- }
- return (char *)cleanup_path(buf);
-}
-
static int dir_prefix(const char *buf, const char *dir)
{
int len = strlen(dir);
diff --git a/path.h b/path.h
index e053effef2..ea96487b29 100644
--- a/path.h
+++ b/path.h
@@ -24,12 +24,6 @@ char *mkpathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
/*
- * Construct a path and place the result in the provided buffer `buf`.
- */
-char *mksnpath(char *buf, size_t n, const char *fmt, ...)
- __attribute__((format (printf, 3, 4)));
-
-/*
* The `git_common_path` family of functions will construct a path into a
* repository's common git directory, which is shared by all worktrees.
*/
diff --git a/po/tr.po b/po/tr.po
index 19d6661bbe..5837752d0b 100644
--- a/po/tr.po
+++ b/po/tr.po
@@ -20,7 +20,7 @@
# clone | klon(lamak) #
# commit (ad) | işleme #
# commit (eyl.) | işlemek #
-# commitish | işlememsi #
+# commit-ish | işlememsi #
# conflict | çakışma #
# cruft | süprüntü #
# dangling object | sallanan nesne #
diff --git a/pretty.c b/pretty.c
index 2faf651d3e..7ead078998 100644
--- a/pretty.c
+++ b/pretty.c
@@ -428,7 +428,7 @@ static void add_rfc2047(struct strbuf *sb, const char *line, size_t len,
}
const char *show_ident_date(const struct ident_split *ident,
- const struct date_mode *mode)
+ struct date_mode mode)
{
timestamp_t date = 0;
long tz = 0;
@@ -592,7 +592,7 @@ void pp_user_info(struct pretty_print_context *pp,
switch (pp->fmt) {
case CMIT_FMT_MEDIUM:
strbuf_addf(sb, "Date: %s\n",
- show_ident_date(&ident, &pp->date_mode));
+ show_ident_date(&ident, pp->date_mode));
break;
case CMIT_FMT_EMAIL:
case CMIT_FMT_MBOXRD:
@@ -601,7 +601,7 @@ void pp_user_info(struct pretty_print_context *pp,
break;
case CMIT_FMT_FULLER:
strbuf_addf(sb, "%sDate: %s\n", what,
- show_ident_date(&ident, &pp->date_mode));
+ show_ident_date(&ident, pp->date_mode));
break;
default:
/* notin' */
@@ -775,7 +775,7 @@ static int mailmap_name(const char **email, size_t *email_len,
static size_t format_person_part(struct strbuf *sb, char part,
const char *msg, int len,
- const struct date_mode *dmode)
+ struct date_mode dmode)
{
/* currently all placeholders have same length */
const int placeholder_len = 2;
@@ -1034,7 +1034,7 @@ static void rewrap_message_tail(struct strbuf *sb,
static int format_reflog_person(struct strbuf *sb,
char part,
struct reflog_walk_info *log,
- const struct date_mode *dmode)
+ struct date_mode dmode)
{
const char *ident;
@@ -1602,7 +1602,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
if (c->pretty_ctx->reflog_info)
get_reflog_selector(sb,
c->pretty_ctx->reflog_info,
- &c->pretty_ctx->date_mode,
+ c->pretty_ctx->date_mode,
c->pretty_ctx->date_mode_explicit,
(placeholder[1] == 'd'));
return 2;
@@ -1617,7 +1617,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
return format_reflog_person(sb,
placeholder[1],
c->pretty_ctx->reflog_info,
- &c->pretty_ctx->date_mode);
+ c->pretty_ctx->date_mode);
}
return 0; /* unknown %g placeholder */
case 'N':
@@ -1712,11 +1712,11 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
case 'a': /* author ... */
return format_person_part(sb, placeholder[1],
msg + c->author.off, c->author.len,
- &c->pretty_ctx->date_mode);
+ c->pretty_ctx->date_mode);
case 'c': /* committer ... */
return format_person_part(sb, placeholder[1],
msg + c->committer.off, c->committer.len,
- &c->pretty_ctx->date_mode);
+ c->pretty_ctx->date_mode);
case 'e': /* encoding */
if (c->commit_encoding)
strbuf_addstr(sb, c->commit_encoding);
diff --git a/pretty.h b/pretty.h
index 9cc9e5d42b..df267afe4a 100644
--- a/pretty.h
+++ b/pretty.h
@@ -167,7 +167,7 @@ int format_set_trailers_options(struct process_trailer_options *opts,
* a well-known sentinel date if they appear bogus.
*/
const char *show_ident_date(const struct ident_split *id,
- const struct date_mode *mode);
+ struct date_mode mode);
#endif /* PRETTY_H */
diff --git a/read-cache-ll.h b/read-cache-ll.h
index 2a50a784f0..09414afd04 100644
--- a/read-cache-ll.h
+++ b/read-cache-ll.h
@@ -480,8 +480,8 @@ extern int verify_ce_order;
int cmp_cache_name_compare(const void *a_, const void *b_);
int add_files_to_cache(struct repository *repo, const char *prefix,
- const struct pathspec *pathspec, int include_sparse,
- int flags);
+ const struct pathspec *pathspec, char *ps_matched,
+ int include_sparse, int flags);
void overlay_tree_on_index(struct index_state *istate,
const char *tree_name, const char *prefix);
diff --git a/read-cache.c b/read-cache.c
index f546cf7875..e1723ad796 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -3958,8 +3958,8 @@ static void update_callback(struct diff_queue_struct *q,
}
int add_files_to_cache(struct repository *repo, const char *prefix,
- const struct pathspec *pathspec, int include_sparse,
- int flags)
+ const struct pathspec *pathspec, char *ps_matched,
+ int include_sparse, int flags)
{
struct update_callback_data data;
struct rev_info rev;
@@ -3971,8 +3971,10 @@ int add_files_to_cache(struct repository *repo, const char *prefix,
repo_init_revisions(repo, &rev, prefix);
setup_revisions(0, NULL, &rev, NULL);
- if (pathspec)
+ if (pathspec) {
copy_pathspec(&rev.prune_data, pathspec);
+ rev.ps_matched = ps_matched;
+ }
rev.diffopt.output_format = DIFF_FORMAT_CALLBACK;
rev.diffopt.format_callback = update_callback;
rev.diffopt.format_callback_data = &data;
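The new `ps_matched` argument lets callers find out which pathspec items never matched anything. A sketch of a call site under that assumption; `pathspec`, `ret` and the `report_path_error()` step are placeholders for the caller's context, not shown in this hunk:

char *ps_matched = xcalloc(pathspec.nr, 1);	/* one byte per pathspec item */

add_files_to_cache(repo, prefix, &pathspec, ps_matched, include_sparse, flags);
if (report_path_error(ps_matched, &pathspec))	/* warn about unmatched items */
	ret = 1;

free(ps_matched);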
diff --git a/ref-filter.c b/ref-filter.c
index 03542d0236..59ad6f54dd 100644
--- a/ref-filter.c
+++ b/ref-filter.c
@@ -1627,7 +1627,7 @@ static void grab_date(const char *buf, struct atom_value *v, const char *atomnam
tz = strtol(zone, NULL, 10);
if ((tz == LONG_MIN || tz == LONG_MAX) && errno == ERANGE)
goto bad;
- v->s = xstrdup(show_date(timestamp, tz, &date_mode));
+ v->s = xstrdup(show_date(timestamp, tz, date_mode));
v->value = timestamp;
date_mode_release(&date_mode);
return;
diff --git a/reflog-walk.c b/reflog-walk.c
index d216f6f966..66484f4f32 100644
--- a/reflog-walk.c
+++ b/reflog-walk.c
@@ -223,7 +223,7 @@ int add_reflog_for_walk(struct reflog_walk_info *info,
void get_reflog_selector(struct strbuf *sb,
struct reflog_walk_info *reflog_info,
- const struct date_mode *dmode, int force_date,
+ struct date_mode dmode, int force_date,
int shorten)
{
struct commit_reflog *commit_reflog = reflog_info->last_commit_reflog;
@@ -297,7 +297,7 @@ timestamp_t get_reflog_timestamp(struct reflog_walk_info *reflog_info)
}
void show_reflog_message(struct reflog_walk_info *reflog_info, int oneline,
- const struct date_mode *dmode, int force_date)
+ struct date_mode dmode, int force_date)
{
if (reflog_info && reflog_info->last_commit_reflog) {
struct commit_reflog *commit_reflog = reflog_info->last_commit_reflog;
diff --git a/reflog-walk.h b/reflog-walk.h
index 4d93a26957..989583dc55 100644
--- a/reflog-walk.h
+++ b/reflog-walk.h
@@ -10,14 +10,14 @@ void reflog_walk_info_release(struct reflog_walk_info *info);
int add_reflog_for_walk(struct reflog_walk_info *info,
struct commit *commit, const char *name);
void show_reflog_message(struct reflog_walk_info *info, int,
- const struct date_mode *, int force_date);
+ struct date_mode, int force_date);
void get_reflog_message(struct strbuf *sb,
struct reflog_walk_info *reflog_info);
const char *get_reflog_ident(struct reflog_walk_info *reflog_info);
timestamp_t get_reflog_timestamp(struct reflog_walk_info *reflog_info);
void get_reflog_selector(struct strbuf *sb,
struct reflog_walk_info *reflog_info,
- const struct date_mode *dmode, int force_date,
+ struct date_mode dmode, int force_date,
int shorten);
int reflog_walk_empty(struct reflog_walk_info *walk);
diff --git a/refs/reftable-backend.c b/refs/reftable-backend.c
index 0bed6d2ab4..1cda48c504 100644
--- a/refs/reftable-backend.c
+++ b/refs/reftable-backend.c
@@ -18,6 +18,7 @@
#include "../reftable/reftable-merged.h"
#include "../setup.h"
#include "../strmap.h"
+#include "parse.h"
#include "refs-internal.h"
/*
@@ -247,6 +248,8 @@ static struct ref_store *reftable_be_init(struct repository *repo,
refs->write_options.block_size = 4096;
refs->write_options.hash_id = repo->hash_algo->format_id;
refs->write_options.default_permissions = calc_shared_perm(0666 & ~mask);
+ refs->write_options.disable_auto_compact =
+ !git_env_bool("GIT_TEST_REFTABLE_AUTOCOMPACTION", 1);
/*
* Set up the main reftable stack that is hosted in GIT_COMMON_DIR.
diff --git a/reftable/basics.c b/reftable/basics.c
index 0785aff941..fea711db7e 100644
--- a/reftable/basics.c
+++ b/reftable/basics.c
@@ -27,7 +27,7 @@ void put_be16(uint8_t *out, uint16_t i)
out[1] = (uint8_t)(i & 0xff);
}
-int binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
+size_t binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
{
size_t lo = 0;
size_t hi = sz;
@@ -39,8 +39,11 @@ int binsearch(size_t sz, int (*f)(size_t k, void *args), void *args)
*/
while (hi - lo > 1) {
size_t mid = lo + (hi - lo) / 2;
+ int ret = f(mid, args);
+ if (ret < 0)
+ return sz;
- if (f(mid, args))
+ if (ret > 0)
hi = mid;
else
lo = mid;
diff --git a/reftable/basics.h b/reftable/basics.h
index 91f3533efe..523ecd5307 100644
--- a/reftable/basics.h
+++ b/reftable/basics.h
@@ -22,13 +22,14 @@ uint32_t get_be24(uint8_t *in);
void put_be16(uint8_t *out, uint16_t i);
/*
- * find smallest index i in [0, sz) at which f(i) is true, assuming
- * that f is ascending. Return sz if f(i) is false for all indices.
+ * find smallest index i in [0, sz) at which `f(i) > 0`, assuming that f is
+ * ascending. Return sz if `f(i) == 0` for all indices. The search is aborted
+ * and `sz` is returned in case `f(i) < 0`.
*
* Contrary to bsearch(3), this returns something useful if the argument is not
* found.
*/
-int binsearch(size_t sz, int (*f)(size_t k, void *args), void *args);
+size_t binsearch(size_t sz, int (*f)(size_t k, void *args), void *args);
/*
* Frees a NULL terminated array of malloced strings. The array itself is also
diff --git a/reftable/basics_test.c b/reftable/basics_test.c
index 1fcd229725..997c4d9e01 100644
--- a/reftable/basics_test.c
+++ b/reftable/basics_test.c
@@ -12,40 +12,47 @@ https://developers.google.com/open-source/licenses/bsd
#include "test_framework.h"
#include "reftable-tests.h"
-struct binsearch_args {
- int key;
- int *arr;
+struct integer_needle_lesseq_args {
+ int needle;
+ int *haystack;
};
-static int binsearch_func(size_t i, void *void_args)
+static int integer_needle_lesseq(size_t i, void *_args)
{
- struct binsearch_args *args = void_args;
-
- return args->key < args->arr[i];
+ struct integer_needle_lesseq_args *args = _args;
+ return args->needle <= args->haystack[i];
}
static void test_binsearch(void)
{
- int arr[] = { 2, 4, 6, 8, 10 };
- size_t sz = ARRAY_SIZE(arr);
- struct binsearch_args args = {
- .arr = arr,
+ int haystack[] = { 2, 4, 6, 8, 10 };
+ struct {
+ int needle;
+ size_t expected_idx;
+ } testcases[] = {
+ {-9000, 0},
+ {-1, 0},
+ {0, 0},
+ {2, 0},
+ {3, 1},
+ {4, 1},
+ {7, 3},
+ {9, 4},
+ {10, 4},
+ {11, 5},
+ {9000, 5},
};
+ size_t i = 0;
- int i = 0;
- for (i = 1; i < 11; i++) {
- int res;
- args.key = i;
- res = binsearch(sz, &binsearch_func, &args);
+ for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+ struct integer_needle_lesseq_args args = {
+ .haystack = haystack,
+ .needle = testcases[i].needle,
+ };
+ size_t idx;
- if (res < sz) {
- EXPECT(args.key < arr[res]);
- if (res > 0) {
- EXPECT(args.key >= arr[res - 1]);
- }
- } else {
- EXPECT(args.key == 10 || args.key == 11);
- }
+ idx = binsearch(ARRAY_SIZE(haystack), &integer_needle_lesseq, &args);
+ EXPECT(idx == testcases[i].expected_idx);
}
}
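Besides the `<=` predicates exercised above, the widened contract lets a predicate abort the search: a negative return makes `binsearch()` hand back `sz`, which callers treat like "not found". A minimal sketch of such a predicate and its use:

struct lookup_args {
	const int *haystack;
	int needle;
	int failed;		/* set when reading an entry goes wrong */
};

static int needle_lesseq_or_fail(size_t i, void *arg)
{
	struct lookup_args *args = arg;
	if (args->failed)
		return -1;	/* aborts the search; binsearch() returns sz */
	return args->needle <= args->haystack[i];
}

static size_t lookup(const int *haystack, size_t n, int needle)
{
	struct lookup_args args = { .haystack = haystack, .needle = needle };
	size_t idx = binsearch(n, needle_lesseq_or_fail, &args);
	/* idx == n means either no entry was >= needle or the predicate failed */
	return idx;
}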
diff --git a/reftable/block.c b/reftable/block.c
index e2a2cee58d..3e87460cba 100644
--- a/reftable/block.c
+++ b/reftable/block.c
@@ -175,11 +175,6 @@ int block_writer_finish(struct block_writer *w)
return w->next;
}
-uint8_t block_reader_type(struct block_reader *r)
-{
- return r->block.data[r->header_off];
-}
-
int block_reader_init(struct block_reader *br, struct reftable_block *block,
uint32_t header_off, uint32_t table_block_size,
int hash_size)
@@ -191,7 +186,8 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
uint16_t restart_count = 0;
uint32_t restart_start = 0;
uint8_t *restart_bytes = NULL;
- uint8_t *uncompressed = NULL;
+
+ reftable_block_done(&br->block);
if (!reftable_is_block_type(typ)) {
err = REFTABLE_FORMAT_ERROR;
@@ -199,37 +195,57 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
}
if (typ == BLOCK_TYPE_LOG) {
- int block_header_skip = 4 + header_off;
- uLongf dst_len = sz - block_header_skip; /* total size of dest
- buffer. */
- uLongf src_len = block->len - block_header_skip;
+ uint32_t block_header_skip = 4 + header_off;
+ uLong dst_len = sz - block_header_skip;
+ uLong src_len = block->len - block_header_skip;
/* Log blocks specify the *uncompressed* size in their header. */
- REFTABLE_ALLOC_ARRAY(uncompressed, sz);
+ REFTABLE_ALLOC_GROW(br->uncompressed_data, sz,
+ br->uncompressed_cap);
/* Copy over the block header verbatim. It's not compressed. */
- memcpy(uncompressed, block->data, block_header_skip);
+ memcpy(br->uncompressed_data, block->data, block_header_skip);
+
+ if (!br->zstream) {
+ REFTABLE_CALLOC_ARRAY(br->zstream, 1);
+ err = inflateInit(br->zstream);
+ } else {
+ err = inflateReset(br->zstream);
+ }
+ if (err != Z_OK) {
+ err = REFTABLE_ZLIB_ERROR;
+ goto done;
+ }
- /* Uncompress */
- if (Z_OK !=
- uncompress2(uncompressed + block_header_skip, &dst_len,
- block->data + block_header_skip, &src_len)) {
+ br->zstream->next_in = block->data + block_header_skip;
+ br->zstream->avail_in = src_len;
+ br->zstream->next_out = br->uncompressed_data + block_header_skip;
+ br->zstream->avail_out = dst_len;
+
+ /*
+ * We know both input as well as output size, and we know that
+ * the sizes should never be bigger than `uInt_MAX` because
+ * blocks can at most be 16MB large. We can thus use `Z_FINISH`
+ * here to instruct zlib to inflate the data in one go, which
+ * is more efficient than using `Z_NO_FLUSH`.
+ */
+ err = inflate(br->zstream, Z_FINISH);
+ if (err != Z_STREAM_END) {
err = REFTABLE_ZLIB_ERROR;
goto done;
}
+ err = 0;
- if (dst_len + block_header_skip != sz) {
+ if (br->zstream->total_out + block_header_skip != sz) {
err = REFTABLE_FORMAT_ERROR;
goto done;
}
/* We're done with the input data. */
reftable_block_done(block);
- block->data = uncompressed;
- uncompressed = NULL;
+ block->data = br->uncompressed_data;
block->len = sz;
- block->source = malloc_block_source();
- full_block_size = src_len + block_header_skip;
+ full_block_size = src_len + block_header_skip - br->zstream->avail_in;
} else if (full_block_size == 0) {
full_block_size = sz;
} else if (sz < full_block_size && sz < block->len &&
@@ -257,72 +273,109 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
br->restart_bytes = restart_bytes;
done:
- reftable_free(uncompressed);
return err;
}
-static uint32_t block_reader_restart_offset(struct block_reader *br, int i)
+void block_reader_release(struct block_reader *br)
+{
+ inflateEnd(br->zstream);
+ reftable_free(br->zstream);
+ reftable_free(br->uncompressed_data);
+ reftable_block_done(&br->block);
+}
+
+uint8_t block_reader_type(const struct block_reader *r)
+{
+ return r->block.data[r->header_off];
+}
+
+int block_reader_first_key(const struct block_reader *br, struct strbuf *key)
+{
+ int off = br->header_off + 4, n;
+ struct string_view in = {
+ .buf = br->block.data + off,
+ .len = br->block_len - off,
+ };
+ uint8_t extra = 0;
+
+ strbuf_reset(key);
+
+ n = reftable_decode_key(key, &extra, in);
+ if (n < 0)
+ return n;
+ if (!key->len)
+ return REFTABLE_FORMAT_ERROR;
+
+ return 0;
+}
+
+static uint32_t block_reader_restart_offset(const struct block_reader *br, int i)
{
return get_be24(br->restart_bytes + 3 * i);
}
-void block_reader_start(struct block_reader *br, struct block_iter *it)
+void block_iter_seek_start(struct block_iter *it, const struct block_reader *br)
{
- it->br = br;
+ it->block = br->block.data;
+ it->block_len = br->block_len;
+ it->hash_size = br->hash_size;
strbuf_reset(&it->last_key);
it->next_off = br->header_off + 4;
}
-struct restart_find_args {
+struct restart_needle_less_args {
int error;
- struct strbuf key;
- struct block_reader *r;
+ struct strbuf needle;
+ const struct block_reader *reader;
};
-static int restart_key_less(size_t idx, void *args)
+static int restart_needle_less(size_t idx, void *_args)
{
- struct restart_find_args *a = args;
- uint32_t off = block_reader_restart_offset(a->r, idx);
+ struct restart_needle_less_args *args = _args;
+ uint32_t off = block_reader_restart_offset(args->reader, idx);
struct string_view in = {
- .buf = a->r->block.data + off,
- .len = a->r->block_len - off,
+ .buf = args->reader->block.data + off,
+ .len = args->reader->block_len - off,
};
+ uint64_t prefix_len, suffix_len;
+ uint8_t extra;
+ int n;
+
+ /*
+ * Records at restart points are stored without prefix compression, so
+ * there is no need to fully decode the record key here. This removes
+ * the need for allocating memory.
+ */
+ n = reftable_decode_keylen(in, &prefix_len, &suffix_len, &extra);
+ if (n < 0 || prefix_len) {
+ args->error = 1;
+ return -1;
+ }
- /* the restart key is verbatim in the block, so this could avoid the
- alloc for decoding the key */
- struct strbuf rkey = STRBUF_INIT;
- uint8_t unused_extra;
- int n = reftable_decode_key(&rkey, &unused_extra, in);
- int result;
- if (n < 0) {
- a->error = 1;
+ string_view_consume(&in, n);
+ if (suffix_len > in.len) {
+ args->error = 1;
return -1;
}
- result = strbuf_cmp(&a->key, &rkey);
- strbuf_release(&rkey);
- return result < 0;
-}
-
-void block_iter_copy_from(struct block_iter *dest, struct block_iter *src)
-{
- dest->br = src->br;
- dest->next_off = src->next_off;
- strbuf_reset(&dest->last_key);
- strbuf_addbuf(&dest->last_key, &src->last_key);
+ n = memcmp(args->needle.buf, in.buf,
+ args->needle.len < suffix_len ? args->needle.len : suffix_len);
+ if (n)
+ return n < 0;
+ return args->needle.len < suffix_len;
}
int block_iter_next(struct block_iter *it, struct reftable_record *rec)
{
struct string_view in = {
- .buf = it->br->block.data + it->next_off,
- .len = it->br->block_len - it->next_off,
+ .buf = (unsigned char *) it->block + it->next_off,
+ .len = it->block_len - it->next_off,
};
struct string_view start = in;
uint8_t extra = 0;
int n = 0;
- if (it->next_off >= it->br->block_len)
+ if (it->next_off >= it->block_len)
return 1;
n = reftable_decode_key(&it->last_key, &extra, in);
@@ -332,7 +385,7 @@ int block_iter_next(struct block_iter *it, struct reftable_record *rec)
return REFTABLE_FORMAT_ERROR;
string_view_consume(&in, n);
- n = reftable_record_decode(rec, it->last_key, extra, in, it->br->hash_size,
+ n = reftable_record_decode(rec, it->last_key, extra, in, it->hash_size,
&it->scratch);
if (n < 0)
return -1;
@@ -342,29 +395,13 @@ int block_iter_next(struct block_iter *it, struct reftable_record *rec)
return 0;
}
-int block_reader_first_key(struct block_reader *br, struct strbuf *key)
+void block_iter_reset(struct block_iter *it)
{
- int off = br->header_off + 4, n;
- struct string_view in = {
- .buf = br->block.data + off,
- .len = br->block_len - off,
- };
- uint8_t extra = 0;
-
- strbuf_reset(key);
-
- n = reftable_decode_key(key, &extra, in);
- if (n < 0)
- return n;
- if (!key->len)
- return REFTABLE_FORMAT_ERROR;
-
- return 0;
-}
-
-int block_iter_seek(struct block_iter *it, struct strbuf *want)
-{
- return block_reader_seek(it->br, it, want);
+ strbuf_reset(&it->last_key);
+ it->next_off = 0;
+ it->block = NULL;
+ it->block_len = 0;
+ it->hash_size = 0;
}
void block_iter_close(struct block_iter *it)
@@ -373,53 +410,105 @@ void block_iter_close(struct block_iter *it)
strbuf_release(&it->scratch);
}
-int block_reader_seek(struct block_reader *br, struct block_iter *it,
- struct strbuf *want)
+int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
+ struct strbuf *want)
{
- struct restart_find_args args = {
- .key = *want,
- .r = br,
+ struct restart_needle_less_args args = {
+ .needle = *want,
+ .reader = br,
};
- struct block_iter next = BLOCK_ITER_INIT;
struct reftable_record rec;
- int err = 0, i;
-
+ int err = 0;
+ size_t i;
+
+ /*
+ * Perform a binary search over the block's restart points, which
+ * avoids doing a linear scan over the whole block. Like this, we
+ * identify the section of the block that should contain our key.
+ *
+ * Note that we explicitly search for the first restart point _greater_
+ * than the sought-after record, not _greater or equal_ to it. In case
+ * the sought-after record is located directly at the restart point we
+ * would otherwise start doing the linear search at the preceding
+ * restart point. While that works alright, we would end up scanning
+ * too many records.
+ */
+ i = binsearch(br->restart_count, &restart_needle_less, &args);
if (args.error) {
err = REFTABLE_FORMAT_ERROR;
goto done;
}
- i = binsearch(br->restart_count, &restart_key_less, &args);
+ /*
+ * Now there are multiple cases:
+ *
+ * - `i == 0`: The wanted record is smaller than the record found at
+ * the first restart point. As the first restart point is the first
+ * record in the block, our wanted record cannot be located in this
+ * block at all. We still need to position the iterator so that the
+ * next call to `block_iter_next()` will yield an end-of-iterator
+ * signal.
+ *
+ * - `i == restart_count`: The wanted record was not found at any of
+ * the restart points. As there is no restart point at the end of
+ * the section, the record may thus be contained in the last block.
+ *
+ * - `i > 0`: The wanted record must be contained in the section
+ * before the found restart point. We thus do a linear search
+ * starting from the preceding restart point.
+ */
if (i > 0)
it->next_off = block_reader_restart_offset(br, i - 1);
else
it->next_off = br->header_off + 4;
- it->br = br;
+ it->block = br->block.data;
+ it->block_len = br->block_len;
+ it->hash_size = br->hash_size;
reftable_record_init(&rec, block_reader_type(br));
- /* We're looking for the last entry less/equal than the wanted key, so
- we have to go one entry too far and then back up.
- */
+ /*
+ * We're looking for the last entry less than the wanted key so that
+ * the next call to `block_iter_next()` would yield the wanted
+ * record. We thus don't want to position our reader at the sought
+ * after record, but one before. To do so, we have to go one entry too
+ * far and then back up.
+ */
while (1) {
- block_iter_copy_from(&next, it);
- err = block_iter_next(&next, &rec);
+ size_t prev_off = it->next_off;
+
+ err = block_iter_next(it, &rec);
if (err < 0)
goto done;
-
- reftable_record_key(&rec, &it->last_key);
- if (err > 0 || strbuf_cmp(&it->last_key, want) >= 0) {
+ if (err > 0) {
+ it->next_off = prev_off;
err = 0;
goto done;
}
- block_iter_copy_from(it, &next);
+ /*
+ * Check whether the current key is greater or equal to the
+ * sought-after key. In case it is greater we know that the
+ * record does not exist in the block and can thus abort early.
+ * In case it is equal to the sought-after key we have found
+ * the desired record.
+ *
+ * Note that we store the next record's key directly in
+ * `last_key` without restoring the key of the preceding record
+ * in case we need to go one record back. This is safe to do as
+ * `block_iter_next()` would return the ref whose key is equal
+ * to `last_key` now, and naturally all keys share a prefix
+ * with themselves.
+ */
+ reftable_record_key(&rec, &it->last_key);
+ if (strbuf_cmp(&it->last_key, want) >= 0) {
+ it->next_off = prev_off;
+ goto done;
+ }
}
done:
- block_iter_close(&next);
reftable_record_release(&rec);
-
return err;
}
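Pulling the reshaped API together: an iterator no longer holds a pointer to its reader, so both seek functions take the reader explicitly and the iterator can be reset and reused. A minimal sketch, assuming a `struct block_reader br` already set up via `block_reader_init()` and an illustrative key:

struct block_iter it = BLOCK_ITER_INIT;
struct reftable_record rec;
struct strbuf want = STRBUF_INIT;

reftable_record_init(&rec, block_reader_type(&br));
strbuf_addstr(&want, "refs/heads/main");

if (!block_iter_seek_key(&it, &br, &want))
	while (block_iter_next(&it, &rec) == 0)
		; /* records at or after `want`, in key order */

block_iter_reset(&it);		/* ready for reuse, scratch buffers kept */
block_iter_close(&it);		/* frees last_key and scratch */
strbuf_release(&want);
reftable_record_release(&rec);
block_reader_release(&br);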
diff --git a/reftable/block.h b/reftable/block.h
index 47acc62c0a..ea4384a7e2 100644
--- a/reftable/block.h
+++ b/reftable/block.h
@@ -56,6 +56,8 @@ int block_writer_finish(struct block_writer *w);
/* clears out internally allocated block_writer members. */
void block_writer_release(struct block_writer *bw);
+struct z_stream;
+
/* Read a block. */
struct block_reader {
/* offset of the block header; nonzero for the first block in a
@@ -66,6 +68,11 @@ struct block_reader {
struct reftable_block block;
int hash_size;
+ /* Uncompressed data for log entries. */
+ z_stream *zstream;
+ unsigned char *uncompressed_data;
+ size_t uncompressed_cap;
+
/* size of the data, excluding restart data. */
uint32_t block_len;
uint8_t *restart_bytes;
@@ -76,11 +83,26 @@ struct block_reader {
uint32_t full_block_size;
};
+/* initializes a block reader. */
+int block_reader_init(struct block_reader *br, struct reftable_block *bl,
+ uint32_t header_off, uint32_t table_block_size,
+ int hash_size);
+
+void block_reader_release(struct block_reader *br);
+
+/* Returns the block type (eg. 'r' for refs) */
+uint8_t block_reader_type(const struct block_reader *r);
+
+/* Decodes the first key in the block */
+int block_reader_first_key(const struct block_reader *br, struct strbuf *key);
+
/* Iterate over entries in a block */
struct block_iter {
/* offset within the block of the next entry to read. */
uint32_t next_off;
- struct block_reader *br;
+ const unsigned char *block;
+ size_t block_len;
+ int hash_size;
/* key for last entry we read. */
struct strbuf last_key;
@@ -92,31 +114,18 @@ struct block_iter {
.scratch = STRBUF_INIT, \
}
-/* initializes a block reader. */
-int block_reader_init(struct block_reader *br, struct reftable_block *bl,
- uint32_t header_off, uint32_t table_block_size,
- int hash_size);
-
/* Position `it` at start of the block */
-void block_reader_start(struct block_reader *br, struct block_iter *it);
+void block_iter_seek_start(struct block_iter *it, const struct block_reader *br);
/* Position `it` to the `want` key in the block */
-int block_reader_seek(struct block_reader *br, struct block_iter *it,
- struct strbuf *want);
-
-/* Returns the block type (eg. 'r' for refs) */
-uint8_t block_reader_type(struct block_reader *r);
-
-/* Decodes the first key in the block */
-int block_reader_first_key(struct block_reader *br, struct strbuf *key);
-
-void block_iter_copy_from(struct block_iter *dest, struct block_iter *src);
+int block_iter_seek_key(struct block_iter *it, const struct block_reader *br,
+ struct strbuf *want);
/* return < 0 for error, 0 for OK, > 0 for EOF. */
int block_iter_next(struct block_iter *it, struct reftable_record *rec);
-/* Seek to `want` with in the block pointed to by `it` */
-int block_iter_seek(struct block_iter *it, struct strbuf *want);
+/* Reset the block iterator to pristine state without releasing its memory. */
+void block_iter_reset(struct block_iter *it);
/* deallocate memory for `it`. The block reader and its block is left intact. */
void block_iter_close(struct block_iter *it);
diff --git a/reftable/block_test.c b/reftable/block_test.c
index e162c6e33f..26a9cfbc83 100644
--- a/reftable/block_test.c
+++ b/reftable/block_test.c
@@ -69,7 +69,7 @@ static void test_block_read_write(void)
block_reader_init(&br, &block, header_off, block_size, GIT_SHA1_RAWSZ);
- block_reader_start(&br, &it);
+ block_iter_seek_start(&it, &br);
while (1) {
int r = block_iter_next(&it, &rec);
@@ -89,7 +89,7 @@ static void test_block_read_write(void)
strbuf_reset(&want);
strbuf_addstr(&want, names[i]);
- n = block_reader_seek(&br, &it, &want);
+ n = block_iter_seek_key(&it, &br, &want);
EXPECT(n == 0);
n = block_iter_next(&it, &rec);
@@ -98,7 +98,7 @@ static void test_block_read_write(void)
EXPECT_STREQ(names[i], rec.u.ref.refname);
want.len--;
- n = block_reader_seek(&br, &it, &want);
+ n = block_iter_seek_key(&it, &br, &want);
EXPECT(n == 0);
n = block_iter_next(&it, &rec);
diff --git a/reftable/iter.c b/reftable/iter.c
index 7aa30c4a51..aa9ac199b1 100644
--- a/reftable/iter.c
+++ b/reftable/iter.c
@@ -115,7 +115,7 @@ static int indexed_table_ref_iter_next_block(struct indexed_table_ref_iter *it)
/* indexed block does not exist. */
return REFTABLE_FORMAT_ERROR;
}
- block_reader_start(&it->block_reader, &it->cur);
+ block_iter_seek_start(&it->cur, &it->block_reader);
return 0;
}
diff --git a/reftable/reader.c b/reftable/reader.c
index b113daab77..481dff10d4 100644
--- a/reftable/reader.c
+++ b/reftable/reader.c
@@ -220,6 +220,7 @@ struct table_iter {
struct reftable_reader *r;
uint8_t typ;
uint64_t block_off;
+ struct block_reader br;
struct block_iter bi;
int is_finished;
};
@@ -227,16 +228,6 @@ struct table_iter {
.bi = BLOCK_ITER_INIT \
}
-static void table_iter_copy_from(struct table_iter *dest,
- struct table_iter *src)
-{
- dest->r = src->r;
- dest->typ = src->typ;
- dest->block_off = src->block_off;
- dest->is_finished = src->is_finished;
- block_iter_copy_from(&dest->bi, &src->bi);
-}
-
static int table_iter_next_in_block(struct table_iter *ti,
struct reftable_record *rec)
{
@@ -250,14 +241,8 @@ static int table_iter_next_in_block(struct table_iter *ti,
static void table_iter_block_done(struct table_iter *ti)
{
- if (!ti->bi.br) {
- return;
- }
- reftable_block_done(&ti->bi.br->block);
- FREE_AND_NULL(ti->bi.br);
-
- ti->bi.last_key.len = 0;
- ti->bi.next_off = 0;
+ block_reader_release(&ti->br);
+ block_iter_reset(&ti->bi);
}
static int32_t extract_block_size(uint8_t *data, uint8_t *typ, uint64_t off,
@@ -321,32 +306,27 @@ done:
return err;
}
-static int table_iter_next_block(struct table_iter *dest,
- struct table_iter *src)
+static void table_iter_close(struct table_iter *ti)
{
- uint64_t next_block_off = src->block_off + src->bi.br->full_block_size;
- struct block_reader br = { 0 };
- int err = 0;
+ table_iter_block_done(ti);
+ block_iter_close(&ti->bi);
+}
- dest->r = src->r;
- dest->typ = src->typ;
- dest->block_off = next_block_off;
+static int table_iter_next_block(struct table_iter *ti)
+{
+ uint64_t next_block_off = ti->block_off + ti->br.full_block_size;
+ int err;
- err = reader_init_block_reader(src->r, &br, next_block_off, src->typ);
- if (err > 0) {
- dest->is_finished = 1;
- return 1;
- }
- if (err != 0)
+ err = reader_init_block_reader(ti->r, &ti->br, next_block_off, ti->typ);
+ if (err > 0)
+ ti->is_finished = 1;
+ if (err)
return err;
- else {
- struct block_reader *brp =
- reftable_malloc(sizeof(struct block_reader));
- *brp = br;
- dest->is_finished = 0;
- block_reader_start(brp, &dest->bi);
- }
+ ti->block_off = next_block_off;
+ ti->is_finished = 0;
+ block_iter_seek_start(&ti->bi, &ti->br);
+
return 0;
}
@@ -356,7 +336,6 @@ static int table_iter_next(struct table_iter *ti, struct reftable_record *rec)
return REFTABLE_API_ERROR;
while (1) {
- struct table_iter next = TABLE_ITER_INIT;
int err;
if (ti->is_finished)
@@ -376,15 +355,11 @@ static int table_iter_next(struct table_iter *ti, struct reftable_record *rec)
* table and retry. If there are no more blocks then the
* iterator is drained.
*/
- err = table_iter_next_block(&next, ti);
- table_iter_block_done(ti);
+ err = table_iter_next_block(ti);
if (err) {
ti->is_finished = 1;
return err;
}
-
- table_iter_copy_from(ti, &next);
- block_iter_close(&next.bi);
}
}
@@ -393,16 +368,14 @@ static int table_iter_next_void(void *ti, struct reftable_record *rec)
return table_iter_next(ti, rec);
}
-static void table_iter_close(void *p)
+static void table_iter_close_void(void *ti)
{
- struct table_iter *ti = p;
- table_iter_block_done(ti);
- block_iter_close(&ti->bi);
+ table_iter_close(ti);
}
static struct reftable_iterator_vtable table_iter_vtable = {
.next = &table_iter_next_void,
- .close = &table_iter_close,
+ .close = &table_iter_close_void,
};
static void iterator_from_table_iter(struct reftable_iterator *it,
@@ -417,19 +390,16 @@ static int reader_table_iter_at(struct reftable_reader *r,
struct table_iter *ti, uint64_t off,
uint8_t typ)
{
- struct block_reader br = { 0 };
- struct block_reader *brp = NULL;
+ int err;
- int err = reader_init_block_reader(r, &br, off, typ);
+ err = reader_init_block_reader(r, &ti->br, off, typ);
if (err != 0)
return err;
- brp = reftable_malloc(sizeof(struct block_reader));
- *brp = br;
ti->r = r;
- ti->typ = block_reader_type(brp);
+ ti->typ = block_reader_type(&ti->br);
ti->block_off = off;
- block_reader_start(brp, &ti->bi);
+ block_iter_seek_start(&ti->bi, &ti->br);
return 0;
}
@@ -454,23 +424,52 @@ static int reader_seek_linear(struct table_iter *ti,
{
struct strbuf want_key = STRBUF_INIT;
struct strbuf got_key = STRBUF_INIT;
- struct table_iter next = TABLE_ITER_INIT;
struct reftable_record rec;
int err = -1;
reftable_record_init(&rec, reftable_record_type(want));
reftable_record_key(want, &want_key);
+ /*
+ * First we need to locate the block that must contain our record. To
+ * do so we scan through blocks linearly until we find the first block
+ * whose first key is bigger than our wanted key. Once we have found
+ * that block we know that the key must be contained in the preceding
+ * block.
+ *
+ * This algorithm is somewhat unfortunate because it means that we
+ * always have to seek one block too far and then back up. But as we
+ * can only decode the _first_ key of a block but not its _last_ key we
+ * have no other way to do this.
+ */
while (1) {
- err = table_iter_next_block(&next, ti);
+ struct table_iter next = *ti;
+
+ /*
+ * We must be careful to not modify underlying data of `ti`
+ * because we may find that `next` does not contain our desired
+ * block, but that `ti` does. In that case, we would discard
+ * `next` and continue with `ti`.
+ *
+ * This also means that we cannot reuse allocated memory for
+ * `next` here. While it would be great if we could, it should
+ * in practice not be too bad given that we should only ever
+ * end up doing linear seeks with at most three blocks. As soon
+ * as we have more than three blocks we would have an index, so
+ * we would not do a linear search there anymore.
+ */
+ memset(&next.br.block, 0, sizeof(next.br.block));
+ next.br.zstream = NULL;
+ next.br.uncompressed_data = NULL;
+ next.br.uncompressed_cap = 0;
+
+ err = table_iter_next_block(&next);
if (err < 0)
goto done;
-
- if (err > 0) {
+ if (err > 0)
break;
- }
- err = block_reader_first_key(next.bi.br, &got_key);
+ err = block_reader_first_key(&next.br, &got_key);
if (err < 0)
goto done;
@@ -480,16 +479,20 @@ static int reader_seek_linear(struct table_iter *ti,
}
table_iter_block_done(ti);
- table_iter_copy_from(ti, &next);
+ *ti = next;
}
- err = block_iter_seek(&ti->bi, &want_key);
+ /*
+ * We have located the block that must contain our record, so we seek
+ * the wanted key inside of it. If the block does not contain our key
+ * we know that the corresponding record does not exist.
+ */
+ err = block_iter_seek_key(&ti->bi, &ti->br, &want_key);
if (err < 0)
goto done;
err = 0;
done:
- block_iter_close(&next.bi);
reftable_record_release(&rec);
strbuf_release(&want_key);
strbuf_release(&got_key);
@@ -508,6 +511,7 @@ static int reader_seek_indexed(struct reftable_reader *r,
.u.idx = { .last_key = STRBUF_INIT },
};
struct table_iter index_iter = TABLE_ITER_INIT;
+ struct table_iter empty = TABLE_ITER_INIT;
struct table_iter next = TABLE_ITER_INIT;
int err = 0;
@@ -549,7 +553,6 @@ static int reader_seek_indexed(struct reftable_reader *r,
* not exist.
*/
err = table_iter_next(&index_iter, &index_result);
- table_iter_block_done(&index_iter);
if (err != 0)
goto done;
@@ -558,7 +561,7 @@ static int reader_seek_indexed(struct reftable_reader *r,
if (err != 0)
goto done;
- err = block_iter_seek(&next.bi, &want_index.u.idx.last_key);
+ err = block_iter_seek_key(&next.bi, &next.br, &want_index.u.idx.last_key);
if (err < 0)
goto done;
@@ -572,18 +575,20 @@ static int reader_seek_indexed(struct reftable_reader *r,
break;
}
- table_iter_copy_from(&index_iter, &next);
+ table_iter_close(&index_iter);
+ index_iter = next;
+ next = empty;
}
if (err == 0) {
- struct table_iter empty = TABLE_ITER_INIT;
struct table_iter *malloced = reftable_calloc(1, sizeof(*malloced));
- *malloced = empty;
- table_iter_copy_from(malloced, &next);
+ *malloced = next;
+ next = empty;
iterator_from_table_iter(it, malloced);
}
+
done:
- block_iter_close(&next.bi);
+ table_iter_close(&next);
table_iter_close(&index_iter);
reftable_record_release(&want_index);
reftable_record_release(&index_result);
@@ -597,25 +602,28 @@ static int reader_seek_internal(struct reftable_reader *r,
struct reftable_reader_offsets *offs =
reader_offsets_for(r, reftable_record_type(rec));
uint64_t idx = offs->index_offset;
- struct table_iter ti = TABLE_ITER_INIT;
- int err = 0;
+ struct table_iter ti = TABLE_ITER_INIT, *p;
+ int err;
+
if (idx > 0)
return reader_seek_indexed(r, it, rec);
err = reader_start(r, &ti, reftable_record_type(rec), 0);
if (err < 0)
- return err;
+ goto out;
+
err = reader_seek_linear(&ti, rec);
if (err < 0)
- return err;
- else {
- struct table_iter *p =
- reftable_malloc(sizeof(struct table_iter));
- *p = ti;
- iterator_from_table_iter(it, p);
- }
+ goto out;
- return 0;
+ REFTABLE_ALLOC_ARRAY(p, 1);
+ *p = ti;
+ iterator_from_table_iter(it, p);
+
+out:
+ if (err)
+ table_iter_close(&ti);
+ return err;
}
static int reader_seek(struct reftable_reader *r, struct reftable_iterator *it,
diff --git a/reftable/record.c b/reftable/record.c
index 23b497adab..5506f3e913 100644
--- a/reftable/record.c
+++ b/reftable/record.c
@@ -159,26 +159,42 @@ int reftable_encode_key(int *restart, struct string_view dest,
return start.len - dest.len;
}
-int reftable_decode_key(struct strbuf *last_key, uint8_t *extra,
- struct string_view in)
+int reftable_decode_keylen(struct string_view in,
+ uint64_t *prefix_len,
+ uint64_t *suffix_len,
+ uint8_t *extra)
{
- int start_len = in.len;
- uint64_t prefix_len = 0;
- uint64_t suffix_len = 0;
+ size_t start_len = in.len;
int n;
- n = get_var_int(&prefix_len, &in);
+ n = get_var_int(prefix_len, &in);
if (n < 0)
return -1;
string_view_consume(&in, n);
- n = get_var_int(&suffix_len, &in);
+ n = get_var_int(suffix_len, &in);
if (n <= 0)
return -1;
string_view_consume(&in, n);
- *extra = (uint8_t)(suffix_len & 0x7);
- suffix_len >>= 3;
+ *extra = (uint8_t)(*suffix_len & 0x7);
+ *suffix_len >>= 3;
+
+ return start_len - in.len;
+}
+
+int reftable_decode_key(struct strbuf *last_key, uint8_t *extra,
+ struct string_view in)
+{
+ int start_len = in.len;
+ uint64_t prefix_len = 0;
+ uint64_t suffix_len = 0;
+ int n;
+
+ n = reftable_decode_keylen(in, &prefix_len, &suffix_len, extra);
+ if (n < 0)
+ return -1;
+ string_view_consume(&in, n);
if (in.len < suffix_len ||
prefix_len > last_key->len)
diff --git a/reftable/record.h b/reftable/record.h
index 826ee1c55c..d778133e6e 100644
--- a/reftable/record.h
+++ b/reftable/record.h
@@ -86,6 +86,12 @@ int reftable_encode_key(int *is_restart, struct string_view dest,
struct strbuf prev_key, struct strbuf key,
uint8_t extra);
+/* Decode a record's key lengths. */
+int reftable_decode_keylen(struct string_view in,
+ uint64_t *prefix_len,
+ uint64_t *suffix_len,
+ uint8_t *extra);
+
/*
* Decode into `last_key` and `extra` from `in`. `last_key` is expected to
* contain the decoded key of the preceding record, if any.
diff --git a/reftable/refname.c b/reftable/refname.c
index 7570e4acf9..bbfde15754 100644
--- a/reftable/refname.c
+++ b/reftable/refname.c
@@ -12,15 +12,15 @@
#include "refname.h"
#include "reftable-iterator.h"
-struct find_arg {
- char **names;
- const char *want;
+struct refname_needle_lesseq_args {
+ char **haystack;
+ const char *needle;
};
-static int find_name(size_t k, void *arg)
+static int refname_needle_lesseq(size_t k, void *_args)
{
- struct find_arg *f_arg = arg;
- return strcmp(f_arg->names[k], f_arg->want) >= 0;
+ struct refname_needle_lesseq_args *args = _args;
+ return strcmp(args->needle, args->haystack[k]) <= 0;
}
static int modification_has_ref(struct modification *mod, const char *name)
@@ -29,25 +29,23 @@ static int modification_has_ref(struct modification *mod, const char *name)
int err = 0;
if (mod->add_len > 0) {
- struct find_arg arg = {
- .names = mod->add,
- .want = name,
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->add,
+ .needle = name,
};
- int idx = binsearch(mod->add_len, find_name, &arg);
- if (idx < mod->add_len && !strcmp(mod->add[idx], name)) {
+ size_t idx = binsearch(mod->add_len, refname_needle_lesseq, &args);
+ if (idx < mod->add_len && !strcmp(mod->add[idx], name))
return 0;
- }
}
if (mod->del_len > 0) {
- struct find_arg arg = {
- .names = mod->del,
- .want = name,
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->del,
+ .needle = name,
};
- int idx = binsearch(mod->del_len, find_name, &arg);
- if (idx < mod->del_len && !strcmp(mod->del[idx], name)) {
+ size_t idx = binsearch(mod->del_len, refname_needle_lesseq, &args);
+ if (idx < mod->del_len && !strcmp(mod->del[idx], name))
return 1;
- }
}
err = reftable_table_read_ref(&mod->tab, name, &ref);
@@ -73,11 +71,11 @@ static int modification_has_ref_with_prefix(struct modification *mod,
int err = 0;
if (mod->add_len > 0) {
- struct find_arg arg = {
- .names = mod->add,
- .want = prefix,
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->add,
+ .needle = prefix,
};
- int idx = binsearch(mod->add_len, find_name, &arg);
+ size_t idx = binsearch(mod->add_len, refname_needle_lesseq, &args);
if (idx < mod->add_len &&
!strncmp(prefix, mod->add[idx], strlen(prefix)))
goto done;
@@ -92,15 +90,14 @@ static int modification_has_ref_with_prefix(struct modification *mod,
goto done;
if (mod->del_len > 0) {
- struct find_arg arg = {
- .names = mod->del,
- .want = ref.refname,
+ struct refname_needle_lesseq_args args = {
+ .haystack = mod->del,
+ .needle = ref.refname,
};
- int idx = binsearch(mod->del_len, find_name, &arg);
+ size_t idx = binsearch(mod->del_len, refname_needle_lesseq, &args);
if (idx < mod->del_len &&
- !strcmp(ref.refname, mod->del[idx])) {
+ !strcmp(ref.refname, mod->del[idx]))
continue;
- }
}
if (strncmp(ref.refname, prefix, strlen(prefix))) {
diff --git a/reftable/reftable-writer.h b/reftable/reftable-writer.h
index 7c7cae5f99..155bf0bbe2 100644
--- a/reftable/reftable-writer.h
+++ b/reftable/reftable-writer.h
@@ -46,6 +46,9 @@ struct reftable_write_options {
* is a single line, and add '\n' if missing.
*/
unsigned exact_log_message : 1;
+
+ /* boolean: Prevent auto-compaction of tables. */
+ unsigned disable_auto_compact : 1;
};
/* reftable_block_stats holds statistics for a single block type */
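With the knob now part of the public write options instead of a private stack field, callers decide about auto-compaction when opening the stack. A minimal sketch (the directory path is a placeholder):

struct reftable_write_options opts = {
	.block_size = 4096,
	.disable_auto_compact = 1,	/* compact explicitly, e.g. from tests */
};
struct reftable_stack *st = NULL;
int err = reftable_new_stack(&st, "/path/to/.git/reftable", opts);

if (!err) {
	/* additions committed here will not trigger auto-compaction */
	reftable_stack_destroy(st);
}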
diff --git a/reftable/stack.c b/reftable/stack.c
index dde50b61d6..80266bcbab 100644
--- a/reftable/stack.c
+++ b/reftable/stack.c
@@ -680,7 +680,7 @@ int reftable_addition_commit(struct reftable_addition *add)
if (err)
goto done;
- if (!add->stack->disable_auto_compact) {
+ if (!add->stack->config.disable_auto_compact) {
/*
* Auto-compact the stack to keep the number of tables in
* control. It is possible that a concurrent writer is already
@@ -1216,75 +1216,76 @@ static int segment_size(struct segment *s)
return s->end - s->start;
}
-int fastlog2(uint64_t sz)
-{
- int l = 0;
- if (sz == 0)
- return 0;
- for (; sz; sz /= 2) {
- l++;
- }
- return l - 1;
-}
-
-struct segment *sizes_to_segments(size_t *seglen, uint64_t *sizes, size_t n)
-{
- struct segment *segs = reftable_calloc(n, sizeof(*segs));
- struct segment cur = { 0 };
- size_t next = 0, i;
-
- if (n == 0) {
- *seglen = 0;
- return segs;
- }
- for (i = 0; i < n; i++) {
- int log = fastlog2(sizes[i]);
- if (cur.log != log && cur.bytes > 0) {
- struct segment fresh = {
- .start = i,
- };
-
- segs[next++] = cur;
- cur = fresh;
- }
-
- cur.log = log;
- cur.end = i + 1;
- cur.bytes += sizes[i];
- }
- segs[next++] = cur;
- *seglen = next;
- return segs;
-}
-
struct segment suggest_compaction_segment(uint64_t *sizes, size_t n)
{
- struct segment min_seg = {
- .log = 64,
- };
- struct segment *segs;
- size_t seglen = 0, i;
-
- segs = sizes_to_segments(&seglen, sizes, n);
- for (i = 0; i < seglen; i++) {
- if (segment_size(&segs[i]) == 1)
- continue;
+ struct segment seg = { 0 };
+ uint64_t bytes;
+ size_t i;
- if (segs[i].log < min_seg.log)
- min_seg = segs[i];
- }
+ /*
+ * If there are no tables or only a single one then we don't have to
+ * compact anything. The sequence is geometric by definition already.
+ */
+ if (n <= 1)
+ return seg;
- while (min_seg.start > 0) {
- size_t prev = min_seg.start - 1;
- if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev]))
+ /*
+ * Find the ending table of the compaction segment needed to restore the
+ * geometric sequence. Note that the segment end is exclusive.
+ *
+ * To do so, we iterate backwards starting from the most recent table
+ * until a valid segment end is found. If the preceding table is smaller
+ * than the current table multiplied by the geometric factor (2), the
+ * compaction segment end has been identified.
+ *
+ * Tables after the ending point are not added to the byte count because
+ * they are already valid members of the geometric sequence. Due to the
+ * properties of a geometric sequence, it is not possible for the sum of
+ * these tables to exceed the value of the ending point table.
+ *
+ * Example table size sequence requiring no compaction:
+ * 64, 32, 16, 8, 4, 2, 1
+ *
+ * Example table size sequence where compaction segment end is set to
+ * the last table. Since the segment end is exclusive, the last table is
+ * excluded during subsequent compaction and the table with size 3 is
+ * the final table included:
+ * 64, 32, 16, 8, 4, 3, 1
+ */
+ for (i = n - 1; i > 0; i--) {
+ if (sizes[i - 1] < sizes[i] * 2) {
+ seg.end = i + 1;
+ bytes = sizes[i];
break;
+ }
+ }
- min_seg.start = prev;
- min_seg.bytes += sizes[prev];
+ /*
+ * Find the starting table of the compaction segment by iterating
+ * through the remaining tables and keeping track of the accumulated
+ * size of all tables seen from the segment end table. The previous
+ * table is compared to the accumulated size because the tables from the
+ * segment end are merged backwards recursively.
+ *
+ * Note that we keep iterating even after we have found the first
+ * starting point. This is because there may be tables in the stack
+ * preceding that first starting point which violate the geometric
+ * sequence.
+ *
+ * Example compaction segment start set to table with size 32:
+ * 128, 32, 16, 8, 4, 3, 1
+ */
+ for (; i > 0; i--) {
+ uint64_t curr = bytes;
+ bytes += sizes[i - 1];
+
+ if (sizes[i - 1] < curr * 2) {
+ seg.start = i - 1;
+ seg.bytes = bytes;
+ }
}
- reftable_free(segs);
- return min_seg;
+ return seg;
}
static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
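The effect of the rewritten heuristic is easiest to see with the sizes used in the updated test further down: starting from the newest table and accumulating sizes backwards, every older table up to index 1 stays below twice the accumulated size, so the suggested segment covers tables 1 through 9. A minimal sketch, with the expected values taken from that test:

uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
struct segment seg = suggest_compaction_segment(sizes, ARRAY_SIZE(sizes));

/*
 * seg.start == 1, seg.end == 10 (the end is exclusive). Compacting
 * tables 1..9 merges roughly 158 bytes into one table, and the
 * resulting stack {512, 158} is geometric again (512 >= 2 * 158).
 */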
diff --git a/reftable/stack.h b/reftable/stack.h
index d919455669..d43efa4760 100644
--- a/reftable/stack.h
+++ b/reftable/stack.h
@@ -19,7 +19,6 @@ struct reftable_stack {
int list_fd;
char *reftable_dir;
- int disable_auto_compact;
struct reftable_write_options config;
@@ -33,12 +32,9 @@ int read_lines(const char *filename, char ***lines);
struct segment {
size_t start, end;
- int log;
uint64_t bytes;
};
-int fastlog2(uint64_t sz);
-struct segment *sizes_to_segments(size_t *seglen, uint64_t *sizes, size_t n);
struct segment suggest_compaction_segment(uint64_t *sizes, size_t n);
#endif
diff --git a/reftable/stack_test.c b/reftable/stack_test.c
index 351e35bd86..1df3ffce52 100644
--- a/reftable/stack_test.c
+++ b/reftable/stack_test.c
@@ -325,7 +325,7 @@ static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
* we can ensure that we indeed honor this setting and have
* better control over when exactly auto compaction runs.
*/
- st->disable_auto_compact = i != n;
+ st->config.disable_auto_compact = i != n;
err = reftable_stack_new_addition(&add, st);
EXPECT_ERR(err);
@@ -497,6 +497,7 @@ static void test_reftable_stack_add(void)
struct reftable_write_options cfg = {
.exact_log_message = 1,
.default_permissions = 0660,
+ .disable_auto_compact = 1,
};
struct reftable_stack *st = NULL;
char *dir = get_tmp_dir(__LINE__);
@@ -508,7 +509,6 @@ static void test_reftable_stack_add(void)
err = reftable_new_stack(&st, dir, cfg);
EXPECT_ERR(err);
- st->disable_auto_compact = 1;
for (i = 0; i < N; i++) {
char buf[256];
@@ -770,59 +770,13 @@ static void test_reftable_stack_hash_id(void)
clear_dir(dir);
}
-static void test_log2(void)
-{
- EXPECT(1 == fastlog2(3));
- EXPECT(2 == fastlog2(4));
- EXPECT(2 == fastlog2(5));
-}
-
-static void test_sizes_to_segments(void)
-{
- uint64_t sizes[] = { 2, 3, 4, 5, 7, 9 };
- /* .................0 1 2 3 4 5 */
-
- size_t seglen = 0;
- struct segment *segs =
- sizes_to_segments(&seglen, sizes, ARRAY_SIZE(sizes));
- EXPECT(segs[2].log == 3);
- EXPECT(segs[2].start == 5);
- EXPECT(segs[2].end == 6);
-
- EXPECT(segs[1].log == 2);
- EXPECT(segs[1].start == 2);
- EXPECT(segs[1].end == 5);
- reftable_free(segs);
-}
-
-static void test_sizes_to_segments_empty(void)
-{
- size_t seglen = 0;
- struct segment *segs = sizes_to_segments(&seglen, NULL, 0);
- EXPECT(seglen == 0);
- reftable_free(segs);
-}
-
-static void test_sizes_to_segments_all_equal(void)
-{
- uint64_t sizes[] = { 5, 5 };
- size_t seglen = 0;
- struct segment *segs =
- sizes_to_segments(&seglen, sizes, ARRAY_SIZE(sizes));
- EXPECT(seglen == 1);
- EXPECT(segs[0].start == 0);
- EXPECT(segs[0].end == 2);
- reftable_free(segs);
-}
-
static void test_suggest_compaction_segment(void)
{
- uint64_t sizes[] = { 128, 64, 17, 16, 9, 9, 9, 16, 16 };
- /* .................0 1 2 3 4 5 6 */
+ uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
struct segment min =
suggest_compaction_segment(sizes, ARRAY_SIZE(sizes));
- EXPECT(min.start == 2);
- EXPECT(min.end == 7);
+ EXPECT(min.start == 1);
+ EXPECT(min.end == 10);
}
static void test_suggest_compaction_segment_nothing(void)
@@ -933,9 +887,21 @@ static void test_empty_add(void)
reftable_stack_destroy(st2);
}
+static int fastlog2(uint64_t sz)
+{
+ int l = 0;
+ if (sz == 0)
+ return 0;
+ for (; sz; sz /= 2)
+ l++;
+ return l - 1;
+}
+
static void test_reftable_stack_auto_compaction(void)
{
- struct reftable_write_options cfg = { 0 };
+ struct reftable_write_options cfg = {
+ .disable_auto_compact = 1,
+ };
struct reftable_stack *st = NULL;
char *dir = get_tmp_dir(__LINE__);
@@ -945,7 +911,6 @@ static void test_reftable_stack_auto_compaction(void)
err = reftable_new_stack(&st, dir, cfg);
EXPECT_ERR(err);
- st->disable_auto_compact = 1; /* call manually below for coverage. */
for (i = 0; i < N; i++) {
char name[100];
struct reftable_ref_record ref = {
@@ -994,7 +959,7 @@ static void test_reftable_stack_add_performs_auto_compaction(void)
* we can ensure that we indeed honor this setting and have
* better control over when exactly auto compaction runs.
*/
- st->disable_auto_compact = i != n;
+ st->config.disable_auto_compact = i != n;
strbuf_reset(&refname);
strbuf_addf(&refname, "branch-%04d", i);
@@ -1121,7 +1086,6 @@ static void test_reftable_stack_compaction_concurrent_clean(void)
int stack_test_main(int argc, const char *argv[])
{
RUN_TEST(test_empty_add);
- RUN_TEST(test_log2);
RUN_TEST(test_names_equal);
RUN_TEST(test_parse_names);
RUN_TEST(test_read_file);
@@ -1142,9 +1106,6 @@ int stack_test_main(int argc, const char *argv[])
RUN_TEST(test_reftable_stack_update_index_check);
RUN_TEST(test_reftable_stack_uptodate);
RUN_TEST(test_reftable_stack_validate_refname);
- RUN_TEST(test_sizes_to_segments);
- RUN_TEST(test_sizes_to_segments_all_equal);
- RUN_TEST(test_sizes_to_segments_empty);
RUN_TEST(test_suggest_compaction_segment);
RUN_TEST(test_suggest_compaction_segment_nothing);
return 0;
diff --git a/remote-curl.c b/remote-curl.c
index 31b02b8840..0b6d7815fd 100644
--- a/remote-curl.c
+++ b/remote-curl.c
@@ -1,4 +1,5 @@
#include "git-compat-util.h"
+#include "git-curl-compat.h"
#include "config.h"
#include "environment.h"
#include "gettext.h"
@@ -955,7 +956,9 @@ retry:
/* The request body is large and the size cannot be predicted.
* We must use chunked encoding to send it.
*/
+#ifdef GIT_CURL_NEED_TRANSFER_ENCODING_HEADER
headers = curl_slist_append(headers, "Transfer-Encoding: chunked");
+#endif
rpc->initial_buffer = 1;
curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, rpc_out);
curl_easy_setopt(slot->curl, CURLOPT_INFILE, rpc);
diff --git a/revision.h b/revision.h
index 94c43138bc..0e470d1df1 100644
--- a/revision.h
+++ b/revision.h
@@ -142,6 +142,7 @@ struct rev_info {
/* Basic information */
const char *prefix;
const char *def;
+ char *ps_matched; /* optionally record matches of prune_data */
struct pathspec prune_data;
/*
diff --git a/t/check-non-portable-shell.pl b/t/check-non-portable-shell.pl
index dd8107cd7d..b2b28c2ced 100755
--- a/t/check-non-portable-shell.pl
+++ b/t/check-non-portable-shell.pl
@@ -47,6 +47,8 @@ while (<>) {
/\bgrep\b.*--file\b/ and err 'grep --file FILE is not portable (use grep -f FILE)';
/\b[ef]grep\b/ and err 'egrep/fgrep obsolescent (use grep -E/-F)';
/\bexport\s+[A-Za-z0-9_]*=/ and err '"export FOO=bar" is not portable (use FOO=bar && export FOO)';
+ /\blocal\s+[A-Za-z0-9_]*=\$([A-Za-z0-9_{]|[(][^(])/ and
+ err q(quote "$val" in 'local var=$val');
/^\s*([A-Z0-9_]+=(\w*|(["']).*?\3)\s+)+(\w+)/ and exists($func{$4}) and
err '"FOO=bar shell_func" assignment extends beyond "shell_func"';
$line = '';
diff --git a/t/helper/test-date.c b/t/helper/test-date.c
index 0683d46574..f25512de9a 100644
--- a/t/helper/test-date.c
+++ b/t/helper/test-date.c
@@ -52,7 +52,7 @@ static void show_dates(const char **argv, const char *format)
arg++;
tz = atoi(arg);
- printf("%s -> %s\n", *argv, show_date(t, tz, &mode));
+ printf("%s -> %s\n", *argv, show_date(t, tz, mode));
}
date_mode_release(&mode);
diff --git a/t/lib-parallel-checkout.sh b/t/lib-parallel-checkout.sh
index acaee9cbb6..8324d6c96d 100644
--- a/t/lib-parallel-checkout.sh
+++ b/t/lib-parallel-checkout.sh
@@ -20,7 +20,7 @@ test_checkout_workers () {
BUG "too few arguments to test_checkout_workers"
fi &&
- local expected_workers=$1 &&
+ local expected_workers="$1" &&
shift &&
local trace_file=trace-test-checkout-workers &&
diff --git a/t/t0301-credential-cache.sh b/t/t0301-credential-cache.sh
index 8300faadea..f2c146fa2a 100755
--- a/t/t0301-credential-cache.sh
+++ b/t/t0301-credential-cache.sh
@@ -8,6 +8,14 @@ test -z "$NO_UNIX_SOCKETS" || {
skip_all='skipping credential-cache tests, unix sockets not available'
test_done
}
+if test_have_prereq MINGW
+then
+ service_running=$(sc query afunix | grep "4  RUNNING")
+ test -z "$service_running" || {
+ skip_all='skipping credential-cache tests, unix sockets not available'
+ test_done
+ }
+fi
uname_s=$(uname -s)
case $uname_s in
diff --git a/t/t0610-reftable-basics.sh b/t/t0610-reftable-basics.sh
index 931d888bbb..178791e086 100755
--- a/t/t0610-reftable-basics.sh
+++ b/t/t0610-reftable-basics.sh
@@ -83,7 +83,7 @@ test_expect_success 'init: reinitializing reftable with files backend fails' '
test_expect_perms () {
local perms="$1"
local file="$2"
- local actual=$(ls -l "$file") &&
+ local actual="$(ls -l "$file")" &&
case "$actual" in
$perms*)
@@ -96,23 +96,54 @@ test_expect_perms () {
esac
}
-for umask in 002 022
-do
- test_expect_success POSIXPERM 'init: honors core.sharedRepository' '
+test_expect_reftable_perms () {
+ local umask="$1"
+ local shared="$2"
+ local expect="$3"
+
+ test_expect_success POSIXPERM "init: honors --shared=$shared with umask $umask" '
test_when_finished "rm -rf repo" &&
(
umask $umask &&
- git init --shared=true repo &&
- test 1 = "$(git -C repo config core.sharedrepository)"
+ git init --shared=$shared repo
) &&
- test_expect_perms "-rw-rw-r--" repo/.git/reftable/tables.list &&
+ test_expect_perms "$expect" repo/.git/reftable/tables.list &&
for table in repo/.git/reftable/*.ref
do
- test_expect_perms "-rw-rw-r--" "$table" ||
+ test_expect_perms "$expect" "$table" ||
return 1
done
'
-done
+
+ test_expect_success POSIXPERM "pack-refs: honors --shared=$shared with umask $umask" '
+ test_when_finished "rm -rf repo" &&
+ (
+ umask $umask &&
+ git init --shared=$shared repo &&
+ test_commit -C repo A &&
+ test_line_count = 2 repo/.git/reftable/tables.list &&
+ git -C repo pack-refs
+ ) &&
+ test_expect_perms "$expect" repo/.git/reftable/tables.list &&
+ for table in repo/.git/reftable/*.ref
+ do
+ test_expect_perms "$expect" "$table" ||
+ return 1
+ done
+ '
+}
+
+test_expect_reftable_perms 002 umask "-rw-rw-r--"
+test_expect_reftable_perms 022 umask "-rw-r--r--"
+test_expect_reftable_perms 027 umask "-rw-r-----"
+
+test_expect_reftable_perms 002 group "-rw-rw-r--"
+test_expect_reftable_perms 022 group "-rw-rw-r--"
+test_expect_reftable_perms 027 group "-rw-rw----"
+
+test_expect_reftable_perms 002 world "-rw-rw-r--"
+test_expect_reftable_perms 022 world "-rw-rw-r--"
+test_expect_reftable_perms 027 world "-rw-rw-r--"
test_expect_success 'clone: can clone reftable repository' '
test_when_finished "rm -rf repo clone" &&
@@ -293,12 +324,46 @@ test_expect_success 'ref transaction: writes cause auto-compaction' '
test_line_count = 1 repo/.git/reftable/tables.list &&
test_commit -C repo --no-tag A &&
- test_line_count = 2 repo/.git/reftable/tables.list &&
+ test_line_count = 1 repo/.git/reftable/tables.list &&
test_commit -C repo --no-tag B &&
test_line_count = 1 repo/.git/reftable/tables.list
'
+test_expect_success 'ref transaction: env var disables compaction' '
+ test_when_finished "rm -rf repo" &&
+
+ git init repo &&
+ test_commit -C repo A &&
+
+ start=$(wc -l <repo/.git/reftable/tables.list) &&
+ iterations=5 &&
+ expected=$((start + iterations)) &&
+
+ for i in $(test_seq $iterations)
+ do
+ GIT_TEST_REFTABLE_AUTOCOMPACTION=false \
+ git -C repo update-ref branch-$i HEAD || return 1
+ done &&
+ test_line_count = $expected repo/.git/reftable/tables.list &&
+
+ git -C repo update-ref foo HEAD &&
+ test_line_count -lt $expected repo/.git/reftable/tables.list
+'
+
+test_expect_success 'ref transaction: alternating table sizes are compacted' '
+ test_when_finished "rm -rf repo" &&
+
+ git init repo &&
+ test_commit -C repo A &&
+ for i in $(test_seq 5)
+ do
+ git -C repo branch -f foo &&
+ git -C repo branch -d foo || return 1
+ done &&
+ test_line_count = 2 repo/.git/reftable/tables.list
+'
+
check_fsync_events () {
local trace="$1" &&
shift &&
@@ -324,7 +389,7 @@ test_expect_success 'ref transaction: writes are synced' '
git -C repo -c core.fsync=reference \
-c core.fsyncMethod=fsync update-ref refs/heads/branch HEAD &&
check_fsync_events trace2.txt <<-EOF
- "name":"hardware-flush","count":2
+ "name":"hardware-flush","count":4
EOF
'
@@ -356,7 +421,7 @@ test_expect_success 'ref transaction: fails gracefully when auto compaction fail
done ||
exit 1
done &&
- test_line_count = 13 .git/reftable/tables.list
+ test_line_count = 10 .git/reftable/tables.list
)
'
@@ -366,8 +431,8 @@ test_expect_success 'pack-refs: compacts tables' '
test_commit -C repo A &&
ls -1 repo/.git/reftable >table-files &&
- test_line_count = 4 table-files &&
- test_line_count = 3 repo/.git/reftable/tables.list &&
+ test_line_count = 3 table-files &&
+ test_line_count = 2 repo/.git/reftable/tables.list &&
git -C repo pack-refs &&
ls -1 repo/.git/reftable >table-files &&
@@ -408,7 +473,7 @@ test_expect_success "$command: auto compaction" '
# The tables should have been auto-compacted, and thus auto
# compaction should not have to do anything.
ls -1 .git/reftable >tables-expect &&
- test_line_count = 4 tables-expect &&
+ test_line_count = 3 tables-expect &&
git $command --auto &&
ls -1 .git/reftable >tables-actual &&
test_cmp tables-expect tables-actual &&
@@ -426,7 +491,7 @@ test_expect_success "$command: auto compaction" '
git branch B &&
git branch C &&
rm .git/reftable/*.lock &&
- test_line_count = 5 .git/reftable/tables.list &&
+ test_line_count = 4 .git/reftable/tables.list &&
git $command --auto &&
test_line_count = 1 .git/reftable/tables.list
@@ -450,26 +515,6 @@ test_expect_success 'pack-refs: does not prune non-table files' '
test_path_is_file repo/.git/reftable/garbage
'
-for umask in 002 022
-do
- test_expect_success POSIXPERM 'pack-refs: honors core.sharedRepository' '
- test_when_finished "rm -rf repo" &&
- (
- umask $umask &&
- git init --shared=true repo &&
- test_commit -C repo A &&
- test_line_count = 3 repo/.git/reftable/tables.list
- ) &&
- git -C repo pack-refs &&
- test_expect_perms "-rw-rw-r--" repo/.git/reftable/tables.list &&
- for table in repo/.git/reftable/*.ref
- do
- test_expect_perms "-rw-rw-r--" "$table" ||
- return 1
- done
- '
-done
-
test_expect_success 'packed-refs: writes are synced' '
test_when_finished "rm -rf repo" &&
git init repo &&
@@ -826,12 +871,16 @@ test_expect_success 'worktree: pack-refs in main repo packs main refs' '
test_when_finished "rm -rf repo worktree" &&
git init repo &&
test_commit -C repo A &&
+
+ GIT_TEST_REFTABLE_AUTOCOMPACTION=false \
git -C repo worktree add ../worktree &&
+ GIT_TEST_REFTABLE_AUTOCOMPACTION=false \
+ git -C worktree update-ref refs/worktree/per-worktree HEAD &&
- test_line_count = 3 repo/.git/worktrees/worktree/reftable/tables.list &&
- test_line_count = 4 repo/.git/reftable/tables.list &&
+ test_line_count = 4 repo/.git/worktrees/worktree/reftable/tables.list &&
+ test_line_count = 3 repo/.git/reftable/tables.list &&
git -C repo pack-refs &&
- test_line_count = 3 repo/.git/worktrees/worktree/reftable/tables.list &&
+ test_line_count = 4 repo/.git/worktrees/worktree/reftable/tables.list &&
test_line_count = 1 repo/.git/reftable/tables.list
'
@@ -839,13 +888,17 @@ test_expect_success 'worktree: pack-refs in worktree packs worktree refs' '
test_when_finished "rm -rf repo worktree" &&
git init repo &&
test_commit -C repo A &&
+
+ GIT_TEST_REFTABLE_AUTOCOMPACTION=false \
git -C repo worktree add ../worktree &&
+ GIT_TEST_REFTABLE_AUTOCOMPACTION=false \
+ git -C worktree update-ref refs/worktree/per-worktree HEAD &&
- test_line_count = 3 repo/.git/worktrees/worktree/reftable/tables.list &&
- test_line_count = 4 repo/.git/reftable/tables.list &&
+ test_line_count = 4 repo/.git/worktrees/worktree/reftable/tables.list &&
+ test_line_count = 3 repo/.git/reftable/tables.list &&
git -C worktree pack-refs &&
test_line_count = 1 repo/.git/worktrees/worktree/reftable/tables.list &&
- test_line_count = 4 repo/.git/reftable/tables.list
+ test_line_count = 3 repo/.git/reftable/tables.list
'
test_expect_success 'worktree: creating shared ref updates main stack' '
@@ -859,6 +912,7 @@ test_expect_success 'worktree: creating shared ref updates main stack' '
test_line_count = 1 repo/.git/worktrees/worktree/reftable/tables.list &&
test_line_count = 1 repo/.git/reftable/tables.list &&
+ GIT_TEST_REFTABLE_AUTOCOMPACTION=false \
git -C worktree update-ref refs/heads/shared HEAD &&
test_line_count = 1 repo/.git/worktrees/worktree/reftable/tables.list &&
test_line_count = 2 repo/.git/reftable/tables.list
diff --git a/t/t1016-compatObjectFormat.sh b/t/t1016-compatObjectFormat.sh
index 8132cd37b8..be3206a16f 100755
--- a/t/t1016-compatObjectFormat.sh
+++ b/t/t1016-compatObjectFormat.sh
@@ -79,7 +79,7 @@ commit2_oid () {
}
del_sigcommit () {
- local delete=$1
+ local delete="$1"
if test "$delete" = "sha256" ; then
local pattern="gpgsig-sha256"
@@ -91,8 +91,8 @@ del_sigcommit () {
del_sigtag () {
- local storage=$1
- local delete=$2
+ local storage="$1"
+ local delete="$2"
if test "$storage" = "$delete" ; then
local pattern="trailer"
@@ -181,7 +181,7 @@ done
cd "$base"
compare_oids () {
- test "$#" = 5 && { local PREREQ=$1; shift; } || PREREQ=
+ test "$#" = 5 && { local PREREQ="$1"; shift; } || PREREQ=
local type="$1"
local name="$2"
local sha1_oid="$3"
@@ -193,8 +193,8 @@ compare_oids () {
git --git-dir=repo-sha1/.git rev-parse --output-object-format=sha256 ${sha1_oid} > ${name}_sha1_sha256_found
git --git-dir=repo-sha256/.git rev-parse --output-object-format=sha1 ${sha256_oid} > ${name}_sha256_sha1_found
- local sha1_sha256_oid=$(cat ${name}_sha1_sha256_found)
- local sha256_sha1_oid=$(cat ${name}_sha256_sha1_found)
+ local sha1_sha256_oid="$(cat ${name}_sha1_sha256_found)"
+ local sha256_sha1_oid="$(cat ${name}_sha256_sha1_found)"
test_expect_success $PREREQ "Verify ${type} ${name}'s sha1 oid" '
git --git-dir=repo-sha256/.git rev-parse --output-object-format=sha1 ${sha256_oid} > ${name}_sha1 &&
diff --git a/t/t1400-update-ref.sh b/t/t1400-update-ref.sh
index 6ebc3ef945..ec3443cc87 100755
--- a/t/t1400-update-ref.sh
+++ b/t/t1400-update-ref.sh
@@ -622,7 +622,7 @@ test_expect_success 'stdin fails create with no ref' '
test_expect_success 'stdin fails create with no new value' '
echo "create $a" >stdin &&
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: create $a: missing <newvalue>" err
+ grep "fatal: create $a: missing <new-oid>" err
'
test_expect_success 'stdin fails create with too many arguments' '
@@ -640,7 +640,7 @@ test_expect_success 'stdin fails update with no ref' '
test_expect_success 'stdin fails update with no new value' '
echo "update $a" >stdin &&
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: update $a: missing <newvalue>" err
+ grep "fatal: update $a: missing <new-oid>" err
'
test_expect_success 'stdin fails update with too many arguments' '
@@ -765,21 +765,21 @@ test_expect_success 'stdin update ref fails with wrong old value' '
test_expect_success 'stdin update ref fails with bad old value' '
echo "update $c $m does-not-exist" >stdin &&
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: update $c: invalid <oldvalue>: does-not-exist" err &&
+ grep "fatal: update $c: invalid <old-oid>: does-not-exist" err &&
test_must_fail git rev-parse --verify -q $c
'
test_expect_success 'stdin create ref fails with bad new value' '
echo "create $c does-not-exist" >stdin &&
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: create $c: invalid <newvalue>: does-not-exist" err &&
+ grep "fatal: create $c: invalid <new-oid>: does-not-exist" err &&
test_must_fail git rev-parse --verify -q $c
'
test_expect_success 'stdin create ref fails with zero new value' '
echo "create $c " >stdin &&
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: create $c: zero <newvalue>" err &&
+ grep "fatal: create $c: zero <new-oid>" err &&
test_must_fail git rev-parse --verify -q $c
'
@@ -803,7 +803,7 @@ test_expect_success 'stdin delete ref fails with wrong old value' '
test_expect_success 'stdin delete ref fails with zero old value' '
echo "delete $a " >stdin &&
test_must_fail git update-ref --stdin <stdin 2>err &&
- grep "fatal: delete $a: zero <oldvalue>" err &&
+ grep "fatal: delete $a: zero <old-oid>" err &&
git rev-parse $m >expect &&
git rev-parse $a >actual &&
test_cmp expect actual
@@ -1027,7 +1027,7 @@ test_expect_success 'stdin -z fails create with no ref' '
test_expect_success 'stdin -z fails create with no new value' '
printf $F "create $a" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: create $a: unexpected end of input when reading <newvalue>" err
+ grep "fatal: create $a: unexpected end of input when reading <new-oid>" err
'
test_expect_success 'stdin -z fails create with too many arguments' '
@@ -1045,27 +1045,27 @@ test_expect_success 'stdin -z fails update with no ref' '
test_expect_success 'stdin -z fails update with too few args' '
printf $F "update $a" "$m" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: update $a: unexpected end of input when reading <oldvalue>" err
+ grep "fatal: update $a: unexpected end of input when reading <old-oid>" err
'
test_expect_success 'stdin -z emits warning with empty new value' '
git update-ref $a $m &&
printf $F "update $a" "" "" >stdin &&
git update-ref -z --stdin <stdin 2>err &&
- grep "warning: update $a: missing <newvalue>, treating as zero" err &&
+ grep "warning: update $a: missing <new-oid>, treating as zero" err &&
test_must_fail git rev-parse --verify -q $a
'
test_expect_success 'stdin -z fails update with no new value' '
printf $F "update $a" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: update $a: unexpected end of input when reading <newvalue>" err
+ grep "fatal: update $a: unexpected end of input when reading <new-oid>" err
'
test_expect_success 'stdin -z fails update with no old value' '
printf $F "update $a" "$m" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: update $a: unexpected end of input when reading <oldvalue>" err
+ grep "fatal: update $a: unexpected end of input when reading <old-oid>" err
'
test_expect_success 'stdin -z fails update with too many arguments' '
@@ -1083,7 +1083,7 @@ test_expect_success 'stdin -z fails delete with no ref' '
test_expect_success 'stdin -z fails delete with no old value' '
printf $F "delete $a" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: delete $a: unexpected end of input when reading <oldvalue>" err
+ grep "fatal: delete $a: unexpected end of input when reading <old-oid>" err
'
test_expect_success 'stdin -z fails delete with too many arguments' '
@@ -1101,7 +1101,7 @@ test_expect_success 'stdin -z fails verify with too many arguments' '
test_expect_success 'stdin -z fails verify with no old value' '
printf $F "verify $a" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: verify $a: unexpected end of input when reading <oldvalue>" err
+ grep "fatal: verify $a: unexpected end of input when reading <old-oid>" err
'
test_expect_success 'stdin -z fails option with unknown name' '
@@ -1160,7 +1160,7 @@ test_expect_success 'stdin -z update ref fails with wrong old value' '
test_expect_success 'stdin -z update ref fails with bad old value' '
printf $F "update $c" "$m" "does-not-exist" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: update $c: invalid <oldvalue>: does-not-exist" err &&
+ grep "fatal: update $c: invalid <old-oid>: does-not-exist" err &&
test_must_fail git rev-parse --verify -q $c
'
@@ -1178,14 +1178,14 @@ test_expect_success 'stdin -z create ref fails with bad new value' '
git update-ref -d "$c" &&
printf $F "create $c" "does-not-exist" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: create $c: invalid <newvalue>: does-not-exist" err &&
+ grep "fatal: create $c: invalid <new-oid>: does-not-exist" err &&
test_must_fail git rev-parse --verify -q $c
'
test_expect_success 'stdin -z create ref fails with empty new value' '
printf $F "create $c" "" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: create $c: missing <newvalue>" err &&
+ grep "fatal: create $c: missing <new-oid>" err &&
test_must_fail git rev-parse --verify -q $c
'
@@ -1209,7 +1209,7 @@ test_expect_success 'stdin -z delete ref fails with wrong old value' '
test_expect_success 'stdin -z delete ref fails with zero old value' '
printf $F "delete $a" "$Z" >stdin &&
test_must_fail git update-ref -z --stdin <stdin 2>err &&
- grep "fatal: delete $a: zero <oldvalue>" err &&
+ grep "fatal: delete $a: zero <old-oid>" err &&
git rev-parse $m >expect &&
git rev-parse $a >actual &&
test_cmp expect actual
diff --git a/t/t2020-checkout-detach.sh b/t/t2020-checkout-detach.sh
index bce284c297..8d90d02850 100755
--- a/t/t2020-checkout-detach.sh
+++ b/t/t2020-checkout-detach.sh
@@ -176,7 +176,10 @@ test_expect_success 'tracking count is accurate after orphan check' '
git config branch.child.merge refs/heads/main &&
git checkout child^ &&
git checkout child >stdout &&
- test_cmp expect stdout
+ test_cmp expect stdout &&
+
+ git checkout --detach child >stdout &&
+ test_grep ! "can be fast-forwarded\." stdout
'
test_expect_success 'no advice given for explicit detached head state' '
diff --git a/t/t2104-update-index-skip-worktree.sh b/t/t2104-update-index-skip-worktree.sh
index 0bab134d71..7ec7f30b44 100755
--- a/t/t2104-update-index-skip-worktree.sh
+++ b/t/t2104-update-index-skip-worktree.sh
@@ -11,27 +11,27 @@ TEST_PASSES_SANITIZE_LEAK=true
sane_unset GIT_TEST_SPLIT_INDEX
test_set_index_version () {
- GIT_INDEX_VERSION="$1"
- export GIT_INDEX_VERSION
+ GIT_INDEX_VERSION="$1"
+ export GIT_INDEX_VERSION
}
test_set_index_version 3
-cat >expect.full <<EOF
-H 1
-H 2
-H sub/1
-H sub/2
-EOF
+test_expect_success 'setup' '
+ cat >expect.full <<-\EOF &&
+ H 1
+ H 2
+ H sub/1
+ H sub/2
+ EOF
-cat >expect.skip <<EOF
-S 1
-H 2
-S sub/1
-H sub/2
-EOF
+ cat >expect.skip <<-\EOF &&
+ S 1
+ H 2
+ S sub/1
+ H sub/2
+ EOF
-test_expect_success 'setup' '
mkdir sub &&
touch ./1 ./2 sub/1 sub/2 &&
git add 1 2 sub/1 sub/2 &&
diff --git a/t/t2200-add-update.sh b/t/t2200-add-update.sh
index c01492f33f..df235ac306 100755
--- a/t/t2200-add-update.sh
+++ b/t/t2200-add-update.sh
@@ -65,6 +65,16 @@ test_expect_success 'update did not touch untracked files' '
test_must_be_empty out
'
+test_expect_success 'error out when passing untracked path' '
+ git reset --hard &&
+ echo content >>baz &&
+ echo content >>top &&
+ test_must_fail git add -u baz top 2>err &&
+ test_grep -e "error: pathspec .baz. did not match any file(s) known to git" err &&
+ git diff --cached --name-only >actual &&
+ test_must_be_empty actual
+'
+
test_expect_success 'cache tree has not been corrupted' '
git ls-files -s |
diff --git a/t/t2400-worktree-add.sh b/t/t2400-worktree-add.sh
index c28c04133c..ba320dc417 100755
--- a/t/t2400-worktree-add.sh
+++ b/t/t2400-worktree-add.sh
@@ -427,7 +427,7 @@ test_expect_success '"add" worktree with orphan branch, lock, and reason' '
# Note: Quoted arguments containing spaces are not supported.
test_wt_add_orphan_hint () {
local context="$1" &&
- local use_branch=$2 &&
+ local use_branch="$2" &&
shift 2 &&
local opts="$*" &&
test_expect_success "'worktree add' show orphan hint in bad/orphan HEAD w/ $context" '
diff --git a/t/t3428-rebase-signoff.sh b/t/t3428-rebase-signoff.sh
index e1b1e94764..1bebd1ce74 100755
--- a/t/t3428-rebase-signoff.sh
+++ b/t/t3428-rebase-signoff.sh
@@ -8,47 +8,45 @@ This test runs git rebase --signoff and make sure that it works.
TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
-# A simple file to commit
-cat >file <<EOF
-a
-EOF
+test_expect_success 'setup' '
+ git commit --allow-empty -m "Initial empty commit" &&
+ test_commit first file a &&
+
+ ident="$GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>" &&
-# Expected commit message for initial commit after rebase --signoff
-cat >expected-initial-signed <<EOF
-Initial empty commit
+ # Expected commit message for initial commit after rebase --signoff
+ cat >expected-initial-signed <<-EOF &&
+ Initial empty commit
-Signed-off-by: $(git var GIT_COMMITTER_IDENT | sed -e "s/>.*/>/")
-EOF
+ Signed-off-by: $ident
+ EOF
-# Expected commit message after rebase --signoff
-cat >expected-signed <<EOF
-first
+ # Expected commit message after rebase --signoff
+ cat >expected-signed <<-EOF &&
+ first
-Signed-off-by: $(git var GIT_COMMITTER_IDENT | sed -e "s/>.*/>/")
-EOF
+ Signed-off-by: $ident
+ EOF
-# Expected commit message after rebase without --signoff (or with --no-signoff)
-cat >expected-unsigned <<EOF
-first
-EOF
+ # Expected commit message after rebase without --signoff (or with --no-signoff)
+ cat >expected-unsigned <<-EOF &&
+ first
+ EOF
+ git config alias.rbs "rebase --signoff"
+'
# We configure an alias to do the rebase --signoff so that
# on the next subtest we can show that --no-signoff overrides the alias
-test_expect_success 'rebase --signoff adds a sign-off line' '
- git commit --allow-empty -m "Initial empty commit" &&
- git add file && git commit -m first &&
- git config alias.rbs "rebase --signoff" &&
- git rbs HEAD^ &&
- git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
- test_cmp expected-signed actual
+test_expect_success 'rebase --apply --signoff adds a sign-off line' '
+ git rbs --apply HEAD^ &&
+ test_commit_message HEAD expected-signed
'
test_expect_success 'rebase --no-signoff does not add a sign-off line' '
git commit --amend -m "first" &&
git rbs --no-signoff HEAD^ &&
- git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
- test_cmp expected-unsigned actual
+ test_commit_message HEAD expected-unsigned
'
test_expect_success 'rebase --exec --signoff adds a sign-off line' '
@@ -56,30 +54,25 @@ test_expect_success 'rebase --exec --signoff adds a sign-off line' '
git commit --amend -m "first" &&
git rebase --exec "touch exec" --signoff HEAD^ &&
test_path_is_file exec &&
- git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
- test_cmp expected-signed actual
+ test_commit_message HEAD expected-signed
'
test_expect_success 'rebase --root --signoff adds a sign-off line' '
git commit --amend -m "first" &&
git rebase --root --keep-empty --signoff &&
- git cat-file commit HEAD^ | sed -e "1,/^\$/d" >actual &&
- test_cmp expected-initial-signed actual &&
- git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
- test_cmp expected-signed actual
+ test_commit_message HEAD^ expected-initial-signed &&
+ test_commit_message HEAD expected-signed
'
test_expect_success 'rebase -i --signoff fails' '
git commit --amend -m "first" &&
git rebase -i --signoff HEAD^ &&
- git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
- test_cmp expected-signed actual
+ test_commit_message HEAD expected-signed
'
test_expect_success 'rebase -m --signoff fails' '
git commit --amend -m "first" &&
git rebase -m --signoff HEAD^ &&
- git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
- test_cmp expected-signed actual
+ test_commit_message HEAD expected-signed
'
test_done
diff --git a/t/t4011-diff-symlink.sh b/t/t4011-diff-symlink.sh
index d7a5f7ae78..bc8ba88719 100755
--- a/t/t4011-diff-symlink.sh
+++ b/t/t4011-diff-symlink.sh
@@ -13,13 +13,13 @@ TEST_PASSES_SANITIZE_LEAK=true
# Print the short OID of a symlink with the given name.
symlink_oid () {
- local oid=$(printf "%s" "$1" | git hash-object --stdin) &&
+ local oid="$(printf "%s" "$1" | git hash-object --stdin)" &&
git rev-parse --short "$oid"
}
# Print the short OID of the given file.
short_oid () {
- local oid=$(git hash-object "$1") &&
+ local oid="$(git hash-object "$1")" &&
git rev-parse --short "$oid"
}
diff --git a/t/t4018/csharp-exclude-assignments b/t/t4018/csharp-exclude-assignments
new file mode 100644
index 0000000000..239f312963
--- /dev/null
+++ b/t/t4018/csharp-exclude-assignments
@@ -0,0 +1,20 @@
+class Example
+{
+ string Method(int RIGHT)
+ {
+ var constantAssignment = "test";
+ var methodAssignment = MethodCall();
+ var multiLineMethodAssignment = MethodCall(
+ );
+ var multiLine = "first"
+ + MethodCall()
+ +
+ ( MethodCall()
+ )
+ + MethodCall();
+
+ return "ChangeMe";
+ }
+
+ string MethodCall(int a = 0, int b = 0) => "test";
+}
diff --git a/t/t4018/csharp-exclude-control-statements b/t/t4018/csharp-exclude-control-statements
new file mode 100644
index 0000000000..3a0f404ee1
--- /dev/null
+++ b/t/t4018/csharp-exclude-control-statements
@@ -0,0 +1,34 @@
+class Example
+{
+ string Method(int RIGHT)
+ {
+ if (false)
+ {
+ return "out";
+ }
+ else { }
+ if (true) MethodCall(
+ );
+ else MethodCall(
+ );
+ switch ("test")
+ {
+ case "one":
+ return MethodCall(
+ );
+ case "two":
+ break;
+ }
+ (int, int) tuple = (1, 4);
+ switch (tuple)
+ {
+ case (1, 4):
+ MethodCall();
+ break;
+ }
+
+ return "ChangeMe";
+ }
+
+ string MethodCall(int a = 0, int b = 0) => "test";
+}
diff --git a/t/t4018/csharp-exclude-exceptions b/t/t4018/csharp-exclude-exceptions
new file mode 100644
index 0000000000..b1e64256cf
--- /dev/null
+++ b/t/t4018/csharp-exclude-exceptions
@@ -0,0 +1,29 @@
+using System;
+
+class Example
+{
+ string Method(int RIGHT)
+ {
+ try
+ {
+ throw new Exception("fail");
+ }
+ catch (Exception)
+ {
+ }
+ finally
+ {
+ }
+ try { } catch (Exception) {}
+ try
+ {
+ throw GetException(
+ );
+ }
+ catch (Exception) { }
+
+ return "ChangeMe";
+ }
+
+ Exception GetException() => new Exception("fail");
+}
diff --git a/t/t4018/csharp-exclude-generic-method-calls b/t/t4018/csharp-exclude-generic-method-calls
new file mode 100644
index 0000000000..31af546665
--- /dev/null
+++ b/t/t4018/csharp-exclude-generic-method-calls
@@ -0,0 +1,12 @@
+class Example
+{
+ string Method(int RIGHT)
+ {
+ GenericMethodCall<int, int>(
+ );
+
+ return "ChangeMe";
+ }
+
+ string GenericMethodCall<T, T2>() => "test";
+}
diff --git a/t/t4018/csharp-exclude-init-dispose b/t/t4018/csharp-exclude-init-dispose
new file mode 100644
index 0000000000..2bc8e194e2
--- /dev/null
+++ b/t/t4018/csharp-exclude-init-dispose
@@ -0,0 +1,22 @@
+using System;
+
+class Example : IDisposable
+{
+ string Method(int RIGHT)
+ {
+ new Example();
+ new Example(
+ );
+ new Example { };
+ using (this)
+ {
+ }
+ var def =
+ this is default(
+ Example);
+
+ return "ChangeMe";
+ }
+
+ public void Dispose() {}
+}
diff --git a/t/t4018/csharp-exclude-iterations b/t/t4018/csharp-exclude-iterations
new file mode 100644
index 0000000000..960aa182ae
--- /dev/null
+++ b/t/t4018/csharp-exclude-iterations
@@ -0,0 +1,26 @@
+using System.Linq;
+
+class Example
+{
+ string Method(int RIGHT)
+ {
+ do { } while (true);
+ do MethodCall(
+ ); while (true);
+ while (true);
+ while (true) {
+ break;
+ }
+ for (int i = 0; i < 10; ++i)
+ {
+ }
+ foreach (int i in Enumerable.Range(0, 10))
+ {
+ }
+ int[] numbers = [5, 4, 1, 3, 9, 8, 6, 7, 2, 0];
+
+ return "ChangeMe";
+ }
+
+ string MethodCall(int a = 0, int b = 0) => "test";
+}
diff --git a/t/t4018/csharp-exclude-method-calls b/t/t4018/csharp-exclude-method-calls
new file mode 100644
index 0000000000..51e2dc2040
--- /dev/null
+++ b/t/t4018/csharp-exclude-method-calls
@@ -0,0 +1,20 @@
+class Example
+{
+ string Method(int RIGHT)
+ {
+ MethodCall();
+ MethodCall(1, 2);
+ MethodCall(
+ 1, 2);
+ MethodCall(
+ 1, 2,
+ 3);
+ MethodCall(
+ 1, MethodCall(),
+ 2);
+
+ return "ChangeMe";
+ }
+
+ int MethodCall(int a = 0, int b = 0, int c = 0) => 42;
+}
diff --git a/t/t4018/csharp-exclude-other b/t/t4018/csharp-exclude-other
new file mode 100644
index 0000000000..4d5581cf3e
--- /dev/null
+++ b/t/t4018/csharp-exclude-other
@@ -0,0 +1,18 @@
+class Example
+{
+ string Method(int RIGHT)
+ {
+ lock (this)
+ {
+ }
+ unsafe
+ {
+ byte[] bytes = [1, 2, 3];
+ fixed (byte* pointerToFirst = bytes)
+ {
+ }
+ }
+
+ return "ChangeMe";
+ }
+}
diff --git a/t/t4018/csharp-method b/t/t4018/csharp-method
new file mode 100644
index 0000000000..16b367aca2
--- /dev/null
+++ b/t/t4018/csharp-method
@@ -0,0 +1,10 @@
+class Example
+{
+ string Method(int RIGHT)
+ {
+ // Filler
+ // Filler
+
+ return "ChangeMe";
+ }
+}
diff --git a/t/t4018/csharp-method-array b/t/t4018/csharp-method-array
new file mode 100644
index 0000000000..1126de8201
--- /dev/null
+++ b/t/t4018/csharp-method-array
@@ -0,0 +1,10 @@
+class Example
+{
+ string[] Method(int RIGHT)
+ {
+ // Filler
+ // Filler
+
+ return ["ChangeMe"];
+ }
+}
diff --git a/t/t4018/csharp-method-explicit b/t/t4018/csharp-method-explicit
new file mode 100644
index 0000000000..5a710116cc
--- /dev/null
+++ b/t/t4018/csharp-method-explicit
@@ -0,0 +1,12 @@
+using System;
+
+class Example : IDisposable
+{
+ void IDisposable.Dispose() // RIGHT
+ {
+ // Filler
+ // Filler
+
+ // ChangeMe
+ }
+}
diff --git a/t/t4018/csharp-method-generics b/t/t4018/csharp-method-generics
new file mode 100644
index 0000000000..b3216bfb2a
--- /dev/null
+++ b/t/t4018/csharp-method-generics
@@ -0,0 +1,11 @@
+class Example<T1, T2>
+{
+ Example<int, string> Method<TA, TB>(TA RIGHT, TB b)
+ {
+ // Filler
+ // Filler
+
+ // ChangeMe
+ return null;
+ }
+}
diff --git a/t/t4018/csharp-method-generics-alternate-spaces b/t/t4018/csharp-method-generics-alternate-spaces
new file mode 100644
index 0000000000..9583621743
--- /dev/null
+++ b/t/t4018/csharp-method-generics-alternate-spaces
@@ -0,0 +1,11 @@
+class Example<T1, T2>
+{
+ Example<int,string> Method<TA ,TB>(TA RIGHT, TB b)
+ {
+ // Filler
+ // Filler
+
+ // ChangeMe
+ return null;
+ }
+}
diff --git a/t/t4018/csharp-method-modifiers b/t/t4018/csharp-method-modifiers
new file mode 100644
index 0000000000..caefa8ee99
--- /dev/null
+++ b/t/t4018/csharp-method-modifiers
@@ -0,0 +1,13 @@
+using System.Threading.Tasks;
+
+class Example
+{
+ static internal async Task Method(int RIGHT)
+ {
+ // Filler
+ // Filler
+
+ // ChangeMe
+ await Task.Delay(1);
+ }
+}
diff --git a/t/t4018/csharp-method-multiline b/t/t4018/csharp-method-multiline
new file mode 100644
index 0000000000..3983ff42f5
--- /dev/null
+++ b/t/t4018/csharp-method-multiline
@@ -0,0 +1,10 @@
+class Example
+{
+ string Method_RIGHT(
+ int a,
+ int b,
+ int c)
+ {
+ return "ChangeMe";
+ }
+}
diff --git a/t/t4018/csharp-method-params b/t/t4018/csharp-method-params
new file mode 100644
index 0000000000..3f00410ba1
--- /dev/null
+++ b/t/t4018/csharp-method-params
@@ -0,0 +1,10 @@
+class Example
+{
+ string Method(int RIGHT, int b, int c = 42)
+ {
+ // Filler
+ // Filler
+
+ return "ChangeMe";
+ }
+}
diff --git a/t/t4018/csharp-method-special-chars b/t/t4018/csharp-method-special-chars
new file mode 100644
index 0000000000..e6c7bc01a1
--- /dev/null
+++ b/t/t4018/csharp-method-special-chars
@@ -0,0 +1,11 @@
+class @Some_Type
+{
+ @Some_Type @Method_With_Underscore(int RIGHT)
+ {
+ // Filler
+ // Filler
+
+ // ChangeMe
+ return new @Some_Type();
+ }
+}
diff --git a/t/t4018/csharp-method-with-spacing b/t/t4018/csharp-method-with-spacing
new file mode 100644
index 0000000000..233bb976cc
--- /dev/null
+++ b/t/t4018/csharp-method-with-spacing
@@ -0,0 +1,10 @@
+class Example
+{
+ string Method ( int RIGHT )
+ {
+ // Filler
+ // Filler
+
+ return "ChangeMe";
+ }
+}
diff --git a/t/t4018/csharp-property b/t/t4018/csharp-property
new file mode 100644
index 0000000000..e56dfce34c
--- /dev/null
+++ b/t/t4018/csharp-property
@@ -0,0 +1,11 @@
+class Example
+{
+ public bool RIGHT
+ {
+ get { return true; }
+ set
+ {
+ // ChangeMe
+ }
+ }
+}
diff --git a/t/t4018/csharp-property-braces-same-line b/t/t4018/csharp-property-braces-same-line
new file mode 100644
index 0000000000..608131d3d3
--- /dev/null
+++ b/t/t4018/csharp-property-braces-same-line
@@ -0,0 +1,10 @@
+class Example
+{
+ public bool RIGHT {
+ get { return true; }
+ set
+ {
+ // ChangeMe
+ }
+ }
+}
diff --git a/t/t4210-log-i18n.sh b/t/t4210-log-i18n.sh
index d2dfcf164e..75216f19ce 100755
--- a/t/t4210-log-i18n.sh
+++ b/t/t4210-log-i18n.sh
@@ -64,7 +64,7 @@ test_expect_success 'log --grep does not find non-reencoded values (latin1)' '
'
triggers_undefined_behaviour () {
- local engine=$1
+ local engine="$1"
case $engine in
fixed)
@@ -85,7 +85,7 @@ triggers_undefined_behaviour () {
}
mismatched_git_log () {
- local pattern=$1
+ local pattern="$1"
LC_ALL=$is_IS_locale git log --encoding=ISO-8859-1 --format=%s \
--grep=$pattern
diff --git a/t/t7300-clean.sh b/t/t7300-clean.sh
index 1f7201eb60..0aae0dee67 100755
--- a/t/t7300-clean.sh
+++ b/t/t7300-clean.sh
@@ -5,6 +5,7 @@
test_description='git clean basic tests'
+TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
git config clean.requireForce no
diff --git a/t/t7501-commit-basic-functionality.sh b/t/t7501-commit-basic-functionality.sh
index bced44a0fc..cc12f99f11 100755
--- a/t/t7501-commit-basic-functionality.sh
+++ b/t/t7501-commit-basic-functionality.sh
@@ -101,22 +101,8 @@ test_expect_success 'fail to commit untracked file (even with --include/--only)'
test_must_fail git commit --only -m "baz" baz 2>err &&
test_grep -e "$error" err &&
- # TODO: as for --include, the below command will fail because
- # nothing is staged. If something was staged, it would not fail
- # even though the provided pathspec does not match any tracked
- # path. (However, the untracked paths that match the pathspec are
- # not committed and only the staged changes get committed.)
- # In either cases, no error is returned to stderr like in (--only
- # and without --only/--include) cases. In a similar manner,
- # "git add -u baz" also does not error out.
- #
- # Therefore, the below test is just to document the current behavior
- # and is not an endorsement to the current behavior, and we may
- # want to fix this. And when that happens, this test should be
- # updated accordingly.
-
test_must_fail git commit --include -m "baz" baz 2>err &&
- test_must_be_empty err
+ test_grep -e "$error" err
'
test_expect_success 'setup: non-initial commit' '
diff --git a/t/t7700-repack.sh b/t/t7700-repack.sh
index 94f9f4a1da..127efe99f8 100755
--- a/t/t7700-repack.sh
+++ b/t/t7700-repack.sh
@@ -629,6 +629,7 @@ test_expect_success '--write-midx with preferred bitmap tips' '
git log --format="create refs/tags/%s/%s %H" HEAD >refs &&
git update-ref --stdin <refs &&
+ GIT_TEST_MULTI_PACK_INDEX=0 \
git repack --write-midx --write-bitmap-index &&
test_path_is_file $midx &&
test_path_is_file $midx-$(midx_checksum $objdir).bitmap &&
@@ -749,6 +750,7 @@ test_expect_success '--write-midx with --pack-kept-objects' '
keep="$objdir/pack/pack-$one.keep" &&
touch "$keep" &&
+ GIT_TEST_MULTI_PACK_INDEX=0 \
git repack --write-midx --write-bitmap-index --geometric=2 -d \
--pack-kept-objects &&
diff --git a/t/t9300-fast-import.sh b/t/t9300-fast-import.sh
index 60e30fed3c..1e68426852 100755
--- a/t/t9300-fast-import.sh
+++ b/t/t9300-fast-import.sh
@@ -1059,30 +1059,33 @@ test_expect_success 'M: rename subdirectory to new subdirectory' '
compare_diff_raw expect actual
'
-test_expect_success 'M: rename root to subdirectory' '
- cat >input <<-INPUT_END &&
- commit refs/heads/M4
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- rename root
- COMMIT
+for root in '""' ''
+do
+ test_expect_success "M: rename root ($root) to subdirectory" '
+ cat >input <<-INPUT_END &&
+ commit refs/heads/M4
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ rename root
+ COMMIT
- from refs/heads/M2^0
- R "" sub
+ from refs/heads/M2^0
+ R $root sub
- INPUT_END
+ INPUT_END
- cat >expect <<-EOF &&
- :100644 100644 $oldf $oldf R100 file2/oldf sub/file2/oldf
- :100755 100755 $f4id $f4id R100 file4 sub/file4
- :100755 100755 $newf $newf R100 i/am/new/to/you sub/i/am/new/to/you
- :100755 100755 $f6id $f6id R100 newdir/exec.sh sub/newdir/exec.sh
- :100644 100644 $f5id $f5id R100 newdir/interesting sub/newdir/interesting
- EOF
- git fast-import <input &&
- git diff-tree -M -r M4^ M4 >actual &&
- compare_diff_raw expect actual
-'
+ cat >expect <<-EOF &&
+ :100644 100644 $oldf $oldf R100 file2/oldf sub/file2/oldf
+ :100755 100755 $f4id $f4id R100 file4 sub/file4
+ :100755 100755 $newf $newf R100 i/am/new/to/you sub/i/am/new/to/you
+ :100755 100755 $f6id $f6id R100 newdir/exec.sh sub/newdir/exec.sh
+ :100644 100644 $f5id $f5id R100 newdir/interesting sub/newdir/interesting
+ EOF
+ git fast-import <input &&
+ git diff-tree -M -r M4^ M4 >actual &&
+ compare_diff_raw expect actual
+ '
+done
###
### series N
@@ -1259,49 +1262,52 @@ test_expect_success PIPE 'N: empty directory reads as missing' '
test_cmp expect actual
'
-test_expect_success 'N: copy root directory by tree hash' '
- cat >expect <<-EOF &&
- :100755 000000 $newf $zero D file3/newf
- :100644 000000 $oldf $zero D file3/oldf
- EOF
- root=$(git rev-parse refs/heads/branch^0^{tree}) &&
- cat >input <<-INPUT_END &&
- commit refs/heads/N6
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- copy root directory by tree hash
- COMMIT
-
- from refs/heads/branch^0
- M 040000 $root ""
- INPUT_END
- git fast-import <input &&
- git diff-tree -C --find-copies-harder -r N4 N6 >actual &&
- compare_diff_raw expect actual
-'
+for root in '""' ''
+do
+ test_expect_success "N: copy root ($root) by tree hash" '
+ cat >expect <<-EOF &&
+ :100755 000000 $newf $zero D file3/newf
+ :100644 000000 $oldf $zero D file3/oldf
+ EOF
+ root_tree=$(git rev-parse refs/heads/branch^0^{tree}) &&
+ cat >input <<-INPUT_END &&
+ commit refs/heads/N6
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ copy root directory by tree hash
+ COMMIT
-test_expect_success 'N: copy root by path' '
- cat >expect <<-EOF &&
- :100755 100755 $newf $newf C100 file2/newf oldroot/file2/newf
- :100644 100644 $oldf $oldf C100 file2/oldf oldroot/file2/oldf
- :100755 100755 $f4id $f4id C100 file4 oldroot/file4
- :100755 100755 $f6id $f6id C100 newdir/exec.sh oldroot/newdir/exec.sh
- :100644 100644 $f5id $f5id C100 newdir/interesting oldroot/newdir/interesting
- EOF
- cat >input <<-INPUT_END &&
- commit refs/heads/N-copy-root-path
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- copy root directory by (empty) path
- COMMIT
+ from refs/heads/branch^0
+ M 040000 $root_tree $root
+ INPUT_END
+ git fast-import <input &&
+ git diff-tree -C --find-copies-harder -r N4 N6 >actual &&
+ compare_diff_raw expect actual
+ '
+
+ test_expect_success "N: copy root ($root) by path" '
+ cat >expect <<-EOF &&
+ :100755 100755 $newf $newf C100 file2/newf oldroot/file2/newf
+ :100644 100644 $oldf $oldf C100 file2/oldf oldroot/file2/oldf
+ :100755 100755 $f4id $f4id C100 file4 oldroot/file4
+ :100755 100755 $f6id $f6id C100 newdir/exec.sh oldroot/newdir/exec.sh
+ :100644 100644 $f5id $f5id C100 newdir/interesting oldroot/newdir/interesting
+ EOF
+ cat >input <<-INPUT_END &&
+ commit refs/heads/N-copy-root-path
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ copy root directory by (empty) path
+ COMMIT
- from refs/heads/branch^0
- C "" oldroot
- INPUT_END
- git fast-import <input &&
- git diff-tree -C --find-copies-harder -r branch N-copy-root-path >actual &&
- compare_diff_raw expect actual
-'
+ from refs/heads/branch^0
+ C $root oldroot
+ INPUT_END
+ git fast-import <input &&
+ git diff-tree -C --find-copies-harder -r branch N-copy-root-path >actual &&
+ compare_diff_raw expect actual
+ '
+done
test_expect_success 'N: delete directory by copying' '
cat >expect <<-\EOF &&
@@ -1431,98 +1437,102 @@ test_expect_success 'N: reject foo/ syntax in ls argument' '
INPUT_END
'
-test_expect_success 'N: copy to root by id and modify' '
- echo "hello, world" >expect.foo &&
- echo hello >expect.bar &&
- git fast-import <<-SETUP_END &&
- commit refs/heads/N7
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- hello, tree
- COMMIT
-
- deleteall
- M 644 inline foo/bar
- data <<EOF
- hello
- EOF
- SETUP_END
-
- tree=$(git rev-parse --verify N7:) &&
- git fast-import <<-INPUT_END &&
- commit refs/heads/N8
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- copy to root by id and modify
- COMMIT
+for root in '""' ''
+do
+ test_expect_success "N: copy to root ($root) by id and modify" '
+ echo "hello, world" >expect.foo &&
+ echo hello >expect.bar &&
+ git fast-import <<-SETUP_END &&
+ commit refs/heads/N7
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ hello, tree
+ COMMIT
- M 040000 $tree ""
- M 644 inline foo/foo
- data <<EOF
- hello, world
- EOF
- INPUT_END
- git show N8:foo/foo >actual.foo &&
- git show N8:foo/bar >actual.bar &&
- test_cmp expect.foo actual.foo &&
- test_cmp expect.bar actual.bar
-'
+ deleteall
+ M 644 inline foo/bar
+ data <<EOF
+ hello
+ EOF
+ SETUP_END
-test_expect_success 'N: extract subtree' '
- branch=$(git rev-parse --verify refs/heads/branch^{tree}) &&
- cat >input <<-INPUT_END &&
- commit refs/heads/N9
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- extract subtree branch:newdir
- COMMIT
+ tree=$(git rev-parse --verify N7:) &&
+ git fast-import <<-INPUT_END &&
+ commit refs/heads/N8
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ copy to root by id and modify
+ COMMIT
- M 040000 $branch ""
- C "newdir" ""
- INPUT_END
- git fast-import <input &&
- git diff --exit-code branch:newdir N9
-'
+ M 040000 $tree $root
+ M 644 inline foo/foo
+ data <<EOF
+ hello, world
+ EOF
+ INPUT_END
+ git show N8:foo/foo >actual.foo &&
+ git show N8:foo/bar >actual.bar &&
+ test_cmp expect.foo actual.foo &&
+ test_cmp expect.bar actual.bar
+ '
+
+ test_expect_success "N: extract subtree to the root ($root)" '
+ branch=$(git rev-parse --verify refs/heads/branch^{tree}) &&
+ cat >input <<-INPUT_END &&
+ commit refs/heads/N9
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ extract subtree branch:newdir
+ COMMIT
-test_expect_success 'N: modify subtree, extract it, and modify again' '
- echo hello >expect.baz &&
- echo hello, world >expect.qux &&
- git fast-import <<-SETUP_END &&
- commit refs/heads/N10
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- hello, tree
- COMMIT
+ M 040000 $branch $root
+ C "newdir" $root
+ INPUT_END
+ git fast-import <input &&
+ git diff --exit-code branch:newdir N9
+ '
+
+ test_expect_success "N: modify subtree, extract it to the root ($root), and modify again" '
+ echo hello >expect.baz &&
+ echo hello, world >expect.qux &&
+ git fast-import <<-SETUP_END &&
+ commit refs/heads/N10
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ hello, tree
+ COMMIT
- deleteall
- M 644 inline foo/bar/baz
- data <<EOF
- hello
- EOF
- SETUP_END
+ deleteall
+ M 644 inline foo/bar/baz
+ data <<EOF
+ hello
+ EOF
+ SETUP_END
- tree=$(git rev-parse --verify N10:) &&
- git fast-import <<-INPUT_END &&
- commit refs/heads/N11
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- copy to root by id and modify
- COMMIT
+ tree=$(git rev-parse --verify N10:) &&
+ git fast-import <<-INPUT_END &&
+ commit refs/heads/N11
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ copy to root by id and modify
+ COMMIT
- M 040000 $tree ""
- M 100644 inline foo/bar/qux
- data <<EOF
- hello, world
- EOF
- R "foo" ""
- C "bar/qux" "bar/quux"
- INPUT_END
- git show N11:bar/baz >actual.baz &&
- git show N11:bar/qux >actual.qux &&
- git show N11:bar/quux >actual.quux &&
- test_cmp expect.baz actual.baz &&
- test_cmp expect.qux actual.qux &&
- test_cmp expect.qux actual.quux'
+ M 040000 $tree $root
+ M 100644 inline foo/bar/qux
+ data <<EOF
+ hello, world
+ EOF
+ R "foo" $root
+ C "bar/qux" "bar/quux"
+ INPUT_END
+ git show N11:bar/baz >actual.baz &&
+ git show N11:bar/qux >actual.qux &&
+ git show N11:bar/quux >actual.quux &&
+ test_cmp expect.baz actual.baz &&
+ test_cmp expect.qux actual.qux &&
+ test_cmp expect.qux actual.quux
+ '
+done
###
### series O
@@ -2142,6 +2152,7 @@ test_expect_success 'Q: deny note on empty branch' '
EOF
test_must_fail git fast-import <input
'
+
###
### series R (feature and option)
###
@@ -2790,7 +2801,7 @@ test_expect_success 'R: blob appears only once' '
'
###
-### series S
+### series S (mark and path parsing)
###
#
# Make sure missing spaces and EOLs after mark references
@@ -3060,21 +3071,283 @@ test_expect_success 'S: ls with garbage after sha1 must fail' '
test_grep "space after tree-ish" err
'
+#
+# Path parsing
+#
+# There are two sorts of ways a path can be parsed, depending on whether it is
+# the last field on the line. Additionally, ls without a <dataref> has a special
+# case. Test every occurrence of <path> in the grammar against every error case.
+# Paths for the root (empty strings) are tested elsewhere.
+#
+
+#
+# Valid paths at the end of a line: filemodify, filedelete, filecopy (dest),
+# filerename (dest), and ls.
+#
+# commit :301 from root -- modify hello.c (for setup)
+# commit :302 from :301 -- modify $path
+# commit :303 from :302 -- delete $path
+# commit :304 from :301 -- copy hello.c $path
+# commit :305 from :301 -- rename hello.c $path
+# ls :305 $path
+#
+test_path_eol_success () {
+ local test="$1" path="$2" unquoted_path="$3"
+ test_expect_success "S: paths at EOL with $test must work" '
+ test_when_finished "git branch -D S-path-eol" &&
+
+ git fast-import --export-marks=marks.out <<-EOF >out 2>err &&
+ blob
+ mark :401
+ data <<BLOB
+ hello world
+ BLOB
+
+ blob
+ mark :402
+ data <<BLOB
+ hallo welt
+ BLOB
+
+ commit refs/heads/S-path-eol
+ mark :301
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ initial commit
+ COMMIT
+ M 100644 :401 hello.c
+
+ commit refs/heads/S-path-eol
+ mark :302
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit filemodify
+ COMMIT
+ from :301
+ M 100644 :402 $path
+
+ commit refs/heads/S-path-eol
+ mark :303
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit filedelete
+ COMMIT
+ from :302
+ D $path
+
+ commit refs/heads/S-path-eol
+ mark :304
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit filecopy dest
+ COMMIT
+ from :301
+ C hello.c $path
+
+ commit refs/heads/S-path-eol
+ mark :305
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit filerename dest
+ COMMIT
+ from :301
+ R hello.c $path
+
+ ls :305 $path
+ EOF
+
+ commit_m=$(grep :302 marks.out | cut -d\  -f2) &&
+ commit_d=$(grep :303 marks.out | cut -d\  -f2) &&
+ commit_c=$(grep :304 marks.out | cut -d\  -f2) &&
+ commit_r=$(grep :305 marks.out | cut -d\  -f2) &&
+ blob1=$(grep :401 marks.out | cut -d\  -f2) &&
+ blob2=$(grep :402 marks.out | cut -d\  -f2) &&
+
+ (
+ printf "100644 blob $blob2\t$unquoted_path\n" &&
+ printf "100644 blob $blob1\thello.c\n"
+ ) | sort >tree_m.exp &&
+ git ls-tree $commit_m | sort >tree_m.out &&
+ test_cmp tree_m.exp tree_m.out &&
+
+ printf "100644 blob $blob1\thello.c\n" >tree_d.exp &&
+ git ls-tree $commit_d >tree_d.out &&
+ test_cmp tree_d.exp tree_d.out &&
+
+ (
+ printf "100644 blob $blob1\t$unquoted_path\n" &&
+ printf "100644 blob $blob1\thello.c\n"
+ ) | sort >tree_c.exp &&
+ git ls-tree $commit_c | sort >tree_c.out &&
+ test_cmp tree_c.exp tree_c.out &&
+
+ printf "100644 blob $blob1\t$unquoted_path\n" >tree_r.exp &&
+ git ls-tree $commit_r >tree_r.out &&
+ test_cmp tree_r.exp tree_r.out &&
+
+ test_cmp out tree_r.exp
+ '
+}
+
+test_path_eol_success 'quoted spaces' '" hello world.c "' ' hello world.c '
+test_path_eol_success 'unquoted spaces' ' hello world.c ' ' hello world.c '
+test_path_eol_success 'octal escapes' '"\150\151\056\143"' 'hi.c'
+
+#
+# Valid paths before a space: filecopy (source) and filerename (source).
+#
+# commit :301 from root -- modify $path (for setup)
+# commit :302 from :301 -- copy $path hello2.c
+# commit :303 from :301 -- rename $path hello2.c
+#
+test_path_space_success () {
+ local test="$1" path="$2" unquoted_path="$3"
+ test_expect_success "S: paths before space with $test must work" '
+ test_when_finished "git branch -D S-path-space" &&
+
+ git fast-import --export-marks=marks.out <<-EOF 2>err &&
+ blob
+ mark :401
+ data <<BLOB
+ hello world
+ BLOB
+
+ commit refs/heads/S-path-space
+ mark :301
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ initial commit
+ COMMIT
+ M 100644 :401 $path
+
+ commit refs/heads/S-path-space
+ mark :302
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit filecopy source
+ COMMIT
+ from :301
+ C $path hello2.c
+
+ commit refs/heads/S-path-space
+ mark :303
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit filerename source
+ COMMIT
+ from :301
+ R $path hello2.c
+
+ EOF
+
+ commit_c=$(grep :302 marks.out | cut -d\  -f2) &&
+ commit_r=$(grep :303 marks.out | cut -d\  -f2) &&
+ blob=$(grep :401 marks.out | cut -d\  -f2) &&
+
+ (
+ printf "100644 blob $blob\t$unquoted_path\n" &&
+ printf "100644 blob $blob\thello2.c\n"
+ ) | sort >tree_c.exp &&
+ git ls-tree $commit_c | sort >tree_c.out &&
+ test_cmp tree_c.exp tree_c.out &&
+
+ printf "100644 blob $blob\thello2.c\n" >tree_r.exp &&
+ git ls-tree $commit_r >tree_r.out &&
+ test_cmp tree_r.exp tree_r.out
+ '
+}
+
+test_path_space_success 'quoted spaces' '" hello world.c "' ' hello world.c '
+test_path_space_success 'no unquoted spaces' 'hello_world.c' 'hello_world.c'
+test_path_space_success 'octal escapes' '"\150\151\056\143"' 'hi.c'
+
+#
+# Test a single commit change with an invalid path. Run it with all occurrences
+# of <path> in the grammar against all error kinds.
+#
+test_path_fail () {
+ local change="$1" what="$2" prefix="$3" path="$4" suffix="$5" err_grep="$6"
+ test_expect_success "S: $change with $what must fail" '
+ test_must_fail git fast-import <<-EOF 2>err &&
+ blob
+ mark :1
+ data <<BLOB
+ hello world
+ BLOB
+
+ commit refs/heads/S-path-fail
+ mark :2
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit setup
+ COMMIT
+ M 100644 :1 hello.c
+
+ commit refs/heads/S-path-fail
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ commit with bad path
+ COMMIT
+ from :2
+ $prefix$path$suffix
+ EOF
+
+ test_grep "$err_grep" err
+ '
+}
+
+test_path_base_fail () {
+ local change="$1" prefix="$2" field="$3" suffix="$4"
+ test_path_fail "$change" 'unclosed " in '"$field" "$prefix" '"hello.c' "$suffix" "Invalid $field"
+ test_path_fail "$change" "invalid escape in quoted $field" "$prefix" '"hello\xff"' "$suffix" "Invalid $field"
+ test_path_fail "$change" "escaped NUL in quoted $field" "$prefix" '"hello\000"' "$suffix" "NUL in $field"
+}
+test_path_eol_quoted_fail () {
+ local change="$1" prefix="$2" field="$3"
+ test_path_base_fail "$change" "$prefix" "$field" ''
+ test_path_fail "$change" "garbage after quoted $field" "$prefix" '"hello.c"' 'x' "Garbage after $field"
+ test_path_fail "$change" "space after quoted $field" "$prefix" '"hello.c"' ' ' "Garbage after $field"
+}
+test_path_eol_fail () {
+ local change="$1" prefix="$2" field="$3"
+ test_path_eol_quoted_fail "$change" "$prefix" "$field"
+}
+test_path_space_fail () {
+ local change="$1" prefix="$2" field="$3"
+ test_path_base_fail "$change" "$prefix" "$field" ' world.c'
+ test_path_fail "$change" "missing space after quoted $field" "$prefix" '"hello.c"' 'x world.c' "Missing space after $field"
+ test_path_fail "$change" "missing space after unquoted $field" "$prefix" 'hello.c' '' "Missing space after $field"
+}
+
+test_path_eol_fail filemodify 'M 100644 :1 ' path
+test_path_eol_fail filedelete 'D ' path
+test_path_space_fail filecopy 'C ' source
+test_path_eol_fail filecopy 'C hello.c ' dest
+test_path_space_fail filerename 'R ' source
+test_path_eol_fail filerename 'R hello.c ' dest
+test_path_eol_fail 'ls (in commit)' 'ls :2 ' path
+
+# When 'ls' has no <dataref>, the <path> must be quoted.
+test_path_eol_quoted_fail 'ls (without dataref in commit)' 'ls ' path
+
###
### series T (ls)
###
# Setup is carried over from series S.
-test_expect_success 'T: ls root tree' '
- sed -e "s/Z\$//" >expect <<-EOF &&
- 040000 tree $(git rev-parse S^{tree}) Z
- EOF
- sha1=$(git rev-parse --verify S) &&
- git fast-import --import-marks=marks <<-EOF >actual &&
- ls $sha1 ""
- EOF
- test_cmp expect actual
-'
+for root in '""' ''
+do
+ test_expect_success "T: ls root ($root) tree" '
+ sed -e "s/Z\$//" >expect <<-EOF &&
+ 040000 tree $(git rev-parse S^{tree}) Z
+ EOF
+ sha1=$(git rev-parse --verify S) &&
+ git fast-import --import-marks=marks <<-EOF >actual &&
+ ls $sha1 $root
+ EOF
+ test_cmp expect actual
+ '
+done
test_expect_success 'T: delete branch' '
git branch to-delete &&
@@ -3176,30 +3449,33 @@ test_expect_success 'U: validate directory delete result' '
compare_diff_raw expect actual
'
-test_expect_success 'U: filedelete root succeeds' '
- cat >input <<-INPUT_END &&
- commit refs/heads/U
- committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
- data <<COMMIT
- must succeed
- COMMIT
- from refs/heads/U^0
- D ""
+for root in '""' ''
+do
+ test_expect_success "U: filedelete root ($root) succeeds" '
+ cat >input <<-INPUT_END &&
+ commit refs/heads/U-delete-root
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data <<COMMIT
+ must succeed
+ COMMIT
+ from refs/heads/U^0
+ D $root
- INPUT_END
+ INPUT_END
- git fast-import <input
-'
+ git fast-import <input
+ '
-test_expect_success 'U: validate root delete result' '
- cat >expect <<-EOF &&
- :100644 000000 $f7id $ZERO_OID D hello.c
- EOF
+ test_expect_success "U: validate root ($root) delete result" '
+ cat >expect <<-EOF &&
+ :100644 000000 $f7id $ZERO_OID D hello.c
+ EOF
- git diff-tree -M -r U^1 U >actual &&
+ git diff-tree -M -r U U-delete-root >actual &&
- compare_diff_raw expect actual
-'
+ compare_diff_raw expect actual
+ '
+done
###
### series V (checkpoint)
diff --git a/t/test-lib-functions.sh b/t/test-lib-functions.sh
index 2eccf100c0..862d80c974 100644
--- a/t/test-lib-functions.sh
+++ b/t/test-lib-functions.sh
@@ -385,7 +385,7 @@ test_commit () {
shift
done &&
indir=${indir:+"$indir"/} &&
- local file=${2:-"$1.t"} &&
+ local file="${2:-"$1.t"}" &&
if test -n "$append"
then
$echo "${3-$1}" >>"$indir$file"
@@ -1748,7 +1748,7 @@ test_oid () {
# Insert a slash into an object ID so it can be used to reference a location
# under ".git/objects". For example, "deadbeef..." becomes "de/adbeef..".
test_oid_to_path () {
- local basename=${1#??}
+ local basename="${1#??}"
echo "${1%$basename}/$basename"
}
@@ -1765,7 +1765,7 @@ test_parse_ls_tree_oids () {
# Choose a port number based on the test script's number and store it in
# the given variable name, unless that variable already contains a number.
test_set_port () {
- local var=$1 port
+ local var="$1" port
if test $# -ne 1 || test -z "$var"
then
@@ -1840,7 +1840,7 @@ test_subcommand () {
shift
fi
- local expr=$(printf '"%s",' "$@")
+ local expr="$(printf '"%s",' "$@")"
expr="${expr%,}"
if test -n "$negate"
@@ -1930,7 +1930,7 @@ test_readlink () {
# An optional increment to the magic timestamp may be specified as second
# argument.
test_set_magic_mtime () {
- local inc=${2:-0} &&
+ local inc="${2:-0}" &&
local mtime=$((1234567890 + $inc)) &&
test-tool chmtime =$mtime "$1" &&
test_is_magic_mtime "$1" $inc
@@ -1943,7 +1943,7 @@ test_set_magic_mtime () {
# argument. Usually, this should be the same increment which was used for
# the associated test_set_magic_mtime.
test_is_magic_mtime () {
- local inc=${2:-0} &&
+ local inc="${2:-0}" &&
local mtime=$((1234567890 + $inc)) &&
echo $mtime >.git/test-mtime-expect &&
test-tool chmtime --get "$1" >.git/test-mtime-actual &&
diff --git a/t/unit-tests/t-prio-queue.c b/t/unit-tests/t-prio-queue.c
index 5358346361..7a4e5780e1 100644
--- a/t/unit-tests/t-prio-queue.c
+++ b/t/unit-tests/t-prio-queue.c
@@ -66,43 +66,26 @@ static void test_prio_queue(int *input, size_t input_size,
clear_prio_queue(&pq);
}
-#define BASIC_INPUT 2, 6, 3, 10, 9, 5, 7, 4, 5, 8, 1, DUMP
-#define BASIC_RESULT 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10
-
-#define MIXED_PUT_GET_INPUT 6, 2, 4, GET, 5, 3, GET, GET, 1, DUMP
-#define MIXED_PUT_GET_RESULT 2, 3, 4, 1, 5, 6
-
-#define EMPTY_QUEUE_INPUT 1, 2, GET, GET, GET, 1, 2, GET, GET, GET
-#define EMPTY_QUEUE_RESULT 1, 2, MISSING, 1, 2, MISSING
-
-#define STACK_INPUT STACK, 8, 1, 5, 4, 6, 2, 3, DUMP
-#define STACK_RESULT 3, 2, 6, 4, 5, 1, 8
-
-#define REVERSE_STACK_INPUT STACK, 1, 2, 3, 4, 5, 6, REVERSE, DUMP
-#define REVERSE_STACK_RESULT 1, 2, 3, 4, 5, 6
-
-#define TEST_INPUT(INPUT, RESULT, name) \
- static void test_##name(void) \
-{ \
- int input[] = {INPUT}; \
- int result[] = {RESULT}; \
- test_prio_queue(input, ARRAY_SIZE(input), \
- result, ARRAY_SIZE(result)); \
-}
-
-TEST_INPUT(BASIC_INPUT, BASIC_RESULT, basic)
-TEST_INPUT(MIXED_PUT_GET_INPUT, MIXED_PUT_GET_RESULT, mixed)
-TEST_INPUT(EMPTY_QUEUE_INPUT, EMPTY_QUEUE_RESULT, empty)
-TEST_INPUT(STACK_INPUT, STACK_RESULT, stack)
-TEST_INPUT(REVERSE_STACK_INPUT, REVERSE_STACK_RESULT, reverse)
+#define TEST_INPUT(input, result) \
+ test_prio_queue(input, ARRAY_SIZE(input), result, ARRAY_SIZE(result))
int cmd_main(int argc, const char **argv)
{
- TEST(test_basic(), "prio-queue works for basic input");
- TEST(test_mixed(), "prio-queue works for mixed put & get commands");
- TEST(test_empty(), "prio-queue works when queue is empty");
- TEST(test_stack(), "prio-queue works when used as a LIFO stack");
- TEST(test_reverse(), "prio-queue works when LIFO stack is reversed");
+ TEST(TEST_INPUT(((int []){ 2, 6, 3, 10, 9, 5, 7, 4, 5, 8, 1, DUMP }),
+ ((int []){ 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10 })),
+ "prio-queue works for basic input");
+ TEST(TEST_INPUT(((int []){ 6, 2, 4, GET, 5, 3, GET, GET, 1, DUMP }),
+ ((int []){ 2, 3, 4, 1, 5, 6 })),
+ "prio-queue works for mixed put & get commands");
+ TEST(TEST_INPUT(((int []){ 1, 2, GET, GET, GET, 1, 2, GET, GET, GET }),
+ ((int []){ 1, 2, MISSING, 1, 2, MISSING })),
+ "prio-queue works when queue is empty");
+ TEST(TEST_INPUT(((int []){ STACK, 8, 1, 5, 4, 6, 2, 3, DUMP }),
+ ((int []){ 3, 2, 6, 4, 5, 1, 8 })),
+ "prio-queue works when used as a LIFO stack");
+ TEST(TEST_INPUT(((int []){ STACK, 1, 2, 3, 4, 5, 6, REVERSE, DUMP }),
+ ((int []){ 1, 2, 3, 4, 5, 6 })),
+ "prio-queue works when LIFO stack is reversed");
return test_done();
}
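Editorial illustration: the rewrite above drops the named input/result macros and instead passes unnamed compound-literal arrays straight into TEST_INPUT(), where ARRAY_SIZE() can still see the array type; the extra parentheses around each literal keep its commas from being split into separate macro arguments. A minimal stand-alone sketch of the same pattern, with hypothetical names:

    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
    #define CHECK_SUM(arr, expected) \
        check_sum(arr, ARRAY_SIZE(arr), expected)

    static void check_sum(const int *v, size_t n, int expected)
    {
        int sum = 0;

        for (size_t i = 0; i < n; i++)
            sum += v[i];
        printf("%s: got %d, want %d\n",
               sum == expected ? "ok" : "FAIL", sum, expected);
    }

    int main(void)
    {
        /* Extra parentheses keep the literal's commas out of the macro arguments. */
        CHECK_SUM(((int[]){ 1, 2, 3 }), 6);
        CHECK_SUM(((int[]){ 10, -4, 1 }), 7);
        return 0;
    }
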
diff --git a/usage.c b/usage.c
index 09f0ed509b..7a2f7805f5 100644
--- a/usage.c
+++ b/usage.c
@@ -19,8 +19,11 @@ static void vreportf(const char *prefix, const char *err, va_list params)
}
memcpy(msg, prefix, prefix_len);
p = msg + prefix_len;
- if (vsnprintf(p, pend - p, err, params) < 0)
+ if (vsnprintf(p, pend - p, err, params) < 0) {
+ fprintf(stderr, _("error: unable to format message: %s\n"),
+ err);
*p = '\0'; /* vsnprintf() failed, clip at prefix */
+ }
for (; p != pend - 1 && *p; p++) {
if (iscntrl(*p) && *p != '\t' && *p != '\n')
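Editorial illustration: the added branch handles the case where vsnprintf() itself fails (for example on an encoding error) by reporting the raw format string to stderr and clipping the buffer back to just the prefix, which the loop that follows then sanitizes as usual. A stand-alone sketch of that fallback, using only stdio (names are illustrative, not git's):

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    static void report(char *buf, size_t len, const char *prefix,
                       const char *fmt, ...)
    {
        size_t plen = strlen(prefix);
        va_list ap;

        if (plen >= len)
            return;
        memcpy(buf, prefix, plen);

        va_start(ap, fmt);
        if (vsnprintf(buf + plen, len - plen, fmt, ap) < 0) {
            /* Formatting failed: report the raw format and clip at the prefix. */
            fprintf(stderr, "error: unable to format message: %s\n", fmt);
            buf[plen] = '\0';
        }
        va_end(ap);
    }

    int main(void)
    {
        char msg[256];

        report(msg, sizeof(msg), "fatal: ", "%s not found", "refs/heads/topic");
        puts(msg);  /* "fatal: refs/heads/topic not found" */
        return 0;
    }
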
diff --git a/userdiff.c b/userdiff.c
index 92ef649c99..82bc76b910 100644
--- a/userdiff.c
+++ b/userdiff.c
@@ -90,12 +90,48 @@ PATTERNS("cpp",
"|\\.[0-9][0-9]*([Ee][-+]?[0-9]+)?[fFlL]?"
"|[-+*/<>%&^|=!]=|--|\\+\\+|<<=?|>>=?|&&|\\|\\||::|->\\*?|\\.\\*|<=>"),
PATTERNS("csharp",
- /* Keywords */
- "!^[ \t]*(do|while|for|if|else|instanceof|new|return|switch|case|throw|catch|using)\n"
- /* Methods and constructors */
- "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe|async)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
- /* Properties */
- "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[@._[:alnum:]]+)[ \t]*$\n"
+ /*
+ * Jump over reserved keywords which are illegal method names, but which
+ * can be followed by parentheses without special characters in between,
+ * making them look like methods.
+ */
+ "!(^|[ \t]+)" /* Start of line or whitespace. */
+ "(do|while|for|foreach|if|else|new|default|return|switch|case|throw"
+ "|catch|using|lock|fixed)"
+ "([ \t(]+|$)\n" /* Whitespace, "(", or end of line. */
+ /*
+ * Methods/constructors:
+	 * The strategy is to identify a minimum of two groups (any combination
+	 * of keywords/type/name) before the opening parenthesis, while rejecting
+	 * trailing characters that are normally only used in ordinary statements.
+ */
+ "^[ \t]*" /* Remove leading whitespace. */
+ "(" /* Start chunk header capture. */
+ "(" /* First group. */
+ "[][[:alnum:]@_.]" /* Name. */
+ "(<[][[:alnum:]@_, \t<>]+>)?" /* Optional generic parameters. */
+ ")+"
+ "([ \t]+" /* Subsequent groups, prepended with space. */
+ "([][[:alnum:]@_.](<[][[:alnum:]@_, \t<>]+>)?)+"
+ ")+"
+ "[ \t]*" /* Optional space before parameters start. */
+ "\\(" /* Start of method parameters. */
+ "[^;]*" /* Allow complex parameters, but exclude statements (;). */
+ ")$\n" /* Close chunk header capture. */
+ /*
+ * Properties:
+	 * As with methods, expect a minimum of two groups. Unlike methods,
+	 * properties have no parameter list, so the vast majority of properties
+	 * long enough to be worth showing a chunk header for do not include
+	 * "=:;,()" on the line where they are defined.
+ */
+ "^[ \t]*("
+ "([][[:alnum:]@_.](<[][[:alnum:]@_, \t<>]+>)?)+"
+ "([ \t]+"
+ "([][[:alnum:]@_.](<[][[:alnum:]@_, \t<>]+>)?)+"
+ ")+" /* Up to here, same as methods regex. */
+ "[^;=:,()]*" /* Compared to methods, no parameter list allowed. */
+ ")$\n"
/* Type definitions */
"^[ \t]*(((static|public|internal|private|protected|new|unsafe|sealed|abstract|partial)[ \t]+)*(class|enum|interface|struct|record)[ \t]+.*)$\n"
/* Namespace */